@@ -2262,6 +2262,8 @@ struct drm_i915_gem_request {
/** process identifier submitting this request */
struct pid *pid;
+ struct i915_scheduler_queue_entry *scheduler_qe;
+
/**
* The ELSP only accepts two elements at a time, so we queue
* context/tail pairs on a given queue (ring->execlist_queue) until the
@@ -2785,6 +2785,7 @@ void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
{
struct drm_i915_gem_request *req, *req_next;
unsigned long flags;
+ bool wake_sched = false;
u32 seqno;
if (list_empty(&ring->fence_signal_list)) {
@@ -2813,6 +2814,14 @@ void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
*/
list_del_init(&req->signal_link);
+ /*
+ * NB: Must notify the scheduler before signalling
+ * the node. Otherwise the node can get retired first
+ * and call scheduler_clean() while the scheduler
+ * thinks it is still active.
+ */
+ wake_sched |= i915_scheduler_notify_request(req);
+
if (!req->cancelled) {
fence_signal_locked(&req->fence);
trace_i915_gem_request_complete(req);
@@ -2829,6 +2838,13 @@ void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
if (!fence_locked)
spin_unlock_irqrestore(&ring->fence_lock, flags);
+
+ /* Necessary? Or does the fence_signal() call do an implicit wakeup? */
+ wake_up_all(&ring->irq_queue);
+
+ /* Final scheduler processing after all individual updates are done. */
+ if (wake_sched)
+ i915_scheduler_wakeup(ring->dev);
}
static const char *i915_gem_request_get_driver_name(struct fence *req_fence)
@@ -120,6 +120,9 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
node->stamp = jiffies;
i915_gem_request_reference(node->params.request);
+ BUG_ON(node->params.request->scheduler_qe);
+ node->params.request->scheduler_qe = node;
+
/* Need to determine the number of incomplete entries in the list as
* that will be the maximum size of the dependency list.
*
@@ -319,14 +322,16 @@ static void i915_scheduler_node_kill(struct i915_scheduler_queue_entry *node)
* code has mapped it back to a request and will mark that request complete.
* It also calls this function to notify the scheduler about the completion
* so the scheduler's node can be updated appropriately.
- * Returns true if the request is scheduler managed, false if not.
+ * Returns true if the request is scheduler managed, false if not. The return
+ * value is combined for all freshly completed requests and if any were true
+ * then i915_scheduler_wakeup() is called so the scheduler can do further
+ * processing (submit more work) at the end.
*/
bool i915_scheduler_notify_request(struct drm_i915_gem_request *req)
{
struct drm_i915_private *dev_priv = to_i915(req->ring->dev);
struct i915_scheduler *scheduler = dev_priv->scheduler;
- /* XXX: Need to map back from request to node */
- struct i915_scheduler_queue_entry *node = NULL;
+ struct i915_scheduler_queue_entry *node = req->scheduler_qe;
unsigned long flags;
if (!node)
@@ -344,16 +349,18 @@ bool i915_scheduler_notify_request(struct drm_i915_gem_request *req)
spin_unlock_irqrestore(&scheduler->lock, flags);
- /*
- * XXX: If the in-flight list is now empty then new work should be
- * submitted. However, this function is called from interrupt context
- * and thus cannot acquire mutex locks and other such things that are
- * necessary for fresh submission.
- */
-
return true;
}
+/*
+ * Called at the end of seqno interrupt processing if any completed
+ * request corresponds to a scheduler node; may run in interrupt context.
+ */
+void i915_scheduler_wakeup(struct drm_device *dev)
+{
+	/* XXX: Stub — should queue i915_scheduler_remove() via a work handler. */
+}
+
int i915_scheduler_remove(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -468,6 +475,7 @@ int i915_scheduler_remove(struct intel_engine_cs *ring)
i915_gem_execbuff_release_batch_obj(node->params.batch_obj);
/* Free everything that is owned by the node: */
+ node->params.request->scheduler_qe = NULL;
i915_gem_request_unreference(node->params.request);
kfree(node->params.cliprects);
kfree(node->dep_list);
@@ -89,5 +89,6 @@ int i915_scheduler_closefile(struct drm_device *dev,
struct drm_file *file);
int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe);
bool i915_scheduler_notify_request(struct drm_i915_gem_request *req);
+void i915_scheduler_wakeup(struct drm_device *dev);
#endif /* _I915_SCHEDULER_H_ */