
[12/39] drm/i915: Added scheduler hook into i915_gem_request_notify()

Message ID 1448278774-31376-13-git-send-email-John.C.Harrison@Intel.com (mailing list archive)
State New, archived

Commit Message

John Harrison Nov. 23, 2015, 11:39 a.m. UTC
From: John Harrison <John.C.Harrison@Intel.com>

The scheduler needs to know when requests have completed so that it
can keep its own internal state up to date and can submit new requests
to the hardware from its queue.

v2: Updated due to changes in request handling. The operation is now
reversed from before: rather than the scheduler being in control of
completion events, the request code itself now is. The scheduler
merely receives a notification for each completed request and can
then optionally ask for its worker thread to be woken up once all
completion processing has finished.
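
In outline, the notification path walks the engine's signal list,
tells the scheduler about each completed request, and then issues a
single scheduler wakeup once the whole batch has been handled. A
minimal sketch of that flow is below (only the two i915_scheduler_*()
calls are real; the function name and request_is_complete() check are
illustrative placeholders, and locking, tracing and the
cancelled-request path are omitted - the actual change is in
i915_gem_request_notify() in the patch):

static void notify_completions(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_request *req, *req_next;
	bool wake_sched = false;

	list_for_each_entry_safe(req, req_next,
				 &ring->fence_signal_list, signal_link) {
		if (!request_is_complete(req))	/* placeholder seqno check */
			break;

		list_del_init(&req->signal_link);

		/* Notify the scheduler before the request can be retired. */
		wake_sched |= i915_scheduler_notify_request(req);

		fence_signal_locked(&req->fence);
	}

	/* One scheduler wakeup covers the whole batch of completions. */
	if (wake_sched)
		i915_scheduler_wakeup(ring->dev);
}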

Change-Id: I149250a8f9382586514ca324aba1c53063b83e19
For: VIZ-1587
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h       |  2 ++
 drivers/gpu/drm/i915/i915_gem.c       | 16 ++++++++++++++++
 drivers/gpu/drm/i915/i915_scheduler.c | 28 ++++++++++++++++++----------
 drivers/gpu/drm/i915/i915_scheduler.h |  1 +
 4 files changed, 37 insertions(+), 10 deletions(-)

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 23aed32..8c576fe 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2262,6 +2262,8 @@  struct drm_i915_gem_request {
 	/** process identifier submitting this request */
 	struct pid *pid;
 
+	struct i915_scheduler_queue_entry	*scheduler_qe;
+
 	/**
 	 * The ELSP only accepts two elements at a time, so we queue
 	 * context/tail pairs on a given queue (ring->execlist_queue) until the
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d38e013..8a2fc7c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2785,6 +2785,7 @@  void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
 {
 	struct drm_i915_gem_request *req, *req_next;
 	unsigned long flags;
+	bool wake_sched = false;
 	u32 seqno;
 
 	if (list_empty(&ring->fence_signal_list)) {
@@ -2813,6 +2814,14 @@  void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
 		 */
 		list_del_init(&req->signal_link);
 
+		/*
+		 * NB: Must notify the scheduler before signalling
+		 * the node. Otherwise the node can get retired first
+		 * and call scheduler_clean() while the scheduler
+		 * thinks it is still active.
+		 */
+		wake_sched |= i915_scheduler_notify_request(req);
+
 		if (!req->cancelled) {
 			fence_signal_locked(&req->fence);
 			trace_i915_gem_request_complete(req);
@@ -2829,6 +2838,13 @@  void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
 
 	if (!fence_locked)
 		spin_unlock_irqrestore(&ring->fence_lock, flags);
+
+	/* Necessary? Or does the fence_signal() call do an implicit wakeup? */
+	wake_up_all(&ring->irq_queue);
+
+	/* Final scheduler processing after all individual updates are done. */
+	if (wake_sched)
+		i915_scheduler_wakeup(ring->dev);
 }
 
 static const char *i915_gem_request_get_driver_name(struct fence *req_fence)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 25e7ade..dd9c710 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -120,6 +120,9 @@  int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
 	node->stamp  = jiffies;
 	i915_gem_request_reference(node->params.request);
 
+	BUG_ON(node->params.request->scheduler_qe);
+	node->params.request->scheduler_qe = node;
+
 	/* Need to determine the number of incomplete entries in the list as
 	 * that will be the maximum size of the dependency list.
 	 *
@@ -319,14 +322,16 @@  static void i915_scheduler_node_kill(struct i915_scheduler_queue_entry *node)
  * code has mapped it back to a request and will mark that request complete.
  * It also calls this function to notify the scheduler about the completion
  * so the scheduler's node can be updated appropriately.
- * Returns true if the request is scheduler managed, false if not.
+ * Returns true if the request is scheduler managed, false if not. The return
+ * value is combined for all freshly completed requests and if any were true
+ * then i915_scheduler_wakeup() is called so the scheduler can do further
+ * processing (submit more work) at the end.
  */
 bool i915_scheduler_notify_request(struct drm_i915_gem_request *req)
 {
 	struct drm_i915_private *dev_priv  = to_i915(req->ring->dev);
 	struct i915_scheduler   *scheduler = dev_priv->scheduler;
-	/* XXX: Need to map back from request to node */
-	struct i915_scheduler_queue_entry *node = NULL;
+	struct i915_scheduler_queue_entry *node = req->scheduler_qe;
 	unsigned long       flags;
 
 	if (!node)
@@ -344,16 +349,18 @@  bool i915_scheduler_notify_request(struct drm_i915_gem_request *req)
 
 	spin_unlock_irqrestore(&scheduler->lock, flags);
 
-	/*
-	 * XXX: If the in-flight list is now empty then new work should be
-	 * submitted. However, this function is called from interrupt context
-	 * and thus cannot acquire mutex locks and other such things that are
-	 * necessary for fresh submission.
-	 */
-
 	return true;
 }
 
+/*
+ * Called at the end of seqno interrupt processing if any request has
+ * completed that corresponds to a scheduler node.
+ */
+void i915_scheduler_wakeup(struct drm_device *dev)
+{
+	/* XXX: Need to call i915_scheduler_remove() via work handler. */
+}
+
 int i915_scheduler_remove(struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -468,6 +475,7 @@  int i915_scheduler_remove(struct intel_engine_cs *ring)
 			i915_gem_execbuff_release_batch_obj(node->params.batch_obj);
 
 		/* Free everything that is owned by the node: */
+		node->params.request->scheduler_qe = NULL;
 		i915_gem_request_unreference(node->params.request);
 		kfree(node->params.cliprects);
 		kfree(node->dep_list);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 9736b8d..a0b38b8 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -89,5 +89,6 @@  int         i915_scheduler_closefile(struct drm_device *dev,
 				     struct drm_file *file);
 int         i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe);
 bool        i915_scheduler_notify_request(struct drm_i915_gem_request *req);
+void        i915_scheduler_wakeup(struct drm_device *dev);
 
 #endif  /* _I915_SCHEDULER_H_ */
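
For reference, the new i915_scheduler_wakeup() is left as a stub
because it is called from the seqno interrupt path, where mutexes
cannot be taken; the XXX comment indicates the intent is to punt the
heavier i915_scheduler_remove() work to process context. One possible
shape, purely as an illustration (the completion_work field, the work
handler and the scheduler->dev_priv back-pointer are assumptions, not
part of this patch):

void i915_scheduler_wakeup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_scheduler *scheduler = dev_priv->scheduler;

	/* Interrupt-safe: just kick the driver workqueue. */
	queue_work(dev_priv->wq, &scheduler->completion_work);
}

static void i915_scheduler_completion_work(struct work_struct *work)
{
	struct i915_scheduler *scheduler =
		container_of(work, struct i915_scheduler, completion_work);
	struct intel_engine_cs *ring;
	int i;

	/* Process context: safe to take mutexes and submit fresh work. */
	for_each_ring(ring, scheduler->dev_priv, i)
		i915_scheduler_remove(ring);
}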