diff mbox

[RFC,17/39] drm/i915: Hook scheduler node clean up into retire requests

Message ID 1437143628-6329-18-git-send-email-John.C.Harrison@Intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

John Harrison July 17, 2015, 2:33 p.m. UTC
From: John Harrison <John.C.Harrison@Intel.com>

The scheduler keeps its own lock on various DRM objects in order to guarantee
safe access long after the original execbuff IOCTL has completed. This is
especially important when pre-emption is enabled as the batch buffer might need
to be submitted to the hardware multiple times. This patch hooks the cleanup of
these locks into the request retire function. The request can only be retired
after it has completed on the hardware and thus is no longer eligible for
re-submission. Thus there is no point holding on to the locks beyond that time.

For: VIZ-1587
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c       |  3 +++
 drivers/gpu/drm/i915/i915_scheduler.c | 51 ++++++++++++++++++++++++-----------
 drivers/gpu/drm/i915/i915_scheduler.h |  1 +
 3 files changed, 39 insertions(+), 16 deletions(-)
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 77a3b27..cb5af5d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1405,6 +1405,9 @@  static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	if (!list_empty(&request->signal_list))
 		request->cancelled = true;
 
+	if (request->scheduler_qe)
+		i915_gem_scheduler_clean_node(request->scheduler_qe);
+
 	i915_gem_request_unreference(request);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index f5fa968..df2e27f 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -432,6 +432,38 @@  int i915_scheduler_handle_irq(struct intel_engine_cs *ring)
 	return 0;
 }
 
+void i915_gem_scheduler_clean_node(struct i915_scheduler_queue_entry *node)
+{
+	uint32_t i;
+
+	if (WARN_ON(!I915_SQS_IS_COMPLETE(node)))
+		return;
+
+	if (node->params.batch_obj) {
+		/* The batch buffer must be unpinned before it is unreferenced
+		 * otherwise the unpin fails with a missing vma!? */
+		if (node->params.dispatch_flags & I915_DISPATCH_SECURE)
+			i915_gem_execbuff_release_batch_obj(node->params.batch_obj);
+
+		node->params.batch_obj = NULL;
+	}
+
+	/* Release the locked buffers: */
+	for (i = 0; i < node->num_objs; i++) {
+		drm_gem_object_unreference(
+				    &node->saved_objects[i].obj->base);
+	}
+	kfree(node->saved_objects);
+	node->saved_objects = NULL;
+	node->num_objs = 0;
+
+	/* Context too: */
+	if (node->params.ctx) {
+		i915_gem_context_unreference(node->params.ctx);
+		node->params.ctx = NULL;
+	}
+}
+
 static int i915_scheduler_remove(struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -441,7 +473,7 @@  static int i915_scheduler_remove(struct intel_engine_cs *ring)
 	int                 flying = 0, queued = 0;
 	int                 ret = 0;
 	bool                do_submit;
-	uint32_t            i, min_seqno;
+	uint32_t            min_seqno;
 	struct list_head    remove;
 
 	if (list_empty(&scheduler->node_queue[ring->id]))
@@ -535,21 +567,8 @@  static int i915_scheduler_remove(struct intel_engine_cs *ring)
 		node = list_first_entry(&remove, typeof(*node), link);
 		list_del(&node->link);
 
-		/* The batch buffer must be unpinned before it is unreferenced
-		 * otherwise the unpin fails with a missing vma!? */
-		if (node->params.dispatch_flags & I915_DISPATCH_SECURE)
-			i915_gem_execbuff_release_batch_obj(node->params.batch_obj);
-
-		/* Release the locked buffers: */
-		for (i = 0; i < node->num_objs; i++) {
-			drm_gem_object_unreference(
-					    &node->saved_objects[i].obj->base);
-		}
-		kfree(node->saved_objects);
-
-		/* Context too: */
-		if (node->params.ctx)
-			i915_gem_context_unreference(node->params.ctx);
+		/* Free up all the DRM object references */
+		i915_gem_scheduler_clean_node(node);
 
 		/* And anything else owned by the node: */
 		node->params.request->scheduler_qe = NULL;
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 15878a4..73c5e7d 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -88,6 +88,7 @@  bool        i915_scheduler_is_enabled(struct drm_device *dev);
 int         i915_scheduler_init(struct drm_device *dev);
 int         i915_scheduler_closefile(struct drm_device *dev,
 				     struct drm_file *file);
+void        i915_gem_scheduler_clean_node(struct i915_scheduler_queue_entry *node);
 int         i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe);
 int         i915_scheduler_handle_irq(struct intel_engine_cs *ring);
 void        i915_gem_scheduler_work_handler(struct work_struct *work);