
[CI,3/5] drm/i915: Wrap port cancellation into a function

Message ID 20170922124307.10914-3-mika.kuoppala@intel.com (mailing list archive)
State New, archived

Commit Message

Mika Kuoppala Sept. 22, 2017, 12:43 p.m. UTC
On the reset and wedged paths, we want to release the requests
that are tied to the ports and then mark the ports as unset.
Introduce a function for this.
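
As a rough, self-contained sketch of the pattern being factored out
(release the reference held by each ELSP port, then clear the port
array so every port reads as unset) — the sketch_* names and the
simplified port layout below are stand-ins for illustration only, not
the real i915 types or helpers:

/*
 * Simplified stand-alone sketch of what the new helper consolidates.
 * The real code walks struct execlist_port entries and uses
 * port_request()/i915_gem_request_put(); here both are reduced to a
 * plain pointer array and a refcount decrement.
 */
#include <string.h>

#define NUM_PORTS 2

struct sketch_request {
	int refcount;
};

struct sketch_execlists {
	struct sketch_request *port[NUM_PORTS];
};

static void sketch_request_put(struct sketch_request *rq)
{
	/* Tolerate unset ports, mirroring the original loop that puts
	 * every port whether or not a request is tracked in it. */
	if (rq)
		rq->refcount--;
}

static void sketch_cancel_port_requests(struct sketch_execlists *execlists)
{
	unsigned int i;

	/* Release whatever request each port is still tracking... */
	for (i = 0; i < NUM_PORTS; i++)
		sketch_request_put(execlists->port[i]);

	/* ...and mark all ports as unset. */
	memset(execlists->port, 0, sizeof(execlists->port));
}

int main(void)
{
	struct sketch_request rq = { .refcount = 1 };
	struct sketch_execlists el = { .port = { &rq, NULL } };

	sketch_cancel_port_requests(&el);
	/* rq.refcount is now 0 and both ports read as unset (NULL). */
	return 0;
}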

v2: rebase
v3: drop local, keep GEM_BUG_ON (Michał, Chris)
v4: rebase

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_lrc.c | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)

Patch

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3186be54bbd8..4f625371b5fe 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -568,21 +568,27 @@  static void execlists_dequeue(struct intel_engine_cs *engine)
 		execlists_submit_ports(engine);
 }
 
+static void execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(execlists->port); i++)
+		i915_gem_request_put(port_request(&execlists->port[i]));
+
+	memset(execlists->port, 0, sizeof(execlists->port));
+}
+
 static void execlists_cancel_requests(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct execlist_port *port = execlists->port;
 	struct drm_i915_gem_request *rq, *rn;
 	struct rb_node *rb;
 	unsigned long flags;
-	unsigned long n;
 
 	spin_lock_irqsave(&engine->timeline->lock, flags);
 
 	/* Cancel the requests on the HW and clear the ELSP tracker. */
-	for (n = 0; n < ARRAY_SIZE(execlists->port); n++)
-		i915_gem_request_put(port_request(&port[n]));
-	memset(execlists->port, 0, sizeof(execlists->port));
+	execlist_cancel_port_requests(execlists);
 
 	/* Mark all executing requests as skipped. */
 	list_for_each_entry(rq, &engine->timeline->requests, link) {
@@ -613,9 +619,10 @@  static void execlists_cancel_requests(struct intel_engine_cs *engine)
 
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
+
 	execlists->queue = RB_ROOT;
 	execlists->first = NULL;
-	GEM_BUG_ON(port_isset(&port[0]));
+	GEM_BUG_ON(port_isset(&execlists->port[0]));
 
 	/*
 	 * The port is checked prior to scheduling a tasklet, but
@@ -1372,11 +1379,9 @@  static void reset_common_ring(struct intel_engine_cs *engine,
 			      struct drm_i915_gem_request *request)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct execlist_port *port = execlists->port;
 	struct drm_i915_gem_request *rq, *rn;
 	struct intel_context *ce;
 	unsigned long flags;
-	unsigned int n;
 
 	spin_lock_irqsave(&engine->timeline->lock, flags);
 
@@ -1389,9 +1394,7 @@  static void reset_common_ring(struct intel_engine_cs *engine,
 	 * guessing the missed context-switch events by looking at what
 	 * requests were completed.
 	 */
-	for (n = 0; n < ARRAY_SIZE(execlists->port); n++)
-		i915_gem_request_put(port_request(&port[n]));
-	memset(execlists->port, 0, sizeof(execlists->port));
+	execlist_cancel_port_requests(execlists);
 
 	/* Push back any incomplete requests for replay after the reset. */
 	list_for_each_entry_safe_reverse(rq, rn,