
[26/37] drm/i915/execlists: Flush the tasklet before unpinning

Message ID 20180629075348.27358-26-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson June 29, 2018, 7:53 a.m. UTC
Inside the execlists submission tasklet, we often make the mistake of
assuming that everything beneath the request is available for use.
However, the submission and the request live on two separate timelines,
and the request contents may be freed by an early retirement before we
have had a chance to run the submission tasklet (think ksoftirqd). To
safeguard ourselves against any mistakes, flush the tasklet before we
unpin the context if execlists still has a reference to this context.

References: 60367132a214 ("drm/i915: Avoid use-after-free of ctx in request tracepoints")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
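Not part of the patch: for review convenience, a condensed sketch of the
ce->active lifecycle that the hunks below implement. Everything except the
accesses to the new field is elided; the real code lives in
execlists_submit_ports(), execlists_cancel_port_requests()/process_csb()
and execlists_context_unpin().

	/* submission tasklet: record which tasklet may still use ce */
	execlists_context_schedule_in(rq);
	ce->active = engine;

	/* CSB completion / cancel (also in the tasklet): HW is done with ce */
	ce->active = NULL;
	execlists_context_schedule_out(rq, ...);

	/* unpin on retirement: flush any pending tasklet run first */
	engine = READ_ONCE(ce->active);
	if (unlikely(engine))
		tasklet_kill(&engine->execlists.tasklet);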
 drivers/gpu/drm/i915/i915_gem_context.h |  1 +
 drivers/gpu/drm/i915/intel_lrc.c        | 25 ++++++++++++++++++++++++-
 2 files changed, 25 insertions(+), 1 deletion(-)

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 8c55f8582469..bc440a7fe894 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -160,6 +160,7 @@  struct i915_gem_context {
 	/** engine: per-engine logical HW state */
 	struct intel_context {
 		struct i915_gem_context *gem_context;
+		struct intel_engine_cs *active;
 		struct i915_vma *state;
 		struct intel_ring *ring;
 		u32 *lrc_reg_state;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 8a4a1c0b986e..d70973c51a33 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -504,8 +504,11 @@  static void execlists_submit_ports(struct intel_engine_cs *engine)
 		rq = port_unpack(&port[n], &count);
 		if (rq) {
 			GEM_BUG_ON(count > !n);
-			if (!count++)
+			if (!count++) {
+				GEM_BUG_ON(rq->hw_context->active);
 				execlists_context_schedule_in(rq);
+				rq->hw_context->active = engine;
+			}
 			port_set(&port[n], port_pack(rq, count));
 			desc = execlists_update_context(rq);
 			GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
@@ -812,6 +815,8 @@  execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 			  intel_engine_get_seqno(rq->engine));
 
 		GEM_BUG_ON(!execlists->active);
+
+		rq->hw_context->active = NULL;
 		execlists_context_schedule_out(rq,
 					       i915_request_completed(rq) ?
 					       INTEL_CONTEXT_SCHEDULE_OUT :
@@ -1105,6 +1110,7 @@  static void process_csb(struct intel_engine_cs *engine)
 			 */
 			GEM_BUG_ON(!i915_request_completed(rq));
 
+			rq->hw_context->active = NULL;
 			execlists_context_schedule_out(rq,
 						       INTEL_CONTEXT_SCHEDULE_OUT);
 			GEM_TRACE("%s completed ctx=%d\n",
@@ -1362,6 +1368,23 @@  static void execlists_context_destroy(struct intel_context *ce)
 
 static void execlists_context_unpin(struct intel_context *ce)
 {
+	struct intel_engine_cs *engine;
+
+	/*
+	 * The tasklet may still be using a pointer to our state, via an
+	 * old request. However, since we know we only unpin the context
+	 * on retirement of the following request, we know that the last
+	 * request referencing us will have had a completion CS interrupt.
+	 * If we see that it is still active, it means that the tasklet hasn't
+	 * had the chance to run yet; let it run before we teardown the
+	 * reference it may use.
+	 */
+	engine = READ_ONCE(ce->active);
+	if (unlikely(engine)) {
+		tasklet_kill(&engine->execlists.tasklet);
+		GEM_BUG_ON(READ_ONCE(ce->active));
+	}
+
 	intel_ring_unpin(ce->ring);
 
 	ce->state->obj->pin_global--;