
[v2,07/15] drm/i915/tdr: Restore engine state and start after reset

Message ID 1466147355-4635-8-git-send-email-arun.siluvery@linux.intel.com (mailing list archive)
State New, archived

Commit Message

arun.siluvery@linux.intel.com June 17, 2016, 7:09 a.m. UTC
We capture the state of an engine before resetting it; once the reset is
successful, the engine is restored with the same state and restarted.

The state includes the head register and the active request. We also nudge
the head forward if it hasn't advanced, otherwise the HW executes the same
instruction when the engine is restarted and may hang again. Generally the
head advances to the next instruction as soon as the HW reads the current
one, without waiting for it to complete; however, an MBOX wait inserted
directly into the VCS/BCS rings does not behave this way, and the head
keeps pointing at the same instruction until it completes.

If the head is modified, the context image is updated as well so that the
HW sees the up-to-date value.

A valid request is expected in the state at this point, otherwise we would
not have reached it; the context that submitted the request is resubmitted
to HW. The request that caused the hang sits at the head of the execlist
queue, and unless we resubmit and complete it, it cannot be removed from
the queue.

Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Arun Siluvery <arun.siluvery@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c        | 94 +++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_ringbuffer.h |  9 ++++
 2 files changed, 103 insertions(+)
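
For illustration, here is a standalone sketch of the head-nudge arithmetic
described above. HEAD_ADDR is the real RING_HEAD offset mask from
i915_reg.h; the helper name and the sample values are made up for this
example:

    /* Standalone illustration of the QWORD head nudge; not kernel code. */
    #include <stdint.h>
    #include <stdio.h>

    #define HEAD_ADDR 0x001FFFFC /* ring-offset bits of RING_HEAD */

    static uint32_t nudge_head(uint32_t head, uint32_t last_head)
    {
            uint32_t head_addr = head & HEAD_ADDR;

            if (head == last_head) {
                    /* Engine is stuck on the same instruction: push the
                     * offset up to the next QWORD boundary. Note that
                     * roundup() leaves an already-aligned offset unchanged.
                     */
                    head_addr = (head_addr + 7) & ~7u; /* roundup(head_addr, 8) */
            } else if (head_addr & 0x7) {
                    /* Otherwise only enforce QWORD alignment. */
                    head_addr = (head_addr + 7) & ~7u; /* ALIGN(head_addr, 8) */
            }

            return (head & ~HEAD_ADDR) | (head_addr & HEAD_ADDR);
    }

    int main(void)
    {
            printf("%#x\n", nudge_head(0x1234, 0x1234)); /* stuck, unaligned: 0x1238 */
            printf("%#x\n", nudge_head(0x2000, 0x1234)); /* advanced, aligned: 0x2000 */
            return 0;
    }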

Comments

Chris Wilson June 17, 2016, 7:32 a.m. UTC | #1
On Fri, Jun 17, 2016 at 08:09:07AM +0100, Arun Siluvery wrote:
> We capture the state of an engine before resetting it; once the reset is
> successful, the engine is restored with the same state and restarted.
> 
> The state includes the head register and the active request. We also nudge
> the head forward if it hasn't advanced, otherwise the HW executes the same
> instruction when the engine is restarted and may hang again. Generally the
> head advances to the next instruction as soon as the HW reads the current
> one, without waiting for it to complete; however, an MBOX wait inserted
> directly into the VCS/BCS rings does not behave this way, and the head
> keeps pointing at the same instruction until it completes.
> 
> If the head is modified, the context image is updated as well so that the
> HW sees the up-to-date value.
> 
> A valid request is expected in the state at this point, otherwise we would
> not have reached it; the context that submitted the request is resubmitted
> to HW. The request that caused the hang sits at the head of the execlist
> queue, and unless we resubmit and complete it, it cannot be removed from
> the queue.
> 
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> Signed-off-by: Tomas Elf <tomas.elf@intel.com>
> Signed-off-by: Arun Siluvery <arun.siluvery@linux.intel.com>
> ---
>  drivers/gpu/drm/i915/intel_lrc.c        | 94 +++++++++++++++++++++++++++++++++
>  drivers/gpu/drm/i915/intel_ringbuffer.h |  9 ++++
>  2 files changed, 103 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index b83552a..c9aa2ca 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -487,6 +487,30 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine,
>  	execlists_submit_requests(req0, req1, tdr_resubmission);
>  }
>  
> +/**
> + * intel_execlists_resubmit() - resubmit a fixed-up context directly to ELSP
> + * @engine: engine to do resubmission for
> + *
> + * In execlists mode, engine reset post-processing mainly consists of
> + * resubmitting the context after the reset; for this we bypass the execlist
> + * queue. This is necessary since at the point of TDR hang recovery the
> + * hardware will be hung, and resubmitting a fixed context (the context that
> + * the TDR has identified as hung and fixed up in order to move past the
> + * blocking batch buffer) to a hung execlist queue would lock up the TDR.
> + * Instead, opt for direct ELSP submission without depending on the rest of
> + * the driver.
> + */
> +static void intel_execlists_resubmit(struct intel_engine_cs *engine)
> +{
> +	unsigned long flags;
> +
> +	if (WARN_ON(list_empty(&engine->execlist_queue)))
> +		return;
> +
> +	spin_lock_irqsave(&engine->execlist_lock, flags);
> +	execlists_context_unqueue(engine, true);
> +	spin_unlock_irqrestore(&engine->execlist_lock, flags);
> +}
> +
>  static unsigned int
>  execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
>  {
> @@ -1098,6 +1122,75 @@ static int gen8_engine_state_save(struct intel_engine_cs *engine,
>  	return 0;
>  }
>  
> +/**
> + * gen8_engine_start() - restore saved state and start engine
> + * @engine: engine to be started
> + * @state: state to be restored
> + *
> + * Returns:
> + *	0 if ok, otherwise propagates error codes.
> + */
> +static int gen8_engine_start(struct intel_engine_cs *engine,
> +			     struct intel_engine_cs_state *state)
> +{
> +	u32 head;
> +	u32 head_addr, tail_addr;
> +	u32 *reg_state;
> +	struct intel_ringbuffer *ringbuf;
> +	struct i915_gem_context *ctx;
> +	struct drm_i915_private *dev_priv = engine->i915;
> +
> +	ctx = state->req->ctx;
> +	ringbuf = ctx->engine[engine->id].ringbuf;
> +	reg_state = ctx->engine[engine->id].lrc_reg_state;
> +
> +	head = state->head;
> +	head_addr = head & HEAD_ADDR;
> +
> +	if (head == engine->hangcheck.last_head) {
> +		/*
> +		 * The engine has not advanced since the last time it hung,
> +		 * force it to advance to the next QWORD. In most cases the
> +		 * engine head pointer will automatically advance to the
> +		 * next instruction as soon as it has read the current
> +		 * instruction, without waiting for it to complete. This
> +		 * seems to be the default behaviour, however an MBOX wait
> +		 * inserted directly to the VCS/BCS engines does not behave
> +		 * in the same way, instead the head pointer will still be
> +		 * pointing at the MBOX instruction until it completes.
> +		 */
> +		head_addr = roundup(head_addr, 8);
> +		engine->hangcheck.last_head = head;
> +	} else if (head_addr & 0x7) {
> +		/* Ensure head pointer is pointing to a QWORD boundary */
> +		head_addr = ALIGN(head_addr, 8);
> +	}
> +
> +	tail_addr = reg_state[CTX_RING_TAIL+1] & TAIL_ADDR;
> +
> +	if (head_addr > tail_addr)
> +		head_addr = tail_addr;
> +	else if (head_addr >= ringbuf->size)
> +		head_addr = 0;
> +
> +	head &= ~HEAD_ADDR;
> +	head |= (head_addr & HEAD_ADDR);
> +
> +	/* Restore head */
> +	reg_state[CTX_RING_HEAD+1] = head;
> +	I915_WRITE_HEAD(engine, head);
> +
> +	/* Update the sw ring state to match and recompute space */
> +	ringbuf->head = head;
> +	ringbuf->last_retired_head = -1;
> +	intel_ring_update_space(ringbuf);
> +
> +	if (state->req)
> +		intel_execlists_resubmit(engine);

So given that we have a request, why not just use the request to set the
state? We don't need to save anything as either we ensure the ring is
stopped (no new request) or submit the next request.

Also, we already have a callback to start the engines; that would be easy
to extend to support starting at a particular request (as it was intended
to be).
-Chris
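
A minimal sketch of the direction Chris suggests, deriving the restart
point from the request itself rather than from a saved snapshot. The
function name is hypothetical; req->head (the ring offset at which the
request starts) and the context/ring lookups follow the code in this
patch, under the assumption they apply unchanged:

    /* Hypothetical sketch: restart an engine at a given request, using
     * the request itself (rather than a separately saved snapshot) as
     * the source of truth for the head.
     */
    static int gen8_engine_start_at(struct intel_engine_cs *engine,
    				    struct drm_i915_gem_request *req)
    {
    	struct i915_gem_context *ctx = req->ctx;
    	struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
    	u32 *reg_state = ctx->engine[engine->id].lrc_reg_state;

    	/* req->head already records where the request begins in the
    	 * ring, so no saved head register is needed.
    	 */
    	reg_state[CTX_RING_HEAD+1] = req->head;
    	ringbuf->head = req->head;
    	ringbuf->last_retired_head = -1;
    	intel_ring_update_space(ringbuf);

    	intel_execlists_resubmit(engine);
    	return 0;
    }

Any fixup needed to skip the hung batch would still have to be applied to
the ring contents before this resubmission.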

Patch

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index b83552a..c9aa2ca 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -487,6 +487,30 @@  static void execlists_context_unqueue(struct intel_engine_cs *engine,
 	execlists_submit_requests(req0, req1, tdr_resubmission);
 }
 
+/**
+ * intel_execlists_resubmit() - resubmit a fixed-up context directly to ELSP
+ * @engine: engine to do resubmission for
+ *
+ * In execlists mode, engine reset post-processing mainly consists of
+ * resubmitting the context after the reset; for this we bypass the execlist
+ * queue. This is necessary since at the point of TDR hang recovery the
+ * hardware will be hung, and resubmitting a fixed context (the context that
+ * the TDR has identified as hung and fixed up in order to move past the
+ * blocking batch buffer) to a hung execlist queue would lock up the TDR.
+ * Instead, opt for direct ELSP submission without depending on the rest of
+ * the driver.
+ */
+static void intel_execlists_resubmit(struct intel_engine_cs *engine)
+{
+	unsigned long flags;
+
+	if (WARN_ON(list_empty(&engine->execlist_queue)))
+		return;
+
+	spin_lock_irqsave(&engine->execlist_lock, flags);
+	execlists_context_unqueue(engine, true);
+	spin_unlock_irqrestore(&engine->execlist_lock, flags);
+}
+
 static unsigned int
 execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
 {
@@ -1098,6 +1122,75 @@  static int gen8_engine_state_save(struct intel_engine_cs *engine,
 	return 0;
 }
 
+/**
+ * gen8_engine_start() - restore saved state and start engine
+ * @engine: engine to be started
+ * @state: state to be restored
+ *
+ * Returns:
+ *	0 if ok, otherwise propagates error codes.
+ */
+static int gen8_engine_start(struct intel_engine_cs *engine,
+			     struct intel_engine_cs_state *state)
+{
+	u32 head;
+	u32 head_addr, tail_addr;
+	u32 *reg_state;
+	struct intel_ringbuffer *ringbuf;
+	struct i915_gem_context *ctx;
+	struct drm_i915_private *dev_priv = engine->i915;
+
+	ctx = state->req->ctx;
+	ringbuf = ctx->engine[engine->id].ringbuf;
+	reg_state = ctx->engine[engine->id].lrc_reg_state;
+
+	head = state->head;
+	head_addr = head & HEAD_ADDR;
+
+	if (head == engine->hangcheck.last_head) {
+		/*
+		 * The engine has not advanced since the last time it hung,
+		 * force it to advance to the next QWORD. In most cases the
+		 * engine head pointer will automatically advance to the
+		 * next instruction as soon as it has read the current
+		 * instruction, without waiting for it to complete. This
+		 * seems to be the default behaviour, however an MBOX wait
+		 * inserted directly to the VCS/BCS engines does not behave
+		 * in the same way, instead the head pointer will still be
+		 * pointing at the MBOX instruction until it completes.
+		 */
+		head_addr = roundup(head_addr, 8);
+		engine->hangcheck.last_head = head;
+	} else if (head_addr & 0x7) {
+		/* Ensure head pointer is pointing to a QWORD boundary */
+		head_addr = ALIGN(head_addr, 8);
+	}
+
+	tail_addr = reg_state[CTX_RING_TAIL+1] & TAIL_ADDR;
+
+	if (head_addr > tail_addr)
+		head_addr = tail_addr;
+	else if (head_addr >= ringbuf->size)
+		head_addr = 0;
+
+	head &= ~HEAD_ADDR;
+	head |= (head_addr & HEAD_ADDR);
+
+	/* Restore head */
+	reg_state[CTX_RING_HEAD+1] = head;
+	I915_WRITE_HEAD(engine, head);
+
+	/* Update the sw ring state to match and recompute space */
+	ringbuf->head = head;
+	ringbuf->last_retired_head = -1;
+	intel_ring_update_space(ringbuf);
+
+	if (state->req)
+		intel_execlists_resubmit(engine);
+
+	return 0;
+}
+
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
@@ -2056,6 +2149,7 @@  logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 
 	/* engine reset supporting functions */
 	engine->save = gen8_engine_state_save;
+	engine->start = gen8_engine_start;
 
 	if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
 		engine->irq_seqno_barrier = bxt_a_seqno_barrier;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index daf2727..55cb0b5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -92,6 +92,13 @@  struct intel_ring_hangcheck {
 	enum intel_ring_hangcheck_action action;
 	int deadlock;
 	u32 instdone[I915_NUM_INSTDONE_REG];
+
+	/*
+	 * Last recorded ring head index.
+	 * This is only ever a ring index, whereas the active
+	 * head may be a graphics address in a ring buffer.
+	 */
+	u32 last_head;
 };
 
 struct intel_ringbuffer {
@@ -213,6 +220,8 @@  struct intel_engine_cs {
 	/* engine reset supporting functions */
 	int (*save)(struct intel_engine_cs *engine,
 		    struct intel_engine_cs_state *state);
+	int (*start)(struct intel_engine_cs *engine,
+		     struct intel_engine_cs_state *state);
 
 	/* GEN8 signal/wait table - never trust comments!
 	 *	  signal to	signal to    signal to   signal to      signal to