
drm/i915: Restore engine->submit_request before unwedging

Message ID 20170309102225.25330-1-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson March 9, 2017, 10:22 a.m. UTC
When we wedge the device, we override engine->submit_request with a nop
to ensure that all in-flight requests are marked in error. However, igt
would like to unwedge the device to test -EIO handling. This requires us
to flush those in-flight requests and restore the original
engine->submit_request.

Fixes: 821ed7df6e2a ("drm/i915: Update reset path to fix incomplete requests")
Testcase: igt/gem_eio
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.c         |  2 +-
 drivers/gpu/drm/i915/i915_drv.h         |  1 +
 drivers/gpu/drm/i915/i915_gem.c         | 41 +++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_ringbuffer.c | 12 ++++++++++
 drivers/gpu/drm/i915/intel_ringbuffer.h |  2 ++
 5 files changed, 57 insertions(+), 1 deletion(-)
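
For context, the "nop" mentioned in the commit message is the stub that
i915_gem_set_wedged() installs in place of each engine's submit hook. A
rough sketch of it, reconstructed from the driver of the same era (details
may differ):

static void nop_submit_request(struct drm_i915_gem_request *request)
{
	/* Mark the request as in error and as submitted, without touching
	 * the hardware...
	 */
	dma_fence_set_error(&request->fence, -EIO);
	i915_gem_request_submit(request);

	/* ...then advance the engine's breadcrumb to this request's seqno
	 * so it immediately reads back as complete.
	 */
	intel_engine_init_global_seqno(request->engine, request->global_seqno);
}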

Comments

Tvrtko Ursulin March 10, 2017, 12:59 p.m. UTC | #1
On 09/03/2017 10:22, Chris Wilson wrote:
> When we wedge the device, we override engine->submit_request with a nop
> to ensure that all in-flight requests are marked in error. However, igt
> would like to unwedge the device to test -EIO handling. This requires us
> to flush those in-flight requests and restore the original
> engine->submit_request.
>
> Fixes: 821ed7df6e2a ("drm/i915: Update reset path to fix incomplete requests")
> Testcase: igt/gem_eio
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: Mika Kuoppala <mika.kuoppala@intel.com>
> ---
>  drivers/gpu/drm/i915/i915_drv.c         |  2 +-
>  drivers/gpu/drm/i915/i915_drv.h         |  1 +
>  drivers/gpu/drm/i915/i915_gem.c         | 41 +++++++++++++++++++++++++++++++++
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 12 ++++++++++
>  drivers/gpu/drm/i915/intel_ringbuffer.h |  2 ++
>  5 files changed, 57 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
> index b1e9027a4f80..576b03b0048c 100644
> --- a/drivers/gpu/drm/i915/i915_drv.c
> +++ b/drivers/gpu/drm/i915/i915_drv.c
> @@ -1825,7 +1825,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
>  		return;
>
>  	/* Clear any previous failed attempts at recovery. Time to try again. */
> -	__clear_bit(I915_WEDGED, &error->flags);
> +	i915_gem_unset_wedged(dev_priv);
>  	error->reset_count++;
>
>  	pr_notice("drm/i915: Resetting chip after gpu hang\n");
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 3002996ddbed..c52aee5141ca 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -3409,6 +3409,7 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
>  void i915_gem_reset(struct drm_i915_private *dev_priv);
>  void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
>  void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
> +void i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
>
>  void i915_gem_init_mmio(struct drm_i915_private *i915);
>  int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index aca1eaddafb4..0725e7a591a5 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2999,6 +2999,47 @@ void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
>  	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
>  }
>
> +void i915_gem_unset_wedged(struct drm_i915_private *dev_priv)
> +{
> +	struct i915_gem_timeline *tl;
> +	int i;
> +
> +	lockdep_assert_held(&dev_priv->drm.struct_mutex);
> +	if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
> +		return;
> +
> +	/* Before unwedging, make sure that all pending operations
> +	 * are flushed and errored out. No more can be submitted until
> +	 * we reset the wedged bit.
> +	 */
> +	list_for_each_entry(tl, &dev_priv->gt.timelines, link) {
> +		for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
> +			struct drm_i915_gem_request *rq;
> +
> +			rq = i915_gem_active_peek(&tl->engine[i].last_request,
> +						  &dev_priv->drm.struct_mutex);
> +			if (!rq)
> +				continue;
> +
> +			/* We can't use our normal waiter as we want to
> +			 * avoid recursively trying to handle the current
> +			 * reset.
> +			 */
> +			dma_fence_default_wait(&rq->fence, false,
> +					       MAX_SCHEDULE_TIMEOUT);

Who will signal these, since the GPU is stuck and this happens before
the GEM reset calls in i915_reset? Obviously I totally don't understand
how this is supposed to work.. :)

> +		}
> +	}
> +
> +	/* Undo nop_submit_request */
> +	if (i915.enable_execlists)
> +		intel_execlists_enable_submission(dev_priv);
> +	else
> +		intel_ringbuffer_enable_submission(dev_priv);

No need for stop_machine() like the wedging path uses?

> +
> +	smp_mb__before_atomic();

What is this for?

> +	clear_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
> +}
> +
>  static void
>  i915_gem_retire_work_handler(struct work_struct *work)
>  {
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 4a864f8c9387..753586f6ddbe 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -2224,3 +2224,15 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
>
>  	return intel_init_ring_buffer(engine);
>  }
> +
> +void intel_ringbuffer_enable_submission(struct drm_i915_private *i915)
> +{
> +	struct intel_engine_cs *engine;
> +	enum intel_engine_id id;
> +
> +	for_each_engine(engine, i915, id) {
> +		engine->submit_request = i9xx_submit_request;
> +		if (IS_GEN6(i915) && id == VCS)
> +			engine->submit_request = gen6_bsd_submit_request;
> +	}

I wonder if it would be worth extracting the setting of this vfunc (and
of ->schedule in the execlists case) into a helper so the logic is not
duplicated. Sounds a bit marginal at the moment, don't know.

> +}
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 0ef491df5b4e..5601c24b266a 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -669,4 +669,6 @@ static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
>  bool intel_engine_is_idle(struct intel_engine_cs *engine);
>  bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
>
> +void intel_ringbuffer_enable_submission(struct drm_i915_private *i915);
> +
>  #endif /* _INTEL_RINGBUFFER_H_ */
>

Regards,

Tvrtko
Chris Wilson March 10, 2017, 1:18 p.m. UTC | #2
On Fri, Mar 10, 2017 at 12:59:47PM +0000, Tvrtko Ursulin wrote:
> 
> On 09/03/2017 10:22, Chris Wilson wrote:
> >+void i915_gem_unset_wedged(struct drm_i915_private *dev_priv)
> >+{
> >+	struct i915_gem_timeline *tl;
> >+	int i;
> >+
> >+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
> >+	if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
> >+		return;
> >+
> >+	/* Before unwedging, make sure that all pending operations
> >+	 * are flushed and errored out. No more can be submitted until
> >+	 * we reset the wedged bit.
> >+	 */
> >+	list_for_each_entry(tl, &dev_priv->gt.timelines, link) {
> >+		for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
> >+			struct drm_i915_gem_request *rq;
> >+
> >+			rq = i915_gem_active_peek(&tl->engine[i].last_request,
> >+						  &dev_priv->drm.struct_mutex);
> >+			if (!rq)
> >+				continue;
> >+
> >+			/* We can't use our normal waiter as we want to
> >+			 * avoid recursively trying to handle the current
> >+			 * reset.
> >+			 */
> >+			dma_fence_default_wait(&rq->fence, false,
> >+					       MAX_SCHEDULE_TIMEOUT);
> 
> Who will signal these, since the GPU is stuck and this happens before
> the GEM reset calls in i915_reset? Obviously I totally don't understand
> how this is supposed to work.. :)

All in-flight requests are completed by the reset and signaled. Then the
reset installs engine->submit_request = nop_submit_request to catch any
new requests that were waiting on a fence (either ours or a third party's)
before being submitted. nop_submit_request() will advance the seqno and
wake up the signaler, which will then signal the fence (and
dma_fence_default_wait() will ensure the signaler is armed).

It is a bit of a roundabout route just to ensure those outstanding
fences are indeed signaled. And the loop over all timelines is just as
ugly.
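
To spell out the "armed" part, the wait in the patch amounts to:

	/* Uninterruptible wait. dma_fence_default_wait() enables
	 * signaling on the fence before sleeping -- for i915 that is
	 * i915_fence_enable_signaling(), which hands the request to the
	 * breadcrumbs signaler -- so the signaler is tracking the
	 * request before we block, and the nop submission only has to
	 * advance the seqno for the fence to be signaled.
	 */
	dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);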

> >+		}
> >+	}
> >+
> >+	/* Undo nop_submit_request */
> >+	if (i915.enable_execlists)
> >+		intel_execlists_enable_submission(dev_priv);
> >+	else
> >+		intel_ringbuffer_enable_submission(dev_priv);
> 
> No need for stop_machine() like the wedging path uses?

No. We prevent all new i915 requests from being queued (by
disallowing execbuf whilst wedged), so having waited for all active
requests above, we know the system is idle and do not have to worry
about a thread being inside engine->submit_request() as we swap over.
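
For contrast, the wedging side swaps the vfunc whilst submissions may
still be in flight, hence the stop_machine() there; roughly, as the code
stood at the time (details may differ):

static int __i915_gem_set_wedged_BKL(void *data)
{
	struct drm_i915_private *i915 = data;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Runs with every other CPU stopped, so no one can be halfway
	 * through engine->submit_request() as it is replaced.
	 */
	for_each_engine(engine, i915, id)
		engine_set_wedged(engine);

	return 0;
}

	/* from i915_gem_set_wedged() */
	stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);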
 
> >+
> >+	smp_mb__before_atomic();
> 
> What is this for?

Paranoid brain said that clear_bit() didn't guarantee a barrier.

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */

Though we have struct_mutex as a barrier right now, this should be one
less surprise later.
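
Concretely, the pairing the patch is aiming for:

	/* Restore the real submission hooks... */
	intel_ringbuffer_enable_submission(dev_priv);

	/* ...and make those stores visible before the wedged bit is
	 * cleared, so anyone who observes !I915_WEDGED also observes
	 * the restored vfuncs.
	 */
	smp_mb__before_atomic();
	clear_bit(I915_WEDGED, &dev_priv->gpu_error.flags);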

> >+	clear_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
> >+}
> >+
> > static void
> > i915_gem_retire_work_handler(struct work_struct *work)
> > {
> >diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> >index 4a864f8c9387..753586f6ddbe 100644
> >--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> >+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> >@@ -2224,3 +2224,15 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
> >
> > 	return intel_init_ring_buffer(engine);
> > }
> >+
> >+void intel_ringbuffer_enable_submission(struct drm_i915_private *i915)
> >+{
> >+	struct intel_engine_cs *engine;
> >+	enum intel_engine_id id;
> >+
> >+	for_each_engine(engine, i915, id) {
> >+		engine->submit_request = i9xx_submit_request;
> >+		if (IS_GEN6(i915) && id == VCS)
> >+			engine->submit_request = gen6_bsd_submit_request;
> >+	}
> 
> I wonder if it would be worth extracting the setting of this vfunc (and
> of ->schedule in the execlists case) into a helper so the logic is not
> duplicated. Sounds a bit marginal at the moment, don't know.

I'm not happy with it either at the moment. It is a quick and dirty
interface that unfortunately will have the tendency to stick around :|

So I'm thinking something like

	engine->enable_submit() or engine->install_submit_request()

?
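
As a hypothetical sketch (the names here are invented, nothing in the
patch defines them), unwedging would then reduce to:

void intel_engines_enable_submission(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Each backend (execlists or legacy ringbuffer) records its
	 * default submit hook in a per-engine vfunc at init time, so
	 * the caller need not know which backend is active.
	 */
	for_each_engine(engine, i915, id)
		engine->enable_submit(engine);
}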
-Chris

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b1e9027a4f80..576b03b0048c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1825,7 +1825,7 @@  void i915_reset(struct drm_i915_private *dev_priv)
 		return;
 
 	/* Clear any previous failed attempts at recovery. Time to try again. */
-	__clear_bit(I915_WEDGED, &error->flags);
+	i915_gem_unset_wedged(dev_priv);
 	error->reset_count++;
 
 	pr_notice("drm/i915: Resetting chip after gpu hang\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3002996ddbed..c52aee5141ca 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3409,6 +3409,7 @@  int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
 void i915_gem_reset(struct drm_i915_private *dev_priv);
 void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
+void i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
 
 void i915_gem_init_mmio(struct drm_i915_private *i915);
 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aca1eaddafb4..0725e7a591a5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2999,6 +2999,47 @@  void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
 	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
 }
 
+void i915_gem_unset_wedged(struct drm_i915_private *dev_priv)
+{
+	struct i915_gem_timeline *tl;
+	int i;
+
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
+		return;
+
+	/* Before unwedging, make sure that all pending operations
+	 * are flushed and errored out. No more can be submitted until
+	 * we reset the wedged bit.
+	 */
+	list_for_each_entry(tl, &dev_priv->gt.timelines, link) {
+		for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
+			struct drm_i915_gem_request *rq;
+
+			rq = i915_gem_active_peek(&tl->engine[i].last_request,
+						  &dev_priv->drm.struct_mutex);
+			if (!rq)
+				continue;
+
+			/* We can't use our normal waiter as we want to
+			 * avoid recursively trying to handle the current
+			 * reset.
+			 */
+			dma_fence_default_wait(&rq->fence, false,
+					       MAX_SCHEDULE_TIMEOUT);
+		}
+	}
+
+	/* Undo nop_submit_request */
+	if (i915.enable_execlists)
+		intel_execlists_enable_submission(dev_priv);
+	else
+		intel_ringbuffer_enable_submission(dev_priv);
+
+	smp_mb__before_atomic();
+	clear_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
+}
+
 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 4a864f8c9387..753586f6ddbe 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2224,3 +2224,15 @@  int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
 
 	return intel_init_ring_buffer(engine);
 }
+
+void intel_ringbuffer_enable_submission(struct drm_i915_private *i915)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	for_each_engine(engine, i915, id) {
+		engine->submit_request = i9xx_submit_request;
+		if (IS_GEN6(i915) && id == VCS)
+			engine->submit_request = gen6_bsd_submit_request;
+	}
+}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 0ef491df5b4e..5601c24b266a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -669,4 +669,6 @@  static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
 bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
 
+void intel_ringbuffer_enable_submission(struct drm_i915_private *i915);
+
 #endif /* _INTEL_RINGBUFFER_H_ */