
[06/27] drm/i915/guc: Work around reset G2H being received after schedule disable done G2H

Message ID 20210819061639.21051-7-matthew.brost@intel.com (mailing list archive)
State New, archived
Series Clean up GuC CI failures, simplify locking, and kernel DOC

Commit Message

Matthew Brost Aug. 19, 2021, 6:16 a.m. UTC
If the context is reset as a result of request cancellation, the
context reset G2H is received after the schedule disable done G2H,
which is likely the wrong order. The schedule disable done G2H releases
the waiting request cancellation code, which resubmits the context. This
races with the context reset G2H, which also wants to resubmit the
context, but in this case it really should be a NOP as the request
cancellation code owns the resubmit. Work around this by checking the
context state to seal the race until the GuC firmware is fixed.

v2:
 (Checkpatch)
  - Fix typos

Fixes: 62eaf0ae217d ("drm/i915/guc: Support request cancellation")
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Cc: <stable@vger.kernel.org>
---
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 43 ++++++++++++++++---
 1 file changed, 37 insertions(+), 6 deletions(-)
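
The core of the workaround is the state check added to __guc_reset_context(): if a pending enable is in flight, the request cancellation path owns the resubmit and the reset handler backs off. Below is a minimal, standalone C model of that decision, distilled from the diff below; the type and helper names are invented for illustration and are not the i915 ones.

/*
 * Standalone model of the "pending enable => skip reset handling" check
 * added to __guc_reset_context(). Only the decision logic mirrors the
 * patch; the names here are invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_ctx {
	bool enabled;		/* context currently schedulable */
	bool pending_enable;	/* cancellation path has resubmitted */
};

/* Returns true if the context reset G2H handler should be a NOP. */
static bool reset_should_skip(struct model_ctx *ce)
{
	if (!ce->pending_enable) {
		ce->enabled = false;	/* clr_context_enabled() equivalent */
		return false;		/* normal reset: unwind and replay */
	}
	/* Request cancellation owns the resubmit; do nothing here. */
	return true;
}

int main(void)
{
	struct model_ctx normal = { .enabled = true };
	struct model_ctx racing = { .enabled = true, .pending_enable = true };

	printf("normal reset, skip=%d\n", reset_should_skip(&normal));
	printf("racing with cancellation, skip=%d\n", reset_should_skip(&racing));
	return 0;
}

In the patch itself the check and clr_context_enabled() run under ce->guc_state.lock (spin_lock_irqsave), so the cancellation path cannot set the pending enable flag between the test and the clear.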

Comments

Daniele Ceraolo Spurio Aug. 24, 2021, 11:31 p.m. UTC | #1
On 8/18/2021 11:16 PM, Matthew Brost wrote:
> If the context is reset as a result of request cancellation, the
> context reset G2H is received after the schedule disable done G2H,
> which is likely the wrong order. The schedule disable done G2H releases
> the waiting request cancellation code, which resubmits the context. This
> races with the context reset G2H, which also wants to resubmit the
> context, but in this case it really should be a NOP as the request
> cancellation code owns the resubmit. Work around this by checking the
> context state to seal the race until the GuC firmware is fixed.

Did you raise this with the GuC team? If it's a GuC issue we definitely 
want a fix there ASAP so we can drop any i915-side WAs.

>
> v2:
>   (Checkpatch)
>    - Fix typos
>
> Fixes: 62eaf0ae217d ("drm/i915/guc: Support request cancellation")
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> Cc: <stable@vger.kernel.org>
> ---
>   .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 43 ++++++++++++++++---
>   1 file changed, 37 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> index e4a099f8f820..8f7a11e65ef5 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> @@ -832,17 +832,35 @@ __unwind_incomplete_requests(struct intel_context *ce)
>   static void __guc_reset_context(struct intel_context *ce, bool stalled)
>   {
>   	struct i915_request *rq;
> +	unsigned long flags;
>   	u32 head;
> +	bool skip = false;
>   
>   	intel_context_get(ce);
>   
>   	/*
> -	 * GuC will implicitly mark the context as non-schedulable
> -	 * when it sends the reset notification. Make sure our state
> -	 * reflects this change. The context will be marked enabled
> -	 * on resubmission.
> +	 * GuC will implicitly mark the context as non-schedulable when it sends
> +	 * the reset notification. Make sure our state reflects this change. The
> +	 * context will be marked enabled on resubmission.
> +	 *
> +	 * XXX: If the context is reset as a result of the request cancellation
> +	 * this G2H is received after the schedule disable complete G2H which is
> +	 * likely wrong as this creates a race between the request cancellation
> +	 * code re-submitting the context and this G2H handler. This likely
> +	 * should be fixed in the GuC but until if / when that gets fixed we
> +	 * need to workaround this. Convert this function to a NOP if a pending
> +	 * enable is in flight as this indicates that a request cancellation has
> +	 * occurred.
>   	 */

IMO this comment sounds like we're not clear on expected behavior. 
Either the ordering is wrong, in which case we have a GuC bug and this 
is a temporary WA, or the ordering is allowed and we need to cope with 
it. The way the comment is written sounds like we're not sure.

Code changes look ok.

Daniele

> -	clr_context_enabled(ce);
> +	spin_lock_irqsave(&ce->guc_state.lock, flags);
> +	if (likely(!context_pending_enable(ce))) {
> +		clr_context_enabled(ce);
> +	} else {
> +		skip = true;
> +	}
> +	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
> +	if (unlikely(skip))
> +		goto out_put;
>   
>   	rq = intel_context_find_active_request(ce);
>   	if (!rq) {
> @@ -861,6 +879,7 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
>   out_replay:
>   	guc_reset_state(ce, head, stalled);
>   	__unwind_incomplete_requests(ce);
> +out_put:
>   	intel_context_put(ce);
>   }
>   
> @@ -1605,6 +1624,13 @@ static void guc_context_cancel_request(struct intel_context *ce,
>   			guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
>   					true);
>   		}
> +
> +		/*
> +		 * XXX: Racey if context is reset, see comment in
> +		 * __guc_reset_context().
> +		 */
> +		flush_work(&ce_to_guc(ce)->ct.requests.worker);
> +
>   		guc_context_unblock(ce);
>   	}
>   }
> @@ -2719,7 +2745,12 @@ static void guc_handle_context_reset(struct intel_guc *guc,
>   {
>   	trace_intel_context_reset(ce);
>   
> -	if (likely(!intel_context_is_banned(ce))) {
> +	/*
> +	 * XXX: Racey if request cancellation has occurred, see comment in
> +	 * __guc_reset_context().
> +	 */
> +	if (likely(!intel_context_is_banned(ce) &&
> +		   !context_blocked(ce))) {
>   		capture_error_state(guc, ce);
>   		guc_context_replay(ce);
>   	}
Matthew Brost Aug. 25, 2021, 4:05 a.m. UTC | #2
On Tue, Aug 24, 2021 at 04:31:21PM -0700, Daniele Ceraolo Spurio wrote:
> 
> 
> On 8/18/2021 11:16 PM, Matthew Brost wrote:
> > If the context is reset as a result of request cancellation, the
> > context reset G2H is received after the schedule disable done G2H,
> > which is likely the wrong order. The schedule disable done G2H releases
> > the waiting request cancellation code, which resubmits the context. This
> > races with the context reset G2H, which also wants to resubmit the
> > context, but in this case it really should be a NOP as the request
> > cancellation code owns the resubmit. Work around this by checking the
> > context state to seal the race until the GuC firmware is fixed.
> 
> Did you raise this with the GuC team? If it's a GuC issue we definitely want
> a fix there ASAP so we can drop any i915-side WAs.
>

Yep, def an issue with the GuC firmware behavior. Will get fixed, just
not sure when.
 
> > 
> > v2:
> >   (Checkpatch)
> >    - Fix typos
> > 
> > Fixes: 62eaf0ae217d ("drm/i915/guc: Support request cancellation")
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > Cc: <stable@vger.kernel.org>
> > ---
> >   .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 43 ++++++++++++++++---
> >   1 file changed, 37 insertions(+), 6 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > index e4a099f8f820..8f7a11e65ef5 100644
> > --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > @@ -832,17 +832,35 @@ __unwind_incomplete_requests(struct intel_context *ce)
> >   static void __guc_reset_context(struct intel_context *ce, bool stalled)
> >   {
> >   	struct i915_request *rq;
> > +	unsigned long flags;
> >   	u32 head;
> > +	bool skip = false;
> >   	intel_context_get(ce);
> >   	/*
> > -	 * GuC will implicitly mark the context as non-schedulable
> > -	 * when it sends the reset notification. Make sure our state
> > -	 * reflects this change. The context will be marked enabled
> > -	 * on resubmission.
> > +	 * GuC will implicitly mark the context as non-schedulable when it sends
> > +	 * the reset notification. Make sure our state reflects this change. The
> > +	 * context will be marked enabled on resubmission.
> > +	 *
> > +	 * XXX: If the context is reset as a result of the request cancellation
> > +	 * this G2H is received after the schedule disable complete G2H which is
> > +	 * likely wrong as this creates a race between the request cancellation
> > +	 * code re-submitting the context and this G2H handler. This likely
> > +	 * should be fixed in the GuC but until if / when that gets fixed we
> > +	 * need to workaround this. Convert this function to a NOP if a pending
> > +	 * enable is in flight as this indicates that a request cancellation has
> > +	 * occurred.
> >   	 */
> 
> IMO this comment sounds like we're not clear on expected behavior. Either
> the ordering is wrong, in which case we have a GuC bug and this is a
> temporary WA, or the ordering is allowed and we need to cope with it. The
> way the comment is written sounds like we're not sure.
> 

The comments were written before we had confirmation that the GuC
behavior was wrong; will reword.

> Code changes look ok.
>

Ty. I think we have to carry this until we upgrade to a GuC firmware
with the proper behavior. Until then, without this workaround,
canceling non-preemptable requests is 100% broken, which is why I added
a selftest. Will add a FIXME / XXX comment so we can remove this in the
future.

Matt

> Daniele
> 
> > -	clr_context_enabled(ce);
> > +	spin_lock_irqsave(&ce->guc_state.lock, flags);
> > +	if (likely(!context_pending_enable(ce))) {
> > +		clr_context_enabled(ce);
> > +	} else {
> > +		skip = true;
> > +	}
> > +	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
> > +	if (unlikely(skip))
> > +		goto out_put;
> >   	rq = intel_context_find_active_request(ce);
> >   	if (!rq) {
> > @@ -861,6 +879,7 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
> >   out_replay:
> >   	guc_reset_state(ce, head, stalled);
> >   	__unwind_incomplete_requests(ce);
> > +out_put:
> >   	intel_context_put(ce);
> >   }
> > @@ -1605,6 +1624,13 @@ static void guc_context_cancel_request(struct intel_context *ce,
> >   			guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
> >   					true);
> >   		}
> > +
> > +		/*
> > +		 * XXX: Racey if context is reset, see comment in
> > +		 * __guc_reset_context().
> > +		 */
> > +		flush_work(&ce_to_guc(ce)->ct.requests.worker);
> > +
> >   		guc_context_unblock(ce);
> >   	}
> >   }
> > @@ -2719,7 +2745,12 @@ static void guc_handle_context_reset(struct intel_guc *guc,
> >   {
> >   	trace_intel_context_reset(ce);
> > -	if (likely(!intel_context_is_banned(ce))) {
> > +	/*
> > +	 * XXX: Racey if request cancellation has occurred, see comment in
> > +	 * __guc_reset_context().
> > +	 */
> > +	if (likely(!intel_context_is_banned(ce) &&
> > +		   !context_blocked(ce))) {
> >   		capture_error_state(guc, ce);
> >   		guc_context_replay(ce);
> >   	}
>
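
The other half of the workaround is an ordering guarantee in guc_context_cancel_request(): the CT requests worker is flushed before guc_context_unblock(), so any stale context reset G2H is processed while the context is still blocked and is dropped by the context_blocked() check added to guc_handle_context_reset(). A minimal, standalone C model of that ordering follows; only the decision logic is taken from the patch, and the names are invented for illustration.

/*
 * Standalone model of the cancel-path ordering: drain pending G2Hs
 * (flush_work() in the patch) while the context is still blocked, then
 * unblock. Names are invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_ctx {
	bool banned;
	bool blocked;
	int replays;	/* times the reset handler replayed this context */
};

/* Mirrors the check added to guc_handle_context_reset(). */
static void handle_context_reset(struct model_ctx *ce)
{
	if (!ce->banned && !ce->blocked)
		ce->replays++;	/* capture_error_state() + guc_context_replay() */
}

/* Mirrors guc_context_cancel_request(): flush G2Hs, then unblock. */
static void cancel_request(struct model_ctx *ce, bool stale_reset_pending)
{
	if (stale_reset_pending)
		handle_context_reset(ce);	/* handled while still blocked */
	ce->blocked = false;			/* guc_context_unblock() */
}

int main(void)
{
	struct model_ctx ce = { .blocked = true };

	cancel_request(&ce, true);
	printf("replays after cancel with stale reset G2H: %d\n", ce.replays);
	return 0;
}

Because the flush happens before the unblock, the stale reset is observed while context_blocked() is still true and the replay is skipped, which is why the model prints 0.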

Patch

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index e4a099f8f820..8f7a11e65ef5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -832,17 +832,35 @@  __unwind_incomplete_requests(struct intel_context *ce)
 static void __guc_reset_context(struct intel_context *ce, bool stalled)
 {
 	struct i915_request *rq;
+	unsigned long flags;
 	u32 head;
+	bool skip = false;
 
 	intel_context_get(ce);
 
 	/*
-	 * GuC will implicitly mark the context as non-schedulable
-	 * when it sends the reset notification. Make sure our state
-	 * reflects this change. The context will be marked enabled
-	 * on resubmission.
+	 * GuC will implicitly mark the context as non-schedulable when it sends
+	 * the reset notification. Make sure our state reflects this change. The
+	 * context will be marked enabled on resubmission.
+	 *
+	 * XXX: If the context is reset as a result of the request cancellation
+	 * this G2H is received after the schedule disable complete G2H which is
+	 * likely wrong as this creates a race between the request cancellation
+	 * code re-submitting the context and this G2H handler. This likely
+	 * should be fixed in the GuC but until if / when that gets fixed we
+	 * need to workaround this. Convert this function to a NOP if a pending
+	 * enable is in flight as this indicates that a request cancellation has
+	 * occurred.
 	 */
-	clr_context_enabled(ce);
+	spin_lock_irqsave(&ce->guc_state.lock, flags);
+	if (likely(!context_pending_enable(ce))) {
+		clr_context_enabled(ce);
+	} else {
+		skip = true;
+	}
+	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+	if (unlikely(skip))
+		goto out_put;
 
 	rq = intel_context_find_active_request(ce);
 	if (!rq) {
@@ -861,6 +879,7 @@  static void __guc_reset_context(struct intel_context *ce, bool stalled)
 out_replay:
 	guc_reset_state(ce, head, stalled);
 	__unwind_incomplete_requests(ce);
+out_put:
 	intel_context_put(ce);
 }
 
@@ -1605,6 +1624,13 @@  static void guc_context_cancel_request(struct intel_context *ce,
 			guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
 					true);
 		}
+
+		/*
+		 * XXX: Racey if context is reset, see comment in
+		 * __guc_reset_context().
+		 */
+		flush_work(&ce_to_guc(ce)->ct.requests.worker);
+
 		guc_context_unblock(ce);
 	}
 }
@@ -2719,7 +2745,12 @@  static void guc_handle_context_reset(struct intel_guc *guc,
 {
 	trace_intel_context_reset(ce);
 
-	if (likely(!intel_context_is_banned(ce))) {
+	/*
+	 * XXX: Racey if request cancellation has occurred, see comment in
+	 * __guc_reset_context().
+	 */
+	if (likely(!intel_context_is_banned(ce) &&
+		   !context_blocked(ce))) {
 		capture_error_state(guc, ce);
 		guc_context_replay(ce);
 	}