
drm/i915: fix SFC reset flow

Message ID 20190916214104.27881-1-daniele.ceraolospurio@intel.com (mailing list archive)
State New, archived
Series: drm/i915: fix SFC reset flow

Commit Message

Daniele Ceraolo Spurio Sept. 16, 2019, 9:41 p.m. UTC
Our assumption that we can ask the HW to lock the SFC even if it is not
currently in use does not match the HW commitment. The expectation from
the HW is that SW will not try to lock the SFC if the engine is not
using it, and if we do so the behavior is undefined; on ICL the HW
ends up returning the ack and ignoring our lock request, but this is
not guaranteed and we shouldn't expect it going forward.

Reported-by: Owen Zhang <owen.zhang@intel.com>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
---
 drivers/gpu/drm/i915/gt/intel_reset.c | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)

Comments

Chris Wilson Sept. 17, 2019, 7:57 a.m. UTC | #1
Quoting Daniele Ceraolo Spurio (2019-09-16 22:41:04)
> Our assumption that the we can ask the HW to lock the SFC even if not
> currently in use does not match the HW commitment. The expectation from
> the HW is that SW will not try to lock the SFC if the engine is not
> using it and if we do that the behavior is undefined; on ICL the HW
> ends up to returning the ack and ignoring our lock request, but this is
> not guaranteed and we shouldn't expect it going forward.
> 
> Reported-by: Owen Zhang <owen.zhang@intel.com>
> Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
> ---
> @@ -366,10 +368,13 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
>                                          sfc_forced_lock_ack_bit,
>                                          sfc_forced_lock_ack_bit,
>                                          1000, 0, NULL)) {
> -               DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
> +               /* did we race the unlock? */
> +               if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
> +                       DRM_ERROR("Wait for SFC forced lock ack failed\n");

What's our plan if this *ERROR* is ever triggered?

If the plan remains "do nothing and check the logs on death", then it
remains just a debug splat. If there is a plan to actually do something
to handle the error, do it!
-Chris
Tvrtko Ursulin Sept. 17, 2019, 10:22 a.m. UTC | #2
On 16/09/2019 22:41, Daniele Ceraolo Spurio wrote:
> Our assumption that the we can ask the HW to lock the SFC even if not
> currently in use does not match the HW commitment. The expectation from
> the HW is that SW will not try to lock the SFC if the engine is not
> using it and if we do that the behavior is undefined; on ICL the HW
> ends up to returning the ack and ignoring our lock request, but this is
> not guaranteed and we shouldn't expect it going forward.
> 
> Reported-by: Owen Zhang <owen.zhang@intel.com>
> Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
> ---
>   drivers/gpu/drm/i915/gt/intel_reset.c | 25 +++++++++++++++++--------
>   1 file changed, 17 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
> index 8327220ac558..900958804bd5 100644
> --- a/drivers/gpu/drm/i915/gt/intel_reset.c
> +++ b/drivers/gpu/drm/i915/gt/intel_reset.c
> @@ -352,13 +352,15 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
>   	}
>   
>   	/*
> -	 * Tell the engine that a software reset is going to happen. The engine
> -	 * will then try to force lock the SFC (if currently locked, it will
> -	 * remain so until we tell the engine it is safe to unlock; if currently
> -	 * unlocked, it will ignore this and all new lock requests). If SFC
> -	 * ends up being locked to the engine we want to reset, we have to reset
> -	 * it as well (we will unlock it once the reset sequence is completed).
> +	 * If the engine is using a SFC, tell the engine that a software reset
> +	 * is going to happen. The engine will then try to force lock the SFC.
> +	 * If SFC ends up being locked to the engine we want to reset, we have
> +	 * to reset it as well (we will unlock it once the reset sequence is
> +	 * completed).
>   	 */
> +	if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
> +		return 0;
> +
>   	rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
>   
>   	if (__intel_wait_for_register_fw(uncore,
> @@ -366,10 +368,13 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
>   					 sfc_forced_lock_ack_bit,
>   					 sfc_forced_lock_ack_bit,
>   					 1000, 0, NULL)) {
> -		DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
> +		/* did we race the unlock? */

How do we race here? Are we not in complete control of the engine at 
this point, so whether this engine is using the SFC or not should be 
static, no?

> +		if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
> +			DRM_ERROR("Wait for SFC forced lock ack failed\n");
>   		return 0;
>   	}
>   
> +	/* The HW could return the ack even if the sfc is not in use */

But the function already checked whether the SFC was in use and bailed 
out early if not - so is this comment relevant? (I understand it is true 
against the specs, just wondering about our exact code.)

>   	if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
>   		return sfc_reset_bit;
>   
> @@ -382,6 +387,7 @@ static void gen11_unlock_sfc(struct intel_engine_cs *engine)
>   	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
>   	i915_reg_t sfc_forced_lock;
>   	u32 sfc_forced_lock_bit;
> +	u32 lock;
>   
>   	switch (engine->class) {
>   	case VIDEO_DECODE_CLASS:
> @@ -401,7 +407,10 @@ static void gen11_unlock_sfc(struct intel_engine_cs *engine)
>   		return;
>   	}
>   
> -	rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
> +	lock = intel_uncore_read_fw(uncore, sfc_forced_lock);
> +	if (lock & sfc_forced_lock_bit)
> +		intel_uncore_write_fw(uncore, sfc_forced_lock,
> +				      lock & ~sfc_forced_lock_bit);

Here we can't rely on the return code from gen11_lock_sfc and have to 
read the register ourselves? I guess it depends on my question about the 
race comment.

In addition to this I now see that gen11_reset_engines does not use the 
return value from gen11_lock_sfc when deciding which engines it needs to 
unlock. Should we change that as well?


>   }
>   
>   static int gen11_reset_engines(struct intel_gt *gt,
> 

Regards,

Tvrtko
Chris Wilson Sept. 17, 2019, 4:06 p.m. UTC | #3
Quoting Daniele Ceraolo Spurio (2019-09-16 22:41:04)
> @@ -401,7 +407,10 @@ static void gen11_unlock_sfc(struct intel_engine_cs *engine)
>                 return;
>         }
>  
> -       rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
> +       lock = intel_uncore_read_fw(uncore, sfc_forced_lock);
> +       if (lock & sfc_forced_lock_bit)
> +               intel_uncore_write_fw(uncore, sfc_forced_lock,
> +                                     lock & ~sfc_forced_lock_bit);

This is handled by rmw_clear_fw() itself now,
80fa64d62067 ("drm/i915: Only apply a rmw mmio update if the value changes")
-Chris
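
For reference, the conditional read-modify-write that commit describes
boils down to something like the sketch below. This is an illustration
of the pattern only, not a verbatim copy of the helper in the tree:

static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	u32 old = intel_uncore_read_fw(uncore, reg);
	u32 new = old & ~clr;

	/* skip the MMIO write entirely if no bit would actually change */
	if (new != old)
		intel_uncore_write_fw(uncore, reg, new);
}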
Daniele Ceraolo Spurio Sept. 17, 2019, 6:29 p.m. UTC | #4
On 9/17/2019 3:22 AM, Tvrtko Ursulin wrote:
>
> On 16/09/2019 22:41, Daniele Ceraolo Spurio wrote:
>> Our assumption that the we can ask the HW to lock the SFC even if not
>> currently in use does not match the HW commitment. The expectation from
>> the HW is that SW will not try to lock the SFC if the engine is not
>> using it and if we do that the behavior is undefined; on ICL the HW
>> ends up to returning the ack and ignoring our lock request, but this is
>> not guaranteed and we shouldn't expect it going forward.
>>
>> Reported-by: Owen Zhang <owen.zhang@intel.com>
>> Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
>> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
>> ---
>>   drivers/gpu/drm/i915/gt/intel_reset.c | 25 +++++++++++++++++--------
>>   1 file changed, 17 insertions(+), 8 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c 
>> b/drivers/gpu/drm/i915/gt/intel_reset.c
>> index 8327220ac558..900958804bd5 100644
>> --- a/drivers/gpu/drm/i915/gt/intel_reset.c
>> +++ b/drivers/gpu/drm/i915/gt/intel_reset.c
>> @@ -352,13 +352,15 @@ static u32 gen11_lock_sfc(struct 
>> intel_engine_cs *engine)
>>       }
>>         /*
>> -     * Tell the engine that a software reset is going to happen. The 
>> engine
>> -     * will then try to force lock the SFC (if currently locked, it 
>> will
>> -     * remain so until we tell the engine it is safe to unlock; if 
>> currently
>> -     * unlocked, it will ignore this and all new lock requests). If SFC
>> -     * ends up being locked to the engine we want to reset, we have 
>> to reset
>> -     * it as well (we will unlock it once the reset sequence is 
>> completed).
>> +     * If the engine is using a SFC, tell the engine that a software 
>> reset
>> +     * is going to happen. The engine will then try to force lock 
>> the SFC.
>> +     * If SFC ends up being locked to the engine we want to reset, 
>> we have
>> +     * to reset it as well (we will unlock it once the reset 
>> sequence is
>> +     * completed).
>>        */
>> +    if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
>> +        return 0;
>> +
>>       rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
>>         if (__intel_wait_for_register_fw(uncore,
>> @@ -366,10 +368,13 @@ static u32 gen11_lock_sfc(struct 
>> intel_engine_cs *engine)
>>                        sfc_forced_lock_ack_bit,
>>                        sfc_forced_lock_ack_bit,
>>                        1000, 0, NULL)) {
>> -        DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
>> +        /* did we race the unlock? */
>
> How do we race here? Are we not in complete control of the engine at 
> this point so the status of this engine using SFC or not should be 
> static, no?

The hang detection might be due to a long non-preemptable batch, in 
which case there is in theory a chance for the batch to release the SFC 
while we try to lock it. The chance is incredibly small though, so am I 
being too paranoid?
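
Restating that window against the lock path in this patch (register and
helper names as in the diff below; the numbered comments are purely
illustrative):

	/* 1) the engine appears to be using the SFC... */
	if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
		return 0;

	/* 2) ...so ask the HW to force-lock it ahead of the engine reset */
	rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);

	/*
	 * 3) a long non-preemptable batch can release the SFC between 1) and
	 *    2); the lock request is then ignored and the ack may never come.
	 *    That case is benign, so after the ack wait times out we only
	 *    complain if the SFC still appears to be in use.
	 */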

>
>> +        if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
>> +            DRM_ERROR("Wait for SFC forced lock ack failed\n");
>>           return 0;
>>       }
>>   +    /* The HW could return the ack even if the sfc is not in use */
>
> But the function checked whether SFC wasn't in use and bailed out 
> early - so is this comment relevant? (I understand it is true against 
> the specs just wondering about our exact code.)
>

Same rationale as above: if the engine released the SFC while we were 
locking it, the lock request might have been rejected, but on ICL we 
still get the ack.

>>       if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
>>           return sfc_reset_bit;
>>   @@ -382,6 +387,7 @@ static void gen11_unlock_sfc(struct 
>> intel_engine_cs *engine)
>>       u8 vdbox_sfc_access = 
>> RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
>>       i915_reg_t sfc_forced_lock;
>>       u32 sfc_forced_lock_bit;
>> +    u32 lock;
>>         switch (engine->class) {
>>       case VIDEO_DECODE_CLASS:
>> @@ -401,7 +407,10 @@ static void gen11_unlock_sfc(struct 
>> intel_engine_cs *engine)
>>           return;
>>       }
>>   -    rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
>> +    lock = intel_uncore_read_fw(uncore, sfc_forced_lock);
>> +    if (lock & sfc_forced_lock_bit)
>> +        intel_uncore_write_fw(uncore, sfc_forced_lock,
>> +                      lock & ~sfc_forced_lock_bit);
>
> Here we can't rely on the return code from gen11_lock_sfc and have to 
> read the register ourselves? I guess it depends on my question about 
> the race comment.
>
> In addition to this I now see that gen11_reset_engines does not use 
> the return value from gen11_lock_sfc when deciding which engines it 
> needs to unlock. Should we change that as well?

Paranoia here as well: in case something went wrong with the locking I'd 
like to be sure the unlocking can still be performed independently so we 
can recover. E.g. the locking might have succeeded after we hit the 
timeout in gen11_lock_sfc, in which case the return value from that 
function won't reflect the status of the HW.

Thanks,
Daniele

>
>
>>   }
>>     static int gen11_reset_engines(struct intel_gt *gt,
>>
>
> Regards,
>
> Tvrtko
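
For readers without the surrounding code: the gen11_reset_engines() flow
discussed above roughly locks the SFC for each engine selected for reset,
issues the reset, then unlocks unconditionally - which is why the unlock
path derives the lock state from the register itself. A simplified sketch
(iteration details and error handling elided; the exact code in
intel_reset.c may differ):

	u32 hw_mask = 0;

	for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
		hw_mask |= hw_engine_mask[engine->id];	/* reset domain bit */
		hw_mask |= gen11_lock_sfc(engine);	/* plus SFC bit, if locked */
	}

	ret = gen6_hw_domain_reset(gt, hw_mask);	/* trigger the reset */

	for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
		gen11_unlock_sfc(engine);		/* unconditional unlock */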
Daniele Ceraolo Spurio Sept. 17, 2019, 6:36 p.m. UTC | #5
On 9/17/2019 12:57 AM, Chris Wilson wrote:
> Quoting Daniele Ceraolo Spurio (2019-09-16 22:41:04)
>> Our assumption that the we can ask the HW to lock the SFC even if not
>> currently in use does not match the HW commitment. The expectation from
>> the HW is that SW will not try to lock the SFC if the engine is not
>> using it and if we do that the behavior is undefined; on ICL the HW
>> ends up to returning the ack and ignoring our lock request, but this is
>> not guaranteed and we shouldn't expect it going forward.
>>
>> Reported-by: Owen Zhang <owen.zhang@intel.com>
>> Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
>> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
>> ---
>> @@ -366,10 +368,13 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
>>                                           sfc_forced_lock_ack_bit,
>>                                           sfc_forced_lock_ack_bit,
>>                                           1000, 0, NULL)) {
>> -               DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
>> +               /* did we race the unlock? */
>> +               if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
>> +                       DRM_ERROR("Wait for SFC forced lock ack failed\n");
> What's our plan if this *ERROR* is ever triggered?
>
> If it remains do nothing and check the logs on death, then it remains
> just a debug splat. If there is a plan to actually do something to
> handle the error, do it!
> -Chris

AFAIU the only thing we can do is escalate to a full gpu reset. However, 
the probability of this failing should be next to non-existent (only one 
engine can use the SFC at any time, so there is no lock contention), so 
I'm not convinced the fallback is worth the effort. The error is still 
useful IMO to catch unexpected behavior on new platforms, as happened in 
this case with the media team reporting this message on gen12 with the 
previous behavior. That said, I'm happy to add the extra logic if you 
believe it is worth it.

Daniele
Chris Wilson Sept. 17, 2019, 6:57 p.m. UTC | #6
Quoting Daniele Ceraolo Spurio (2019-09-17 19:36:35)
> 
> 
> On 9/17/2019 12:57 AM, Chris Wilson wrote:
> > Quoting Daniele Ceraolo Spurio (2019-09-16 22:41:04)
> >> Our assumption that the we can ask the HW to lock the SFC even if not
> >> currently in use does not match the HW commitment. The expectation from
> >> the HW is that SW will not try to lock the SFC if the engine is not
> >> using it and if we do that the behavior is undefined; on ICL the HW
> >> ends up to returning the ack and ignoring our lock request, but this is
> >> not guaranteed and we shouldn't expect it going forward.
> >>
> >> Reported-by: Owen Zhang <owen.zhang@intel.com>
> >> Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
> >> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
> >> ---
> >> @@ -366,10 +368,13 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
> >>                                           sfc_forced_lock_ack_bit,
> >>                                           sfc_forced_lock_ack_bit,
> >>                                           1000, 0, NULL)) {
> >> -               DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
> >> +               /* did we race the unlock? */
> >> +               if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
> >> +                       DRM_ERROR("Wait for SFC forced lock ack failed\n");
> > What's our plan if this *ERROR* is ever triggered?
> >
> > If it remains do nothing and check the logs on death, then it remains
> > just a debug splat. If there is a plan to actually do something to
> > handle the error, do it!
> > -Chris
> 
> AFAIU the only thing we can do is escalate to full gpu reset. However, 
> the probability of this failing should be next to non-existent (only one 
> engine can use the SFC at any time so there is no lock contention), so 
> I'm not convinced the fallback is worth the effort. The error is still 
> useful IMO to catch unexpected behavior on new platforms, as it happened 
> in this case with the media team reporting seeing this message on gen12 
> with the previous behavior. This said, I'm happy to add the extra logic 
> if you believe it is worth it.

We've seen this message on every icl run!
-Chris
Daniele Ceraolo Spurio Sept. 17, 2019, 7:45 p.m. UTC | #7
On 9/17/2019 11:57 AM, Chris Wilson wrote:
> Quoting Daniele Ceraolo Spurio (2019-09-17 19:36:35)
>>
>> On 9/17/2019 12:57 AM, Chris Wilson wrote:
>>> Quoting Daniele Ceraolo Spurio (2019-09-16 22:41:04)
>>>> Our assumption that the we can ask the HW to lock the SFC even if not
>>>> currently in use does not match the HW commitment. The expectation from
>>>> the HW is that SW will not try to lock the SFC if the engine is not
>>>> using it and if we do that the behavior is undefined; on ICL the HW
>>>> ends up to returning the ack and ignoring our lock request, but this is
>>>> not guaranteed and we shouldn't expect it going forward.
>>>>
>>>> Reported-by: Owen Zhang <owen.zhang@intel.com>
>>>> Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
>>>> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
>>>> ---
>>>> @@ -366,10 +368,13 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
>>>>                                            sfc_forced_lock_ack_bit,
>>>>                                            sfc_forced_lock_ack_bit,
>>>>                                            1000, 0, NULL)) {
>>>> -               DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
>>>> +               /* did we race the unlock? */
>>>> +               if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
>>>> +                       DRM_ERROR("Wait for SFC forced lock ack failed\n");
>>> What's our plan if this *ERROR* is ever triggered?
>>>
>>> If it remains do nothing and check the logs on death, then it remains
>>> just a debug splat. If there is a plan to actually do something to
>>> handle the error, do it!
>>> -Chris
>> AFAIU the only thing we can do is escalate to full gpu reset. However,
>> the probability of this failing should be next to non-existent (only one
>> engine can use the SFC at any time so there is no lock contention), so
>> I'm not convinced the fallback is worth the effort. The error is still
>> useful IMO to catch unexpected behavior on new platforms, as it happened
>> in this case with the media team reporting seeing this message on gen12
>> with the previous behavior. This said, I'm happy to add the extra logic
>> if you believe it is worth it.
> We've see this message on every icl run!
> -Chris

I've never noticed it; which tests are hitting it? My understanding from 
what the HW team said is that on ICL the ack will always come back (even 
if it is not part of the "official" SW/HW interface) and the HW tweak 
that stops that is a gen12 change. Something else might be wrong if this 
is firing off in our ICL CI, also because I don't think we have any test 
case that actually uses the SFC, do we?

Daniele
Chris Wilson Sept. 17, 2019, 7:49 p.m. UTC | #8
Quoting Daniele Ceraolo Spurio (2019-09-17 20:45:02)
> 
> 
> On 9/17/2019 11:57 AM, Chris Wilson wrote:
> > Quoting Daniele Ceraolo Spurio (2019-09-17 19:36:35)
> >>
> >> On 9/17/2019 12:57 AM, Chris Wilson wrote:
> >>> Quoting Daniele Ceraolo Spurio (2019-09-16 22:41:04)
> >>>> Our assumption that the we can ask the HW to lock the SFC even if not
> >>>> currently in use does not match the HW commitment. The expectation from
> >>>> the HW is that SW will not try to lock the SFC if the engine is not
> >>>> using it and if we do that the behavior is undefined; on ICL the HW
> >>>> ends up to returning the ack and ignoring our lock request, but this is
> >>>> not guaranteed and we shouldn't expect it going forward.
> >>>>
> >>>> Reported-by: Owen Zhang <owen.zhang@intel.com>
> >>>> Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
> >>>> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
> >>>> ---
> >>>> @@ -366,10 +368,13 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
> >>>>                                            sfc_forced_lock_ack_bit,
> >>>>                                            sfc_forced_lock_ack_bit,
> >>>>                                            1000, 0, NULL)) {
> >>>> -               DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
> >>>> +               /* did we race the unlock? */
> >>>> +               if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
> >>>> +                       DRM_ERROR("Wait for SFC forced lock ack failed\n");
> >>> What's our plan if this *ERROR* is ever triggered?
> >>>
> >>> If it remains do nothing and check the logs on death, then it remains
> >>> just a debug splat. If there is a plan to actually do something to
> >>> handle the error, do it!
> >>> -Chris
> >> AFAIU the only thing we can do is escalate to full gpu reset. However,
> >> the probability of this failing should be next to non-existent (only one
> >> engine can use the SFC at any time so there is no lock contention), so
> >> I'm not convinced the fallback is worth the effort. The error is still
> >> useful IMO to catch unexpected behavior on new platforms, as it happened
> >> in this case with the media team reporting seeing this message on gen12
> >> with the previous behavior. This said, I'm happy to add the extra logic
> >> if you believe it is worth it.
> > We've see this message on every icl run!
> > -Chris
> 
> I've never noticed it, which tests are hitting it? My understanding from 
> what the HW team said is that on ICL the ack will always come back (even 
> if it is not part of the "official" SW/HW interface) and the HW tweak 
> that stops that is a gen12 change. Something else might be wrong is this 
> is firing off in our ICL CI, also because I don't think we have any test 
> case that actually uses the SFC, do we?

https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6911/fi-icl-u2/igt@i915_selftest@live_hangcheck.html

All icl, live_hangcheck or live_reset, for as long as I can remember.
-Chris
Daniele Ceraolo Spurio Sept. 17, 2019, 8:53 p.m. UTC | #9
On 9/17/2019 12:49 PM, Chris Wilson wrote:
> Quoting Daniele Ceraolo Spurio (2019-09-17 20:45:02)
>>
>> On 9/17/2019 11:57 AM, Chris Wilson wrote:
>>> Quoting Daniele Ceraolo Spurio (2019-09-17 19:36:35)
>>>> On 9/17/2019 12:57 AM, Chris Wilson wrote:
>>>>> Quoting Daniele Ceraolo Spurio (2019-09-16 22:41:04)
>>>>>> Our assumption that the we can ask the HW to lock the SFC even if not
>>>>>> currently in use does not match the HW commitment. The expectation from
>>>>>> the HW is that SW will not try to lock the SFC if the engine is not
>>>>>> using it and if we do that the behavior is undefined; on ICL the HW
>>>>>> ends up to returning the ack and ignoring our lock request, but this is
>>>>>> not guaranteed and we shouldn't expect it going forward.
>>>>>>
>>>>>> Reported-by: Owen Zhang <owen.zhang@intel.com>
>>>>>> Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
>>>>>> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
>>>>>> ---
>>>>>> @@ -366,10 +368,13 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
>>>>>>                                             sfc_forced_lock_ack_bit,
>>>>>>                                             sfc_forced_lock_ack_bit,
>>>>>>                                             1000, 0, NULL)) {
>>>>>> -               DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
>>>>>> +               /* did we race the unlock? */
>>>>>> +               if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
>>>>>> +                       DRM_ERROR("Wait for SFC forced lock ack failed\n");
>>>>> What's our plan if this *ERROR* is ever triggered?
>>>>>
>>>>> If it remains do nothing and check the logs on death, then it remains
>>>>> just a debug splat. If there is a plan to actually do something to
>>>>> handle the error, do it!
>>>>> -Chris
>>>> AFAIU the only thing we can do is escalate to full gpu reset. However,
>>>> the probability of this failing should be next to non-existent (only one
>>>> engine can use the SFC at any time so there is no lock contention), so
>>>> I'm not convinced the fallback is worth the effort. The error is still
>>>> useful IMO to catch unexpected behavior on new platforms, as it happened
>>>> in this case with the media team reporting seeing this message on gen12
>>>> with the previous behavior. This said, I'm happy to add the extra logic
>>>> if you believe it is worth it.
>>> We've see this message on every icl run!
>>> -Chris
>> I've never noticed it, which tests are hitting it? My understanding from
>> what the HW team said is that on ICL the ack will always come back (even
>> if it is not part of the "official" SW/HW interface) and the HW tweak
>> that stops that is a gen12 change. Something else might be wrong is this
>> is firing off in our ICL CI, also because I don't think we have any test
>> case that actually uses the SFC, do we?
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6911/fi-icl-u2/igt@i915_selftest@live_hangcheck.html
>
> All icl, live_hangcheck or live_reset, for as long as I can remember.
> -Chris

Thanks. I'm going to check with the HW team to see what their 
recommended timeout value is, in case ours is too short. It could also 
be that even on ICL the ack is not always returned if the SFC is not 
actually in use.

Daniele
Tvrtko Ursulin Sept. 18, 2019, 1:42 p.m. UTC | #10
On 17/09/2019 19:29, Daniele Ceraolo Spurio wrote:
> 
> 
> On 9/17/2019 3:22 AM, Tvrtko Ursulin wrote:
>>
>> On 16/09/2019 22:41, Daniele Ceraolo Spurio wrote:
>>> Our assumption that the we can ask the HW to lock the SFC even if not
>>> currently in use does not match the HW commitment. The expectation from
>>> the HW is that SW will not try to lock the SFC if the engine is not
>>> using it and if we do that the behavior is undefined; on ICL the HW
>>> ends up to returning the ack and ignoring our lock request, but this is
>>> not guaranteed and we shouldn't expect it going forward.
>>>
>>> Reported-by: Owen Zhang <owen.zhang@intel.com>
>>> Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
>>> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
>>> ---
>>>   drivers/gpu/drm/i915/gt/intel_reset.c | 25 +++++++++++++++++--------
>>>   1 file changed, 17 insertions(+), 8 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c 
>>> b/drivers/gpu/drm/i915/gt/intel_reset.c
>>> index 8327220ac558..900958804bd5 100644
>>> --- a/drivers/gpu/drm/i915/gt/intel_reset.c
>>> +++ b/drivers/gpu/drm/i915/gt/intel_reset.c
>>> @@ -352,13 +352,15 @@ static u32 gen11_lock_sfc(struct 
>>> intel_engine_cs *engine)
>>>       }
>>>         /*
>>> -     * Tell the engine that a software reset is going to happen. The 
>>> engine
>>> -     * will then try to force lock the SFC (if currently locked, it 
>>> will
>>> -     * remain so until we tell the engine it is safe to unlock; if 
>>> currently
>>> -     * unlocked, it will ignore this and all new lock requests). If SFC
>>> -     * ends up being locked to the engine we want to reset, we have 
>>> to reset
>>> -     * it as well (we will unlock it once the reset sequence is 
>>> completed).
>>> +     * If the engine is using a SFC, tell the engine that a software 
>>> reset
>>> +     * is going to happen. The engine will then try to force lock 
>>> the SFC.
>>> +     * If SFC ends up being locked to the engine we want to reset, 
>>> we have
>>> +     * to reset it as well (we will unlock it once the reset 
>>> sequence is
>>> +     * completed).
>>>        */
>>> +    if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
>>> +        return 0;
>>> +
>>>       rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
>>>         if (__intel_wait_for_register_fw(uncore,
>>> @@ -366,10 +368,13 @@ static u32 gen11_lock_sfc(struct 
>>> intel_engine_cs *engine)
>>>                        sfc_forced_lock_ack_bit,
>>>                        sfc_forced_lock_ack_bit,
>>>                        1000, 0, NULL)) {
>>> -        DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
>>> +        /* did we race the unlock? */
>>
>> How do we race here? Are we not in complete control of the engine at 
>> this point so the status of this engine using SFC or not should be 
>> static, no?
> 
> The hang detection might be due to a long non-preemptable batch, in 
> which case there is in theory a chance for the batch to release the SFC 
> while we try to lock it. The chance is incredibly small though, so am I 
> being too paranoid?

I get it now, it is a legitimate race.

> 
>>
>>> +        if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
>>> +            DRM_ERROR("Wait for SFC forced lock ack failed\n");
>>>           return 0;
>>>       }
>>>   +    /* The HW could return the ack even if the sfc is not in use */
>>
>> But the function checked whether SFC wasn't in use and bailed out 
>> early - so is this comment relevant? (I understand it is true against 
>> the specs just wondering about our exact code.)
>>
> 
> Same rationale as the above, if the engine relased the SFC while we were 
> locking it, the locking might have been rejected, but on ICL we still 
> get the ack.
> 
>>>       if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
>>>           return sfc_reset_bit;
>>>   @@ -382,6 +387,7 @@ static void gen11_unlock_sfc(struct 
>>> intel_engine_cs *engine)
>>>       u8 vdbox_sfc_access = 
>>> RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
>>>       i915_reg_t sfc_forced_lock;
>>>       u32 sfc_forced_lock_bit;
>>> +    u32 lock;
>>>         switch (engine->class) {
>>>       case VIDEO_DECODE_CLASS:
>>> @@ -401,7 +407,10 @@ static void gen11_unlock_sfc(struct 
>>> intel_engine_cs *engine)
>>>           return;
>>>       }
>>>   -    rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
>>> +    lock = intel_uncore_read_fw(uncore, sfc_forced_lock);
>>> +    if (lock & sfc_forced_lock_bit)
>>> +        intel_uncore_write_fw(uncore, sfc_forced_lock,
>>> +                      lock & ~sfc_forced_lock_bit);
>>
>> Here we can't rely on the return code from gen11_lock_sfc and have to 
>> read the register ourselves? I guess it depends on my question about 
>> the race comment.
>>
>> In addition to this I now see that gen11_reset_engines does not use 
>> the return value from gen11_lock_sfc when deciding which engines it 
>> needs to unlock. Should we change that as well?
> 
> Paranoia here as well, in case something went wrong with the locking I'd 
> like to be sure the unlocking can still be performed independently so we 
> can recover. e.g. the locking might have succeeded after we hit the 
> timeout in gen11_lock_sfc , in which case the return from that function 
> won't reflect the status of the HW.

Put in a comment here explaining what's the story and with that:

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Another option is to cross-check software vs hardware locked status at 
this point and to log a mismatch, just so we get data on how often this 
happens in practice. This is probably best done as a follow-up.
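
One possible shape for that cross-check, purely as a hypothetical sketch 
(the sw_locked flag would need to be plumbed through from the 
gen11_lock_sfc() result; nothing below is part of this patch):

/* hypothetical: compare what SW thinks it locked against the HW state */
static void gen11_sfc_check_lock_state(struct intel_engine_cs *engine,
				       bool sw_locked, u32 lock,
				       u32 sfc_forced_lock_bit)
{
	bool hw_locked = lock & sfc_forced_lock_bit;

	if (sw_locked != hw_locked)
		DRM_DEBUG_DRIVER("%s: SFC lock state mismatch (sw=%d hw=%d)\n",
				 engine->name, sw_locked, hw_locked);
}

called from gen11_unlock_sfc() right after sfc_forced_lock is read.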

Regards,

Tvrtko

> 
> Thanks,
> Daniele
> 
>>
>>
>>>   }
>>>     static int gen11_reset_engines(struct intel_gt *gt,
>>>
>>
>> Regards,
>>
>> Tvrtko
> 
>

Patch

diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 8327220ac558..900958804bd5 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -352,13 +352,15 @@  static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
 	}
 
 	/*
-	 * Tell the engine that a software reset is going to happen. The engine
-	 * will then try to force lock the SFC (if currently locked, it will
-	 * remain so until we tell the engine it is safe to unlock; if currently
-	 * unlocked, it will ignore this and all new lock requests). If SFC
-	 * ends up being locked to the engine we want to reset, we have to reset
-	 * it as well (we will unlock it once the reset sequence is completed).
+	 * If the engine is using a SFC, tell the engine that a software reset
+	 * is going to happen. The engine will then try to force lock the SFC.
+	 * If SFC ends up being locked to the engine we want to reset, we have
+	 * to reset it as well (we will unlock it once the reset sequence is
+	 * completed).
 	 */
+	if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
+		return 0;
+
 	rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
 
 	if (__intel_wait_for_register_fw(uncore,
@@ -366,10 +368,13 @@  static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
 					 sfc_forced_lock_ack_bit,
 					 sfc_forced_lock_ack_bit,
 					 1000, 0, NULL)) {
-		DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
+		/* did we race the unlock? */
+		if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
+			DRM_ERROR("Wait for SFC forced lock ack failed\n");
 		return 0;
 	}
 
+	/* The HW could return the ack even if the sfc is not in use */
 	if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
 		return sfc_reset_bit;
 
@@ -382,6 +387,7 @@  static void gen11_unlock_sfc(struct intel_engine_cs *engine)
 	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
 	i915_reg_t sfc_forced_lock;
 	u32 sfc_forced_lock_bit;
+	u32 lock;
 
 	switch (engine->class) {
 	case VIDEO_DECODE_CLASS:
@@ -401,7 +407,10 @@  static void gen11_unlock_sfc(struct intel_engine_cs *engine)
 		return;
 	}
 
-	rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
+	lock = intel_uncore_read_fw(uncore, sfc_forced_lock);
+	if (lock & sfc_forced_lock_bit)
+		intel_uncore_write_fw(uncore, sfc_forced_lock,
+				      lock & ~sfc_forced_lock_bit);
 }
 
 static int gen11_reset_engines(struct intel_gt *gt,