drm/i915: Disable atomics in L3 for gen9

Message ID 20190720143132.17522-1-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Series drm/i915: Disable atomics in L3 for gen9

Commit Message

Chris Wilson July 20, 2019, 2:31 p.m. UTC
Enabling atomic operations in L3 leads to unrecoverable GPU hangs, as
the machine stops responding milliseconds after receipt of the reset
request [GDRT]. By disabling the cached atomics, the hangs do not occur
and we presume the GPU would reset normally for similar hangs.

Reported-by: Jason Ekstrand <jason@jlekstrand.net>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110998
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jason Ekstrand <jason@jlekstrand.net>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
---
Jason reports that Windows is not clearing L3SQCREG4:22 and does not
suffer the same GPU hang so it is likely some other w/a that interacts
badly. Fwiw, these 3 are the only registers I could find that mention
atomic ops (and appear to be part of the same chain for memory access).
---
 drivers/gpu/drm/i915/gt/intel_workarounds.c | 8 ++++++++
 drivers/gpu/drm/i915/i915_reg.h             | 7 +++++++
 2 files changed, 15 insertions(+)

Comments

Tvrtko Ursulin July 22, 2019, 11:41 a.m. UTC | #1
On 20/07/2019 15:31, Chris Wilson wrote:
> Enabling atomic operations in L3 leads to unrecoverable GPU hangs, as
> the machine stops responding milliseconds after receipt of the reset
> request [GDRT]. By disabling the cached atomics, the hangs do not occur
> and we presume the GPU would reset normally for similar hangs.
> 
> Reported-by: Jason Ekstrand <jason@jlekstrand.net>
> Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110998
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Jason Ekstrand <jason@jlekstrand.net>
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
> ---
> Jason reports that Windows is not clearing L3SQCREG4:22 and does not
> suffer the same GPU hang so it is likely some other w/a that interacts
> badly. Fwiw, these 3 are the only registers I could find that mention
> atomic ops (and appear to be part of the same chain for memory access).

Bit-toggling itself looks fine to me and matches what I could find in 
the docs. (All three bits across three registers should be equal.)
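
A minimal sketch of that invariant, purely hypothetical and not part of
the series, assuming the standard intel_uncore_read() MMIO accessor and
the bit definitions from this patch:

static void check_l3_atomics_consistent(struct intel_uncore *uncore)
{
	/* The three non-IA coherent-atomics enable bits must agree. */
	bool lncf = intel_uncore_read(uncore, GEN9_SCRATCH_LNCF1) &
		    GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE;
	bool lqsc = intel_uncore_read(uncore, GEN8_L3SQCREG4) &
		    GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE;
	bool evict = intel_uncore_read(uncore, GEN9_SCRATCH1) &
		     EVICTION_PERF_FIX_ENABLE;

	WARN_ON(lncf != lqsc || lncf != evict);
}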

What I am curious about is what are the other consequences of disabling 
L3 atomics? Performance drop somewhere?

Regards,

Tvrtko


> ---
>   drivers/gpu/drm/i915/gt/intel_workarounds.c | 8 ++++++++
>   drivers/gpu/drm/i915/i915_reg.h             | 7 +++++++
>   2 files changed, 15 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
> index 704ace01e7f5..ac94ed3ba7b6 100644
> --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
> +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
> @@ -1349,6 +1349,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
>   		wa_write_or(wal,
>   			    GEN8_L3SQCREG4,
>   			    GEN8_LQSC_FLUSH_COHERENT_LINES);
> +
> +		/* Disable atomics in L3 to prevent unrecoverable hangs */
> +		wa_write_masked_or(wal, GEN9_SCRATCH_LNCF1,
> +				   GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
> +		wa_write_masked_or(wal, GEN8_L3SQCREG4,
> +				   GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
> +		wa_write_masked_or(wal, GEN9_SCRATCH1,
> +				   EVICTION_PERF_FIX_ENABLE, 0);
>   	}
>   }
>   
> diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
> index 24f2a52a2b42..e23b2200e7fc 100644
> --- a/drivers/gpu/drm/i915/i915_reg.h
> +++ b/drivers/gpu/drm/i915/i915_reg.h
> @@ -7728,6 +7728,7 @@ enum {
>   #define  GEN11_LQSC_CLEAN_EVICT_DISABLE		(1 << 6)
>   #define  GEN8_LQSC_RO_PERF_DIS			(1 << 27)
>   #define  GEN8_LQSC_FLUSH_COHERENT_LINES		(1 << 21)
> +#define  GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(22)
>   
>   /* GEN8 chicken */
>   #define HDC_CHICKEN0				_MMIO(0x7300)
> @@ -11202,6 +11203,12 @@ enum skl_power_gate {
>   /* Media decoder 2 MOCS registers */
>   #define GEN11_MFX2_MOCS(i)	_MMIO(0x10000 + (i) * 4)
>   
> +#define GEN9_SCRATCH_LNCF1		_MMIO(0xb008)
> +#define   GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(0)
> +
> +#define GEN9_SCRATCH1			_MMIO(0xb11c)
> +#define   EVICTION_PERF_FIX_ENABLE	REG_BIT(8)
> +
>   #define GEN10_SCRATCH_LNCF2		_MMIO(0xb0a0)
>   #define   PMFLUSHDONE_LNICRSDROP	(1 << 20)
>   #define   PMFLUSH_GAPL3UNBLOCK		(1 << 21)
>
Chris Wilson July 23, 2019, 11:55 a.m. UTC | #2
Quoting Tvrtko Ursulin (2019-07-22 12:41:36)
> 
> On 20/07/2019 15:31, Chris Wilson wrote:
> > Enabling atomic operations in L3 leads to unrecoverable GPU hangs, as
> > the machine stops responding milliseconds after receipt of the reset
> > request [GDRT]. By disabling the cached atomics, the hangs do not occur
> > and we presume the GPU would reset normally for similar hangs.
> > 
> > Reported-by: Jason Ekstrand <jason@jlekstrand.net>
> > Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110998
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Jason Ekstrand <jason@jlekstrand.net>
> > Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> > Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
> > ---
> > Jason reports that Windows is not clearing L3SQCREG4:22 and does not
> > suffer the same GPU hang so it is likely some other w/a that interacts
> > badly. Fwiw, these 3 are the only registers I could find that mention
> > atomic ops (and appear to be part of the same chain for memory access).
> 
> Bit-toggling itself looks fine to me and matches what I could find in 
> the docs. (All three bits across three registers should be equal.)
> 
> What I am curious about is what are the other consequences of disabling 
> L3 atomics? Performance drop somewhere?

The test I have where it goes from dead to passing, that's a considerable
performance improvement ;)

I imagine not being able to use L3 for atomics is pretty dire, whether that
has any impact, I have no clue.

It is still very likely that we see this because we are doing something
wrong elsewhere.
-Chris
Francisco Jerez July 23, 2019, 10:19 p.m. UTC | #3
Chris Wilson <chris@chris-wilson.co.uk> writes:

> Quoting Tvrtko Ursulin (2019-07-22 12:41:36)
>> 
>> On 20/07/2019 15:31, Chris Wilson wrote:
>> > Enabling atomic operations in L3 leads to unrecoverable GPU hangs, as
>> > the machine stops responding milliseconds after receipt of the reset
>> > request [GDRT]. By disabling the cached atomics, the hangs do not occur
>> > and we presume the GPU would reset normally for similar hangs.
>> > 
>> > Reported-by: Jason Ekstrand <jason@jlekstrand.net>
>> > Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110998
>> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>> > Cc: Jason Ekstrand <jason@jlekstrand.net>
>> > Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
>> > Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
>> > ---
>> > Jason reports that Windows is not clearing L3SQCREG4:22 and does not
>> > suffer the same GPU hang so it is likely some other w/a that interacts
>> > badly. Fwiw, these 3 are the only registers I could find that mention
>> > atomic ops (and appear to be part of the same chain for memory access).
>> 
>> Bit-toggling itself looks fine to me and matches what I could find in 
>> the docs. (All three bits across three registers should be equal.)
>> 
>> What I am curious about is what are the other consequences of disabling 
>> L3 atomics? Performance drop somewhere?
>
> The test I have where it goes from dead to passing, that's a considerable
> performance improvement ;)
>
> I imagine not being able to use L3 for atomics is pretty dire, whether that
> has any impact, I have no clue.
>
> It is still very likely that we see this because we are doing something
> wrong elsewhere.

This reminds me of f3fc4884ebe6ae649d3723be14b219230d3b7fd2 followed by
d351f6d94893f3ba98b1b20c5ef44c35fc1da124 due to the massive impact (of
the order of 20x IIRC) using the L3 turned out to have on the
performance of HDC atomics, on at least that platform.  It seems
unfortunate that we're going to lose L3 atomics on Gen9 now, even though
it's only buffer atomics which are broken IIUC, and even though the
Windows driver is somehow getting away without disabling them.  Some of
our setup must be wrong either in the kernel or in userspace...  Are
these registers at least whitelisted so userspace can re-enable L3
atomics once the problem is addressed?  Wouldn't it be a more specific
workaround for userspace to simply use a non-L3-cacheable MOCS for
(rarely used) buffer surfaces, so it could benefit from L3 atomics
elsewhere?
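
To make the whitelisting question concrete, a hypothetical sketch of
the entries gen9_whitelist_build() would need, using the existing
whitelist_reg() helper (illustration only, not a suggestion to merge,
for the reasons Chris gives below):

	/* Hypothetical: expose the L3 atomics controls to userspace. */
	whitelist_reg(w, GEN8_L3SQCREG4);
	whitelist_reg(w, GEN9_SCRATCH_LNCF1);
	whitelist_reg(w, GEN9_SCRATCH1);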


> -Chris
Chris Wilson July 24, 2019, 2:34 p.m. UTC | #4
Quoting Francisco Jerez (2019-07-23 23:19:13)
> Chris Wilson <chris@chris-wilson.co.uk> writes:
> 
> > Quoting Tvrtko Ursulin (2019-07-22 12:41:36)
> >> 
> >> On 20/07/2019 15:31, Chris Wilson wrote:
> >> > Enabling atomic operations in L3 leads to unrecoverable GPU hangs, as
> >> > the machine stops responding milliseconds after receipt of the reset
> >> > request [GDRT]. By disabling the cached atomics, the hangs do not occur
> >> > and we presume the GPU would reset normally for similar hangs.
> >> > 
> >> > Reported-by: Jason Ekstrand <jason@jlekstrand.net>
> >> > Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110998
> >> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >> > Cc: Jason Ekstrand <jason@jlekstrand.net>
> >> > Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> >> > Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
> >> > ---
> >> > Jason reports that Windows is not clearing L3SQCREG4:22 and does not
> >> > suffer the same GPU hang so it is likely some other w/a that interacts
> >> > badly. Fwiw, these 3 are the only registers I could find that mention
> >> > atomic ops (and appear to be part of the same chain for memory access).
> >> 
> >> Bit-toggling itself looks fine to me and matches what I could find in 
> >> the docs. (All three bits across three registers should be equal.)
> >> 
> >> What I am curious about is what are the other consequences of disabling 
> >> L3 atomics? Performance drop somewhere?
> >
> > The test I have where it goes from dead to passing, that's a considerable
> > performance improvement ;)
> >
> > I imagine not being able to use L3 for atomics is pretty dire, whether that
> > has any impact, I have no clue.
> >
> > It is still very likely that we see this because we are doing something
> > wrong elsewhere.
> 
> This reminds me of f3fc4884ebe6ae649d3723be14b219230d3b7fd2 followed by
> d351f6d94893f3ba98b1b20c5ef44c35fc1da124 due to the massive impact (of
> the order of 20x IIRC) using the L3 turned out to have on the
> performance of HDC atomics, on at least that platform.  It seems
> unfortunate that we're going to lose L3 atomics on Gen9 now, even though
> it's only buffer atomics which are broken IIUC, and even though the
> Windows driver is somehow getting away without disabling them.  Some of
> our setup must be wrong either in the kernel or in userspace...  Are
> these registers at least whitelisted so userspace can re-enable L3
> atomics once the problem is addressed?  Wouldn't it be a more specific
> workaround for userspace to simply use a non-L3-cacheable MOCS for
> (rarely used) buffer surfaces, so it could benefit from L3 atomics
> elsewhere?

If it was the case that disabling L3 atomics was the only way to prevent
the machine lockup under this scenario, then I think it is
unquestionably the right thing to do, and we could not leave it to
userspace to dtrt. We should never add non-context saved unsafe
registers to the whitelist (if setting a register may cause data
corruption or worse in another context/process, that is bad) despite our
repeated transgressions. However, there's no evidence to say that it does
prevent the machine lockup as it prevents the GPU hang that led to the
lockup on reset.

Other than GPGPU requiring a flush around every sneeze, I did not see
anything in the gen9 w/a list that seemed like a match. Nevertheless, I
expect there is a more precise w/a than a blanket disable.
-Chris
Francisco Jerez July 24, 2019, 8:02 p.m. UTC | #5
Chris Wilson <chris@chris-wilson.co.uk> writes:

> Quoting Francisco Jerez (2019-07-23 23:19:13)
>> Chris Wilson <chris@chris-wilson.co.uk> writes:
>> 
>> > Quoting Tvrtko Ursulin (2019-07-22 12:41:36)
>> >> 
>> >> On 20/07/2019 15:31, Chris Wilson wrote:
>> >> > Enabling atomic operations in L3 leads to unrecoverable GPU hangs, as
>> >> > the machine stops responding milliseconds after receipt of the reset
>> >> > request [GDRT]. By disabling the cached atomics, the hangs do not occur
>> >> > and we presume the GPU would reset normally for similar hangs.
>> >> > 
>> >> > Reported-by: Jason Ekstrand <jason@jlekstrand.net>
>> >> > Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110998
>> >> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>> >> > Cc: Jason Ekstrand <jason@jlekstrand.net>
>> >> > Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
>> >> > Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
>> >> > ---
>> >> > Jason reports that Windows is not clearing L3SQCREG4:22 and does not
>> >> > suffer the same GPU hang so it is likely some other w/a that interacts
>> >> > badly. Fwiw, these 3 are the only registers I could find that mention
>> >> > atomic ops (and appear to be part of the same chain for memory access).
>> >> 
>> >> Bit-toggling itself looks fine to me and matches what I could find in 
>> >> the docs. (All three bits across three registers should be equal.)
>> >> 
>> >> What I am curious about is what are the other consequences of disabling 
>> >> L3 atomics? Performance drop somewhere?
>> >
>> > The test I have where it goes from dead to passing, that's a considerable
>> > performance improvement ;)
>> >
>> > I imagine not being able to use L3 for atomics is pretty dire, whether that
>> > has any impact, I have no clue.
>> >
>> > It is still very likely that we see this because we are doing something
>> > wrong elsewhere.
>> 
>> This reminds me of f3fc4884ebe6ae649d3723be14b219230d3b7fd2 followed by
>> d351f6d94893f3ba98b1b20c5ef44c35fc1da124 due to the massive impact (of
>> the order of 20x IIRC) using the L3 turned out to have on the
>> performance of HDC atomics, on at least that platform.  It seems
>> unfortunate that we're going to lose L3 atomics on Gen9 now, even though
>> it's only buffer atomics which are broken IIUC, and even though the
>> Windows driver is somehow getting away without disabling them.  Some of
>> our setup must be wrong either in the kernel or in userspace...  Are
>> these registers at least whitelisted so userspace can re-enable L3
>> atomics once the problem is addressed?  Wouldn't it be a more specific
>> workaround for userspace to simply use a non-L3-cacheable MOCS for
>> (rarely used) buffer surfaces, so it could benefit from L3 atomics
>> elsewhere?
>
> If it was the case that disabling L3 atomics was the only way to prevent
> the machine lockup under this scenario, then I think it is
> unquestionably the right thing to do, and we could not leave it to
> userspace to dtrt. We should never add non-context saved unsafe
> registers to the whitelist (if setting a register may cause data
> corruption or worse in another context/process, that is bad) despite our
> repeated transgressions. However, there's no evidence to say that it does
> prevent the machine lockup as it prevents the GPU hang that led to the
> lockup on reset.
>
> Other than GPGPU requiring a flush around every sneeze, I did not see
> anything in the gen9 w/a list that seemed like a match. Nevertheless, I
> expect there is a more precise w/a than a blanket disable.
> -Chris

Supposedly there is a more precise one (setting the surface state MOCS
to UC for buffer images), but it relies on userspace doing the right
thing for the machine not to lock up.  There is a good chance that the
reason why L3 atomics hang on such buffers is ultimately under userspace
control, in which case we'll eventually have to undo the programming
done in this patch in order to re-enable L3 atomics once the problem is
addressed.  That means that userspace will have the freedom to hang the
machine hard once again, which sounds really bad, but it's no real news
for us (*cough* HSW *cough*), and it might be the only way to match the
performance of the Windows driver.

What can we do here?  Add an i915 option to enable performance features
that can lead to the system hanging hard under malicious (or
incompetent) userspace programming?  Probably only the user can tell
whether the trade-off between performance and security of the system is
acceptable...
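
A hypothetical sketch of such an option, modelled on the existing
i915_param_named_unsafe() machinery; the parameter name is invented,
and it would still need a field in struct i915_params plus a check in
the workaround code:

i915_param_named_unsafe(enable_l3_atomics, bool, 0400,
	"Enable L3-cached atomic operations on gen9. Improves atomics "
	"performance, but buggy or malicious userspace may hard-hang "
	"the machine (default: false)");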
Jason Ekstrand Nov. 9, 2020, 7:52 p.m. UTC | #6
We need to land this patch.  The number of bugs we have piling up in
Mesa gitlab related to this is getting a lot larger than I'd like.
I've gone back and forth with various HW and SW people internally for
countless e-mail threads and there is no other good workaround.  Yes,
the perf hit to atomics sucks but, fortunately, most games don't use
them heavily enough for it to make a significant impact.  We should
just eat the perf hit and fix the hangs.

Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>

--Jason

On Wed, Jul 24, 2019 at 3:02 PM Francisco Jerez <currojerez@riseup.net> wrote:
>
> Chris Wilson <chris@chris-wilson.co.uk> writes:
>
> > Quoting Francisco Jerez (2019-07-23 23:19:13)
> >> Chris Wilson <chris@chris-wilson.co.uk> writes:
> >>
> >> > Quoting Tvrtko Ursulin (2019-07-22 12:41:36)
> >> >>
> >> >> On 20/07/2019 15:31, Chris Wilson wrote:
> >> >> > Enabling atomic operations in L3 leads to unrecoverable GPU hangs, as
> >> >> > the machine stops responding milliseconds after receipt of the reset
> >> >> > request [GDRT]. By disabling the cached atomics, the hangs do not occur
> >> >> > and we presume the GPU would reset normally for similar hangs.
> >> >> >
> >> >> > Reported-by: Jason Ekstrand <jason@jlekstrand.net>
> >> >> > Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110998
> >> >> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >> >> > Cc: Jason Ekstrand <jason@jlekstrand.net>
> >> >> > Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> >> >> > Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
> >> >> > ---
> >> >> > Jason reports that Windows is not clearing L3SQCREG4:22 and does not
> >> >> > suffer the same GPU hang so it is likely some other w/a that interacts
> >> >> > badly. Fwiw, these 3 are the only registers I could find that mention
> >> >> > atomic ops (and appear to be part of the same chain for memory access).
> >> >>
> >> >> Bit-toggling itself looks fine to me and matches what I could find in
> >> >> the docs. (All three bits across three registers should be equal.)
> >> >>
> >> >> What I am curious about is what are the other consequences of disabling
> >> >> L3 atomics? Performance drop somewhere?
> >> >
> >> > The test I have where it goes from dead to passing, that's a considerable
> >> > performance improvement ;)
> >> >
> >> > I imagine not being able to use L3 for atomics is pretty dire, whether that
> >> > has any impact, I have no clue.
> >> >
> >> > It is still very likely that we see this because we are doing something
> >> > wrong elsewhere.
> >>
> >> This reminds me of f3fc4884ebe6ae649d3723be14b219230d3b7fd2 followed by
> >> d351f6d94893f3ba98b1b20c5ef44c35fc1da124 due to the massive impact (of
> >> the order of 20x IIRC) using the L3 turned out to have on the
> >> performance of HDC atomics, on at least that platform.  It seems
> >> unfortunate that we're going to lose L3 atomics on Gen9 now, even though
> >> it's only buffer atomics which are broken IIUC, and even though the
> >> Windows driver is somehow getting away without disabling them.  Some of
> >> our setup must be wrong either in the kernel or in userspace...  Are
> >> these registers at least whitelisted so userspace can re-enable L3
> >> atomics once the problem is addressed?  Wouldn't it be a more specific
> >> workaround for userspace to simply use a non-L3-cacheable MOCS for
> >> (rarely used) buffer surfaces, so it could benefit from L3 atomics
> >> elsewhere?
> >
> > If it was the case that disabling L3 atomics was the only way to prevent
> > the machine lockup under this scenario, then I think it is
> > unquestionably the right thing to do, and we could not leave it to
> > userspace to dtrt. We should never add non-context saved unsafe
> > registers to the whitelist (if setting a register may cause data
> > corruption or worse in another context/process, that is bad) despite our
> > repeated transgressions. However, there's no evidence to say that it does
> > prevent the machine lockup as it prevents the GPU hang that led to the
> > lockup on reset.
> >
> > Other than GPGPU requiring a flush around every sneeze, I did not see
> > anything in the gen9 w/a list that seemed like a match. Nevertheless, I
> > expect there is a more precise w/a than a blanket disable.
> > -Chris
>
> Supposedly there is a more precise one (setting the surface state MOCS
> to UC for buffer images), but it relies on userspace doing the right
> thing for the machine not to lock up.  There is a good chance that the
> reason why L3 atomics hang on such buffers is ultimately under userspace
> control, in which case we'll eventually have to undo the programming
> done in this patch in order to re-enable L3 atomics once the problem is
> addressed.  That means that userspace will have the freedom to hang the
> machine hard once again, which sounds really bad, but it's no real news
> for us (*cough* HSW *cough*), and it might be the only way to match the
> performance of the Windows driver.
>
> What can we do here?  Add an i915 option to enable performance features
> that can lead to the system hanging hard under malicious (or
> incompetent) userspace programming?  Probably only the user can tell
> whether the trade-off between performance and security of the system is
> acceptable...
Chris Wilson Nov. 9, 2020, 8:15 p.m. UTC | #7
Quoting Jason Ekstrand (2020-11-09 19:52:26)
> We need to land this patch.  The number of bugs we have piling up in
> Mesa gitlab related to this is getting a lot larger than I'd like.
> I've gone back and forth with various HW and SW people internally for
> countless e-mail threads and there is no other good workaround.  Yes,
> the perf hit to atomics sucks but, fortunately, most games don't use
> them heavily enough for it to make a significant impact.  We should
> just eat the perf hit and fix the hangs.

Drat, I thought you had found an alternative fix in the
bad GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC w/a.

So be it.
-Chris
Ville Syrjälä Nov. 9, 2020, 8:48 p.m. UTC | #8
On Mon, Nov 09, 2020 at 08:15:05PM +0000, Chris Wilson wrote:
> Quoting Jason Ekstrand (2020-11-09 19:52:26)
> > We need to land this patch.  The number of bugs we have piling up in
> > Mesa gitlab related to this is getting a lot larger than I'd like.
> > I've gone back and forth with various HW and SW people internally for
> > countless e-mail threads and there is no other good workaround.  Yes,
> > the perf hit to atomics sucks but, fortunately, most games don't use
> > them heavily enough for it to make a significant impact.  We should
> > just eat the perf hit and fix the hangs.
> 
> Drat, I thought you had found an alternative fix in the
> bad GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC w/a.
> 
> So be it.

I don't suppose this could be just lack of programming the magic
MOCS entry for L3 evictions?

--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -132,6 +132,9 @@ static const struct drm_i915_mocs_entry skl_mocs_table[] = {
        MOCS_ENTRY(I915_MOCS_CACHED,
                   LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),
-                  L3_3_WB)
+                  L3_3_WB),
+       MOCS_ENTRY(63,
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+                  L3_1_UC)
 };
 
 /* NOTE: the LE_TGT_CACHE is not used on Broxton */

The code seems to claim we can't even program that on gen9, but there's
nothing in the current spec to back that up AFAICS.
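
The claim in question is presumably this definition in intel_mocs.c
(quoted from memory, so treat as approximate):

#define GEN9_NUM_MOCS_ENTRIES	62  /* 62 out of 64 - 63 & 64 are reserved. */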

Patch

diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 704ace01e7f5..ac94ed3ba7b6 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1349,6 +1349,14 @@  rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 		wa_write_or(wal,
 			    GEN8_L3SQCREG4,
 			    GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+		/* Disable atomics in L3 to prevent unrecoverable hangs */
+		wa_write_masked_or(wal, GEN9_SCRATCH_LNCF1,
+				   GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
+		wa_write_masked_or(wal, GEN8_L3SQCREG4,
+				   GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
+		wa_write_masked_or(wal, GEN9_SCRATCH1,
+				   EVICTION_PERF_FIX_ENABLE, 0);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 24f2a52a2b42..e23b2200e7fc 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7728,6 +7728,7 @@  enum {
 #define  GEN11_LQSC_CLEAN_EVICT_DISABLE		(1 << 6)
 #define  GEN8_LQSC_RO_PERF_DIS			(1 << 27)
 #define  GEN8_LQSC_FLUSH_COHERENT_LINES		(1 << 21)
+#define  GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(22)
 
 /* GEN8 chicken */
 #define HDC_CHICKEN0				_MMIO(0x7300)
@@ -11202,6 +11203,12 @@  enum skl_power_gate {
 /* Media decoder 2 MOCS registers */
 #define GEN11_MFX2_MOCS(i)	_MMIO(0x10000 + (i) * 4)
 
+#define GEN9_SCRATCH_LNCF1		_MMIO(0xb008)
+#define   GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(0)
+
+#define GEN9_SCRATCH1			_MMIO(0xb11c)
+#define   EVICTION_PERF_FIX_ENABLE	REG_BIT(8)
+
 #define GEN10_SCRATCH_LNCF2		_MMIO(0xb0a0)
 #define   PMFLUSHDONE_LNICRSDROP	(1 << 20)
 #define   PMFLUSH_GAPL3UNBLOCK		(1 << 21)