
drm/i915/gt: Flush gen3 relocs harder, again

Message ID 20200611160529.9558-1-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Series drm/i915/gt: Flush gen3 relocs harder, again

Commit Message

Chris Wilson June 11, 2020, 4:05 p.m. UTC
gen3 does not fully flush MI stores to memory on MI_FLUSH, such that a
subsequent read from e.g. the sampler can bypass the store and read the
stale value from memory. This is a serious issue when we are using MI
stores to rewrite the batches for relocation, as it means that the batch
is reading from random user/kernel memory. While it is particularly
sensitive [and detectable] for relocations, reading stale data at any
time is a worry.

Starting with a small number of delaying stores and doubling that number
until no more incoherency was seen over a few hours (with and without
background memory pressure), 32 was the magic number.

Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/2018
References: a889580c087a ("drm/i915: Flush GPU relocs harder for gen3")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
So gen3 requires a delay after the flush for the previous stores to land,
while gen5 is assumed to require a delay between the seqno write and the
MI_USER_INTERRUPT. Here I've made gen5 reuse the gen3 approach, but I
need to verify that it still holds.
---
 drivers/gpu/drm/i915/gt/gen2_engine_cs.c | 39 +++++++++---------------
 1 file changed, 15 insertions(+), 24 deletions(-)
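
For illustration, a minimal userspace-style sketch of the dword stream the
reworked breadcrumb emits. This is not driver code: the MI_* encodings
follow the i915 command definitions, but the status-page offsets are
made-up stand-ins for I915_GEM_HWS_SCRATCH / I915_GEM_HWS_SEQNO_ADDR.

#include <stdint.h>
#include <stdio.h>

#define MI_INSTR(opcode, flags)	(((opcode) << 23) | (flags))
#define MI_NOOP			MI_INSTR(0x00, 0)
#define MI_USER_INTERRUPT	MI_INSTR(0x02, 0)
#define MI_FLUSH		MI_INSTR(0x04, 0)
#define MI_NO_WRITE_FLUSH	(1 << 2)
#define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)

#define HWS_SCRATCH_ADDR	(48 * sizeof(uint32_t))	/* stand-in offset */
#define HWS_SEQNO_ADDR		(52 * sizeof(uint32_t))	/* stand-in offset */

static uint32_t *emit_breadcrumb(uint32_t *cs, uint32_t seqno, int count)
{
	*cs++ = MI_FLUSH;
	*cs++ = MI_NOOP;	/* keeps the total dword count even */

	/*
	 * 'count' delaying stores to a scratch slot, each chased by a
	 * flush that, with MI_NO_WRITE_FLUSH set, acts purely as a delay.
	 */
	while (count--) {
		*cs++ = MI_STORE_DWORD_INDEX;
		*cs++ = HWS_SCRATCH_ADDR;
		*cs++ = seqno;
		*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
	}

	/* the seqno write the waiter actually observes */
	*cs++ = MI_STORE_DWORD_INDEX;
	*cs++ = HWS_SEQNO_ADDR;
	*cs++ = seqno;

	*cs++ = MI_USER_INTERRUPT;
	return cs;
}

int main(void)
{
	uint32_t ring[256];
	uint32_t *end = emit_breadcrumb(ring, 1, 32);

	printf("gen3 breadcrumb: %zu dwords\n", (size_t)(end - ring));	/* 134 */
	return 0;
}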

Comments

Mika Kuoppala June 12, 2020, 9:14 a.m. UTC | #1
Chris Wilson <chris@chris-wilson.co.uk> writes:

> [snip commit message]
>
> diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
> index 3fb0dc1fb910..342c476ec872 100644
> --- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
> +++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
> @@ -142,19 +142,26 @@ int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode)
>  	return 0;
>  }
>  
> -u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs)
> +static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs, int count)
>  {
>  	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
>  	GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
>  
>  	*cs++ = MI_FLUSH;
> +	*cs++ = MI_NOOP;
> +
> +	while (count--) {
> +		*cs++ = MI_STORE_DWORD_INDEX;
> +		*cs++ = I915_GEM_HWS_SCRATCH * sizeof(u32);
> +		*cs++ = rq->fence.seqno;
> +		*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;

Why would you avoid write flush here?
-Mika


> +	}
>  
>  	*cs++ = MI_STORE_DWORD_INDEX;
>  	*cs++ = I915_GEM_HWS_SEQNO_ADDR;
>  	*cs++ = rq->fence.seqno;
>  
>  	*cs++ = MI_USER_INTERRUPT;
> -	*cs++ = MI_NOOP;
> [snip remainder of patch]
Chris Wilson June 12, 2020, 9:23 a.m. UTC | #2
Quoting Mika Kuoppala (2020-06-12 10:14:55)
> [snip]
> > +     while (count--) {
> > +             *cs++ = MI_STORE_DWORD_INDEX;
> > +             *cs++ = I915_GEM_HWS_SCRATCH * sizeof(u32);
> > +             *cs++ = rq->fence.seqno;
> > +             *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
> 
> Why would you avoid write flush here?

It's a flush of the render caches; all I'm using it for here is a delay.
As evidenced, MI_FLUSH does not flush the stores by itself.

32 is an awful lot of papering. I should note that for gen5 not only did
we have the delay in the breadcrumb but also in the invalidation. Maybe
that would help for gen3
-Chris
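
To expand on the answer above: MI_NO_WRITE_FLUSH suppresses the render-cache
write flush, so an MI_FLUSH carrying it performs no useful flushing and is
wanted here only for the time it takes. The flag bits, paraphrased from the
driver's command definitions (intel_gpu_commands.h):

/*
 * MI_FLUSH flag bits. Note the inverted sense of MI_NO_WRITE_FLUSH:
 * setting it *disables* the render-cache write flush, leaving the
 * instruction to serve as a command-streamer delay.
 */
#define MI_INSTR(opcode, flags)	(((opcode) << 23) | (flags))
#define MI_FLUSH		MI_INSTR(0x04, 0)
#define   MI_READ_FLUSH		(1 << 0)
#define   MI_EXE_FLUSH		(1 << 1)
#define   MI_NO_WRITE_FLUSH	(1 << 2)
#define   MI_SCENE_COUNT	(1 << 3)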
Chris Wilson June 12, 2020, 9:40 a.m. UTC | #3
Quoting Chris Wilson (2020-06-12 10:23:30)
> [snip]
> It's a flush of the render caches; all I'm using it for here is a delay.
> As evidenced, MI_FLUSH does not flush the stores by itself.
> 
> 32 is an awful lot of papering. I should note that for gen5 not only did
> we have the delay in the breadcrumb but also in the invalidation. Maybe
> that would help for gen3

Well that was easy. Splitting the w/a between the breadcrumb and the
invalidate does not help to reduce the burden [the number of stores
required to make the incoherency go away] of the w/a.
-Chris
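
For scale, a back-of-the-envelope sketch of what the papering costs per
request, and of the dword parity that presumably motivated shuffling the
MI_NOOP from after the MI_USER_INTERRUPT to after the MI_FLUSH: the ring
tail must stay qword-aligned (see assert_ring_tail_valid), so every
breadcrumb has to emit an even number of dwords.

#include <stdio.h>

int main(void)
{
	/*
	 * MI_FLUSH + MI_NOOP header, count 4-dword delay stores,
	 * a 3-dword seqno store and the MI_USER_INTERRUPT.
	 */
	int gen3 = 2 + 32 * 4 + 3 + 1;	/* 134 dwords */
	int gen5 = 2 +  8 * 4 + 3 + 1;	/*  38 dwords */

	printf("gen3: %d dwords (%d bytes)\n", gen3, gen3 * 4);
	printf("gen5: %d dwords (%d bytes)\n", gen5, gen5 * 4);
	return 0;
}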

Patch

diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
index 3fb0dc1fb910..342c476ec872 100644
--- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
@@ -142,19 +142,26 @@  int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode)
 	return 0;
 }
 
-u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs, int count)
 {
 	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
 	GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
 
 	*cs++ = MI_FLUSH;
+	*cs++ = MI_NOOP;
+
+	while (count--) {
+		*cs++ = MI_STORE_DWORD_INDEX;
+		*cs++ = I915_GEM_HWS_SCRATCH * sizeof(u32);
+		*cs++ = rq->fence.seqno;
+		*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
+	}
 
 	*cs++ = MI_STORE_DWORD_INDEX;
 	*cs++ = I915_GEM_HWS_SEQNO_ADDR;
 	*cs++ = rq->fence.seqno;
 
 	*cs++ = MI_USER_INTERRUPT;
-	*cs++ = MI_NOOP;
 
 	rq->tail = intel_ring_offset(rq, cs);
 	assert_ring_tail_valid(rq->ring, rq->tail);
@@ -162,31 +169,15 @@  u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 	return cs;
 }
 
-#define GEN5_WA_STORES 8 /* must be at least 1! */
-u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 {
-	int i;
-
-	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
-	GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
-
-	*cs++ = MI_FLUSH;
-
-	BUILD_BUG_ON(GEN5_WA_STORES < 1);
-	for (i = 0; i < GEN5_WA_STORES; i++) {
-		*cs++ = MI_STORE_DWORD_INDEX;
-		*cs++ = I915_GEM_HWS_SEQNO_ADDR;
-		*cs++ = rq->fence.seqno;
-	}
-
-	*cs++ = MI_USER_INTERRUPT;
-
-	rq->tail = intel_ring_offset(rq, cs);
-	assert_ring_tail_valid(rq->ring, rq->tail);
+	return __gen2_emit_breadcrumb(rq, cs, 32);
+}
 
-	return cs;
+u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+	return __gen2_emit_breadcrumb(rq, cs, 8);
 }
-#undef GEN5_WA_STORES
 
 /* Just userspace ABI convention to limit the wa batch bo to a resonable size */
 #define I830_BATCH_LIMIT SZ_256K