diff mbox series

[5/7] drm/i915/gt: Ignore stale context state upon resume

Message ID 20191229183153.3719869-5-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Headers show
Series [1/7] drm/i915/gt: Ensure that all new contexts clear STOP_RING | expand

Commit Message

Chris Wilson Dec. 29, 2019, 6:31 p.m. UTC
We leave the kernel_context on the HW as we suspend (and while idle).
There is no guarantee that it is complete in memory, so we try to inhibit
restoration from the kernel_context. Reinforce the inhibition by
scrubbing the context.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c             | 17 +++++++++++++++--
 drivers/gpu/drm/i915/gt/intel_ring_submission.c |  2 +-
 2 files changed, 16 insertions(+), 3 deletions(-)

Comments

Matthew Auld Dec. 30, 2019, 4:06 p.m. UTC | #1
On Sun, 29 Dec 2019 at 18:32, Chris Wilson <chris@chris-wilson.co.uk> wrote:
>
> We leave the kernel_context on the HW as we suspend (and while idle).
> There is no guarantee that it is complete in memory, so we try to inhibit
> restoration from the kernel_context. Reinforce the inhibition by
> scrubbing the context.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/gt/intel_lrc.c             | 17 +++++++++++++++--
>  drivers/gpu/drm/i915/gt/intel_ring_submission.c |  2 +-
>  2 files changed, 16 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index 14e7e179855f..b1508dbd1063 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -2494,6 +2494,11 @@ static int execlists_context_alloc(struct intel_context *ce)
>
>  static void execlists_context_reset(struct intel_context *ce)
>  {
> +       u32 *regs;
> +
> +       CE_TRACE(ce, "reset\n");
> +       GEM_BUG_ON(!intel_context_is_pinned(ce));
> +
>         /*
>          * Because we emit WA_TAIL_DWORDS there may be a disparity
>          * between our bookkeeping in ce->ring->head and ce->ring->tail and
> @@ -2510,8 +2515,17 @@ static void execlists_context_reset(struct intel_context *ce)
>          * So to avoid that we reset the context images upon resume. For
>          * simplicity, we just zero everything out.
>          */
> -       intel_ring_reset(ce->ring, 0);
> +       intel_ring_reset(ce->ring, ce->ring->emit);
> +
> +       regs = memset(ce->lrc_reg_state, 0, PAGE_SIZE);
> +       execlists_init_reg_state(regs, ce, ce->engine, ce->ring, true);
>         __execlists_update_reg_state(ce, ce->engine);
> +
> +       /* Avoid trying to reload the garbage */
> +       regs[CTX_CONTEXT_CONTROL] |=
> +               _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
> +
> +       ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
>  }
>
>  static const struct intel_context_ops execlists_context_ops = {
> @@ -3968,7 +3982,6 @@ static void init_common_reg_state(u32 * const regs,
>                                             CTX_CTRL_RS_CTX_ENABLE);
>
>         regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
> -       regs[CTX_BB_STATE] = RING_BB_PPGTT;

Zero clue what that does...

Otherwise,
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Chris Wilson Dec. 30, 2019, 4:12 p.m. UTC | #2
Quoting Matthew Auld (2019-12-30 16:06:47)
> On Sun, 29 Dec 2019 at 18:32, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> >  static const struct intel_context_ops execlists_context_ops = {
> > @@ -3968,7 +3982,6 @@ static void init_common_reg_state(u32 * const regs,
> >                                             CTX_CTRL_RS_CTX_ENABLE);
> >
> >         regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
> > -       regs[CTX_BB_STATE] = RING_BB_PPGTT;
> 
> Zero clue what that does...

It's supposed to be a readonly bit that shows the state of the current
batch buffer, and is supposed to be only set by MI_BB_START. Broadwell
and Braswell disagree with the bspec. C'est la vie.

I broke it out into a separate patch for clarity.
-Chris
diff mbox series

Patch

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 14e7e179855f..b1508dbd1063 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -2494,6 +2494,11 @@  static int execlists_context_alloc(struct intel_context *ce)
 
 static void execlists_context_reset(struct intel_context *ce)
 {
+	u32 *regs;
+
+	CE_TRACE(ce, "reset\n");
+	GEM_BUG_ON(!intel_context_is_pinned(ce));
+
 	/*
 	 * Because we emit WA_TAIL_DWORDS there may be a disparity
 	 * between our bookkeeping in ce->ring->head and ce->ring->tail and
@@ -2510,8 +2515,17 @@  static void execlists_context_reset(struct intel_context *ce)
 	 * So to avoid that we reset the context images upon resume. For
 	 * simplicity, we just zero everything out.
 	 */
-	intel_ring_reset(ce->ring, 0);
+	intel_ring_reset(ce->ring, ce->ring->emit);
+
+	regs = memset(ce->lrc_reg_state, 0, PAGE_SIZE);
+	execlists_init_reg_state(regs, ce, ce->engine, ce->ring, true);
 	__execlists_update_reg_state(ce, ce->engine);
+
+	/* Avoid trying to reload the garbage */
+	regs[CTX_CONTEXT_CONTROL] |=
+		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+
+	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
 }
 
 static const struct intel_context_ops execlists_context_ops = {
@@ -3968,7 +3982,6 @@  static void init_common_reg_state(u32 * const regs,
 					    CTX_CTRL_RS_CTX_ENABLE);
 
 	regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
-	regs[CTX_BB_STATE] = RING_BB_PPGTT;
 }
 
 static void init_wa_bb_reg_state(u32 * const regs,
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 066c4eddf5d0..843111b7b015 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -1347,7 +1347,7 @@  static int ring_context_pin(struct intel_context *ce)
 
 static void ring_context_reset(struct intel_context *ce)
 {
-	intel_ring_reset(ce->ring, 0);
+	intel_ring_reset(ce->ring, ce->ring->emit);
 }
 
 static const struct intel_context_ops ring_context_ops = {