@@ -3504,6 +3504,18 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	if (!READ_ONCE(dev_priv->gt.awake))
 		return;
 
+	/*
+	 * Flush out the last user context, leaving only the pinned
+	 * kernel context resident. When we are idling on the kernel_context,
+	 * no more new requests (with a context switch) are emitted and we
+	 * can finally rest. A consequence is that the idle work handler is
+	 * always called at least twice before idling (and if the system is
+	 * idle that implies a round trip through the retire worker).
+	 */
+	mutex_lock(&dev_priv->drm.struct_mutex);
+	i915_gem_switch_to_kernel_context(dev_priv);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+
 	/*
 	 * Wait for last execlists context complete, but bail out in case a
 	 * new request is submitted. As we don't trust the hardware, we
@@ -4958,7 +4970,6 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
 
 		assert_kernel_context_is_current(dev_priv);
 	}
-	i915_gem_contexts_lost(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	intel_uc_suspend(dev_priv);
We can reduce our exposure to random neutrinos by resting on the kernel
context, having flushed out the user contexts to system memory and beyond.
The corollary is that we then require two passes through the idle handler
to go to sleep, which on a truly idle system involves an extra pass through
the slow and irregular retire work handler.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
Mainly just looking to see if this has any impact on masking
drv_suspend/sysfs-reader
-Chris
---
 drivers/gpu/drm/i915/i915_gem.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)
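
For reference, a minimal standalone sketch (not driver code) of the two-pass
idling described above: the first run of the idle handler emits a request to
switch onto the kernel context and so cannot park; after the retire worker
completes that request, the second run finds nothing outstanding and rests.
All names here (fake_idle_work_handler() etc.) are illustrative assumptions,
not the i915 API.

	/* Toy model of idling onto the kernel context; not i915 code. */
	#include <stdbool.h>
	#include <stdio.h>

	static int outstanding_requests;   /* requests still in flight */
	static bool on_kernel_context;     /* last context we switched to */

	/* Switching contexts emits a request of its own, keeping us busy. */
	static void fake_switch_to_kernel_context(void)
	{
		if (!on_kernel_context) {
			outstanding_requests++;
			on_kernel_context = true;
		}
	}

	/* Stand-in for the retire worker completing everything in flight. */
	static void fake_retire_requests(void)
	{
		outstanding_requests = 0;
	}

	/* Returns true once we can finally park. */
	static bool fake_idle_work_handler(void)
	{
		fake_switch_to_kernel_context();
		if (outstanding_requests)
			return false;  /* the switch is new work: stay awake */
		printf("parked on the kernel context\n");
		return true;
	}

	int main(void)
	{
		/* First pass: the context switch keeps the model busy. */
		if (!fake_idle_work_handler())
			fake_retire_requests(); /* retire worker in between */

		/* Second pass: nothing outstanding, so we rest. */
		fake_idle_work_handler();
		return 0;
	}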