@@ -645,6 +645,7 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv)
 	intel_uc_fini(dev_priv);
 	i915_gem_cleanup_engines(dev_priv);
 	i915_gem_contexts_fini(dev_priv);
+	intel_gt_pm_fini(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	intel_uc_fini_misc(dev_priv);
@@ -1074,7 +1075,6 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
  */
 static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 {
-	intel_gt_pm_sanitize(dev_priv);
 	intel_uncore_fini(dev_priv);
 	i915_mmio_cleanup(dev_priv);
 	pci_dev_put(dev_priv->bridge_dev);
@@ -202,10 +202,6 @@ void i915_gem_unpark(struct drm_i915_private *i915)
 	if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
 		i915->gt.epoch = 1;
 
-	intel_gt_pm_enable_rps(i915);
-	intel_gt_pm_enable_rc6(i915);
-	intel_gt_pm_enable_llc(i915);
-
 	i915_update_gfx_val(i915);
 	if (INTEL_GEN(i915) >= 6)
 		gen6_rps_busy(i915);
@@ -3342,11 +3338,38 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 		i915_gem_reset_finish_engine(engine);
 	}
 
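+	/* The GPU is now unusable; fall back to the default power state. */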
+	intel_gt_pm_sanitize(i915);
+
 	GEM_TRACE("end\n");
 
 	wake_up_all(&i915->gpu_error.reset_queue);
 }
 
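+/*
+ * On Ironlake, rc6 must not be enabled until the GPU holds a valid
+ * context; so bring up rps first, load and flush the kernel context,
+ * and only then enable rc6/llc. We apply the same ordering on all gens.
+ */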
+static int load_power_context(struct drm_i915_private *i915)
+{
+	int err;
+
+	intel_gt_pm_sanitize(i915);
+	intel_gt_pm_enable_rps(i915);
+
+	err = i915_gem_switch_to_kernel_context(i915);
+	if (err)
+		goto err;
+
+	err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+	if (err)
+		goto err;
+
+	intel_gt_pm_enable_rc6(i915);
+	intel_gt_pm_enable_llc(i915);
+
+	return 0;
+
+err:
+	intel_gt_pm_sanitize(i915);
+	return err;
+}
+
 bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 {
 	struct i915_timeline *tl;
@@ -5076,7 +5099,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	intel_uc_resume(i915);
 
 	/* Always reload a context for powersaving. */
-	if (i915_gem_switch_to_kernel_context(i915))
+	if (load_power_context(i915))
 		goto err_wedged;
 
 out_unlock:
@@ -5271,11 +5294,8 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 		goto err_active;
 	}
 
-	err = i915_gem_switch_to_kernel_context(i915);
-	if (err)
-		goto err_active;
-
-	err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+	/* Flush the default context image to memory, and enable powersaving. */
+	err = load_power_context(i915);
 	if (err)
 		goto err_active;
 
@@ -15685,10 +15685,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	flush_work(&dev_priv->atomic_helper.free_work);
 	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
 
-	intel_gt_pm_disable_llc(dev_priv);
-	intel_gt_pm_disable_rc6(dev_priv);
-	intel_gt_pm_disable_rps(dev_priv);
-
 	/*
 	 * Interrupts and polling as the first thing to avoid creating havoc.
 	 * Too much stuff here (turning of connectors, ...) would
@@ -15716,8 +15712,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	intel_cleanup_overlay(dev_priv);
 
-	intel_gt_pm_fini(dev_priv);
-
 	intel_teardown_gmbus(dev_priv);
 
 	destroy_workqueue(dev_priv->modeset_wq);
@@ -2671,6 +2671,8 @@ void intel_gt_pm_disable_llc(struct drm_i915_private *i915)
 void intel_gt_pm_fini(struct drm_i915_private *i915)
 {
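+	/* Disable all powersaving (rps, rc6, llc) before tearing down. */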
+	intel_gt_pm_sanitize(i915);
+
 	if (IS_VALLEYVIEW(i915))
 		valleyview_cleanup_gt_powersave(i915);
On Ironlake, we are required not to enable rc6 until the GPU is loaded
with a valid context; after that point it can start to use a powersaving
context for rc6. This seems a reasonable requirement to impose on all
generations, as we are already priming the system by loading a context
on resume. We can then simply delay enabling rc6 until we know the GPU
is awake.

v2: Reorder intel_gt_pm_fini in i915_gem_fini to match setup ordering,
and remove the superfluous intel_gt_pm_sanitize() on mmio cleanup.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.c      |  2 +-
 drivers/gpu/drm/i915/i915_gem.c      | 40 +++++++++++++++++++++-------
 drivers/gpu/drm/i915/intel_display.c |  6 ------
 drivers/gpu/drm/i915/intel_gt_pm.c   |  2 ++
 4 files changed, 33 insertions(+), 17 deletions(-)
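As a self-contained illustration of the ordering rule load_power_context()
enforces, here is a toy C program; the names below are stubs invented for
this sketch and only model, rather than call, the i915 API: rc6 refuses to
come up before a valid context has been loaded and the GPU has idled, and
any failure falls back to the sanitized state.

/* Standalone illustration; stub functions, not the real i915 API. */
#include <stdio.h>

static int have_valid_context; /* models "GPU is loaded with a context" */

static void sanitize(void)
{
	have_valid_context = 0;
	puts("sanitize: all powersaving disabled");
}

static void enable_rps(void)
{
	puts("rps enabled (safe before any context is loaded)");
}

static int load_kernel_context_and_idle(void)
{
	have_valid_context = 1; /* switch_to_kernel_context + wait_for_idle */
	return 0; /* 0 on success, as in the kernel convention */
}

static int enable_rc6(void)
{
	if (!have_valid_context)
		return -1; /* the Ironlake rule: no rc6 without a context */
	puts("rc6 enabled");
	return 0;
}

int main(void)
{
	/* Mirrors load_power_context(): rps -> context -> idle -> rc6. */
	sanitize();
	enable_rps();
	if (load_kernel_context_and_idle()) {
		sanitize(); /* on failure, return to the default state */
		return 1;
	}
	return enable_rc6() ? 1 : 0;
}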