@@ -921,6 +921,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev_priv);
+ intel_runtime_pm_init_early(dev_priv);
intel_wopcm_init_early(&dev_priv->wopcm);
intel_uc_init_early(dev_priv);
intel_pm_setup(dev_priv);
@@ -2403,6 +2404,7 @@ static int intel_runtime_suspend(struct device *kdev)
DRM_DEBUG_KMS("Suspending device\n");
disable_rpm_wakeref_asserts(dev_priv);
+ lock_map_acquire(&dev_priv->runtime_pm.lock);
/*
* We are safe here against re-faults, since the fault handler takes
@@ -2437,11 +2439,23 @@ static int intel_runtime_suspend(struct device *kdev)
i915_gem_init_swizzling(dev_priv);
i915_gem_restore_fences(dev_priv);
+ lock_map_release(&dev_priv->runtime_pm.lock);
enable_rpm_wakeref_asserts(dev_priv);
return ret;
}
+ /*
+ * XXX We want to keep the runtime_pm.lock held across suspend.
+ *
+ * We want to treat the whole runtime sleep period as a locked state
+ * and catch any lock ordering problems that may arise from trying
+ * to access the device without first acquiring the runtime reference.
+ * However, this will need lockdep cross-release support as the lock
+ * will no longer be tied to a process. Once we have that working, we
+ * should endeavour to move this lockdep_map into the PM core.
+ */
+ lock_map_release(&dev_priv->runtime_pm.lock);
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
@@ -2496,6 +2510,7 @@ static int intel_runtime_resume(struct device *kdev)
WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
disable_rpm_wakeref_asserts(dev_priv);
+ lock_map_acquire(&dev_priv->runtime_pm.lock);
intel_opregion_notify_adapter(dev_priv, PCI_D0);
dev_priv->runtime_pm.suspended = false;
@@ -2537,6 +2552,7 @@ static int intel_runtime_resume(struct device *kdev)
intel_enable_ipc(dev_priv);
+ lock_map_release(&dev_priv->runtime_pm.lock);
enable_rpm_wakeref_asserts(dev_priv);
if (ret)
@@ -1168,6 +1168,7 @@ struct skl_wm_params {
* For more, read the Documentation/power/runtime_pm.txt.
*/
struct i915_runtime_pm {
+ struct lockdep_map lock;
atomic_t wakeref_count;
bool suspended;
bool irqs_enabled;
@@ -1944,6 +1944,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_init_early(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain);
@@ -3698,6 +3698,8 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
ret = pm_runtime_get_sync(kdev);
WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
+ lock_map_acquire_read(&dev_priv->runtime_pm.lock);
+
atomic_inc(&dev_priv->runtime_pm.wakeref_count);
assert_rpm_wakelock_held(dev_priv);
}
@@ -3731,6 +3733,8 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
return false;
}
+ lock_map_acquire_read(&dev_priv->runtime_pm.lock);
+
atomic_inc(&dev_priv->runtime_pm.wakeref_count);
assert_rpm_wakelock_held(dev_priv);
@@ -3759,9 +3763,15 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
struct pci_dev *pdev = dev_priv->drm.pdev;
struct device *kdev = &pdev->dev;
+#if IS_ENABLED(CONFIG_LOCKDEP)
+ WARN_ON_ONCE(!lock_is_held_type(&dev_priv->runtime_pm.lock, 1));
+#endif
+
assert_rpm_wakelock_held(dev_priv);
pm_runtime_get_noresume(kdev);
+ lock_map_acquire_read(&dev_priv->runtime_pm.lock);
+
atomic_inc(&dev_priv->runtime_pm.wakeref_count);
}
@@ -3781,6 +3791,8 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
assert_rpm_wakelock_held(dev_priv);
atomic_dec(&dev_priv->runtime_pm.wakeref_count);
+ lock_map_release(&dev_priv->runtime_pm.lock);
+
pm_runtime_mark_last_busy(kdev);
pm_runtime_put_autosuspend(kdev);
}
@@ -3826,3 +3838,11 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
*/
pm_runtime_put_autosuspend(kdev);
}
+
+void intel_runtime_pm_init_early(struct drm_i915_private *dev_priv)
+{
+ static struct lock_class_key lock_key;
+
+ lockdep_init_map(&dev_priv->runtime_pm.lock,
+ "i915->runtime_pm", &lock_key, 0);
+}
Runtime power management acts as a type of "wakelock" that code must hold in order to access the device. Such a lock has all the ordering issues of a regular lock, and so it would be convenient to use lockdep to catch violations and cyclic deadlocks. In the long run, it will be interesting to use cross-release tracking so that we can mark the runtime wakelock as held for as long as the device is suspended, letting us catch any attempt to access the device without first acquiring the wakelock. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Daniel Vetter <daniel.vetter@ffwll.ch> --- drivers/gpu/drm/i915/i915_drv.c | 16 ++++++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/intel_drv.h | 1 + drivers/gpu/drm/i915/intel_runtime_pm.c | 20 ++++++++++++++++++++ 4 files changed, 38 insertions(+)