@@ -1922,6 +1922,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug);
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
+int intel_psr_wait_for_idle(struct drm_i915_private *dev_priv);
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
@@ -717,7 +717,39 @@ void intel_psr_disable(struct intel_dp *intel_dp,
cancel_work_sync(&dev_priv->psr.work);
}
-static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
+int intel_psr_wait_for_idle(struct drm_i915_private *dev_priv)
+{
+ i915_reg_t reg;
+ u32 mask;
+
+ /*
+ * The sole user right now is intel_pipe_update_start(),
+ * which won't race with psr_enable/disable, which is
+ * where psr2_enabled is written to. So, we don't need
+ * to acquire the psr.lock. More importantly, we want the
+ * latency inside intel_pipe_update_start() to be as low
+ * as possible, so no need to acquire psr.lock when it is
+ * not needed and will induce latencies in the atomic
+ * update path.
+ */
+ if (dev_priv->psr.psr2_enabled) {
+ reg = EDP_PSR2_STATUS;
+ mask = EDP_PSR2_STATUS_STATE_MASK;
+ } else {
+ reg = EDP_PSR_STATUS;
+ mask = EDP_PSR_STATUS_STATE_MASK;
+ }
+
+ /*
+ * Max time for PSR to idle = Inverse of the refresh rate +
+ * 6 ms of exit training time + 1.5 ms of aux channel
+	 * handshake. 50 msec is defensive enough to cover everything.
+ */
+ return intel_wait_for_register(dev_priv, reg, mask,
+ EDP_PSR_STATUS_STATE_IDLE, 50);
+}
+
+static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
{
struct intel_dp *intel_dp;
i915_reg_t reg;
@@ -763,7 +795,7 @@ static void intel_psr_work(struct work_struct *work)
* PSR might take some time to get fully disabled
* and be ready for re-enable.
*/
- if (!psr_wait_for_idle(dev_priv))
+ if (!__psr_wait_for_idle_locked(dev_priv))
goto unlock;
/*
This is a lockless version of the existing psr_wait_for_idle(). We want to wait for PSR to idle out inside intel_pipe_update_start. At the time of a pipe update, we should never race with any psr enable or disable code, which is a part of crtc enable/disable. The follow-up patch will use this lockless wait inside pipe_update_start to wait for PSR to idle out before checking for vblank evasion. We need to keep the wait in pipe_update_start as short as possible. So, we can live and flourish w/o taking any psr locks at all.

Even if psr is never enabled, psr2_enabled will be false and this function will wait for PSR1 to idle out, which should just return immediately, so a very short (~1-2 usec) wait for cases where PSR is disabled.

v2: Add comment to explain the 25msec timeout (DK)
v3: Rename psr_wait_for_idle to __psr_wait_for_idle_locked to avoid naming conflicts and propagate err (if any) to the caller (Chris)
v5: Form a series with the next patch
v7: Better explain the need for lockless wait and increase the max timeout to handle refresh rates < 60 Hz (Daniel Vetter)
v8: Rebase

Signed-off-by: Tarun Vyas <tarun.vyas@intel.com>
---
 drivers/gpu/drm/i915/intel_drv.h |  1 +
 drivers/gpu/drm/i915/intel_psr.c | 36 ++++++++++++++++++++++++++++++++++--
 2 files changed, 35 insertions(+), 2 deletions(-)
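For context, a minimal sketch of how the follow-up patch could consume the new helper from intel_pipe_update_start(); the wrapper name, locals, and error message below are illustrative assumptions, not the actual follow-up change:

/* Hypothetical wrapper, for illustration only. */
static void wait_psr_idle_before_vblank_evasion(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Deliberately lockless: pipe updates never race with the PSR
	 * enable/disable paths, and skipping psr.lock keeps the extra
	 * latency in intel_pipe_update_start() as low as possible.
	 */
	if (intel_psr_wait_for_idle(dev_priv))
		DRM_ERROR("PSR idle timed out, atomic update may fail\n");
}

A timeout here would presumably be logged rather than treated as fatal, since the worst case is missing the vblank evasion window for one frame.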