@@ -1298,7 +1298,13 @@ static void gen6_pm_rps_work(struct work_struct *work)
mutex_lock(&dev_priv->pcu_lock);
- pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
+ dev_priv->pm_rps_events &=
+ ~(GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
+
+ if (IS_VALLEYVIEW(dev_priv)) {
+ dev_priv->pm_rps_events |= GEN6_PM_RP_UP_EI_EXPIRED;
+ pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
+ }
adj = rps->last_adj;
new_delay = rps->cur_freq;
@@ -1310,10 +1316,12 @@ static void gen6_pm_rps_work(struct work_struct *work)
new_delay = rps->boost_freq;
adj = 0;
} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
- if (adj > 0)
+ if (adj > 0) {
+ dev_priv->pm_rps_events |= GEN6_PM_RP_UP_EI_EXPIRED;
adj *= 2;
- else /* CHV needs even encode values */
+ } else { /* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
+ }
if (new_delay >= rps->max_freq_softlimit)
adj = 0;
@@ -1326,15 +1334,21 @@ static void gen6_pm_rps_work(struct work_struct *work)
new_delay = rps->min_freq_softlimit;
adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
- if (adj < 0)
+ if (adj < 0) {
+ dev_priv->pm_rps_events |= GEN6_PM_RP_DOWN_EI_EXPIRED;
adj *= 2;
- else /* CHV needs even encode values */
+ } else { /* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
+ }
if (new_delay <= rps->min_freq_softlimit)
adj = 0;
- } else { /* unknown event */
+ } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD && adj > 0) {
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD && adj < 0) {
adj = 0;
+ } else {
+ /* unknown event */
}
rps->last_adj = adj;
@@ -6397,10 +6397,16 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
u32 mask = 0;
/* We use UP_EI_EXPIRED interupts for both up/down in manual mode */
- if (val > rps->min_freq_softlimit)
- mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
- if (val < rps->max_freq_softlimit)
- mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
+ if (val > rps->min_freq_softlimit) {
+ mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
+ GEN6_PM_RP_DOWN_EI_EXPIRED |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+ }
+ if (val < rps->max_freq_softlimit) {
+ mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
+ GEN6_PM_RP_UP_THRESHOLD);
+ }
mask &= dev_priv->pm_rps_events;
Currently, we note congestion for the slow start ramping up of RPS only when we overshoot the target workload and have to reverse direction for our reclocking. That is, if we have a period where the current GPU frequency is enough to sustain the workload within our target utilisation, we should not trigger any RPS EI interrupts, and then may continue again with the previous last_adj after multiple periods, causing us to dramatically overreact. To avoid missing a period where the system is behaving correctly, we can schedule an extra interrupt that will not be associated with either an up or down event, causing us to reset last_adj back to zero and cancelling the slow start due to the congestion. v2: Separate up/down EI Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Mika Kuoppala <mika.kuoppala@intel.com> --- drivers/gpu/drm/i915/i915_irq.c | 26 ++++++++++++++++++++------ drivers/gpu/drm/i915/intel_pm.c | 14 ++++++++++---- 2 files changed, 30 insertions(+), 10 deletions(-)