[4/4] drm/i915: Dampen RPS slow start

Message ID 20180802100631.31305-4-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Series [1/4] drm/i915: Drop stray clearing of rps->last_adj

Commit Message

Chris Wilson Aug. 2, 2018, 10:06 a.m. UTC
Currently, we only note congestion for the slow start ramping up of RPS
when we overshoot the target workload and have to reverse direction for
our reclocking. That is, if we have a period where the current GPU
frequency is enough to sustain the workload within our target
utilisation, we do not trigger any RPS EI interrupts, and may then
resume with the previous last_adj after multiple such quiet periods,
causing us to dramatically overreact. So that we do not miss a period
where the system is behaving correctly, we can schedule an extra
interrupt, associated with neither an up nor a down event, whose
handling resets last_adj back to zero and so cancels the slow start
due to the congestion.

v2: Separate up/down EI
v3: Reset rps events upon enabling

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
---
 drivers/gpu/drm/i915/i915_irq.c | 43 +++++++++++++++++++++------------
 drivers/gpu/drm/i915/intel_pm.c | 14 ++++++++---
 2 files changed, 38 insertions(+), 19 deletions(-)
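
To make the patch easier to follow, here is a minimal, self-contained
userspace sketch of the behaviour described in the commit message: the
adjustment step keeps doubling while consecutive threshold events arrive
in the same direction, and an evaluation-interval event that carries no
threshold resets the step, so a quiet period cancels the slow start. All
identifiers below are illustrative only, not the kernel's.

/* Userspace sketch only; models the dampening, not the i915 code. */
#include <stdio.h>

enum rps_event {
	EVT_UP_THRESHOLD,	/* sustained over-utilisation */
	EVT_DOWN_THRESHOLD,	/* sustained under-utilisation */
	EVT_EI_EXPIRED,		/* interval ended without crossing a threshold */
};

static int last_adj;	/* persistent step, like rps->last_adj */

static int next_adj(enum rps_event evt)
{
	int adj = last_adj;

	switch (evt) {
	case EVT_UP_THRESHOLD:
		/* Slow start: keep doubling only while we keep climbing. */
		adj = adj > 0 ? adj * 2 : 1;
		break;
	case EVT_DOWN_THRESHOLD:
		adj = adj < 0 ? adj * 2 : -1;
		break;
	case EVT_EI_EXPIRED:
		/*
		 * The current frequency sustained the workload for a whole
		 * interval, so drop the accumulated step instead of quietly
		 * resuming it on the next threshold event.
		 */
		adj = 0;
		break;
	}

	last_adj = adj;
	return adj;
}

int main(void)
{
	/* Two busy intervals, one quiet interval, then busy again. */
	enum rps_event trace[] = {
		EVT_UP_THRESHOLD, EVT_UP_THRESHOLD,
		EVT_EI_EXPIRED,		/* congestion noted, step reset */
		EVT_UP_THRESHOLD,	/* slow start begins again from 1 */
	};
	unsigned int i;

	for (i = 0; i < sizeof(trace) / sizeof(trace[0]); i++)
		printf("event %u -> adj %d\n", i, next_adj(trace[i]));

	return 0;	/* prints adj 1, 2, 0, 1 */
}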

Patch

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8084e35b25c5..69919a97ec2e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -493,6 +493,14 @@  void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 	spin_lock_irq(&dev_priv->irq_lock);
 	WARN_ON_ONCE(rps->pm_iir);
 
+	if (IS_VALLEYVIEW(dev_priv))
+		/* WaGsvRC0ResidencyMethod:vlv */
+		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
+	else
+		dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
+					   GEN6_PM_RP_DOWN_THRESHOLD |
+					   GEN6_PM_RP_DOWN_TIMEOUT);
+
 	if (INTEL_GEN(dev_priv) >= 11)
 		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
 	else
@@ -1298,7 +1306,13 @@  static void gen6_pm_rps_work(struct work_struct *work)
 
 	mutex_lock(&dev_priv->pcu_lock);
 
-	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
+	dev_priv->pm_rps_events &=
+		~(GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
+
+	if (IS_VALLEYVIEW(dev_priv)) {
+		dev_priv->pm_rps_events |= GEN6_PM_RP_UP_EI_EXPIRED;
+		pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
+	}
 
 	adj = rps->last_adj;
 	new_delay = rps->cur_freq;
@@ -1310,10 +1324,12 @@  static void gen6_pm_rps_work(struct work_struct *work)
 		new_delay = rps->boost_freq;
 		adj = 0;
 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-		if (adj > 0)
+		if (adj > 0) {
+			dev_priv->pm_rps_events |= GEN6_PM_RP_UP_EI_EXPIRED;
 			adj *= 2;
-		else /* CHV needs even encode values */
+		} else { /* CHV needs even encode values */
 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
+		}
 
 		if (new_delay >= rps->max_freq_softlimit)
 			adj = 0;
@@ -1326,15 +1342,21 @@  static void gen6_pm_rps_work(struct work_struct *work)
 			new_delay = rps->min_freq_softlimit;
 		adj = 0;
 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
-		if (adj < 0)
+		if (adj < 0) {
+			dev_priv->pm_rps_events |= GEN6_PM_RP_DOWN_EI_EXPIRED;
 			adj *= 2;
-		else /* CHV needs even encode values */
+		} else { /* CHV needs even encode values */
 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
+		}
 
 		if (new_delay <= rps->min_freq_softlimit)
 			adj = 0;
-	} else { /* unknown event */
+	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED && adj > 0) {
+		adj = 0;
+	} else if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED && adj < 0) {
 		adj = 0;
+	} else {
+		/* unknown event */
 	}
 
 	rps->last_adj = adj;
@@ -4773,15 +4795,6 @@  void intel_irq_init(struct drm_i915_private *dev_priv)
 	if (HAS_GUC_SCHED(dev_priv))
 		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
 
-	/* Let's track the enabled rps events */
-	if (IS_VALLEYVIEW(dev_priv))
-		/* WaGsvRC0ResidencyMethod:vlv */
-		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
-	else
-		dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
-					   GEN6_PM_RP_DOWN_THRESHOLD |
-					   GEN6_PM_RP_DOWN_TIMEOUT);
-
 	rps->pm_intrmsk_mbz = 0;
 
 	/*
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f90a3c7f1c40..d71a498ee3a1 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6397,10 +6397,16 @@  static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 	u32 mask = 0;
 
 	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
-	if (val > rps->min_freq_softlimit)
-		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
-	if (val < rps->max_freq_softlimit)
-		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
+	if (val > rps->min_freq_softlimit) {
+		mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
+			 GEN6_PM_RP_DOWN_EI_EXPIRED |
+			 GEN6_PM_RP_DOWN_THRESHOLD |
+			 GEN6_PM_RP_DOWN_TIMEOUT);
+	}
+	if (val < rps->max_freq_softlimit) {
+		mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
+			 GEN6_PM_RP_UP_THRESHOLD);
+	}
 
 	mask &= dev_priv->pm_rps_events;