[09/11] drm/i915: turbo & RC6 support for VLV v3

Message ID 1364489747-2050-9-git-send-email-jbarnes@virtuousgeek.org (mailing list archive)
State New, archived

Commit Message

Jesse Barnes March 28, 2013, 4:55 p.m. UTC
From: Ben Widawsky <ben@bwidawsk.net>

Uses slightly different interfaces than other platforms.

v2: track actual set freq, not requested (Rohit)
    fix debug prints in init code (Jesse)
v3: don't write sleep reg (Jesse)
    re-add RC6 wake limit write (Ben)
    fixup thresholds to match other platforms (Ben)
    clean up mem freq calculation (Ben)
    clean up debug prints (Ben)
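
The "slightly different interfaces" are mainly about how a new frequency is
requested: gen6_set_rps() programs the target directly via MMIO
(GEN6_RPNSWREQ), while on VLV the request goes through the Punit sideband
and the driver polls a status register until the Punit has acted on it.
A simplified sketch of the VLV flow implemented by valleyview_set_rps()
below, using the valleyview_punit_read()/write() helpers the patch builds on:

	/* request the new GPU frequency from the Punit ... */
	valleyview_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);

	/* ... then wait until the request has been acted on; bit 0 of the
	 * status register stays set while the request is pending */
	do {
		valleyview_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS, &pval);
	} while (pval & 1);

	/* bits 15:8 report the frequency the Punit actually granted, which
	 * may differ from what was requested */
	dev_priv->rps.cur_delay = (pval >> 8) & 0xff;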

Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
---
 drivers/gpu/drm/i915/i915_drv.h |    1 +
 drivers/gpu/drm/i915/i915_irq.c |    5 +-
 drivers/gpu/drm/i915/i915_reg.h |    1 +
 drivers/gpu/drm/i915/intel_pm.c |  145 +++++++++++++++++++++++++++++++++++++--
 4 files changed, 147 insertions(+), 5 deletions(-)

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 642cb1f..68f1256 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1816,6 +1816,7 @@  extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
+extern void valleyview_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 extern int intel_enable_rc6(const struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3c2b05c..46f6f1a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -394,7 +394,10 @@  static void gen6_pm_rps_work(struct work_struct *work)
 	 */
 	if (!(new_delay > dev_priv->rps.max_delay ||
 	      new_delay < dev_priv->rps.min_delay)) {
-		gen6_set_rps(dev_priv->dev, new_delay);
+		if (IS_VALLEYVIEW(dev_priv->dev))
+			valleyview_set_rps(dev_priv->dev, new_delay);
+		else
+			gen6_set_rps(dev_priv->dev, new_delay);
 	}
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 87a59eb..ea7d16d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -4369,6 +4369,7 @@ 
 #define   GEN6_RC_CTL_RC6_ENABLE		(1<<18)
 #define   GEN6_RC_CTL_RC1e_ENABLE		(1<<20)
 #define   GEN6_RC_CTL_RC7_ENABLE		(1<<22)
+#define   GEN7_RC_CTL_TO_MODE			(1<<28)
 #define   GEN6_RC_CTL_EI_MODE(x)		((x)<<27)
 #define   GEN6_RC_CTL_HW_ENABLE			(1<<31)
 #define GEN6_RP_DOWN_TIMEOUT			0xA010
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 8256d0d..2b02702 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2477,6 +2477,47 @@  void gen6_set_rps(struct drm_device *dev, u8 val)
 	trace_intel_gpu_freq_change(val * 50);
 }
 
+void valleyview_set_rps(struct drm_device *dev, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long timeout = jiffies + msecs_to_jiffies(100);
+	u32 limits = gen6_rps_limits(dev_priv, &val);
+	u32 pval;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(val > dev_priv->rps.max_delay);
+	WARN_ON(val < dev_priv->rps.min_delay);
+
+	if (val == dev_priv->rps.cur_delay)
+		return;
+
+	valleyview_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+
+	do {
+		valleyview_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS, &pval);
+		if (time_after(jiffies, timeout)) {
+			DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
+			break;
+		}
+		udelay(10);
+	} while (pval & 1);
+
+	valleyview_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS, &pval);
+	if ((pval >> 8) != val)
+		DRM_DEBUG_DRIVER("punit overrode freq: %d requested, but got %d\n",
+			  val, pval >> 8);
+
+	/* Make sure we continue to get interrupts
+	 * until we hit the minimum or maximum frequencies.
+	 */
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+
+	dev_priv->rps.cur_delay = pval >> 8;
+
+	trace_intel_gpu_freq_change(val);
+}
+
+
 static void gen6_disable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2716,6 +2757,97 @@  static void gen6_update_ring_freq(struct drm_device *dev)
 	}
 }
 
+static void valleyview_enable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	u32 gtfifodbg, val;
+	int i;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+		I915_WRITE(GTFIFODBG, gtfifodbg);
+	}
+
+	gen6_gt_force_wake_get(dev_priv);
+
+	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
+	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+	for_each_ring(ring, dev_priv, i)
+		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+
+	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+	I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
+	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
+	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000);
+
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+	I915_WRITE(GEN6_RP_UP_EI, 66000);
+	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+	I915_WRITE(GEN6_RP_CONTROL,
+		   GEN6_RP_MEDIA_TURBO |
+		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+		   GEN6_RP_MEDIA_IS_GFX |
+		   GEN6_RP_ENABLE |
+		   GEN6_RP_UP_BUSY_AVG |
+		   GEN6_RP_DOWN_IDLE_CONT);
+
+	/* allows RC6 residency counter to work */
+	I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
+	I915_WRITE(GEN6_RC_CONTROL,
+		   GEN6_RC_CTL_RC6pp_ENABLE |
+		   GEN6_RC_CTL_RC6p_ENABLE |
+		   GEN6_RC_CTL_RC6_ENABLE |
+		   GEN7_RC_CTL_TO_MODE |
+		   GEN6_RC_CTL_HW_ENABLE);
+
+	valleyview_punit_read(dev_priv, PUNIT_FUSE_BUS1, &val);
+	DRM_DEBUG_DRIVER("max GPU freq: %d\n", (val >> 16) & 0xff);
+	dev_priv->rps.max_delay = (val >> 16) & 0xff;
+
+	valleyview_punit_read(dev_priv, PUNIT_REG_GPU_LFM, &val);
+	DRM_DEBUG_DRIVER("min GPU freq: %d\n", val & 0xff);
+	dev_priv->rps.min_delay = val & 0xff;
+
+	valleyview_punit_read(dev_priv, PUNIT_FUSE_BUS2, &val);
+	DRM_DEBUG_DRIVER("max GPLL freq: %d\n", val);
+
+	valleyview_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS, &val);
+	dev_priv->mem_freq = 800 + (266 * ((val >> 6) & 3));
+	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
+
+	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 8 ? "yes" : "no");
+	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+	DRM_DEBUG_DRIVER("current GPU freq: %x\n", (val >> 8) & 0xff);
+	dev_priv->rps.cur_delay = (val >> 8) & 0xff;
+
+	val = 0xd500;
+	DRM_DEBUG_DRIVER("setting GPU freq to %d\n", (val >> 8) & 0xff);
+
+	valleyview_set_rps(dev_priv->dev, (val >> 8) & 0xff);
+
+	/* requires MSI enabled */
+	I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
+	spin_lock_irq(&dev_priv->rps.lock);
+	WARN_ON(dev_priv->rps.pm_iir != 0);
+	I915_WRITE(GEN6_PMIMR, 0);
+	spin_unlock_irq(&dev_priv->rps.lock);
+	/* enable all PM interrupts */
+	I915_WRITE(GEN6_PMINTRMSK, 0);
+
+	gen6_gt_force_wake_put(dev_priv);
+}
+
 void ironlake_teardown_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3442,7 +3574,7 @@  void intel_disable_gt_powersave(struct drm_device *dev)
 	if (IS_IRONLAKE_M(dev)) {
 		ironlake_disable_drps(dev);
 		ironlake_disable_rc6(dev);
-	} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
+	} else if (INTEL_INFO(dev)->gen >= 6) {
 		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
 		mutex_lock(&dev_priv->rps.hw_lock);
 		gen6_disable_rps(dev);
@@ -3458,8 +3590,13 @@  static void intel_gen6_powersave_work(struct work_struct *work)
 	struct drm_device *dev = dev_priv->dev;
 
 	mutex_lock(&dev_priv->rps.hw_lock);
-	gen6_enable_rps(dev);
-	gen6_update_ring_freq(dev);
+
+	if (IS_VALLEYVIEW(dev)) {
+		valleyview_enable_rps(dev);
+	} else {
+		gen6_enable_rps(dev);
+		gen6_update_ring_freq(dev);
+	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
@@ -3471,7 +3608,7 @@  void intel_enable_gt_powersave(struct drm_device *dev)
 		ironlake_enable_drps(dev);
 		ironlake_enable_rc6(dev);
 		intel_init_emon(dev);
-	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
 		/*
 		 * PCU communication is slow and this doesn't need to be
 		 * done at any specific time, so do this out of our fast path