[08/14] drm/i915: kill dev_priv->rps.lock

Message ID 1372973734-7601-9-git-send-email-daniel.vetter@ffwll.ch (mailing list archive)
State New, archived

Commit Message

Daniel Vetter July 4, 2013, 9:35 p.m. UTC
Now that the rps interrupt locking isn't clearly separated (at least
conceptually) from all the other interrupt locking, having a different
lock stopped making sense: it protects much more than just the rps
workqueue it started out with. And with the addition of VECS the
separation started to blur, resulting in more complex locking for the
ring interrupt refcount.

With this we can (again) unify the ringbuffer irq refcounts without
causing massive confusion, but that's for the next patch.

v2: Explain better why the rps.lock once made sense and why it no
longer does, as requested by Ben.

Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/i915/i915_dma.c         |  1 -
 drivers/gpu/drm/i915/i915_drv.h         |  8 ++++----
 drivers/gpu/drm/i915/i915_irq.c         | 12 ++++++------
 drivers/gpu/drm/i915/intel_pm.c         | 16 ++++++++--------
 drivers/gpu/drm/i915/intel_ringbuffer.c |  8 ++++----
 drivers/gpu/drm/i915/intel_ringbuffer.h |  2 +-
 6 files changed, 23 insertions(+), 24 deletions(-)
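
The resulting locking convention, sketched below in simplified form for
reviewers skimming the diffstat: every access to rps.pm_iir, and the
PMIMR masking that goes with it, is now serialized by the single
dev_priv->irq_lock. The function below is illustrative only and not part
of the patch; the field and register names are taken from the hunks that
follow.

	/* Illustrative sketch, not part of the patch: after this change the
	 * rps interrupt bookkeeping shares dev_priv->irq_lock with the rest
	 * of the interrupt state instead of using its own rps.lock.
	 */
	static void rps_irq_handler_sketch(struct drm_i915_private *dev_priv,
					   u32 pm_iir)
	{
		spin_lock(&dev_priv->irq_lock);	/* was dev_priv->rps.lock */
		dev_priv->rps.pm_iir |= pm_iir;
		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
		POSTING_READ(GEN6_PMIMR);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}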

Patch

diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0e22142..d874dab 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1612,7 +1612,6 @@  int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->gpu_error.lock);
-	spin_lock_init(&dev_priv->rps.lock);
 	spin_lock_init(&dev_priv->backlight.lock);
 	mutex_init(&dev_priv->dpio_lock);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ea86ad6..2179478 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -743,12 +743,12 @@  struct i915_suspend_saved_registers {
 };
 
 struct intel_gen6_power_mgmt {
+	/* work and pm_iir are protected by dev_priv->irq_lock */
 	struct work_struct work;
-	struct delayed_work vlv_work;
 	u32 pm_iir;
-	/* lock - irqsave spinlock that protectects the work_struct and
-	 * pm_iir. */
-	spinlock_t lock;
+
+	/* On vlv we need to manually drop to Vmin with a delayed work. */
+	struct delayed_work vlv_work;
 
 	/* The below variables an all the rps hw state are protected by
 	 * dev->struct mutext. */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e70c5df..27bf7c1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -717,13 +717,13 @@  static void gen6_pm_rps_work(struct work_struct *work)
 	u32 pm_iir, pm_imr;
 	u8 new_delay;
 
-	spin_lock_irq(&dev_priv->rps.lock);
+	spin_lock_irq(&dev_priv->irq_lock);
 	pm_iir = dev_priv->rps.pm_iir;
 	dev_priv->rps.pm_iir = 0;
 	pm_imr = I915_READ(GEN6_PMIMR);
 	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
 	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
-	spin_unlock_irq(&dev_priv->rps.lock);
+	spin_unlock_irq(&dev_priv->irq_lock);
 
 	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
 		return;
@@ -885,11 +885,11 @@  static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv,
 	 * The mask bit in IMR is cleared by dev_priv->rps.work.
 	 */
 
-	spin_lock(&dev_priv->rps.lock);
+	spin_lock(&dev_priv->irq_lock);
 	dev_priv->rps.pm_iir |= pm_iir;
 	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
 	POSTING_READ(GEN6_PMIMR);
-	spin_unlock(&dev_priv->rps.lock);
+	spin_unlock(&dev_priv->irq_lock);
 
 	queue_work(dev_priv->wq, &dev_priv->rps.work);
 }
@@ -962,12 +962,12 @@  static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
 			       u32 pm_iir)
 {
 	if (pm_iir & GEN6_PM_RPS_EVENTS) {
-		spin_lock(&dev_priv->rps.lock);
+		spin_lock(&dev_priv->irq_lock);
 		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
 		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
 		/* never want to mask useful interrupts. (also posting read) */
 		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
-		spin_unlock(&dev_priv->rps.lock);
+		spin_unlock(&dev_priv->irq_lock);
 
 		queue_work(dev_priv->wq, &dev_priv->rps.work);
 	}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 125a741..a9be0d1 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3135,9 +3135,9 @@  static void gen6_disable_rps(struct drm_device *dev)
 	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
 	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
 
-	spin_lock_irq(&dev_priv->rps.lock);
+	spin_lock_irq(&dev_priv->irq_lock);
 	dev_priv->rps.pm_iir = 0;
-	spin_unlock_irq(&dev_priv->rps.lock);
+	spin_unlock_irq(&dev_priv->irq_lock);
 
 	I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
 }
@@ -3154,9 +3154,9 @@  static void valleyview_disable_rps(struct drm_device *dev)
 	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
 	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
 
-	spin_lock_irq(&dev_priv->rps.lock);
+	spin_lock_irq(&dev_priv->irq_lock);
 	dev_priv->rps.pm_iir = 0;
-	spin_unlock_irq(&dev_priv->rps.lock);
+	spin_unlock_irq(&dev_priv->irq_lock);
 
 	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
 
@@ -3321,13 +3321,13 @@  static void gen6_enable_rps(struct drm_device *dev)
 
 	/* requires MSI enabled */
 	I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
-	spin_lock_irq(&dev_priv->rps.lock);
+	spin_lock_irq(&dev_priv->irq_lock);
 	/* FIXME: Our interrupt enabling sequence is bonghits.
 	 * dev_priv->rps.pm_iir really should be 0 here. */
 	dev_priv->rps.pm_iir = 0;
 	I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
 	I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
-	spin_unlock_irq(&dev_priv->rps.lock);
+	spin_unlock_irq(&dev_priv->irq_lock);
 	/* unmask all PM interrupts */
 	I915_WRITE(GEN6_PMINTRMSK, 0);
 
@@ -3601,10 +3601,10 @@  static void valleyview_enable_rps(struct drm_device *dev)
 
 	/* requires MSI enabled */
 	I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
-	spin_lock_irq(&dev_priv->rps.lock);
+	spin_lock_irq(&dev_priv->irq_lock);
 	WARN_ON(dev_priv->rps.pm_iir != 0);
 	I915_WRITE(GEN6_PMIMR, 0);
-	spin_unlock_irq(&dev_priv->rps.lock);
+	spin_unlock_irq(&dev_priv->irq_lock);
 	/* enable all PM interrupts */
 	I915_WRITE(GEN6_PMINTRMSK, 0);
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 5b647eb..8a5874b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1071,14 +1071,14 @@  hsw_vebox_get_irq(struct intel_ring_buffer *ring)
 	if (!dev->irq_enabled)
 		return false;
 
-	spin_lock_irqsave(&dev_priv->rps.lock, flags);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount.pm++ == 0) {
 		u32 pm_imr = I915_READ(GEN6_PMIMR);
 		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
 		I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
 		POSTING_READ(GEN6_PMIMR);
 	}
-	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
@@ -1093,14 +1093,14 @@  hsw_vebox_put_irq(struct intel_ring_buffer *ring)
 	if (!dev->irq_enabled)
 		return;
 
-	spin_lock_irqsave(&dev_priv->rps.lock, flags);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount.pm == 0) {
 		u32 pm_imr = I915_READ(GEN6_PMIMR);
 		I915_WRITE_IMR(ring, ~0);
 		I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
 		POSTING_READ(GEN6_PMIMR);
 	}
-	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static int
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 799f04c..8a87b3f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -80,7 +80,7 @@  struct  intel_ring_buffer {
 
 	struct {
 		u32	gt; /*  protected by dev_priv->irq_lock */
-		u32	pm; /*  protected by dev_priv->rps.lock (sucks) */
+		u32	pm; /*  protected by dev_priv->irq_lock */
 	} irq_refcount;
 	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
 	u32		trace_irq_seqno;