
[RFC,2/8] drm/i915: Move IRQ related stuff from intel_rps to the new intel_irq.

Message ID 20190418205347.6402-3-rodrigo.vivi@intel.com (mailing list archive)
State New, archived
Series: Start some re-org around IRQ.

Commit Message

Rodrigo Vivi April 18, 2019, 8:53 p.m. UTC
The plan is to consolidate all IRQ-related state
under the new intel_irq.

So let's continue with the RPS bits.

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h |  8 ++-----
 drivers/gpu/drm/i915/i915_irq.c | 41 ++++++++++++++++++---------------
 2 files changed, 24 insertions(+), 25 deletions(-)
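
For context, this is the end state the patch creates: the RPS interrupt
bookkeeping moves next to the lock that already serializes it. Excerpted
from the diff below (the remaining fields of each struct are elided):

struct intel_irq {
	/* protects the irq masks */
	spinlock_t lock;
	bool rps_interrupts_enabled;
	u32 pm_iir;
};

struct intel_rps {
	struct work_struct work;

	/* PM interrupt bits that should never be masked */
	u32 pm_intrmsk_mbz;
	/* ... remaining RPS fields unchanged ... */
};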

Comments

Chris Wilson April 18, 2019, 9:15 p.m. UTC | #1
Quoting Rodrigo Vivi (2019-04-18 21:53:41)
> The plan is to consolidate all IRQ-related state
> under the new intel_irq.
> 
> So let's continue with the RPS bits.

Nah, this is the wrong direction: we shouldn't pull this under a
more global lock, but rather push it under a local one. If you are touching this
code, you could review the bug fixes first... :)
-Chris
Rodrigo Vivi April 18, 2019, 10:48 p.m. UTC | #2
On Thu, Apr 18, 2019 at 10:15:55PM +0100, Chris Wilson wrote:
> Quoting Rodrigo Vivi (2019-04-18 21:53:41)
> > The plan is to consolidate all IRQ-related state
> > under the new intel_irq.
> > 
> > So let's continue with the RPS bits.
> 
> Nah, this is the wrong direction

Do you mean just this RPS move to intel_irq,
or do you think the whole idea of intel_irq is bad?

> we shouldn't pull this under a
> more global lock, but rather push it under a local one.

Yeah, I noticed this big lock around everything;
we do indeed need to reduce its scope...


> If you are touching this
> code, you could review the bug fixes first... :)

:(

Thanks a lot,
Rodrigo.

> -Chris
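
To make the locking point above concrete, here is a minimal sketch —
hypothetical, not part of this series — of the direction Chris suggests:
keep the RPS-only state inside intel_rps under its own lock, and take the
device-global irq.lock only where the shared PM IRQ mask registers are
actually touched.

/*
 * Hypothetical sketch: rps->lock guards only the RPS bookkeeping;
 * the global irq.lock is held just around the shared mask update.
 */
struct intel_rps {
	spinlock_t lock;	/* protects pm_iir, interrupts_enabled */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;
	/* ... */
};

static void gen6_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
{
	struct intel_rps *rps = &i915->gt_pm.rps;
	const u32 events = pm_iir & i915->pm_rps_events;

	if (!events)
		return;

	spin_lock(&i915->irq.lock);	/* mask registers stay global */
	gen6_mask_pm_irq(i915, events);
	spin_unlock(&i915->irq.lock);

	spin_lock(&rps->lock);		/* RPS state under its own lock */
	if (rps->interrupts_enabled) {
		rps->pm_iir |= events;
		schedule_work(&rps->work);
	}
	spin_unlock(&rps->lock);
}

(i915 did eventually grow a dedicated lock inside intel_rps along these
lines.)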

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0b4aa818d66b..06617a67002c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -650,16 +650,12 @@  struct intel_rps_ei {
 struct intel_irq {
 	/* protects the irq masks */
 	spinlock_t lock;
+	bool rps_interrupts_enabled;
+	u32 pm_iir;
 };
 
 struct intel_rps {
-	/*
-	 * work, interrupts_enabled and pm_iir are protected by
-	 * dev_priv->irq.lock
-	 */
 	struct work_struct work;
-	bool interrupts_enabled;
-	u32 pm_iir;
 
 	/* PM interrupt bits that should never be masked */
 	u32 pm_intrmsk_mbz;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 679dc63244d9..487ea27ea152 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -519,7 +519,7 @@  void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 	while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
 		;
 
-	dev_priv->gt_pm.rps.pm_iir = 0;
+	dev_priv->irq.pm_iir = 0;
 
 	spin_unlock_irq(&dev_priv->irq.lock);
 }
@@ -528,46 +528,47 @@  void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 {
 	spin_lock_irq(&dev_priv->irq.lock);
 	gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
-	dev_priv->gt_pm.rps.pm_iir = 0;
+	dev_priv->irq.pm_iir = 0;
 	spin_unlock_irq(&dev_priv->irq.lock);
 }
 
 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_irq *irq = &dev_priv->irq;
 
-	if (READ_ONCE(rps->interrupts_enabled))
+	if (READ_ONCE(irq->rps_interrupts_enabled))
 		return;
 
-	spin_lock_irq(&dev_priv->irq.lock);
-	WARN_ON_ONCE(rps->pm_iir);
+	spin_lock_irq(&irq->lock);
+	WARN_ON_ONCE(irq->pm_iir);
 
 	if (INTEL_GEN(dev_priv) >= 11)
 		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
 	else
 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
 
-	rps->interrupts_enabled = true;
+	irq->rps_interrupts_enabled = true;
 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 
-	spin_unlock_irq(&dev_priv->irq.lock);
+	spin_unlock_irq(&irq->lock);
 }
 
 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_irq *irq = &dev_priv->irq;
 
-	if (!READ_ONCE(rps->interrupts_enabled))
+	if (!READ_ONCE(irq->rps_interrupts_enabled))
 		return;
 
-	spin_lock_irq(&dev_priv->irq.lock);
-	rps->interrupts_enabled = false;
+	spin_lock_irq(&irq->lock);
+	irq->rps_interrupts_enabled = false;
 
 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
 
 	gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
 
-	spin_unlock_irq(&dev_priv->irq.lock);
+	spin_unlock_irq(&irq->lock);
 	synchronize_irq(dev_priv->drm.irq);
 
 	/* Now that we will not be generating any more work, flush any
@@ -1290,8 +1291,8 @@  static void gen6_pm_rps_work(struct work_struct *work)
 	u32 pm_iir = 0;
 
 	spin_lock_irq(&dev_priv->irq.lock);
-	if (rps->interrupts_enabled) {
-		pm_iir = fetch_and_zero(&rps->pm_iir);
+	if (dev_priv->irq.rps_interrupts_enabled) {
+		pm_iir = fetch_and_zero(&dev_priv->irq.pm_iir);
 		client_boost = atomic_read(&rps->num_waiters);
 	}
 	spin_unlock_irq(&dev_priv->irq.lock);
@@ -1372,7 +1373,7 @@  static void gen6_pm_rps_work(struct work_struct *work)
 out:
 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
 	spin_lock_irq(&dev_priv->irq.lock);
-	if (rps->interrupts_enabled)
+	if (dev_priv->irq.rps_interrupts_enabled)
 		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
 	spin_unlock_irq(&dev_priv->irq.lock);
 }
@@ -1843,6 +1844,7 @@  static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
 {
 	struct intel_rps *rps = &i915->gt_pm.rps;
+	struct intel_irq *irq = &i915->irq;
 	const u32 events = i915->pm_rps_events & pm_iir;
 
 	lockdep_assert_held(&i915->irq.lock);
@@ -1852,22 +1854,23 @@  static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
 
 	gen6_mask_pm_irq(i915, events);
 
-	if (!rps->interrupts_enabled)
+	if (!irq->rps_interrupts_enabled)
 		return;
 
-	rps->pm_iir |= events;
+	irq->pm_iir |= events;
 	schedule_work(&rps->work);
 }
 
 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_irq *irq = &dev_priv->irq;
 
 	if (pm_iir & dev_priv->pm_rps_events) {
 		spin_lock(&dev_priv->irq.lock);
 		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
-		if (rps->interrupts_enabled) {
-			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
+		if (irq->rps_interrupts_enabled) {
+			irq->pm_iir |= pm_iir & dev_priv->pm_rps_events;
 			schedule_work(&rps->work);
 		}
 		spin_unlock(&dev_priv->irq.lock);
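
A side note on the enable/disable paths above: they use an unlocked
READ_ONCE() early-out before taking the lock. Below is a distilled sketch
of that idiom, with generic names for illustration only. The i915 code
relies on its callers being serialized, so it merely WARNs on inconsistent
state; the defensive variant shown here re-checks under the lock instead.

#include <linux/spinlock.h>

struct example {
	spinlock_t lock;
	bool enabled;	/* only ever written while holding lock */
};

static void example_enable(struct example *e)
{
	/* Cheap unlocked check: skip the lock when there is nothing to do. */
	if (READ_ONCE(e->enabled))
		return;

	spin_lock_irq(&e->lock);
	if (!e->enabled) {	/* re-check now that we hold the lock */
		/* ... unmask the hardware interrupt sources ... */
		e->enabled = true;
	}
	spin_unlock_irq(&e->lock);
}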