
drm/i915: irq handlers don't need interrupt-safe spinlocks

Message ID 1372163273-21183-1-git-send-email-daniel.vetter@ffwll.ch (mailing list archive)
State New, archived

Commit Message

Daniel Vetter June 25, 2013, 12:27 p.m. UTC
Since we only have one interrupt handler and interrupt handlers are
non-reentrant, functions called only from the interrupt handler don't
need the interrupt-safe spin_lock_irqsave variants - plain spin_lock()
is enough.

To drive the point really home give them all an _irq_handler suffix.

This is a tiny micro-optimization, but more importantly it makes it
clearer what locking we actually need. And in case someone screws this
up: lockdep will catch hardirq vs. other context deadlocks.

v2: Fix up compile fail.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/i915/i915_irq.c | 42 ++++++++++++++++++-----------------------
 1 file changed, 18 insertions(+), 24 deletions(-)
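
The whole transformation boils down to the pattern below - an
illustrative sketch with made-up names (example_state, example_lock),
not code from the tree:

struct example_state {
	unsigned int counter;	/* shared with process-context code */
};

static DEFINE_SPINLOCK(example_lock);

/* Before: spin_lock_irqsave() is safe in any context, but when the
 * caller is the hardirq handler it pointlessly saves and restores
 * interrupt flags that are already disabled. */
static void example_handle_event(struct example_state *state)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	state->counter++;
	spin_unlock_irqrestore(&example_lock, flags);
}

/* After: only ever called from the (non-reentrant) interrupt handler,
 * where interrupts are already off, so plain spin_lock() suffices.
 * Process-context code taking example_lock must keep using the
 * irqsave variants on its side. */
static void example_event_irq_handler(struct example_state *state)
{
	spin_lock(&example_lock);
	state->counter++;
	spin_unlock(&example_lock);
}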

Comments

Paulo Zanoni June 27, 2013, 9:14 p.m. UTC | #1
2013/6/25 Daniel Vetter <daniel.vetter@ffwll.ch>:
> Since we only have one interrupt handler and interrupt handlers are
> non-reentrant, functions called only from the interrupt handler don't
> need the interrupt-safe spin_lock_irqsave variants - plain spin_lock()
> is enough.
>
> To drive the point really home give them all an _irq_handler suffix.

Could we also add WARN(!in_irq()) or something equivalent for better
protection? Big backtraces are a nice way to discover we did something
wrong.
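
Something like this hypothetical sketch (made-up handler name, just to
illustrate the idea, not a concrete patch):

static void example_irq_handler(struct drm_i915_private *dev_priv)
{
	/* Complain with a full backtrace if this ever runs outside
	 * hardirq context, where plain spin_lock() would be unsafe. */
	WARN_ON_ONCE(!in_irq());

	spin_lock(&dev_priv->irq_lock);
	/* ... */
	spin_unlock(&dev_priv->irq_lock);
}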

Besides the suggestion, the patch looks correct.

> [snip]
Daniel Vetter June 27, 2013, 10:40 p.m. UTC | #2
On Thu, Jun 27, 2013 at 11:14 PM, Paulo Zanoni <przanoni@gmail.com> wrote:
> 2013/6/25 Daniel Vetter <daniel.vetter@ffwll.ch>:
>> Since we only have one interrupt handler and interrupt handlers are
>> non-reentrant, functions called only from the interrupt handler don't
>> need the interrupt-safe spin_lock_irqsave variants - plain spin_lock()
>> is enough.
>>
>> To drive the point really home give them all an _irq_handler suffix.
>
> Could we also add WARN(!in_irq()) or something equivalent for better
> protection? Big backtraces are a nice way to discover we did something
> wrong.

Lockdep checks hard/soft irq context constraints and will scream
horribly into dmesg if we get it wrong. So I don't think an in_irq()
warning will add any value. Aside: hard/softirq isn't the only special
context lockdep checks for; it also checks for memory allocations,
e.g. if you hold a lock while calling kmalloc and your shrinker needs
the same locks, it'll scream. That's why we have all the GFP_NORETRY
stuff when allocating memory and the trylock in the shrinker.
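
The shrinker side of that is roughly the pattern below - a schematic
sketch with a made-up example_dev, not the actual i915 code:

struct example_dev {
	struct mutex lock;	/* also held around kmalloc() calls */
	struct shrinker shrinker;
};

/* Direct reclaim can run from inside an allocation made while
 * example_dev.lock is held, so the shrinker must never block on that
 * lock - lockdep would (rightly) scream about the inversion. */
static int example_shrink(struct shrinker *shrinker,
			  struct shrink_control *sc)
{
	struct example_dev *dev =
		container_of(shrinker, struct example_dev, shrinker);

	if (!mutex_trylock(&dev->lock))
		return 0;	/* can't free anything this round */

	/* ... drop caches protected by dev->lock ... */

	mutex_unlock(&dev->lock);
	return 0;
}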
-Daniel
--
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch
Paulo Zanoni June 28, 2013, 4:57 p.m. UTC | #3
2013/6/27 Daniel Vetter <daniel.vetter@ffwll.ch>:
> On Thu, Jun 27, 2013 at 11:14 PM, Paulo Zanoni <przanoni@gmail.com> wrote:
>> 2013/6/25 Daniel Vetter <daniel.vetter@ffwll.ch>:
>>> Since we only have one interrupt handler and interrupt handlers are
>>> non-reentrant, functions called only from the interrupt handler don't
>>> need the interrupt-safe spin_lock_irqsave variants - plain spin_lock()
>>> is enough.
>>>
>>> To drive the point really home give them all an _irq_handler suffix.
>>
>> Could we also add WARN(!in_irq()) or something equivalent for better
>> protection? Big backtraces are a nice way to discover we did something
>> wrong.
>
> Lockdep checks hard/soft irq context constraints and will scream
> horribly into dmesg if we get it wrong. So I don't think an in_irq()
> warning will add any value. Aside: hard/softirq isn't the only special
> context lockdep checks for; it also checks for memory allocations,
> e.g. if you hold a lock while calling kmalloc and your shrinker needs
> the same locks, it'll scream. That's why we have all the GFP_NORETRY
> stuff when allocating memory and the trylock in the shrinker.

Ok then. Looks like I forgot the stamp:
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>


Patch

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3e6524a..6906b0f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -646,14 +646,13 @@  static void i915_hotplug_work_func(struct work_struct *work)
 		drm_kms_helper_hotplug_event(dev);
 }
 
-static void ironlake_handle_rps_change(struct drm_device *dev)
+static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 busy_up, busy_down, max_avg, min_avg;
 	u8 new_delay;
-	unsigned long flags;
 
-	spin_lock_irqsave(&mchdev_lock, flags);
+	spin_lock(&mchdev_lock);
 
 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
 
@@ -681,7 +680,7 @@  static void ironlake_handle_rps_change(struct drm_device *dev)
 	if (ironlake_set_drps(dev, new_delay))
 		dev_priv->ips.cur_delay = new_delay;
 
-	spin_unlock_irqrestore(&mchdev_lock, flags);
+	spin_unlock(&mchdev_lock);
 
 	return;
 }
@@ -817,18 +816,17 @@  static void ivybridge_parity_work(struct work_struct *work)
 	kfree(parity_event[1]);
 }
 
-static void ivybridge_handle_parity_error(struct drm_device *dev)
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long flags;
 
 	if (!HAS_L3_GPU_CACHE(dev))
 		return;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	spin_lock(&dev_priv->irq_lock);
 	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	spin_unlock(&dev_priv->irq_lock);
 
 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 }
@@ -854,15 +852,13 @@  static void snb_gt_irq_handler(struct drm_device *dev,
 	}
 
 	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
-		ivybridge_handle_parity_error(dev);
+		ivybridge_parity_error_irq_handler(dev);
 }
 
 /* Legacy way of handling PM interrupts */
-static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
-				u32 pm_iir)
+static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv,
+				 u32 pm_iir)
 {
-	unsigned long flags;
-
 	/*
 	 * IIR bits should never already be set because IMR should
 	 * prevent an interrupt from being shown in IIR. The warning
@@ -873,11 +869,11 @@  static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
 	 * The mask bit in IMR is cleared by dev_priv->rps.work.
 	 */
 
-	spin_lock_irqsave(&dev_priv->rps.lock, flags);
+	spin_lock(&dev_priv->rps.lock);
 	dev_priv->rps.pm_iir |= pm_iir;
 	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
 	POSTING_READ(GEN6_PMIMR);
-	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+	spin_unlock(&dev_priv->rps.lock);
 
 	queue_work(dev_priv->wq, &dev_priv->rps.work);
 }
@@ -941,7 +937,7 @@  static void dp_aux_irq_handler(struct drm_device *dev)
 	wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
-/* Unlike gen6_queue_rps_work() from which this function is originally derived,
+/* Unlike gen6_rps_irq_handler() from which this function is originally derived,
  * we must be able to deal with other PM interrupts. This is complicated because
  * of the way in which we use the masks to defer the RPS work (which for
  * posterity is necessary because of forcewake).
@@ -949,9 +945,7 @@  static void dp_aux_irq_handler(struct drm_device *dev)
 static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
 			       u32 pm_iir)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev_priv->rps.lock, flags);
+	spin_lock(&dev_priv->rps.lock);
 	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
 	if (dev_priv->rps.pm_iir) {
 		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
@@ -960,7 +954,7 @@  static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
 		/* TODO: if queue_work is slow, move it out of the spinlock */
 		queue_work(dev_priv->wq, &dev_priv->rps.work);
 	}
-	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+	spin_unlock(&dev_priv->rps.lock);
 
 	if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
@@ -1042,7 +1036,7 @@  static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 			gmbus_irq_handler(dev);
 
 		if (pm_iir & GEN6_PM_RPS_EVENTS)
-			gen6_queue_rps_work(dev_priv, pm_iir);
+			gen6_rps_irq_handler(dev_priv, pm_iir);
 
 		I915_WRITE(GTIIR, gt_iir);
 		I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -1280,7 +1274,7 @@  static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 		if (IS_HASWELL(dev))
 			hsw_pm_irq_handler(dev_priv, pm_iir);
 		else if (pm_iir & GEN6_PM_RPS_EVENTS)
-			gen6_queue_rps_work(dev_priv, pm_iir);
+			gen6_rps_irq_handler(dev_priv, pm_iir);
 		I915_WRITE(GEN6_PMIIR, pm_iir);
 		ret = IRQ_HANDLED;
 	}
@@ -1397,10 +1391,10 @@  static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	}
 
 	if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
-		ironlake_handle_rps_change(dev);
+		ironlake_rps_change_irq_handler(dev);
 
 	if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
-		gen6_queue_rps_work(dev_priv, pm_iir);
+		gen6_rps_irq_handler(dev_priv, pm_iir);
 
 	I915_WRITE(GTIIR, gt_iir);
 	I915_WRITE(DEIIR, de_iir);