
[08/18] drm/i915: Create a more generic pm handler for hsw+

Message ID 1369794154-1639-9-git-send-email-ben@bwidawsk.net (mailing list archive)
State New, archived

Commit Message

Ben Widawsky May 29, 2013, 2:22 a.m. UTC
HSW has some special requirements for the VEBOX. Splitting out the
interrupt handler will make the code a bit nicer and less error prone
when we begin to handle those.

The slight functional change in this patch (queueing work while holding
the spinlock) is intentional as it makes a subsequent patch a bit nicer.
The change should also only affect HSW platforms.

Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_irq.c | 30 +++++++++++++++++++++++++++++-
 1 file changed, 29 insertions(+), 1 deletion(-)
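
For context, the "slight functional change" the commit message refers to is where queue_work() is called relative to dev_priv->rps.lock. A rough sketch of the pre-existing gen6 pattern this patch diverges from (simplified from gen6_queue_rps_work() of that era; exact details such as the posting read may differ slightly):

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);	/* mask further PM interrupts */
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	/* gen6: the work is queued only after the lock has been dropped */
	queue_work(dev_priv->wq, &dev_priv->rps.work);

The new hsw_pm_irq_handler() in the patch below instead calls queue_work() while still holding rps.lock, which is the intentional change called out above.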

Comments

Daniel Vetter May 29, 2013, 7:19 p.m. UTC | #1
On Tue, May 28, 2013 at 07:22:24PM -0700, Ben Widawsky wrote:
> HSW has some special requirements for the VEBOX. Splitting out the
> interrupt handler will make the code a bit nicer and less error prone
> when we begin to handle those.
> 
> The slight functional change in this patch (queueing work while holding
> the spinlock) is intentional as it makes a subsequent patch a bit nicer.
> The change should also only affect HSW platforms.
> 
> Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
> Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
> ---
>  drivers/gpu/drm/i915/i915_irq.c | 30 +++++++++++++++++++++++++++++-
>  1 file changed, 29 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> index 557acd3..c7b51c2 100644
> --- a/drivers/gpu/drm/i915/i915_irq.c
> +++ b/drivers/gpu/drm/i915/i915_irq.c
> @@ -842,6 +842,7 @@ static void snb_gt_irq_handler(struct drm_device *dev,
>  		ivybridge_handle_parity_error(dev);
>  }
>  
> +/* Legacy way of handling PM interrupts */
>  static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
>  				u32 pm_iir)
>  {
> @@ -921,6 +922,31 @@ static void dp_aux_irq_handler(struct drm_device *dev)
>  	wake_up_all(&dev_priv->gmbus_wait_queue);
>  }
>  
> +/* Unlike gen6_queue_rps_work() from which this function is originally derived,
> + * we must be able to deal with other PM interrupts. This is complicated because
> + * of the way in which we use the masks to defer the RPS work (which for
> + * posterity is necessary because of forcewake).
> + */
> +static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
> +			       u32 pm_iir)
> +{
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&dev_priv->rps.lock, flags);
> +	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_DEFERRED_EVENTS;
> +	if (dev_priv->rps.pm_iir) {
> +		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
> +		/* never want to mask useful interrupts. (also posting read) */
> +		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_DEFERRED_EVENTS);
> +		/* TODO: if queue_work is slow, move it out of the spinlock */
> +		queue_work(dev_priv->wq, &dev_priv->rps.work);

Not happy with how hsw and gen6 rps queueing now differs ever so slightly. It's
rather completely irrelevant where we actually queue the work since:
- interrupt handlers are non-reentrant
- queue_work is irq safe anyway.
So it smells like cargo-culting to me.

So I've killed your TODO and moved it out. If there's indeed a good reason
later on we can reconsider.
-Daniel

> +	}
> +	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
> +
> +	if (pm_iir & ~GEN6_PM_DEFERRED_EVENTS)
> +		DRM_ERROR("Unexpected PM interrupted\n");
> +}
> +
>  static irqreturn_t valleyview_irq_handler(int irq, void *arg)
>  {
>  	struct drm_device *dev = (struct drm_device *) arg;
> @@ -1231,7 +1257,9 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
>  
>  	pm_iir = I915_READ(GEN6_PMIIR);
>  	if (pm_iir) {
> -		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
> +		if (IS_HASWELL(dev))
> +			hsw_pm_irq_handler(dev_priv, pm_iir);
> +		else if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
>  			gen6_queue_rps_work(dev_priv, pm_iir);
>  		I915_WRITE(GEN6_PMIIR, pm_iir);
>  		ret = IRQ_HANDLED;
> -- 
> 1.8.3
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
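
For reference, the rearrangement Daniel describes in #1 (queueing the work outside the spinlock) could look roughly like the following. This is only an illustrative sketch of the suggestion, not the code that was ultimately committed; the local "queue" flag is a hypothetical helper for the illustration:

	unsigned long flags;
	bool queue = false;

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_DEFERRED_EVENTS;
	if (dev_priv->rps.pm_iir) {
		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
		/* never want to mask useful interrupts (also posting read) */
		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_DEFERRED_EVENTS);
		queue = true;
	}
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	/* queue_work() is irq safe, so it can just as well be called here,
	 * after the lock has been released */
	if (queue)
		queue_work(dev_priv->wq, &dev_priv->rps.work);

As the follow-up reply below notes, this rearrangement was dropped again because a later patch relies on the work being queued while the lock is still held.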
Daniel Vetter May 31, 2013, 6:25 p.m. UTC | #2
On Wed, May 29, 2013 at 09:19:25PM +0200, Daniel Vetter wrote:
> On Tue, May 28, 2013 at 07:22:24PM -0700, Ben Widawsky wrote:
> > HSW has some special requirements for the VEBOX. Splitting out the
> > interrupt handler will make the code a bit nicer and less error prone
> > when we begin to handle those.
> > 
> > The slight functional change in this patch (queueing work while holding
> > the spinlock) is intentional as it makes a subsequent patch a bit nicer.
> > The change should also only affect HSW platforms.
> > 
> > Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
> > Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
> > ---
> >  drivers/gpu/drm/i915/i915_irq.c | 30 +++++++++++++++++++++++++++++-
> >  1 file changed, 29 insertions(+), 1 deletion(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> > index 557acd3..c7b51c2 100644
> > --- a/drivers/gpu/drm/i915/i915_irq.c
> > +++ b/drivers/gpu/drm/i915/i915_irq.c
> > @@ -842,6 +842,7 @@ static void snb_gt_irq_handler(struct drm_device *dev,
> >  		ivybridge_handle_parity_error(dev);
> >  }
> >  
> > +/* Legacy way of handling PM interrupts */
> >  static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
> >  				u32 pm_iir)
> >  {
> > @@ -921,6 +922,31 @@ static void dp_aux_irq_handler(struct drm_device *dev)
> >  	wake_up_all(&dev_priv->gmbus_wait_queue);
> >  }
> >  
> > +/* Unlike gen6_queue_rps_work() from which this function is originally derived,
> > + * we must be able to deal with other PM interrupts. This is complicated because
> > + * of the way in which we use the masks to defer the RPS work (which for
> > + * posterity is necessary because of forcewake).
> > + */
> > +static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
> > +			       u32 pm_iir)
> > +{
> > +	unsigned long flags;
> > +
> > +	spin_lock_irqsave(&dev_priv->rps.lock, flags);
> > +	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_DEFERRED_EVENTS;
> > +	if (dev_priv->rps.pm_iir) {
> > +		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
> > +		/* never want to mask useful interrupts. (also posting read) */
> > +		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_DEFERRED_EVENTS);
> > +		/* TODO: if queue_work is slow, move it out of the spinlock */
> > +		queue_work(dev_priv->wq, &dev_priv->rps.work);
> 
> Not happy with how hsw and gen6 rps queueing now differs ever so slightly. It's
> rather completely irrelevant where we actually queue the work since:
> - interrupt handlers are non-reentrant
> - queue_work is irq safe anyway.
> So it smells like cargo-culting to me.
> 
> So I've killed your TODO and moved it out. If there's indeed a good reason
> later on we can reconsider.

Dropped this change again since it affects correctness later on.
-Daniel

Patch

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 557acd3..c7b51c2 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -842,6 +842,7 @@  static void snb_gt_irq_handler(struct drm_device *dev,
 		ivybridge_handle_parity_error(dev);
 }
 
+/* Legacy way of handling PM interrupts */
 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
 				u32 pm_iir)
 {
@@ -921,6 +922,31 @@  static void dp_aux_irq_handler(struct drm_device *dev)
 	wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
+/* Unlike gen6_queue_rps_work() from which this function is originally derived,
+ * we must be able to deal with other PM interrupts. This is complicated because
+ * of the way in which we use the masks to defer the RPS work (which for
+ * posterity is necessary because of forcewake).
+ */
+static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
+			       u32 pm_iir)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->rps.lock, flags);
+	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_DEFERRED_EVENTS;
+	if (dev_priv->rps.pm_iir) {
+		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
+		/* never want to mask useful interrupts. (also posting read) */
+		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_DEFERRED_EVENTS);
+		/* TODO: if queue_work is slow, move it out of the spinlock */
+		queue_work(dev_priv->wq, &dev_priv->rps.work);
+	}
+	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+
+	if (pm_iir & ~GEN6_PM_DEFERRED_EVENTS)
+		DRM_ERROR("Unexpected PM interrupted\n");
+}
+
 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
@@ -1231,7 +1257,9 @@  static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 
 	pm_iir = I915_READ(GEN6_PMIIR);
 	if (pm_iir) {
-		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+		if (IS_HASWELL(dev))
+			hsw_pm_irq_handler(dev_priv, pm_iir);
+		else if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
 			gen6_queue_rps_work(dev_priv, pm_iir);
 		I915_WRITE(GEN6_PMIIR, pm_iir);
 		ret = IRQ_HANDLED;