
[18/24] drm/i915: unify ring irq refcounts (again)

Message ID 1371037046-3732-19-git-send-email-daniel.vetter@ffwll.ch (mailing list archive)
State New, archived

Commit Message

Daniel Vetter June 12, 2013, 11:37 a.m. UTC
With the simplified locking there's no reason any more to keep the
refcounts separate.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 20 ++++++++++----------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  5 +----
 2 files changed, 11 insertions(+), 14 deletions(-)
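
For reference, after this patch every ring's get/put helper shares the same
refcounted enable/disable pattern under dev_priv->irq_lock; only the mask
register programmed inside the critical section differs per platform (IMR,
GTIMR or GEN6_PMIMR). A condensed sketch of the gen5-style pair, using the
hypothetical names ring_get_irq/ring_put_irq and omitting the
dev->irq_enabled check (not the exact driver code):

	/* Simplified sketch of the shared pattern after unification. */
	static bool ring_get_irq(struct intel_ring_buffer *ring)
	{
		drm_i915_private_t *dev_priv = ring->dev->dev_private;
		unsigned long flags;

		spin_lock_irqsave(&dev_priv->irq_lock, flags);
		if (ring->irq_refcount++ == 0) {
			/* First reference: unmask this ring's interrupt. */
			dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
			I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
			POSTING_READ(GTIMR);
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

		return true;
	}

	static void ring_put_irq(struct intel_ring_buffer *ring)
	{
		drm_i915_private_t *dev_priv = ring->dev->dev_private;
		unsigned long flags;

		spin_lock_irqsave(&dev_priv->irq_lock, flags);
		if (--ring->irq_refcount == 0) {
			/* Last reference dropped: mask the interrupt again. */
			dev_priv->gt_irq_mask |= ring->irq_enable_mask;
			I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
			POSTING_READ(GTIMR);
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	}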

Comments

Ben Widawsky June 28, 2013, 5:24 p.m. UTC | #1
On Wed, Jun 12, 2013 at 01:37:20PM +0200, Daniel Vetter wrote:
> With the simplified locking there's no reason any more to keep the
> refcounts separate.

I guess my nitpick from the previous patch is echoed here. The reason still
exists, the benefit just doesn't outweigh the complexity, and I think
we'd want separate enable_masks for pm/gt to make it worthwhile to use
separate refcounts.

Anyway, can you add a comment in the code about what irq_refcount does?
With that, it's
Reviewed-by: Ben Widawsky <ben@bwidawsk.net>

Daniel Vetter July 4, 2013, 8:52 p.m. UTC | #2
On Fri, Jun 28, 2013 at 10:24:32AM -0700, Ben Widawsky wrote:
> On Wed, Jun 12, 2013 at 01:37:20PM +0200, Daniel Vetter wrote:
> > With the simplified locking there's no reason any more to keep the
> > refcounts separate.
> 
> I guess my nitpick from the previous patch is echoed here. The reason still
> exists, the benefit just doesn't outweigh the complexity, and I think
> we'd want separate enable_masks for pm/gt to make it worthwhile to use
> separate refcounts.

Imo it's clear enough that ring->irq_refcount is a refcount for the ring
interrupt. As compensation I've added back the lost comment noting that it is
protected by the device irq_lock.
-Daniel

> 
> Anyway, can you add a comment in the code about what irq_refcount does?
> With that, it's
> Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
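
For illustration, the restored comment presumably ends up next to the unified
field in intel_ringbuffer.h, roughly like this (the exact wording and
placement in the merged patch may differ):

	unsigned irq_refcount;	/* protected by dev_priv->irq_lock */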

Patch

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index a7c9934..b75e9d0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -819,7 +819,7 @@  gen5_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
+	if (ring->irq_refcount++ == 0) {
 		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
@@ -837,7 +837,7 @@  gen5_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
+	if (--ring->irq_refcount == 0) {
 		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
@@ -856,7 +856,7 @@  i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
+	if (ring->irq_refcount++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
@@ -874,7 +874,7 @@  i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
+	if (--ring->irq_refcount == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
@@ -893,7 +893,7 @@  i8xx_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
+	if (ring->irq_refcount++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
@@ -911,7 +911,7 @@  i8xx_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
+	if (--ring->irq_refcount == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
@@ -1004,7 +1004,7 @@  gen6_ring_get_irq(struct intel_ring_buffer *ring)
 	gen6_gt_force_wake_get(dev_priv);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
+	if (ring->irq_refcount++ == 0) {
 		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring,
 				       ~(ring->irq_enable_mask |
@@ -1028,7 +1028,7 @@  gen6_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
+	if (--ring->irq_refcount == 0) {
 		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring,
 				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
@@ -1054,7 +1054,7 @@  hsw_vebox_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.pm++ == 0) {
+	if (ring->irq_refcount++ == 0) {
 		u32 pm_imr = I915_READ(GEN6_PMIMR);
 		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
 		I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
@@ -1076,7 +1076,7 @@  hsw_vebox_put_irq(struct intel_ring_buffer *ring)
 		return;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.pm == 0) {
+	if (--ring->irq_refcount == 0) {
 		u32 pm_imr = I915_READ(GEN6_PMIMR);
 		I915_WRITE_IMR(ring, ~0);
 		I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index f960805..26e304c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -74,10 +74,7 @@  struct  intel_ring_buffer {
 	 */
 	u32		last_retired_head;
 
-	struct {
-		u32	gt; /*  protected by dev_priv->irq_lock */
-		u32	pm; /*  protected by dev_priv->irq_lock */
-	} irq_refcount;
+	unsigned irq_refcount;
 	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
 	u32		trace_irq_seqno;
 	u32		sync_seqno[I915_NUM_RINGS-1];