
[19/21] drm/i915: Move the get/put irq locking into the caller

Message ID 1464970133-29859-20-git-send-email-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson June 3, 2016, 4:08 p.m. UTC
With only a single callsite for intel_engine_cs->irq_get and ->irq_put,
we can reduce the code size by moving the common preamble into the
caller, and we can also eliminate the reference counting.

For completeness, as we are no longer doing reference counting on irq,
rename the get/put vfunctions to enable/disable respectively.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_irq.c          |   8 +-
 drivers/gpu/drm/i915/intel_breadcrumbs.c |  10 +-
 drivers/gpu/drm/i915/intel_lrc.c         |  34 +---
 drivers/gpu/drm/i915/intel_ringbuffer.c  | 269 ++++++++++---------------------
 drivers/gpu/drm/i915/intel_ringbuffer.h  |   5 +-
 5 files changed, 108 insertions(+), 218 deletions(-)
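
For reference, the consolidated caller ends up looking like this (taken
from the intel_breadcrumbs.c hunk below); each backend vfunc is reduced
to the bare register write, while the single caller owns the lock and
the irq_posted bookkeeping:

static void irq_enable(struct intel_engine_cs *engine)
{
        /* Caller-side preamble: assume an interrupt may already be pending */
        engine->irq_posted = true;

        /* The common locking now lives here, once, not in every backend */
        spin_lock_irq(&engine->i915->irq_lock);
        engine->irq_enable(engine);
        spin_unlock_irq(&engine->i915->irq_lock);
}

static void irq_disable(struct intel_engine_cs *engine)
{
        spin_lock_irq(&engine->i915->irq_lock);
        engine->irq_disable(engine);
        spin_unlock_irq(&engine->i915->irq_lock);

        engine->irq_posted = false;
}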

Comments

Tvrtko Ursulin June 7, 2016, 12:46 p.m. UTC | #1
On 03/06/16 17:08, Chris Wilson wrote:
> With only a single callsite for intel_engine_cs->irq_get and ->irq_put,
> we can reduce the code size by moving the common preamble into the
> caller, and we can also eliminate the reference counting.
>
> For completeness, as we are no longer doing reference counting on irq,
> rename the get/put vfunctions to enable/disable respectively.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_irq.c          |   8 +-
>   drivers/gpu/drm/i915/intel_breadcrumbs.c |  10 +-
>   drivers/gpu/drm/i915/intel_lrc.c         |  34 +---
>   drivers/gpu/drm/i915/intel_ringbuffer.c  | 269 ++++++++++---------------------
>   drivers/gpu/drm/i915/intel_ringbuffer.h  |   5 +-
>   5 files changed, 108 insertions(+), 218 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> index 14b3d65bb604..5bdb433dde8c 100644
> --- a/drivers/gpu/drm/i915/i915_irq.c
> +++ b/drivers/gpu/drm/i915/i915_irq.c
> @@ -259,12 +259,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
>   	dev_priv->gt_irq_mask &= ~interrupt_mask;
>   	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
>   	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
> -	POSTING_READ(GTIMR);
>   }
>
>   void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
>   {
>   	ilk_update_gt_irq(dev_priv, mask, mask);
> +	POSTING_READ_FW(GTIMR);
>   }

Unrelated hunks?

How is POSTING_READ_FW correct?

Also removes the posting read from disable, OK?

>
>   void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
> @@ -2818,9 +2818,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno)
>   }
>
>   static bool
> -ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr)
> +ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
>   {
> -	if (INTEL_GEN(dev_priv) >= 8) {
> +	if (INTEL_GEN(engine->i915) >= 8) {
>   		return (ipehr >> 23) == 0x1c;
>   	} else {
>   		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
> @@ -2891,7 +2891,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
>   		return NULL;
>
>   	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
> -	if (!ipehr_is_semaphore_wait(engine->i915, ipehr))
> +	if (!ipehr_is_semaphore_wait(engine, ipehr))
>   		return NULL;

Two hunks of meh as some would say. :)

>
>   	/*
> diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
> index 8ab508ed4248..dc65a007fa20 100644
> --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
> +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
> @@ -50,12 +50,18 @@ static void irq_enable(struct intel_engine_cs *engine)
>   	 * just in case.
>   	 */
>   	engine->irq_posted = true;
> -	WARN_ON(!engine->irq_get(engine));
> +
> +	spin_lock_irq(&engine->i915->irq_lock);
> +	engine->irq_enable(engine);
> +	spin_unlock_irq(&engine->i915->irq_lock);
>   }
>
>   static void irq_disable(struct intel_engine_cs *engine)
>   {
> -	engine->irq_put(engine);
> +	spin_lock_irq(&engine->i915->irq_lock);
> +	engine->irq_disable(engine);
> +	spin_unlock_irq(&engine->i915->irq_lock);
> +
>   	engine->irq_posted = false;
>   }
>
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 32b5eae7dd11..9e19b2c5b3ae 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -1578,36 +1578,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
>   	return 0;
>   }
>
> -static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
> +static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
>   {
>   	struct drm_i915_private *dev_priv = engine->i915;
> -	unsigned long flags;
> -
> -	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
> -		return false;
> -
> -	spin_lock_irqsave(&dev_priv->irq_lock, flags);
> -	if (engine->irq_refcount++ == 0) {
> -		I915_WRITE_IMR(engine,
> -			       ~(engine->irq_enable_mask | engine->irq_keep_mask));
> -		POSTING_READ(RING_IMR(engine->mmio_base));
> -	}
> -	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
> -
> -	return true;
> +	I915_WRITE_IMR(engine,
> +		       ~(engine->irq_enable_mask | engine->irq_keep_mask));
> +	POSTING_READ_FW(RING_IMR(engine->mmio_base));

Hm, more of _FW following normal access. What am I missing? You are not 
by any chance banking on the auto-release window?

>   }
>
> -static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
> +static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
>   {
>   	struct drm_i915_private *dev_priv = engine->i915;
> -	unsigned long flags;
> -
> -	spin_lock_irqsave(&dev_priv->irq_lock, flags);
> -	if (--engine->irq_refcount == 0) {
> -		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
> -		POSTING_READ(RING_IMR(engine->mmio_base));
> -	}
> -	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
> +	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);

Another posting read gone here?

>   }
>
>   static int gen8_emit_flush(struct drm_i915_gem_request *request,
> @@ -1895,8 +1877,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
>   	engine->init_hw = gen8_init_common_ring;
>   	engine->emit_request = gen8_emit_request;
>   	engine->emit_flush = gen8_emit_flush;
> -	engine->irq_get = gen8_logical_ring_get_irq;
> -	engine->irq_put = gen8_logical_ring_put_irq;
> +	engine->irq_enable = gen8_logical_ring_enable_irq;
> +	engine->irq_disable = gen8_logical_ring_disable_irq;
>   	engine->emit_bb_start = gen8_emit_bb_start;
>   	if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
>   		engine->irq_seqno_barrier = bxt_a_seqno_barrier;
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 30e400d77d23..ba84b469f13f 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1551,103 +1551,54 @@ gen6_seqno_barrier(struct intel_engine_cs *engine)
>   	spin_unlock_irq(&dev_priv->uncore.lock);
>   }
>
> -static bool
> -gen5_ring_get_irq(struct intel_engine_cs *engine)
> +static void
> +gen5_ring_enable_irq(struct intel_engine_cs *engine)
>   {
> -	struct drm_i915_private *dev_priv = engine->i915;
> -	unsigned long flags;
> -
> -	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
> -		return false;
> -
> -	spin_lock_irqsave(&dev_priv->irq_lock, flags);
> -	if (engine->irq_refcount++ == 0)
> -		gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
> -	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
> -
> -	return true;
> +	gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
>   }
>
>   static void
> -gen5_ring_put_irq(struct intel_engine_cs *engine)
> +gen5_ring_disable_irq(struct intel_engine_cs *engine)
>   {
> -	struct drm_i915_private *dev_priv = engine->i915;
> -	unsigned long flags;
> -
> -	spin_lock_irqsave(&dev_priv->irq_lock, flags);
> -	if (--engine->irq_refcount == 0)
> -		gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
> -	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
> +	gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
>   }
>
> -static bool
> -i9xx_ring_get_irq(struct intel_engine_cs *engine)
> +static void
> +i9xx_ring_enable_irq(struct intel_engine_cs *engine)
>   {
>   	struct drm_i915_private *dev_priv = engine->i915;
> -	unsigned long flags;
> -
> -	if (!intel_irqs_enabled(dev_priv))
> -		return false;
> -
> -	spin_lock_irqsave(&dev_priv->irq_lock, flags);
> -	if (engine->irq_refcount++ == 0) {
> -		dev_priv->irq_mask &= ~engine->irq_enable_mask;
> -		I915_WRITE(IMR, dev_priv->irq_mask);
> -		POSTING_READ(IMR);
> -	}
> -	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
>
> -	return true;
> +	dev_priv->irq_mask &= ~engine->irq_enable_mask;
> +	I915_WRITE(IMR, dev_priv->irq_mask);
> +	POSTING_READ_FW(RING_IMR(engine->mmio_base));
>   }
>
>   static void
> -i9xx_ring_put_irq(struct intel_engine_cs *engine)
> +i9xx_ring_disable_irq(struct intel_engine_cs *engine)
>   {
>   	struct drm_i915_private *dev_priv = engine->i915;
> -	unsigned long flags;
>
> -	spin_lock_irqsave(&dev_priv->irq_lock, flags);
> -	if (--engine->irq_refcount == 0) {
> -		dev_priv->irq_mask |= engine->irq_enable_mask;
> -		I915_WRITE(IMR, dev_priv->irq_mask);
> -		POSTING_READ(IMR);
> -	}
> -	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
> +	dev_priv->irq_mask |= engine->irq_enable_mask;
> +	I915_WRITE(IMR, dev_priv->irq_mask);
>   }
>
> -static bool
> -i8xx_ring_get_irq(struct intel_engine_cs *engine)
> +static void
> +i8xx_ring_enable_irq(struct intel_engine_cs *engine)
>   {
>   	struct drm_i915_private *dev_priv = engine->i915;
> -	unsigned long flags;
>
> -	if (!intel_irqs_enabled(dev_priv))
> -		return false;
> -
> -	spin_lock_irqsave(&dev_priv->irq_lock, flags);
> -	if (engine->irq_refcount++ == 0) {
> -		dev_priv->irq_mask &= ~engine->irq_enable_mask;
> -		I915_WRITE16(IMR, dev_priv->irq_mask);
> -		POSTING_READ16(IMR);
> -	}
> -	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
> -
> -	return true;
> +	dev_priv->irq_mask &= ~engine->irq_enable_mask;
> +	I915_WRITE16(IMR, dev_priv->irq_mask);
> +	POSTING_READ16(RING_IMR(engine->mmio_base));
>   }
>
>   static void
> -i8xx_ring_put_irq(struct intel_engine_cs *engine)
> +i8xx_ring_disable_irq(struct intel_engine_cs *engine)
>   {
>   	struct drm_i915_private *dev_priv = engine->i915;
> -	unsigned long flags;
>
> -	spin_lock_irqsave(&dev_priv->irq_lock, flags);
> -	if (--engine->irq_refcount == 0) {
> -		dev_priv->irq_mask |= engine->irq_enable_mask;
> -		I915_WRITE16(IMR, dev_priv->irq_mask);
> -		POSTING_READ16(IMR);
> -	}
> -	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
> +	dev_priv->irq_mask |= engine->irq_enable_mask;
> +	I915_WRITE16(IMR, dev_priv->irq_mask);
>   }
>
>   static int
> @@ -1688,122 +1639,74 @@ i9xx_add_request(struct drm_i915_gem_request *req)
>   	return 0;
>   }
>
> -static bool
> -gen6_ring_get_irq(struct intel_engine_cs *engine)
> +static void
> +gen6_ring_enable_irq(struct intel_engine_cs *engine)
>   {
>   	struct drm_i915_private *dev_priv = engine->i915;
> -	unsigned long flags;
> -
> -	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
> -		return false;
>
> -	spin_lock_irqsave(&dev_priv->irq_lock, flags);
> -	if (engine->irq_refcount++ == 0) {
> -		if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
> -			I915_WRITE_IMR(engine,
> -				       ~(engine->irq_enable_mask |
> -					 GT_PARITY_ERROR(dev_priv)));
> -		else
> -			I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
> -		gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
> -	}
> -	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
> -
> -	return true;
> +	if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
> +		I915_WRITE_IMR(engine,
> +			       ~(engine->irq_enable_mask |
> +				 GT_PARITY_ERROR(dev_priv)));
> +	else
> +		I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
> +	gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
>   }
>
>   static void
> -gen6_ring_put_irq(struct intel_engine_cs *engine)
> +gen6_ring_disable_irq(struct intel_engine_cs *engine)
>   {
>   	struct drm_i915_private *dev_priv = engine->i915;
> -	unsigned long flags;
>
> -	spin_lock_irqsave(&dev_priv->irq_lock, flags);
> -	if (--engine->irq_refcount == 0) {
> -		if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
> -			I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
> -		else
> -			I915_WRITE_IMR(engine, ~0);
> -		gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
> -	}
> -	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
> +	if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
> +		I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
> +	else
> +		I915_WRITE_IMR(engine, ~0);
> +	gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
>   }
>
> -static bool
> -hsw_vebox_get_irq(struct intel_engine_cs *engine)
> +static void
> +hsw_vebox_enable_irq(struct intel_engine_cs *engine)
>   {
>   	struct drm_i915_private *dev_priv = engine->i915;
> -	unsigned long flags;
> -
> -	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
> -		return false;
>
> -	spin_lock_irqsave(&dev_priv->irq_lock, flags);
> -	if (engine->irq_refcount++ == 0) {
> -		I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
> -		gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
> -	}
> -	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
> -
> -	return true;
> +	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
> +	gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
>   }
>
>   static void
> -hsw_vebox_put_irq(struct intel_engine_cs *engine)
> +hsw_vebox_disable_irq(struct intel_engine_cs *engine)
>   {
>   	struct drm_i915_private *dev_priv = engine->i915;
> -	unsigned long flags;
>
> -	spin_lock_irqsave(&dev_priv->irq_lock, flags);
> -	if (--engine->irq_refcount == 0) {
> -		I915_WRITE_IMR(engine, ~0);
> -		gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
> -	}
> -	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
> +	I915_WRITE_IMR(engine, ~0);
> +	gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
>   }
>
> -static bool
> -gen8_ring_get_irq(struct intel_engine_cs *engine)
> +static void
> +gen8_ring_enable_irq(struct intel_engine_cs *engine)
>   {
>   	struct drm_i915_private *dev_priv = engine->i915;
> -	unsigned long flags;
>
> -	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
> -		return false;
> -
> -	spin_lock_irqsave(&dev_priv->irq_lock, flags);
> -	if (engine->irq_refcount++ == 0) {
> -		if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
> -			I915_WRITE_IMR(engine,
> -				       ~(engine->irq_enable_mask |
> -					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
> -		} else {
> -			I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
> -		}
> -		POSTING_READ(RING_IMR(engine->mmio_base));
> -	}
> -	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
> -
> -	return true;
> +	if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
> +		I915_WRITE_IMR(engine,
> +			       ~(engine->irq_enable_mask |
> +				 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
> +	else
> +		I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
> +	POSTING_READ_FW(RING_IMR(engine->mmio_base));
>   }
>
>   static void
> -gen8_ring_put_irq(struct intel_engine_cs *engine)
> +gen8_ring_disable_irq(struct intel_engine_cs *engine)
>   {
>   	struct drm_i915_private *dev_priv = engine->i915;
> -	unsigned long flags;
>
> -	spin_lock_irqsave(&dev_priv->irq_lock, flags);
> -	if (--engine->irq_refcount == 0) {
> -		if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
> -			I915_WRITE_IMR(engine,
> -				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
> -		} else {
> -			I915_WRITE_IMR(engine, ~0);
> -		}
> -		POSTING_READ(RING_IMR(engine->mmio_base));
> -	}
> -	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
> +	if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
> +		I915_WRITE_IMR(engine,
> +			       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
> +	else
> +		I915_WRITE_IMR(engine, ~0);
>   }
>
>   static int
> @@ -2739,8 +2642,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
>   		engine->init_context = intel_rcs_ctx_init;
>   		engine->add_request = gen8_render_add_request;
>   		engine->flush = gen8_render_ring_flush;
> -		engine->irq_get = gen8_ring_get_irq;
> -		engine->irq_put = gen8_ring_put_irq;
> +		engine->irq_enable = gen8_ring_enable_irq;
> +		engine->irq_disable = gen8_ring_disable_irq;
>   		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
>   		if (i915_semaphore_is_enabled(dev_priv)) {
>   			WARN_ON(!dev_priv->semaphore_obj);
> @@ -2754,8 +2657,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
>   		engine->flush = gen7_render_ring_flush;
>   		if (IS_GEN6(dev_priv))
>   			engine->flush = gen6_render_ring_flush;
> -		engine->irq_get = gen6_ring_get_irq;
> -		engine->irq_put = gen6_ring_put_irq;
> +		engine->irq_enable = gen6_ring_enable_irq;
> +		engine->irq_disable = gen6_ring_disable_irq;
>   		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
>   		engine->irq_seqno_barrier = gen6_seqno_barrier;
>   		if (i915_semaphore_is_enabled(dev_priv)) {
> @@ -2782,8 +2685,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
>   	} else if (IS_GEN5(dev_priv)) {
>   		engine->add_request = i9xx_add_request;
>   		engine->flush = gen4_render_ring_flush;
> -		engine->irq_get = gen5_ring_get_irq;
> -		engine->irq_put = gen5_ring_put_irq;
> +		engine->irq_enable = gen5_ring_enable_irq;
> +		engine->irq_disable = gen5_ring_disable_irq;
>   		engine->irq_seqno_barrier = gen5_seqno_barrier;
>   		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
>   	} else {
> @@ -2793,11 +2696,11 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
>   		else
>   			engine->flush = gen4_render_ring_flush;
>   		if (IS_GEN2(dev_priv)) {
> -			engine->irq_get = i8xx_ring_get_irq;
> -			engine->irq_put = i8xx_ring_put_irq;
> +			engine->irq_enable = i8xx_ring_enable_irq;
> +			engine->irq_disable = i8xx_ring_disable_irq;
>   		} else {
> -			engine->irq_get = i9xx_ring_get_irq;
> -			engine->irq_put = i9xx_ring_put_irq;
> +			engine->irq_enable = i9xx_ring_enable_irq;
> +			engine->irq_disable = i9xx_ring_disable_irq;
>   		}
>   		engine->irq_enable_mask = I915_USER_INTERRUPT;
>   	}
> @@ -2857,8 +2760,8 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
>   		if (INTEL_GEN(dev_priv) >= 8) {
>   			engine->irq_enable_mask =
>   				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
> -			engine->irq_get = gen8_ring_get_irq;
> -			engine->irq_put = gen8_ring_put_irq;
> +			engine->irq_enable = gen8_ring_enable_irq;
> +			engine->irq_disable = gen8_ring_disable_irq;
>   			engine->dispatch_execbuffer =
>   				gen8_ring_dispatch_execbuffer;
>   			if (i915_semaphore_is_enabled(dev_priv)) {
> @@ -2868,8 +2771,8 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
>   			}
>   		} else {
>   			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
> -			engine->irq_get = gen6_ring_get_irq;
> -			engine->irq_put = gen6_ring_put_irq;
> +			engine->irq_enable = gen6_ring_enable_irq;
> +			engine->irq_disable = gen6_ring_disable_irq;
>   			engine->dispatch_execbuffer =
>   				gen6_ring_dispatch_execbuffer;
>   			if (i915_semaphore_is_enabled(dev_priv)) {
> @@ -2893,13 +2796,13 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
>   		engine->add_request = i9xx_add_request;
>   		if (IS_GEN5(dev_priv)) {
>   			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
> -			engine->irq_get = gen5_ring_get_irq;
> -			engine->irq_put = gen5_ring_put_irq;
> +			engine->irq_enable = gen5_ring_enable_irq;
> +			engine->irq_disable = gen5_ring_disable_irq;
>   			engine->irq_seqno_barrier = gen5_seqno_barrier;
>   		} else {
>   			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
> -			engine->irq_get = i9xx_ring_get_irq;
> -			engine->irq_put = i9xx_ring_put_irq;
> +			engine->irq_enable = i9xx_ring_enable_irq;
> +			engine->irq_disable = i9xx_ring_disable_irq;
>   		}
>   		engine->dispatch_execbuffer = i965_dispatch_execbuffer;
>   	}
> @@ -2928,8 +2831,8 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
>   	engine->irq_seqno_barrier = gen6_seqno_barrier;
>   	engine->irq_enable_mask =
>   			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
> -	engine->irq_get = gen8_ring_get_irq;
> -	engine->irq_put = gen8_ring_put_irq;
> +	engine->irq_enable = gen8_ring_enable_irq;
> +	engine->irq_disable = gen8_ring_disable_irq;
>   	engine->dispatch_execbuffer =
>   			gen8_ring_dispatch_execbuffer;
>   	if (i915_semaphore_is_enabled(dev_priv)) {
> @@ -2960,8 +2863,8 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
>   	if (INTEL_GEN(dev_priv) >= 8) {
>   		engine->irq_enable_mask =
>   			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
> -		engine->irq_get = gen8_ring_get_irq;
> -		engine->irq_put = gen8_ring_put_irq;
> +		engine->irq_enable = gen8_ring_enable_irq;
> +		engine->irq_disable = gen8_ring_disable_irq;
>   		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
>   		if (i915_semaphore_is_enabled(dev_priv)) {
>   			engine->semaphore.sync_to = gen8_ring_sync;
> @@ -2970,8 +2873,8 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
>   		}
>   	} else {
>   		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
> -		engine->irq_get = gen6_ring_get_irq;
> -		engine->irq_put = gen6_ring_put_irq;
> +		engine->irq_enable = gen6_ring_enable_irq;
> +		engine->irq_disable = gen6_ring_disable_irq;
>   		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
>   		if (i915_semaphore_is_enabled(dev_priv)) {
>   			engine->semaphore.signal = gen6_signal;
> @@ -3019,8 +2922,8 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
>   	if (INTEL_GEN(dev_priv) >= 8) {
>   		engine->irq_enable_mask =
>   			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
> -		engine->irq_get = gen8_ring_get_irq;
> -		engine->irq_put = gen8_ring_put_irq;
> +		engine->irq_enable = gen8_ring_enable_irq;
> +		engine->irq_disable = gen8_ring_disable_irq;
>   		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
>   		if (i915_semaphore_is_enabled(dev_priv)) {
>   			engine->semaphore.sync_to = gen8_ring_sync;
> @@ -3029,8 +2932,8 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
>   		}
>   	} else {
>   		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
> -		engine->irq_get = hsw_vebox_get_irq;
> -		engine->irq_put = hsw_vebox_put_irq;
> +		engine->irq_enable = hsw_vebox_enable_irq;
> +		engine->irq_disable = hsw_vebox_disable_irq;
>   		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
>   		if (i915_semaphore_is_enabled(dev_priv)) {
>   			engine->semaphore.sync_to = gen6_ring_sync;
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 5f7cb3d0ea1c..182cae767bf1 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -201,11 +201,10 @@ struct intel_engine_cs {
>   	struct intel_hw_status_page status_page;
>   	struct i915_ctx_workarounds wa_ctx;
>
> -	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
>   	bool		irq_posted;
>   	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
> -	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
> -	void		(*irq_put)(struct intel_engine_cs *ring);
> +	void		(*irq_enable)(struct intel_engine_cs *ring);
> +	void		(*irq_disable)(struct intel_engine_cs *ring);
>
>   	int		(*init_hw)(struct intel_engine_cs *ring);
>
>

Some more instances of things I've already asked about.

Apart from those opens it looks OK.

Regards,

Tvrtko
Chris Wilson June 8, 2016, 10:01 a.m. UTC | #2
On Tue, Jun 07, 2016 at 01:46:53PM +0100, Tvrtko Ursulin wrote:
> 
> On 03/06/16 17:08, Chris Wilson wrote:
> >With only a single callsite for intel_engine_cs->irq_get and ->irq_put,
> >we can reduce the code size by moving the common preamble into the
> >caller, and we can also eliminate the reference counting.
> >
> >For completeness, as we are no longer doing reference counting on irq,
> >rename the get/put vfunctions to enable/disable respectively.
> >
> >Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >---
> >  drivers/gpu/drm/i915/i915_irq.c          |   8 +-
> >  drivers/gpu/drm/i915/intel_breadcrumbs.c |  10 +-
> >  drivers/gpu/drm/i915/intel_lrc.c         |  34 +---
> >  drivers/gpu/drm/i915/intel_ringbuffer.c  | 269 ++++++++++---------------------
> >  drivers/gpu/drm/i915/intel_ringbuffer.h  |   5 +-
> >  5 files changed, 108 insertions(+), 218 deletions(-)
> >
> >diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> >index 14b3d65bb604..5bdb433dde8c 100644
> >--- a/drivers/gpu/drm/i915/i915_irq.c
> >+++ b/drivers/gpu/drm/i915/i915_irq.c
> >@@ -259,12 +259,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
> >  	dev_priv->gt_irq_mask &= ~interrupt_mask;
> >  	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
> >  	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
> >-	POSTING_READ(GTIMR);
> >  }
> >
> >  void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
> >  {
> >  	ilk_update_gt_irq(dev_priv, mask, mask);
> >+	POSTING_READ_FW(GTIMR);
> >  }
> 
> Unrelated hunks?
> 
> How is POSTING_READ_FW correct?

The requirement here is an uncached read of the mmio register in order
to flush the previous write to hw. A grander scheme would be to convert
all posting reads, but that requires double checking to see if anyone
has been cheating!
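
A rough sketch of the point, using gen5_enable_gt_irq() from this patch
(the comments paraphrase the intent, they are not in the patch):

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, mask); /* I915_WRITE(GTIMR, ...) */

        /* Uncached mmio read of the same register: pushes the preceding
         * write out of any write buffer and into the hardware before we
         * return.  The value read back is discarded, so the raw accessor
         * is sufficient for the barrier. */
        POSTING_READ_FW(GTIMR);
}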

> Also removes the posting read from disable, OK?

Correct, we only depend upon the ordering with hw on the enable path.
This is one of those rare instances where the barrier is required (and
a UC write is not enough!): without it we would check the "post-enabled seqno"
before the interrupt is ready.
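
Spelled out, the ordering the enable path relies on is roughly (names
from the patch, the waiter side paraphrased):

/* enable path, under dev_priv->irq_lock */
engine->irq_posted = true;
I915_WRITE_IMR(engine, ~(engine->irq_enable_mask | engine->irq_keep_mask));
POSTING_READ_FW(RING_IMR(engine->mmio_base)); /* unmask has reached the hw */

/* The waiter may now sample the seqno and, if the request is not yet
 * complete, sleep knowing the user interrupt is already live.  Without
 * the posting read the seqno check can run while the IMR write is still
 * buffered, and the completion interrupt can be missed. */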

> >+	I915_WRITE_IMR(engine,
> >+		       ~(engine->irq_enable_mask | engine->irq_keep_mask));
> >+	POSTING_READ_FW(RING_IMR(engine->mmio_base));
> 
> Hm, more of _FW following normal access. What am I missing? You are
> not by any chance banking on the auto-release window?

Nope.
-Chris
Tvrtko Ursulin June 8, 2016, 10:18 a.m. UTC | #3
On 08/06/16 11:01, Chris Wilson wrote:
> On Tue, Jun 07, 2016 at 01:46:53PM +0100, Tvrtko Ursulin wrote:
>>
>> On 03/06/16 17:08, Chris Wilson wrote:
>>> With only a single callsite for intel_engine_cs->irq_get and ->irq_put,
>>> we can reduce the code size by moving the common preamble into the
>>> caller, and we can also eliminate the reference counting.
>>>
>>> For completeness, as we are no longer doing reference counting on irq,
>>> rename the get/put vfunctions to enable/disable respectively.
>>>
>>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>>> ---
>>>   drivers/gpu/drm/i915/i915_irq.c          |   8 +-
>>>   drivers/gpu/drm/i915/intel_breadcrumbs.c |  10 +-
>>>   drivers/gpu/drm/i915/intel_lrc.c         |  34 +---
>>>   drivers/gpu/drm/i915/intel_ringbuffer.c  | 269 ++++++++++---------------------
>>>   drivers/gpu/drm/i915/intel_ringbuffer.h  |   5 +-
>>>   5 files changed, 108 insertions(+), 218 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
>>> index 14b3d65bb604..5bdb433dde8c 100644
>>> --- a/drivers/gpu/drm/i915/i915_irq.c
>>> +++ b/drivers/gpu/drm/i915/i915_irq.c
>>> @@ -259,12 +259,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
>>>   	dev_priv->gt_irq_mask &= ~interrupt_mask;
>>>   	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
>>>   	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
>>> -	POSTING_READ(GTIMR);
>>>   }
>>>
>>>   void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
>>>   {
>>>   	ilk_update_gt_irq(dev_priv, mask, mask);
>>> +	POSTING_READ_FW(GTIMR);
>>>   }
>>
>> Unrelated hunks?
>>
>> How is POSTING_READ_FW correct?
>
> The requirement here is an uncached read of the mmio register in order
> to flush the previous write to hw. A grander scheme would be to convert
> all posting reads, but that requires double checking to see if anyone
> has been cheating!

So what prevents the force-wake from getting released between the
I915_WRITE and the POSTING_READ_FW?

Regards,

Tvrtko
Chris Wilson June 8, 2016, 11:10 a.m. UTC | #4
On Wed, Jun 08, 2016 at 11:18:59AM +0100, Tvrtko Ursulin wrote:
> 
> On 08/06/16 11:01, Chris Wilson wrote:
> >On Tue, Jun 07, 2016 at 01:46:53PM +0100, Tvrtko Ursulin wrote:
> >>
> >>On 03/06/16 17:08, Chris Wilson wrote:
> >>>With only a single callsite for intel_engine_cs->irq_get and ->irq_put,
> >>>we can reduce the code size by moving the common preamble into the
> >>>caller, and we can also eliminate the reference counting.
> >>>
> >>>For completeness, as we are no longer doing reference counting on irq,
> >>>rename the get/put vfunctions to enable/disable respectively.
> >>>
> >>>Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >>>---
> >>>  drivers/gpu/drm/i915/i915_irq.c          |   8 +-
> >>>  drivers/gpu/drm/i915/intel_breadcrumbs.c |  10 +-
> >>>  drivers/gpu/drm/i915/intel_lrc.c         |  34 +---
> >>>  drivers/gpu/drm/i915/intel_ringbuffer.c  | 269 ++++++++++---------------------
> >>>  drivers/gpu/drm/i915/intel_ringbuffer.h  |   5 +-
> >>>  5 files changed, 108 insertions(+), 218 deletions(-)
> >>>
> >>>diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> >>>index 14b3d65bb604..5bdb433dde8c 100644
> >>>--- a/drivers/gpu/drm/i915/i915_irq.c
> >>>+++ b/drivers/gpu/drm/i915/i915_irq.c
> >>>@@ -259,12 +259,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
> >>>  	dev_priv->gt_irq_mask &= ~interrupt_mask;
> >>>  	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
> >>>  	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
> >>>-	POSTING_READ(GTIMR);
> >>>  }
> >>>
> >>>  void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
> >>>  {
> >>>  	ilk_update_gt_irq(dev_priv, mask, mask);
> >>>+	POSTING_READ_FW(GTIMR);
> >>>  }
> >>
> >>Unrelated hunks?
> >>
> >>How is POSTING_READ_FW correct?
> >
> >The requirement here is an uncached read of the mmio register in order
> >to flush the previous write to hw. A grander scheme would be to convert
> >all posting reads, but that requires double checking to see if anyone
> >has been cheating!
> 
> So what prevents the force-wake from getting released between the
> I915_WRITE and the POSTING_READ_FW?

Nothing. The point is that the FW is not required for the correctness or
operation of the POSTING_READ as a barrier to hardware enabling the
interrupt.
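
Put differently, the forcewake state only affects the data returned, and
here the data is thrown away; the read transaction itself is the barrier.
Roughly (simplified, not the literal i915_drv.h definitions):

#define POSTING_READ(reg)    ((void)I915_READ_NOTRACE(reg)) /* forcewake-aware */
#define POSTING_READ_FW(reg) ((void)I915_READ_FW(reg))      /* raw mmio, no fw bookkeeping */
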
-Chris
Tvrtko Ursulin June 8, 2016, 11:49 a.m. UTC | #5
On 08/06/16 12:10, Chris Wilson wrote:
> On Wed, Jun 08, 2016 at 11:18:59AM +0100, Tvrtko Ursulin wrote:
>>
>> On 08/06/16 11:01, Chris Wilson wrote:
>>> On Tue, Jun 07, 2016 at 01:46:53PM +0100, Tvrtko Ursulin wrote:
>>>>
>>>> On 03/06/16 17:08, Chris Wilson wrote:
>>>>> With only a single callsite for intel_engine_cs->irq_get and ->irq_put,
>>>>> we can reduce the code size by moving the common preamble into the
>>>>> caller, and we can also eliminate the reference counting.
>>>>>
>>>>> For completeness, as we are no longer doing reference counting on irq,
>>>>> rename the get/put vfunctions to enable/disable respectively.
>>>>>
>>>>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>>>>> ---
>>>>>   drivers/gpu/drm/i915/i915_irq.c          |   8 +-
>>>>>   drivers/gpu/drm/i915/intel_breadcrumbs.c |  10 +-
>>>>>   drivers/gpu/drm/i915/intel_lrc.c         |  34 +---
>>>>>   drivers/gpu/drm/i915/intel_ringbuffer.c  | 269 ++++++++++---------------------
>>>>>   drivers/gpu/drm/i915/intel_ringbuffer.h  |   5 +-
>>>>>   5 files changed, 108 insertions(+), 218 deletions(-)
>>>>>
>>>>> diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
>>>>> index 14b3d65bb604..5bdb433dde8c 100644
>>>>> --- a/drivers/gpu/drm/i915/i915_irq.c
>>>>> +++ b/drivers/gpu/drm/i915/i915_irq.c
>>>>> @@ -259,12 +259,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
>>>>>   	dev_priv->gt_irq_mask &= ~interrupt_mask;
>>>>>   	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
>>>>>   	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
>>>>> -	POSTING_READ(GTIMR);
>>>>>   }
>>>>>
>>>>>   void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
>>>>>   {
>>>>>   	ilk_update_gt_irq(dev_priv, mask, mask);
>>>>> +	POSTING_READ_FW(GTIMR);
>>>>>   }
>>>>
>>>> Unrelated hunks?
>>>>
>>>> How is POSTING_READ_FW correct?
>>>
>>> The requirement here is an uncached read of the mmio register in order
>>> to flush the previous write to hw. A grander scheme would be to convert
>>> all posting reads, but that requires double checking to see if anyone
>>> has been cheating!
>>
>> So what prevents the force-wake from getting released between the
>> I915_WRITE and the POSTING_READ_FW?
>
> Nothing. The point is that the FW is not required for the correctness or
> operation of the POSTING_READ as a barrier to hardware enabling the
> interrupt.

So sleeping hardware is OK with being read from? It won't hang or 
anything, just provide bad data?

Why not change POSTING_READ to be I915_READ_FW always then?

Regards,

Tvrtko
Chris Wilson June 8, 2016, 12:54 p.m. UTC | #6
On Wed, Jun 08, 2016 at 12:49:14PM +0100, Tvrtko Ursulin wrote:
> 
> On 08/06/16 12:10, Chris Wilson wrote:
> >On Wed, Jun 08, 2016 at 11:18:59AM +0100, Tvrtko Ursulin wrote:
> >>
> >>On 08/06/16 11:01, Chris Wilson wrote:
> >>>On Tue, Jun 07, 2016 at 01:46:53PM +0100, Tvrtko Ursulin wrote:
> >>>>
> >>>>On 03/06/16 17:08, Chris Wilson wrote:
> >>>>>With only a single callsite for intel_engine_cs->irq_get and ->irq_put,
> >>>>>we can reduce the code size by moving the common preamble into the
> >>>>>caller, and we can also eliminate the reference counting.
> >>>>>
> >>>>>For completeness, as we are no longer doing reference counting on irq,
> >>>>>rename the get/put vfunctions to enable/disable respectively.
> >>>>>
> >>>>>Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >>>>>---
> >>>>>  drivers/gpu/drm/i915/i915_irq.c          |   8 +-
> >>>>>  drivers/gpu/drm/i915/intel_breadcrumbs.c |  10 +-
> >>>>>  drivers/gpu/drm/i915/intel_lrc.c         |  34 +---
> >>>>>  drivers/gpu/drm/i915/intel_ringbuffer.c  | 269 ++++++++++---------------------
> >>>>>  drivers/gpu/drm/i915/intel_ringbuffer.h  |   5 +-
> >>>>>  5 files changed, 108 insertions(+), 218 deletions(-)
> >>>>>
> >>>>>diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> >>>>>index 14b3d65bb604..5bdb433dde8c 100644
> >>>>>--- a/drivers/gpu/drm/i915/i915_irq.c
> >>>>>+++ b/drivers/gpu/drm/i915/i915_irq.c
> >>>>>@@ -259,12 +259,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
> >>>>>  	dev_priv->gt_irq_mask &= ~interrupt_mask;
> >>>>>  	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
> >>>>>  	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
> >>>>>-	POSTING_READ(GTIMR);
> >>>>>  }
> >>>>>
> >>>>>  void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
> >>>>>  {
> >>>>>  	ilk_update_gt_irq(dev_priv, mask, mask);
> >>>>>+	POSTING_READ_FW(GTIMR);
> >>>>>  }
> >>>>
> >>>>Unrelated hunks?
> >>>>
> >>>>How is POSTING_READ_FW correct?
> >>>
> >>>The requirement here is an uncached read of the mmio register in order
> >>>to flush the previous write to hw. A grander scheme would be to convert
> >>>all posting reads, but that requires double checking to see if anyone
> >>>has been cheating!
> >>
> >>So what prevents the force-wake from getting released between the
> >>I915_WRITE and the POSTING_READ_FW?
> >
> >Nothing. The point is that the FW is not required for the correctness or
> >operation of the POSTING_READ as a barrier to hardware enabling the
> >interrupt.
> 
> So sleeping hardware is OK with being read from? It won't hang or
> anything, just provide bad data?

Just garbage.
 
> Why not change POSTING_READ to be I915_READ_FW always then?

The first plan was to purge all the posting-reads; that exercise proved some are
required. So now, if one appears in a profile, it is asked to justify its
existence.
-Chris

Patch

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 14b3d65bb604..5bdb433dde8c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -259,12 +259,12 @@  static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
 	dev_priv->gt_irq_mask &= ~interrupt_mask;
 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-	POSTING_READ(GTIMR);
 }
 
 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
 	ilk_update_gt_irq(dev_priv, mask, mask);
+	POSTING_READ_FW(GTIMR);
 }
 
 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
@@ -2818,9 +2818,9 @@  ring_idle(struct intel_engine_cs *engine, u32 seqno)
 }
 
 static bool
-ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr)
+ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
 {
-	if (INTEL_GEN(dev_priv) >= 8) {
+	if (INTEL_GEN(engine->i915) >= 8) {
 		return (ipehr >> 23) == 0x1c;
 	} else {
 		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
@@ -2891,7 +2891,7 @@  semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 		return NULL;
 
 	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
-	if (!ipehr_is_semaphore_wait(engine->i915, ipehr))
+	if (!ipehr_is_semaphore_wait(engine, ipehr))
 		return NULL;
 
 	/*
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 8ab508ed4248..dc65a007fa20 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -50,12 +50,18 @@  static void irq_enable(struct intel_engine_cs *engine)
 	 * just in case.
 	 */
 	engine->irq_posted = true;
-	WARN_ON(!engine->irq_get(engine));
+
+	spin_lock_irq(&engine->i915->irq_lock);
+	engine->irq_enable(engine);
+	spin_unlock_irq(&engine->i915->irq_lock);
 }
 
 static void irq_disable(struct intel_engine_cs *engine)
 {
-	engine->irq_put(engine);
+	spin_lock_irq(&engine->i915->irq_lock);
+	engine->irq_disable(engine);
+	spin_unlock_irq(&engine->i915->irq_lock);
+
 	engine->irq_posted = false;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 32b5eae7dd11..9e19b2c5b3ae 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1578,36 +1578,18 @@  static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 	return 0;
 }
 
-static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
+static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (engine->irq_refcount++ == 0) {
-		I915_WRITE_IMR(engine,
-			       ~(engine->irq_enable_mask | engine->irq_keep_mask));
-		POSTING_READ(RING_IMR(engine->mmio_base));
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	I915_WRITE_IMR(engine,
+		       ~(engine->irq_enable_mask | engine->irq_keep_mask));
+	POSTING_READ_FW(RING_IMR(engine->mmio_base));
 }
 
-static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
+static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--engine->irq_refcount == 0) {
-		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
-		POSTING_READ(RING_IMR(engine->mmio_base));
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 }
 
 static int gen8_emit_flush(struct drm_i915_gem_request *request,
@@ -1895,8 +1877,8 @@  logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 	engine->init_hw = gen8_init_common_ring;
 	engine->emit_request = gen8_emit_request;
 	engine->emit_flush = gen8_emit_flush;
-	engine->irq_get = gen8_logical_ring_get_irq;
-	engine->irq_put = gen8_logical_ring_put_irq;
+	engine->irq_enable = gen8_logical_ring_enable_irq;
+	engine->irq_disable = gen8_logical_ring_disable_irq;
 	engine->emit_bb_start = gen8_emit_bb_start;
 	if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
 		engine->irq_seqno_barrier = bxt_a_seqno_barrier;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 30e400d77d23..ba84b469f13f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1551,103 +1551,54 @@  gen6_seqno_barrier(struct intel_engine_cs *engine)
 	spin_unlock_irq(&dev_priv->uncore.lock);
 }
 
-static bool
-gen5_ring_get_irq(struct intel_engine_cs *engine)
+static void
+gen5_ring_enable_irq(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (engine->irq_refcount++ == 0)
-		gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
 }
 
 static void
-gen5_ring_put_irq(struct intel_engine_cs *engine)
+gen5_ring_disable_irq(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--engine->irq_refcount == 0)
-		gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
 }
 
-static bool
-i9xx_ring_get_irq(struct intel_engine_cs *engine)
+static void
+i9xx_ring_enable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
-
-	if (!intel_irqs_enabled(dev_priv))
-		return false;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (engine->irq_refcount++ == 0) {
-		dev_priv->irq_mask &= ~engine->irq_enable_mask;
-		I915_WRITE(IMR, dev_priv->irq_mask);
-		POSTING_READ(IMR);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
-	return true;
+	dev_priv->irq_mask &= ~engine->irq_enable_mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ_FW(RING_IMR(engine->mmio_base));
 }
 
 static void
-i9xx_ring_put_irq(struct intel_engine_cs *engine)
+i9xx_ring_disable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--engine->irq_refcount == 0) {
-		dev_priv->irq_mask |= engine->irq_enable_mask;
-		I915_WRITE(IMR, dev_priv->irq_mask);
-		POSTING_READ(IMR);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	dev_priv->irq_mask |= engine->irq_enable_mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
 }
 
-static bool
-i8xx_ring_get_irq(struct intel_engine_cs *engine)
+static void
+i8xx_ring_enable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
 
-	if (!intel_irqs_enabled(dev_priv))
-		return false;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (engine->irq_refcount++ == 0) {
-		dev_priv->irq_mask &= ~engine->irq_enable_mask;
-		I915_WRITE16(IMR, dev_priv->irq_mask);
-		POSTING_READ16(IMR);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	dev_priv->irq_mask &= ~engine->irq_enable_mask;
+	I915_WRITE16(IMR, dev_priv->irq_mask);
+	POSTING_READ16(RING_IMR(engine->mmio_base));
 }
 
 static void
-i8xx_ring_put_irq(struct intel_engine_cs *engine)
+i8xx_ring_disable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--engine->irq_refcount == 0) {
-		dev_priv->irq_mask |= engine->irq_enable_mask;
-		I915_WRITE16(IMR, dev_priv->irq_mask);
-		POSTING_READ16(IMR);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	dev_priv->irq_mask |= engine->irq_enable_mask;
+	I915_WRITE16(IMR, dev_priv->irq_mask);
 }
 
 static int
@@ -1688,122 +1639,74 @@  i9xx_add_request(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-static bool
-gen6_ring_get_irq(struct intel_engine_cs *engine)
+static void
+gen6_ring_enable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (engine->irq_refcount++ == 0) {
-		if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
-			I915_WRITE_IMR(engine,
-				       ~(engine->irq_enable_mask |
-					 GT_PARITY_ERROR(dev_priv)));
-		else
-			I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
-		gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+		I915_WRITE_IMR(engine,
+			       ~(engine->irq_enable_mask |
+				 GT_PARITY_ERROR(dev_priv)));
+	else
+		I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+	gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
 }
 
 static void
-gen6_ring_put_irq(struct intel_engine_cs *engine)
+gen6_ring_disable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--engine->irq_refcount == 0) {
-		if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
-			I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
-		else
-			I915_WRITE_IMR(engine, ~0);
-		gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+		I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
+	else
+		I915_WRITE_IMR(engine, ~0);
+	gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
 }
 
-static bool
-hsw_vebox_get_irq(struct intel_engine_cs *engine)
+static void
+hsw_vebox_enable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (engine->irq_refcount++ == 0) {
-		I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
-		gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+	gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
 }
 
 static void
-hsw_vebox_put_irq(struct intel_engine_cs *engine)
+hsw_vebox_disable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--engine->irq_refcount == 0) {
-		I915_WRITE_IMR(engine, ~0);
-		gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	I915_WRITE_IMR(engine, ~0);
+	gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
 }
 
-static bool
-gen8_ring_get_irq(struct intel_engine_cs *engine)
+static void
+gen8_ring_enable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
 
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (engine->irq_refcount++ == 0) {
-		if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
-			I915_WRITE_IMR(engine,
-				       ~(engine->irq_enable_mask |
-					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
-		} else {
-			I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
-		}
-		POSTING_READ(RING_IMR(engine->mmio_base));
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+		I915_WRITE_IMR(engine,
+			       ~(engine->irq_enable_mask |
+				 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+	else
+		I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+	POSTING_READ_FW(RING_IMR(engine->mmio_base));
 }
 
 static void
-gen8_ring_put_irq(struct intel_engine_cs *engine)
+gen8_ring_disable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--engine->irq_refcount == 0) {
-		if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
-			I915_WRITE_IMR(engine,
-				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
-		} else {
-			I915_WRITE_IMR(engine, ~0);
-		}
-		POSTING_READ(RING_IMR(engine->mmio_base));
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+		I915_WRITE_IMR(engine,
+			       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+	else
+		I915_WRITE_IMR(engine, ~0);
 }
 
 static int
@@ -2739,8 +2642,8 @@  int intel_init_render_ring_buffer(struct drm_device *dev)
 		engine->init_context = intel_rcs_ctx_init;
 		engine->add_request = gen8_render_add_request;
 		engine->flush = gen8_render_ring_flush;
-		engine->irq_get = gen8_ring_get_irq;
-		engine->irq_put = gen8_ring_put_irq;
+		engine->irq_enable = gen8_ring_enable_irq;
+		engine->irq_disable = gen8_ring_disable_irq;
 		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		if (i915_semaphore_is_enabled(dev_priv)) {
 			WARN_ON(!dev_priv->semaphore_obj);
@@ -2754,8 +2657,8 @@  int intel_init_render_ring_buffer(struct drm_device *dev)
 		engine->flush = gen7_render_ring_flush;
 		if (IS_GEN6(dev_priv))
 			engine->flush = gen6_render_ring_flush;
-		engine->irq_get = gen6_ring_get_irq;
-		engine->irq_put = gen6_ring_put_irq;
+		engine->irq_enable = gen6_ring_enable_irq;
+		engine->irq_disable = gen6_ring_disable_irq;
 		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		engine->irq_seqno_barrier = gen6_seqno_barrier;
 		if (i915_semaphore_is_enabled(dev_priv)) {
@@ -2782,8 +2685,8 @@  int intel_init_render_ring_buffer(struct drm_device *dev)
 	} else if (IS_GEN5(dev_priv)) {
 		engine->add_request = i9xx_add_request;
 		engine->flush = gen4_render_ring_flush;
-		engine->irq_get = gen5_ring_get_irq;
-		engine->irq_put = gen5_ring_put_irq;
+		engine->irq_enable = gen5_ring_enable_irq;
+		engine->irq_disable = gen5_ring_disable_irq;
 		engine->irq_seqno_barrier = gen5_seqno_barrier;
 		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 	} else {
@@ -2793,11 +2696,11 @@  int intel_init_render_ring_buffer(struct drm_device *dev)
 		else
 			engine->flush = gen4_render_ring_flush;
 		if (IS_GEN2(dev_priv)) {
-			engine->irq_get = i8xx_ring_get_irq;
-			engine->irq_put = i8xx_ring_put_irq;
+			engine->irq_enable = i8xx_ring_enable_irq;
+			engine->irq_disable = i8xx_ring_disable_irq;
 		} else {
-			engine->irq_get = i9xx_ring_get_irq;
-			engine->irq_put = i9xx_ring_put_irq;
+			engine->irq_enable = i9xx_ring_enable_irq;
+			engine->irq_disable = i9xx_ring_disable_irq;
 		}
 		engine->irq_enable_mask = I915_USER_INTERRUPT;
 	}
@@ -2857,8 +2760,8 @@  int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		if (INTEL_GEN(dev_priv) >= 8) {
 			engine->irq_enable_mask =
 				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
-			engine->irq_get = gen8_ring_get_irq;
-			engine->irq_put = gen8_ring_put_irq;
+			engine->irq_enable = gen8_ring_enable_irq;
+			engine->irq_disable = gen8_ring_disable_irq;
 			engine->dispatch_execbuffer =
 				gen8_ring_dispatch_execbuffer;
 			if (i915_semaphore_is_enabled(dev_priv)) {
@@ -2868,8 +2771,8 @@  int intel_init_bsd_ring_buffer(struct drm_device *dev)
 			}
 		} else {
 			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-			engine->irq_get = gen6_ring_get_irq;
-			engine->irq_put = gen6_ring_put_irq;
+			engine->irq_enable = gen6_ring_enable_irq;
+			engine->irq_disable = gen6_ring_disable_irq;
 			engine->dispatch_execbuffer =
 				gen6_ring_dispatch_execbuffer;
 			if (i915_semaphore_is_enabled(dev_priv)) {
@@ -2893,13 +2796,13 @@  int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		engine->add_request = i9xx_add_request;
 		if (IS_GEN5(dev_priv)) {
 			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
-			engine->irq_get = gen5_ring_get_irq;
-			engine->irq_put = gen5_ring_put_irq;
+			engine->irq_enable = gen5_ring_enable_irq;
+			engine->irq_disable = gen5_ring_disable_irq;
 			engine->irq_seqno_barrier = gen5_seqno_barrier;
 		} else {
 			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
-			engine->irq_get = i9xx_ring_get_irq;
-			engine->irq_put = i9xx_ring_put_irq;
+			engine->irq_enable = i9xx_ring_enable_irq;
+			engine->irq_disable = i9xx_ring_disable_irq;
 		}
 		engine->dispatch_execbuffer = i965_dispatch_execbuffer;
 	}
@@ -2928,8 +2831,8 @@  int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 	engine->irq_seqno_barrier = gen6_seqno_barrier;
 	engine->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
-	engine->irq_get = gen8_ring_get_irq;
-	engine->irq_put = gen8_ring_put_irq;
+	engine->irq_enable = gen8_ring_enable_irq;
+	engine->irq_disable = gen8_ring_disable_irq;
 	engine->dispatch_execbuffer =
 			gen8_ring_dispatch_execbuffer;
 	if (i915_semaphore_is_enabled(dev_priv)) {
@@ -2960,8 +2863,8 @@  int intel_init_blt_ring_buffer(struct drm_device *dev)
 	if (INTEL_GEN(dev_priv) >= 8) {
 		engine->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
-		engine->irq_get = gen8_ring_get_irq;
-		engine->irq_put = gen8_ring_put_irq;
+		engine->irq_enable = gen8_ring_enable_irq;
+		engine->irq_disable = gen8_ring_disable_irq;
 		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev_priv)) {
 			engine->semaphore.sync_to = gen8_ring_sync;
@@ -2970,8 +2873,8 @@  int intel_init_blt_ring_buffer(struct drm_device *dev)
 		}
 	} else {
 		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
-		engine->irq_get = gen6_ring_get_irq;
-		engine->irq_put = gen6_ring_put_irq;
+		engine->irq_enable = gen6_ring_enable_irq;
+		engine->irq_disable = gen6_ring_disable_irq;
 		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev_priv)) {
 			engine->semaphore.signal = gen6_signal;
@@ -3019,8 +2922,8 @@  int intel_init_vebox_ring_buffer(struct drm_device *dev)
 	if (INTEL_GEN(dev_priv) >= 8) {
 		engine->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
-		engine->irq_get = gen8_ring_get_irq;
-		engine->irq_put = gen8_ring_put_irq;
+		engine->irq_enable = gen8_ring_enable_irq;
+		engine->irq_disable = gen8_ring_disable_irq;
 		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev_priv)) {
 			engine->semaphore.sync_to = gen8_ring_sync;
@@ -3029,8 +2932,8 @@  int intel_init_vebox_ring_buffer(struct drm_device *dev)
 		}
 	} else {
 		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
-		engine->irq_get = hsw_vebox_get_irq;
-		engine->irq_put = hsw_vebox_put_irq;
+		engine->irq_enable = hsw_vebox_enable_irq;
+		engine->irq_disable = hsw_vebox_disable_irq;
 		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev_priv)) {
 			engine->semaphore.sync_to = gen6_ring_sync;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 5f7cb3d0ea1c..182cae767bf1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -201,11 +201,10 @@  struct intel_engine_cs {
 	struct intel_hw_status_page status_page;
 	struct i915_ctx_workarounds wa_ctx;
 
-	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
 	bool		irq_posted;
 	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
-	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
-	void		(*irq_put)(struct intel_engine_cs *ring);
+	void		(*irq_enable)(struct intel_engine_cs *ring);
+	void		(*irq_disable)(struct intel_engine_cs *ring);
 
 	int		(*init_hw)(struct intel_engine_cs *ring);