
[05/19] drm/i915/icl: Interrupt handling

Message ID 20180215162729.16034-1-mika.kuoppala@linux.intel.com (mailing list archive)
State New, archived

Commit Message

Mika Kuoppala Feb. 15, 2018, 4:27 p.m. UTC
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

v2: Rebase.

v3:
  * Remove DPF, it has been removed from SKL+.
  * Fix -internal rebase wrt. execlists interrupt handling.

v4: Rebase.

v5:
  * Updated for POR changes. (Daniele Ceraolo Spurio)
  * Merged with irq handling fixes by Daniele Ceraolo Spurio:
      * Simplify the code by using gen8_cs_irq_handler.
      * Fix interrupt handling for the upstream kernel.

v6:
  * Remove early bringup debug messages (Tvrtko)
  * Add NB about arbitrary spin wait timeout (Tvrtko)

v7 (from Paulo):
  * Don't try to write RO bits to registers.
  * Don't check for PCH types that don't exist. PCH interrupts are not
    here yet.

v9:
  * Squashed in selector and shared register handling (Daniele)
  * Skip writing of irq if data is not valid (Daniele)
  * Use time_after32 (Chris)
  * Use I915_MAX_VCS and I915_MAX_VECS (Daniele)
  * Remove fake pm interrupt handling for later patch (Mika)

v10:
  * Direct processing of banks; clear banks early (Chris)
  * Remove poll on valid bit, only clear valid bit (Mika)
  * Use raw accessors, better naming (Chris)

Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_irq.c | 222 ++++++++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_pm.c |   7 +-
 2 files changed, 228 insertions(+), 1 deletion(-)

Comments

Tvrtko Ursulin Feb. 15, 2018, 4:35 p.m. UTC | #1
On 15/02/2018 16:27, Mika Kuoppala wrote:
> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

I think a point has arrived where I almost don't recognize the code any
longer, so I would want to relinquish the authorship if you don't mind.

Regards,

Tvrtko


<snip>
Daniele Ceraolo Spurio Feb. 15, 2018, 5:59 p.m. UTC | #2
<snip>

> +
> +static void
> +gen11_gt_irq_handler(struct drm_i915_private * const i915,
> +		     const u32 master_ctl)
> +{
> +	void __iomem * const regs = i915->regs;
> +	unsigned int bank;
> +
> +	for (bank = 0; bank < 2; bank++) {
> +		unsigned long intr_dw;
> +		unsigned int bit;
> +
> +		if (!(master_ctl & GEN11_GT_DW_IRQ(bank)))
> +			continue;
> +
> +		intr_dw = readl(regs +
> +				i915_mmio_reg_offset(GEN11_GT_INTR_DW(bank)));
> +

I think we should keep some kind of polling here, because there is going
to be some delay between writing the selector and getting the correct
value in the identity register, and the CPU might also be running at a
higher frequency than the GPU. The spec does not specify a time but implies
that we have to wait for the valid bit to be set.
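
For reference, a bounded spin on the valid bit could look roughly like the
sketch below (essentially reinstating something like the v9 poll noted in
the changelog). The ~100us budget and the local_clock()/time_after32()
bookkeeping are guesses rather than spec requirements:

static u32
gen11_gt_engine_intr(struct drm_i915_private * const i915,
		     const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = i915->regs;
	u32 timeout_ts;
	u32 ident;

	writel(BIT(bit),
	       regs + i915_mmio_reg_offset(GEN11_IIR_REG_SELECTOR(bank)));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 * local_clock() is in ns; >> 10 approximates us.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = readl(regs +
			      i915_mmio_reg_offset(GEN11_INTR_IDENTITY_REG(bank)));
	} while (!(ident & GEN11_INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			  bank, bit, ident);
		return 0;
	}

	/* Clear the valid bit so the next event can latch. */
	writel(GEN11_INTR_DATA_VALID,
	       regs + i915_mmio_reg_offset(GEN11_INTR_IDENTITY_REG(bank)));

	return ident & GEN11_INTR_ENGINE_MASK;
}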

> +		if (unlikely(!intr_dw))
> +			DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
> +
> +		for_each_set_bit(bit, &intr_dw, 32) {
> +			const u16 iir = gen11_gt_engine_intr(i915, bank, bit);
> +
> +			if (unlikely(!iir))
> +				continue;
> +
> +			gen11_gt_engine_irq_handler(i915, bank, bit, iir);

The identity register contains the class (bits 16-18) and instance (bits 
20-25), so we can potentially decode them and call directly:

	gen11_cs_irq_handler(i915->engine_class[class][instance], iir);

instead of having a big switch. The only problem with that would be that
GuC and PM interrupts come out as class 4, instances 0 and 1, so once we
add support for those we'd have to do something like:

	if (class != OTHER_CLASS)
		gen11_cs_irq_handler(...);
	else {
		switch (instance) {
		case 0:
			gen11_guc_irq_handler(...);
			break;
		case 1:
			gen11_rps_irq_handler(...);
			break;
		default:
			MISSING_CASE();
		}
	}

Not sure if decoding class and instance and having a small switch wins
in the end against the big switch, so I won't complain whichever way you pick ;)
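
For what it's worth, the decode described above might look roughly like
this; GEN11_INTR_ENGINE_CLASS/INSTANCE, OTHER_CLASS and the
engine_class[][] lookup are assumed names used for illustration, not
something this patch defines:

/*
 * Hypothetical decode of the identity dword, assuming the layout
 * described above (class in bits 16-18, instance in bits 20-25).
 * The patch itself only defines GEN11_INTR_DATA_VALID and
 * GEN11_INTR_ENGINE_MASK.
 */
#define GEN11_INTR_ENGINE_CLASS(x)	(((x) & GENMASK(18, 16)) >> 16)
#define GEN11_INTR_ENGINE_INSTANCE(x)	(((x) & GENMASK(25, 20)) >> 20)

static void
gen11_gt_identity_handler(struct drm_i915_private * const i915,
			  const u32 identity)
{
	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
	const u16 iir = identity & GEN11_INTR_ENGINE_MASK;

	if (unlikely(!iir))
		return;

	if (class == OTHER_CLASS) {
		/* GuC (instance 0) and PM (instance 1) would be handled here. */
		return;
	}

	/* Bounds checking of class/instance omitted for brevity. */
	gen11_cs_irq_handler(i915->engine_class[class][instance], iir);
}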

Daniele

> +		}
> +
> +		/* Clear must be after shared has been served for engine */
> +		writel(intr_dw,
> +		       regs + i915_mmio_reg_offset(GEN11_GT_INTR_DW(bank)));
> +	}
> +}
> +

Patch

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b886bd459acc..e3914d1e204e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -415,6 +415,9 @@  void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 	if (READ_ONCE(rps->interrupts_enabled))
 		return;
 
+	if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
+		return;
+
 	spin_lock_irq(&dev_priv->irq_lock);
 	WARN_ON_ONCE(rps->pm_iir);
 	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
@@ -431,6 +434,9 @@  void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 	if (!READ_ONCE(rps->interrupts_enabled))
 		return;
 
+	if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
+		return;
+
 	spin_lock_irq(&dev_priv->irq_lock);
 	rps->interrupts_enabled = false;
 
@@ -2746,6 +2752,143 @@  static void __fini_wedge(struct wedge_me *w)
 	     (W)->i915;							\
 	     __fini_wedge((W)))
 
+static __always_inline void
+gen11_cs_irq_handler(struct intel_engine_cs * const engine, const u32 iir)
+{
+	gen8_cs_irq_handler(engine, iir, 0);
+}
+
+static void
+gen11_gt_engine_irq_handler(struct drm_i915_private * const i915,
+			    const unsigned int bank,
+			    const unsigned int engine_n,
+			    const u16 iir)
+{
+	struct intel_engine_cs ** const engine = i915->engine;
+
+	switch (bank) {
+	case 0:
+		switch (engine_n) {
+
+		case GEN11_RCS0:
+			return gen11_cs_irq_handler(engine[RCS], iir);
+
+		case GEN11_BCS:
+			return gen11_cs_irq_handler(engine[BCS], iir);
+		}
+	case 1:
+		switch (engine_n) {
+
+		case GEN11_VCS(0):
+			return gen11_cs_irq_handler(engine[_VCS(0)], iir);
+		case GEN11_VCS(1):
+			return gen11_cs_irq_handler(engine[_VCS(1)], iir);
+		case GEN11_VCS(2):
+			return gen11_cs_irq_handler(engine[_VCS(2)], iir);
+		case GEN11_VCS(3):
+			return gen11_cs_irq_handler(engine[_VCS(3)], iir);
+
+		case GEN11_VECS(0):
+			return gen11_cs_irq_handler(engine[_VECS(0)], iir);
+		case GEN11_VECS(1):
+			return gen11_cs_irq_handler(engine[_VECS(1)], iir);
+		}
+	}
+}
+
+static u32
+gen11_gt_engine_intr(struct drm_i915_private * const i915,
+		     const unsigned int bank, const unsigned int bit)
+{
+	void __iomem * const regs = i915->regs;
+	u32 ident;
+
+	writel(BIT(bit),
+	       regs + i915_mmio_reg_offset(GEN11_IIR_REG_SELECTOR(bank)));
+
+	ident = readl(regs +
+		      i915_mmio_reg_offset(GEN11_INTR_IDENTITY_REG(bank)));
+
+	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
+		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
+			  bank, bit, ident);
+		return 0;
+	}
+
+	writel(GEN11_INTR_DATA_VALID,
+	       regs + i915_mmio_reg_offset(GEN11_INTR_IDENTITY_REG(bank)));
+
+	return ident & GEN11_INTR_ENGINE_MASK;
+}
+
+static void
+gen11_gt_irq_handler(struct drm_i915_private * const i915,
+		     const u32 master_ctl)
+{
+	void __iomem * const regs = i915->regs;
+	unsigned int bank;
+
+	for (bank = 0; bank < 2; bank++) {
+		unsigned long intr_dw;
+		unsigned int bit;
+
+		if (!(master_ctl & GEN11_GT_DW_IRQ(bank)))
+			continue;
+
+		intr_dw = readl(regs +
+				i915_mmio_reg_offset(GEN11_GT_INTR_DW(bank)));
+
+		if (unlikely(!intr_dw))
+			DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
+
+		for_each_set_bit(bit, &intr_dw, 32) {
+			const u16 iir = gen11_gt_engine_intr(i915, bank, bit);
+
+			if (unlikely(!iir))
+				continue;
+
+			gen11_gt_engine_irq_handler(i915, bank, bit, iir);
+		}
+
+		/* Clear must be after shared has been served for engine */
+		writel(intr_dw,
+		       regs + i915_mmio_reg_offset(GEN11_GT_INTR_DW(bank)));
+	}
+}
+
+static irqreturn_t gen11_irq_handler(int irq, void *arg)
+{
+	struct drm_i915_private * const dev_priv =
+		to_i915((struct drm_device *)arg);
+	u32 master_ctl;
+
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
+	master_ctl = I915_READ_FW(GEN11_GFX_MSTR_IRQ);
+	master_ctl &= ~GEN11_MASTER_IRQ;
+	if (!master_ctl)
+		return IRQ_NONE;
+
+	/* Disable interrupts. */
+	I915_WRITE_FW(GEN11_GFX_MSTR_IRQ, 0);
+
+	/* Find, clear, then process each source of interrupt. */
+	gen11_gt_irq_handler(dev_priv, master_ctl);
+
+	if (master_ctl & GEN11_DISPLAY_IRQ) {
+		disable_rpm_wakeref_asserts(dev_priv);
+		gen8_de_irq_handler(dev_priv,
+				    I915_READ_FW(GEN11_DISPLAY_INT_CTL));
+		enable_rpm_wakeref_asserts(dev_priv);
+	}
+
+	/* Acknowledge and enable interrupts. */
+	I915_WRITE_FW(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
+
+	return IRQ_HANDLED;
+}
+
 /**
  * i915_reset_device - do process context error handling work
  * @dev_priv: i915 device private
@@ -3159,6 +3302,42 @@  static void gen8_irq_reset(struct drm_device *dev)
 		ibx_irq_reset(dev_priv);
 }
 
+static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
+{
+	/* Disable RCS, BCS, VCS and VECS class interrupts. */
+	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
+	I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,	  0);
+
+	/* Restore irq masks on RCS, BCS, VCS and VECS engines. */
+	I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,	~0);
+	I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,	~0);
+	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,	~0);
+	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,	~0);
+	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK,	~0);
+}
+
+static void gen11_irq_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+
+	I915_WRITE(GEN11_GFX_MSTR_IRQ, 0);
+	POSTING_READ(GEN11_GFX_MSTR_IRQ);
+
+	gen11_gt_irq_reset(dev_priv);
+
+	I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
+
+	for_each_pipe(dev_priv, pipe)
+		if (intel_display_power_is_enabled(dev_priv,
+						   POWER_DOMAIN_PIPE(pipe)))
+			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+
+	GEN3_IRQ_RESET(GEN8_DE_PORT_);
+	GEN3_IRQ_RESET(GEN8_DE_MISC_);
+	GEN3_IRQ_RESET(GEN8_PCU_);
+}
+
 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
 				     u8 pipe_mask)
 {
@@ -3656,6 +3835,41 @@  static int gen8_irq_postinstall(struct drm_device *dev)
 	return 0;
 }
 
+static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
+
+	BUILD_BUG_ON(irqs & 0xffff0000);
+
+	/* Enable RCS, BCS, VCS and VECS class interrupts. */
+	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
+	I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,	  irqs << 16 | irqs);
+
+	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
+	I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,	~(irqs << 16));
+	I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,	~(irqs << 16));
+	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,	~(irqs | irqs << 16));
+	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,	~(irqs | irqs << 16));
+	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK,	~(irqs | irqs << 16));
+
+	dev_priv->pm_imr = 0xffffffff; /* TODO */
+}
+
+static int gen11_irq_postinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	gen11_gt_irq_postinstall(dev_priv);
+	gen8_de_irq_postinstall(dev_priv);
+
+	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
+
+	I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
+	POSTING_READ(GEN11_GFX_MSTR_IRQ);
+
+	return 0;
+}
+
 static int cherryview_irq_postinstall(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4104,6 +4318,14 @@  void intel_irq_init(struct drm_i915_private *dev_priv)
 		dev->driver->enable_vblank = i965_enable_vblank;
 		dev->driver->disable_vblank = i965_disable_vblank;
 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+	} else if (INTEL_GEN(dev_priv) >= 11) {
+		dev->driver->irq_handler = gen11_irq_handler;
+		dev->driver->irq_preinstall = gen11_irq_reset;
+		dev->driver->irq_postinstall = gen11_irq_postinstall;
+		dev->driver->irq_uninstall = gen11_irq_reset;
+		dev->driver->enable_vblank = gen8_enable_vblank;
+		dev->driver->disable_vblank = gen8_disable_vblank;
+		dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
 	} else if (INTEL_GEN(dev_priv) >= 8) {
 		dev->driver->irq_handler = gen8_irq_handler;
 		dev->driver->irq_preinstall = gen8_irq_reset;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 939b8a41580c..996ab86100ee 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -8029,7 +8029,10 @@  void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
 	dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
 	intel_disable_gt_powersave(dev_priv);
 
-	gen6_reset_rps_interrupts(dev_priv);
+	if (INTEL_GEN(dev_priv) < 11)
+		gen6_reset_rps_interrupts(dev_priv);
+	else
+		WARN_ON_ONCE(1);
 }
 
 static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
@@ -8142,6 +8145,8 @@  static void intel_enable_rps(struct drm_i915_private *dev_priv)
 		cherryview_enable_rps(dev_priv);
 	} else if (IS_VALLEYVIEW(dev_priv)) {
 		valleyview_enable_rps(dev_priv);
+	} else if (WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11)) {
+		/* TODO */
 	} else if (INTEL_GEN(dev_priv) >= 9) {
 		gen9_enable_rps(dev_priv);
 	} else if (IS_BROADWELL(dev_priv)) {