@@ -1584,10 +1584,6 @@ struct drm_i915_private {
u32 de_irq_mask[I915_MAX_PIPES];
};
u32 gt_irq_mask;
- u32 pm_imr;
- u32 pm_ier;
- u32 pm_rps_events;
- u32 pm_guc_events;
u32 pipestat_irq_mask[I915_MAX_PIPES];
struct i915_hotplug hotplug;
@@ -204,7 +204,6 @@ static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
POSTING_READ16(type##IMR); \
} while (0)
-static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
/* For display hotplug interrupt */
@@ -343,220 +342,6 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
ilk_update_gt_irq(dev_priv, mask, 0);
}
-static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
-{
- WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
-
- return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
-}
-
-static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
-{
- if (INTEL_GEN(dev_priv) >= 11)
- return GEN11_GPM_WGBOXPERF_INTR_MASK;
- else if (INTEL_GEN(dev_priv) >= 8)
- return GEN8_GT_IMR(2);
- else
- return GEN6_PMIMR;
-}
-
-static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
-{
- if (INTEL_GEN(dev_priv) >= 11)
- return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
- else if (INTEL_GEN(dev_priv) >= 8)
- return GEN8_GT_IER(2);
- else
- return GEN6_PMIER;
-}
-
-/**
- * snb_update_pm_irq - update GEN6_PMIMR
- * @dev_priv: driver private
- * @interrupt_mask: mask of interrupt bits to update
- * @enabled_irq_mask: mask of interrupt bits to enable
- */
-static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
-{
- uint32_t new_val;
-
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
-
- lockdep_assert_held(&dev_priv->irq_lock);
-
- new_val = dev_priv->pm_imr;
- new_val &= ~interrupt_mask;
- new_val |= (~enabled_irq_mask & interrupt_mask);
-
- if (new_val != dev_priv->pm_imr) {
- dev_priv->pm_imr = new_val;
- I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
- POSTING_READ(gen6_pm_imr(dev_priv));
- }
-}
-
-void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return;
-
- snb_update_pm_irq(dev_priv, mask, mask);
-}
-
-static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
- snb_update_pm_irq(dev_priv, mask, 0);
-}
-
-void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return;
-
- __gen6_mask_pm_irq(dev_priv, mask);
-}
-
-static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
-{
- i915_reg_t reg = gen6_pm_iir(dev_priv);
-
- lockdep_assert_held(&dev_priv->irq_lock);
-
- I915_WRITE(reg, reset_mask);
- I915_WRITE(reg, reset_mask);
- POSTING_READ(reg);
-}
-
-static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
-{
- lockdep_assert_held(&dev_priv->irq_lock);
-
- dev_priv->pm_ier |= enable_mask;
- I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
- gen6_unmask_pm_irq(dev_priv, enable_mask);
- /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
-}
-
-static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
-{
- lockdep_assert_held(&dev_priv->irq_lock);
-
- dev_priv->pm_ier &= ~disable_mask;
- __gen6_mask_pm_irq(dev_priv, disable_mask);
- I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
- /* though a barrier is missing here, but don't really need a one */
-}
-
-void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- spin_lock_irq(&dev_priv->irq_lock);
-
- while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
- ;
-
- dev_priv->gt_pm.rps.pm_iir = 0;
-
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- spin_lock_irq(&dev_priv->irq_lock);
- gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
- dev_priv->gt_pm.rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- if (READ_ONCE(rps->interrupts_enabled))
- return;
-
- spin_lock_irq(&dev_priv->irq_lock);
- WARN_ON_ONCE(rps->pm_iir);
-
- if (INTEL_GEN(dev_priv) >= 11)
- WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
- else
- WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
-
- rps->interrupts_enabled = true;
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
-
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- if (!READ_ONCE(rps->interrupts_enabled))
- return;
-
- spin_lock_irq(&dev_priv->irq_lock);
- rps->interrupts_enabled = false;
-
- I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
-
- gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
-
- spin_unlock_irq(&dev_priv->irq_lock);
- synchronize_irq(dev_priv->drm.irq);
-
- /* Now that we will not be generating any more work, flush any
- * outstanding tasks. As we are called on the RPS idle path,
- * we will reset the GPU to minimum frequencies, so the current
- * state of the worker can be discarded.
- */
- cancel_work_sync(&rps->work);
- if (INTEL_GEN(dev_priv) >= 11)
- gen11_reset_rps_interrupts(dev_priv);
- else
- gen6_reset_rps_interrupts(dev_priv);
-}
-
-void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
-{
- assert_rpm_wakelock_held(dev_priv);
-
- spin_lock_irq(&dev_priv->irq_lock);
- gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
-{
- assert_rpm_wakelock_held(dev_priv);
-
- spin_lock_irq(&dev_priv->irq_lock);
- if (!dev_priv->guc.interrupts_enabled) {
- WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
- dev_priv->pm_guc_events);
- dev_priv->guc.interrupts_enabled = true;
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
- }
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
-{
- assert_rpm_wakelock_held(dev_priv);
-
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->guc.interrupts_enabled = false;
-
- gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
-
- spin_unlock_irq(&dev_priv->irq_lock);
- synchronize_irq(dev_priv->drm.irq);
-
- gen9_reset_guc_interrupts(dev_priv);
-}
-
/**
* bdw_update_port_irq - update DE port interrupt
* @dev_priv: driver private
@@ -1385,12 +1170,13 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915, u32 master_ctl)
if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
iir = raw_reg_read(regs, GEN8_GT_IIR(2));
- if (likely(iir & (i915->pm_rps_events | i915->pm_guc_events))) {
+ if (likely(iir & (i915->gt_pm.rps.pm_events |
+ i915->gt_pm.rps.guc_events))) {
raw_reg_write(regs, GEN8_GT_IIR(2),
- iir & (i915->pm_rps_events |
- i915->pm_guc_events));
+ iir & (i915->gt_pm.rps.pm_events |
+ i915->gt_pm.rps.guc_events));
- gen6_rps_irq_handler(i915, iir);
+ intel_gt_pm_irq_handler(i915, iir);
gen9_guc_irq_handler(i915, iir);
}
}
@@ -1642,35 +1428,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
res1, res2);
}
-/* The RPS events need forcewake, so we add them to a work queue and mask their
- * IMR bits until the work is done. Other interrupts can be processed without
- * the work queue. */
-static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- if (pm_iir & dev_priv->pm_rps_events) {
- spin_lock(&dev_priv->irq_lock);
- gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
- if (rps->interrupts_enabled) {
- rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
- schedule_work(&rps->work);
- }
- spin_unlock(&dev_priv->irq_lock);
- }
-
- if (INTEL_GEN(dev_priv) >= 8)
- return;
-
- if (HAS_VEBOX(dev_priv)) {
- if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- notify_ring(dev_priv->engine[VECS]);
-
- if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
- }
-}
-
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
@@ -1878,6 +1635,19 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
}
}
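+
+/*
+ * On gen6/7 the PM interrupt register also carries non-RPS events: HSW
+ * delivers the VEBOX user interrupt and command parser errors through it.
+ * Those leftovers are handled here rather than in intel_gt_pm.
+ */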
+static void gen6_pm_extra_irq_handler(struct drm_i915_private *dev_priv,
+ u32 pm_iir)
+{
+ if (HAS_VEBOX(dev_priv)) {
+ if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+ notify_ring(dev_priv->engine[VECS]);
+
+ if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
+ DRM_DEBUG("Command parser error, pm_iir 0x%08x\n",
+ pm_iir);
+ }
+}
+
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -1952,7 +1722,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
if (gt_iir)
snb_gt_irq_handler(dev_priv, gt_iir);
if (pm_iir)
- gen6_rps_irq_handler(dev_priv, pm_iir);
+ intel_gt_pm_irq_handler(dev_priv, pm_iir);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -2401,7 +2171,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (pm_iir) {
I915_WRITE(GEN6_PMIIR, pm_iir);
ret = IRQ_HANDLED;
- gen6_rps_irq_handler(dev_priv, pm_iir);
+ intel_gt_pm_irq_handler(dev_priv, pm_iir);
+ gen6_pm_extra_irq_handler(dev_priv, pm_iir);
}
}
@@ -2694,7 +2465,7 @@ gen11_other_irq_handler(struct drm_i915_private * const i915,
const u8 instance, const u16 iir)
{
if (instance == OTHER_GTPM_INSTANCE)
- return gen6_rps_irq_handler(i915, iir);
+ return intel_gt_pm_irq_handler(i915, iir);
WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
instance, iir);
@@ -3604,11 +3375,11 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
*/
if (HAS_VEBOX(dev_priv)) {
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
- dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
+ dev_priv->gt_pm.ier |= PM_VEBOX_USER_INTERRUPT;
}
- dev_priv->pm_imr = 0xffffffff;
- GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
+ dev_priv->gt_pm.imr = 0xffffffff;
+ GEN3_IRQ_INIT(GEN6_PM, dev_priv->gt_pm.imr, pm_irqs);
}
}
@@ -3730,15 +3501,15 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
if (HAS_L3_DPF(dev_priv))
gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- dev_priv->pm_ier = 0x0;
- dev_priv->pm_imr = ~dev_priv->pm_ier;
+ dev_priv->gt_pm.ier = 0x0;
+ dev_priv->gt_pm.imr = ~dev_priv->gt_pm.ier;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
* is enabled/disabled. Same will be the case for GuC interrupts.
*/
- GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
+ GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->gt_pm.imr, dev_priv->gt_pm.ier);
GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
@@ -3835,8 +3606,8 @@ static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
* RPS interrupts will get enabled/disabled on demand when RPS itself
* is enabled/disabled.
*/
- dev_priv->pm_ier = 0x0;
- dev_priv->pm_imr = ~dev_priv->pm_ier;
+ dev_priv->gt_pm.ier = 0x0;
+ dev_priv->gt_pm.imr = ~dev_priv->gt_pm.ier;
I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
}
@@ -4219,7 +3990,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
void intel_irq_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
int i;
intel_hpd_init_work(dev_priv);
@@ -4228,30 +3998,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
for (i = 0; i < MAX_L3_SLICES; ++i)
dev_priv->l3_parity.remap_info[i] = NULL;
- if (HAS_GUC_SCHED(dev_priv))
- dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
-
- /* Let's track the enabled rps events */
- if (IS_VALLEYVIEW(dev_priv))
- /* WaGsvRC0ResidencyMethod:vlv */
- dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
- else
- dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
-
- rps->pm_intrmsk_mbz = 0;
-
- /*
- * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
- * if GEN6_PM_UP_EI_EXPIRED is masked.
- *
- * TODO: verify if this can be reproduced on VLV,CHV.
- */
- if (INTEL_GEN(dev_priv) <= 7)
- rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
-
- if (INTEL_GEN(dev_priv) >= 8)
- rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
-
if (IS_GEN2(dev_priv)) {
/* Gen2 doesn't have a hardware frame counter */
dev->max_vblank_count = 0;
@@ -1343,12 +1343,10 @@ bool gen11_reset_one_iir(struct drm_i915_private * const i915,
const unsigned int bit);
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
-void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
-void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
-void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
+
+bool gen11_reset_one_iir(struct drm_i915_private * const i915,
+ const unsigned int bank,
+ const unsigned int bit);
static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
u32 mask)
@@ -315,7 +315,7 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *i915, u8 val)
mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
GEN6_PM_RP_UP_THRESHOLD);
- mask &= i915->pm_rps_events;
+ mask &= rps->pm_events;
return gen6_sanitize_rps_pm_mask(i915, ~mask);
}
@@ -443,6 +443,145 @@ static int intel_set_rps(struct drm_i915_private *i915, u8 val)
return err;
}
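+
+/*
+ * These register helpers deliberately stop at gen10: gen11 moved the PM
+ * interrupt bits into the GEN11_GPM_WGBOXPERF registers, which are
+ * programmed directly by the irq postinstall/reset code.
+ */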
+static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
+{
+ return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
+}
+
+static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
+{
+ return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
+}
+
+static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
+{
+ return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
+}
+
+static void gen6_update_pm_irq(struct drm_i915_private *dev_priv,
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
+{
+ u32 new_val;
+
+ lockdep_assert_held(&dev_priv->irq_lock);
+ GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);
+
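+ /*
+ * A set bit in IMR masks that interrupt: recompute only the bits
+ * covered by interrupt_mask and leave the rest untouched.
+ */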
+ new_val = dev_priv->gt_pm.imr;
+ new_val &= ~interrupt_mask;
+ new_val |= ~enabled_irq_mask & interrupt_mask;
+
+ if (new_val != dev_priv->gt_pm.imr) {
+ dev_priv->gt_pm.imr = new_val;
+ I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->gt_pm.imr);
+ }
+}
+
+static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv,
+ u32 reset_mask)
+{
+ i915_reg_t reg = gen6_pm_iir(dev_priv);
+
+ lockdep_assert_held(&dev_priv->irq_lock);
+
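+ /*
+ * IIR can latch a second event behind the one currently visible, so
+ * write the mask twice to make sure both slots are cleared.
+ */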
+ I915_WRITE(reg, reset_mask);
+ I915_WRITE(reg, reset_mask);
+ POSTING_READ(reg);
+}
+
+static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv,
+ u32 enable_mask)
+{
+ lockdep_assert_held(&dev_priv->irq_lock);
+
+ dev_priv->gt_pm.ier |= enable_mask;
+ I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->gt_pm.ier);
+ gen6_unmask_pm_irq(dev_priv, enable_mask);
+ /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
+}
+
+static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
+ u32 disable_mask)
+{
+ lockdep_assert_held(&dev_priv->irq_lock);
+
+ dev_priv->gt_pm.ier &= ~disable_mask;
+ gen6_update_pm_irq(dev_priv, disable_mask, 0);
+ I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->gt_pm.ier);
+ /* a barrier is missing here, but we don't really need one */
+}
+
+static void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
+{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ gen6_reset_pm_iir(dev_priv, rps->pm_events);
+ rps->pm_iir = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+static void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
+{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+
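+ /* IIR is double-buffered, so keep clearing until it reads back zero. */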
+ while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
+ cpu_relax();
+ rps->pm_iir = 0;
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+static void enable_rps_interrupts(struct drm_i915_private *dev_priv)
+{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+ if (READ_ONCE(rps->interrupts_enabled))
+ return;
+
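+ /* The gen6_pm_* helpers used below do not map gen11's PM registers. */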
+ if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
+ return;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ WARN_ON_ONCE(rps->pm_iir);
+ WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & rps->pm_events);
+ rps->interrupts_enabled = true;
+ gen6_enable_pm_irq(dev_priv, rps->pm_events);
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+static void disable_rps_interrupts(struct drm_i915_private *dev_priv)
+{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+ if (!READ_ONCE(rps->interrupts_enabled))
+ return;
+
+ if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
+ return;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ rps->interrupts_enabled = false;
+
+ I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
+
+ gen6_disable_pm_irq(dev_priv, rps->pm_events);
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+ synchronize_irq(dev_priv->drm.irq);
+
+ /*
+ * Now that we will not be generating any more work, flush any
+ * outstanding tasks. As we are called on the RPS idle path,
+ * we will reset the GPU to minimum frequencies, so the current
+ * state of the worker can be discarded.
+ */
+ cancel_work_sync(&rps->work);
+ gen6_reset_rps_interrupts(dev_priv);
+}
+
static void vlv_c0_read(struct drm_i915_private *dev_priv,
struct intel_rps_ei *ei)
{
@@ -500,7 +639,7 @@ static void intel_rps_work(struct work_struct *work)
bool client_boost;
u32 pm_iir;
- pm_iir = xchg(&rps->pm_iir, 0) & ~i915->pm_rps_events;
+ pm_iir = xchg(&rps->pm_iir, 0) & ~rps->pm_events;
pm_iir |= vlv_wa_c0_ei(i915, pm_iir);
client_boost = atomic_read(&rps->num_waiters);
@@ -560,12 +699,27 @@ static void intel_rps_work(struct work_struct *work)
if (pm_iir) {
spin_lock_irq(&i915->irq_lock);
if (rps->interrupts_enabled)
- gen6_unmask_pm_irq(i915, i915->pm_rps_events);
+ gen6_unmask_pm_irq(i915, rps->pm_events);
spin_unlock_irq(&i915->irq_lock);
rps->last_adj = adj;
}
}
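+
+/*
+ * The RPS events need forcewake, so we add them to a work queue and mask their
+ * IMR bits until the work is done. Other interrupts can be processed without
+ * the work queue.
+ */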
+void intel_gt_pm_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
+{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+ if (pm_iir & rps->pm_events) {
+ spin_lock(&dev_priv->irq_lock);
+ gen6_mask_pm_irq(dev_priv, pm_iir & rps->pm_events);
+ if (rps->interrupts_enabled) {
+ rps->pm_iir |= pm_iir & rps->pm_events;
+ schedule_work(&rps->work);
+ }
+ spin_unlock(&dev_priv->irq_lock);
+ }
+}
+
void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
@@ -577,7 +731,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_PMINTRMSK,
gen6_rps_pm_mask(dev_priv, rps->cur_freq));
- gen6_enable_rps_interrupts(dev_priv);
+ enable_rps_interrupts(dev_priv);
memset(&rps->ei, 0, sizeof(rps->ei));
/*
@@ -605,7 +759,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
* our rpm wakeref. And then disable the interrupts to stop any
* further RPS reclocking whilst we are asleep.
*/
- gen6_disable_rps_interrupts(dev_priv);
+ disable_rps_interrupts(dev_priv);
mutex_lock(&rps->lock);
if (rps->enabled) {
@@ -2242,6 +2396,30 @@ void intel_init_gt_powersave(struct drm_i915_private *i915)
mutex_init(&rps->lock);
INIT_WORK(&rps->work, intel_rps_work);
+ if (HAS_GUC_SCHED(i915))
+ rps->guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
+
+ /* Let's track the enabled rps events */
+ if (IS_VALLEYVIEW(i915))
+ /* WaGsvRC0ResidencyMethod:vlv */
+ rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ else
+ rps->pm_events = GEN6_PM_RPS_EVENTS;
+
+ rps->pm_intrmsk_mbz = 0;
+
+ /*
+ * SNB,IVB,HSW can, while VLV,CHV may, hard hang on a looping
+ * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
+ *
+ * TODO: verify if this can be reproduced on VLV,CHV.
+ */
+ if (INTEL_GEN(i915) <= 7)
+ rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+ if (INTEL_GEN(i915) >= 8)
+ rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+
/*
* RPM depends on RC6 to save restore the GT HW context, so make RC6 a
* requirement.
@@ -2539,3 +2717,51 @@ int intel_freq_opcode(const struct drm_i915_private *i915, int val)
else
return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}
+
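+/* These wrappers require dev_priv->irq_lock to be held by the caller. */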
+void gen6_unmask_pm_irq(struct drm_i915_private *i915, u32 mask)
+{
+ gen6_update_pm_irq(i915, mask, mask);
+}
+
+void gen6_mask_pm_irq(struct drm_i915_private *i915, u32 mask)
+{
+ gen6_update_pm_irq(i915, mask, 0);
+}
+
+void gen9_reset_guc_interrupts(struct drm_i915_private *i915)
+{
+ assert_rpm_wakelock_held(i915);
+
+ spin_lock_irq(&i915->irq_lock);
+ gen6_reset_pm_iir(i915, i915->gt_pm.rps.guc_events);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ assert_rpm_wakelock_held(dev_priv);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (!dev_priv->guc.interrupts_enabled) {
+ WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
+ dev_priv->gt_pm.rps.guc_events);
+ dev_priv->guc.interrupts_enabled = true;
+ gen6_enable_pm_irq(dev_priv, dev_priv->gt_pm.rps.guc_events);
+ }
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ assert_rpm_wakelock_held(dev_priv);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->guc.interrupts_enabled = false;
+
+ gen6_disable_pm_irq(dev_priv, dev_priv->gt_pm.rps.guc_events);
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+ synchronize_irq(dev_priv->drm.irq);
+
+ gen9_reset_guc_interrupts(dev_priv);
+}
@@ -31,6 +31,9 @@ struct intel_rps {
/* PM interrupt bits that should never be masked */
u32 pm_intrmsk_mbz;
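+
+ /* RPS and GuC event bits delivered via the PM interrupt registers */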
+ u32 pm_events;
+ u32 guc_events;
+
/*
* Frequencies are stored in potentially platform dependent multiples.
* In other words, *_freq needs to be multiplied by X to be interesting.
@@ -82,6 +85,9 @@ struct intel_gt_pm {
struct intel_rps rps;
struct intel_rc6 rc6;
struct intel_llc_pstate llc_pstate;
+
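+ /* Shadow copies of the PM IMR/IER registers */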
+ u32 imr;
+ u32 ier;
};
void intel_gpu_ips_init(struct drm_i915_private *i915);
@@ -94,6 +100,8 @@ void intel_enable_gt_powersave(struct drm_i915_private *i915);
void intel_disable_gt_powersave(struct drm_i915_private *i915);
void intel_suspend_gt_powersave(struct drm_i915_private *i915);
+void intel_gt_pm_irq_handler(struct drm_i915_private *i915, u32 pm_iir);
+
void gen6_rps_busy(struct drm_i915_private *i915);
void gen6_rps_idle(struct drm_i915_private *i915);
void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
@@ -101,4 +109,7 @@ void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
int intel_gpu_freq(const struct drm_i915_private *i915, int val);
int intel_freq_opcode(const struct drm_i915_private *i915, int val);
+void gen6_unmask_pm_irq(struct drm_i915_private *i915, u32 mask);
+void gen6_mask_pm_irq(struct drm_i915_private *i915, u32 mask);
+
#endif /* __INTEL_GT_PM_H__ */
@@ -36,6 +36,7 @@
#include "i915_gem_render_state.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include "intel_gt_pm.h"
#include "intel_workarounds.h"
/* Rough estimate of the typical request size, performing a flush,
Since all the RPS handling code is in intel_gt_pm, move the irq
handlers there as well so that it is all contained within one file.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h         |   4 -
 drivers/gpu/drm/i915/i915_irq.c         | 314 +++---------------------
 drivers/gpu/drm/i915/intel_drv.h        |  10 +-
 drivers/gpu/drm/i915/intel_gt_pm.c      | 236 +++++++++++++++++-
 drivers/gpu/drm/i915/intel_gt_pm.h      |  11 +
 drivers/gpu/drm/i915/intel_ringbuffer.c |   1 +
 6 files changed, 277 insertions(+), 299 deletions(-)