@@ -1729,6 +1729,115 @@ void bdw_disable_vblank(struct drm_crtc *_crtc)
schedule_work(&display->irq.vblank_dc_work);
}
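+/* DPINVGTT fault status bits for all the planes on the given pipe */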
+static u32 vlv_dpinvgtt_pipe_fault_mask(enum pipe pipe)
+{
+ switch (pipe) {
+ case PIPE_A:
+ return SPRITEB_INVALID_GTT_STATUS |
+ SPRITEA_INVALID_GTT_STATUS |
+ PLANEA_INVALID_GTT_STATUS |
+ CURSORA_INVALID_GTT_STATUS;
+ case PIPE_B:
+ return SPRITED_INVALID_GTT_STATUS |
+ SPRITEC_INVALID_GTT_STATUS |
+ PLANEB_INVALID_GTT_STATUS |
+ CURSORB_INVALID_GTT_STATUS;
+ case PIPE_C:
+ return SPRITEF_INVALID_GTT_STATUS |
+ SPRITEE_INVALID_GTT_STATUS |
+ PLANEC_INVALID_GTT_STATUS |
+ CURSORC_INVALID_GTT_STATUS;
+ default:
+ return 0;
+ }
+}
+
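+/*
+ * Map each DPINVGTT fault status bit to the plane it belongs to, so
+ * intel_pipe_fault_irq_handler() can report which plane faulted.
+ */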
+static const struct pipe_fault_handler vlv_pipe_fault_handlers[] = {
+ { .fault = SPRITEB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
+ { .fault = SPRITEA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = PLANEA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = CURSORA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ { .fault = SPRITED_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
+ { .fault = SPRITEC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = PLANEB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = CURSORB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ { .fault = SPRITEF_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
+ { .fault = SPRITEE_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = PLANEC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = CURSORC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ {}
+};
+
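+/*
+ * Ack the DPINVGTT faults and return the newly raised status bits
+ * in *dpinvgtt so they can be handled after the ack phase.
+ */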
+static void vlv_page_table_error_irq_ack(struct drm_i915_private *i915, u32 *dpinvgtt)
+{
+ u32 status, enable, tmp;
+
+ tmp = intel_uncore_read(&i915->uncore, DPINVGTT);
+
+ enable = tmp >> 16;
+ status = tmp & 0xffff;
+
+ /*
+ * Despite what the docs claim, the status bits seem to get
+	 * stuck permanently (similar to the old PGTBL_ER register), so
+ * we have to disable and ignore them once set. They do get
+ * reset if the display power well goes down, so no need to
+ * track the enable mask explicitly.
+ */
+ *dpinvgtt = status & enable;
+ enable &= ~status;
+
+ /* customary ack+disable then re-enable to guarantee an edge */
+ intel_uncore_write(&i915->uncore, DPINVGTT, status);
+ intel_uncore_write(&i915->uncore, DPINVGTT, enable << 16);
+}
+
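+/* Dispatch the latched DPINVGTT faults to the per-pipe fault handlers */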
+static void vlv_page_table_error_irq_handler(struct drm_i915_private *i915, u32 dpinvgtt)
+{
+ struct intel_display *display = &i915->display;
+ enum pipe pipe;
+
+ for_each_pipe(i915, pipe) {
+ u32 fault_errors;
+
+ fault_errors = dpinvgtt & vlv_dpinvgtt_pipe_fault_mask(pipe);
+ if (fault_errors)
+ intel_pipe_fault_irq_handler(display, vlv_pipe_fault_handlers,
+ pipe, fault_errors);
+ }
+}
+
+void vlv_display_error_irq_ack(struct drm_i915_private *dev_priv,
+ u32 *eir, u32 *dpinvgtt)
+{
+ u32 emr;
+
+ *eir = intel_uncore_read(&dev_priv->uncore, VLV_EIR);
+
+ if (*eir & VLV_ERROR_PAGE_TABLE)
+ vlv_page_table_error_irq_ack(dev_priv, dpinvgtt);
+
+ intel_uncore_write(&dev_priv->uncore, VLV_EIR, *eir);
+
+ /*
+ * Toggle all EMR bits to make sure we get an edge
+ * in the ISR master error bit if we don't clear
+ * all the EIR bits.
+ */
+ emr = intel_uncore_read(&dev_priv->uncore, VLV_EMR);
+ intel_uncore_write(&dev_priv->uncore, VLV_EMR, 0xffffffff);
+ intel_uncore_write(&dev_priv->uncore, VLV_EMR, emr);
+}
+
+void vlv_display_error_irq_handler(struct drm_i915_private *dev_priv,
+ u32 eir, u32 dpinvgtt)
+{
+ drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);
+
+ if (eir & VLV_ERROR_PAGE_TABLE)
+ vlv_page_table_error_irq_handler(dev_priv, dpinvgtt);
+}
+
static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -1738,6 +1847,8 @@ static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
else
intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
+	gen2_error_reset(uncore, VLV_ERROR_REGS);
+
i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT(dev_priv), 0, 0);
@@ -1764,6 +1875,12 @@ void i9xx_display_irq_reset(struct drm_i915_private *i915)
i9xx_pipestat_irq_reset(i915);
}
+static u32 vlv_error_mask(void)
+{
+ /* TODO enable other errors too? */
+ return VLV_ERROR_PAGE_TABLE;
+}
+
void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -1775,6 +1892,17 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
if (!dev_priv->display.irq.vlv_display_irqs_enabled)
return;
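+	/*
+	 * Ack any stale DPINVGTT fault status and enable all the
+	 * invalid GTT fault interrupts.
+	 */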
+ if (IS_CHERRYVIEW(dev_priv))
+ intel_uncore_write(uncore, DPINVGTT,
+ DPINVGTT_STATUS_MASK_CHV |
+ DPINVGTT_EN_MASK_CHV);
+ else
+ intel_uncore_write(uncore, DPINVGTT,
+ DPINVGTT_STATUS_MASK_VLV |
+ DPINVGTT_EN_MASK_VLV);
+
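+	/* unmask only the errors we handle (just page table errors for now) */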
+	gen2_error_init(uncore, VLV_ERROR_REGS, ~vlv_error_mask());
+
pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
@@ -1785,7 +1913,8 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_LPE_PIPE_A_INTERRUPT |
- I915_LPE_PIPE_B_INTERRUPT;
+ I915_LPE_PIPE_B_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT;
if (IS_CHERRYVIEW(dev_priv))
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
@@ -75,6 +75,9 @@ void i915_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_
void i965_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
void valleyview_pipestat_irq_handler(struct drm_i915_private *i915, u32 pipe_stats[I915_MAX_PIPES]);
+void vlv_display_error_irq_ack(struct drm_i915_private *i915, u32 *eir, u32 *dpinvgtt);
+void vlv_display_error_irq_handler(struct drm_i915_private *i915, u32 eir, u32 dpinvgtt);
+
void intel_display_irq_init(struct drm_i915_private *i915);
void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable);
@@ -241,6 +241,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
do {
u32 iir, gt_iir, pm_iir;
+ u32 eir = 0, dpinvgtt = 0;
u32 pipe_stats[I915_MAX_PIPES] = {};
u32 hotplug_status = 0;
u32 ier = 0;
@@ -278,6 +279,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ vlv_display_error_irq_ack(dev_priv, &eir, &dpinvgtt);
+
/* Call regardless, as some status bits might not be
* signalled in IIR */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
@@ -304,6 +308,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ vlv_display_error_irq_handler(dev_priv, eir, dpinvgtt);
+
valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
} while (0);
@@ -328,6 +335,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
do {
u32 master_ctl, iir;
+ u32 eir = 0, dpinvgtt = 0;
u32 pipe_stats[I915_MAX_PIPES] = {};
u32 hotplug_status = 0;
u32 ier = 0;
@@ -361,6 +369,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ vlv_display_error_irq_ack(dev_priv, &eir, &dpinvgtt);
+
/* Call regardless, as some status bits might not be
* signalled in IIR */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
@@ -383,6 +394,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ vlv_display_error_irq_handler(dev_priv, eir, dpinvgtt);
+
valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
} while (0);
@@ -475,6 +475,16 @@
#define GEN2_ERROR_REGS I915_ERROR_REGS(EMR, EIR)
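+/* VLV/CHV display error registers */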
+#define VLV_EIR _MMIO(VLV_DISPLAY_BASE + 0x20b0)
+#define VLV_EMR _MMIO(VLV_DISPLAY_BASE + 0x20b4)
+#define VLV_ESR _MMIO(VLV_DISPLAY_BASE + 0x20b8)
+#define VLV_ERROR_GUNIT_TLB_DATA (1 << 6)
+#define VLV_ERROR_GUNIT_TLB_PTE (1 << 5)
+#define VLV_ERROR_PAGE_TABLE (1 << 4)
+#define VLV_ERROR_CLAIM (1 << 0)
+
+#define VLV_ERROR_REGS I915_ERROR_REGS(VLV_EMR, VLV_EIR)
+
#define INSTPM _MMIO(0x20c0)
#define INSTPM_SELF_EN (1 << 12) /* 915GM only */
#define INSTPM_AGPBUSY_INT_EN (1 << 11) /* gen3: when disabled, pending interrupts
@@ -51,6 +51,29 @@ void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
intel_uncore_posting_read(uncore, regs.imr);
}
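+/* Mask all error interrupts and clear out any stale EIR status */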
+void gen2_error_reset(struct intel_uncore *uncore, struct i915_error_regs regs)
+{
+ intel_uncore_write(uncore, regs.emr, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.emr);
+
+ intel_uncore_write(uncore, regs.eir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.eir);
+ intel_uncore_write(uncore, regs.eir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.eir);
+}
+
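+/* Clear any stale EIR status, then program the caller's error mask into EMR */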
+void gen2_error_init(struct intel_uncore *uncore, struct i915_error_regs regs,
+ u32 emr_val)
+{
+ intel_uncore_write(uncore, regs.eir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.eir);
+ intel_uncore_write(uncore, regs.eir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.eir);
+
+ intel_uncore_write(uncore, regs.emr, emr_val);
+ intel_uncore_posting_read(uncore, regs.emr);
+}
+
bool intel_irqs_enabled(struct xe_device *xe)
{
return atomic_read(&xe->irq.enabled);