@@ -3374,7 +3374,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
seq_printf(m, "Context workarounds applied: %d\n", workarounds->ctx_wa_count);
for_each_engine(engine, dev_priv, id)
seq_printf(m, "HW whitelist count for %s: %d\n",
- engine->name, workarounds->hw_whitelist_count[id]);
+ engine->name, workarounds->whitelist_wa_count[id]);
for (i = 0; i < workarounds->ctx_wa_count; ++i) {
i915_reg_t addr;
u32 mask, value, read;
@@ -1985,7 +1985,8 @@ struct i915_workarounds {
struct i915_wa_reg gt_wa_reg[I915_MAX_MMIO_WA_REGS];
u32 gt_wa_count;
- u32 hw_whitelist_count[I915_NUM_ENGINES];
+ struct i915_wa_reg whitelist_wa_reg[I915_NUM_ENGINES][RING_MAX_NONPRIV_SLOTS];
+ u32 whitelist_wa_count[I915_NUM_ENGINES];
};
struct i915_virtual_gpu {
@@ -1514,9 +1514,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- ret = intel_whitelist_workarounds_apply(engine);
- if (ret)
- return ret;
+ intel_whitelist_workarounds_apply(engine);
return 0;
}
@@ -1994,6 +1992,10 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
if (ret)
return ret;
+ ret = intel_whitelist_workarounds_init(engine);
+ if (ret)
+ return ret;
+
ret = intel_init_workaround_bb(engine);
if (ret) {
/*
@@ -778,64 +778,64 @@ void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
}
}
-static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
- i915_reg_t reg)
+static int whitelist_wa_add(struct intel_engine_cs *engine,
+ i915_reg_t reg)
{
struct drm_i915_private *dev_priv = engine->i915;
struct i915_workarounds *wa = &dev_priv->workarounds;
- const uint32_t index = wa->hw_whitelist_count[engine->id];
+ const uint32_t index = wa->whitelist_wa_count[engine->id];
if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
return -EINVAL;
- I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
- i915_mmio_reg_offset(reg));
- wa->hw_whitelist_count[engine->id]++;
+ wa->whitelist_wa_reg[engine->id][index].addr =
+ RING_FORCE_TO_NONPRIV(engine->mmio_base, index);
+ wa->whitelist_wa_reg[engine->id][index].value = i915_mmio_reg_offset(reg);
+ wa->whitelist_wa_reg[engine->id][index].mask = 0xffffffff;
+
+ wa->whitelist_wa_count[engine->id]++;
return 0;
}
-static int gen9_whitelist_workarounds_apply(struct intel_engine_cs *engine)
-{
- int ret;
+#define WHITELIST_WA_REG(engine, reg) do { \
+ const int r = whitelist_wa_add(engine, reg); \
+ if (r) \
+ return r; \
+} while (0)
+static int gen9_whitelist_workarounds_init(struct intel_engine_cs *engine)
+{
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
- ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
- if (ret)
- return ret;
+ WHITELIST_WA_REG(engine, GEN9_CTX_PREEMPT_REG);
/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
- ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
- if (ret)
- return ret;
+ WHITELIST_WA_REG(engine, GEN8_CS_CHICKEN1);
/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
- ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
- if (ret)
- return ret;
+ WHITELIST_WA_REG(engine, GEN8_HDC_CHICKEN1);
return 0;
}
-static int skl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static int skl_whitelist_workarounds_init(struct intel_engine_cs *engine)
{
- int ret = gen9_whitelist_workarounds_apply(engine);
+ int ret = gen9_whitelist_workarounds_init(engine);
if (ret)
return ret;
/* WaDisableLSQCROPERFforOCL:skl */
- ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
- if (ret)
- return ret;
+ WHITELIST_WA_REG(engine, GEN8_L3SQCREG4);
return 0;
}
-static int bxt_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static int bxt_whitelist_workarounds_init(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- int ret = gen9_whitelist_workarounds_apply(engine);
+ int ret;
+ ret = gen9_whitelist_workarounds_init(engine);
if (ret)
return ret;
@@ -844,86 +844,75 @@ static int bxt_whitelist_workarounds_apply(struct intel_engine_cs *engine)
/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
/* WaDisableLSQCROPERFforOCL:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
- ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
- if (ret)
- return ret;
-
- ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
- if (ret)
- return ret;
+ WHITELIST_WA_REG(engine, GEN9_CS_DEBUG_MODE1);
+ WHITELIST_WA_REG(engine, GEN8_L3SQCREG4);
}
return 0;
}
-static int kbl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static int kbl_whitelist_workarounds_init(struct intel_engine_cs *engine)
{
- int ret = gen9_whitelist_workarounds_apply(engine);
+ int ret = gen9_whitelist_workarounds_init(engine);
if (ret)
return ret;
/* WaDisableLSQCROPERFforOCL:kbl */
- ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
- if (ret)
- return ret;
+ WHITELIST_WA_REG(engine, GEN8_L3SQCREG4);
return 0;
}
-static int glk_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static int glk_whitelist_workarounds_init(struct intel_engine_cs *engine)
{
- int ret = gen9_whitelist_workarounds_apply(engine);
+ int ret = gen9_whitelist_workarounds_init(engine);
if (ret)
return ret;
return 0;
}
-static int cfl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static int cfl_whitelist_workarounds_init(struct intel_engine_cs *engine)
{
- int ret = gen9_whitelist_workarounds_apply(engine);
+ int ret = gen9_whitelist_workarounds_init(engine);
if (ret)
return ret;
return 0;
}
-static int cnl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static int cnl_whitelist_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
-
/* WaEnablePreemptionGranularityControlByUMD:cnl */
- ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
- if (ret)
- return ret;
+ WHITELIST_WA_REG(engine, GEN8_CS_CHICKEN1);
return 0;
}
-int intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+int intel_whitelist_workarounds_init(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int err;
WARN_ON(engine->id != RCS);
- dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;
+ dev_priv->workarounds.whitelist_wa_count[engine->id] = 0;
if (INTEL_GEN(dev_priv) < 9) {
WARN(1, "No whitelisting in Gen%u\n", INTEL_GEN(dev_priv));
err = 0;
} else if (IS_SKYLAKE(dev_priv))
- err = skl_whitelist_workarounds_apply(engine);
+ err = skl_whitelist_workarounds_init(engine);
else if (IS_BROXTON(dev_priv))
- err = bxt_whitelist_workarounds_apply(engine);
+ err = bxt_whitelist_workarounds_init(engine);
else if (IS_KABYLAKE(dev_priv))
- err = kbl_whitelist_workarounds_apply(engine);
+ err = kbl_whitelist_workarounds_init(engine);
else if (IS_GEMINILAKE(dev_priv))
- err = glk_whitelist_workarounds_apply(engine);
+ err = glk_whitelist_workarounds_init(engine);
else if (IS_COFFEELAKE(dev_priv))
- err = cfl_whitelist_workarounds_apply(engine);
+ err = cfl_whitelist_workarounds_init(engine);
else if (IS_CANNONLAKE(dev_priv))
- err = cnl_whitelist_workarounds_apply(engine);
+ err = cnl_whitelist_workarounds_init(engine);
else {
MISSING_CASE(INTEL_GEN(dev_priv));
err = 0;
@@ -932,6 +921,18 @@ int intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
return err;
DRM_DEBUG_DRIVER("%s: Number of whitelist w/a: %d\n", engine->name,
- dev_priv->workarounds.hw_whitelist_count[engine->id]);
+ dev_priv->workarounds.whitelist_wa_count[engine->id]);
return 0;
}
+
+void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct i915_workarounds *w = &dev_priv->workarounds;
+ int i;
+
+ for (i = 0; i < w->whitelist_wa_count[engine->id]; i++) {
+ I915_WRITE(w->whitelist_wa_reg[engine->id][i].addr,
+ w->whitelist_wa_reg[engine->id][i].value);
+ }
+}
@@ -31,6 +31,7 @@
int intel_gt_workarounds_init_early(struct drm_i915_private *dev_priv);
void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
-int intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
+int intel_whitelist_workarounds_init(struct intel_engine_cs *engine);
+void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
#endif
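
For readers less familiar with the i915 workaround machinery, here is a minimal standalone sketch (outside the kernel tree) of the init/apply split this patch introduces: the whitelist is recorded into a per-engine table once, and then simply replayed into the RING_FORCE_TO_NONPRIV slots whenever the engine is (re)initialised. The types, register offsets and the mmio_write() helper below are illustrative stand-ins, not the driver's actual API.

/*
 * Minimal sketch of the init-once / apply-on-reinit pattern.
 * All offsets below are made up for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_NONPRIV_SLOTS 12

struct wa_reg {
	uint32_t addr;	/* nonpriv slot register offset */
	uint32_t value;	/* whitelisted register offset */
};

struct whitelist {
	struct wa_reg reg[MAX_NONPRIV_SLOTS];
	unsigned int count;
};

/* Stand-in for I915_WRITE(): just log the MMIO write. */
static void mmio_write(uint32_t addr, uint32_t value)
{
	printf("write 0x%08x <- 0x%08x\n", addr, value);
}

/* Record one whitelist entry; fails once all slots are used. */
static int whitelist_add(struct whitelist *w, uint32_t slot_base,
			 uint32_t reg_offset)
{
	if (w->count >= MAX_NONPRIV_SLOTS)
		return -1;

	w->reg[w->count].addr = slot_base + 4 * w->count;
	w->reg[w->count].value = reg_offset;
	w->count++;
	return 0;
}

/* "init": build the table once, e.g. at engine setup time. */
static int whitelist_init(struct whitelist *w)
{
	w->count = 0;
	if (whitelist_add(w, 0x24d0, 0x2580))	/* made-up offsets */
		return -1;
	if (whitelist_add(w, 0x24d0, 0x2090))
		return -1;
	return 0;
}

/* "apply": replay the table, e.g. after every GPU reset/resume. */
static void whitelist_apply(const struct whitelist *w)
{
	unsigned int i;

	for (i = 0; i < w->count; i++)
		mmio_write(w->reg[i].addr, w->reg[i].value);
}

int main(void)
{
	struct whitelist w;

	if (whitelist_init(&w))
		return 1;
	whitelist_apply(&w);	/* can be called again after each reset */
	return 0;
}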