@@ -1991,6 +1991,8 @@ struct i915_wa_reg {
u8 since;
u8 until;
+ i915_reg_t whitelist_addr;
+
i915_reg_t addr;
u32 mask;
u32 value;
@@ -33,6 +33,10 @@
.name = (wa), \
.type = I915_WA_TYPE_GT
+#define WA_WHITELIST(wa) \
+ .name = (wa), \
+ .type = I915_WA_TYPE_WHITELIST
+
#define ALL_REVS \
.since = 0, \
.until = REVID_FOREVER
@@ -75,6 +79,9 @@
.value = MASK(m, v), \
.is_masked_reg = true
+#define WHITELIST(reg) \
+ .whitelist_addr = reg
+
static struct i915_wa_reg gen8_ctx_was[] = {
{ WA_CTX(""),
ALL_REVS, REG(INSTPM),
@@ -861,160 +868,154 @@ void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("Number of GT specific w/a: %u\n", total_count);
}
-static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
- i915_reg_t reg)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct i915_workarounds *wa = &dev_priv->workarounds;
- const uint32_t index = wa->hw_whitelist_count[engine->id];
-
- if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
- return -EINVAL;
+static struct i915_wa_reg gen9_whitelist_was[] = {
+ { WA_WHITELIST("WaVFEStateAfterPipeControlwithMediaStateClear"),
+ ALL_REVS, WHITELIST(GEN9_CTX_PREEMPT_REG) },
- I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
- i915_mmio_reg_offset(reg));
- wa->hw_whitelist_count[engine->id]++;
+ { WA_WHITELIST("WaEnablePreemptionGranularityControlByUMD"),
+ ALL_REVS, WHITELIST(GEN8_CS_CHICKEN1) },
- return 0;
-}
-
-static int gen9_whitelist_workarounds_apply(struct intel_engine_cs *engine)
-{
- int ret;
+ { WA_WHITELIST("WaAllowUMDToModifyHDCChicken1"),
+ ALL_REVS, WHITELIST(GEN8_HDC_CHICKEN1) },
+};
- /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
- ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
- if (ret)
- return ret;
+static struct i915_wa_reg skl_whitelist_was[] = {
+ { WA_WHITELIST("WaDisableLSQCROPERFforOCL"),
+ ALL_REVS, WHITELIST(GEN8_L3SQCREG4) },
+};
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
- ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
- if (ret)
- return ret;
+static struct i915_wa_reg bxt_whitelist_was[] = {
+ { WA_WHITELIST("WaDisableObjectLevelPreemptionForTrifanOrPolygon +"
+ "WaDisableObjectLevelPreemptionForInstancedDraw +"
+ "WaDisableObjectLevelPreemtionForInstanceId +"
+ "WaDisableLSQCROPERFforOCL"),
+ REVS(0, BXT_REVID_A1), WHITELIST(GEN9_CS_DEBUG_MODE1) },
- /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
- ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
- if (ret)
- return ret;
+ { WA_WHITELIST("WaDisableObjectLevelPreemptionForTrifanOrPolygon +"
+ "WaDisableObjectLevelPreemptionForInstancedDraw +"
+ "WaDisableObjectLevelPreemtionForInstanceId +"
+ "WaDisableLSQCROPERFforOCL"),
+ REVS(0, BXT_REVID_A1), WHITELIST(GEN8_L3SQCREG4) },
+};
- return 0;
-}
+static struct i915_wa_reg kbl_whitelist_was[] = {
+ { WA_WHITELIST("WaDisableLSQCROPERFforOCL"),
+ ALL_REVS, WHITELIST(GEN8_L3SQCREG4) },
+};
-static int skl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
-{
- int ret = gen9_whitelist_workarounds_apply(engine);
- if (ret)
- return ret;
+static struct i915_wa_reg cnl_whitelist_was[] = {
+ { WA_WHITELIST("WaEnablePreemptionGranularityControlByUMD"),
+ ALL_REVS, WHITELIST(GEN8_CS_CHICKEN1) },
+};
- /* WaDisableLSQCROPERFforOCL:skl */
- ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
- if (ret)
- return ret;
+static const struct i915_wa_reg_table skl_whitelist_wa_tbl[] = {
+ { gen9_whitelist_was, ARRAY_SIZE(gen9_whitelist_was) },
+ { skl_whitelist_was, ARRAY_SIZE(skl_whitelist_was) },
+};
- return 0;
-}
+static const struct i915_wa_reg_table bxt_whitelist_wa_tbl[] = {
+ { gen9_whitelist_was, ARRAY_SIZE(gen9_whitelist_was) },
+ { bxt_whitelist_was, ARRAY_SIZE(bxt_whitelist_was) },
+};
-static int bxt_whitelist_workarounds_apply(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
+static const struct i915_wa_reg_table kbl_whitelist_wa_tbl[] = {
+ { gen9_whitelist_was, ARRAY_SIZE(gen9_whitelist_was) },
+ { kbl_whitelist_was, ARRAY_SIZE(kbl_whitelist_was) },
+};
- int ret = gen9_whitelist_workarounds_apply(engine);
- if (ret)
- return ret;
+static const struct i915_wa_reg_table glk_whitelist_wa_tbl[] = {
+ { gen9_whitelist_was, ARRAY_SIZE(gen9_whitelist_was) },
+};
- /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
- /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
- /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
- /* WaDisableLSQCROPERFforOCL:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
- ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
- if (ret)
- return ret;
-
- ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
- if (ret)
- return ret;
- }
+static const struct i915_wa_reg_table cfl_whitelist_wa_tbl[] = {
+ { gen9_whitelist_was, ARRAY_SIZE(gen9_whitelist_was) },
+};
- return 0;
-}
+static const struct i915_wa_reg_table cnl_whitelist_wa_tbl[] = {
+ { cnl_whitelist_was, ARRAY_SIZE(cnl_whitelist_was) },
+};
-static int kbl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+void intel_whitelist_workarounds_get(struct drm_i915_private *dev_priv,
+ const struct i915_wa_reg_table **wa_table,
+ uint *table_count)
{
- int ret = gen9_whitelist_workarounds_apply(engine);
- if (ret)
- return ret;
-
- /* WaDisableLSQCROPERFforOCL:kbl */
- ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
- if (ret)
- return ret;
+ *wa_table = NULL;
+ *table_count = 0;
- return 0;
+ if (INTEL_GEN(dev_priv) < 9)
+ return;
+ else if (IS_SKYLAKE(dev_priv)) {
+ *wa_table = skl_whitelist_wa_tbl;
+ *table_count = ARRAY_SIZE(skl_whitelist_wa_tbl);
+ } else if (IS_BROXTON(dev_priv)) {
+ *wa_table = bxt_whitelist_wa_tbl;
+ *table_count = ARRAY_SIZE(bxt_whitelist_wa_tbl);
+ } else if (IS_KABYLAKE(dev_priv)) {
+ *wa_table = kbl_whitelist_wa_tbl;
+ *table_count = ARRAY_SIZE(kbl_whitelist_wa_tbl);
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ *wa_table = glk_whitelist_wa_tbl;
+ *table_count = ARRAY_SIZE(glk_whitelist_wa_tbl);
+ } else if (IS_COFFEELAKE(dev_priv)) {
+ *wa_table = cfl_whitelist_wa_tbl;
+ *table_count = ARRAY_SIZE(cfl_whitelist_wa_tbl);
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ *wa_table = cnl_whitelist_wa_tbl;
+ *table_count = ARRAY_SIZE(cnl_whitelist_wa_tbl);
+ } else {
+ MISSING_CASE(INTEL_GEN(dev_priv));
+ return;
+ }
}
-static int glk_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+int intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
{
- int ret = gen9_whitelist_workarounds_apply(engine);
- if (ret)
- return ret;
-
- return 0;
-}
+ struct drm_i915_private *dev_priv = engine->i915;
+ const struct i915_wa_reg_table *wa_table;
+ uint table_count, total_count = 0;
+ int i, j;
-static int cfl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
-{
- int ret = gen9_whitelist_workarounds_apply(engine);
- if (ret)
- return ret;
+ if (INTEL_GEN(dev_priv) < 9) {
+ WARN(1, "No whitelisting in Gen%u\n", INTEL_GEN(dev_priv));
+ return -EINVAL;
+ }
- return 0;
-}
+ intel_whitelist_workarounds_get(dev_priv, &wa_table, &table_count);
-static int cnl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
-{
- int ret;
+ for (i = 0; i < table_count; i++) {
+ struct i915_wa_reg *wa = wa_table[i].table;
- /* WaEnablePreemptionGranularityControlByUMD:cnl */
- ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
- if (ret)
- return ret;
+ for (j = 0; j < wa_table[i].count; j++) {
+ wa[j].applied =
+ IS_REVID(dev_priv, wa[j].since, wa[j].until);
- return 0;
-}
+ if (wa[j].applied && wa[j].pre_hook)
+ wa[j].applied = wa[j].pre_hook(dev_priv, &wa[j]);
-int intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int err;
+ if (wa[j].applied) {
+ if (WARN_ON(total_count >= RING_MAX_NONPRIV_SLOTS)) {
+ wa[j].applied = false;
+ return -EINVAL;
+ }
- WARN_ON(engine->id != RCS);
+ /* Cache the translation of the whitelist register into its FORCE_TO_NONPRIV slot */
+ wa[j].addr =
+ RING_FORCE_TO_NONPRIV(engine->mmio_base,
+ total_count++);
+ wa[j].value =
+ i915_mmio_reg_offset(wa[j].whitelist_addr);
+ wa[j].mask = 0xffffffff;
- dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;
+ I915_WRITE(wa[j].addr, wa[j].value);
+ }
- if (INTEL_GEN(dev_priv) < 9) {
- WARN(1, "No whitelisting in Gen%u\n", INTEL_GEN(dev_priv));
- err = 0;
- } else if (IS_SKYLAKE(dev_priv))
- err = skl_whitelist_workarounds_apply(engine);
- else if (IS_BROXTON(dev_priv))
- err = bxt_whitelist_workarounds_apply(engine);
- else if (IS_KABYLAKE(dev_priv))
- err = kbl_whitelist_workarounds_apply(engine);
- else if (IS_GEMINILAKE(dev_priv))
- err = glk_whitelist_workarounds_apply(engine);
- else if (IS_COFFEELAKE(dev_priv))
- err = cfl_whitelist_workarounds_apply(engine);
- else if (IS_CANNONLAKE(dev_priv))
- err = cnl_whitelist_workarounds_apply(engine);
- else {
- MISSING_CASE(INTEL_GEN(dev_priv));
- err = 0;
+ GEM_BUG_ON(wa[j].post_hook);
+ }
}
- if (err)
- return err;
- DRM_DEBUG_DRIVER("%s: Number of whitelist w/a: %d\n", engine->name,
- dev_priv->workarounds.hw_whitelist_count[engine->id]);
+ dev_priv->workarounds.hw_whitelist_count[engine->id] = total_count;
+ DRM_DEBUG_DRIVER("%s: Number of whitelist w/a: %u\n", engine->name,
+ total_count);
+
return 0;
}
@@ -35,6 +35,9 @@ void intel_gt_workarounds_get(struct drm_i915_private *dev_priv,
uint *table_count);
void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
+void intel_whitelist_workarounds_get(struct drm_i915_private *dev_priv,
+ const struct i915_wa_reg_table **wa_table,
+ uint *table_count);
int intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
#endif
This is for WAs that whitelist a register. v2: Warn about olden GENs in the apply, not in the get function Suggested-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> Signed-off-by: Oscar Mateo <oscar.mateo@intel.com> Cc: Chris Wilson <chris@chris-wilson.co.uk> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com> --- drivers/gpu/drm/i915/i915_drv.h | 2 + drivers/gpu/drm/i915/intel_workarounds.c | 251 ++++++++++++++++--------------- drivers/gpu/drm/i915/intel_workarounds.h | 3 + 3 files changed, 131 insertions(+), 125 deletions(-)