Read the engine workarounds back using the GPU after loading the initial
context state to verify that we are setting them correctly, and bail if
it fails.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c               |   6 +
 drivers/gpu/drm/i915/intel_workarounds.c      | 120 ++++++++++++++++++
 drivers/gpu/drm/i915/intel_workarounds.h      |   2 +
 .../drm/i915/selftests/intel_workarounds.c    |  53 +-------
 4 files changed, 134 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4717,6 +4717,12 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
i915_request_add(rq);
if (err)
goto err_active;
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) &&
+ intel_engine_verify_workarounds(engine, "load")) {
+ err = -EIO;
+ goto err_active;
+ }
}

/* Flush the default context image to memory, and enable powersaving. */
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -1259,6 +1259,126 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
wa_list_apply(engine->uncore, &engine->wa_list);
}

+static struct i915_vma *
+create_scratch(struct i915_address_space *vm, int count)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ unsigned int size;
+ int err;
+
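+	/* One u32 result slot per value to capture, rounded up to a whole page. */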
+ size = round_up(count * 4, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vm->i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
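+	/* The scratch may live in the GGTT or a ppGTT, so choose the pin type. */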
+ err = i915_vma_pin(vma, 0, 0,
+ i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
+ if (err)
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static int
+wa_list_srm(struct i915_request *rq,
+ const struct i915_wa_list *wal,
+ struct i915_vma *vma)
+{
+ const struct i915_wa *wa;
+ u32 srm, *cs;
+ int i;
+
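+	/*
+	 * On gen8+, MI_STORE_REGISTER_MEM takes a 64-bit address and so is
+	 * one dword longer; the length field lives in the low bits of the
+	 * header, so bumping the opcode by one selects the longer form.
+	 */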
+ srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+ if (INTEL_GEN(rq->i915) >= 8)
+ srm++;
+
+ cs = intel_ring_begin(rq, 4 * wal->count);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
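+	/*
+	 * Emit a 4-dword packet per register: SRM header, mmio offset and
+	 * the destination address. The trailing zero is the upper address
+	 * dword on gen8+ and decodes as MI_NOOP on older gens.
+	 */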
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ *cs++ = srm;
+ *cs++ = i915_mmio_reg_offset(wa->reg);
+ *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
+ *cs++ = 0;
+ }
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int engine_wa_list_verify(struct intel_engine_cs *engine,
+ const struct i915_wa_list * const wal,
+ const char *from)
+{
+ const struct i915_wa *wa;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ unsigned int i;
+ u32 *results;
+ int err;
+
+ if (!wal->count)
+ return 0;
+
+ vma = create_scratch(&engine->i915->ggtt.vm, wal->count);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ rq = i915_request_alloc(engine, engine->kernel_context->gem_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_vma;
+ }
+
+ err = wa_list_srm(rq, wal, vma);
+ if (err)
+ goto err_vma;
+
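+	/* Submit, then briefly wait for the GPU to execute the readbacks. */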
+ i915_request_add(rq);
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ err = -ETIME;
+ goto err_vma;
+ }
+
+ results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(results)) {
+ err = PTR_ERR(results);
+ goto err_vma;
+ }
+
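+	/* Compare each value the GPU saw with the value we programmed. */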
+ err = 0;
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ if (!wa_verify(wa, results[i], wal->name, from))
+ err = -ENXIO;
+
+ i915_gem_object_unpin_map(vma->obj);
+
+err_vma:
+ i915_vma_unpin(vma);
+ i915_vma_put(vma);
+ return err;
+}
+
+int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
+ const char *from)
+{
+ return engine_wa_list_verify(engine, &engine->wa_list, from);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_workarounds.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -30,5 +30,7 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine);

void intel_engine_init_workarounds(struct intel_engine_cs *engine);
void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
+int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
+ const char *from);

#endif
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -340,49 +340,6 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
return err;
}

-static struct i915_vma *create_scratch(struct i915_gem_context *ctx)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- void *ptr;
- int err;
-
- obj = i915_gem_object_create_internal(ctx->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
-
- ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(ptr)) {
- err = PTR_ERR(ptr);
- goto err_obj;
- }
- memset(ptr, 0xc5, PAGE_SIZE);
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
- vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto err_obj;
-
- err = i915_gem_object_set_to_cpu_domain(obj, false);
- if (err)
- goto err_obj;
-
- return vma;
-
-err_obj:
- i915_gem_object_put(obj);
- return ERR_PTR(err);
-}
-
static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
struct drm_i915_gem_object *obj;
@@ -475,7 +432,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
int err = 0, i, v;
u32 *cs, *results;

- scratch = create_scratch(ctx);
+ scratch = create_scratch(&ctx->ppgtt->vm, 2 * ARRAY_SIZE(values) + 1);
if (IS_ERR(scratch))
return PTR_ERR(scratch);

@@ -752,9 +709,11 @@ static bool verify_gt_engine_wa(struct drm_i915_private *i915,

ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

- for_each_engine(engine, i915, id)
- ok &= wa_list_verify(engine->uncore,
- &lists->engine[id].wa_list, str);
+ for_each_engine(engine, i915, id) {
+ ok &= engine_wa_list_verify(engine,
+ &lists->engine[id].wa_list,
+ str) == 0;
+ }

return ok;
}
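
Aside for readers: wa_verify(), which engine_wa_list_verify() calls on each
value read back, is the pre-existing per-register check in
intel_workarounds.c and is not touched by this diff. As a rough sketch of
the comparison each readback goes through -- reconstructed, so treat the
names and the exact message format as illustrative rather than exact:

static bool wa_verify(const struct i915_wa *wa, u32 cur,
		      const char *name, const char *from)
{
	/* Only the bits covered by this workaround's mask are significant. */
	if ((cur ^ wa->val) & wa->mask) {
		DRM_ERROR("%s workaround lost on %s! (%x=%x/%x)\n",
			  name, from, i915_mmio_reg_offset(wa->reg),
			  cur, cur & wa->mask);
		return false;
	}

	return true;
}

Any masked-bit mismatch makes engine_wa_list_verify() fail with -ENXIO,
which the new hook in __intel_engines_record_defaults() turns into the
-EIO bail-out at driver load.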