@@ -563,6 +563,37 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
}
+static int try_preempt_reset(struct intel_engine_execlists *execlists)
+{
+ struct tasklet_struct * const t = &execlists->tasklet;
+ int err = -EBUSY;
+
+ if (tasklet_trylock(t)) {
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+ const unsigned int bit = I915_RESET_ENGINE + engine->id;
+ unsigned long *lock = &engine->i915->gpu_error.flags;
+
+ t->func(t->data);
+ if (!execlists_is_active(execlists,
+ EXECLISTS_ACTIVE_PREEMPT_TIMEOUT)) {
+ /* Nothing to do; the tasklet was just delayed. */
+ err = 0;
+ } else if (!test_and_set_bit(bit, lock)) {
+ tasklet_disable_nosync(t);
+ err = i915_reset_engine(engine, "preemption time out");
+ tasklet_enable(t);
+
+ clear_bit(bit, lock);
+ wake_up_bit(lock, bit);
+ }
+
+ tasklet_unlock(t);
+ }
+
+ return err;
+}
+
static enum hrtimer_restart preempt_timeout(struct hrtimer *hrtimer)
{
struct intel_engine_execlists *execlists =
@@ -585,7 +616,8 @@ static enum hrtimer_restart preempt_timeout(struct hrtimer *hrtimer)
intel_engine_dump(engine, &p, "%s\n", engine->name);
}
- queue_work(system_highpri_wq, &execlists->preempt_reset);
+ if (try_preempt_reset(execlists))
+ queue_work(system_highpri_wq, &execlists->preempt_reset);
return HRTIMER_NORESTART;
}
@@ -508,6 +508,116 @@ static int live_preempt_timeout(void *arg)
return err;
}
+static void __softirq_begin(void)
+{
+ local_bh_disable();
+}
+
+static void __softirq_end(void)
+{
+ local_bh_enable();
+}
+
+static void __hardirq_begin(void)
+{
+ local_irq_disable();
+}
+
+static void __hardirq_end(void)
+{
+ local_irq_enable();
+}
+
+static int live_preempt_reset(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+ struct spinner spin;
+ int err = -ENOMEM;
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ if (spinner_init(&spin, i915))
+ goto err_unlock;
+
+ ctx = kernel_context(i915);
+ if (!ctx)
+ goto err_spin;
+
+ for_each_engine(engine, i915, id) {
+ static const struct {
+ const char *name;
+ void (*critical_section_begin)(void);
+ void (*critical_section_end)(void);
+ } phases[] = {
+ { "softirq", __softirq_begin, __softirq_end },
+ { "hardirq", __hardirq_begin, __hardirq_end },
+ { }
+ };
+ struct tasklet_struct *t = &engine->execlists.tasklet;
+ const typeof(*phases) *p;
+
+ for (p = phases; p->name; p++) {
+ struct i915_request *rq;
+
+ rq = spinner_create_request(&spin, ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx;
+ }
+
+ i915_request_add(rq);
+ if (!wait_for_spinner(&spin, rq)) {
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_ctx;
+ }
+
+ /* Flush the tasklet so it is idle and try_preempt_reset() can claim it */
+ tasklet_schedule(t);
+ tasklet_kill(t);
+ GEM_BUG_ON(i915_request_completed(rq));
+
+ GEM_TRACE("%s triggering %s reset\n",
+ engine->name, p->name);
+ p->critical_section_begin();
+
+ mark_preemption_hang(&engine->execlists);
+ err = try_preempt_reset(&engine->execlists);
+
+ p->critical_section_end();
+ if (err) {
+ pr_err("Preempt %s reset failed on %s, tasklet state %lx\n",
+ p->name, engine->name, t->state);
+ spinner_end(&spin);
+ i915_gem_set_wedged(i915);
+ goto err_ctx;
+ }
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ err = -EIO;
+ goto err_ctx;
+ }
+ }
+ }
+
+ err = 0;
+err_ctx:
+ kernel_context_close(ctx);
+err_spin:
+ spinner_fini(&spin);
+err_unlock:
+ igt_flush_test(i915, I915_WAIT_LOCKED);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
@@ -515,6 +625,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
SUBTEST(live_preempt_timeout),
+ SUBTEST(live_preempt_reset),
};
if (!HAS_EXECLISTS(i915))
When circumstances allow, try resetting the engine directly from the
preemption timeout handler. As this is softirq context, we have to be
careful both not to sleep and not to spin on anything we may be
interrupting (e.g. the submission tasklet).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Jeff McGee <jeff.mcgee@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c           |  34 ++++++-
 drivers/gpu/drm/i915/selftests/intel_lrc.c | 111 +++++++++++++++++++++
 2 files changed, 144 insertions(+), 1 deletion(-)
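
[Review aid, not part of the commit message.] Below is a minimal,
self-contained sketch of the trylock-or-defer pattern try_preempt_reset()
is built around: take the tasklet lock only if nobody (possibly the very
tasklet we interrupted) is running it, otherwise punt the reset to process
context. The helper name, its parameters and the fallback work item are
illustrative; tasklet_trylock()/tasklet_unlock(),
tasklet_disable_nosync()/tasklet_enable() and queue_work() are the real
kernel primitives being relied upon.

#include <linux/interrupt.h>
#include <linux/workqueue.h>

/* Illustrative helper (hypothetical name): run @fn now only if it is safe. */
static int run_now_or_defer(struct tasklet_struct *t,
			    int (*fn)(void *data), void *data,
			    struct work_struct *fallback)
{
	int err = -EBUSY;

	/*
	 * tasklet_trylock() fails if the tasklet is currently executing.
	 * Since we may have interrupted that very tasklet, spinning here
	 * could deadlock, so give up immediately instead.
	 */
	if (tasklet_trylock(t)) {
		tasklet_disable_nosync(t);	/* block further runs, no wait */
		err = fn(data);
		tasklet_enable(t);
		tasklet_unlock(t);
	}

	if (err == -EBUSY)
		queue_work(system_highpri_wq, fallback); /* process context */

	return err;
}

In the patch, the role of fn is played by running the submission tasklet
directly and then calling i915_reset_engine(), and the fallback is the
existing preempt_reset worker queued from preempt_timeout().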