drm/i915/selftests: Exercise lite-restore on top of a semaphore

Message ID: 20200328092206.20748-1-chris@chris-wilson.co.uk (mailing list archive)
State: New, archived
Series: drm/i915/selftests: Exercise lite-restore on top of a semaphore

Commit Message

Chris Wilson March 28, 2020, 9:22 a.m. UTC
Exercise issuing a lite-restore (a continuation of the same
active context with a new request) while the HW is blocked
on a semaphore. We expect the HW to ACK the lite-restore
immediately, on its next failed semaphore poll.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/selftest_lrc.c | 175 +++++++++++++++++++++++++
 1 file changed, 175 insertions(+)
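
For readers who have not followed the preempt-to-busy flow this test leans on, here is a rough user-space analogy of the handshake the selftest drives, written with plain C11 atomics and pthreads. It is only a sketch of the expected ordering, not driver code: the variable names (slot, hws_preempt, elsp_pending, csb_ack) are illustrative stand-ins for the HWS scratch dword, ring_set_paused()/I915_GEM_HWS_PREEMPT, the second ELSP write and the ACK handled by process_csb(), and do not correspond to driver symbols.

/*
 * User-space analogy of the lite-restore-on-semaphore handshake.
 * Build with: cc -pthread analogy.c (hypothetical file name).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int slot;         /* the inner MI_SEMAPHORE_WAIT target          */
static atomic_int hws_preempt;  /* stands in for ring_set_paused()/HWS_PREEMPT */
static atomic_int elsp_pending; /* a new ELSP write awaiting the CS ACK        */
static atomic_int csb_ack;      /* the ACK the CPU would see in process_csb()  */
static atomic_int rq2_done;

/* "CS" thread: models rq1 followed by the emitted semaphore waits. */
static void *cs_thread(void *arg)
{
	(void)arg;
	atomic_store(&slot, 1);			/* MI_STORE_DWORD_IMM          */
	while (atomic_load(&slot) != 0)		/* inner wait: poll until == 0 */
		;
	/* rq1 is now complete; spin in the preempt-to-busy busywait */
	while (atomic_load(&hws_preempt)) {
		if (atomic_load(&elsp_pending)) {  /* the lite-restore arrives */
			atomic_store(&csb_ack, 1); /* ACK on this failed poll */
			atomic_store(&elsp_pending, 0);
		}
	}
	atomic_store(&rq2_done, 1);	/* ring unpaused: rq2 runs to completion */
	return NULL;
}

int main(void)
{
	pthread_t cs;

	pthread_create(&cs, NULL, cs_thread, NULL);

	while (!atomic_load(&slot))		/* wait_for(READ_ONCE(*slot), 50) */
		;
	atomic_store(&hws_preempt, 1);		/* ring_set_paused(engine, 1)     */
	atomic_store(&slot, 0);			/* WRITE_ONCE(*slot, 0)           */

	atomic_store(&elsp_pending, 1);		/* submit rq2 on the same context */
	while (!atomic_load(&csb_ack))		/* the test expects this promptly */
		;
	atomic_store(&hws_preempt, 0);		/* process_csb() drops the pause  */

	pthread_join(cs, NULL);
	printf("lite-restore acked while blocked on a semaphore: %d\n",
	       atomic_load(&rq2_done));
	return 0;
}

The ordering the selftest cares about is visible in the model: the ACK has to arrive from inside the semaphore busywait, i.e. before the ring is unpaused, which is exactly what the GEM_BUG_ON(!ring_is_paused(engine)) in the patch below asserts.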

Patch

diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 6f06ba750a0a..44c694ddbddc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -350,6 +350,180 @@ static int live_unlite_preempt(void *arg)
 	return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
 }
 
+static struct i915_request *
+create_lite_semaphore(struct intel_context *ce, void *slot)
+{
+	const u32 offset =
+		i915_ggtt_offset(ce->engine->status_page.vma) +
+		offset_in_page(slot);
+	struct i915_request *rq;
+	u32 *cs;
+	int err;
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq))
+		return rq;
+
+	if (rq->engine->emit_init_breadcrumb) {
+		err = rq->engine->emit_init_breadcrumb(rq);
+		if (err)
+			goto err;
+	}
+
+	cs = intel_ring_begin(rq, 10);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto err;
+	}
+
+	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+	*cs++ = offset;
+	*cs++ = 0;
+	*cs++ = 1;
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+	*cs++ = MI_ARB_CHECK;
+
+	*cs++ = MI_SEMAPHORE_WAIT |
+		MI_SEMAPHORE_GLOBAL_GTT |
+		MI_SEMAPHORE_POLL |
+		MI_SEMAPHORE_SAD_EQ_SDD;
+	*cs++ = 0;
+	*cs++ = offset;
+	*cs++ = 0;
+
+	intel_ring_advance(rq, cs);
+
+	err = 0;
+err:
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (err) {
+		i915_request_put(rq);
+		return ERR_PTR(err);
+	}
+
+	return rq;
+}
+
+static inline bool
+ring_is_paused(const struct intel_engine_cs *engine)
+{
+	return engine->status_page.addr[I915_GEM_HWS_PREEMPT];
+}
+
+static int live_lite_semaphore(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = -ENOMEM;
+
+	/*
+	 * Exercise issuing a lite-restore (a continuation of the same
+	 * active context with a new request) while the HW is blocked
+	 * on a semaphore. We expect the HW to ACK the lite-restore
+	 * immediately, on its next failed semaphore poll.
+	 */
+
+	err = 0;
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce;
+		struct i915_request *rq;
+		struct igt_live_test t;
+		unsigned long saved;
+		u32 *slot;
+
+		if (!intel_engine_has_semaphores(engine))
+			continue;
+
+		if (!intel_engine_can_store_dword(engine))
+			continue;
+
+		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+			err = -EIO;
+			break;
+		}
+		engine_heartbeat_disable(engine, &saved);
+
+		slot = memset32(engine->status_page.addr + 1000, 0, 4);
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto err;
+		}
+
+		rq = create_lite_semaphore(ce, slot);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err_ce;
+		}
+
+		if (wait_for(READ_ONCE(*slot), 50)) {
+			GEM_TRACE_ERR("%s: failed to submit request\n",
+				      engine->name);
+			err = -ETIME;
+			goto err_rq;
+		}
+
+		intel_engine_flush_submission(engine);
+		GEM_BUG_ON(engine->execlists.pending[0]);
+
+		/* Switch from the inner semaphore to the preempt-to-busy one */
+		ring_set_paused(engine, 1);
+		WRITE_ONCE(*slot, 0);
+
+		if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+			GEM_TRACE_ERR("%s: failed to complete request\n",
+				      engine->name);
+			err = -ETIME;
+			goto err_rq;
+		}
+
+		i915_request_put(rq);
+
+		rq = intel_context_create_request(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err_ce;
+		}
+
+		/*
+		 * The ring_is_paused() should only be cleared on the HW ACK
+		 * following the preemption request (see process_csb()). We
+		 * depend on the HW processing that ACK even if it is currently
+		 * inside a semaphore.
+		 */
+		GEM_BUG_ON(!ring_is_paused(engine));
+		GEM_BUG_ON(engine->execlists.pending[0]);
+		GEM_BUG_ON(execlists_active(&engine->execlists)->context != ce);
+
+		i915_request_get(rq);
+		i915_request_add(rq);
+
+		if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+			GEM_TRACE_ERR("%s: failed to complete lite-restore\n",
+				      engine->name);
+			err = -ETIME;
+			goto err_rq;
+		}
+
+err_rq:
+		i915_request_put(rq);
+err_ce:
+		intel_context_put(ce);
+err:
+		engine_heartbeat_enable(engine, saved);
+		if (igt_live_test_end(&t))
+			err = -EIO;
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
 static int live_pin_rewind(void *arg)
 {
 	struct intel_gt *gt = arg;
@@ -3954,6 +4128,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_sanitycheck),
 		SUBTEST(live_unlite_switch),
 		SUBTEST(live_unlite_preempt),
+		SUBTEST(live_lite_semaphore),
 		SUBTEST(live_pin_rewind),
 		SUBTEST(live_hold_reset),
 		SUBTEST(live_error_interrupt),