[i-g-t] i915/gem_exec_balancer: Swap bond-chain engines over
diff mbox series

Message ID 20191204211442.4073126-1-chris@chris-wilson.co.uk
State New
Headers show
Series
  • [i-g-t] i915/gem_exec_balancer: Swap bond-chain engines over
Related show

Commit Message

Chris Wilson Dec. 4, 2019, 9:14 p.m. UTC
Another situation arises where the master is on the same engine as its
signaler. It is submitted to the second ELSP, causing the submission of
its bonded pair, but it remains blocked. However, if we do not stall
the bonded pair, it will proceed to execute immediately, ahead of its
master's dependencies.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 tests/i915/gem_exec_balancer.c | 83 +++++++++++++++++++++++++++++++++-
 1 file changed, 82 insertions(+), 1 deletion(-)

Patch
diff mbox series

diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index 5f6e3d568..f4909a978 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -934,6 +934,85 @@  static void __bonded_chain(int i915, uint32_t ctx,
 	gem_close(i915, batch.handle);
 }
 
+static void __bonded_chain_inv(int i915, uint32_t ctx,
+			       const struct i915_engine_class_instance *siblings,
+			       unsigned int count)
+{	/* Inverted bonded chain: the master (B) shares engine 1 with its spinning signaler (A); B' must not run before A. */
+	const int priorities[] = { -1023, 0, 1023 }; /* presumably min/default/max user priority -- confirm against uapi */
+	struct drm_i915_gem_exec_object2 batch = {
+		.handle = batch_create(i915),
+	};
+	struct drm_i915_gem_execbuffer2 execbuf = {
+		.buffers_ptr = to_user_pointer(&batch),
+		.buffer_count = 1,
+		.rsvd1 = ctx, /* submit everything on the one test context */
+	};
+	igt_spin_t *spin;
+
+	for (int i = 0; i < ARRAY_SIZE(priorities); i++) {
+		/* A: spin forever on engine 1 */
+		set_load_balancer(i915, ctx, siblings, count, NULL);
+		if (priorities[i] < 0)
+			gem_context_set_priority(i915, ctx, priorities[i]);
+		spin = igt_spin_new(i915,
+				    .ctx = ctx,
+				    .engine = 1, /* same engine slot B uses below */
+				    .flags = (IGT_SPIN_POLL_RUN |
+					      IGT_SPIN_FENCE_OUT));
+		igt_spin_busywait_until_started(spin);
+		gem_context_set_priority(i915, ctx, 0); /* restore default priority */
+
+		/*
+		 * Note we replace the timelines between each execbuf, so
+		 * that any pair of requests on the same engine could be
+		 * re-ordered by the scheduler -- if the dependency tracking
+		 * is subpar.
+		 */
+
+		/* B: waits for A on engine 1 */
+		set_load_balancer(i915, ctx, siblings, count, NULL);
+		execbuf.rsvd2 = spin->out_fence; /* in-fence: block B until A completes */
+		execbuf.flags = I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
+		execbuf.flags |= 1; /* same engine as spinner */
+		gem_execbuf_wr(i915, &execbuf);
+
+		/* B': run in parallel with B on engine 2, i.e. not before A! */
+		if (priorities[i] > 0)
+			gem_context_set_priority(i915, ctx, priorities[i]);
+		set_load_balancer(i915, ctx, siblings, count, NULL);
+		execbuf.flags = I915_EXEC_FENCE_SUBMIT | I915_EXEC_FENCE_OUT;
+		execbuf.flags |= 2; /* opposite engine to spinner */
+		execbuf.rsvd2 >>= 32; /* B's out-fence (upper 32 bits) becomes B''s submit fence */
+		gem_execbuf_wr(i915, &execbuf);
+		gem_context_set_priority(i915, ctx, 0); /* back to default */
+
+		/* Wait for any magic timeslicing or preemptions... */
+		igt_assert_eq(sync_fence_wait(execbuf.rsvd2 >> 32, 1000),
+			      -ETIME); /* B' must still be pending after 1s */
+
+		igt_debugfs_dump(i915, "i915_engine_info");
+
+		/*
+		 * ... which should not have happened, so everything is still
+		 * waiting on the spinner
+		 */
+		igt_assert_eq(sync_fence_status(spin->out_fence), 0); /* A still spinning */
+		igt_assert_eq(sync_fence_status(execbuf.rsvd2 & 0xffffffff), 0); /* B blocked on A */
+		igt_assert_eq(sync_fence_status(execbuf.rsvd2 >> 32), 0); /* B' blocked with B */
+
+		igt_spin_free(i915, spin);
+		gem_sync(i915, batch.handle); /* release A, then wait for B and B' to finish */
+
+		igt_assert_eq(sync_fence_status(execbuf.rsvd2 & 0xffffffff), 1); /* B signalled */
+		igt_assert_eq(sync_fence_status(execbuf.rsvd2 >> 32), 1); /* B' signalled */
+
+		close(execbuf.rsvd2); /* B's out-fence fd (low 32 bits) */
+		close(execbuf.rsvd2 >> 32); /* B''s out-fence fd */
+	}
+
+	gem_close(i915, batch.handle);
+}
+
 static void bonded_chain(int i915)
 {
 	uint32_t ctx;
@@ -951,8 +1030,10 @@  static void bonded_chain(int i915)
 		unsigned int count;
 
 		siblings = list_engines(i915, 1u << class, &count);
-		if (count > 1)
+		if (count > 1) {
 			__bonded_chain(i915, ctx, siblings, count);
+			__bonded_chain_inv(i915, ctx, siblings, count);
+		}
 		free(siblings);
 	}