
[i-g-t,1/7] lib/igt_dummyload: Send batch as first

Message ID 20191113154913.8787-1-mika.kuoppala@linux.intel.com
State New, archived
Series [i-g-t,1/7] lib/igt_dummyload: Send batch as first

Commit Message

Mika Kuoppala Nov. 13, 2019, 3:49 p.m. UTC
To simplify emitting the recursive batch, make the batch
always the first object on the execbuf list.

v2: set handles early, poll_ptr indecency (Chris)
v3: allow dep with poll
v4: fix gem_exec_schedule
v5: rebase
v6: rebase
v6: gem_ctx_shared
v7: conditional close of poll handle

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 lib/igt_dummyload.c            | 111 +++++++++++++++++----------------
 lib/igt_dummyload.h            |   8 ++-
 tests/i915/gem_exec_balancer.c |   8 +--
 tests/i915/gem_spin_batch.c    |  13 ++--
 4 files changed, 73 insertions(+), 67 deletions(-)
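
For context, the uAPI detail this relies on: by default the kernel treats the
last object in the buffers_ptr array as the batch, while I915_EXEC_BATCH_FIRST
(kernel v4.13+) makes it take the first, so the spinner can keep a fixed
object order of batch, poll, dependency. A minimal sketch of such a
submission, with placeholder handles and a hypothetical helper name (not code
from this patch; include paths may vary with libdrm):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Hypothetical helper for illustration; handles are placeholders and
 * error handling is elided. */
static void submit_batch_first(int fd, uint32_t batch_handle,
			       uint32_t poll_handle, uint32_t dep_handle)
{
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(obj, 0, sizeof(obj));
	obj[0].handle = batch_handle;	/* IGT_SPIN_BATCH == 0 */
	obj[1].handle = poll_handle;	/* IGT_SPIN_POLL  == 1 */
	obj[2].handle = dep_handle;	/* DEP == 2 */

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)obj;
	execbuf.buffer_count = 3;
	/* Without this flag the kernel would execute obj[2] instead. */
	execbuf.flags = I915_EXEC_BATCH_FIRST;

	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}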

Comments

Chris Wilson Nov. 13, 2019, 3:57 p.m. UTC | #1
Quoting Mika Kuoppala (2019-11-13 15:49:07)
> To simplify emitting the recursive batch, make the batch
> always the first object on the execbuf list.

Requires v4.13. Useful to leave as a note. Fwiw, looks like 3.16 and
4.14 are still rotting away slowly.
-Chris
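
The v4.13 requirement tracks I915_EXEC_BATCH_FIRST itself; the kernel
advertises the flag through I915_PARAM_HAS_EXEC_BATCH_FIRST, so older kernels
can be detected at runtime. A sketch of such a probe (an assumption about how
one could gate it, not code from this series):

#include <stdbool.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Sketch: I915_PARAM_HAS_EXEC_BATCH_FIRST reports whether the running
 * kernel (v4.13+) accepts I915_EXEC_BATCH_FIRST. */
static bool has_exec_batch_first(int fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_EXEC_BATCH_FIRST,
		.value = &value,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return false;

	return value > 0;
}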

Patch

diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c
index b9e239db..c079bd04 100644
--- a/lib/igt_dummyload.c
+++ b/lib/igt_dummyload.c
@@ -63,6 +63,7 @@ 
 #define MI_ARB_CHK (0x5 << 23)
 
 static const int BATCH_SIZE = 4096;
+static const int POLL_SIZE = 4096;
 static const int LOOP_START_OFFSET = 64;
 
 static IGT_LIST_HEAD(spin_list);
@@ -72,16 +73,23 @@  static int
 emit_recursive_batch(igt_spin_t *spin,
 		     int fd, const struct igt_spin_factory *opts)
 {
-#define SCRATCH 0
+
 #define BATCH IGT_SPIN_BATCH
+#define POLL 1
+#define DEP     2
 	const int gen = intel_gen(intel_get_drm_devid(fd));
-	struct drm_i915_gem_relocation_entry relocs[2], *r;
+	struct drm_i915_gem_exec_object2 * const batch =
+		&spin->obj[BATCH];
+	struct drm_i915_gem_exec_object2 * const poll =
+		&spin->obj[POLL];
+	struct drm_i915_gem_exec_object2 * const dep =
+		&spin->obj[DEP];
+	struct drm_i915_gem_relocation_entry relocs[4], *r;
 	struct drm_i915_gem_execbuffer2 *execbuf;
-	struct drm_i915_gem_exec_object2 *obj;
 	unsigned int flags[GEM_MAX_ENGINES];
 	unsigned int nengine;
 	int fence_fd = -1;
-	uint32_t *cs, *batch;
+	uint32_t *cs, *batch_start;
 	int i;
 
 	nengine = 0;
@@ -103,64 +111,48 @@  emit_recursive_batch(igt_spin_t *spin,
 	memset(&spin->execbuf, 0, sizeof(spin->execbuf));
 	execbuf = &spin->execbuf;
 	memset(spin->obj, 0, sizeof(spin->obj));
-	obj = spin->obj;
 	memset(relocs, 0, sizeof(relocs));
 
-	obj[BATCH].handle = gem_create(fd, BATCH_SIZE);
-	batch = __gem_mmap__wc(fd, obj[BATCH].handle,
+	batch->handle = gem_create(fd, BATCH_SIZE);
+	spin->handle = batch->handle;
+
+	batch_start = __gem_mmap__wc(fd, batch->handle,
 				     0, BATCH_SIZE, PROT_WRITE);
-	if (!batch)
-		batch = gem_mmap__gtt(fd, obj[BATCH].handle,
+	if (!batch_start)
+		batch_start = gem_mmap__gtt(fd, batch->handle,
 					    BATCH_SIZE, PROT_WRITE);
-
-	gem_set_domain(fd, obj[BATCH].handle,
+	gem_set_domain(fd, batch->handle,
 		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
 	execbuf->buffer_count++;
-	cs = batch;
-
-	if (opts->dependency) {
-		igt_assert(!(opts->flags & IGT_SPIN_POLL_RUN));
-
-		r = &relocs[obj[BATCH].relocation_count++];
-
-		/* dummy write to dependency */
-		obj[SCRATCH].handle = opts->dependency;
-		r->presumed_offset = 0;
-		r->target_handle = obj[SCRATCH].handle;
-		r->offset = sizeof(uint32_t) * 1020;
-		r->delta = 0;
-		r->read_domains = I915_GEM_DOMAIN_RENDER;
-		r->write_domain = I915_GEM_DOMAIN_RENDER;
+	cs = batch_start;
 
+	poll->handle = gem_create(fd, POLL_SIZE);
+	spin->poll_handle = poll->handle;
 	execbuf->buffer_count++;
-	} else if (opts->flags & IGT_SPIN_POLL_RUN) {
-		r = &relocs[obj[BATCH].relocation_count++];
 
-		igt_assert(!opts->dependency);
+	if (opts->flags & IGT_SPIN_POLL_RUN) {
+		r = &relocs[batch->relocation_count++];
 
 		if (gen == 4 || gen == 5) {
 			execbuf->flags |= I915_EXEC_SECURE;
 			igt_require(__igt_device_set_master(fd) == 0);
 		}
 
-		spin->poll_handle = gem_create(fd, 4096);
-		obj[SCRATCH].handle = spin->poll_handle;
-
-		if (__gem_set_caching(fd, spin->poll_handle,
+		if (__gem_set_caching(fd, poll->handle,
 				      I915_CACHING_CACHED) == 0)
-			spin->poll = gem_mmap__cpu(fd, spin->poll_handle,
-						   0, 4096,
+			spin->poll = gem_mmap__cpu(fd, poll->handle,
+						   0, POLL_SIZE,
 						   PROT_READ | PROT_WRITE);
 		else
-			spin->poll = gem_mmap__wc(fd, spin->poll_handle,
-						  0, 4096,
+			spin->poll = gem_mmap__wc(fd, poll->handle,
+						  0, POLL_SIZE,
 						  PROT_READ | PROT_WRITE);
 
 		igt_assert_eq(spin->poll[SPIN_POLL_START_IDX], 0);
 
 		/* batch is first */
-		r->presumed_offset = 4096;
-		r->target_handle = obj[SCRATCH].handle;
+		r->presumed_offset = BATCH_SIZE;
+		r->target_handle = poll->handle;
 		r->offset = sizeof(uint32_t) * 1;
 		r->delta = sizeof(uint32_t) * SPIN_POLL_START_IDX;
 
@@ -179,14 +171,25 @@  emit_recursive_batch(igt_spin_t *spin,
 		}
 
 		*cs++ = 1;
+	}
+
+	if (opts->dependency) {
+		r = &relocs[batch->relocation_count++];
+
+		/* dummy write to dependency */
+		dep->handle = opts->dependency;
+		r->presumed_offset = BATCH_SIZE + POLL_SIZE;
+		r->target_handle = dep->handle;
+		r->offset = sizeof(uint32_t) * 1020;
+		r->delta = 0;
+		r->read_domains = I915_GEM_DOMAIN_RENDER;
+		r->write_domain = I915_GEM_DOMAIN_RENDER;
 
 		execbuf->buffer_count++;
 	}
 
-	spin->handle = obj[BATCH].handle;
-
-	igt_assert_lt(cs - batch, LOOP_START_OFFSET / sizeof(*cs));
-	spin->condition = batch + LOOP_START_OFFSET / sizeof(*cs);
+	igt_assert_lt(cs - batch_start, LOOP_START_OFFSET / sizeof(*cs));
+	spin->condition = batch_start + LOOP_START_OFFSET / sizeof(*cs);
 	cs = spin->condition;
 
 	/* Allow ourselves to be preempted */
@@ -208,9 +211,9 @@  emit_recursive_batch(igt_spin_t *spin,
 		cs += 1000;
 
 	/* recurse */
-	r = &relocs[obj[BATCH].relocation_count++];
-	r->target_handle = obj[BATCH].handle;
-	r->offset = (cs + 1 - batch) * sizeof(*cs);
+	r = &relocs[batch->relocation_count++];
+	r->target_handle = batch->handle;
+	r->offset = (cs + 1 - batch_start) * sizeof(*cs);
 	r->read_domains = I915_GEM_DOMAIN_COMMAND;
 	r->delta = LOOP_START_OFFSET;
 	if (gen >= 8) {
@@ -227,10 +230,10 @@  emit_recursive_batch(igt_spin_t *spin,
 		*cs = r->delta;
 		cs++;
 	}
-	obj[BATCH].relocs_ptr = to_user_pointer(relocs);
+	batch->relocs_ptr = to_user_pointer(relocs);
 
-	execbuf->buffers_ptr = to_user_pointer(obj +
-					       (2 - execbuf->buffer_count));
+	execbuf->buffers_ptr = to_user_pointer(spin->obj);
+	execbuf->flags |= I915_EXEC_BATCH_FIRST;
 	execbuf->rsvd1 = opts->ctx;
 
 	if (opts->flags & IGT_SPIN_FENCE_OUT)
@@ -264,10 +267,9 @@  emit_recursive_batch(igt_spin_t *spin,
 		}
 	}
 
-	igt_assert_lt(cs - batch, BATCH_SIZE / sizeof(*cs));
+	igt_assert_lt(cs - batch_start, BATCH_SIZE / sizeof(*cs));
 
-	/* Make it easier for callers to resubmit. */
-	for (i = 0; i < ARRAY_SIZE(spin->obj); i++) {
+	for (i = 0; i < execbuf->buffer_count; i++) {
 		spin->obj[i].relocation_count = 0;
 		spin->obj[i].relocs_ptr = 0;
 		spin->obj[i].flags = EXEC_OBJECT_PINNED;
@@ -445,10 +447,11 @@  void igt_spin_free(int fd, igt_spin_t *spin)
 	gem_munmap((void *)((unsigned long)spin->condition & (~4095UL)),
 		   BATCH_SIZE);
 
-	if (spin->poll) {
+	if (spin->poll)
 		gem_munmap(spin->poll, 4096);
+
+	if (spin->poll_handle)
 		gem_close(fd, spin->poll_handle);
-	}
 
 	if (spin->handle)
 		gem_close(fd, spin->handle);
diff --git a/lib/igt_dummyload.h b/lib/igt_dummyload.h
index 421ca183..de5781d7 100644
--- a/lib/igt_dummyload.h
+++ b/lib/igt_dummyload.h
@@ -34,6 +34,7 @@ 
 
 typedef struct igt_spin {
 	unsigned int handle;
+
 	timer_t timer;
 	struct igt_list_head link;
 
@@ -41,9 +42,12 @@  typedef struct igt_spin {
 	uint32_t cmd_precondition;
 
 	int out_fence;
-	struct drm_i915_gem_exec_object2 obj[2];
-#define IGT_SPIN_BATCH   1
+	struct drm_i915_gem_exec_object2 obj[3];
+#define IGT_SPIN_BATCH   0
+#define IGT_SPIN_POLL    1
+
 	struct drm_i915_gem_execbuffer2 execbuf;
+
 	uint32_t poll_handle;
 	uint32_t *poll;
 #define SPIN_POLL_START_IDX 0
diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index 70c4529b..19df406c 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -718,7 +718,7 @@  static uint32_t create_semaphore_to_spinner(int i915, igt_spin_t *spin)
 	cs = map = gem_mmap__cpu(i915, handle, 0, 4096, PROT_WRITE);
 
 	/* Wait until the spinner is running */
-	addr = spin->obj[0].offset + 4 * SPIN_POLL_START_IDX;
+	addr = spin->obj[IGT_SPIN_POLL].offset + 4 * SPIN_POLL_START_IDX;
 	*cs++ = MI_SEMAPHORE_WAIT |
 		MI_SEMAPHORE_POLL |
 		MI_SEMAPHORE_SAD_NEQ_SDD |
@@ -797,9 +797,9 @@  static void bonded_slice(int i915)
 		igt_spin_reset(spin);
 
 		/* igt_spin_t poll and batch obj must be laid out as we expect */
-		igt_assert_eq(IGT_SPIN_BATCH, 1);
-		obj[0] = spin->obj[0];
-		obj[1] = spin->obj[1];
+		igt_assert_eq(IGT_SPIN_BATCH, 0);
+		obj[0] = spin->obj[IGT_SPIN_POLL];
+		obj[1] = spin->obj[IGT_SPIN_BATCH];
 		obj[2].handle = create_semaphore_to_spinner(i915, spin);
 
 		eb.buffers_ptr = to_user_pointer(obj);
diff --git a/tests/i915/gem_spin_batch.c b/tests/i915/gem_spin_batch.c
index c67f015f..04707fdc 100644
--- a/tests/i915/gem_spin_batch.c
+++ b/tests/i915/gem_spin_batch.c
@@ -78,12 +78,9 @@  static void spin_resubmit(int fd, const struct intel_execution_engine2 *e2,
 		gem_context_create(fd) : ctx0;
 	igt_spin_t *spin = __igt_spin_new(fd, .ctx = ctx0, .engine = e2->flags);
 	const struct intel_execution_engine2 *other;
+	struct drm_i915_gem_execbuffer2 eb = spin->execbuf;
 
-	struct drm_i915_gem_execbuffer2 eb = {
-		.buffer_count = 1,
-		.buffers_ptr = to_user_pointer(&spin->obj[IGT_SPIN_BATCH]),
-		.rsvd1 = ctx1,
-	};
+	eb.rsvd1 = ctx1;
 
 	igt_assert(gem_context_has_engine_map(fd, 0) ||
 		   !(flags & RESUBMIT_ALL_ENGINES));
@@ -97,11 +94,13 @@  static void spin_resubmit(int fd, const struct intel_execution_engine2 *e2,
 			if (gem_engine_is_equal(other, e2))
 				continue;
 
-			eb.flags = other->flags;
+			eb.flags &= ~(I915_EXEC_RING_MASK | I915_EXEC_BSD_MASK);
+			eb.flags |= other->flags;
 			gem_execbuf(fd, &eb);
 		}
 	} else {
-		eb.flags = e2->flags;
+		eb.flags &= ~(I915_EXEC_RING_MASK | I915_EXEC_BSD_MASK);
+		eb.flags |= e2->flags;
 		gem_execbuf(fd, &eb);
 	}