fixups

Message ID 20190408101039.16412-1-tvrtko.ursulin@linux.intel.com (mailing list archive)
State New, archived
Series fixups

Commit Message

Tvrtko Ursulin April 8, 2019, 10:10 a.m. UTC
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

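Make spin batches work on contexts with an engine map: when I915_CONTEXT_PARAM_ENGINES is configured, emit_recursive_batch treats the execbuf engine selector as an index into the map and queries the map for the engine class, instead of requiring a legacy ring. Drop the now unused gem_eb_to_class_instance() (its only caller, spin_sync_flags() in perf_pmu, open-codes the decode) and run the perf_pmu spinners on explicitly created contexts.
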
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 lib/igt_dummyload.c | 39 +++++++++++++++++++++++++++++++++++++--
 lib/igt_gt.c        | 14 --------------
 lib/igt_gt.h        |  2 --
 tests/perf_pmu.c    | 44 ++++++++++++++++++++++++++++++++++++--------
 4 files changed, 73 insertions(+), 26 deletions(-)

Patch

diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c
index 13bd2e8dead1..8f457e5069e7 100644
--- a/lib/igt_dummyload.c
+++ b/lib/igt_dummyload.c
@@ -105,10 +105,44 @@  emit_recursive_batch(igt_spin_t *spin,
 			flags[nengine++] = engine->flags;
 		}
 	} else {
-		gem_require_ring(fd, opts->engine);
+#define SIZEOF_CTX_PARAM	offsetof(struct i915_context_param_engines, \
+					class_instance[GEM_MAX_ENGINES])
+		I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, GEM_MAX_ENGINES);
+		struct drm_i915_gem_context_param param = {
+			.param = I915_CONTEXT_PARAM_ENGINES,
+			.ctx_id = opts->ctx,
+			.size = SIZEOF_CTX_PARAM,
+			.value = to_user_pointer(&engines),
+		};
+		unsigned int class;
+
+		/*
+		 * If the context has engine map configured we need to consider
+		 * the engine selector as index into the array. Therefore to
+		 * find out the class and instance we need to query the map.
+		 */
+		if (!__gem_context_get_param(fd, &param) && param.size) {
+			unsigned int nengines;
+
+			nengines = param.size >
+				   sizeof(struct i915_context_param_engines) ?
+				   (param.size -
+				    sizeof(struct i915_context_param_engines)) /
+				   sizeof(engines.class_instance[0]) : 0;
+
+			igt_assert(nengines > 0);
+			igt_assert(opts->engine < nengines);
+
+			class =
+			     engines.class_instance[opts->engine].engine_class;
+		} else {
+			gem_require_ring(fd, opts->engine);
+
+			class = gem_eb_to_class(opts->engine);
+		}
 
 		if (opts->flags & IGT_SPIN_POLL_RUN)
-			igt_require(gem_can_store_dword(fd, opts->engine));
+			igt_require(gem_class_can_store_dword(fd, class));
 
 		flags[nengine++] = opts->engine;
 	}
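For reference, the lookup added above can be written as a standalone helper. A minimal sketch that builds inside the IGT tree under the patch's own definitions (I915_DEFINE_CONTEXT_PARAM_ENGINES, GEM_MAX_ENGINES, __gem_context_get_param, to_user_pointer); the helper name eb_selector_to_class is made up for illustration:

static int eb_selector_to_class(int fd, uint32_t ctx, unsigned int selector)
{
	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, GEM_MAX_ENGINES);
	struct drm_i915_gem_context_param param = {
		.param = I915_CONTEXT_PARAM_ENGINES,
		.ctx_id = ctx,
		.size = offsetof(struct i915_context_param_engines,
				 class_instance[GEM_MAX_ENGINES]),
		.value = to_user_pointer(&engines),
	};
	unsigned int nengines = 0;

	/* No engine map configured: caller falls back to legacy flags. */
	if (__gem_context_get_param(fd, &param) || !param.size)
		return -1;

	/* param.size is written back to match the map actually stored. */
	if (param.size > sizeof(struct i915_context_param_engines))
		nengines = (param.size -
			    sizeof(struct i915_context_param_engines)) /
			   sizeof(engines.class_instance[0]);
	igt_assert(selector < nengines);

	return engines.class_instance[selector].engine_class;
}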
diff --git a/lib/igt_gt.c b/lib/igt_gt.c
index 12e19417a827..5d0a8e8060a7 100644
--- a/lib/igt_gt.c
+++ b/lib/igt_gt.c
@@ -609,20 +609,6 @@  int gem_eb_to_class(unsigned int flags)
 	}
 }
 
-void gem_eb_to_class_instance(int fd, unsigned int flags,
-			      struct intel_execution_engine2 *e)
-{
-	e->class = gem_eb_to_class(flags);
-
-	e->instance = ((flags & 0x3f) | I915_EXEC_BSD_RING2) ==
-		      (I915_EXEC_BSD | I915_EXEC_BSD_RING2) ?
-		      1 :
-		      0;
-
-	if (!gem_has_engine_topology(fd))
-		e->flags = flags;
-}
-
 unsigned int
 gem_class_instance_to_eb_flags(int gem_fd,
 			       enum drm_i915_gem_engine_class class,
diff --git a/lib/igt_gt.h b/lib/igt_gt.h
index b7e6bdc33db7..c2ca07e03738 100644
--- a/lib/igt_gt.h
+++ b/lib/igt_gt.h
@@ -101,8 +101,6 @@  extern const struct intel_execution_engine2 {
 } intel_execution_engines2[];
 
 int gem_eb_to_class(unsigned int flags);
-void gem_eb_to_class_instance(int fd, unsigned int flags,
-			      struct intel_execution_engine2 *e);
 
 unsigned int
 gem_class_instance_to_eb_flags(int gem_fd,
diff --git a/tests/perf_pmu.c b/tests/perf_pmu.c
index 0af955139c3e..d70f4585c147 100644
--- a/tests/perf_pmu.c
+++ b/tests/perf_pmu.c
@@ -225,9 +225,12 @@  static igt_spin_t * spin_sync(int fd, uint32_t ctx,
 
 static igt_spin_t * spin_sync_flags(int fd, uint32_t ctx, unsigned int flags)
 {
-	struct intel_execution_engine2 e;
+	struct intel_execution_engine2 e = { };
 
-	gem_eb_to_class_instance(fd, flags, &e);
+	e.class = gem_eb_to_class(flags);
+	e.instance = (flags & (I915_EXEC_BSD_MASK | I915_EXEC_RING_MASK)) ==
+		     (I915_EXEC_BSD | I915_EXEC_BSD_RING2) ? 1 : 0;
+	e.flags = flags;
 
 	return spin_sync(fd, ctx, &e);
 }
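With gem_eb_to_class_instance() gone, the open-coded decode above is the only remaining translation from legacy execbuf flags to class/instance. A quick sanity check of the bit arithmetic it relies on, using only uapi names from i915_drm.h (the function name is made up; I915_EXEC_BSD = 2, I915_EXEC_BSD_RING2 = 2 << 13):

static void check_bsd2_decode(void)
{
	unsigned int flags = I915_EXEC_BSD | I915_EXEC_BSD_RING2; /* 0x4002 */

	/*
	 * The mask keeps only the ring selector (I915_EXEC_RING_MASK) and
	 * the BSD engine selector (I915_EXEC_BSD_MASK), so only VCS1, aka
	 * "BSD ring 2", ever decodes to instance 1.
	 */
	igt_assert_eq(flags & (I915_EXEC_BSD_MASK | I915_EXEC_RING_MASK),
		      I915_EXEC_BSD | I915_EXEC_BSD_RING2);

	/* I915_EXEC_BSD | I915_EXEC_BSD_RING1 decodes to instance 0. */
	igt_assert(((I915_EXEC_BSD | I915_EXEC_BSD_RING1) &
		    (I915_EXEC_BSD_MASK | I915_EXEC_RING_MASK)) !=
		   (I915_EXEC_BSD | I915_EXEC_BSD_RING2));
}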
@@ -355,6 +358,9 @@  busy_double_start(int gem_fd, struct intel_execution_engine2 *e)
 
 	ctx = gem_context_create(gem_fd);
 
+	/* QQQ This feels ugly! */
+	intel_init_engine_list(gem_fd, ctx);
+
 	/*
 	 * Defeat the busy stats delayed disable, we need to guarantee we are
 	 * the first user.
@@ -510,7 +516,7 @@  most_busy_check_all(int gem_fd, struct intel_execution_engine2 *e,
 		else if (spin)
 			__submit_spin_batch(gem_fd, spin, e_, 64);
 		else
-			spin = __spin_poll(gem_fd, 0, e);
+			spin = __spin_poll(gem_fd, 0, e_);
 
 		val[i++] = I915_PMU_ENGINE_BUSY(e_->class, e_->instance);
 	}
@@ -1046,6 +1052,7 @@  static void cpu_hotplug(int gem_fd)
 	igt_spin_t *spin[2];
 	uint64_t ts[2];
 	uint64_t val;
+	uint32_t ctx;
 	int link[2];
 	int fd, ret;
 	int cur = 0;
@@ -1053,14 +1060,18 @@  static void cpu_hotplug(int gem_fd)
 
 	igt_require(cpu0_hotplug_support());
 
+	ctx = gem_context_create(gem_fd);
+
 	fd = open_pmu(I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0));
 
 	/*
 	 * Create two spinners so the test can ensure shorter gaps in engine
 	 * busyness as it terminates one and re-starts the other.
 	 */
-	spin[0] = igt_spin_batch_new(gem_fd, .engine = I915_EXEC_RENDER);
-	spin[1] = __igt_spin_batch_new(gem_fd, .engine = I915_EXEC_RENDER);
+	spin[0] = igt_spin_batch_new(gem_fd,
+				     .engine = I915_EXEC_RENDER, .ctx = ctx);
+	spin[1] = __igt_spin_batch_new(gem_fd,
+				       .engine = I915_EXEC_RENDER, .ctx = ctx);
 
 	val = __pmu_read_single(fd, &ts[0]);
 
@@ -1144,6 +1155,7 @@  static void cpu_hotplug(int gem_fd)
 
 		igt_spin_batch_free(gem_fd, spin[cur]);
 		spin[cur] = __igt_spin_batch_new(gem_fd,
+						 .ctx = ctx,
 						 .engine = I915_EXEC_RENDER);
 		cur ^= 1;
 	}
@@ -1157,6 +1169,7 @@  static void cpu_hotplug(int gem_fd)
 	igt_waitchildren();
 	close(fd);
 	close(link[0]);
+	gem_context_destroy(gem_fd, ctx);
 
 	/* Skip if child signals a problem with offlining a CPU. */
 	igt_skip_on(buf == 's');
@@ -1172,17 +1185,21 @@  test_interrupts(int gem_fd)
 	igt_spin_t *spin[target];
 	struct pollfd pfd;
 	uint64_t idle, busy;
+	uint32_t ctx;
 	int fence_fd;
 	int fd;
 
 	gem_quiescent_gpu(gem_fd);
 
+	ctx = gem_context_create(gem_fd);
+
 	fd = open_pmu(I915_PMU_INTERRUPTS);
 
 	/* Queue spinning batches. */
 	for (int i = 0; i < target; i++) {
 		spin[i] = __igt_spin_batch_new(gem_fd,
 					       .engine = I915_EXEC_RENDER,
+					       .ctx = ctx,
 					       .flags = IGT_SPIN_FENCE_OUT);
 		if (i == 0) {
 			fence_fd = spin[i]->out_fence;
@@ -1224,6 +1241,7 @@  test_interrupts(int gem_fd)
 	/* Check that at least as many interrupts have been generated. */
 	busy = pmu_read_single(fd) - idle;
 	close(fd);
+	gem_context_destroy(gem_fd, ctx);
 
 	igt_assert_lte(target, busy);
 }
@@ -1236,15 +1254,19 @@  test_interrupts_sync(int gem_fd)
 	igt_spin_t *spin[target];
 	struct pollfd pfd;
 	uint64_t idle, busy;
+	uint32_t ctx;
 	int fd;
 
 	gem_quiescent_gpu(gem_fd);
 
+	ctx = gem_context_create(gem_fd);
+
 	fd = open_pmu(I915_PMU_INTERRUPTS);
 
 	/* Queue spinning batches. */
 	for (int i = 0; i < target; i++)
 		spin[i] = __igt_spin_batch_new(gem_fd,
+					       .ctx = ctx,
 					       .flags = IGT_SPIN_FENCE_OUT);
 
 	/* Wait for idle state. */
@@ -1269,6 +1291,7 @@  test_interrupts_sync(int gem_fd)
 	/* Check that at least as many interrupts have been generated. */
 	busy = pmu_read_single(fd) - idle;
 	close(fd);
+	gem_context_destroy(gem_fd, ctx);
 
 	igt_assert_lte(target, busy);
 }
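Every perf_pmu.c hunk above applies the same pattern: create a context before opening the PMU event, point each spinner at it, and destroy it once the counters have been read. A minimal sketch of that lifecycle, using only calls that appear in the patch (the function name and the elided measurement step are illustrative):

static void busy_on_explicit_ctx(int gem_fd)
{
	uint32_t ctx = gem_context_create(gem_fd);
	igt_spin_t *spin;

	/* Submit the spinner against the explicit context, not ctx0. */
	spin = igt_spin_batch_new(gem_fd,
				  .ctx = ctx,
				  .engine = I915_EXEC_RENDER);

	/* ... open and read I915_PMU_ENGINE_BUSY counters here ... */

	igt_spin_batch_free(gem_fd, spin);
	gem_context_destroy(gem_fd, ctx);
}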
@@ -1281,6 +1304,9 @@  test_frequency(int gem_fd)
 	double min[2], max[2];
 	igt_spin_t *spin;
 	int fd, sysfs;
+	uint32_t ctx;
+
+	ctx = gem_context_create(gem_fd);
 
 	sysfs = igt_sysfs_open(gem_fd);
 	igt_require(sysfs >= 0);
@@ -1308,7 +1334,7 @@  test_frequency(int gem_fd)
 	igt_require(igt_sysfs_get_u32(sysfs, "gt_boost_freq_mhz") == min_freq);
 
 	gem_quiescent_gpu(gem_fd); /* Idle to be sure the change takes effect */
-	spin = spin_sync_flags(gem_fd, 0, I915_EXEC_RENDER);
+	spin = spin_sync_flags(gem_fd, ctx, I915_EXEC_RENDER);
 
 	slept = pmu_read_multi(fd, 2, start);
 	measured_usleep(batch_duration_ns / 1000);
@@ -1334,7 +1360,7 @@  test_frequency(int gem_fd)
 	igt_require(igt_sysfs_get_u32(sysfs, "gt_min_freq_mhz") == max_freq);
 
 	gem_quiescent_gpu(gem_fd);
-	spin = spin_sync_flags(gem_fd, 0, I915_EXEC_RENDER);
+	spin = spin_sync_flags(gem_fd, ctx, I915_EXEC_RENDER);
 
 	slept = pmu_read_multi(fd, 2, start);
 	measured_usleep(batch_duration_ns / 1000);
@@ -1355,6 +1381,8 @@  test_frequency(int gem_fd)
 			 min_freq, igt_sysfs_get_u32(sysfs, "gt_min_freq_mhz"));
 	close(fd);
 
+	gem_context_destroy(gem_fd, ctx);
+
 	igt_info("Min frequency: requested %.1f, actual %.1f\n",
 		 min[0], min[1]);
 	igt_info("Max frequency: requested %.1f, actual %.1f\n",
@@ -1904,7 +1932,7 @@  igt_main
 			gem_quiescent_gpu(fd);
 		}
 
-		__for_each_physical_engine(fd, e) {
+		__for_each_physical_engine(render_fd, e) {
 			igt_subtest_group {
 				igt_fixture {
 					gem_require_engine(render_fd,