
[19/46] drm/i915: Test request ordering between engines

Message ID 20170202090905.29028-20-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson Feb. 2, 2017, 9:08 a.m. UTC
A request on one engine with a dependency on a request on another engine
must wait for completion of the first request before starting.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/selftests/i915_gem_request.c | 137 ++++++++++++++++++++++
 1 file changed, 137 insertions(+)
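
The ordering the test exercises comes down to one call: before submission, each request takes a fence dependency on the request previously submitted to a different engine. A condensed sketch of that step, pulled out of the patch below (the chain_requests() wrapper is illustrative only and not part of the patch):

static int chain_requests(struct drm_i915_gem_request *rq,
			  struct drm_i915_gem_request *prev)
{
	if (!prev)
		return 0;

	/* Order rq after prev, even though the two requests run on
	 * different engines; rq must not start executing until prev
	 * has completed.
	 */
	return i915_gem_request_await_dma_fence(rq, &prev->fence);
}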

Comments

Joonas Lahtinen Feb. 9, 2017, 10:20 a.m. UTC | #1
On Thu, 2017-02-02 at 09:08 +0000, Chris Wilson wrote:
> A request on one engine with a dependency on a request on another engine
> must wait for completion of the first request before starting.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

<SNIP>

> +static int live_sequential_engines(void *arg)
> +{

<SNIP>

> +	for_each_engine(engine, i915, id) {
> +		long timeout;
> +		u32 *cmd;
> +
> +		if (i915_gem_request_completed(request[id])) {
> +			pr_err("%s(%s): request completed too early!\n",
> +			       __func__, engine->name);
> +			err = -EINVAL;
> +			goto out_request;
> +		}

I was kind of anticipating that you would capture prev and always release
the previous batch. It doesn't necessarily add much value, but before
releasing the previous batch you could make sure that the current request
has not even been started yet. That's what you mention in the commit
message.

> +
> +		cmd = i915_gem_object_pin_map(request[id]->batch->obj,
> +					      I915_MAP_WC);
> +		if (IS_ERR(cmd)) {
> +			err = PTR_ERR(cmd);
> +			pr_err("%s: failed to WC map batch, err=%d\n", __func__, err);
> +			goto out_request;
> +		}
> +		*cmd = MI_BATCH_BUFFER_END;
> +		wmb();
> +		i915_gem_object_unpin_map(request[id]->batch->obj);

This could be pulled into a recursive_batch_release() helper.

With the helper;

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, Joonas
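
The recursive_batch_release() helper Joonas asks for would simply fold up
the pin/terminate/unpin sequence that the patch repeats in its wait loop
and in its cleanup path. A minimal sketch along those lines (the helper is
not part of the patch as posted):

static void recursive_batch_release(struct i915_vma *batch)
{
	u32 *cmd;

	/* The recursive batch spins on the GPU until its first dword is
	 * overwritten with MI_BATCH_BUFFER_END, at which point the
	 * request is allowed to complete.
	 */
	cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cmd))
		return;

	*cmd = MI_BATCH_BUFFER_END;
	wmb();

	i915_gem_object_unpin_map(batch->obj);
}

Both call sites would then collapse to
recursive_batch_release(request[id]->batch); the wait loop may still want
the mapping error propagated rather than swallowed, in which case the
helper would return PTR_ERR(cmd) instead of void.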

Patch

diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
index 570cfa196ca7..f9c171d1a05b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
@@ -473,11 +473,148 @@  static int live_all_engines(void *arg)
 	return err;
 }
 
+static int live_sequential_engines(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_request *request[I915_NUM_ENGINES] = {};
+	struct drm_i915_gem_request *prev = NULL;
+	struct intel_engine_cs *engine;
+	struct live_test t;
+	unsigned int id;
+	int err;
+
+	/* Check we can submit requests to all engines sequentially, such
+	 * that each successive request waits for the earlier ones. This
+	 * tests that we don't execute requests out of order, even though
+	 * they are running on independent engines.
+	 */
+
+	mutex_lock(&i915->drm.struct_mutex);
+
+	err = begin_live_test(&t, i915, __func__, "");
+	if (err)
+		goto out_unlock;
+
+	for_each_engine(engine, i915, id) {
+		struct i915_vma *batch;
+
+		batch = recursive_batch(i915);
+		if (IS_ERR(batch)) {
+			err = PTR_ERR(batch);
+			pr_err("%s: Unable to create batch for %s, err=%d\n",
+			       __func__, engine->name, err);
+			goto out_unlock;
+		}
+
+		request[id] = i915_gem_request_alloc(engine,
+						     i915->kernel_context);
+		if (IS_ERR(request[id])) {
+			err = PTR_ERR(request[id]);
+			pr_err("%s: Request allocation failed for %s with err=%d\n",
+			       __func__, engine->name, err);
+			goto out_request;
+		}
+
+		if (prev) {
+			err = i915_gem_request_await_dma_fence(request[id],
+							       &prev->fence);
+			if (err) {
+				i915_add_request(request[id]);
+				pr_err("%s: Request await failed for %s with err=%d\n",
+				       __func__, engine->name, err);
+				goto out_request;
+			}
+		}
+
+		err = engine->emit_flush(request[id], EMIT_INVALIDATE);
+		GEM_BUG_ON(err);
+
+		err = i915_switch_context(request[id]);
+		GEM_BUG_ON(err);
+
+		err = engine->emit_bb_start(request[id],
+					    batch->node.start,
+					    batch->node.size,
+					    0);
+		GEM_BUG_ON(err);
+		request[id]->batch = batch;
+
+		i915_vma_move_to_active(batch, request[id], 0);
+		i915_gem_object_set_active_reference(batch->obj);
+		i915_vma_get(batch);
+
+		i915_gem_request_get(request[id]);
+		i915_add_request(request[id]);
+
+		prev = request[id];
+	}
+
+	for_each_engine(engine, i915, id) {
+		long timeout;
+		u32 *cmd;
+
+		if (i915_gem_request_completed(request[id])) {
+			pr_err("%s(%s): request completed too early!\n",
+			       __func__, engine->name);
+			err = -EINVAL;
+			goto out_request;
+		}
+
+		cmd = i915_gem_object_pin_map(request[id]->batch->obj,
+					      I915_MAP_WC);
+		if (IS_ERR(cmd)) {
+			err = PTR_ERR(cmd);
+			pr_err("%s: failed to WC map batch, err=%d\n", __func__, err);
+			goto out_request;
+		}
+		*cmd = MI_BATCH_BUFFER_END;
+		wmb();
+		i915_gem_object_unpin_map(request[id]->batch->obj);
+
+		timeout = i915_wait_request(request[id],
+					    I915_WAIT_LOCKED,
+					    MAX_SCHEDULE_TIMEOUT);
+		if (timeout < 0) {
+			err = timeout;
+			pr_err("%s: error waiting for request on %s, err=%d\n",
+			       __func__, engine->name, err);
+			goto out_request;
+		}
+
+		GEM_BUG_ON(!i915_gem_request_completed(request[id]));
+	}
+
+	err = end_live_test(&t);
+
+out_request:
+	for_each_engine(engine, i915, id) {
+		u32 *cmd;
+
+		if (!request[id])
+			break;
+
+		cmd = i915_gem_object_pin_map(request[id]->batch->obj,
+					      I915_MAP_WC);
+		if (!IS_ERR(cmd)) {
+			*cmd = MI_BATCH_BUFFER_END;
+			wmb();
+			i915_gem_object_unpin_map(request[id]->batch->obj);
+		}
+
+		i915_vma_put(request[id]->batch);
+		i915_gem_request_put(request[id]);
+	}
+out_unlock:
+	mutex_unlock(&i915->drm.struct_mutex);
+	return err;
+}
+
 int i915_gem_request_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(live_nop_request),
 		SUBTEST(live_all_engines),
+		SUBTEST(live_sequential_engines),
 	};
 	return i915_subtests(tests, i915);
 }