diff mbox series

drm/i915/selftests: Verify whitelist of context registers

Message ID 20190415174155.11203-1-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Headers show
Series drm/i915/selftests: Verify whitelist of context registers | expand

Commit Message

Chris Wilson April 15, 2019, 5:41 p.m. UTC
The RING_NONPRIV allows us to add registers to a whitelist that allows
userspace to modify them. Ideally such registers should be safe and
saved within the context such that they do not impact system behaviour
for other users. This selftest verifies that those registers we do add
are (a) then writable by userspace and (b) only affect a single client.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 .../drm/i915/selftests/intel_workarounds.c    | 289 ++++++++++++++++++
 1 file changed, 289 insertions(+)

Comments

Dan Carpenter April 16, 2019, 2:49 p.m. UTC | #1
Hi Chris,

Thank you for the patch! Perhaps something to improve:

url:    https://github.com/0day-ci/linux/commits/Chris-Wilson/drm-i915-selftests-Verify-whitelist-of-context-registers/20190416-105231
base:   git://anongit.freedesktop.org/drm-intel for-linux-next

New smatch warnings:
drivers/gpu/drm/i915/selftests/intel_workarounds.c:846 scrub_whitelisted_registers() error: uninitialized symbol 'err'.

# https://github.com/0day-ci/linux/commit/a9ce77a003ecaa54f530f1e9ff81cbae11380380
git remote add linux-review https://github.com/0day-ci/linux
git remote update linux-review
git checkout a9ce77a003ecaa54f530f1e9ff81cbae11380380
vim +/PTR_ERR +759 drivers/gpu/drm/i915/selftests/intel_workarounds.c

a9ce77a0 Chris Wilson 2019-04-15  794  static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
a9ce77a0 Chris Wilson 2019-04-15  795  				       struct intel_engine_cs *engine)
a9ce77a0 Chris Wilson 2019-04-15  796  {
a9ce77a0 Chris Wilson 2019-04-15  797  	intel_wakeref_t wakeref;
a9ce77a0 Chris Wilson 2019-04-15  798  	struct i915_request *rq;
a9ce77a0 Chris Wilson 2019-04-15  799  	struct i915_vma *batch;
a9ce77a0 Chris Wilson 2019-04-15  800  	int i, err;
a9ce77a0 Chris Wilson 2019-04-15  801  	u32 *cs;
a9ce77a0 Chris Wilson 2019-04-15  802  
a9ce77a0 Chris Wilson 2019-04-15  803  	batch = create_batch(ctx);
a9ce77a0 Chris Wilson 2019-04-15  804  	if (IS_ERR(batch))
a9ce77a0 Chris Wilson 2019-04-15  805  		return PTR_ERR(batch);
a9ce77a0 Chris Wilson 2019-04-15  806  
a9ce77a0 Chris Wilson 2019-04-15  807  	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
a9ce77a0 Chris Wilson 2019-04-15  808  	if (IS_ERR(cs)) {
a9ce77a0 Chris Wilson 2019-04-15  809  		err = PTR_ERR(cs);
a9ce77a0 Chris Wilson 2019-04-15  810  		goto err_batch;
a9ce77a0 Chris Wilson 2019-04-15  811  	}
a9ce77a0 Chris Wilson 2019-04-15  812  
a9ce77a0 Chris Wilson 2019-04-15  813  	*cs++ = MI_LOAD_REGISTER_IMM(engine->whitelist.count);
a9ce77a0 Chris Wilson 2019-04-15  814  	for (i = 0; i < engine->whitelist.count; i++) {
a9ce77a0 Chris Wilson 2019-04-15  815  		*cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
a9ce77a0 Chris Wilson 2019-04-15  816  		*cs++ = STACK_MAGIC;
a9ce77a0 Chris Wilson 2019-04-15  817  	}
a9ce77a0 Chris Wilson 2019-04-15  818  	*cs++ = MI_BATCH_BUFFER_END;
a9ce77a0 Chris Wilson 2019-04-15  819  
a9ce77a0 Chris Wilson 2019-04-15  820  	i915_gem_object_flush_map(batch->obj);
a9ce77a0 Chris Wilson 2019-04-15  821  	i915_gem_chipset_flush(ctx->i915);
a9ce77a0 Chris Wilson 2019-04-15  822  
a9ce77a0 Chris Wilson 2019-04-15  823  	rq = ERR_PTR(-ENODEV);
a9ce77a0 Chris Wilson 2019-04-15  824  	with_intel_runtime_pm(engine->i915, wakeref)
a9ce77a0 Chris Wilson 2019-04-15  825  		rq = i915_request_alloc(engine, ctx);
a9ce77a0 Chris Wilson 2019-04-15  826  	if (IS_ERR(rq))
a9ce77a0 Chris Wilson 2019-04-15  827  		goto err_unpin;
                                                ^^^^^^^^^^^^^^
"err" not set.

a9ce77a0 Chris Wilson 2019-04-15  828  
a9ce77a0 Chris Wilson 2019-04-15  829  	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
a9ce77a0 Chris Wilson 2019-04-15  830  		err = engine->emit_init_breadcrumb(rq);
a9ce77a0 Chris Wilson 2019-04-15  831  		if (err)
a9ce77a0 Chris Wilson 2019-04-15  832  			goto err_request;
a9ce77a0 Chris Wilson 2019-04-15  833  	}
a9ce77a0 Chris Wilson 2019-04-15  834  
a9ce77a0 Chris Wilson 2019-04-15  835  	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
a9ce77a0 Chris Wilson 2019-04-15  836  
a9ce77a0 Chris Wilson 2019-04-15  837  err_request:
a9ce77a0 Chris Wilson 2019-04-15  838  	i915_request_add(rq);
a9ce77a0 Chris Wilson 2019-04-15  839  	if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
a9ce77a0 Chris Wilson 2019-04-15  840  		err = -EIO;
a9ce77a0 Chris Wilson 2019-04-15  841  
a9ce77a0 Chris Wilson 2019-04-15  842  err_unpin:
a9ce77a0 Chris Wilson 2019-04-15  843  	i915_gem_object_unpin_map(batch->obj);
a9ce77a0 Chris Wilson 2019-04-15  844  err_batch:
a9ce77a0 Chris Wilson 2019-04-15  845  	i915_vma_unpin_and_release(&batch, 0);
a9ce77a0 Chris Wilson 2019-04-15 @846  	return err;
a9ce77a0 Chris Wilson 2019-04-15  847  }
a9ce77a0 Chris Wilson 2019-04-15  848  

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
diff mbox series

Patch

diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index a363748a7a4f..d71b5b062d6f 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -700,6 +700,294 @@  static int live_reset_whitelist(void *arg)
 	return err;
 }
 
+/*
+ * Capture the current value of every whitelisted register on @engine into
+ * the @results buffer, by submitting a request on @ctx that issues one
+ * MI_STORE_REGISTER_MEM per register.
+ *
+ * Returns 0 on success or a negative errno; waits up to HZ/5 (~200ms)
+ * for the request to complete and reports -EIO on timeout.
+ */
+static int read_whitelisted_registers(struct i915_gem_context *ctx,
+				      struct intel_engine_cs *engine,
+				      struct i915_vma *results)
+{
+	intel_wakeref_t wakeref;
+	struct i915_request *rq;
+	u32 srm, *cs;
+	int err, i;
+
+	/* Request allocation needs the device awake; hold a wakeref around it */
+	rq = ERR_PTR(-ENODEV);
+	with_intel_runtime_pm(engine->i915, wakeref)
+		rq = i915_request_alloc(engine, ctx);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	/* Track @results so the GPU write is serialised/retired correctly */
+	err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
+	if (err)
+		goto err_req;
+
+	/* srm+1 selects the gen8+ SRM opcode (64b address) — presumably all
+	 * whitelist-capable engines are gen8+; TODO confirm for gen<8, as 4
+	 * dwords per register are always emitted below.
+	 */
+	srm = MI_STORE_REGISTER_MEM;
+	if (INTEL_GEN(ctx->i915) >= 8)
+		srm++;
+
+	/* 4 dwords per register: opcode, reg offset, lower/upper address */
+	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto err_req;
+	}
+
+	for (i = 0; i < engine->whitelist.count; i++) {
+		/* Each register value lands at a consecutive u32 slot */
+		u64 offset = results->node.start + sizeof(u32) * i;
+
+		*cs++ = srm;
+		*cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+		*cs++ = lower_32_bits(offset);
+		*cs++ = upper_32_bits(offset);
+	}
+	intel_ring_advance(rq, cs);
+
+err_req:
+	/* The request must always be added once allocated, even on error */
+	i915_request_add(rq);
+
+	if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
+		err = -EIO;
+
+	return err;
+}
+
+/*
+ * Overwrite every whitelisted register on @engine with a poison value
+ * (STACK_MAGIC) via an MI_LOAD_REGISTER_IMM batch submitted on @ctx,
+ * emulating what userspace is permitted to do through the whitelist.
+ *
+ * Returns 0 on success or a negative errno; waits up to HZ/5 (~200ms)
+ * for the batch to complete and reports -EIO on timeout.
+ */
+static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
+				       struct intel_engine_cs *engine)
+{
+	intel_wakeref_t wakeref;
+	struct i915_request *rq;
+	struct i915_vma *batch;
+	int i, err;
+	u32 *cs;
+
+	batch = create_batch(ctx);
+	if (IS_ERR(batch))
+		return PTR_ERR(batch);
+
+	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto err_batch;
+	}
+
+	/* One LRI header, then (offset, poison) pairs for each register */
+	*cs++ = MI_LOAD_REGISTER_IMM(engine->whitelist.count);
+	for (i = 0; i < engine->whitelist.count; i++) {
+		*cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+		*cs++ = STACK_MAGIC;
+	}
+	*cs++ = MI_BATCH_BUFFER_END;
+
+	i915_gem_object_flush_map(batch->obj);
+	i915_gem_chipset_flush(ctx->i915);
+
+	rq = ERR_PTR(-ENODEV);
+	with_intel_runtime_pm(engine->i915, wakeref)
+		rq = i915_request_alloc(engine, ctx);
+	if (IS_ERR(rq)) {
+		/* Fix: err was previously left uninitialized on this path */
+		err = PTR_ERR(rq);
+		goto err_unpin;
+	}
+
+	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
+		err = engine->emit_init_breadcrumb(rq);
+		if (err)
+			goto err_request;
+	}
+
+	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
+
+err_request:
+	/* The request must always be added once allocated, even on error */
+	i915_request_add(rq);
+	if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
+		err = -EIO;
+
+err_unpin:
+	i915_gem_object_unpin_map(batch->obj);
+err_batch:
+	i915_vma_unpin_and_release(&batch, 0);
+	return err;
+}
+
+/*
+ * Report whether @reg is one of the whitelisted registers that must be
+ * pardoned (skipped) when comparing context images on this platform.
+ */
+static bool ignore_reg(struct drm_i915_private *i915, i915_reg_t reg)
+{
+	/* Alas, we must pardon some whitelists */
+	static const struct {
+		i915_reg_t reg;
+		unsigned long gen_mask;
+	} pardon[] = {
+		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
+		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
+	};
+	const u32 target = i915_mmio_reg_offset(reg);
+	int n;
+
+	for (n = 0; n < ARRAY_SIZE(pardon); n++) {
+		if (!(INTEL_INFO(i915)->gen_mask & pardon[n].gen_mask))
+			continue;
+
+		if (i915_mmio_reg_offset(pardon[n].reg) == target)
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * Verify that two register snapshots @A and @B are equal for every
+ * whitelisted register on @engine (excluding pardoned registers).
+ * Used to prove a second context observes the default values, i.e. the
+ * whitelisted registers are saved/restored per context.
+ *
+ * Returns 0 if all (non-ignored) registers match, -EINVAL otherwise,
+ * or a pin error.
+ */
+static int eq_whitelisted_registers(struct i915_vma *A,
+				    struct i915_vma *B,
+				    struct intel_engine_cs *engine)
+{
+	u32 *a, *b;
+	int i, err;
+
+	a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
+	if (IS_ERR(a))
+		return PTR_ERR(a);
+
+	b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
+	if (IS_ERR(b)) {
+		err = PTR_ERR(b);
+		goto err_a;
+	}
+
+	/* Report every mismatch rather than stopping at the first */
+	err = 0;
+	for (i = 0; i < engine->whitelist.count; i++) {
+		if (a[i] != b[i] &&
+		    !ignore_reg(engine->i915, engine->whitelist.list[i].reg)) {
+			pr_err("[%d] Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
+			       i, i915_mmio_reg_offset(engine->whitelist.list[i].reg),
+			       a[i], b[i]);
+			err = -EINVAL;
+		}
+	}
+
+	i915_gem_object_unpin_map(B->obj);
+err_a:
+	i915_gem_object_unpin_map(A->obj);
+	return err;
+}
+
+/*
+ * Verify that two register snapshots @A and @B differ for every
+ * whitelisted register on @engine, i.e. the scrub pass actually managed
+ * to write each register.
+ *
+ * Returns 0 if all registers differ, -EINVAL otherwise, or a pin error.
+ */
+static int neq_whitelisted_registers(struct i915_vma *A,
+				     struct i915_vma *B,
+				     struct intel_engine_cs *engine)
+{
+	u32 *map_a, *map_b;
+	int idx, ret;
+
+	map_a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
+	if (IS_ERR(map_a))
+		return PTR_ERR(map_a);
+
+	map_b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
+	if (IS_ERR(map_b)) {
+		ret = PTR_ERR(map_b);
+		goto out_a;
+	}
+
+	/* Flag every unwritable register rather than stopping at the first */
+	ret = 0;
+	for (idx = 0; idx < engine->whitelist.count; idx++) {
+		if (map_a[idx] != map_b[idx])
+			continue;
+
+		pr_err("[%d] Whitelist register 0x%4x:%08x was unwritable\n",
+		       idx,
+		       i915_mmio_reg_offset(engine->whitelist.list[idx].reg),
+		       map_a[idx]);
+		ret = -EINVAL;
+	}
+
+	i915_gem_object_unpin_map(B->obj);
+out_a:
+	i915_gem_object_unpin_map(A->obj);
+	return ret;
+}
+
+/*
+ * Selftest: verify that a write into a whitelisted register works, but is
+ * invisible to a second context — i.e. the whitelisted registers are both
+ * user-writable and context-local.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int live_isolated_whitelist(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct {
+		struct i915_gem_context *ctx;
+		struct i915_vma *scratch[2];
+	} client[2] = {};
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int i, err = 0;
+
+	/*
+	 * Check that a write into a whitelist register works, but
+	 * invisible to a second context.
+	 */
+
+	for (i = 0; i < ARRAY_SIZE(client); i++) {
+		struct i915_gem_context *c;
+
+		c = kernel_context(i915);
+		if (IS_ERR(c)) {
+			/* Fix: propagate the error instead of returning 0 */
+			err = PTR_ERR(c);
+			goto err;
+		}
+
+		client[i].scratch[0] = create_scratch(&c->ppgtt->vm, 1024);
+		if (IS_ERR(client[i].scratch[0])) {
+			err = PTR_ERR(client[i].scratch[0]);
+			kernel_context_close(c);
+			goto err;
+		}
+
+		client[i].scratch[1] = create_scratch(&c->ppgtt->vm, 1024);
+		if (IS_ERR(client[i].scratch[1])) {
+			err = PTR_ERR(client[i].scratch[1]);
+			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+			kernel_context_close(c);
+			goto err;
+		}
+
+		client[i].ctx = c;
+	}
+
+	for_each_engine(engine, i915, id) {
+		if (!engine->whitelist.count)
+			continue;
+
+		/* Read default values */
+		err = read_whitelisted_registers(client[0].ctx, engine,
+						 client[0].scratch[0]);
+		if (err)
+			goto err;
+
+		/* Try to overwrite registers (should only affect ctx0) */
+		err = scrub_whitelisted_registers(client[0].ctx, engine);
+		if (err)
+			goto err;
+
+		/* Read values from ctx1, we expect these to be defaults */
+		err = read_whitelisted_registers(client[1].ctx, engine,
+						 client[1].scratch[0]);
+		if (err)
+			goto err;
+
+		/* Verify that both reads return the same default values */
+		err = eq_whitelisted_registers(client[0].scratch[0],
+					       client[1].scratch[0],
+					       engine);
+		if (err)
+			goto err;
+
+		/* Read back the updated values in ctx0 */
+		err = read_whitelisted_registers(client[0].ctx, engine,
+						 client[0].scratch[1]);
+		if (err)
+			goto err;
+
+		/*
+		 * User should be granted privilege to overwrite regs.
+		 * Fix: compare ctx0's before/after snapshots — the old code
+		 * compared against client[1].scratch[1], which was never
+		 * written, making the check meaningless.
+		 */
+		err = neq_whitelisted_registers(client[0].scratch[0],
+						client[0].scratch[1],
+						engine);
+		if (err)
+			goto err;
+	}
+
+err:
+	for (i = 0; i < ARRAY_SIZE(client); i++) {
+		if (!client[i].ctx)
+			break;
+
+		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
+		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+		kernel_context_close(client[i].ctx);
+	}
+
+	if (igt_flush_test(i915, I915_WAIT_LOCKED))
+		err = -EIO;
+
+	return err;
+}
+
 static bool verify_gt_engine_wa(struct drm_i915_private *i915,
 				struct wa_lists *lists, const char *str)
 {
@@ -844,6 +1132,7 @@  int intel_workarounds_live_selftests(struct drm_i915_private *i915)
 	static const struct i915_subtest tests[] = {
 		SUBTEST(live_dirty_whitelist),
 		SUBTEST(live_reset_whitelist),
+		SUBTEST(live_isolated_whitelist),
 		SUBTEST(live_gpu_reset_gt_engine_workarounds),
 		SUBTEST(live_engine_reset_gt_engine_workarounds),
 	};