diff mbox series

[22/37] drm/i915/gem: Include secure batch in common execbuf pinning

Message ID 20200805122231.23313-23-chris@chris-wilson.co.uk
State New, archived
Headers show
Series Replace obj->mm.lock with reservation_ww_class | expand

Commit Message

Chris Wilson Aug. 5, 2020, 12:22 p.m. UTC
Pull the GGTT binding for the secure batch dispatch into the common vma
pinning routine for execbuf, so that there is a single central place
for all i915_vma_pin() calls.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Thomas Hellström <thomas.hellstrom@intel.com>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 88 +++++++++++--------
 1 file changed, 51 insertions(+), 37 deletions(-)
diff mbox series

Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 236d4ad3516b..19cab5541dbc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1674,6 +1674,48 @@  static int eb_alloc_cmdparser(struct i915_execbuffer *eb)
 	return err;
 }
 
+static int eb_secure_batch(struct i915_execbuffer *eb)
+{
+	struct i915_vma *vma = eb->batch->vma;
+
+	/*
+	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
+	 * batch" bit. Hence we need to pin secure batches into the global gtt.
+	 * hsw should have this fixed, but bdw mucks it up again.
+	 */
+	if (!(eb->batch_flags & I915_DISPATCH_SECURE))
+		return 0;
+
+	if (GEM_WARN_ON(vma->vm != &eb->engine->gt->ggtt->vm)) {
+		struct eb_vma *ev;
+
+		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+		if (!ev)
+			return -ENOMEM;
+
+		vma = i915_vma_instance(vma->obj,
+					&eb->engine->gt->ggtt->vm,
+					NULL);
+		if (IS_ERR(vma)) {
+			kfree(ev);
+			return PTR_ERR(vma);
+		}
+
+		ev->vma = i915_vma_get(vma);
+		ev->exec = &no_entry;
+
+		list_add(&ev->submit_link, &eb->submit_list);
+		list_add(&ev->reloc_link, &eb->array->aux_list);
+		list_add(&ev->bind_link, &eb->bind_list);
+
+		GEM_BUG_ON(eb->batch->vma->private);
+		eb->batch = ev;
+	}
+
+	eb->batch->flags |= EXEC_OBJECT_NEEDS_GTT;
+	return 0;
+}
+
 static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
 {
 	if (eb->args->flags & I915_EXEC_BATCH_FIRST)
@@ -1823,6 +1865,10 @@  static int eb_lookup_vmas(struct i915_execbuffer *eb)
 	if (err)
 		return err;
 
+	err = eb_secure_batch(eb);
+	if (err)
+		return err;
+
 	return 0;
 }
 
@@ -2798,7 +2844,7 @@  static int eb_parse(struct i915_execbuffer *eb)
 	return 0;
 }
 
-static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
+static int eb_submit(struct i915_execbuffer *eb)
 {
 	int err;
 
@@ -2825,7 +2871,7 @@  static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
 	}
 
 	err = eb->engine->emit_bb_start(eb->request,
-					batch->node.start +
+					eb->batch->vma->node.start +
 					eb->batch_start_offset,
 					eb->batch_len,
 					eb->batch_flags);
@@ -3486,7 +3532,6 @@  i915_gem_do_execbuffer(struct drm_device *dev,
 	struct i915_execbuffer eb;
 	struct dma_fence *in_fence = NULL;
 	struct sync_file *out_fence = NULL;
-	struct i915_vma *batch;
 	int out_fence_fd = -1;
 	int err;
 
@@ -3601,34 +3646,6 @@  i915_gem_do_execbuffer(struct drm_device *dev,
 	if (err)
 		goto err_vma;
 
-	/*
-	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
-	 * batch" bit. Hence we need to pin secure batches into the global gtt.
-	 * hsw should have this fixed, but bdw mucks it up again. */
-	batch = i915_vma_get(eb.batch->vma);
-	if (eb.batch_flags & I915_DISPATCH_SECURE) {
-		struct i915_vma *vma;
-
-		/*
-		 * So on first glance it looks freaky that we pin the batch here
-		 * outside of the reservation loop. But:
-		 * - The batch is already pinned into the relevant ppgtt, so we
-		 *   already have the backing storage fully allocated.
-		 * - No other BO uses the global gtt (well contexts, but meh),
-		 *   so we don't really have issues with multiple objects not
-		 *   fitting due to fragmentation.
-		 * So this is actually safe.
-		 */
-		vma = i915_gem_object_ggtt_pin(batch->obj, NULL, 0, 0, 0);
-		if (IS_ERR(vma)) {
-			err = PTR_ERR(vma);
-			goto err_vma;
-		}
-
-		GEM_BUG_ON(vma->obj != batch->obj);
-		batch = vma;
-	}
-
 	/* All GPU relocation batches must be submitted prior to the user rq */
 	GEM_BUG_ON(eb.reloc_cache.rq);
 
@@ -3636,7 +3653,7 @@  i915_gem_do_execbuffer(struct drm_device *dev,
 	eb.request = __i915_request_create(eb.context, GFP_KERNEL);
 	if (IS_ERR(eb.request)) {
 		err = PTR_ERR(eb.request);
-		goto err_batch_unpin;
+		goto err_vma;
 	}
 	eb.request->cookie = lockdep_pin_lock(&eb.context->timeline->mutex);
 
@@ -3673,13 +3690,13 @@  i915_gem_do_execbuffer(struct drm_device *dev,
 	 * inactive_list and lose its active reference. Hence we do not need
 	 * to explicitly hold another reference here.
 	 */
-	eb.request->batch = batch;
+	eb.request->batch = eb.batch->vma;
 	if (eb.parser.shadow)
 		intel_gt_buffer_pool_mark_active(eb.parser.shadow->vma->private,
 						 eb.request);
 
 	trace_i915_request_queue(eb.request, eb.batch_flags);
-	err = eb_submit(&eb, batch);
+	err = eb_submit(&eb);
 err_request:
 	i915_request_get(eb.request);
 	eb_request_add(&eb);
@@ -3689,9 +3706,6 @@  i915_gem_do_execbuffer(struct drm_device *dev,
 
 	i915_request_put(eb.request);
 
-err_batch_unpin:
-	if (eb.batch_flags & I915_DISPATCH_SECURE)
-		i915_vma_unpin(batch);
 err_vma:
 	eb_unlock_engine(&eb);
 	/* *** TIMELINE UNLOCK *** */