[090/190] drm/i915: Refactor execlists default context pinning
diff mbox

Message ID 1452509174-16671-4-git-send-email-chris@chris-wilson.co.uk
State New
Headers show

Commit Message

Chris Wilson Jan. 11, 2016, 10:44 a.m. UTC
Refactor pinning and unpinning of contexts, such that the default
context for an engine is pinned during initialisation and unpinned
during teardown (pinning of the context handles the reference counting).
Thus we can eliminate the special-case handling of the default context
that previously masked the fact that it was not pinned through the normal path.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c     |   7 +-
 drivers/gpu/drm/i915/i915_gem_request.c |   6 +-
 drivers/gpu/drm/i915/intel_lrc.c        | 117 +++++++++++++-------------------
 drivers/gpu/drm/i915/intel_lrc.h        |   3 +-
 4 files changed, 53 insertions(+), 80 deletions(-)

Patch
diff mbox

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a5ea90944bbb..ea5b9f6d0fc9 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2052,11 +2052,8 @@  static int i915_dump_lrc(struct seq_file *m, void *unused)
 		return ret;
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
-		for_each_ring(ring, dev_priv, i) {
-			if (ring->default_context != ctx)
-				i915_dump_lrc_obj(m, ring,
-						  ctx->engine[i].state);
-		}
+		for_each_ring(ring, dev_priv, i)
+			i915_dump_lrc_obj(m, ring, ctx->engine[i].state);
 	}
 
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 069c0b9dfd95..61be8dda4a14 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -345,10 +345,8 @@  static void __i915_gem_request_retire_active(struct drm_i915_gem_request *req)
 void i915_gem_request_cancel(struct drm_i915_gem_request *req)
 {
 	intel_ring_reserved_space_cancel(req->ring);
-	if (i915.enable_execlists) {
-		if (req->ctx != req->engine->default_context)
-			intel_lr_context_unpin(req);
-	}
+	if (i915.enable_execlists)
+		intel_lr_context_unpin(req->ctx, req->engine);
 
 	/* If a request is to be discarded after actions have been queued upon
 	 * it, we cannot unwind that request and it must be submitted rather
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 80b346a3fd8a..31fbb482d15c 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -227,7 +227,8 @@  enum {
 #define GEN8_CTX_ID_SHIFT 32
 #define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT  0x17
 
-static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
+static int intel_lr_context_pin(struct intel_context *ctx,
+				struct intel_engine_cs *engine);
 static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
 		struct drm_i915_gem_object *default_ctx_obj);
 
@@ -485,11 +486,9 @@  int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 
 	request->ring = request->ctx->engine[request->engine->id].ring;
 
-	if (request->ctx != request->engine->default_context) {
-		ret = intel_lr_context_pin(request);
-		if (ret)
-			return ret;
-	}
+	ret = intel_lr_context_pin(request->ctx, request->engine);
+	if (ret)
+		return ret;
 
 	if (i915.enable_guc_submission) {
 		/*
@@ -521,13 +520,7 @@  bool intel_execlists_retire_requests(struct intel_engine_cs *ring)
 	spin_unlock(&ring->execlist_lock);
 
 	list_for_each_entry_safe(req, tmp, &list, execlist_link) {
-		struct intel_context *ctx = req->ctx;
-		struct drm_i915_gem_object *ctx_obj =
-				ctx->engine[ring->id].state;
-
-		if (ctx_obj && (ctx != ring->default_context))
-			intel_lr_context_unpin(req);
-
+		intel_lr_context_unpin(req->ctx, req->engine);
 		i915_gem_request_put(req);
 	}
 
@@ -557,83 +550,73 @@  void intel_logical_ring_stop(struct intel_engine_cs *ring)
 	I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
-static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
-		struct drm_i915_gem_object *ctx_obj,
-		struct intel_ring *ringbuf)
+static int intel_lr_context_pin(struct intel_context *ctx,
+				struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = ring->i915;
+	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_gem_object *ctx_obj;
+	struct intel_ring *ring;
 	u32 ggtt_offset;
 	int ret = 0;
 
-	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+	if (ctx->engine[engine->id].pin_count++)
+		return 0;
+
+	lockdep_assert_held(&engine->dev->struct_mutex);
+
+	ctx_obj = ctx->engine[engine->id].state;
 	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
 				    PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
 	if (ret)
-		return ret;
+		goto err;
 
-	ret = intel_ring_map(ringbuf);
+	ring = ctx->engine[engine->id].ring;
+	ret = intel_ring_map(ring);
 	if (ret)
 		goto unpin_ctx_obj;
 
+	i915_gem_context_reference(ctx);
 	ctx_obj->dirty = true;
 
 	ggtt_offset =
 		i915_gem_obj_ggtt_offset(ctx_obj) + LRC_PPHWSP_PN * PAGE_SIZE;
-	ringbuf->context_descriptor =
-		ggtt_offset | ring->execlist_context_descriptor;
+	ring->context_descriptor =
+		ggtt_offset | engine->execlist_context_descriptor;
 
-	ringbuf->registers =
+	ring->registers =
 		kmap(i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN));
-	ringbuf->registers[CTX_RING_BUFFER_START+1] =
-		i915_gem_obj_ggtt_offset(ringbuf->obj);
+	ring->registers[CTX_RING_BUFFER_START+1] =
+		i915_gem_obj_ggtt_offset(ring->obj);
 
 	/* Invalidate GuC TLB. */
 	if (i915.enable_guc_submission)
 		I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
 
-	return ret;
+	return 0;
 
 unpin_ctx_obj:
 	i915_gem_object_ggtt_unpin(ctx_obj);
-
+err:
+	ctx->engine[engine->id].pin_count = 0;
 	return ret;
 }
 
-static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
-{
-	int engine = rq->engine->id;
-	int ret;
-
-	if (rq->ctx->engine[engine].pin_count++)
-		return 0;
-
-	ret = intel_lr_context_do_pin(rq->engine,
-				      rq->ctx->engine[engine].state,
-				      rq->ring);
-	if (ret) {
-		rq->ctx->engine[engine].pin_count = 0;
-		return ret;
-	}
-
-	i915_gem_context_reference(rq->ctx);
-	return 0;
-}
-
-void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
+void intel_lr_context_unpin(struct intel_context *ctx,
+			    struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_object *ctx_obj;
-	int engine = rq->engine->id;
 
-	WARN_ON(!mutex_is_locked(&rq->i915->dev->struct_mutex));
-	if (--rq->ctx->engine[engine].pin_count)
+	lockdep_assert_held(&engine->dev->struct_mutex);
+	if (--ctx->engine[engine->id].pin_count)
 		return;
 
-	intel_ring_unmap(rq->ring);
+	intel_ring_unmap(ctx->engine[engine->id].ring);
 
-	ctx_obj = rq->ctx->engine[engine].state;
+	ctx_obj = ctx->engine[engine->id].state;
 	kunmap(i915_gem_object_get_page(ctx_obj, LRC_STATE_PN));
 	i915_gem_object_ggtt_unpin(ctx_obj);
-	i915_gem_context_unreference(rq->ctx);
+
+	i915_gem_context_unreference(ctx);
 }
 
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
@@ -1425,6 +1408,7 @@  void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
 		kunmap(sg_page(ring->status_page.obj->pages->sgl));
 		ring->status_page.obj = NULL;
 	}
+	intel_lr_context_unpin(ring->default_context, ring);
 
 	lrc_destroy_wa_ctx_obj(ring);
 	ring->dev = NULL;
@@ -1433,6 +1417,7 @@  void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
 static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_context *ctx;
 	struct task_struct *task;
 	int ret;
 
@@ -1457,19 +1442,17 @@  static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
 	if (ret)
 		goto error;
 
-	ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
+	ctx = ring->default_context;
+
+	ret = intel_lr_context_deferred_alloc(ctx, ring);
 	if (ret)
 		goto error;
 
 	/* As this is the default context, always pin it */
-	ret = intel_lr_context_do_pin(
-			ring,
-			ring->default_context->engine[ring->id].state,
-			ring->default_context->engine[ring->id].ring);
+	ret = intel_lr_context_pin(ctx, ring);
 	if (ret) {
-		DRM_ERROR(
-			"Failed to pin and map ringbuffer %s: %d\n",
-			ring->name, ret);
+		DRM_ERROR("Failed to pin context for %s: %d\n",
+			  ring->name, ret);
 		goto error;
 	}
 
@@ -1872,15 +1855,9 @@  void intel_lr_context_free(struct intel_context *ctx)
 		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
 
 		if (ctx_obj) {
-			struct intel_ring *ring = ctx->engine[i].ring;
-			struct intel_engine_cs *engine = ring->engine;
+			WARN_ON(ctx->engine[i].pin_count);
 
-			if (ctx == engine->default_context) {
-				intel_ring_unmap(ring);
-				i915_gem_object_ggtt_unpin(ctx_obj);
-			}
-			WARN_ON(ctx->engine[engine->id].pin_count);
-			intel_ring_free(ring);
+			intel_ring_free(ctx->engine[i].ring);
 			drm_gem_object_unreference(&ctx_obj->base);
 		}
 	}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 37601a35d5fc..a43d1e5e5f5a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -71,7 +71,8 @@  void intel_lr_context_free(struct intel_context *ctx);
 uint32_t intel_lr_context_size(struct intel_engine_cs *ring);
 int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 				    struct intel_engine_cs *ring);
-void intel_lr_context_unpin(struct drm_i915_gem_request *req);
+void intel_lr_context_unpin(struct intel_context *ctx,
+			    struct intel_engine_cs *engine);
 void intel_lr_context_reset(struct drm_device *dev,
 			struct intel_context *ctx);