
drm/i915: Defer LRC unpin and release

Message ID 1446847421-28788-1-git-send-email-yu.dai@intel.com (mailing list archive)
State New, archived

Commit Message

yu.dai@intel.com Nov. 6, 2015, 10:03 p.m. UTC
From: Alex Dai <yu.dai@intel.com>

The LRC can't be freed (or even unpinned) immediately after all of its
referenced requests have completed, because the HW still needs a short
period of time to save data to the LRC status page. It is only safe to
free the LRC once the HW completes a request from a different LRC.

Introduce a new function, intel_lr_context_do_unpin, that does the
actual unpin work. When an LRC's pin count reaches zero, move it to
ring->retired_ctx and take an extra reference on the context to make
sure it won't be freed immediately. When the HW completes the next
request from a different LRC, do the actual unpin. If the context is
pinned again in the meantime, drop the extra reference and clear it
from ring->retired_ctx.
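
For illustration only (not part of the patch): below is a minimal,
self-contained C model of the lifecycle described above, assuming a
single engine and no locking. The types and names (struct engine,
struct lrc, pin, unpin) are placeholders invented for this sketch, not
the i915 ones.

#include <assert.h>
#include <stdio.h>

struct lrc {
	int pin_count;
	int ref_count;
};

struct engine {
	struct lrc *retired_ctx;	/* LRC awaiting its deferred unpin */
};

static void pin(struct engine *e, struct lrc *c)
{
	c->pin_count++;
	/* Re-pinned before the deferred unpin ran: cancel the retirement. */
	if (e->retired_ctx == c) {
		c->pin_count--;
		c->ref_count--;
		e->retired_ctx = NULL;
	}
}

static void unpin(struct engine *e, struct lrc *c)
{
	/* A request from a different LRC completed: the HW is done saving
	 * state for the retired LRC, so it can really be released now. */
	if (e->retired_ctx && e->retired_ctx != c) {
		e->retired_ctx->pin_count--;
		e->retired_ctx->ref_count--;
		e->retired_ctx = NULL;	/* the actual unpin happens here */
	}
	/* Last pin dropped: don't release yet, park the LRC as retired. */
	if (--c->pin_count == 0) {
		c->pin_count++;
		c->ref_count++;
		e->retired_ctx = c;
	}
}

int main(void)
{
	struct engine e = { 0 };
	struct lrc a = { 0, 1 }, b = { 0, 1 };

	pin(&e, &a);
	unpin(&e, &a);			/* a is parked in e.retired_ctx */
	assert(e.retired_ctx == &a && a.pin_count == 1);

	pin(&e, &b);
	unpin(&e, &b);			/* completing b's request releases a... */
	assert(a.pin_count == 0);
	assert(e.retired_ctx == &b);	/* ...and parks b in its place */

	printf("deferred-unpin model ok\n");
	return 0;
}

The key invariant is that at most one LRC per engine sits in the
retired slot, pinned and referenced, until a request from some other
LRC completes.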

Signed-off-by: Alex Dai <yu.dai@intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c        | 80 +++++++++++++++++++++++++++++----
 drivers/gpu/drm/i915/intel_ringbuffer.h |  1 +
 2 files changed, 73 insertions(+), 8 deletions(-)

Patch

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 06180dc..bd2d705 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1039,6 +1039,57 @@  unpin_ctx_obj:
 	return ret;
 }
 
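+/*
+ * Perform the deferred unpin of a retired LRC: unpin the context state
+ * object and its ringbuffer from the GGTT and mark the state object
+ * dirty so the HW-saved contents are written back. With GuC submission
+ * enabled, the GuC TLB is invalidated as well.
+ */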
+static void intel_lr_context_do_unpin(struct intel_engine_cs *ring,
+		struct intel_context *ctx)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *ctx_obj;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	if (!ctx)
+		return;
+
+	ctx_obj = ctx->engine[ring->id].state;
+	if (!ctx_obj)
+		return;
+
+	i915_gem_object_ggtt_unpin(ctx_obj);
+	intel_unpin_ringbuffer_obj(ctx->engine[ring->id].ringbuf);
+
+	ctx_obj->dirty = true;
+
+	/* Invalidate GuC TLB. */
+	if (i915.enable_guc_submission)
+		I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+}
+
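+/*
+ * set_retired_lrc() both sets and clears the retired slot: with a
+ * non-NULL ctx it parks the context in ring->retired_ctx, taking an
+ * extra pin and reference so the context survives until its deferred
+ * unpin; with ctx == NULL it drops that extra pin and reference and
+ * clears the slot.
+ */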
+static void set_retired_lrc(struct intel_engine_cs *ring,
+		struct intel_context *ctx)
+{
+	struct intel_context *last = ring->retired_ctx;
+
+	if (WARN_ON(ctx == last))
+		return;
+
+	/* Either the actual unpin is already done or the context is pinned again. */
+	if (ctx == NULL) {
+		last->engine[ring->id].pin_count--;
+		i915_gem_context_unreference(last);
+		ring->retired_ctx = NULL;
+		return;
+	}
+
+	/* The last retired LRC should have been handled already. */
+	WARN_ON(last);
+
+	/*
+	 * The LRC is being retired. Take an extra reference so it isn't
+	 * released immediately; the release is deferred until the next
+	 * request completes.
+	 */
+	ctx->engine[ring->id].pin_count++;
+	i915_gem_context_reference(ctx);
+	ring->retired_ctx = ctx;
+}
+
 static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 {
 	int ret = 0;
@@ -1051,6 +1102,10 @@  static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 		if (ret)
 			goto reset_pin_count;
 	}
+
+	if (ring->retired_ctx == rq->ctx)
+		set_retired_lrc(ring, NULL);
+
 	return ret;
 
 reset_pin_count:
@@ -1061,16 +1116,20 @@  reset_pin_count:
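+/*
+ * Called on request retirement. If a previously retired LRC is waiting
+ * and this request belongs to a different context, the HW has moved on,
+ * so it is now safe to do the deferred unpin. If this request drops the
+ * last pin on its own context, park that context as the retired LRC.
+ */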
 void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
 {
 	struct intel_engine_cs *ring = rq->ring;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = rq->ringbuf;
 
-	if (ctx_obj) {
-		WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-		if (--rq->ctx->engine[ring->id].pin_count == 0) {
-			intel_unpin_ringbuffer_obj(ringbuf);
-			i915_gem_object_ggtt_unpin(ctx_obj);
-		}
+	if (!rq->ctx->engine[ring->id].state)
+		return;
+
+	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
+	/* HW has completed a request from a different LRC; do the actual unpin. */
+	if (ring->retired_ctx && ring->retired_ctx != rq->ctx) {
+		intel_lr_context_do_unpin(ring, ring->retired_ctx);
+		set_retired_lrc(ring, NULL);
 	}
+
+	if (--rq->ctx->engine[ring->id].pin_count == 0)
+		set_retired_lrc(ring, rq->ctx);
 }
 
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
@@ -1908,6 +1967,11 @@  void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
 	}
 
 	lrc_destroy_wa_ctx_obj(ring);
+
+	if (ring->retired_ctx) {
+		intel_lr_context_do_unpin(ring, ring->retired_ctx);
+		set_retired_lrc(ring, NULL);
+	}
 }
 
 static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 58b1976..1f5e9bd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -267,6 +267,7 @@  struct  intel_engine_cs {
 	spinlock_t execlist_lock;
 	struct list_head execlist_queue;
 	struct list_head execlist_retired_req_list;
+	struct intel_context *retired_ctx;
 	u8 next_context_status_buffer;
 	u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
 	int		(*emit_request)(struct drm_i915_gem_request *request);