diff mbox

[33/50] drm/i915/bdw: Always write seqno to default context

Message ID 1399637360-4277-34-git-send-email-oscar.mateo@intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

oscar.mateo@intel.com May 9, 2014, 12:09 p.m. UTC
From: Oscar Mateo <oscar.mateo@intel.com>

Even though we have one Hardware Status Page per context, we are still
managing the seqnos per engine. Therefore, the sequence number must be
written to a consistent place for all contexts: one of the global
default contexts.

Signed-off-by: Thomas Daniel <thomas.daniel@intel.com>

v2: Since get_seqno and set_seqno now look for the seqno in the engine's
status page, they don't need to be changed.

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
---
 drivers/gpu/drm/i915/i915_reg.h         |  1 +
 drivers/gpu/drm/i915/intel_ringbuffer.c | 67 ++++++++++++++++++++++++++++++++-
 2 files changed, 67 insertions(+), 1 deletion(-)
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 33d007d..2e76ec0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -259,6 +259,7 @@ 
 #define   MI_FORCE_RESTORE		(1<<1)
 #define   MI_RESTORE_INHIBIT		(1<<0)
 #define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
+#define MI_STORE_DWORD_IMM_GEN8	MI_INSTR(0x20, 2)
 #define   MI_MEM_VIRTUAL	(1 << 22) /* 965+ only */
 #define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
 #define   MI_STORE_DWORD_INDEX_SHIFT 2
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 5e4e3f7..d38d824 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -781,6 +781,66 @@  gen6_add_request(struct intel_engine *ring,
 	return 0;
 }
 
+/* Write the request's seqno via MI_FLUSH_DW into the *default*
+ * context's HWSP, keeping seqnos per-engine even though every
+ * LRC context carries its own status page (BDW execlists). */
+static int
+gen8_nonrender_add_request_lrc(struct intel_engine *ring,
+			       struct i915_hw_context *ctx)
+{
+	struct intel_ringbuffer *ringbuf;
+	struct i915_hw_context *dctx = ring->default_context;
+	struct drm_i915_gem_object *obj = dctx->engine[ring->id].obj;
+	u32 cmd;
+
+	ringbuf = intel_ringbuffer_begin(ring, ctx, 6);
+	if (IS_ERR_OR_NULL(ringbuf))
+		return ringbuf ? PTR_ERR(ringbuf) : -ENOMEM; /* PTR_ERR(NULL) == 0 */
+
+	cmd = (MI_FLUSH_DW + 1) | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW;
+	intel_ringbuffer_emit(ringbuf, cmd);
+	intel_ringbuffer_emit(ringbuf,
+			((i915_gem_obj_ggtt_offset(obj)) +
+			(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)) |
+			MI_FLUSH_DW_USE_GTT);
+	intel_ringbuffer_emit(ringbuf, 0); /* upper 32 bits of address */
+	intel_ringbuffer_emit(ringbuf, ring->outstanding_lazy_seqno);
+	intel_ringbuffer_emit(ringbuf, MI_USER_INTERRUPT);
+	intel_ringbuffer_emit(ringbuf, MI_NOOP);
+	intel_ringbuffer_advance_and_submit(ring, ctx);
+
+	return 0;
+}
+
+/* Render-engine variant: write the seqno into the default context's
+ * HWSP with a gen8 MI_STORE_DWORD_IMM through the global GTT. */
+static int
+gen8_add_request_lrc(struct intel_engine *ring,
+		     struct i915_hw_context *ctx)
+{
+	struct intel_ringbuffer *ringbuf;
+	struct i915_hw_context *dctx = ring->default_context;
+	struct drm_i915_gem_object *obj = dctx->engine[ring->id].obj;
+	u32 cmd;
+
+	ringbuf = intel_ringbuffer_begin(ring, ctx, 6);
+	if (IS_ERR_OR_NULL(ringbuf))
+		return ringbuf ? PTR_ERR(ringbuf) : -ENOMEM; /* PTR_ERR(NULL) == 0 */
+
+	cmd = MI_STORE_DWORD_IMM_GEN8 | (1 << 22); /* bit 22: use global GTT */
+	intel_ringbuffer_emit(ringbuf, cmd);
+	intel_ringbuffer_emit(ringbuf,
+			((i915_gem_obj_ggtt_offset(obj)) +
+			(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
+	intel_ringbuffer_emit(ringbuf, 0); /* upper 32 bits of address */
+	intel_ringbuffer_emit(ringbuf, ring->outstanding_lazy_seqno);
+	intel_ringbuffer_emit(ringbuf, MI_USER_INTERRUPT);
+	intel_ringbuffer_emit(ringbuf, MI_NOOP);
+	intel_ringbuffer_advance_and_submit(ring, ctx);
+
+	return 0;
+}
+
 static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
 					      u32 seqno)
 {
@@ -2047,6 +2107,7 @@  int intel_init_render_ring(struct drm_device *dev)
 			if (dev_priv->lrc_enabled) {
 				ring->submit = gen8_submit_ctx;
 				ring->init = init_render_ring_lrc;
+				ring->add_request = gen8_add_request_lrc;
 			}
 			ring->flush = gen8_render_ring_flush;
 			ring->irq_get = gen8_ring_get_irq;
@@ -2224,6 +2285,7 @@  int intel_init_bsd_ring(struct drm_device *dev)
 			if (dev_priv->lrc_enabled) {
 				ring->submit = gen8_submit_ctx;
 				ring->init = init_ring_common_lrc;
+				ring->add_request = gen8_nonrender_add_request_lrc;
 			}
 			ring->flush = gen8_ring_flush;
 			ring->irq_enable_mask =
@@ -2294,13 +2356,14 @@  int intel_init_bsd2_ring(struct drm_device *dev)
 
 	if (dev_priv->lrc_enabled) {
 		ring->submit = gen8_submit_ctx;
+		ring->add_request = gen8_nonrender_add_request_lrc;
 		ring->init = init_ring_common_lrc;
 	} else {
 		ring->submit = ring_write_tail;
+		ring->add_request = gen6_add_request;
 		ring->init = init_ring_common;
 	}
 	ring->flush = gen8_ring_flush;
-	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
 	ring->irq_enable_mask =
@@ -2344,6 +2407,7 @@  int intel_init_blt_ring(struct drm_device *dev)
 		if (dev_priv->lrc_enabled) {
 			ring->submit = gen8_submit_ctx;
 			ring->init = init_ring_common_lrc;
+			ring->add_request = gen8_nonrender_add_request_lrc;
 		}
 		ring->flush = gen8_ring_flush;
 		ring->irq_enable_mask =
@@ -2395,6 +2459,7 @@  int intel_init_vebox_ring(struct drm_device *dev)
 		if (dev_priv->lrc_enabled) {
 			ring->submit = gen8_submit_ctx;
 			ring->init = init_ring_common_lrc;
+			ring->add_request = gen8_nonrender_add_request_lrc;
 		}
 		ring->flush = gen8_ring_flush;
 		ring->irq_enable_mask =