[29/50] drm/i915/bdw: Execlists ring tail writing

Message ID: 1399637360-4277-30-git-send-email-oscar.mateo@intel.com
State: New, archived

Commit Message

oscar.mateo@intel.com May 9, 2014, 12:08 p.m. UTC
From: Oscar Mateo <oscar.mateo@intel.com>

The write tail function is a very special place for execlists: all access
to the ring is mediated through requests (thanks to Chris Wilson's
"Write RING_TAIL once per-request" for that) and every request ends with a
tail write, so this is the place we are going to use to submit contexts
for execution.

For the moment, just mark the place (we still need to do a lot of
preparation before execlists are ready to start submitting things).

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)
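
For illustration only, a minimal sketch of the dispatch point this patch creates:
each request ends with exactly one tail write, and that write goes through the
engine's submit hook, which is either the legacy ring_write_tail() or, when
dev_priv->lrc_enabled is set, the new gen8_submit_ctx() stub. The submit_request()
wrapper below and its placement are assumptions made for the example; only the
hook signature and the function names come from the diff that follows.

/*
 * Illustrative only -- submit_request() is a hypothetical wrapper, not a
 * function added by this patch.  It shows the single funnel point the
 * commit message describes: one tail write per request, dispatched
 * through the engine's submit vfunc.
 */
static void submit_request(struct intel_engine *ring,
			   struct i915_hw_context *ctx, u32 tail)
{
	/*
	 * Legacy path: ring_write_tail() pokes RING_TAIL directly.
	 * Execlists path (dev_priv->lrc_enabled): gen8_submit_ctx(),
	 * which for now only prints an error, but will later submit
	 * the context for execution instead of writing the register.
	 */
	ring->submit(ring, ctx, tail);
}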

Patch

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index eef7094..03719b0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -430,6 +430,12 @@  static void ring_write_tail(struct intel_engine *ring,
 	I915_WRITE_TAIL(ring, value);
 }
 
+static void gen8_submit_ctx(struct intel_engine *ring,
+			    struct i915_hw_context *ctx, u32 value)
+{
+	DRM_ERROR("Execlists still not ready!\n");
+}
+
 u64 intel_ring_get_active_head(struct intel_engine *ring)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1983,12 +1989,15 @@  int intel_init_render_ring(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine *ring = &dev_priv->ring[RCS];
 
+	ring->submit = ring_write_tail;
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
 		ring->flush = gen7_render_ring_flush;
 		if (INTEL_INFO(dev)->gen == 6)
 			ring->flush = gen6_render_ring_flush;
 		if (INTEL_INFO(dev)->gen >= 8) {
+			if (dev_priv->lrc_enabled)
+				ring->submit = gen8_submit_ctx;
 			ring->flush = gen8_render_ring_flush;
 			ring->irq_get = gen8_ring_get_irq;
 			ring->irq_put = gen8_ring_put_irq;
@@ -2043,7 +2052,7 @@  int intel_init_render_ring(struct drm_device *dev)
 		}
 		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
-	ring->submit = ring_write_tail;
+
 	if (IS_HASWELL(dev))
 		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
 	else if (IS_GEN8(dev))
@@ -2163,6 +2172,8 @@  int intel_init_bsd_ring(struct drm_device *dev)
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
 		if (INTEL_INFO(dev)->gen >= 8) {
+			if (dev_priv->lrc_enabled)
+				ring->submit = gen8_submit_ctx;
 			ring->irq_enable_mask =
 				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
 			ring->irq_get = gen8_ring_get_irq;
@@ -2229,7 +2240,10 @@  int intel_init_bsd2_ring(struct drm_device *dev)
 		return -EINVAL;
 	}
 
-	ring->submit = ring_write_tail;
+	if (dev_priv->lrc_enabled)
+		ring->submit = gen8_submit_ctx;
+	else
+		ring->submit = ring_write_tail;
 	ring->flush = gen6_bsd_ring_flush;
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
@@ -2274,6 +2288,8 @@  int intel_init_blt_ring(struct drm_device *dev)
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
 	if (INTEL_INFO(dev)->gen >= 8) {
+		if (dev_priv->lrc_enabled)
+			ring->submit = gen8_submit_ctx;
 		ring->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
 		ring->irq_get = gen8_ring_get_irq;
@@ -2320,6 +2336,8 @@  int intel_init_vebox_ring(struct drm_device *dev)
 	ring->set_seqno = ring_set_seqno;
 
 	if (INTEL_INFO(dev)->gen >= 8) {
+		if (dev_priv->lrc_enabled)
+			ring->submit = gen8_submit_ctx;
 		ring->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
 		ring->irq_get = gen8_ring_get_irq;