
[30/50] drm/i915/bdw: LR context ring init

Message ID 1399637360-4277-31-git-send-email-oscar.mateo@intel.com
State New, archived

Commit Message

oscar.mateo@intel.com May 9, 2014, 12:09 p.m. UTC
From: Ben Widawsky <benjamin.widawsky@intel.com>

Logical ring contexts do not need most of the legacy ring init: we only need
the pipe control object for the render ring and a few workarounds (more will
be added later).

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 67 +++++++++++++++++++++++++++------
 1 file changed, 55 insertions(+), 12 deletions(-)
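Below is a minimal, self-contained sketch (not i915 code) of the vfunc-selection pattern this patch moves to: each engine gets its legacy ->init default assigned up front, and the Gen8+ logical-ring-context path then overrides ->submit and ->init together. The struct and function names are simplified stand-ins for intel_engine, init_ring_common, init_ring_common_lrc, ring_write_tail and gen8_submit_ctx, chosen here only for illustration.

```c
/*
 * Sketch of the init/submit vfunc selection used in this patch.
 * Plain C with placeholder types, not the driver's real structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct engine {
	const char *name;
	void (*init)(struct engine *e);
	void (*submit)(struct engine *e);
};

static void init_legacy(struct engine *e) { printf("%s: full legacy ring init\n", e->name); }
static void init_lrc(struct engine *e)    { printf("%s: minimal LRC init\n", e->name); }
static void submit_tail(struct engine *e) { printf("%s: ring_write_tail\n", e->name); }
static void submit_ctx(struct engine *e)  { printf("%s: gen8_submit_ctx\n", e->name); }

static void setup_engine(struct engine *e, int gen, bool lrc_enabled)
{
	/* Defaults: legacy tail submission and the full ring init. */
	e->submit = submit_tail;
	e->init = init_legacy;

	/*
	 * Gen8+ with logical ring contexts overrides both vfuncs in one
	 * place, which is why the default assignment has to happen before
	 * the gen >= 8 branch (the patch moves it there).
	 */
	if (gen >= 8 && lrc_enabled) {
		e->submit = submit_ctx;
		e->init = init_lrc;
	}
}

int main(void)
{
	struct engine rcs = { .name = "render" };

	setup_engine(&rcs, 8, true);
	rcs.init(&rcs);
	rcs.submit(&rcs);
	return 0;
}
```

The point of the reordering in the real patch is the same as in the sketch: with the default assigned first, the LRC branch only has to override it, instead of every non-LRC path having to remember to set ->init at the end.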

Patch

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 03719b0..35e89c9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -565,6 +565,11 @@  out:
 	return ret;
 }
 
+static int init_ring_common_lrc(struct intel_engine *ring)
+{
+	return 0;
+}
+
 static int
 init_pipe_control(struct intel_engine *ring)
 {
@@ -663,6 +668,35 @@  static int init_render_ring(struct intel_engine *ring)
 	return ret;
 }
 
+static int init_render_ring_lrc(struct intel_engine *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	int ret;
+
+	ret = init_ring_common_lrc(ring);
+	if (ret)
+		return ret;
+
+	/* We need to disable the AsyncFlip performance optimisations in order
+	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
+	 * programmed to '1' on all products.
+	 *
+	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw
+	 */
+	if (INTEL_INFO(dev)->gen >= 6)
+		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+	ret = init_pipe_control(ring);
+	if (ret)
+		return ret;
+
+	if (INTEL_INFO(dev)->gen >= 6)
+		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
+
+	return 0;
+}
+
 static void render_ring_cleanup(struct intel_engine *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -1990,14 +2024,17 @@  int intel_init_render_ring(struct drm_device *dev)
 	struct intel_engine *ring = &dev_priv->ring[RCS];
 
 	ring->submit = ring_write_tail;
+	ring->init = init_render_ring;
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
 		ring->flush = gen7_render_ring_flush;
 		if (INTEL_INFO(dev)->gen == 6)
 			ring->flush = gen6_render_ring_flush;
 		if (INTEL_INFO(dev)->gen >= 8) {
-			if (dev_priv->lrc_enabled)
+			if (dev_priv->lrc_enabled) {
 				ring->submit = gen8_submit_ctx;
+				ring->init = init_render_ring_lrc;
+			}
 			ring->flush = gen8_render_ring_flush;
 			ring->irq_get = gen8_ring_get_irq;
 			ring->irq_put = gen8_ring_put_irq;
@@ -2065,7 +2102,6 @@  int intel_init_render_ring(struct drm_device *dev)
 		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
 	else
 		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
-	ring->init = init_render_ring;
 	ring->cleanup = render_ring_cleanup;
 
 	/* Workaround batchbuffer to combat CS tlb bug. */
@@ -2163,6 +2199,7 @@  int intel_init_bsd_ring(struct drm_device *dev)
 	struct intel_engine *ring = &dev_priv->ring[VCS];
 
 	ring->submit = ring_write_tail;
+	ring->init = init_ring_common;
 	if (INTEL_INFO(dev)->gen >= 6) {
 		/* gen6 bsd needs a special wa for tail updates */
 		if (IS_GEN6(dev))
@@ -2172,8 +2209,10 @@  int intel_init_bsd_ring(struct drm_device *dev)
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
 		if (INTEL_INFO(dev)->gen >= 8) {
-			if (dev_priv->lrc_enabled)
+			if (dev_priv->lrc_enabled) {
 				ring->submit = gen8_submit_ctx;
+				ring->init = init_ring_common_lrc;
+			}
 			ring->irq_enable_mask =
 				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
 			ring->irq_get = gen8_ring_get_irq;
@@ -2221,7 +2260,6 @@  int intel_init_bsd_ring(struct drm_device *dev)
 		}
 		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
 	}
-	ring->init = init_ring_common;
 
 	return intel_init_ring(dev, ring);
 }
@@ -2240,10 +2278,13 @@  int intel_init_bsd2_ring(struct drm_device *dev)
 		return -EINVAL;
 	}
 
-	if (dev_priv->lrc_enabled)
+	if (dev_priv->lrc_enabled) {
 		ring->submit = gen8_submit_ctx;
-	else
+		ring->init = init_ring_common_lrc;
+	} else {
 		ring->submit = ring_write_tail;
+		ring->init = init_ring_common;
+	}
 	ring->flush = gen6_bsd_ring_flush;
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
@@ -2272,8 +2313,6 @@  int intel_init_bsd2_ring(struct drm_device *dev)
 	ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
 	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 
-	ring->init = init_ring_common;
-
 	return intel_init_ring(dev, ring);
 }
 
@@ -2283,13 +2322,16 @@  int intel_init_blt_ring(struct drm_device *dev)
 	struct intel_engine *ring = &dev_priv->ring[BCS];
 
 	ring->submit = ring_write_tail;
+	ring->init = init_ring_common;
 	ring->flush = gen6_ring_flush;
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
 	if (INTEL_INFO(dev)->gen >= 8) {
-		if (dev_priv->lrc_enabled)
+		if (dev_priv->lrc_enabled) {
 			ring->submit = gen8_submit_ctx;
+			ring->init = init_ring_common_lrc;
+		}
 		ring->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
 		ring->irq_get = gen8_ring_get_irq;
@@ -2319,7 +2361,6 @@  int intel_init_blt_ring(struct drm_device *dev)
 	ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
 	ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
 	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-	ring->init = init_ring_common;
 
 	return intel_init_ring(dev, ring);
 }
@@ -2330,14 +2371,17 @@  int intel_init_vebox_ring(struct drm_device *dev)
 	struct intel_engine *ring = &dev_priv->ring[VECS];
 
 	ring->submit = ring_write_tail;
+	ring->init = init_ring_common;
 	ring->flush = gen6_ring_flush;
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
 
 	if (INTEL_INFO(dev)->gen >= 8) {
-		if (dev_priv->lrc_enabled)
+		if (dev_priv->lrc_enabled) {
 			ring->submit = gen8_submit_ctx;
+			ring->init = init_ring_common_lrc;
+		}
 		ring->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
 		ring->irq_get = gen8_ring_get_irq;
@@ -2361,7 +2405,6 @@  int intel_init_vebox_ring(struct drm_device *dev)
 	ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
 	ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
 	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-	ring->init = init_ring_common;
 
 	return intel_init_ring(dev, ring);
 }