
[07/50] drm/i915: Split the ringbuffers and the rings

Message ID 1399637360-4277-8-git-send-email-oscar.mateo@intel.com (mailing list archive)
State New, archived

Commit Message

oscar.mateo@intel.com May 9, 2014, 12:08 p.m. UTC
From: Oscar Mateo <oscar.mateo@intel.com>

Following the logic of the previous patch, the ringbuffers and the rings belong
in separate structs. For the time being, we keep the relationship between the
two via the default_ringbuf embedded in each ring (see the abridged sketch
after the diffstat).

This commit should not introduce functional changes (unless I made an error,
that is).

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
---
 drivers/gpu/drm/i915/i915_dma.c         |  25 ++++---
 drivers/gpu/drm/i915/i915_gem.c         |   2 +-
 drivers/gpu/drm/i915/i915_gpu_error.c   |   6 +-
 drivers/gpu/drm/i915/i915_irq.c         |   9 +--
 drivers/gpu/drm/i915/intel_ringbuffer.c | 123 ++++++++++++++++++--------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  61 ++++++++++------
 6 files changed, 131 insertions(+), 95 deletions(-)
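
In short, the state that actually backs the buffer (obj, virtual_start, head,
tail, space, size, effective_size, last_retired_head) moves out of
struct intel_engine into a new struct intel_ringbuffer, and callers reach it
through a temporary __get_ringbuf() helper until ringbuffers become
per-context. An abridged sketch of the layout the patch below introduces
(engine fields elided for brevity):

    struct intel_ringbuffer {
    	struct drm_i915_gem_object *obj;	/* backing GEM object */
    	void __iomem *virtual_start;		/* CPU mapping of the buffer */

    	u32 head;
    	u32 tail;
    	int space;
    	int size;
    	int effective_size;
    	u32 last_retired_head;			/* -1 once consumed */
    };

    struct intel_engine {
    	/* ... name, id, mmio_base, status page, irq state, vfuncs ... */
    	struct intel_ringbuffer default_ringbuf;
    };

    /* Temporary accessor while each engine still owns exactly one buffer */
    static inline struct intel_ringbuffer *__get_ringbuf(struct intel_engine *ring)
    {
    	return &ring->default_ringbuf;
    }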

Patch

diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 5263d63..8ec8963 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -47,6 +47,8 @@ 
 
 #define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
 
+#define LP_RINGBUF(d) (&((struct drm_i915_private *)(d))->ring[RCS].default_ringbuf)
+
 #define BEGIN_LP_RING(n) \
 	intel_ring_begin(LP_RING(dev_priv), (n))
 
@@ -63,7 +65,7 @@ 
  * has access to the ring.
  */
 #define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
-	if (LP_RING(dev->dev_private)->obj == NULL)			\
+	if (LP_RINGBUF(dev->dev_private)->obj == NULL)			\
 		LOCK_TEST_WITH_RETURN(dev, file);			\
 } while (0)
 
@@ -140,6 +142,7 @@  void i915_kernel_lost_context(struct drm_device * dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 	struct intel_engine *ring = LP_RING(dev_priv);
+	struct intel_ringbuffer *ringbuf = LP_RINGBUF(dev_priv);
 
 	/*
 	 * We should never lose context on the ring with modesetting
@@ -148,17 +151,17 @@  void i915_kernel_lost_context(struct drm_device * dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
-	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
-	if (ring->space < 0)
-		ring->space += ring->size;
+	ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+	ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+	ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
+	if (ringbuf->space < 0)
+		ringbuf->space += ringbuf->size;
 
 	if (!dev->primary->master)
 		return;
 
 	master_priv = dev->primary->master->driver_priv;
-	if (ring->head == ring->tail && master_priv->sarea_priv)
+	if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
 }
 
@@ -201,7 +204,7 @@  static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	}
 
 	if (init->ring_size != 0) {
-		if (LP_RING(dev_priv)->obj != NULL) {
+		if (LP_RINGBUF(dev_priv)->obj != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
@@ -238,7 +241,7 @@  static int i915_dma_resume(struct drm_device * dev)
 
 	DRM_DEBUG_DRIVER("%s\n", __func__);
 
-	if (ring->virtual_start == NULL) {
+	if (__get_ringbuf(ring)->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
@@ -360,7 +363,7 @@  static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i, ret;
 
-	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
+	if ((dwords+1) * sizeof(int) >= LP_RINGBUF(dev_priv)->size - 8)
 		return -EINVAL;
 
 	for (i = 0; i < dwords;) {
@@ -823,7 +826,7 @@  static int i915_irq_emit(struct drm_device *dev, void *data,
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
 
-	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
+	if (!dev_priv || !LP_RINGBUF(dev_priv)->virtual_start) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a3b697b..d9253c4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2494,7 +2494,7 @@  i915_gem_retire_requests_ring(struct intel_engine *ring)
 		 * of tail of the request to update the last known position
 		 * of the GPU head.
 		 */
-		ring->last_retired_head = request->tail;
+		__get_ringbuf(ring)->last_retired_head = request->tail;
 
 		i915_gem_free_request(request);
 	}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0853db3..a7b165f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -823,8 +823,8 @@  static void i915_record_ring_state(struct drm_device *dev,
 		ering->hws = I915_READ(mmio);
 	}
 
-	ering->cpu_ring_head = ring->head;
-	ering->cpu_ring_tail = ring->tail;
+	ering->cpu_ring_head = __get_ringbuf(ring)->head;
+	ering->cpu_ring_tail = __get_ringbuf(ring)->tail;
 
 	ering->hangcheck_score = ring->hangcheck.score;
 	ering->hangcheck_action = ring->hangcheck.action;
@@ -928,7 +928,7 @@  static void i915_gem_record_rings(struct drm_device *dev,
 		}
 
 		error->ring[i].ringbuffer =
-			i915_error_ggtt_object_create(dev_priv, ring->obj);
+			i915_error_ggtt_object_create(dev_priv, __get_ringbuf(ring)->obj);
 
 		if (ring->status_page.obj)
 			error->ring[i].hws_page =
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 58c8812..e0c3a01 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1079,7 +1079,7 @@  static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 static void notify_ring(struct drm_device *dev,
 			struct intel_engine *ring)
 {
-	if (ring->obj == NULL)
+	if (!intel_ring_initialized(ring))
 		return;
 
 	trace_i915_gem_request_complete(ring);
@@ -2610,6 +2610,7 @@  static struct intel_engine *
 semaphore_waits_for(struct intel_engine *ring, u32 *seqno)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
 	u32 cmd, ipehr, head;
 	int i;
 
@@ -2632,10 +2633,10 @@  semaphore_waits_for(struct intel_engine *ring, u32 *seqno)
 		 * our ring is smaller than what the hardware (and hence
 		 * HEAD_ADDR) allows. Also handles wrap-around.
 		 */
-		head &= ring->size - 1;
+		head &= ringbuf->size - 1;
 
 		/* This here seems to blow up */
-		cmd = ioread32(ring->virtual_start + head);
+		cmd = ioread32(ringbuf->virtual_start + head);
 		if (cmd == ipehr)
 			break;
 
@@ -2645,7 +2646,7 @@  semaphore_waits_for(struct intel_engine *ring, u32 *seqno)
 	if (!i)
 		return NULL;
 
-	*seqno = ioread32(ring->virtual_start + head + 4) + 1;
+	*seqno = ioread32(ringbuf->virtual_start + head + 4) + 1;
 	return semaphore_wait_to_signaller_ring(ring, ipehr);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 4c3cc44..f02c21e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -42,9 +42,11 @@ 
 
 static inline int ring_space(struct intel_engine *ring)
 {
-	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
+	struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
+
+	int space = (ringbuf->head & HEAD_ADDR) - (ringbuf->tail + I915_RING_FREE_SPACE);
 	if (space < 0)
-		space += ring->size;
+		space += ringbuf->size;
 	return space;
 }
 
@@ -56,10 +58,12 @@  static bool intel_ring_stopped(struct intel_engine *ring)
 
 void __intel_ring_advance(struct intel_engine *ring)
 {
-	ring->tail &= ring->size - 1;
+	struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
+
+	ringbuf->tail &= ringbuf->size - 1;
 	if (intel_ring_stopped(ring))
 		return;
-	ring->write_tail(ring, ring->tail);
+	ring->write_tail(ring, ringbuf->tail);
 }
 
 static int
@@ -476,7 +480,8 @@  static int init_ring_common(struct intel_engine *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj = ring->obj;
+	struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
+	struct drm_i915_gem_object *obj = ringbuf->obj;
 	int ret = 0;
 
 	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
@@ -515,7 +520,7 @@  static int init_ring_common(struct intel_engine *ring)
 	 * register values. */
 	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
 	I915_WRITE_CTL(ring,
-			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
@@ -535,10 +540,10 @@  static int init_ring_common(struct intel_engine *ring)
 	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
 		i915_kernel_lost_context(ring->dev);
 	else {
-		ring->head = I915_READ_HEAD(ring);
-		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ring->space = ring_space(ring);
-		ring->last_retired_head = -1;
+		ringbuf->head = I915_READ_HEAD(ring);
+		ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+		ringbuf->space = ring_space(ring);
+		ringbuf->last_retired_head = -1;
 	}
 
 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@@ -1370,13 +1375,15 @@  static int init_phys_status_page(struct intel_engine *ring)
 
 void intel_destroy_ring_buffer(struct intel_engine *ring)
 {
-	if (!ring->obj)
+	struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
+
+	if (!ringbuf->obj)
 		return;
 
-	iounmap(ring->virtual_start);
-	i915_gem_object_ggtt_unpin(ring->obj);
-	drm_gem_object_unreference(&ring->obj->base);
-	ring->obj = NULL;
+	iounmap(ringbuf->virtual_start);
+	i915_gem_object_ggtt_unpin(ringbuf->obj);
+	drm_gem_object_unreference(&ringbuf->obj->base);
+	ringbuf->obj = NULL;
 }
 
 int intel_allocate_ring_buffer(struct intel_engine *ring)
@@ -1384,16 +1391,17 @@  int intel_allocate_ring_buffer(struct intel_engine *ring)
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
+	struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
 	int ret;
 
-	if (ring->obj)
+	if (ringbuf->obj)
 		return 0;
 
 	obj = NULL;
 	if (!HAS_LLC(dev))
-		obj = i915_gem_object_create_stolen(dev, ring->size);
+		obj = i915_gem_object_create_stolen(dev, ringbuf->size);
 	if (obj == NULL)
-		obj = i915_gem_alloc_object(dev, ring->size);
+		obj = i915_gem_alloc_object(dev, ringbuf->size);
 	if (obj == NULL)
 		return -ENOMEM;
 
@@ -1405,15 +1413,15 @@  int intel_allocate_ring_buffer(struct intel_engine *ring)
 	if (ret)
 		goto err_unpin;
 
-	ring->virtual_start =
+	ringbuf->virtual_start =
 		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
-			   ring->size);
-	if (ring->virtual_start == NULL) {
+			   ringbuf->size);
+	if (ringbuf->virtual_start == NULL) {
 		ret = -EINVAL;
 		goto err_unpin;
 	}
 
-	ring->obj = obj;
+	ringbuf->obj = obj;
 	return 0;
 
 err_unpin:
@@ -1426,11 +1434,12 @@  err_unref:
 static int intel_init_ring_buffer(struct drm_device *dev,
 				  struct intel_engine *ring)
 {
+	struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
 	int ret;
 
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	ring->size = 32 * PAGE_SIZE;
+	ringbuf->size = 32 * PAGE_SIZE;
 	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
 	init_waitqueue_head(&ring->irq_queue);
@@ -1456,9 +1465,9 @@  static int intel_init_ring_buffer(struct drm_device *dev,
 	 * the TAIL pointer points to within the last 2 cachelines
 	 * of the buffer.
 	 */
-	ring->effective_size = ring->size;
+	ringbuf->effective_size = ringbuf->size;
 	if (IS_I830(dev) || IS_845G(dev))
-		ring->effective_size -= 2 * CACHELINE_BYTES;
+		ringbuf->effective_size -= 2 * CACHELINE_BYTES;
 
 	i915_cmd_parser_init_ring(ring);
 
@@ -1468,8 +1477,9 @@  static int intel_init_ring_buffer(struct drm_device *dev,
 void intel_cleanup_ring_buffer(struct intel_engine *ring)
 {
 	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+	struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
 
-	if (ring->obj == NULL)
+	if (ringbuf->obj == NULL)
 		return;
 
 	intel_stop_ring_buffer(ring);
@@ -1488,15 +1498,16 @@  void intel_cleanup_ring_buffer(struct intel_engine *ring)
 static int intel_ring_wait_request(struct intel_engine *ring, int n)
 {
 	struct drm_i915_gem_request *request;
+	struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
 	u32 seqno = 0, tail;
 	int ret;
 
-	if (ring->last_retired_head != -1) {
-		ring->head = ring->last_retired_head;
-		ring->last_retired_head = -1;
+	if (ringbuf->last_retired_head != -1) {
+		ringbuf->head = ringbuf->last_retired_head;
+		ringbuf->last_retired_head = -1;
 
-		ring->space = ring_space(ring);
-		if (ring->space >= n)
+		ringbuf->space = ring_space(ring);
+		if (ringbuf->space >= n)
 			return 0;
 	}
 
@@ -1506,9 +1517,9 @@  static int intel_ring_wait_request(struct intel_engine *ring, int n)
 		if (request->tail == -1)
 			continue;
 
-		space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
+		space = request->tail - (ringbuf->tail + I915_RING_FREE_SPACE);
 		if (space < 0)
-			space += ring->size;
+			space += ringbuf->size;
 		if (space >= n) {
 			seqno = request->seqno;
 			tail = request->tail;
@@ -1530,9 +1541,9 @@  static int intel_ring_wait_request(struct intel_engine *ring, int n)
 	if (ret)
 		return ret;
 
-	ring->head = tail;
-	ring->space = ring_space(ring);
-	if (WARN_ON(ring->space < n))
+	ringbuf->head = tail;
+	ringbuf->space = ring_space(ring);
+	if (WARN_ON(ringbuf->space < n))
 		return -ENOSPC;
 
 	return 0;
@@ -1542,6 +1553,7 @@  static int ring_wait_for_space(struct intel_engine *ring, int n)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
 	unsigned long end;
 	int ret;
 
@@ -1561,9 +1573,9 @@  static int ring_wait_for_space(struct intel_engine *ring, int n)
 	end = jiffies + 60 * HZ;
 
 	do {
-		ring->head = I915_READ_HEAD(ring);
-		ring->space = ring_space(ring);
-		if (ring->space >= n) {
+		ringbuf->head = I915_READ_HEAD(ring);
+		ringbuf->space = ring_space(ring);
+		if (ringbuf->space >= n) {
 			trace_i915_ring_wait_end(ring);
 			return 0;
 		}
@@ -1589,21 +1601,22 @@  static int ring_wait_for_space(struct intel_engine *ring, int n)
 static int intel_wrap_ring_buffer(struct intel_engine *ring)
 {
 	uint32_t __iomem *virt;
-	int rem = ring->size - ring->tail;
+	struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
+	int rem = ringbuf->size - ringbuf->tail;
 
-	if (ring->space < rem) {
+	if (ringbuf->space < rem) {
 		int ret = ring_wait_for_space(ring, rem);
 		if (ret)
 			return ret;
 	}
 
-	virt = ring->virtual_start + ring->tail;
+	virt = ringbuf->virtual_start + ringbuf->tail;
 	rem /= 4;
 	while (rem--)
 		iowrite32(MI_NOOP, virt++);
 
-	ring->tail = 0;
-	ring->space = ring_space(ring);
+	ringbuf->tail = 0;
+	ringbuf->space = ring_space(ring);
 
 	return 0;
 }
@@ -1653,15 +1666,16 @@  intel_ring_alloc_seqno(struct intel_engine *ring)
 static int __intel_ring_prepare(struct intel_engine *ring,
 				int bytes)
 {
+	struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
 	int ret;
 
-	if (unlikely(ring->tail + bytes > ring->effective_size)) {
+	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
 		ret = intel_wrap_ring_buffer(ring);
 		if (unlikely(ret))
 			return ret;
 	}
 
-	if (unlikely(ring->space < bytes)) {
+	if (unlikely(ringbuf->space < bytes)) {
 		ret = ring_wait_for_space(ring, bytes);
 		if (unlikely(ret))
 			return ret;
@@ -1674,6 +1688,7 @@  int intel_ring_begin(struct intel_engine *ring,
 		     int num_dwords)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
 	int ret;
 
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
@@ -1690,14 +1705,15 @@  int intel_ring_begin(struct intel_engine *ring,
 	if (ret)
 		return ret;
 
-	ring->space -= num_dwords * sizeof(uint32_t);
+	ringbuf->space -= num_dwords * sizeof(uint32_t);
 	return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct intel_engine *ring)
 {
-	int num_dwords = (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
+	int num_dwords = (ringbuf->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
 
 	if (num_dwords == 0)
@@ -2019,6 +2035,7 @@  int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine *ring = &dev_priv->ring[RCS];
+	struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
 	int ret;
 
 	if (INTEL_INFO(dev)->gen >= 6) {
@@ -2057,13 +2074,13 @@  int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 
-	ring->size = size;
-	ring->effective_size = ring->size;
+	ringbuf->size = size;
+	ringbuf->effective_size = ringbuf->size;
 	if (IS_I830(ring->dev) || IS_845G(ring->dev))
-		ring->effective_size -= 2 * CACHELINE_BYTES;
+		ringbuf->effective_size -= 2 * CACHELINE_BYTES;
 
-	ring->virtual_start = ioremap_wc(start, size);
-	if (ring->virtual_start == NULL) {
+	ringbuf->virtual_start = ioremap_wc(start, size);
+	if (ringbuf->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 50cc525..7299bff 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -54,6 +54,27 @@  struct intel_ring_hangcheck {
 	bool deadlock;
 };
 
+struct intel_ringbuffer {
+	struct drm_i915_gem_object *obj;
+	void __iomem *virtual_start;
+
+	u32 head;
+	u32 tail;
+	int space;
+	int size;
+	int effective_size;
+
+	/** We track the position of the requests in the ring buffer, and
+	 * when each is retired we increment last_retired_head as the GPU
+	 * must have finished processing the request and so we know we
+	 * can advance the ringbuffer up to that position.
+	 *
+	 * last_retired_head is set to -1 after the value is consumed so
+	 * we can detect new retirements.
+	 */
+	u32 last_retired_head;
+};
+
 struct intel_engine {
 	const char	*name;
 	enum intel_ring_id {
@@ -66,27 +87,11 @@  struct intel_engine {
 #define I915_NUM_RINGS 5
 #define LAST_USER_RING (VECS + 1)
 	u32		mmio_base;
-	void		__iomem *virtual_start;
 	struct		drm_device *dev;
-	struct		drm_i915_gem_object *obj;
+	struct intel_ringbuffer default_ringbuf;
 
-	u32		head;
-	u32		tail;
-	int		space;
-	int		size;
-	int		effective_size;
 	struct intel_hw_status_page status_page;
 
-	/** We track the position of the requests in the ring buffer, and
-	 * when each is retired we increment last_retired_head as the GPU
-	 * must have finished processing the request and so we know we
-	 * can advance the ringbuffer up to that position.
-	 *
-	 * last_retired_head is set to -1 after the value is consumed so
-	 * we can detect new retirements.
-	 */
-	u32		last_retired_head;
-
 	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
 	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
 	u32		trace_irq_seqno;
@@ -139,7 +144,7 @@  struct intel_engine {
 
 	/**
 	 * List of objects currently involved in rendering from the
-	 * ringbuffer.
+	 * engine.
 	 *
 	 * Includes buffers having the contents of their GPU caches
 	 * flushed, not necessarily primitives.  last_rendering_seqno
@@ -209,10 +214,16 @@  struct intel_engine {
 	u32 (*get_cmd_length_mask)(u32 cmd_header);
 };
 
+/* This is a temporary define to help us transition to per-context ringbuffers */
+static inline struct intel_ringbuffer *__get_ringbuf(struct intel_engine *ring)
+{
+	return &ring->default_ringbuf;
+}
+
 static inline bool
 intel_ring_initialized(struct intel_engine *ring)
 {
-	return ring->obj != NULL;
+	return __get_ringbuf(ring)->obj != NULL;
 }
 
 static inline unsigned
@@ -283,12 +294,16 @@  int __must_check intel_ring_cacheline_align(struct intel_engine *ring);
 static inline void intel_ring_emit(struct intel_engine *ring,
 				   u32 data)
 {
-	iowrite32(data, ring->virtual_start + ring->tail);
-	ring->tail += 4;
+	struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
+
+	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
+	ringbuf->tail += 4;
 }
 static inline void intel_ring_advance(struct intel_engine *ring)
 {
-	ring->tail &= ring->size - 1;
+	struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
+
+	ringbuf->tail &= ringbuf->size - 1;
 }
 void __intel_ring_advance(struct intel_engine *ring);
 
@@ -312,7 +327,7 @@  int intel_allocate_ring_buffer(struct intel_engine *ring);
 
 static inline u32 intel_ring_get_tail(struct intel_engine *ring)
 {
-	return ring->tail;
+	return __get_ringbuf(ring)->tail;
 }
 
 static inline u32 intel_ring_get_seqno(struct intel_engine *ring)