
[1003/1094] drm/i915: Split the ringbuffers from the rings (2/3)

Message ID 1413889294-31328-1004-git-send-email-dheerajx.s.jamwal@intel.com (mailing list archive)
State: New, archived

Commit Message

Dheeraj Jamwal Oct. 21, 2014, 11 a.m. UTC
From: Oscar Mateo <oscar.mateo@intel.com>

This refactoring has been performed using the following Coccinelle
semantic script:

    @@
    struct intel_engine_cs r;
    @@
    (
    - (r).obj
    + r.buffer->obj
    |
    - (r).virtual_start
    + r.buffer->virtual_start
    |
    - (r).head
    + r.buffer->head
    |
    - (r).tail
    + r.buffer->tail
    |
    - (r).space
    + r.buffer->space
    |
    - (r).size
    + r.buffer->size
    |
    - (r).effective_size
    + r.buffer->effective_size
    |
    - (r).last_retired_head
    + r.buffer->last_retired_head
    )

    @@
    struct intel_engine_cs *r;
    @@
    (
    - (r)->obj
    + r->buffer->obj
    |
    - (r)->virtual_start
    + r->buffer->virtual_start
    |
    - (r)->head
    + r->buffer->head
    |
    - (r)->tail
    + r->buffer->tail
    |
    - (r)->space
    + r->buffer->space
    |
    - (r)->size
    + r->buffer->size
    |
    - (r)->effective_size
    + r->buffer->effective_size
    |
    - (r)->last_retired_head
    + r->buffer->last_retired_head
    )

    @@
    expression E;
    @@
    (
    - LP_RING(E)->obj
    + LP_RING(E)->buffer->obj
    |
    - LP_RING(E)->virtual_start
    + LP_RING(E)->buffer->virtual_start
    |
    - LP_RING(E)->head
    + LP_RING(E)->buffer->head
    |
    - LP_RING(E)->tail
    + LP_RING(E)->buffer->tail
    |
    - LP_RING(E)->space
    + LP_RING(E)->buffer->space
    |
    - LP_RING(E)->size
    + LP_RING(E)->buffer->size
    |
    - LP_RING(E)->effective_size
    + LP_RING(E)->buffer->effective_size
    |
    - LP_RING(E)->last_retired_head
    + LP_RING(E)->buffer->last_retired_head
    )
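
For reference, a semantic script like this is normally applied tree-wide
with Coccinelle's spatch tool; assuming the rules above were saved as
ringbuffer.cocci (an illustrative name, not part of this patch), an
invocation along these lines would perform the rewrite in place:

    spatch --sp-file ringbuffer.cocci --in-place --dir drivers/gpu/drm/i915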

Note: On top of this, this patch also removes the now-unused ringbuffer
fields in intel_engine_cs.
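
The net effect on the data structures, in simplified form (a sketch for
readability only; the authoritative definitions live in the
intel_ringbuffer.h hunk below and in the preceding (1/3) patch, which
introduced struct intel_ringbuffer):

    struct intel_ringbuffer {
            struct drm_i915_gem_object *obj;   /* backing GEM object */
            void __iomem *virtual_start;       /* CPU mapping of the ring */

            u32 head;
            u32 tail;
            int space;
            int size;
            int effective_size;

            /* Updated from request->tail as requests retire; set to -1
             * once the value is consumed so new retirements are detected. */
            u32 last_retired_head;
    };

    struct intel_engine_cs {
            /* ... engine state: mmio_base, status_page, irq fields, ... */
            struct intel_ringbuffer *buffer;   /* all ringbuffer state */
            /* ... */
    };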

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
[danvet: Add note about fixup patch included here.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
(cherry picked from commit ee1b1e5ef38d22e2447b48b8456a2b2bcf438e65)

Signed-off-by: Dheeraj Jamwal <dheerajx.s.jamwal@intel.com>
---
 drivers/gpu/drm/i915/i915_dma.c         |   22 +++----
 drivers/gpu/drm/i915/i915_gem.c         |    2 +-
 drivers/gpu/drm/i915/i915_gpu_error.c   |    6 +-
 drivers/gpu/drm/i915/i915_irq.c         |    8 +--
 drivers/gpu/drm/i915/intel_ringbuffer.c |  102 +++++++++++++++----------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |   27 ++------
 6 files changed, 75 insertions(+), 92 deletions(-)

Patch

diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d5d4ed2..40a0176 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -64,7 +64,7 @@ 
  * has access to the ring.
  */
 #define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
-	if (LP_RING(dev->dev_private)->obj == NULL)			\
+	if (LP_RING(dev->dev_private)->buffer->obj == NULL)			\
 		LOCK_TEST_WITH_RETURN(dev, file);			\
 } while (0)
 
@@ -149,17 +149,17 @@  void i915_kernel_lost_context(struct drm_device * dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
-	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
-	if (ring->space < 0)
-		ring->space += ring->size;
+	ring->buffer->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+	ring->buffer->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+	ring->buffer->space = ring->buffer->head - (ring->buffer->tail + I915_RING_FREE_SPACE);
+	if (ring->buffer->space < 0)
+		ring->buffer->space += ring->buffer->size;
 
 	if (!dev->primary->master)
 		return;
 
 	master_priv = dev->primary->master->driver_priv;
-	if (ring->head == ring->tail && master_priv->sarea_priv)
+	if (ring->buffer->head == ring->buffer->tail && master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
 }
 
@@ -202,7 +202,7 @@  static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	}
 
 	if (init->ring_size != 0) {
-		if (LP_RING(dev_priv)->obj != NULL) {
+		if (LP_RING(dev_priv)->buffer->obj != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
@@ -239,7 +239,7 @@  static int i915_dma_resume(struct drm_device * dev)
 
 	DRM_DEBUG_DRIVER("%s\n", __func__);
 
-	if (ring->virtual_start == NULL) {
+	if (ring->buffer->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
@@ -361,7 +361,7 @@  static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i, ret;
 
-	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
+	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
 		return -EINVAL;
 
 	for (i = 0; i < dwords;) {
@@ -824,7 +824,7 @@  static int i915_irq_emit(struct drm_device *dev, void *data,
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
 
-	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
+	if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fc6d8d3..ca952be 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2640,7 +2640,7 @@  i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 		 * of tail of the request to update the last known position
 		 * of the GPU head.
 		 */
-		ring->last_retired_head = request->tail;
+		ring->buffer->last_retired_head = request->tail;
 
 		i915_gem_free_request(request);
 	}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 632db42..87ec60e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -825,8 +825,8 @@  static void i915_record_ring_state(struct drm_device *dev,
 		ering->hws = I915_READ(mmio);
 	}
 
-	ering->cpu_ring_head = ring->head;
-	ering->cpu_ring_tail = ring->tail;
+	ering->cpu_ring_head = ring->buffer->head;
+	ering->cpu_ring_tail = ring->buffer->tail;
 
 	ering->hangcheck_score = ring->hangcheck.score;
 	ering->hangcheck_action = ring->hangcheck.action;
@@ -930,7 +930,7 @@  static void i915_gem_record_rings(struct drm_device *dev,
 		}
 
 		error->ring[i].ringbuffer =
-			i915_error_ggtt_object_create(dev_priv, ring->obj);
+			i915_error_ggtt_object_create(dev_priv, ring->buffer->obj);
 
 		if (ring->status_page.obj)
 			error->ring[i].hws_page =
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c28e0da..152ba12 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1219,7 +1219,7 @@  static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 static void notify_ring(struct drm_device *dev,
 			struct intel_engine_cs *ring)
 {
-	if (ring->obj == NULL)
+	if (ring->buffer->obj == NULL)
 		return;
 
 	trace_i915_gem_request_complete(ring);
@@ -2837,10 +2837,10 @@  semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
 		 * our ring is smaller than what the hardware (and hence
 		 * HEAD_ADDR) allows. Also handles wrap-around.
 		 */
-		head &= ring->size - 1;
+		head &= ring->buffer->size - 1;
 
 		/* This here seems to blow up */
-		cmd = ioread32(ring->virtual_start + head);
+		cmd = ioread32(ring->buffer->virtual_start + head);
 		if (cmd == ipehr)
 			break;
 
@@ -2850,7 +2850,7 @@  semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
 	if (!i)
 		return NULL;
 
-	*seqno = ioread32(ring->virtual_start + head + 4) + 1;
+	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
 	return semaphore_wait_to_signaller_ring(ring, ipehr);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9b406e42..70f1b88 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -50,7 +50,7 @@  static inline int __ring_space(int head, int tail, int size)
 
 static inline int ring_space(struct intel_engine_cs *ring)
 {
-	return __ring_space(ring->head & HEAD_ADDR, ring->tail, ring->size);
+	return __ring_space(ring->buffer->head & HEAD_ADDR, ring->buffer->tail, ring->buffer->size);
 }
 
 static bool intel_ring_stopped(struct intel_engine_cs *ring)
@@ -61,10 +61,10 @@  static bool intel_ring_stopped(struct intel_engine_cs *ring)
 
 void __intel_ring_advance(struct intel_engine_cs *ring)
 {
-	ring->tail &= ring->size - 1;
+	ring->buffer->tail &= ring->buffer->size - 1;
 	if (intel_ring_stopped(ring))
 		return;
-	ring->write_tail(ring, ring->tail);
+	ring->write_tail(ring, ring->buffer->tail);
 }
 
 static int
@@ -481,7 +481,7 @@  static int init_ring_common(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj = ring->obj;
+	struct drm_i915_gem_object *obj = ring->buffer->obj;
 	int ret = 0;
 
 	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
@@ -520,7 +520,7 @@  static int init_ring_common(struct intel_engine_cs *ring)
 	 * register values. */
 	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
 	I915_WRITE_CTL(ring,
-			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+			((ring->buffer->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
@@ -540,10 +540,10 @@  static int init_ring_common(struct intel_engine_cs *ring)
 	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
 		i915_kernel_lost_context(ring->dev);
 	else {
-		ring->head = I915_READ_HEAD(ring);
-		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ring->space = ring_space(ring);
-		ring->last_retired_head = -1;
+		ring->buffer->head = I915_READ_HEAD(ring);
+		ring->buffer->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+		ring->buffer->space = ring_space(ring);
+		ring->buffer->last_retired_head = -1;
 	}
 
 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@@ -1382,14 +1382,14 @@  static int allocate_ring_buffer(struct intel_engine_cs *ring)
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (ring->obj)
+	if (ring->buffer->obj)
 		return 0;
 
 	obj = NULL;
 	if (!HAS_LLC(dev))
-		obj = i915_gem_object_create_stolen(dev, ring->size);
+		obj = i915_gem_object_create_stolen(dev, ring->buffer->size);
 	if (obj == NULL)
-		obj = i915_gem_alloc_object(dev, ring->size);
+		obj = i915_gem_alloc_object(dev, ring->buffer->size);
 	if (obj == NULL)
 		return -ENOMEM;
 
@@ -1401,15 +1401,15 @@  static int allocate_ring_buffer(struct intel_engine_cs *ring)
 	if (ret)
 		goto err_unpin;
 
-	ring->virtual_start =
+	ring->buffer->virtual_start =
 		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
-			   ring->size);
-	if (ring->virtual_start == NULL) {
+			   ring->buffer->size);
+	if (ring->buffer->virtual_start == NULL) {
 		ret = -EINVAL;
 		goto err_unpin;
 	}
 
-	ring->obj = obj;
+	ring->buffer->obj = obj;
 	return 0;
 
 err_unpin:
@@ -1435,7 +1435,7 @@  static int intel_init_ring_buffer(struct drm_device *dev,
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	ring->size = 32 * PAGE_SIZE;
+	ring->buffer->size = 32 * PAGE_SIZE;
 	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
 	init_waitqueue_head(&ring->irq_queue);
@@ -1461,9 +1461,9 @@  static int intel_init_ring_buffer(struct drm_device *dev,
 	 * the TAIL pointer points to within the last 2 cachelines
 	 * of the buffer.
 	 */
-	ring->effective_size = ring->size;
+	ring->buffer->effective_size = ring->buffer->size;
 	if (IS_I830(dev) || IS_845G(dev))
-		ring->effective_size -= 2 * CACHELINE_BYTES;
+		ring->buffer->effective_size -= 2 * CACHELINE_BYTES;
 
 	ret = i915_cmd_parser_init_ring(ring);
 	if (ret)
@@ -1485,17 +1485,17 @@  void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv = to_i915(ring->dev);
 
-	if (ring->obj == NULL)
+	if (ring->buffer->obj == NULL)
 		return;
 
 	intel_stop_ring_buffer(ring);
 	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
-	iounmap(ring->virtual_start);
+	iounmap(ring->buffer->virtual_start);
 
-	i915_gem_object_ggtt_unpin(ring->obj);
-	drm_gem_object_unreference(&ring->obj->base);
-	ring->obj = NULL;
+	i915_gem_object_ggtt_unpin(ring->buffer->obj);
+	drm_gem_object_unreference(&ring->buffer->obj->base);
+	ring->buffer->obj = NULL;
 	ring->preallocated_lazy_request = NULL;
 	ring->outstanding_lazy_seqno = 0;
 
@@ -1516,17 +1516,17 @@  static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 	u32 seqno = 0;
 	int ret;
 
-	if (ring->last_retired_head != -1) {
-		ring->head = ring->last_retired_head;
-		ring->last_retired_head = -1;
+	if (ring->buffer->last_retired_head != -1) {
+		ring->buffer->head = ring->buffer->last_retired_head;
+		ring->buffer->last_retired_head = -1;
 
-		ring->space = ring_space(ring);
-		if (ring->space >= n)
+		ring->buffer->space = ring_space(ring);
+		if (ring->buffer->space >= n)
 			return 0;
 	}
 
 	list_for_each_entry(request, &ring->request_list, list) {
-		if (__ring_space(request->tail, ring->tail, ring->size) >= n) {
+		if (__ring_space(request->tail, ring->buffer->tail, ring->buffer->size) >= n) {
 			seqno = request->seqno;
 			break;
 		}
@@ -1540,10 +1540,10 @@  static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 		return ret;
 
 	i915_gem_retire_requests_ring(ring);
-	ring->head = ring->last_retired_head;
-	ring->last_retired_head = -1;
+	ring->buffer->head = ring->buffer->last_retired_head;
+	ring->buffer->last_retired_head = -1;
 
-	ring->space = ring_space(ring);
+	ring->buffer->space = ring_space(ring);
 	return 0;
 }
 
@@ -1570,9 +1570,9 @@  static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 
 	trace_i915_ring_wait_begin(ring);
 	do {
-		ring->head = I915_READ_HEAD(ring);
-		ring->space = ring_space(ring);
-		if (ring->space >= n) {
+		ring->buffer->head = I915_READ_HEAD(ring);
+		ring->buffer->space = ring_space(ring);
+		if (ring->buffer->space >= n) {
 			ret = 0;
 			break;
 		}
@@ -1608,21 +1608,21 @@  static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 {
 	uint32_t __iomem *virt;
-	int rem = ring->size - ring->tail;
+	int rem = ring->buffer->size - ring->buffer->tail;
 
-	if (ring->space < rem) {
+	if (ring->buffer->space < rem) {
 		int ret = ring_wait_for_space(ring, rem);
 		if (ret)
 			return ret;
 	}
 
-	virt = ring->virtual_start + ring->tail;
+	virt = ring->buffer->virtual_start + ring->buffer->tail;
 	rem /= 4;
 	while (rem--)
 		iowrite32(MI_NOOP, virt++);
 
-	ring->tail = 0;
-	ring->space = ring_space(ring);
+	ring->buffer->tail = 0;
+	ring->buffer->space = ring_space(ring);
 
 	return 0;
 }
@@ -1674,13 +1674,13 @@  static int __intel_ring_prepare(struct intel_engine_cs *ring,
 {
 	int ret;
 
-	if (unlikely(ring->tail + bytes > ring->effective_size)) {
+	if (unlikely(ring->buffer->tail + bytes > ring->buffer->effective_size)) {
 		ret = intel_wrap_ring_buffer(ring);
 		if (unlikely(ret))
 			return ret;
 	}
 
-	if (unlikely(ring->space < bytes)) {
+	if (unlikely(ring->buffer->space < bytes)) {
 		ret = ring_wait_for_space(ring, bytes);
 		if (unlikely(ret))
 			return ret;
@@ -1709,14 +1709,14 @@  int intel_ring_begin(struct intel_engine_cs *ring,
 	if (ret)
 		return ret;
 
-	ring->space -= num_dwords * sizeof(uint32_t);
+	ring->buffer->space -= num_dwords * sizeof(uint32_t);
 	return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct intel_engine_cs *ring)
 {
-	int num_dwords = (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
 
 	if (num_dwords == 0)
@@ -2094,13 +2094,13 @@  int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 
-	ring->size = size;
-	ring->effective_size = ring->size;
+	ring->buffer->size = size;
+	ring->buffer->effective_size = ring->buffer->size;
 	if (IS_I830(ring->dev) || IS_845G(ring->dev))
-		ring->effective_size -= 2 * CACHELINE_BYTES;
+		ring->buffer->effective_size -= 2 * CACHELINE_BYTES;
 
-	ring->virtual_start = ioremap_wc(start, size);
-	if (ring->virtual_start == NULL) {
+	ring->buffer->virtual_start = ioremap_wc(start, size);
+	if (ring->buffer->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		ret = -ENOMEM;
@@ -2116,7 +2116,7 @@  int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	return 0;
 
 err_vstart:
-	iounmap(ring->virtual_start);
+	iounmap(ring->buffer->virtual_start);
 err_ringbuf:
 	kfree(ringbuf);
 	ring->buffer = NULL;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a0ac668..c26def08 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -91,28 +91,11 @@  struct  intel_engine_cs {
 #define I915_NUM_RINGS 5
 #define LAST_USER_RING (VECS + 1)
 	u32		mmio_base;
-	void		__iomem *virtual_start;
 	struct		drm_device *dev;
-	struct		drm_i915_gem_object *obj;
 	struct intel_ringbuffer *buffer;
 
-	u32		head;
-	u32		tail;
-	int		space;
-	int		size;
-	int		effective_size;
 	struct intel_hw_status_page status_page;
 
-	/** We track the position of the requests in the ring buffer, and
-	 * when each is retired we increment last_retired_head as the GPU
-	 * must have finished processing the request and so we know we
-	 * can advance the ringbuffer up to that position.
-	 *
-	 * last_retired_head is set to -1 after the value is consumed so
-	 * we can detect new retirements.
-	 */
-	u32		last_retired_head;
-
 	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
 	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
 	u32		trace_irq_seqno;
@@ -239,7 +222,7 @@  struct  intel_engine_cs {
 static inline bool
 intel_ring_initialized(struct intel_engine_cs *ring)
 {
-	return ring->buffer && ring->obj;
+	return ring->buffer && ring->buffer->obj;
 }
 
 static inline unsigned
@@ -310,12 +293,12 @@  int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
 static inline void intel_ring_emit(struct intel_engine_cs *ring,
 				   u32 data)
 {
-	iowrite32(data, ring->virtual_start + ring->tail);
-	ring->tail += 4;
+	iowrite32(data, ring->buffer->virtual_start + ring->buffer->tail);
+	ring->buffer->tail += 4;
 }
 static inline void intel_ring_advance(struct intel_engine_cs *ring)
 {
-	ring->tail &= ring->size - 1;
+	ring->buffer->tail &= ring->buffer->size - 1;
 }
 void __intel_ring_advance(struct intel_engine_cs *ring);
 
@@ -335,7 +318,7 @@  void intel_ring_setup_status_page(struct intel_engine_cs *ring);
 
 static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
 {
-	return ring->tail;
+	return ring->buffer->tail;
 }
 
 static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)