diff mbox

[07/12] drm/i915: Rename request->ringbuf to request->ring

Message ID 1448023432-10726-7-git-send-email-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Headers show

Commit Message

Chris Wilson Nov. 20, 2015, 12:43 p.m. UTC
Now that we have disambiguated ring and engine, we can use the clearer
and more consistent name for the intel_ringbuffer pointer in the
request.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h            |   2 +-
 drivers/gpu/drm/i915/i915_gem.c            |  28 +++---
 drivers/gpu/drm/i915/i915_gem_context.c    |   2 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   4 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c        |   6 +-
 drivers/gpu/drm/i915/intel_display.c       |  10 +-
 drivers/gpu/drm/i915/intel_lrc.c           | 149 ++++++++++++++---------------
 drivers/gpu/drm/i915/intel_mocs.c          |  32 +++----
 drivers/gpu/drm/i915/intel_overlay.c       |  42 ++++----
 drivers/gpu/drm/i915/intel_ringbuffer.c    |  86 ++++++++---------
 10 files changed, 178 insertions(+), 183 deletions(-)

Comments

Daniel Vetter Nov. 24, 2015, 2:51 p.m. UTC | #1
On Fri, Nov 20, 2015 at 12:43:47PM +0000, Chris Wilson wrote:
> Now that we have disambiguated ring and engine, we can use the clearer
> and more consistent name for the intel_ringbuffer pointer in the
> request.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> ---
>  drivers/gpu/drm/i915/i915_drv.h            |   2 +-
>  drivers/gpu/drm/i915/i915_gem.c            |  28 +++---
>  drivers/gpu/drm/i915/i915_gem_context.c    |   2 +-
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c |   4 +-
>  drivers/gpu/drm/i915/i915_gem_gtt.c        |   6 +-
>  drivers/gpu/drm/i915/intel_display.c       |  10 +-
>  drivers/gpu/drm/i915/intel_lrc.c           | 149 ++++++++++++++---------------
>  drivers/gpu/drm/i915/intel_mocs.c          |  32 +++----
>  drivers/gpu/drm/i915/intel_overlay.c       |  42 ++++----
>  drivers/gpu/drm/i915/intel_ringbuffer.c    |  86 ++++++++---------
>  10 files changed, 178 insertions(+), 183 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 9ce8b3fcb3a0..b7eaa2deb437 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2185,7 +2185,7 @@ struct drm_i915_gem_request {
>  	 * context.
>  	 */
>  	struct intel_context *ctx;
> -	struct intel_ringbuffer *ringbuf;
> +	struct intel_ringbuffer *ring;
>  
>  	/** Batch buffer related to this request if any (used for
>  	    error state dump only) */
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 5a1b51a27fe3..d6706dd4117c 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -1386,7 +1386,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
>  	 * Note this requires that we are always called in request
>  	 * completion order.
>  	 */
> -	request->ringbuf->last_retired_head = request->postfix;
> +	request->ring->last_retired_head = request->postfix;
>  
>  	list_del_init(&request->list);
>  	i915_gem_request_remove_from_client(request);
> @@ -2553,7 +2553,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>  {
>  	struct intel_engine_cs *engine;
>  	struct drm_i915_private *dev_priv;
> -	struct intel_ringbuffer *ringbuf;
> +	struct intel_ringbuffer *ring;
>  	u32 request_start;
>  	int ret;
>  
> @@ -2562,16 +2562,16 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>  
>  	engine = request->engine;
>  	dev_priv = request->i915;
> -	ringbuf = request->ringbuf;
> +	ring = request->ring;
>  
>  	/*
>  	 * To ensure that this call will not fail, space for its emissions
>  	 * should already have been reserved in the ring buffer. Let the ring
>  	 * know that it is time to use that space up.
>  	 */
> -	intel_ring_reserved_space_use(ringbuf);
> +	intel_ring_reserved_space_use(ring);
>  
> -	request_start = intel_ring_get_tail(ringbuf);
> +	request_start = intel_ring_get_tail(ring);
>  	/*
>  	 * Emit any outstanding flushes - execbuf can fail to emit the flush
>  	 * after having emitted the batchbuffer command. Hence we need to fix
> @@ -2593,14 +2593,14 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>  	 * GPU processing the request, we never over-estimate the
>  	 * position of the head.
>  	 */
> -	request->postfix = intel_ring_get_tail(ringbuf);
> +	request->postfix = intel_ring_get_tail(ring);
>  
>  	if (i915.enable_execlists)
>  		ret = engine->emit_request(request);
>  	else {
>  		ret = engine->add_request(request);
>  
> -		request->tail = intel_ring_get_tail(ringbuf);
> +		request->tail = intel_ring_get_tail(ring);
>  	}
>  	/* Not allowed to fail! */
>  	WARN(ret, "emit|add_request failed: %d!\n", ret);
> @@ -2629,7 +2629,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>  	intel_mark_busy(dev_priv->dev);
>  
>  	/* Sanity check that the reserved size was large enough. */
> -	intel_ring_reserved_space_end(ringbuf);
> +	intel_ring_reserved_space_end(ring);
>  }
>  
>  static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
> @@ -2741,7 +2741,7 @@ int i915_gem_request_alloc(struct intel_engine_cs *engine,
>  	 * to be redone if the request is not actually submitted straight
>  	 * away, e.g. because a GPU scheduler has deferred it.
>  	 */
> -	intel_ring_reserved_space_reserve(req->ringbuf,
> +	intel_ring_reserved_space_reserve(req->ring,
>  					  MIN_SPACE_FOR_ADD_REQUEST);
>  	ret = intel_ring_begin(req, 0);
>  	if (ret) {
> @@ -2764,7 +2764,7 @@ err:
>  
>  void i915_gem_request_cancel(struct drm_i915_gem_request *req)
>  {
> -	intel_ring_reserved_space_cancel(req->ringbuf);
> +	intel_ring_reserved_space_cancel(req->ring);
>  
>  	i915_gem_request_unreference(req);
>  }
> @@ -4657,11 +4657,11 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
>  	 * at initialization time.
>  	 */
>  	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
> -		intel_ring_emit(req->ringbuf, MI_LOAD_REGISTER_IMM(1));
> -		intel_ring_emit(req->ringbuf, reg_base + i);
> -		intel_ring_emit(req->ringbuf, remap_info[i/4]);
> +		intel_ring_emit(req->ring, MI_LOAD_REGISTER_IMM(1));
> +		intel_ring_emit(req->ring, reg_base + i);
> +		intel_ring_emit(req->ring, remap_info[i/4]);
>  	}
> -	intel_ring_advance(req->ringbuf);
> +	intel_ring_advance(req->ring);
>  
>  	return ret;
>  }
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index 047c2f94bd22..5d0516930b16 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -479,7 +479,7 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
>  static inline int
>  mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	u32 flags = hw_flags | MI_MM_SPACE_GTT;
>  	const int num_rings =
>  		/* Use an extended w/a on ivb+ if signalling from other rings */
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 32c2d08bdd4c..afa930dab632 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -1115,7 +1115,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
>  static int
>  i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	int ret, i;
>  
>  	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
> @@ -1196,7 +1196,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
>  			       struct drm_i915_gem_execbuffer2 *args,
>  			       struct list_head *vmas)
>  {
> -	struct intel_ringbuffer *ring = params->request->ringbuf;
> +	struct intel_ringbuffer *ring = params->request->ring;
>  	struct drm_i915_private *dev_priv = params->request->i915;
>  	u64 exec_start, exec_len;
>  	int instp_mode;
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index c222456961fb..b1ee6f89e70b 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -651,7 +651,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
>  			  unsigned entry,
>  			  dma_addr_t addr)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	int ret;
>  
>  	BUG_ON(entry >= 4);
> @@ -1588,7 +1588,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
>  static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  			 struct drm_i915_gem_request *req)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	int ret;
>  
>  	/* NB: TLBs must be flushed and invalidated before a switch */
> @@ -1626,7 +1626,7 @@ static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  			  struct drm_i915_gem_request *req)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	int ret;
>  
>  	/* NB: TLBs must be flushed and invalidated before a switch */
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index 2447f1a36fb0..cdd6074257af 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -10824,7 +10824,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
>  				 struct drm_i915_gem_request *req,
>  				 uint32_t flags)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>  	u32 flip_mask;
>  	int ret;
> @@ -10859,7 +10859,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
>  				 struct drm_i915_gem_request *req,
>  				 uint32_t flags)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>  	u32 flip_mask;
>  	int ret;
> @@ -10891,7 +10891,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
>  				 struct drm_i915_gem_request *req,
>  				 uint32_t flags)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	struct drm_i915_private *dev_priv = req->i915;
>  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>  	uint32_t pf, pipesrc;
> @@ -10930,7 +10930,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
>  				 struct drm_i915_gem_request *req,
>  				 uint32_t flags)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	struct drm_i915_private *dev_priv = req->i915;
>  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>  	uint32_t pf, pipesrc;
> @@ -10966,7 +10966,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
>  				 struct drm_i915_gem_request *req,
>  				 uint32_t flags)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>  	uint32_t plane_bit = 0;
>  	int len, ret;
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 1502e53d2ad6..5c37922c3cde 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -339,7 +339,7 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
>  {
>  	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
>  	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[rq->engine->id].state;
> -	struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
> +	struct drm_i915_gem_object *rb_obj = rq->ring->obj;
>  	struct page *page;
>  	uint32_t *reg_state;
>  
> @@ -545,7 +545,7 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
>  
>  	i915_gem_request_reference(request);
>  
> -	request->tail = request->ringbuf->tail;
> +	request->tail = request->ring->tail;
>  
>  	spin_lock_irq(&engine->execlist_lock);
>  
> @@ -633,7 +633,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
>  {
>  	int ret;
>  
> -	request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
> +	request->ring = request->ctx->engine[request->engine->id].ringbuf;
>  
>  	if (request->ctx != request->engine->default_context) {
>  		ret = intel_lr_context_pin(request);
> @@ -656,7 +656,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
>  static void
>  intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
>  {
> -	intel_ring_advance(request->ringbuf);
> +	intel_ring_advance(request->ring);
>  
>  	if (intel_ring_stopped(request->engine))
>  		return;
> @@ -686,9 +686,9 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
>  			       struct list_head *vmas)
>  {
>  	struct drm_device       *dev = params->dev;
> -	struct intel_engine_cs  *ring = params->ring;
> +	struct intel_engine_cs  *engine = params->ring;
>  	struct drm_i915_private *dev_priv = dev->dev_private;
> -	struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
> +	struct intel_ringbuffer *ring = params->request->ring;
>  	u64 exec_start;
>  	int instp_mode;
>  	u32 instp_mask;
> @@ -700,7 +700,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
>  	case I915_EXEC_CONSTANTS_REL_GENERAL:
>  	case I915_EXEC_CONSTANTS_ABSOLUTE:
>  	case I915_EXEC_CONSTANTS_REL_SURFACE:
> -		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
> +		if (instp_mode != 0 && engine->id != RCS) {
>  			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
>  			return -EINVAL;
>  		}
> @@ -729,17 +729,17 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
>  	if (ret)
>  		return ret;
>  
> -	if (ring == &dev_priv->ring[RCS] &&
> +	if (engine->id == RCS &&
>  	    instp_mode != dev_priv->relative_constants_mode) {
>  		ret = intel_ring_begin(params->request, 4);
>  		if (ret)
>  			return ret;
>  
> -		intel_ring_emit(ringbuf, MI_NOOP);
> -		intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
> -		intel_ring_emit(ringbuf, INSTPM);
> -		intel_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
> -		intel_ring_advance(ringbuf);
> +		intel_ring_emit(ring, MI_NOOP);
> +		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
> +		intel_ring_emit(ring, INSTPM);
> +		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
> +		intel_ring_advance(ring);
>  
>  		dev_priv->relative_constants_mode = instp_mode;
>  	}
> @@ -747,7 +747,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
>  	exec_start = params->batch_obj_vm_offset +
>  		     args->batch_start_offset;
>  
> -	ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
> +	ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
>  	if (ret)
>  		return ret;
>  
> @@ -827,7 +827,7 @@ static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
>  {
>  	int engine = rq->engine->id;
>  	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[engine].state;
> -	struct intel_ringbuffer *ringbuf = rq->ringbuf;
> +	struct intel_ringbuffer *ring = rq->ring;
>  	int ret = 0;
>  
>  	WARN_ON(!mutex_is_locked(&rq->i915->dev->struct_mutex));
> @@ -837,7 +837,7 @@ static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
>  		if (ret)
>  			goto reset_pin_count;
>  
> -		ret = intel_pin_and_map_ringbuffer_obj(rq->i915->dev, ringbuf);
> +		ret = intel_pin_and_map_ringbuffer_obj(rq->i915->dev, ring);
>  		if (ret)
>  			goto unpin_ctx_obj;
>  
> @@ -858,12 +858,12 @@ void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
>  {
>  	int engine = rq->engine->id;
>  	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[engine].state;
> -	struct intel_ringbuffer *ringbuf = rq->ringbuf;
> +	struct intel_ringbuffer *ring = rq->ring;
>  
>  	if (ctx_obj) {
>  		WARN_ON(!mutex_is_locked(&rq->i915->dev->struct_mutex));
>  		if (--rq->ctx->engine[engine].pin_count == 0) {
> -			intel_unpin_ringbuffer_obj(ringbuf);
> +			intel_unpin_ringbuffer_obj(ring);
>  			i915_gem_object_ggtt_unpin(ctx_obj);
>  		}
>  	}
> @@ -873,7 +873,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
>  {
>  	int ret, i;
>  	struct intel_engine_cs *engine = req->engine;
> -	struct intel_ringbuffer *ringbuf = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	struct drm_i915_private *dev_priv = req->i915;
>  	struct i915_workarounds *w = &dev_priv->workarounds;
>  
> @@ -889,14 +889,14 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
>  	if (ret)
>  		return ret;
>  
> -	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
> +	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
>  	for (i = 0; i < w->count; i++) {
> -		intel_ring_emit(ringbuf, w->reg[i].addr);
> -		intel_ring_emit(ringbuf, w->reg[i].value);
> +		intel_ring_emit(ring, w->reg[i].addr);
> +		intel_ring_emit(ring, w->reg[i].value);
>  	}
> -	intel_ring_emit(ringbuf, MI_NOOP);
> +	intel_ring_emit(ring, MI_NOOP);
>  
> -	intel_ring_advance(ringbuf);
> +	intel_ring_advance(ring);
>  
>  	engine->gpu_caches_dirty = true;
>  	ret = logical_ring_flush_all_caches(req);
> @@ -1318,7 +1318,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
>  {
>  	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
>  	struct intel_engine_cs *engine = req->engine;
> -	struct intel_ringbuffer *ringbuf = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
>  	int i, ret;
>  
> @@ -1326,18 +1326,18 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
>  	if (ret)
>  		return ret;
>  
> -	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
> +	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
>  	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
>  		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
>  
> -		intel_ring_emit(ringbuf, GEN8_RING_PDP_UDW(engine, i));
> -		intel_ring_emit(ringbuf, upper_32_bits(pd_daddr));
> -		intel_ring_emit(ringbuf, GEN8_RING_PDP_LDW(engine, i));
> -		intel_ring_emit(ringbuf, lower_32_bits(pd_daddr));
> +		intel_ring_emit(ring, GEN8_RING_PDP_UDW(engine, i));
> +		intel_ring_emit(ring, upper_32_bits(pd_daddr));
> +		intel_ring_emit(ring, GEN8_RING_PDP_LDW(engine, i));
> +		intel_ring_emit(ring, lower_32_bits(pd_daddr));
>  	}
>  
> -	intel_ring_emit(ringbuf, MI_NOOP);
> -	intel_ring_advance(ringbuf);
> +	intel_ring_emit(ring, MI_NOOP);
> +	intel_ring_advance(ring);
>  
>  	return 0;
>  }
> @@ -1345,7 +1345,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
>  static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
>  			      u64 offset, unsigned dispatch_flags)
>  {
> -	struct intel_ringbuffer *ringbuf = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
>  	int ret;
>  
> @@ -1371,14 +1371,14 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
>  		return ret;
>  
>  	/* FIXME(BDW): Address space and security selectors. */
> -	intel_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
> +	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
>  			(ppgtt<<8) |
>  			(dispatch_flags & I915_DISPATCH_RS ?
>  			 MI_BATCH_RESOURCE_STREAMER : 0));
> -	intel_ring_emit(ringbuf, lower_32_bits(offset));
> -	intel_ring_emit(ringbuf, upper_32_bits(offset));
> -	intel_ring_emit(ringbuf, MI_NOOP);
> -	intel_ring_advance(ringbuf);
> +	intel_ring_emit(ring, lower_32_bits(offset));
> +	intel_ring_emit(ring, upper_32_bits(offset));
> +	intel_ring_emit(ring, MI_NOOP);
> +	intel_ring_advance(ring);
>  
>  	return 0;
>  }
> @@ -1420,10 +1420,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
>  			   u32 invalidate_domains,
>  			   u32 unused)
>  {
> -	struct intel_ringbuffer *ringbuf = request->ringbuf;
> -	struct intel_engine_cs *ring = ringbuf->ring;
> -	struct drm_device *dev = ring->dev;
> -	struct drm_i915_private *dev_priv = dev->dev_private;
> +	struct intel_ringbuffer *ring = request->ring;
>  	uint32_t cmd;
>  	int ret;
>  
> @@ -1442,17 +1439,17 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
>  
>  	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
>  		cmd |= MI_INVALIDATE_TLB;
> -		if (ring == &dev_priv->ring[VCS])
> +		if (request->engine->id == VCS)
>  			cmd |= MI_INVALIDATE_BSD;
>  	}
>  
> -	intel_ring_emit(ringbuf, cmd);
> -	intel_ring_emit(ringbuf,
> +	intel_ring_emit(ring, cmd);
> +	intel_ring_emit(ring,
>  			I915_GEM_HWS_SCRATCH_ADDR |
>  			MI_FLUSH_DW_USE_GTT);
> -	intel_ring_emit(ringbuf, 0); /* upper addr */
> -	intel_ring_emit(ringbuf, 0); /* value */
> -	intel_ring_advance(ringbuf);
> +	intel_ring_emit(ring, 0); /* upper addr */
> +	intel_ring_emit(ring, 0); /* value */
> +	intel_ring_advance(ring);
>  
>  	return 0;
>  }
> @@ -1461,9 +1458,8 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
>  				  u32 invalidate_domains,
>  				  u32 flush_domains)
>  {
> -	struct intel_ringbuffer *ringbuf = request->ringbuf;
> -	struct intel_engine_cs *ring = ringbuf->ring;
> -	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> +	struct intel_ringbuffer *ring = request->ring;
> +	u32 scratch_addr = request->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>  	bool vf_flush_wa;
>  	u32 flags = 0;
>  	int ret;
> @@ -1491,7 +1487,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
>  	 * On GEN9+ Before VF_CACHE_INVALIDATE we need to emit a NULL pipe
>  	 * control.
>  	 */
> -	vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
> +	vf_flush_wa = INTEL_INFO(request->i915)->gen >= 9 &&
>  		      flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
>  
>  	ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6);
> @@ -1499,21 +1495,21 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
>  		return ret;
>  
>  	if (vf_flush_wa) {
> -		intel_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
> -		intel_ring_emit(ringbuf, 0);
> -		intel_ring_emit(ringbuf, 0);
> -		intel_ring_emit(ringbuf, 0);
> -		intel_ring_emit(ringbuf, 0);
> -		intel_ring_emit(ringbuf, 0);
> +		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
> +		intel_ring_emit(ring, 0);
> +		intel_ring_emit(ring, 0);
> +		intel_ring_emit(ring, 0);
> +		intel_ring_emit(ring, 0);
> +		intel_ring_emit(ring, 0);
>  	}
>  
> -	intel_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
> -	intel_ring_emit(ringbuf, flags);
> -	intel_ring_emit(ringbuf, scratch_addr);
> -	intel_ring_emit(ringbuf, 0);
> -	intel_ring_emit(ringbuf, 0);
> -	intel_ring_emit(ringbuf, 0);
> -	intel_ring_advance(ringbuf);
> +	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
> +	intel_ring_emit(ring, flags);
> +	intel_ring_emit(ring, scratch_addr);
> +	intel_ring_emit(ring, 0);
> +	intel_ring_emit(ring, 0);
> +	intel_ring_emit(ring, 0);
> +	intel_ring_advance(ring);
>  
>  	return 0;
>  }
> @@ -1530,8 +1526,7 @@ static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
>  
>  static int gen8_emit_request(struct drm_i915_gem_request *request)
>  {
> -	struct intel_ringbuffer *ringbuf = request->ringbuf;
> -	struct intel_engine_cs *ring = ringbuf->ring;
> +	struct intel_ringbuffer *ring = request->ring;
>  	u32 cmd;
>  	int ret;
>  
> @@ -1547,23 +1542,23 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
>  	cmd = MI_STORE_DWORD_IMM_GEN4;
>  	cmd |= MI_GLOBAL_GTT;
>  
> -	intel_ring_emit(ringbuf, cmd);
> -	intel_ring_emit(ringbuf,
> -			(ring->status_page.gfx_addr +
> +	intel_ring_emit(ring, cmd);
> +	intel_ring_emit(ring,
> +			(request->engine->status_page.gfx_addr +
>  			 (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
> -	intel_ring_emit(ringbuf, 0);
> -	intel_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
> -	intel_ring_emit(ringbuf, MI_USER_INTERRUPT);
> -	intel_ring_emit(ringbuf, MI_NOOP);
> +	intel_ring_emit(ring, 0);
> +	intel_ring_emit(ring, i915_gem_request_get_seqno(request));
> +	intel_ring_emit(ring, MI_USER_INTERRUPT);
> +	intel_ring_emit(ring, MI_NOOP);
>  	intel_logical_ring_advance_and_submit(request);
>  
>  	/*
>  	 * Here we add two extra NOOPs as padding to avoid
>  	 * lite restore of a context with HEAD==TAIL.
>  	 */
> -	intel_ring_emit(ringbuf, MI_NOOP);
> -	intel_ring_emit(ringbuf, MI_NOOP);
> -	intel_ring_advance(ringbuf);
> +	intel_ring_emit(ring, MI_NOOP);
> +	intel_ring_emit(ring, MI_NOOP);
> +	intel_ring_advance(ring);
>  
>  	return 0;
>  }
> diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
> index d79c9c0bbffb..3a1bd5f7af55 100644
> --- a/drivers/gpu/drm/i915/intel_mocs.c
> +++ b/drivers/gpu/drm/i915/intel_mocs.c
> @@ -174,7 +174,7 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
>  				   const struct drm_i915_mocs_table *table,
>  				   u32 reg_base)
>  {
> -	struct intel_ringbuffer *ringbuf = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	unsigned int index;
>  	int ret;
>  
> @@ -185,11 +185,11 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
>  	if (ret)
>  		return ret;
>  
> -	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
> +	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
>  
>  	for (index = 0; index < table->size; index++) {
> -		intel_ring_emit(ringbuf, reg_base + index * 4);
> -		intel_ring_emit(ringbuf, table->table[index].control_value);
> +		intel_ring_emit(ring, reg_base + index * 4);
> +		intel_ring_emit(ring, table->table[index].control_value);
>  	}
>  
>  	/*
> @@ -201,12 +201,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
>  	 * that value to all the used entries.
>  	 */
>  	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
> -		intel_ring_emit(ringbuf, reg_base + index * 4);
> -		intel_ring_emit(ringbuf, table->table[0].control_value);
> +		intel_ring_emit(ring, reg_base + index * 4);
> +		intel_ring_emit(ring, table->table[0].control_value);
>  	}
>  
> -	intel_ring_emit(ringbuf, MI_NOOP);
> -	intel_ring_advance(ringbuf);
> +	intel_ring_emit(ring, MI_NOOP);
> +	intel_ring_advance(ring);
>  
>  	return 0;
>  }
> @@ -225,7 +225,7 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
>  static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
>  				const struct drm_i915_mocs_table *table)
>  {
> -	struct intel_ringbuffer *ringbuf = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	unsigned int count;
>  	unsigned int i;
>  	u32 value;
> @@ -240,15 +240,15 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
>  	if (ret)
>  		return ret;
>  
> -	intel_ring_emit(ringbuf,
> +	intel_ring_emit(ring,
>  			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
>  
>  	for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
>  		value = (table->table[count].l3cc_value & 0xffff) |
>  			((table->table[count + 1].l3cc_value & 0xffff) << 16);
>  
> -		intel_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
> -		intel_ring_emit(ringbuf, value);
> +		intel_ring_emit(ring, GEN9_LNCFCMOCS0 + i * 4);
> +		intel_ring_emit(ring, value);
>  	}
>  
>  	if (table->size & 0x01) {
> @@ -264,14 +264,14 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
>  	 * they are reserved by the hardware.
>  	 */
>  	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
> -		intel_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
> -		intel_ring_emit(ringbuf, value);
> +		intel_ring_emit(ring, GEN9_LNCFCMOCS0 + i * 4);
> +		intel_ring_emit(ring, value);
>  
>  		value = filler;
>  	}
>  
> -	intel_ring_emit(ringbuf, MI_NOOP);
> -	intel_ring_advance(ringbuf);
> +	intel_ring_emit(ring, MI_NOOP);
> +	intel_ring_advance(ring);
>  
>  	return 0;
>  }
> diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
> index 5e9c7c15a84b..451c59b8f526 100644
> --- a/drivers/gpu/drm/i915/intel_overlay.c
> +++ b/drivers/gpu/drm/i915/intel_overlay.c
> @@ -252,11 +252,11 @@ static int intel_overlay_on(struct intel_overlay *overlay)
>  
>  	overlay->active = true;
>  
> -	intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
> -	intel_ring_emit(req->ringbuf, overlay->flip_addr | OFC_UPDATE);
> -	intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
> -	intel_ring_emit(req->ringbuf, MI_NOOP);
> -	intel_ring_advance(req->ringbuf);
> +	intel_ring_emit(req->ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
> +	intel_ring_emit(req->ring, overlay->flip_addr | OFC_UPDATE);
> +	intel_ring_emit(req->ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
> +	intel_ring_emit(req->ring, MI_NOOP);
> +	intel_ring_advance(req->ring);
>  
>  	return intel_overlay_do_wait_request(overlay, req, NULL);
>  }
> @@ -293,9 +293,9 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
>  		return ret;
>  	}
>  
> -	intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
> -	intel_ring_emit(req->ringbuf, flip_addr);
> -	intel_ring_advance(req->ringbuf);
> +	intel_ring_emit(req->ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
> +	intel_ring_emit(req->ring, flip_addr);
> +	intel_ring_advance(req->ring);
>  
>  	WARN_ON(overlay->last_flip_req);
>  	i915_gem_request_assign(&overlay->last_flip_req, req);
> @@ -360,22 +360,22 @@ static int intel_overlay_off(struct intel_overlay *overlay)
>  	}
>  
>  	/* wait for overlay to go idle */
> -	intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
> -	intel_ring_emit(req->ringbuf, flip_addr);
> -	intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
> +	intel_ring_emit(req->ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
> +	intel_ring_emit(req->ring, flip_addr);
> +	intel_ring_emit(req->ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
>  	/* turn overlay off */
>  	if (IS_I830(dev)) {
>  		/* Workaround: Don't disable the overlay fully, since otherwise
>  		 * it dies on the next OVERLAY_ON cmd. */
> -		intel_ring_emit(req->ringbuf, MI_NOOP);
> -		intel_ring_emit(req->ringbuf, MI_NOOP);
> -		intel_ring_emit(req->ringbuf, MI_NOOP);
> +		intel_ring_emit(req->ring, MI_NOOP);
> +		intel_ring_emit(req->ring, MI_NOOP);
> +		intel_ring_emit(req->ring, MI_NOOP);
>  	} else {
> -		intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
> -		intel_ring_emit(req->ringbuf, flip_addr);
> -		intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
> +		intel_ring_emit(req->ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
> +		intel_ring_emit(req->ring, flip_addr);
> +		intel_ring_emit(req->ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
>  	}
> -	intel_ring_advance(req->ringbuf);
> +	intel_ring_advance(req->ring);
>  
>  	return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
>  }
> @@ -433,9 +433,9 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
>  			return ret;
>  		}
>  
> -		intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
> -		intel_ring_emit(req->ringbuf, MI_NOOP);
> -		intel_ring_advance(req->ringbuf);
> +		intel_ring_emit(req->ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
> +		intel_ring_emit(req->ring, MI_NOOP);
> +		intel_ring_advance(req->ring);
>  
>  		ret = intel_overlay_do_wait_request(overlay, req,
>  						    intel_overlay_release_old_vid_tail);
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index cc060588a287..c4610c727c49 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -95,7 +95,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
>  		       u32	invalidate_domains,
>  		       u32	flush_domains)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	u32 cmd;
>  	int ret;
>  
> @@ -122,7 +122,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
>  		       u32	invalidate_domains,
>  		       u32	flush_domains)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	u32 cmd;
>  	int ret;
>  
> @@ -215,7 +215,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
>  static int
>  intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>  	int ret;
>  
> @@ -251,7 +251,7 @@ static int
>  gen6_render_ring_flush(struct drm_i915_gem_request *req,
>  		       u32 invalidate_domains, u32 flush_domains)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	u32 flags = 0;
>  	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>  	int ret;
> @@ -303,7 +303,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
>  static int
>  gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 4);
> @@ -324,7 +324,7 @@ static int
>  gen7_render_ring_flush(struct drm_i915_gem_request *req,
>  		       u32 invalidate_domains, u32 flush_domains)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	u32 flags = 0;
>  	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>  	int ret;
> @@ -387,7 +387,7 @@ static int
>  gen8_emit_pipe_control(struct drm_i915_gem_request *req,
>  		       u32 flags, u32 scratch_addr)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 6);
> @@ -712,7 +712,7 @@ err:
>  
>  static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	struct drm_i915_private *dev_priv = req->i915;
>  	struct i915_workarounds *w = &dev_priv->workarounds;
>  	int ret, i;
> @@ -1184,7 +1184,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
>  			   unsigned int num_dwords)
>  {
>  #define MBOX_UPDATE_DWORDS 8
> -	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
> +	struct intel_ringbuffer *signaller = signaller_req->ring;
>  	struct drm_i915_private *dev_priv = signaller_req->i915;
>  	struct intel_engine_cs *waiter;
>  	int i, ret, num_rings;
> @@ -1224,7 +1224,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
>  			   unsigned int num_dwords)
>  {
>  #define MBOX_UPDATE_DWORDS 6
> -	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
> +	struct intel_ringbuffer *signaller = signaller_req->ring;
>  	struct drm_i915_private *dev_priv = signaller_req->i915;
>  	struct intel_engine_cs *waiter;
>  	int i, ret, num_rings;
> @@ -1261,7 +1261,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
>  static int gen6_signal(struct drm_i915_gem_request *signaller_req,
>  		       unsigned int num_dwords)
>  {
> -	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
> +	struct intel_ringbuffer *signaller = signaller_req->ring;
>  	struct drm_i915_private *dev_priv = signaller_req->i915;
>  	struct intel_engine_cs *useless;
>  	int i, ret, num_rings;
> @@ -1303,7 +1303,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
>  static int
>  gen6_add_request(struct drm_i915_gem_request *req)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	int ret;
>  
>  	if (req->engine->semaphore.signal)
> @@ -1342,7 +1342,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
>  	       struct intel_engine_cs *signaller,
>  	       u32 seqno)
>  {
> -	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
> +	struct intel_ringbuffer *waiter = waiter_req->ring;
>  	struct drm_i915_private *dev_priv = waiter_req->i915;
>  	int ret;
>  
> @@ -1370,7 +1370,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
>  	       struct intel_engine_cs *signaller,
>  	       u32 seqno)
>  {
> -	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
> +	struct intel_ringbuffer *waiter = waiter_req->ring;
>  	u32 dw1 = MI_SEMAPHORE_MBOX |
>  		  MI_SEMAPHORE_COMPARE |
>  		  MI_SEMAPHORE_REGISTER;
> @@ -1418,7 +1418,7 @@ do {									\
>  static int
>  pc_render_add_request(struct drm_i915_gem_request *req)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>  	int ret;
>  
> @@ -1612,7 +1612,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
>  	       u32     invalidate_domains,
>  	       u32     flush_domains)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 2);
> @@ -1628,7 +1628,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
>  static int
>  i9xx_add_request(struct drm_i915_gem_request *req)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 4);
> @@ -1773,7 +1773,7 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  			 u64 offset, u32 length,
>  			 unsigned dispatch_flags)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 2);
> @@ -1800,7 +1800,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  			 u64 offset, u32 len,
>  			 unsigned dispatch_flags)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	u32 cs_offset = req->engine->scratch.gtt_offset;
>  	int ret;
>  
> @@ -1863,7 +1863,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  			 u64 offset, u32 len,
>  			 unsigned dispatch_flags)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 2);
> @@ -2164,7 +2164,7 @@ int intel_ring_idle(struct intel_engine_cs *ring)
>  
>  int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
>  {
> -	request->ringbuf = request->engine->buffer;
> +	request->ring = request->engine->buffer;
>  	return 0;
>  }
>  
> @@ -2217,17 +2217,17 @@ void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
>  
>  static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
>  {
> -	struct intel_ringbuffer *ringbuf = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	struct intel_engine_cs *engine = req->engine;
>  	struct drm_i915_gem_request *target;
>  	unsigned space;
>  	int ret;
>  
> -	if (intel_ring_space(ringbuf) >= bytes)
> +	if (intel_ring_space(ring) >= bytes)
>  		return 0;
>  
>  	/* The whole point of reserving space is to not wait! */
> -	WARN_ON(ringbuf->reserved_in_use);
> +	WARN_ON(ring->reserved_in_use);
>  
>  	list_for_each_entry(target, &engine->request_list, list) {
>  		/*
> @@ -2235,12 +2235,12 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
>  		 * from multiple ringbuffers. Here, we must ignore any that
>  		 * aren't from the ringbuffer we're considering.
>  		 */
> -		if (target->ringbuf != ringbuf)
> +		if (target->ring != ring)
>  			continue;
>  
>  		/* Would completion of this request free enough space? */
> -		space = __intel_ring_space(target->postfix, ringbuf->tail,
> -					   ringbuf->size);
> +		space = __intel_ring_space(target->postfix, ring->tail,
> +					   ring->size);
>  		if (space >= bytes)
>  			break;
>  	}
> @@ -2252,7 +2252,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
>  	if (ret)
>  		return ret;
>  
> -	ringbuf->space = space;
> +	ring->space = space;
>  	return 0;
>  }
>  
> @@ -2267,16 +2267,16 @@ static void ring_wrap(struct intel_ringbuffer *ringbuf)
>  
>  static int ring_prepare(struct drm_i915_gem_request *req, int bytes)
>  {
> -	struct intel_ringbuffer *ringbuf = req->ringbuf;
> -	int remain_usable = ringbuf->effective_size - ringbuf->tail;
> -	int remain_actual = ringbuf->size - ringbuf->tail;
> +	struct intel_ringbuffer *ring = req->ring;
> +	int remain_usable = ring->effective_size - ring->tail;
> +	int remain_actual = ring->size - ring->tail;
>  	int ret, total_bytes, wait_bytes = 0;
>  	bool need_wrap = false;
>  
> -	if (ringbuf->reserved_in_use)
> +	if (ring->reserved_in_use)
>  		total_bytes = bytes;
>  	else
> -		total_bytes = bytes + ringbuf->reserved_size;
> +		total_bytes = bytes + ring->reserved_size;
>  
>  	if (unlikely(bytes > remain_usable)) {
>  		/*
> @@ -2292,9 +2292,9 @@ static int ring_prepare(struct drm_i915_gem_request *req, int bytes)
>  			 * falls off the end. So only need to to wait for the
>  			 * reserved size after flushing out the remainder.
>  			 */
> -			wait_bytes = remain_actual + ringbuf->reserved_size;
> +			wait_bytes = remain_actual + ring->reserved_size;
>  			need_wrap = true;
> -		} else if (total_bytes > ringbuf->space) {
> +		} else if (total_bytes > ring->space) {
>  			/* No wrapping required, just waiting. */
>  			wait_bytes = total_bytes;
>  		}
> @@ -2306,7 +2306,7 @@ static int ring_prepare(struct drm_i915_gem_request *req, int bytes)
>  			return ret;
>  
>  		if (need_wrap)
> -			ring_wrap(ringbuf);
> +			ring_wrap(ring);
>  	}
>  
>  	return 0;
> @@ -2325,14 +2325,14 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
>  	if (ret)
>  		return ret;
>  
> -	req->ringbuf->space -= num_dwords * sizeof(uint32_t);
> +	req->ring->space -= num_dwords * sizeof(uint32_t);
>  	return 0;
>  }
>  
>  /* Align the ring tail to a cacheline boundary */
>  int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	int num_dwords = (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
>  	int ret;
>  
> @@ -2404,7 +2404,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
>  static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
>  			       u32 invalidate, u32 flush)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	uint32_t cmd;
>  	int ret;
>  
> @@ -2450,7 +2450,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  			      u64 offset, u32 len,
>  			      unsigned dispatch_flags)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	bool ppgtt = USES_PPGTT(req->i915) &&
>  			!(dispatch_flags & I915_DISPATCH_SECURE);
>  	int ret;
> @@ -2476,7 +2476,7 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  			     u64 offset, u32 len,
>  			     unsigned dispatch_flags)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 2);
> @@ -2501,7 +2501,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  			      u64 offset, u32 len,
>  			      unsigned dispatch_flags)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 2);
> @@ -2524,7 +2524,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  static int gen6_ring_flush(struct drm_i915_gem_request *req,
>  			   u32 invalidate, u32 flush)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>  	uint32_t cmd;
>  	int ret;
>  
> -- 
> 2.6.2
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
Tvrtko Ursulin Nov. 24, 2015, 3:08 p.m. UTC | #2
On 20/11/15 12:43, Chris Wilson wrote:
> Now that we have disambuigated ring and engine, we can use the clearer
> and more consistent name for the intel_ringbuffer pointer in the
> request.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_drv.h            |   2 +-
>   drivers/gpu/drm/i915/i915_gem.c            |  28 +++---
>   drivers/gpu/drm/i915/i915_gem_context.c    |   2 +-
>   drivers/gpu/drm/i915/i915_gem_execbuffer.c |   4 +-
>   drivers/gpu/drm/i915/i915_gem_gtt.c        |   6 +-
>   drivers/gpu/drm/i915/intel_display.c       |  10 +-
>   drivers/gpu/drm/i915/intel_lrc.c           | 149 ++++++++++++++---------------
>   drivers/gpu/drm/i915/intel_mocs.c          |  32 +++----
>   drivers/gpu/drm/i915/intel_overlay.c       |  42 ++++----
>   drivers/gpu/drm/i915/intel_ringbuffer.c    |  86 ++++++++---------
>   10 files changed, 178 insertions(+), 183 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 9ce8b3fcb3a0..b7eaa2deb437 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2185,7 +2185,7 @@ struct drm_i915_gem_request {
>   	 * context.
>   	 */
>   	struct intel_context *ctx;
> -	struct intel_ringbuffer *ringbuf;
> +	struct intel_ringbuffer *ring;

What was the problem with ringbuf? The struct is still called ringbuf, 
and so are the files, after the patch series.

Regards,

Tvrtko
Chris Wilson Nov. 24, 2015, 3:25 p.m. UTC | #3
On Tue, Nov 24, 2015 at 03:08:09PM +0000, Tvrtko Ursulin wrote:
> 
> On 20/11/15 12:43, Chris Wilson wrote:
> >Now that we have disambuigated ring and engine, we can use the clearer
> >and more consistent name for the intel_ringbuffer pointer in the
> >request.
> >
> >Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >---
> >  drivers/gpu/drm/i915/i915_drv.h            |   2 +-
> >  drivers/gpu/drm/i915/i915_gem.c            |  28 +++---
> >  drivers/gpu/drm/i915/i915_gem_context.c    |   2 +-
> >  drivers/gpu/drm/i915/i915_gem_execbuffer.c |   4 +-
> >  drivers/gpu/drm/i915/i915_gem_gtt.c        |   6 +-
> >  drivers/gpu/drm/i915/intel_display.c       |  10 +-
> >  drivers/gpu/drm/i915/intel_lrc.c           | 149 ++++++++++++++---------------
> >  drivers/gpu/drm/i915/intel_mocs.c          |  32 +++----
> >  drivers/gpu/drm/i915/intel_overlay.c       |  42 ++++----
> >  drivers/gpu/drm/i915/intel_ringbuffer.c    |  86 ++++++++---------
> >  10 files changed, 178 insertions(+), 183 deletions(-)
> >
> >diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> >index 9ce8b3fcb3a0..b7eaa2deb437 100644
> >--- a/drivers/gpu/drm/i915/i915_drv.h
> >+++ b/drivers/gpu/drm/i915/i915_drv.h
> >@@ -2185,7 +2185,7 @@ struct drm_i915_gem_request {
> >  	 * context.
> >  	 */
> >  	struct intel_context *ctx;
> >-	struct intel_ringbuffer *ringbuf;
> >+	struct intel_ringbuffer *ring;
> 
> What was the problem with ringbuf? Struct is still called ringbuf
> and the files as well after the patch series.

It introduced a major naming clash with existing code. I am trying to
remove the needlessly duplicated interfaces, and restore the historic
naming conventions.
-Chris
Tvrtko Ursulin Nov. 25, 2015, 10:22 a.m. UTC | #4
On 24/11/15 15:25, Chris Wilson wrote:
> On Tue, Nov 24, 2015 at 03:08:09PM +0000, Tvrtko Ursulin wrote:
>>
>> On 20/11/15 12:43, Chris Wilson wrote:
>>> Now that we have disambuigated ring and engine, we can use the clearer
>>> and more consistent name for the intel_ringbuffer pointer in the
>>> request.
>>>
>>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>>> ---
>>>   drivers/gpu/drm/i915/i915_drv.h            |   2 +-
>>>   drivers/gpu/drm/i915/i915_gem.c            |  28 +++---
>>>   drivers/gpu/drm/i915/i915_gem_context.c    |   2 +-
>>>   drivers/gpu/drm/i915/i915_gem_execbuffer.c |   4 +-
>>>   drivers/gpu/drm/i915/i915_gem_gtt.c        |   6 +-
>>>   drivers/gpu/drm/i915/intel_display.c       |  10 +-
>>>   drivers/gpu/drm/i915/intel_lrc.c           | 149 ++++++++++++++---------------
>>>   drivers/gpu/drm/i915/intel_mocs.c          |  32 +++----
>>>   drivers/gpu/drm/i915/intel_overlay.c       |  42 ++++----
>>>   drivers/gpu/drm/i915/intel_ringbuffer.c    |  86 ++++++++---------
>>>   10 files changed, 178 insertions(+), 183 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
>>> index 9ce8b3fcb3a0..b7eaa2deb437 100644
>>> --- a/drivers/gpu/drm/i915/i915_drv.h
>>> +++ b/drivers/gpu/drm/i915/i915_drv.h
>>> @@ -2185,7 +2185,7 @@ struct drm_i915_gem_request {
>>>   	 * context.
>>>   	 */
>>>   	struct intel_context *ctx;
>>> -	struct intel_ringbuffer *ringbuf;
>>> +	struct intel_ringbuffer *ring;
>>
>> What was the problem with ringbuf? Struct is still called ringbuf
>> and the files as well after the patch series.
>
> It introduced a major naming clash with existing code. I am trying to
> remove the needlessly duplicated interfaces, and restore the historic
> naming conventions.

OK, my point was that I am not sure a) whether it is worth renaming 
things only partially, and b) whether ring is a good name for 
intel_ringbuffer. Ringbuf sounds at least as good — in fact better — to 
me. So this renaming feels like unnecessary churn, and the fact that the 
series does not even do all of the renames just reinforces that.

But as Daniel has already approved this, it doesn't really matter except 
for the record.

Regards,

Tvrtko
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9ce8b3fcb3a0..b7eaa2deb437 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2185,7 +2185,7 @@  struct drm_i915_gem_request {
 	 * context.
 	 */
 	struct intel_context *ctx;
-	struct intel_ringbuffer *ringbuf;
+	struct intel_ringbuffer *ring;
 
 	/** Batch buffer related to this request if any (used for
 	    error state dump only) */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5a1b51a27fe3..d6706dd4117c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1386,7 +1386,7 @@  static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	 * Note this requires that we are always called in request
 	 * completion order.
 	 */
-	request->ringbuf->last_retired_head = request->postfix;
+	request->ring->last_retired_head = request->postfix;
 
 	list_del_init(&request->list);
 	i915_gem_request_remove_from_client(request);
@@ -2553,7 +2553,7 @@  void __i915_add_request(struct drm_i915_gem_request *request,
 {
 	struct intel_engine_cs *engine;
 	struct drm_i915_private *dev_priv;
-	struct intel_ringbuffer *ringbuf;
+	struct intel_ringbuffer *ring;
 	u32 request_start;
 	int ret;
 
@@ -2562,16 +2562,16 @@  void __i915_add_request(struct drm_i915_gem_request *request,
 
 	engine = request->engine;
 	dev_priv = request->i915;
-	ringbuf = request->ringbuf;
+	ring = request->ring;
 
 	/*
 	 * To ensure that this call will not fail, space for its emissions
 	 * should already have been reserved in the ring buffer. Let the ring
 	 * know that it is time to use that space up.
 	 */
-	intel_ring_reserved_space_use(ringbuf);
+	intel_ring_reserved_space_use(ring);
 
-	request_start = intel_ring_get_tail(ringbuf);
+	request_start = intel_ring_get_tail(ring);
 	/*
 	 * Emit any outstanding flushes - execbuf can fail to emit the flush
 	 * after having emitted the batchbuffer command. Hence we need to fix
@@ -2593,14 +2593,14 @@  void __i915_add_request(struct drm_i915_gem_request *request,
 	 * GPU processing the request, we never over-estimate the
 	 * position of the head.
 	 */
-	request->postfix = intel_ring_get_tail(ringbuf);
+	request->postfix = intel_ring_get_tail(ring);
 
 	if (i915.enable_execlists)
 		ret = engine->emit_request(request);
 	else {
 		ret = engine->add_request(request);
 
-		request->tail = intel_ring_get_tail(ringbuf);
+		request->tail = intel_ring_get_tail(ring);
 	}
 	/* Not allowed to fail! */
 	WARN(ret, "emit|add_request failed: %d!\n", ret);
@@ -2629,7 +2629,7 @@  void __i915_add_request(struct drm_i915_gem_request *request,
 	intel_mark_busy(dev_priv->dev);
 
 	/* Sanity check that the reserved size was large enough. */
-	intel_ring_reserved_space_end(ringbuf);
+	intel_ring_reserved_space_end(ring);
 }
 
 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
@@ -2741,7 +2741,7 @@  int i915_gem_request_alloc(struct intel_engine_cs *engine,
 	 * to be redone if the request is not actually submitted straight
 	 * away, e.g. because a GPU scheduler has deferred it.
 	 */
-	intel_ring_reserved_space_reserve(req->ringbuf,
+	intel_ring_reserved_space_reserve(req->ring,
 					  MIN_SPACE_FOR_ADD_REQUEST);
 	ret = intel_ring_begin(req, 0);
 	if (ret) {
@@ -2764,7 +2764,7 @@  err:
 
 void i915_gem_request_cancel(struct drm_i915_gem_request *req)
 {
-	intel_ring_reserved_space_cancel(req->ringbuf);
+	intel_ring_reserved_space_cancel(req->ring);
 
 	i915_gem_request_unreference(req);
 }
@@ -4657,11 +4657,11 @@  int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 	 * at initialization time.
 	 */
 	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
-		intel_ring_emit(req->ringbuf, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(req->ringbuf, reg_base + i);
-		intel_ring_emit(req->ringbuf, remap_info[i/4]);
+		intel_ring_emit(req->ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(req->ring, reg_base + i);
+		intel_ring_emit(req->ring, remap_info[i/4]);
 	}
-	intel_ring_advance(req->ringbuf);
+	intel_ring_advance(req->ring);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 047c2f94bd22..5d0516930b16 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -479,7 +479,7 @@  i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 32c2d08bdd4c..afa930dab632 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1115,7 +1115,7 @@  i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 static int
 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret, i;
 
 	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
@@ -1196,7 +1196,7 @@  i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
 			       struct list_head *vmas)
 {
-	struct intel_ringbuffer *ring = params->request->ringbuf;
+	struct intel_ringbuffer *ring = params->request->ring;
 	struct drm_i915_private *dev_priv = params->request->i915;
 	u64 exec_start, exec_len;
 	int instp_mode;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c222456961fb..b1ee6f89e70b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -651,7 +651,7 @@  static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	BUG_ON(entry >= 4);
@@ -1588,7 +1588,7 @@  static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			 struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1626,7 +1626,7 @@  static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2447f1a36fb0..cdd6074257af 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10824,7 +10824,7 @@  static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -10859,7 +10859,7 @@  static int intel_gen3_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -10891,7 +10891,7 @@  static int intel_gen4_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -10930,7 +10930,7 @@  static int intel_gen6_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -10966,7 +10966,7 @@  static int intel_gen7_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t plane_bit = 0;
 	int len, ret;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 1502e53d2ad6..5c37922c3cde 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -339,7 +339,7 @@  static int execlists_update_context(struct drm_i915_gem_request *rq)
 {
 	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
 	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[rq->engine->id].state;
-	struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
+	struct drm_i915_gem_object *rb_obj = rq->ring->obj;
 	struct page *page;
 	uint32_t *reg_state;
 
@@ -545,7 +545,7 @@  static int execlists_context_queue(struct drm_i915_gem_request *request)
 
 	i915_gem_request_reference(request);
 
-	request->tail = request->ringbuf->tail;
+	request->tail = request->ring->tail;
 
 	spin_lock_irq(&engine->execlist_lock);
 
@@ -633,7 +633,7 @@  int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 {
 	int ret;
 
-	request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
+	request->ring = request->ctx->engine[request->engine->id].ringbuf;
 
 	if (request->ctx != request->engine->default_context) {
 		ret = intel_lr_context_pin(request);
@@ -656,7 +656,7 @@  int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 static void
 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
-	intel_ring_advance(request->ringbuf);
+	intel_ring_advance(request->ring);
 
 	if (intel_ring_stopped(request->engine))
 		return;
@@ -686,9 +686,9 @@  int intel_execlists_submission(struct i915_execbuffer_params *params,
 			       struct list_head *vmas)
 {
 	struct drm_device       *dev = params->dev;
-	struct intel_engine_cs  *ring = params->ring;
+	struct intel_engine_cs  *engine = params->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
+	struct intel_ringbuffer *ring = params->request->ring;
 	u64 exec_start;
 	int instp_mode;
 	u32 instp_mask;
@@ -700,7 +700,7 @@  int intel_execlists_submission(struct i915_execbuffer_params *params,
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+		if (instp_mode != 0 && engine->id != RCS) {
 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
 			return -EINVAL;
 		}
@@ -729,17 +729,17 @@  int intel_execlists_submission(struct i915_execbuffer_params *params,
 	if (ret)
 		return ret;
 
-	if (ring == &dev_priv->ring[RCS] &&
+	if (engine->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
 		ret = intel_ring_begin(params->request, 4);
 		if (ret)
 			return ret;
 
-		intel_ring_emit(ringbuf, MI_NOOP);
-		intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(ringbuf, INSTPM);
-		intel_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
-		intel_ring_advance(ringbuf);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, INSTPM);
+		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+		intel_ring_advance(ring);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
@@ -747,7 +747,7 @@  int intel_execlists_submission(struct i915_execbuffer_params *params,
 	exec_start = params->batch_obj_vm_offset +
 		     args->batch_start_offset;
 
-	ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
+	ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
 	if (ret)
 		return ret;
 
@@ -827,7 +827,7 @@  static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 {
 	int engine = rq->engine->id;
 	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[engine].state;
-	struct intel_ringbuffer *ringbuf = rq->ringbuf;
+	struct intel_ringbuffer *ring = rq->ring;
 	int ret = 0;
 
 	WARN_ON(!mutex_is_locked(&rq->i915->dev->struct_mutex));
@@ -837,7 +837,7 @@  static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 		if (ret)
 			goto reset_pin_count;
 
-		ret = intel_pin_and_map_ringbuffer_obj(rq->i915->dev, ringbuf);
+		ret = intel_pin_and_map_ringbuffer_obj(rq->i915->dev, ring);
 		if (ret)
 			goto unpin_ctx_obj;
 
@@ -858,12 +858,12 @@  void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
 {
 	int engine = rq->engine->id;
 	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[engine].state;
-	struct intel_ringbuffer *ringbuf = rq->ringbuf;
+	struct intel_ringbuffer *ring = rq->ring;
 
 	if (ctx_obj) {
 		WARN_ON(!mutex_is_locked(&rq->i915->dev->struct_mutex));
 		if (--rq->ctx->engine[engine].pin_count == 0) {
-			intel_unpin_ringbuffer_obj(ringbuf);
+			intel_unpin_ringbuffer_obj(ring);
 			i915_gem_object_ggtt_unpin(ctx_obj);
 		}
 	}
@@ -873,7 +873,7 @@  static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct drm_i915_private *dev_priv = req->i915;
 	struct i915_workarounds *w = &dev_priv->workarounds;
 
@@ -889,14 +889,14 @@  static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit(ringbuf, w->reg[i].addr);
-		intel_ring_emit(ringbuf, w->reg[i].value);
+		intel_ring_emit(ring, w->reg[i].addr);
+		intel_ring_emit(ring, w->reg[i].value);
 	}
-	intel_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_emit(ring, MI_NOOP);
 
-	intel_ring_advance(ringbuf);
+	intel_ring_advance(ring);
 
 	engine->gpu_caches_dirty = true;
 	ret = logical_ring_flush_all_caches(req);
@@ -1318,7 +1318,7 @@  static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
 	int i, ret;
 
@@ -1326,18 +1326,18 @@  static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-		intel_ring_emit(ringbuf, GEN8_RING_PDP_UDW(engine, i));
-		intel_ring_emit(ringbuf, upper_32_bits(pd_daddr));
-		intel_ring_emit(ringbuf, GEN8_RING_PDP_LDW(engine, i));
-		intel_ring_emit(ringbuf, lower_32_bits(pd_daddr));
+		intel_ring_emit(ring, GEN8_RING_PDP_UDW(engine, i));
+		intel_ring_emit(ring, upper_32_bits(pd_daddr));
+		intel_ring_emit(ring, GEN8_RING_PDP_LDW(engine, i));
+		intel_ring_emit(ring, lower_32_bits(pd_daddr));
 	}
 
-	intel_ring_emit(ringbuf, MI_NOOP);
-	intel_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1345,7 +1345,7 @@  static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 			      u64 offset, unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
@@ -1371,14 +1371,14 @@  static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		return ret;
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
 			(ppgtt<<8) |
 			(dispatch_flags & I915_DISPATCH_RS ?
 			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(ringbuf, lower_32_bits(offset));
-	intel_ring_emit(ringbuf, upper_32_bits(offset));
-	intel_ring_emit(ringbuf, MI_NOOP);
-	intel_ring_advance(ringbuf);
+	intel_ring_emit(ring, lower_32_bits(offset));
+	intel_ring_emit(ring, upper_32_bits(offset));
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1420,10 +1420,7 @@  static int gen8_emit_flush(struct drm_i915_gem_request *request,
 			   u32 invalidate_domains,
 			   u32 unused)
 {
-	struct intel_ringbuffer *ringbuf = request->ringbuf;
-	struct intel_engine_cs *ring = ringbuf->ring;
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ringbuffer *ring = request->ring;
 	uint32_t cmd;
 	int ret;
 
@@ -1442,17 +1439,17 @@  static int gen8_emit_flush(struct drm_i915_gem_request *request,
 
 	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
 		cmd |= MI_INVALIDATE_TLB;
-		if (ring == &dev_priv->ring[VCS])
+		if (request->engine->id == VCS)
 			cmd |= MI_INVALIDATE_BSD;
 	}
 
-	intel_ring_emit(ringbuf, cmd);
-	intel_ring_emit(ringbuf,
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring,
 			I915_GEM_HWS_SCRATCH_ADDR |
 			MI_FLUSH_DW_USE_GTT);
-	intel_ring_emit(ringbuf, 0); /* upper addr */
-	intel_ring_emit(ringbuf, 0); /* value */
-	intel_ring_advance(ringbuf);
+	intel_ring_emit(ring, 0); /* upper addr */
+	intel_ring_emit(ring, 0); /* value */
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1461,9 +1458,8 @@  static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 				  u32 invalidate_domains,
 				  u32 flush_domains)
 {
-	struct intel_ringbuffer *ringbuf = request->ringbuf;
-	struct intel_engine_cs *ring = ringbuf->ring;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	struct intel_ringbuffer *ring = request->ring;
+	u32 scratch_addr = request->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa;
 	u32 flags = 0;
 	int ret;
@@ -1491,7 +1487,7 @@  static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 	 * On GEN9+ Before VF_CACHE_INVALIDATE we need to emit a NULL pipe
 	 * control.
 	 */
-	vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
+	vf_flush_wa = INTEL_INFO(request->i915)->gen >= 9 &&
 		      flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
 
 	ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6);
@@ -1499,21 +1495,21 @@  static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 		return ret;
 
 	if (vf_flush_wa) {
-		intel_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ringbuf, 0);
-		intel_ring_emit(ringbuf, 0);
-		intel_ring_emit(ringbuf, 0);
-		intel_ring_emit(ringbuf, 0);
-		intel_ring_emit(ringbuf, 0);
+		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
 	}
 
-	intel_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ringbuf, flags);
-	intel_ring_emit(ringbuf, scratch_addr);
-	intel_ring_emit(ringbuf, 0);
-	intel_ring_emit(ringbuf, 0);
-	intel_ring_emit(ringbuf, 0);
-	intel_ring_advance(ringbuf);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1530,8 +1526,7 @@  static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
 
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ringbuf = request->ringbuf;
-	struct intel_engine_cs *ring = ringbuf->ring;
+	struct intel_ringbuffer *ring = request->ring;
 	u32 cmd;
 	int ret;
 
@@ -1547,23 +1542,23 @@  static int gen8_emit_request(struct drm_i915_gem_request *request)
 	cmd = MI_STORE_DWORD_IMM_GEN4;
 	cmd |= MI_GLOBAL_GTT;
 
-	intel_ring_emit(ringbuf, cmd);
-	intel_ring_emit(ringbuf,
-			(ring->status_page.gfx_addr +
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring,
+			(request->engine->status_page.gfx_addr +
 			 (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
-	intel_ring_emit(ringbuf, 0);
-	intel_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
-	intel_ring_emit(ringbuf, MI_USER_INTERRUPT);
-	intel_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, i915_gem_request_get_seqno(request));
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_emit(ring, MI_NOOP);
 	intel_logical_ring_advance_and_submit(request);
 
 	/*
 	 * Here we add two extra NOOPs as padding to avoid
 	 * lite restore of a context with HEAD==TAIL.
 	 */
-	intel_ring_emit(ringbuf, MI_NOOP);
-	intel_ring_emit(ringbuf, MI_NOOP);
-	intel_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index d79c9c0bbffb..3a1bd5f7af55 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -174,7 +174,7 @@  static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 				   const struct drm_i915_mocs_table *table,
 				   u32 reg_base)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	unsigned int index;
 	int ret;
 
@@ -185,11 +185,11 @@  static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
 
 	for (index = 0; index < table->size; index++) {
-		intel_ring_emit(ringbuf, reg_base + index * 4);
-		intel_ring_emit(ringbuf, table->table[index].control_value);
+		intel_ring_emit(ring, reg_base + index * 4);
+		intel_ring_emit(ring, table->table[index].control_value);
 	}
 
 	/*
@@ -201,12 +201,12 @@  static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	 * that value to all the used entries.
 	 */
 	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-		intel_ring_emit(ringbuf, reg_base + index * 4);
-		intel_ring_emit(ringbuf, table->table[0].control_value);
+		intel_ring_emit(ring, reg_base + index * 4);
+		intel_ring_emit(ring, table->table[0].control_value);
 	}
 
-	intel_ring_emit(ringbuf, MI_NOOP);
-	intel_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -225,7 +225,7 @@  static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 				const struct drm_i915_mocs_table *table)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	unsigned int count;
 	unsigned int i;
 	u32 value;
@@ -240,15 +240,15 @@  static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ringbuf,
+	intel_ring_emit(ring,
 			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
 
 	for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
 		value = (table->table[count].l3cc_value & 0xffff) |
 			((table->table[count + 1].l3cc_value & 0xffff) << 16);
 
-		intel_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
-		intel_ring_emit(ringbuf, value);
+		intel_ring_emit(ring, GEN9_LNCFCMOCS0 + i * 4);
+		intel_ring_emit(ring, value);
 	}
 
 	if (table->size & 0x01) {
@@ -264,14 +264,14 @@  static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	 * they are reserved by the hardware.
 	 */
 	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-		intel_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
-		intel_ring_emit(ringbuf, value);
+		intel_ring_emit(ring, GEN9_LNCFCMOCS0 + i * 4);
+		intel_ring_emit(ring, value);
 
 		value = filler;
 	}
 
-	intel_ring_emit(ringbuf, MI_NOOP);
-	intel_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 5e9c7c15a84b..451c59b8f526 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -252,11 +252,11 @@  static int intel_overlay_on(struct intel_overlay *overlay)
 
 	overlay->active = true;
 
-	intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-	intel_ring_emit(req->ringbuf, overlay->flip_addr | OFC_UPDATE);
-	intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	intel_ring_emit(req->ringbuf, MI_NOOP);
-	intel_ring_advance(req->ringbuf);
+	intel_ring_emit(req->ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+	intel_ring_emit(req->ring, overlay->flip_addr | OFC_UPDATE);
+	intel_ring_emit(req->ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(req->ring, MI_NOOP);
+	intel_ring_advance(req->ring);
 
 	return intel_overlay_do_wait_request(overlay, req, NULL);
 }
@@ -293,9 +293,9 @@  static int intel_overlay_continue(struct intel_overlay *overlay,
 		return ret;
 	}
 
-	intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(req->ringbuf, flip_addr);
-	intel_ring_advance(req->ringbuf);
+	intel_ring_emit(req->ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(req->ring, flip_addr);
+	intel_ring_advance(req->ring);
 
 	WARN_ON(overlay->last_flip_req);
 	i915_gem_request_assign(&overlay->last_flip_req, req);
@@ -360,22 +360,22 @@  static int intel_overlay_off(struct intel_overlay *overlay)
 	}
 
 	/* wait for overlay to go idle */
-	intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(req->ringbuf, flip_addr);
-	intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(req->ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(req->ring, flip_addr);
+	intel_ring_emit(req->ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	/* turn overlay off */
 	if (IS_I830(dev)) {
 		/* Workaround: Don't disable the overlay fully, since otherwise
 		 * it dies on the next OVERLAY_ON cmd. */
-		intel_ring_emit(req->ringbuf, MI_NOOP);
-		intel_ring_emit(req->ringbuf, MI_NOOP);
-		intel_ring_emit(req->ringbuf, MI_NOOP);
+		intel_ring_emit(req->ring, MI_NOOP);
+		intel_ring_emit(req->ring, MI_NOOP);
+		intel_ring_emit(req->ring, MI_NOOP);
 	} else {
-		intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-		intel_ring_emit(req->ringbuf, flip_addr);
-		intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+		intel_ring_emit(req->ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+		intel_ring_emit(req->ring, flip_addr);
+		intel_ring_emit(req->ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	}
-	intel_ring_advance(req->ringbuf);
+	intel_ring_advance(req->ring);
 
 	return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
 }
@@ -433,9 +433,9 @@  static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 			return ret;
 		}
 
-		intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-		intel_ring_emit(req->ringbuf, MI_NOOP);
-		intel_ring_advance(req->ringbuf);
+		intel_ring_emit(req->ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+		intel_ring_emit(req->ring, MI_NOOP);
+		intel_ring_advance(req->ring);
 
 		ret = intel_overlay_do_wait_request(overlay, req,
 						    intel_overlay_release_old_vid_tail);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cc060588a287..c4610c727c49 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -95,7 +95,7 @@  gen2_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 cmd;
 	int ret;
 
@@ -122,7 +122,7 @@  gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 cmd;
 	int ret;
 
@@ -215,7 +215,7 @@  gen4_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
@@ -251,7 +251,7 @@  static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 flags = 0;
 	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
@@ -303,7 +303,7 @@  gen6_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
@@ -324,7 +324,7 @@  static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 flags = 0;
 	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
@@ -387,7 +387,7 @@  static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -712,7 +712,7 @@  err:
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct drm_i915_private *dev_priv = req->i915;
 	struct i915_workarounds *w = &dev_priv->workarounds;
 	int ret, i;
@@ -1184,7 +1184,7 @@  static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 8
-	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
+	struct intel_ringbuffer *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	int i, ret, num_rings;
@@ -1224,7 +1224,7 @@  static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 6
-	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
+	struct intel_ringbuffer *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	int i, ret, num_rings;
@@ -1261,7 +1261,7 @@  static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		       unsigned int num_dwords)
 {
-	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
+	struct intel_ringbuffer *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *useless;
 	int i, ret, num_rings;
@@ -1303,7 +1303,7 @@  static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 static int
 gen6_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	if (req->engine->semaphore.signal)
@@ -1342,7 +1342,7 @@  gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
+	struct intel_ringbuffer *waiter = waiter_req->ring;
 	struct drm_i915_private *dev_priv = waiter_req->i915;
 	int ret;
 
@@ -1370,7 +1370,7 @@  gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
+	struct intel_ringbuffer *waiter = waiter_req->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
@@ -1418,7 +1418,7 @@  do {									\
 static int
 pc_render_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
@@ -1612,7 +1612,7 @@  bsd_ring_flush(struct drm_i915_gem_request *req,
 	       u32     invalidate_domains,
 	       u32     flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -1628,7 +1628,7 @@  bsd_ring_flush(struct drm_i915_gem_request *req,
 static int
 i9xx_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
@@ -1773,7 +1773,7 @@  i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 length,
 			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -1800,7 +1800,7 @@  i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 cs_offset = req->engine->scratch.gtt_offset;
 	int ret;
 
@@ -1863,7 +1863,7 @@  i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -2164,7 +2164,7 @@  int intel_ring_idle(struct intel_engine_cs *ring)
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
-	request->ringbuf = request->engine->buffer;
+	request->ring = request->engine->buffer;
 	return 0;
 }
 
@@ -2217,17 +2217,17 @@  void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
 
 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_gem_request *target;
 	unsigned space;
 	int ret;
 
-	if (intel_ring_space(ringbuf) >= bytes)
+	if (intel_ring_space(ring) >= bytes)
 		return 0;
 
 	/* The whole point of reserving space is to not wait! */
-	WARN_ON(ringbuf->reserved_in_use);
+	WARN_ON(ring->reserved_in_use);
 
 	list_for_each_entry(target, &engine->request_list, list) {
 		/*
@@ -2235,12 +2235,12 @@  static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 		 * from multiple ringbuffers. Here, we must ignore any that
 		 * aren't from the ringbuffer we're considering.
 		 */
-		if (target->ringbuf != ringbuf)
+		if (target->ring != ring)
 			continue;
 
 		/* Would completion of this request free enough space? */
-		space = __intel_ring_space(target->postfix, ringbuf->tail,
-					   ringbuf->size);
+		space = __intel_ring_space(target->postfix, ring->tail,
+					   ring->size);
 		if (space >= bytes)
 			break;
 	}
@@ -2252,7 +2252,7 @@  static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	if (ret)
 		return ret;
 
-	ringbuf->space = space;
+	ring->space = space;
 	return 0;
 }
 
@@ -2267,16 +2267,16 @@  static void ring_wrap(struct intel_ringbuffer *ringbuf)
 
 static int ring_prepare(struct drm_i915_gem_request *req, int bytes)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	int remain_usable = ringbuf->effective_size - ringbuf->tail;
-	int remain_actual = ringbuf->size - ringbuf->tail;
+	struct intel_ringbuffer *ring = req->ring;
+	int remain_usable = ring->effective_size - ring->tail;
+	int remain_actual = ring->size - ring->tail;
 	int ret, total_bytes, wait_bytes = 0;
 	bool need_wrap = false;
 
-	if (ringbuf->reserved_in_use)
+	if (ring->reserved_in_use)
 		total_bytes = bytes;
 	else
-		total_bytes = bytes + ringbuf->reserved_size;
+		total_bytes = bytes + ring->reserved_size;
 
 	if (unlikely(bytes > remain_usable)) {
 		/*
@@ -2292,9 +2292,9 @@  static int ring_prepare(struct drm_i915_gem_request *req, int bytes)
 			 * falls off the end. So only need to to wait for the
 			 * reserved size after flushing out the remainder.
 			 */
-			wait_bytes = remain_actual + ringbuf->reserved_size;
+			wait_bytes = remain_actual + ring->reserved_size;
 			need_wrap = true;
-		} else if (total_bytes > ringbuf->space) {
+		} else if (total_bytes > ring->space) {
 			/* No wrapping required, just waiting. */
 			wait_bytes = total_bytes;
 		}
@@ -2306,7 +2306,7 @@  static int ring_prepare(struct drm_i915_gem_request *req, int bytes)
 			return ret;
 
 		if (need_wrap)
-			ring_wrap(ringbuf);
+			ring_wrap(ring);
 	}
 
 	return 0;
@@ -2325,14 +2325,14 @@  int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	if (ret)
 		return ret;
 
-	req->ringbuf->space -= num_dwords * sizeof(uint32_t);
+	req->ring->space -= num_dwords * sizeof(uint32_t);
 	return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int num_dwords = (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
 
@@ -2404,7 +2404,7 @@  static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 			       u32 invalidate, u32 flush)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	uint32_t cmd;
 	int ret;
 
@@ -2450,7 +2450,7 @@  gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	bool ppgtt = USES_PPGTT(req->i915) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
@@ -2476,7 +2476,7 @@  hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			     u64 offset, u32 len,
 			     unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -2501,7 +2501,7 @@  gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -2524,7 +2524,7 @@  gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	uint32_t cmd;
 	int ret;