diff mbox

[03/12] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit

Message ID 1448023432-10726-3-git-send-email-chris@chris-wilson.co.uk
State New, archived
Headers show

Commit Message

Chris Wilson Nov. 20, 2015, 12:43 p.m. UTC
Both perform the same actions with more or less indirection, so just
unify the code.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c            |   2 +-
 drivers/gpu/drm/i915/i915_gem_context.c    |   8 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  34 ++++-----
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  26 +++----
 drivers/gpu/drm/i915/intel_display.c       |  26 +++----
 drivers/gpu/drm/i915/intel_lrc.c           | 118 ++++++++++++++---------------
 drivers/gpu/drm/i915/intel_lrc.h           |  21 -----
 drivers/gpu/drm/i915/intel_mocs.c          |  30 ++++----
 drivers/gpu/drm/i915/intel_overlay.c       |  42 +++++-----
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 107 +++++++++++++-------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  15 +---
 11 files changed, 195 insertions(+), 234 deletions(-)

Comments

Daniel Vetter Nov. 24, 2015, 2:33 p.m. UTC | #1
On Fri, Nov 20, 2015 at 12:43:43PM +0000, Chris Wilson wrote:
> Both perform the same actions with more or less indirection, so just
> unify the code.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_gem.c            |   2 +-
>  drivers/gpu/drm/i915/i915_gem_context.c    |   8 +-
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c |  34 ++++-----
>  drivers/gpu/drm/i915/i915_gem_gtt.c        |  26 +++----
>  drivers/gpu/drm/i915/intel_display.c       |  26 +++----
>  drivers/gpu/drm/i915/intel_lrc.c           | 118 ++++++++++++++---------------
>  drivers/gpu/drm/i915/intel_lrc.h           |  21 -----
>  drivers/gpu/drm/i915/intel_mocs.c          |  30 ++++----
>  drivers/gpu/drm/i915/intel_overlay.c       |  42 +++++-----
>  drivers/gpu/drm/i915/intel_ringbuffer.c    | 107 +++++++++++++-------------
>  drivers/gpu/drm/i915/intel_ringbuffer.h    |  15 +---
>  11 files changed, 195 insertions(+), 234 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 5c3d11d020f0..c60105e36263 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -4642,7 +4642,7 @@ err:
>  
>  int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	struct drm_i915_private *dev_priv = req->i915;
>  	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
>  	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index 82a9f7197d32..c3adc121aab4 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -479,7 +479,7 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
>  static inline int
>  mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	u32 flags = hw_flags | MI_MM_SPACE_GTT;
>  	const int num_rings =
>  		/* Use an extended w/a on ivb+ if signalling from other rings */
> @@ -494,7 +494,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
>  	 * itlb_before_ctx_switch.
>  	 */
>  	if (IS_GEN6(req->i915)) {
> -		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
> +		ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
>  		if (ret)
>  			return ret;
>  	}
> @@ -522,7 +522,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
>  
>  			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
>  			for_each_ring(signaller, req->i915, i) {
> -				if (signaller == ring)
> +				if (signaller == req->ring)
>  					continue;
>  
>  				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
> @@ -547,7 +547,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
>  
>  			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
>  			for_each_ring(signaller, req->i915, i) {
> -				if (signaller == ring)
> +				if (signaller == req->ring)
>  					continue;

Tsk, unrelated hunks above. And maybe a cocci to s/req->ring/req->engine/.
At least mention in the commit message that you're on a crusade against
superfluous ring/dev/whatever pointers and arguments while in the area.

>  				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index e8164b644e0e..5f1b9b7df051 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -1113,14 +1113,12 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
>  }
>  
>  static int
> -i915_reset_gen7_sol_offsets(struct drm_device *dev,
> -			    struct drm_i915_gem_request *req)
> +i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> -	struct drm_i915_private *dev_priv = dev->dev_private;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	int ret, i;
>  
> -	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
> +	if (!IS_GEN7(req->i915) || req->ring->id != RCS) {
>  		DRM_DEBUG("sol reset is gen7/rcs only\n");
>  		return -EINVAL;
>  	}
> @@ -1198,9 +1196,8 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
>  			       struct drm_i915_gem_execbuffer2 *args,
>  			       struct list_head *vmas)
>  {
> -	struct drm_device *dev = params->dev;
> -	struct intel_engine_cs *ring = params->ring;
> -	struct drm_i915_private *dev_priv = dev->dev_private;
> +	struct intel_ringbuffer *ring = params->request->ringbuf;
> +	struct drm_i915_private *dev_priv = params->request->i915;
>  	u64 exec_start, exec_len;
>  	int instp_mode;
>  	u32 instp_mask;
> @@ -1214,34 +1211,31 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
>  	if (ret)
>  		return ret;
>  
> -	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
> -	     "%s didn't clear reload\n", ring->name);
> -
>  	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
>  	instp_mask = I915_EXEC_CONSTANTS_MASK;
>  	switch (instp_mode) {
>  	case I915_EXEC_CONSTANTS_REL_GENERAL:
>  	case I915_EXEC_CONSTANTS_ABSOLUTE:
>  	case I915_EXEC_CONSTANTS_REL_SURFACE:
> -		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
> +		if (instp_mode != 0 && params->ring->id != RCS) {
>  			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
>  			return -EINVAL;
>  		}
>  
>  		if (instp_mode != dev_priv->relative_constants_mode) {
> -			if (INTEL_INFO(dev)->gen < 4) {
> +			if (INTEL_INFO(dev_priv)->gen < 4) {
>  				DRM_DEBUG("no rel constants on pre-gen4\n");
>  				return -EINVAL;
>  			}
>  
> -			if (INTEL_INFO(dev)->gen > 5 &&
> +			if (INTEL_INFO(dev_priv)->gen > 5 &&
>  			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
>  				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
>  				return -EINVAL;
>  			}
>  
>  			/* The HW changed the meaning on this bit on gen6 */
> -			if (INTEL_INFO(dev)->gen >= 6)
> +			if (INTEL_INFO(dev_priv)->gen >= 6)
>  				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
>  		}
>  		break;
> @@ -1250,7 +1244,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
>  		return -EINVAL;
>  	}
>  
> -	if (ring == &dev_priv->ring[RCS] &&
> +	if (params->ring->id == RCS &&
>  	    instp_mode != dev_priv->relative_constants_mode) {
>  		ret = intel_ring_begin(params->request, 4);
>  		if (ret)
> @@ -1266,7 +1260,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
>  	}
>  
>  	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
> -		ret = i915_reset_gen7_sol_offsets(dev, params->request);
> +		ret = i915_reset_gen7_sol_offsets(params->request);
>  		if (ret)
>  			return ret;
>  	}
> @@ -1275,9 +1269,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
>  	exec_start = params->batch_obj_vm_offset +
>  		     params->args_batch_start_offset;
>  
> -	ret = ring->dispatch_execbuffer(params->request,
> -					exec_start, exec_len,
> -					params->dispatch_flags);
> +	ret = params->ring->dispatch_execbuffer(params->request,
> +						exec_start, exec_len,
> +						params->dispatch_flags);

I guess we should just shovel all the stuff in params into request
eventually ...

>  	if (ret)
>  		return ret;
>  
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 7820e8983136..4608884adfc8 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -651,7 +651,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
>  			  unsigned entry,
>  			  dma_addr_t addr)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	int ret;
>  
>  	BUG_ON(entry >= 4);
> @@ -661,10 +661,10 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
>  		return ret;
>  
>  	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
> -	intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
> +	intel_ring_emit(ring, GEN8_RING_PDP_UDW(req->ring, entry));
>  	intel_ring_emit(ring, upper_32_bits(addr));
>  	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
> -	intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
> +	intel_ring_emit(ring, GEN8_RING_PDP_LDW(req->ring, entry));
>  	intel_ring_emit(ring, lower_32_bits(addr));
>  	intel_ring_advance(ring);
>  
> @@ -1588,11 +1588,11 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
>  static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  			 struct drm_i915_gem_request *req)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	int ret;
>  
>  	/* NB: TLBs must be flushed and invalidated before a switch */
> -	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
> +	ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
>  	if (ret)
>  		return ret;
>  
> @@ -1601,9 +1601,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  		return ret;
>  
>  	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
> -	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
> +	intel_ring_emit(ring, RING_PP_DIR_DCLV(req->ring));
>  	intel_ring_emit(ring, PP_DIR_DCLV_2G);
> -	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
> +	intel_ring_emit(ring, RING_PP_DIR_BASE(req->ring));
>  	intel_ring_emit(ring, get_pd_offset(ppgtt));
>  	intel_ring_emit(ring, MI_NOOP);
>  	intel_ring_advance(ring);
> @@ -1625,11 +1625,11 @@ static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  			  struct drm_i915_gem_request *req)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	int ret;
>  
>  	/* NB: TLBs must be flushed and invalidated before a switch */
> -	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
> +	ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
>  	if (ret)
>  		return ret;
>  
> @@ -1638,16 +1638,16 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  		return ret;
>  
>  	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
> -	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
> +	intel_ring_emit(ring, RING_PP_DIR_DCLV(req->ring));
>  	intel_ring_emit(ring, PP_DIR_DCLV_2G);
> -	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
> +	intel_ring_emit(ring, RING_PP_DIR_BASE(req->ring));
>  	intel_ring_emit(ring, get_pd_offset(ppgtt));
>  	intel_ring_emit(ring, MI_NOOP);
>  	intel_ring_advance(ring);
>  
>  	/* XXX: RCS is the only one to auto invalidate the TLBs? */
> -	if (ring->id != RCS) {
> -		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
> +	if (req->ring->id != RCS) {
> +		ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
>  		if (ret)
>  			return ret;
>  	}
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index 6bd204980b70..f6dae2cfd9c9 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -10824,7 +10824,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
>  				 struct drm_i915_gem_request *req,
>  				 uint32_t flags)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>  	u32 flip_mask;
>  	int ret;
> @@ -10859,7 +10859,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
>  				 struct drm_i915_gem_request *req,
>  				 uint32_t flags)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>  	u32 flip_mask;
>  	int ret;
> @@ -10891,8 +10891,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
>  				 struct drm_i915_gem_request *req,
>  				 uint32_t flags)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> -	struct drm_i915_private *dev_priv = dev->dev_private;
> +	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct drm_i915_private *dev_priv = req->i915;
>  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>  	uint32_t pf, pipesrc;
>  	int ret;
> @@ -10930,8 +10930,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
>  				 struct drm_i915_gem_request *req,
>  				 uint32_t flags)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> -	struct drm_i915_private *dev_priv = dev->dev_private;
> +	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct drm_i915_private *dev_priv = req->i915;
>  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>  	uint32_t pf, pipesrc;
>  	int ret;
> @@ -10966,7 +10966,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
>  				 struct drm_i915_gem_request *req,
>  				 uint32_t flags)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>  	uint32_t plane_bit = 0;
>  	int len, ret;
> @@ -10987,14 +10987,14 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
>  	}
>  
>  	len = 4;
> -	if (ring->id == RCS) {
> +	if (req->ring->id == RCS) {
>  		len += 6;
>  		/*
>  		 * On Gen 8, SRM is now taking an extra dword to accommodate
>  		 * 48bits addresses, and we need a NOOP for the batch size to
>  		 * stay even.
>  		 */
> -		if (IS_GEN8(dev))
> +		if (IS_GEN8(req->i915))
>  			len += 2;
>  	}
>  
> @@ -11025,21 +11025,21 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
>  	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
>  	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
>  	 */
> -	if (ring->id == RCS) {
> +	if (req->ring->id == RCS) {
>  		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
>  		intel_ring_emit(ring, DERRMR);
>  		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
>  					DERRMR_PIPEB_PRI_FLIP_DONE |
>  					DERRMR_PIPEC_PRI_FLIP_DONE));
> -		if (IS_GEN8(dev))
> +		if (IS_GEN8(req->i915))
>  			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
>  					      MI_SRM_LRM_GLOBAL_GTT);
>  		else
>  			intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
>  					      MI_SRM_LRM_GLOBAL_GTT);
>  		intel_ring_emit(ring, DERRMR);
> -		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
> -		if (IS_GEN8(dev)) {
> +		intel_ring_emit(ring, req->ring->scratch.gtt_offset + 256);
> +		if (IS_GEN8(req->i915)) {
>  			intel_ring_emit(ring, 0);
>  			intel_ring_emit(ring, MI_NOOP);
>  		}
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index cfa77a59b352..0db23c474045 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -700,11 +700,9 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
>  static void
>  intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
>  {
> -	struct intel_engine_cs *ring = request->ring;
> -
> -	intel_logical_ring_advance(request->ringbuf);
> +	intel_ring_advance(request->ringbuf);
>  
> -	if (intel_ring_stopped(ring))
> +	if (intel_ring_stopped(request->ring))
>  		return;
>  
>  	execlists_context_queue(request);
> @@ -887,11 +885,11 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
>  		if (ret)
>  			return ret;
>  
> -		intel_logical_ring_emit(ringbuf, MI_NOOP);
> -		intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
> -		intel_logical_ring_emit(ringbuf, INSTPM);
> -		intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
> -		intel_logical_ring_advance(ringbuf);
> +		intel_ring_emit(ringbuf, MI_NOOP);
> +		intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
> +		intel_ring_emit(ringbuf, INSTPM);
> +		intel_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
> +		intel_ring_advance(ringbuf);
>  
>  		dev_priv->relative_constants_mode = instp_mode;
>  	}
> @@ -1041,14 +1039,14 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
>  	if (ret)
>  		return ret;
>  
> -	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
> +	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
>  	for (i = 0; i < w->count; i++) {
> -		intel_logical_ring_emit(ringbuf, w->reg[i].addr);
> -		intel_logical_ring_emit(ringbuf, w->reg[i].value);
> +		intel_ring_emit(ringbuf, w->reg[i].addr);
> +		intel_ring_emit(ringbuf, w->reg[i].value);
>  	}
> -	intel_logical_ring_emit(ringbuf, MI_NOOP);
> +	intel_ring_emit(ringbuf, MI_NOOP);
>  
> -	intel_logical_ring_advance(ringbuf);
> +	intel_ring_advance(ringbuf);
>  
>  	ring->gpu_caches_dirty = true;
>  	ret = logical_ring_flush_all_caches(req);
> @@ -1478,18 +1476,18 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
>  	if (ret)
>  		return ret;
>  
> -	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
> +	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
>  	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
>  		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
>  
> -		intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i));
> -		intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
> -		intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i));
> -		intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
> +		intel_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i));
> +		intel_ring_emit(ringbuf, upper_32_bits(pd_daddr));
> +		intel_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i));
> +		intel_ring_emit(ringbuf, lower_32_bits(pd_daddr));
>  	}
>  
> -	intel_logical_ring_emit(ringbuf, MI_NOOP);
> -	intel_logical_ring_advance(ringbuf);
> +	intel_ring_emit(ringbuf, MI_NOOP);
> +	intel_ring_advance(ringbuf);
>  
>  	return 0;
>  }
> @@ -1523,14 +1521,14 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
>  		return ret;
>  
>  	/* FIXME(BDW): Address space and security selectors. */
> -	intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
> -				(ppgtt<<8) |
> -				(dispatch_flags & I915_DISPATCH_RS ?
> -				 MI_BATCH_RESOURCE_STREAMER : 0));
> -	intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
> -	intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
> -	intel_logical_ring_emit(ringbuf, MI_NOOP);
> -	intel_logical_ring_advance(ringbuf);
> +	intel_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
> +			(ppgtt<<8) |
> +			(dispatch_flags & I915_DISPATCH_RS ?
> +			 MI_BATCH_RESOURCE_STREAMER : 0));
> +	intel_ring_emit(ringbuf, lower_32_bits(offset));
> +	intel_ring_emit(ringbuf, upper_32_bits(offset));
> +	intel_ring_emit(ringbuf, MI_NOOP);
> +	intel_ring_advance(ringbuf);
>  
>  	return 0;
>  }
> @@ -1598,13 +1596,13 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
>  			cmd |= MI_INVALIDATE_BSD;
>  	}
>  
> -	intel_logical_ring_emit(ringbuf, cmd);
> -	intel_logical_ring_emit(ringbuf,
> -				I915_GEM_HWS_SCRATCH_ADDR |
> -				MI_FLUSH_DW_USE_GTT);
> -	intel_logical_ring_emit(ringbuf, 0); /* upper addr */
> -	intel_logical_ring_emit(ringbuf, 0); /* value */
> -	intel_logical_ring_advance(ringbuf);
> +	intel_ring_emit(ringbuf, cmd);
> +	intel_ring_emit(ringbuf,
> +			I915_GEM_HWS_SCRATCH_ADDR |
> +			MI_FLUSH_DW_USE_GTT);
> +	intel_ring_emit(ringbuf, 0); /* upper addr */
> +	intel_ring_emit(ringbuf, 0); /* value */
> +	intel_ring_advance(ringbuf);
>  
>  	return 0;
>  }
> @@ -1651,21 +1649,21 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
>  		return ret;
>  
>  	if (vf_flush_wa) {
> -		intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
> -		intel_logical_ring_emit(ringbuf, 0);
> -		intel_logical_ring_emit(ringbuf, 0);
> -		intel_logical_ring_emit(ringbuf, 0);
> -		intel_logical_ring_emit(ringbuf, 0);
> -		intel_logical_ring_emit(ringbuf, 0);
> +		intel_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
> +		intel_ring_emit(ringbuf, 0);
> +		intel_ring_emit(ringbuf, 0);
> +		intel_ring_emit(ringbuf, 0);
> +		intel_ring_emit(ringbuf, 0);
> +		intel_ring_emit(ringbuf, 0);
>  	}
>  
> -	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
> -	intel_logical_ring_emit(ringbuf, flags);
> -	intel_logical_ring_emit(ringbuf, scratch_addr);
> -	intel_logical_ring_emit(ringbuf, 0);
> -	intel_logical_ring_emit(ringbuf, 0);
> -	intel_logical_ring_emit(ringbuf, 0);
> -	intel_logical_ring_advance(ringbuf);
> +	intel_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
> +	intel_ring_emit(ringbuf, flags);
> +	intel_ring_emit(ringbuf, scratch_addr);
> +	intel_ring_emit(ringbuf, 0);
> +	intel_ring_emit(ringbuf, 0);
> +	intel_ring_emit(ringbuf, 0);
> +	intel_ring_advance(ringbuf);
>  
>  	return 0;
>  }
> @@ -1699,23 +1697,23 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
>  	cmd = MI_STORE_DWORD_IMM_GEN4;
>  	cmd |= MI_GLOBAL_GTT;
>  
> -	intel_logical_ring_emit(ringbuf, cmd);
> -	intel_logical_ring_emit(ringbuf,
> -				(ring->status_page.gfx_addr +
> -				(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
> -	intel_logical_ring_emit(ringbuf, 0);
> -	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
> -	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
> -	intel_logical_ring_emit(ringbuf, MI_NOOP);
> +	intel_ring_emit(ringbuf, cmd);
> +	intel_ring_emit(ringbuf,
> +			(ring->status_page.gfx_addr +
> +			 (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
> +	intel_ring_emit(ringbuf, 0);
> +	intel_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
> +	intel_ring_emit(ringbuf, MI_USER_INTERRUPT);
> +	intel_ring_emit(ringbuf, MI_NOOP);
>  	intel_logical_ring_advance_and_submit(request);
>  
>  	/*
>  	 * Here we add two extra NOOPs as padding to avoid
>  	 * lite restore of a context with HEAD==TAIL.
>  	 */
> -	intel_logical_ring_emit(ringbuf, MI_NOOP);
> -	intel_logical_ring_emit(ringbuf, MI_NOOP);
> -	intel_logical_ring_advance(ringbuf);
> +	intel_ring_emit(ringbuf, MI_NOOP);
> +	intel_ring_emit(ringbuf, MI_NOOP);
> +	intel_ring_advance(ringbuf);
>  
>  	return 0;
>  }
> diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
> index 8b56fb934763..861668919e5a 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.h
> +++ b/drivers/gpu/drm/i915/intel_lrc.h
> @@ -45,27 +45,6 @@ int intel_logical_rings_init(struct drm_device *dev);
>  int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
>  
>  int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
> -/**
> - * intel_logical_ring_advance() - advance the ringbuffer tail
> - * @ringbuf: Ringbuffer to advance.
> - *
> - * The tail is only updated in our logical ringbuffer struct.
> - */
> -static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
> -{
> -	intel_ringbuffer_advance(ringbuf);
> -}
> -
> -/**
> - * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
> - * @ringbuf: Ringbuffer to write to.
> - * @data: DWORD to write.
> - */
> -static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
> -					   u32 data)
> -{
> -	intel_ringbuffer_emit(ringbuf, data);
> -}
>  
>  /* Logical Ring Contexts */
>  void intel_lr_context_free(struct intel_context *ctx);
> diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
> index 6d3c6c0a5c62..399a131a94b6 100644
> --- a/drivers/gpu/drm/i915/intel_mocs.c
> +++ b/drivers/gpu/drm/i915/intel_mocs.c
> @@ -187,13 +187,11 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
>  		return ret;
>  	}
>  
> -	intel_logical_ring_emit(ringbuf,
> -				MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
> +	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
>  
>  	for (index = 0; index < table->size; index++) {
> -		intel_logical_ring_emit(ringbuf, reg_base + index * 4);
> -		intel_logical_ring_emit(ringbuf,
> -					table->table[index].control_value);
> +		intel_ring_emit(ringbuf, reg_base + index * 4);
> +		intel_ring_emit(ringbuf, table->table[index].control_value);
>  	}
>  
>  	/*
> @@ -205,12 +203,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
>  	 * that value to all the used entries.
>  	 */
>  	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
> -		intel_logical_ring_emit(ringbuf, reg_base + index * 4);
> -		intel_logical_ring_emit(ringbuf, table->table[0].control_value);
> +		intel_ring_emit(ringbuf, reg_base + index * 4);
> +		intel_ring_emit(ringbuf, table->table[0].control_value);
>  	}
>  
> -	intel_logical_ring_emit(ringbuf, MI_NOOP);
> -	intel_logical_ring_advance(ringbuf);
> +	intel_ring_emit(ringbuf, MI_NOOP);
> +	intel_ring_advance(ringbuf);
>  
>  	return 0;
>  }
> @@ -246,15 +244,15 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
>  		return ret;
>  	}
>  
> -	intel_logical_ring_emit(ringbuf,
> +	intel_ring_emit(ringbuf,
>  			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
>  
>  	for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
>  		value = (table->table[count].l3cc_value & 0xffff) |
>  			((table->table[count + 1].l3cc_value & 0xffff) << 16);
>  
> -		intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
> -		intel_logical_ring_emit(ringbuf, value);
> +		intel_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
> +		intel_ring_emit(ringbuf, value);
>  	}
>  
>  	if (table->size & 0x01) {
> @@ -270,14 +268,14 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
>  	 * they are reserved by the hardware.
>  	 */
>  	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
> -		intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
> -		intel_logical_ring_emit(ringbuf, value);
> +		intel_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
> +		intel_ring_emit(ringbuf, value);
>  
>  		value = filler;
>  	}
>  
> -	intel_logical_ring_emit(ringbuf, MI_NOOP);
> -	intel_logical_ring_advance(ringbuf);
> +	intel_ring_emit(ringbuf, MI_NOOP);
> +	intel_ring_advance(ringbuf);
>  
>  	return 0;
>  }
> diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
> index 444542696a2c..5e9c7c15a84b 100644
> --- a/drivers/gpu/drm/i915/intel_overlay.c
> +++ b/drivers/gpu/drm/i915/intel_overlay.c
> @@ -252,11 +252,11 @@ static int intel_overlay_on(struct intel_overlay *overlay)
>  
>  	overlay->active = true;
>  
> -	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
> -	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
> -	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
> -	intel_ring_emit(ring, MI_NOOP);
> -	intel_ring_advance(ring);
> +	intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
> +	intel_ring_emit(req->ringbuf, overlay->flip_addr | OFC_UPDATE);
> +	intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
> +	intel_ring_emit(req->ringbuf, MI_NOOP);
> +	intel_ring_advance(req->ringbuf);
>  
>  	return intel_overlay_do_wait_request(overlay, req, NULL);
>  }
> @@ -293,9 +293,9 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
>  		return ret;
>  	}
>  
> -	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
> -	intel_ring_emit(ring, flip_addr);
> -	intel_ring_advance(ring);
> +	intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
> +	intel_ring_emit(req->ringbuf, flip_addr);
> +	intel_ring_advance(req->ringbuf);
>  
>  	WARN_ON(overlay->last_flip_req);
>  	i915_gem_request_assign(&overlay->last_flip_req, req);
> @@ -360,22 +360,22 @@ static int intel_overlay_off(struct intel_overlay *overlay)
>  	}
>  
>  	/* wait for overlay to go idle */
> -	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
> -	intel_ring_emit(ring, flip_addr);
> -	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
> +	intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
> +	intel_ring_emit(req->ringbuf, flip_addr);
> +	intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
>  	/* turn overlay off */
>  	if (IS_I830(dev)) {
>  		/* Workaround: Don't disable the overlay fully, since otherwise
>  		 * it dies on the next OVERLAY_ON cmd. */
> -		intel_ring_emit(ring, MI_NOOP);
> -		intel_ring_emit(ring, MI_NOOP);
> -		intel_ring_emit(ring, MI_NOOP);
> +		intel_ring_emit(req->ringbuf, MI_NOOP);
> +		intel_ring_emit(req->ringbuf, MI_NOOP);
> +		intel_ring_emit(req->ringbuf, MI_NOOP);
>  	} else {
> -		intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
> -		intel_ring_emit(ring, flip_addr);
> -		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
> +		intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
> +		intel_ring_emit(req->ringbuf, flip_addr);
> +		intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
>  	}
> -	intel_ring_advance(ring);
> +	intel_ring_advance(req->ringbuf);
>  
>  	return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
>  }
> @@ -433,9 +433,9 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
>  			return ret;
>  		}
>  
> -		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
> -		intel_ring_emit(ring, MI_NOOP);
> -		intel_ring_advance(ring);
> +		intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
> +		intel_ring_emit(req->ringbuf, MI_NOOP);
> +		intel_ring_advance(req->ringbuf);
>  
>  		ret = intel_overlay_do_wait_request(overlay, req,
>  						    intel_overlay_release_old_vid_tail);

Any reason for not doing your usual transform to intel_ringbuffer for the
local ring here in the overlay code? Or does that happen when
intel_ring_begin falls?

> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 9fbe730aae47..b3de8b1fde2f 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -95,7 +95,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
>  		       u32	invalidate_domains,
>  		       u32	flush_domains)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	u32 cmd;
>  	int ret;
>  
> @@ -122,7 +122,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
>  		       u32	invalidate_domains,
>  		       u32	flush_domains)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	u32 cmd;
>  	int ret;
>  
> @@ -215,8 +215,8 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
>  static int
>  intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> -	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> +	struct intel_ringbuffer *ring = req->ringbuf;
> +	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 6);
> @@ -251,9 +251,9 @@ static int
>  gen6_render_ring_flush(struct drm_i915_gem_request *req,
>  		       u32 invalidate_domains, u32 flush_domains)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	u32 flags = 0;
> -	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> +	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>  	int ret;
>  
>  	/* Force SNB workarounds for PIPE_CONTROL flushes */
> @@ -303,7 +303,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
>  static int
>  gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 4);
> @@ -324,9 +324,9 @@ static int
>  gen7_render_ring_flush(struct drm_i915_gem_request *req,
>  		       u32 invalidate_domains, u32 flush_domains)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	u32 flags = 0;
> -	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> +	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>  	int ret;
>  
>  	/*
> @@ -387,7 +387,7 @@ static int
>  gen8_emit_pipe_control(struct drm_i915_gem_request *req,
>  		       u32 flags, u32 scratch_addr)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 6);
> @@ -712,15 +712,15 @@ err:
>  
>  static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
>  {
> -	int ret, i;
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	struct drm_i915_private *dev_priv = req->i915;
>  	struct i915_workarounds *w = &dev_priv->workarounds;
> +	int ret, i;
>  
>  	if (WARN_ON_ONCE(w->count == 0))
>  		return 0;
>  
> -	ring->gpu_caches_dirty = true;
> +	req->ring->gpu_caches_dirty = true;
>  	ret = intel_ring_flush_all_caches(req);
>  	if (ret)
>  		return ret;
> @@ -738,7 +738,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
>  
>  	intel_ring_advance(ring);
>  
> -	ring->gpu_caches_dirty = true;
> +	req->ring->gpu_caches_dirty = true;
>  	ret = intel_ring_flush_all_caches(req);
>  	if (ret)
>  		return ret;
> @@ -1184,7 +1184,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
>  			   unsigned int num_dwords)
>  {
>  #define MBOX_UPDATE_DWORDS 8
> -	struct intel_engine_cs *signaller = signaller_req->ring;
> +	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
>  	struct drm_i915_private *dev_priv = signaller_req->i915;
>  	struct intel_engine_cs *waiter;
>  	int i, ret, num_rings;
> @@ -1199,7 +1199,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
>  
>  	for_each_ring(waiter, dev_priv, i) {
>  		u32 seqno;
> -		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
> +		u64 gtt_offset = signaller_req->ring->semaphore.signal_ggtt[i];
>  		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
>  			continue;
>  
> @@ -1224,7 +1224,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
>  			   unsigned int num_dwords)
>  {
>  #define MBOX_UPDATE_DWORDS 6
> -	struct intel_engine_cs *signaller = signaller_req->ring;
> +	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
>  	struct drm_i915_private *dev_priv = signaller_req->i915;
>  	struct intel_engine_cs *waiter;
>  	int i, ret, num_rings;
> @@ -1239,7 +1239,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
>  
>  	for_each_ring(waiter, dev_priv, i) {
>  		u32 seqno;
> -		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
> +		u64 gtt_offset = signaller_req->ring->semaphore.signal_ggtt[i];
>  		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
>  			continue;
>  
> @@ -1261,7 +1261,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
>  static int gen6_signal(struct drm_i915_gem_request *signaller_req,
>  		       unsigned int num_dwords)
>  {
> -	struct intel_engine_cs *signaller = signaller_req->ring;
> +	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
>  	struct drm_i915_private *dev_priv = signaller_req->i915;
>  	struct intel_engine_cs *useless;
>  	int i, ret, num_rings;
> @@ -1276,7 +1276,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
>  		return ret;
>  
>  	for_each_ring(useless, dev_priv, i) {
> -		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
> +		u32 mbox_reg = signaller_req->ring->semaphore.mbox.signal[i];
>  		if (mbox_reg != GEN6_NOSYNC) {
>  			u32 seqno = i915_gem_request_get_seqno(signaller_req);
>  			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
> @@ -1303,11 +1303,11 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
>  static int
>  gen6_add_request(struct drm_i915_gem_request *req)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	int ret;
>  
> -	if (ring->semaphore.signal)
> -		ret = ring->semaphore.signal(req, 4);
> +	if (req->ring->semaphore.signal)
> +		ret = req->ring->semaphore.signal(req, 4);
>  	else
>  		ret = intel_ring_begin(req, 4);
>  
> @@ -1318,15 +1318,14 @@ gen6_add_request(struct drm_i915_gem_request *req)
>  	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
>  	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
>  	intel_ring_emit(ring, MI_USER_INTERRUPT);
> -	__intel_ring_advance(ring);
> +	__intel_ring_advance(req->ring);
>  
>  	return 0;
>  }
>  
> -static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
> +static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
>  					      u32 seqno)
>  {
> -	struct drm_i915_private *dev_priv = dev->dev_private;
>  	return dev_priv->last_seqno < seqno;
>  }
>  
> @@ -1343,7 +1342,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
>  	       struct intel_engine_cs *signaller,
>  	       u32 seqno)
>  {
> -	struct intel_engine_cs *waiter = waiter_req->ring;
> +	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
>  	struct drm_i915_private *dev_priv = waiter_req->i915;
>  	int ret;
>  
> @@ -1357,9 +1356,11 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
>  				MI_SEMAPHORE_SAD_GTE_SDD);
>  	intel_ring_emit(waiter, seqno);
>  	intel_ring_emit(waiter,
> -			lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
> +			lower_32_bits(GEN8_WAIT_OFFSET(waiter_req->ring,
> +						       signaller->id)));
>  	intel_ring_emit(waiter,
> -			upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
> +			upper_32_bits(GEN8_WAIT_OFFSET(waiter_req->ring,
> +						       signaller->id)));
>  	intel_ring_advance(waiter);
>  	return 0;
>  }
> @@ -1369,11 +1370,11 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
>  	       struct intel_engine_cs *signaller,
>  	       u32 seqno)
>  {
> -	struct intel_engine_cs *waiter = waiter_req->ring;
> +	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
>  	u32 dw1 = MI_SEMAPHORE_MBOX |
>  		  MI_SEMAPHORE_COMPARE |
>  		  MI_SEMAPHORE_REGISTER;
> -	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
> +	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter_req->ring->id];
>  	int ret;
>  
>  	/* Throughout all of the GEM code, seqno passed implies our current
> @@ -1389,7 +1390,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
>  		return ret;
>  
>  	/* If seqno wrap happened, omit the wait with no-ops */
> -	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
> +	if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
>  		intel_ring_emit(waiter, dw1 | wait_mbox);
>  		intel_ring_emit(waiter, seqno);
>  		intel_ring_emit(waiter, 0);
> @@ -1417,8 +1418,8 @@ do {									\
>  static int
>  pc_render_add_request(struct drm_i915_gem_request *req)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> -	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> +	struct intel_ringbuffer *ring = req->ringbuf;
> +	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>  	int ret;
>  
>  	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
> @@ -1436,7 +1437,7 @@ pc_render_add_request(struct drm_i915_gem_request *req)
>  	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
>  			PIPE_CONTROL_WRITE_FLUSH |
>  			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
> -	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
> +	intel_ring_emit(ring, req->ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
>  	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
>  	intel_ring_emit(ring, 0);
>  	PIPE_CONTROL_FLUSH(ring, scratch_addr);
> @@ -1455,10 +1456,10 @@ pc_render_add_request(struct drm_i915_gem_request *req)
>  			PIPE_CONTROL_WRITE_FLUSH |
>  			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
>  			PIPE_CONTROL_NOTIFY);
> -	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
> +	intel_ring_emit(ring, req->ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
>  	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
>  	intel_ring_emit(ring, 0);
> -	__intel_ring_advance(ring);
> +	__intel_ring_advance(req->ring);
>  
>  	return 0;
>  }
> @@ -1611,7 +1612,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
>  	       u32     invalidate_domains,
>  	       u32     flush_domains)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 2);
> @@ -1627,7 +1628,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
>  static int
>  i9xx_add_request(struct drm_i915_gem_request *req)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 4);
> @@ -1638,7 +1639,7 @@ i9xx_add_request(struct drm_i915_gem_request *req)
>  	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
>  	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
>  	intel_ring_emit(ring, MI_USER_INTERRUPT);
> -	__intel_ring_advance(ring);
> +	__intel_ring_advance(req->ring);
>  
>  	return 0;
>  }
> @@ -1772,7 +1773,7 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  			 u64 offset, u32 length,
>  			 unsigned dispatch_flags)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 2);
> @@ -1799,8 +1800,8 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  			 u64 offset, u32 len,
>  			 unsigned dispatch_flags)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> -	u32 cs_offset = ring->scratch.gtt_offset;
> +	struct intel_ringbuffer *ring = req->ringbuf;
> +	u32 cs_offset = req->ring->scratch.gtt_offset;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 6);
> @@ -1862,7 +1863,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  			 u64 offset, u32 len,
>  			 unsigned dispatch_flags)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 2);
> @@ -2343,8 +2344,8 @@ int intel_ring_begin(struct drm_i915_gem_request *req,
>  /* Align the ring tail to a cacheline boundary */
>  int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> -	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
> +	struct intel_ringbuffer *ring = req->ringbuf;
> +	int num_dwords = (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
>  	int ret;
>  
>  	if (num_dwords == 0)
> @@ -2415,7 +2416,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
>  static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
>  			       u32 invalidate, u32 flush)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	uint32_t cmd;
>  	int ret;
>  
> @@ -2424,7 +2425,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
>  		return ret;
>  
>  	cmd = MI_FLUSH_DW;
> -	if (INTEL_INFO(ring->dev)->gen >= 8)
> +	if (INTEL_INFO(req->i915)->gen >= 8)
>  		cmd += 1;
>  
>  	/* We always require a command barrier so that subsequent
> @@ -2445,7 +2446,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
>  
>  	intel_ring_emit(ring, cmd);
>  	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
> -	if (INTEL_INFO(ring->dev)->gen >= 8) {
> +	if (INTEL_INFO(req->i915)->gen >= 8) {
>  		intel_ring_emit(ring, 0); /* upper addr */
>  		intel_ring_emit(ring, 0); /* value */
>  	} else  {
> @@ -2461,7 +2462,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  			      u64 offset, u32 len,
>  			      unsigned dispatch_flags)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	bool ppgtt = USES_PPGTT(req->i915) &&
>  			!(dispatch_flags & I915_DISPATCH_SECURE);
>  	int ret;
> @@ -2487,7 +2488,7 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  			     u64 offset, u32 len,
>  			     unsigned dispatch_flags)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 2);
> @@ -2512,7 +2513,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  			      u64 offset, u32 len,
>  			      unsigned dispatch_flags)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 2);
> @@ -2535,7 +2536,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  static int gen6_ring_flush(struct drm_i915_gem_request *req,
>  			   u32 invalidate, u32 flush)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_ringbuffer *ring = req->ringbuf;
>  	uint32_t cmd;
>  	int ret;
>  
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 10f80df5f121..212b402818f9 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -427,25 +427,16 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
>  
>  int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
>  int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
> -static inline void intel_ringbuffer_emit(struct intel_ringbuffer *rb,
> -					 u32 data)
> +static inline void intel_ring_emit(struct intel_ringbuffer *rb,
> +				   u32 data)
>  {
>  	*(uint32_t *)(rb->virtual_start + rb->tail) = data;
>  	rb->tail += 4;
>  }
> -static inline void intel_ringbuffer_advance(struct intel_ringbuffer *rb)
> +static inline void intel_ring_advance(struct intel_ringbuffer *rb)
>  {
>  	rb->tail &= rb->size - 1;
>  }
> -static inline void intel_ring_emit(struct intel_engine_cs *ring,
> -				   u32 data)
> -{
> -	intel_ringbuffer_emit(ring->buffer, data);
> -}
> -static inline void intel_ring_advance(struct intel_engine_cs *ring)
> -{
> -	intel_ringbuffer_advance(ring->buffer);
> -}
>  int __intel_ring_space(int head, int tail, int size);
>  void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
>  int intel_ring_space(struct intel_ringbuffer *ringbuf);

At the end of this crusade we should add some pretty kerneldoc for our
ringbuffer interface.

Anyway, with some notes about your dev_priv/ringbuffer/whatever crusade
added to the commit message, just to align and clarify the new naming
convention:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Chris Wilson Nov. 24, 2015, 2:52 p.m. UTC | #2
On Tue, Nov 24, 2015 at 03:33:12PM +0100, Daniel Vetter wrote:
> On Fri, Nov 20, 2015 at 12:43:43PM +0000, Chris Wilson wrote:
> > @@ -547,7 +547,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
> >  
> >  			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
> >  			for_each_ring(signaller, req->i915, i) {
> > -				if (signaller == ring)
> > +				if (signaller == req->ring)
> >  					continue;
> 
> Tsk, unrelated hunks above. And maybe a cocci to s/req->ring/req->engine/.
> At least mention in the commit message that your on a crusade against
> superflous ring/dev/whatever pointers and arguments while in the area.

It was hard to tell where one patch began and another ended.

> > @@ -1275,9 +1269,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
> >  	exec_start = params->batch_obj_vm_offset +
> >  		     params->args_batch_start_offset;
> >  
> > -	ret = ring->dispatch_execbuffer(params->request,
> > -					exec_start, exec_len,
> > -					params->dispatch_flags);
> > +	ret = params->ring->dispatch_execbuffer(params->request,
> > +						exec_start, exec_len,
> > +						params->dispatch_flags);
> 
> I guess we should just shovel all the stuff in params into request
> eventually ...

Have you been reading ahead? The only argument for these exact
parameters to be recorded would be for hangcheck. But we do get to
cleanup execbuf dispatch eventually as well.

> > diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
> > index 444542696a2c..5e9c7c15a84b 100644
> > --- a/drivers/gpu/drm/i915/intel_overlay.c
> > +++ b/drivers/gpu/drm/i915/intel_overlay.c
> > @@ -433,9 +433,9 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
> >  			return ret;
> >  		}
> >  
> > -		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
> > -		intel_ring_emit(ring, MI_NOOP);
> > -		intel_ring_advance(ring);
> > +		intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
> > +		intel_ring_emit(req->ringbuf, MI_NOOP);
> > +		intel_ring_advance(req->ringbuf);
> >  
> >  		ret = intel_overlay_do_wait_request(overlay, req,
> >  						    intel_overlay_release_old_vid_tail);
> 
> Any reason for not doing your usual transform to intel_ringbuffer for the
> local ring here in the overlay code? Or does that happen when
> intel_ring_begin falls?

Later patches looked a bit neater with not adding a temporary
intel_engine_cs *ring; intel_ringbuf *ringbuf. That was my thinking at
least.

> At the end of this crusade we should add some pretty kerneldoc for our
> ringbuffer interface.

request_create
ring_begin{ ring_emit }ring_advance
request_commit (add_request)

But one may ask how do we go from request to ring. Wouldn't it be neat
to have that explicit in the interface:

req = request_create()
..
ring = ring_begin(req);
ring_emit(ring)...
ring_advance(ring)
...
request_commit(req);

Deja vu.
-Chris
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5c3d11d020f0..c60105e36263 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4642,7 +4642,7 @@  err:
 
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct drm_i915_private *dev_priv = req->i915;
 	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
 	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 82a9f7197d32..c3adc121aab4 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -479,7 +479,7 @@  i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
@@ -494,7 +494,7 @@  mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 * itlb_before_ctx_switch.
 	 */
 	if (IS_GEN6(req->i915)) {
-		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
+		ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
@@ -522,7 +522,7 @@  mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 
 			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
 			for_each_ring(signaller, req->i915, i) {
-				if (signaller == ring)
+				if (signaller == req->ring)
 					continue;
 
 				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
@@ -547,7 +547,7 @@  mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 
 			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
 			for_each_ring(signaller, req->i915, i) {
-				if (signaller == ring)
+				if (signaller == req->ring)
 					continue;
 
 				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index e8164b644e0e..5f1b9b7df051 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1113,14 +1113,12 @@  i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 }
 
 static int
-i915_reset_gen7_sol_offsets(struct drm_device *dev,
-			    struct drm_i915_gem_request *req)
+i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret, i;
 
-	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
+	if (!IS_GEN7(req->i915) || req->ring->id != RCS) {
 		DRM_DEBUG("sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
@@ -1198,9 +1196,8 @@  i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
 			       struct list_head *vmas)
 {
-	struct drm_device *dev = params->dev;
-	struct intel_engine_cs *ring = params->ring;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ringbuffer *ring = params->request->ringbuf;
+	struct drm_i915_private *dev_priv = params->request->i915;
 	u64 exec_start, exec_len;
 	int instp_mode;
 	u32 instp_mask;
@@ -1214,34 +1211,31 @@  i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	if (ret)
 		return ret;
 
-	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
-	     "%s didn't clear reload\n", ring->name);
-
 	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
 	instp_mask = I915_EXEC_CONSTANTS_MASK;
 	switch (instp_mode) {
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+		if (instp_mode != 0 && params->ring->id != RCS) {
 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
 			return -EINVAL;
 		}
 
 		if (instp_mode != dev_priv->relative_constants_mode) {
-			if (INTEL_INFO(dev)->gen < 4) {
+			if (INTEL_INFO(dev_priv)->gen < 4) {
 				DRM_DEBUG("no rel constants on pre-gen4\n");
 				return -EINVAL;
 			}
 
-			if (INTEL_INFO(dev)->gen > 5 &&
+			if (INTEL_INFO(dev_priv)->gen > 5 &&
 			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
 				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
 				return -EINVAL;
 			}
 
 			/* The HW changed the meaning on this bit on gen6 */
-			if (INTEL_INFO(dev)->gen >= 6)
+			if (INTEL_INFO(dev_priv)->gen >= 6)
 				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
 		}
 		break;
@@ -1250,7 +1244,7 @@  i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 		return -EINVAL;
 	}
 
-	if (ring == &dev_priv->ring[RCS] &&
+	if (params->ring->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
 		ret = intel_ring_begin(params->request, 4);
 		if (ret)
@@ -1266,7 +1260,7 @@  i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	}
 
 	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-		ret = i915_reset_gen7_sol_offsets(dev, params->request);
+		ret = i915_reset_gen7_sol_offsets(params->request);
 		if (ret)
 			return ret;
 	}
@@ -1275,9 +1269,9 @@  i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	exec_start = params->batch_obj_vm_offset +
 		     params->args_batch_start_offset;
 
-	ret = ring->dispatch_execbuffer(params->request,
-					exec_start, exec_len,
-					params->dispatch_flags);
+	ret = params->ring->dispatch_execbuffer(params->request,
+						exec_start, exec_len,
+						params->dispatch_flags);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7820e8983136..4608884adfc8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -651,7 +651,7 @@  static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	BUG_ON(entry >= 4);
@@ -661,10 +661,10 @@  static int gen8_write_pdp(struct drm_i915_gem_request *req,
 		return ret;
 
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
+	intel_ring_emit(ring, GEN8_RING_PDP_UDW(req->ring, entry));
 	intel_ring_emit(ring, upper_32_bits(addr));
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
+	intel_ring_emit(ring, GEN8_RING_PDP_LDW(req->ring, entry));
 	intel_ring_emit(ring, lower_32_bits(addr));
 	intel_ring_advance(ring);
 
@@ -1588,11 +1588,11 @@  static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			 struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1601,9 +1601,9 @@  static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 		return ret;
 
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+	intel_ring_emit(ring, RING_PP_DIR_DCLV(req->ring));
 	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+	intel_ring_emit(ring, RING_PP_DIR_BASE(req->ring));
 	intel_ring_emit(ring, get_pd_offset(ppgtt));
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
@@ -1625,11 +1625,11 @@  static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1638,16 +1638,16 @@  static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 		return ret;
 
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+	intel_ring_emit(ring, RING_PP_DIR_DCLV(req->ring));
 	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+	intel_ring_emit(ring, RING_PP_DIR_BASE(req->ring));
 	intel_ring_emit(ring, get_pd_offset(ppgtt));
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
-	if (ring->id != RCS) {
-		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	if (req->ring->id != RCS) {
+		ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6bd204980b70..f6dae2cfd9c9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10824,7 +10824,7 @@  static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -10859,7 +10859,7 @@  static int intel_gen3_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -10891,8 +10891,8 @@  static int intel_gen4_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ringbuffer *ring = req->ringbuf;
+	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
 	int ret;
@@ -10930,8 +10930,8 @@  static int intel_gen6_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ringbuffer *ring = req->ringbuf;
+	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
 	int ret;
@@ -10966,7 +10966,7 @@  static int intel_gen7_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t plane_bit = 0;
 	int len, ret;
@@ -10987,14 +10987,14 @@  static int intel_gen7_queue_flip(struct drm_device *dev,
 	}
 
 	len = 4;
-	if (ring->id == RCS) {
+	if (req->ring->id == RCS) {
 		len += 6;
 		/*
 		 * On Gen 8, SRM is now taking an extra dword to accommodate
 		 * 48bits addresses, and we need a NOOP for the batch size to
 		 * stay even.
 		 */
-		if (IS_GEN8(dev))
+		if (IS_GEN8(req->i915))
 			len += 2;
 	}
 
@@ -11025,21 +11025,21 @@  static int intel_gen7_queue_flip(struct drm_device *dev,
 	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
 	 */
-	if (ring->id == RCS) {
+	if (req->ring->id == RCS) {
 		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 		intel_ring_emit(ring, DERRMR);
 		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
 					DERRMR_PIPEB_PRI_FLIP_DONE |
 					DERRMR_PIPEC_PRI_FLIP_DONE));
-		if (IS_GEN8(dev))
+		if (IS_GEN8(req->i915))
 			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
 					      MI_SRM_LRM_GLOBAL_GTT);
 		else
 			intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
 					      MI_SRM_LRM_GLOBAL_GTT);
 		intel_ring_emit(ring, DERRMR);
-		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
-		if (IS_GEN8(dev)) {
+		intel_ring_emit(ring, req->ring->scratch.gtt_offset + 256);
+		if (IS_GEN8(req->i915)) {
 			intel_ring_emit(ring, 0);
 			intel_ring_emit(ring, MI_NOOP);
 		}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index cfa77a59b352..0db23c474045 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -700,11 +700,9 @@  static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
 static void
 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
-	struct intel_engine_cs *ring = request->ring;
-
-	intel_logical_ring_advance(request->ringbuf);
+	intel_ring_advance(request->ringbuf);
 
-	if (intel_ring_stopped(ring))
+	if (intel_ring_stopped(request->ring))
 		return;
 
 	execlists_context_queue(request);
@@ -887,11 +885,11 @@  int intel_execlists_submission(struct i915_execbuffer_params *params,
 		if (ret)
 			return ret;
 
-		intel_logical_ring_emit(ringbuf, MI_NOOP);
-		intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
-		intel_logical_ring_emit(ringbuf, INSTPM);
-		intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
-		intel_logical_ring_advance(ringbuf);
+		intel_ring_emit(ringbuf, MI_NOOP);
+		intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ringbuf, INSTPM);
+		intel_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
+		intel_ring_advance(ringbuf);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
@@ -1041,14 +1039,14 @@  static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
+	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
 	for (i = 0; i < w->count; i++) {
-		intel_logical_ring_emit(ringbuf, w->reg[i].addr);
-		intel_logical_ring_emit(ringbuf, w->reg[i].value);
+		intel_ring_emit(ringbuf, w->reg[i].addr);
+		intel_ring_emit(ringbuf, w->reg[i].value);
 	}
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_emit(ringbuf, MI_NOOP);
 
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_advance(ringbuf);
 
 	ring->gpu_caches_dirty = true;
 	ret = logical_ring_flush_all_caches(req);
@@ -1478,18 +1476,18 @@  static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-		intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i));
-		intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
-		intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i));
-		intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
+		intel_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+		intel_ring_emit(ringbuf, upper_32_bits(pd_daddr));
+		intel_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+		intel_ring_emit(ringbuf, lower_32_bits(pd_daddr));
 	}
 
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_advance(ringbuf);
 
 	return 0;
 }
@@ -1523,14 +1521,14 @@  static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		return ret;
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
-				(ppgtt<<8) |
-				(dispatch_flags & I915_DISPATCH_RS ?
-				 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
-	intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
+			(ppgtt<<8) |
+			(dispatch_flags & I915_DISPATCH_RS ?
+			 MI_BATCH_RESOURCE_STREAMER : 0));
+	intel_ring_emit(ringbuf, lower_32_bits(offset));
+	intel_ring_emit(ringbuf, upper_32_bits(offset));
+	intel_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_advance(ringbuf);
 
 	return 0;
 }
@@ -1598,13 +1596,13 @@  static int gen8_emit_flush(struct drm_i915_gem_request *request,
 			cmd |= MI_INVALIDATE_BSD;
 	}
 
-	intel_logical_ring_emit(ringbuf, cmd);
-	intel_logical_ring_emit(ringbuf,
-				I915_GEM_HWS_SCRATCH_ADDR |
-				MI_FLUSH_DW_USE_GTT);
-	intel_logical_ring_emit(ringbuf, 0); /* upper addr */
-	intel_logical_ring_emit(ringbuf, 0); /* value */
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ringbuf, cmd);
+	intel_ring_emit(ringbuf,
+			I915_GEM_HWS_SCRATCH_ADDR |
+			MI_FLUSH_DW_USE_GTT);
+	intel_ring_emit(ringbuf, 0); /* upper addr */
+	intel_ring_emit(ringbuf, 0); /* value */
+	intel_ring_advance(ringbuf);
 
 	return 0;
 }
@@ -1651,21 +1649,21 @@  static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 		return ret;
 
 	if (vf_flush_wa) {
-		intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
+		intel_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+		intel_ring_emit(ringbuf, 0);
+		intel_ring_emit(ringbuf, 0);
+		intel_ring_emit(ringbuf, 0);
+		intel_ring_emit(ringbuf, 0);
+		intel_ring_emit(ringbuf, 0);
 	}
 
-	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-	intel_logical_ring_emit(ringbuf, flags);
-	intel_logical_ring_emit(ringbuf, scratch_addr);
-	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ringbuf, flags);
+	intel_ring_emit(ringbuf, scratch_addr);
+	intel_ring_emit(ringbuf, 0);
+	intel_ring_emit(ringbuf, 0);
+	intel_ring_emit(ringbuf, 0);
+	intel_ring_advance(ringbuf);
 
 	return 0;
 }
@@ -1699,23 +1697,23 @@  static int gen8_emit_request(struct drm_i915_gem_request *request)
 	cmd = MI_STORE_DWORD_IMM_GEN4;
 	cmd |= MI_GLOBAL_GTT;
 
-	intel_logical_ring_emit(ringbuf, cmd);
-	intel_logical_ring_emit(ringbuf,
-				(ring->status_page.gfx_addr +
-				(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
-	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
-	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_emit(ringbuf, cmd);
+	intel_ring_emit(ringbuf,
+			(ring->status_page.gfx_addr +
+			 (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
+	intel_ring_emit(ringbuf, 0);
+	intel_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+	intel_ring_emit(ringbuf, MI_USER_INTERRUPT);
+	intel_ring_emit(ringbuf, MI_NOOP);
 	intel_logical_ring_advance_and_submit(request);
 
 	/*
 	 * Here we add two extra NOOPs as padding to avoid
 	 * lite restore of a context with HEAD==TAIL.
 	 */
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_advance(ringbuf);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 8b56fb934763..861668919e5a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -45,27 +45,6 @@  int intel_logical_rings_init(struct drm_device *dev);
 int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
 
 int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
-/**
- * intel_logical_ring_advance() - advance the ringbuffer tail
- * @ringbuf: Ringbuffer to advance.
- *
- * The tail is only updated in our logical ringbuffer struct.
- */
-static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
-{
-	intel_ringbuffer_advance(ringbuf);
-}
-
-/**
- * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
- * @ringbuf: Ringbuffer to write to.
- * @data: DWORD to write.
- */
-static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
-					   u32 data)
-{
-	intel_ringbuffer_emit(ringbuf, data);
-}
 
 /* Logical Ring Contexts */
 void intel_lr_context_free(struct intel_context *ctx);
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 6d3c6c0a5c62..399a131a94b6 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -187,13 +187,11 @@  static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 		return ret;
 	}
 
-	intel_logical_ring_emit(ringbuf,
-				MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
 
 	for (index = 0; index < table->size; index++) {
-		intel_logical_ring_emit(ringbuf, reg_base + index * 4);
-		intel_logical_ring_emit(ringbuf,
-					table->table[index].control_value);
+		intel_ring_emit(ringbuf, reg_base + index * 4);
+		intel_ring_emit(ringbuf, table->table[index].control_value);
 	}
 
 	/*
@@ -205,12 +203,12 @@  static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	 * that value to all the used entries.
 	 */
 	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-		intel_logical_ring_emit(ringbuf, reg_base + index * 4);
-		intel_logical_ring_emit(ringbuf, table->table[0].control_value);
+		intel_ring_emit(ringbuf, reg_base + index * 4);
+		intel_ring_emit(ringbuf, table->table[0].control_value);
 	}
 
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_advance(ringbuf);
 
 	return 0;
 }
@@ -246,15 +244,15 @@  static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 		return ret;
 	}
 
-	intel_logical_ring_emit(ringbuf,
+	intel_ring_emit(ringbuf,
 			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
 
 	for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
 		value = (table->table[count].l3cc_value & 0xffff) |
 			((table->table[count + 1].l3cc_value & 0xffff) << 16);
 
-		intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
-		intel_logical_ring_emit(ringbuf, value);
+		intel_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+		intel_ring_emit(ringbuf, value);
 	}
 
 	if (table->size & 0x01) {
@@ -270,14 +268,14 @@  static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	 * they are reserved by the hardware.
 	 */
 	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-		intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
-		intel_logical_ring_emit(ringbuf, value);
+		intel_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+		intel_ring_emit(ringbuf, value);
 
 		value = filler;
 	}
 
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_advance(ringbuf);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 444542696a2c..5e9c7c15a84b 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -252,11 +252,11 @@  static int intel_overlay_on(struct intel_overlay *overlay)
 
 	overlay->active = true;
 
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+	intel_ring_emit(req->ringbuf, overlay->flip_addr | OFC_UPDATE);
+	intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(req->ringbuf, MI_NOOP);
+	intel_ring_advance(req->ringbuf);
 
 	return intel_overlay_do_wait_request(overlay, req, NULL);
 }
@@ -293,9 +293,9 @@  static int intel_overlay_continue(struct intel_overlay *overlay,
 		return ret;
 	}
 
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_advance(ring);
+	intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(req->ringbuf, flip_addr);
+	intel_ring_advance(req->ringbuf);
 
 	WARN_ON(overlay->last_flip_req);
 	i915_gem_request_assign(&overlay->last_flip_req, req);
@@ -360,22 +360,22 @@  static int intel_overlay_off(struct intel_overlay *overlay)
 	}
 
 	/* wait for overlay to go idle */
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(req->ringbuf, flip_addr);
+	intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	/* turn overlay off */
 	if (IS_I830(dev)) {
 		/* Workaround: Don't disable the overlay fully, since otherwise
 		 * it dies on the next OVERLAY_ON cmd. */
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(req->ringbuf, MI_NOOP);
+		intel_ring_emit(req->ringbuf, MI_NOOP);
+		intel_ring_emit(req->ringbuf, MI_NOOP);
 	} else {
-		intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-		intel_ring_emit(ring, flip_addr);
-		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+		intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+		intel_ring_emit(req->ringbuf, flip_addr);
+		intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(req->ringbuf);
 
 	return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
 }
@@ -433,9 +433,9 @@  static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 			return ret;
 		}
 
-		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+		intel_ring_emit(req->ringbuf, MI_NOOP);
+		intel_ring_advance(req->ringbuf);
 
 		ret = intel_overlay_do_wait_request(overlay, req,
 						    intel_overlay_release_old_vid_tail);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9fbe730aae47..b3de8b1fde2f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -95,7 +95,7 @@  gen2_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	u32 cmd;
 	int ret;
 
@@ -122,7 +122,7 @@  gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	u32 cmd;
 	int ret;
 
@@ -215,8 +215,8 @@  gen4_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	struct intel_ringbuffer *ring = req->ringbuf;
+	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -251,9 +251,9 @@  static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	u32 flags = 0;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -303,7 +303,7 @@  gen6_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
@@ -324,9 +324,9 @@  static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	u32 flags = 0;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/*
@@ -387,7 +387,7 @@  static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -712,15 +712,15 @@  err:
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	int ret, i;
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct drm_i915_private *dev_priv = req->i915;
 	struct i915_workarounds *w = &dev_priv->workarounds;
+	int ret, i;
 
 	if (WARN_ON_ONCE(w->count == 0))
 		return 0;
 
-	ring->gpu_caches_dirty = true;
+	req->ring->gpu_caches_dirty = true;
 	ret = intel_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
@@ -738,7 +738,7 @@  static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_advance(ring);
 
-	ring->gpu_caches_dirty = true;
+	req->ring->gpu_caches_dirty = true;
 	ret = intel_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
@@ -1184,7 +1184,7 @@  static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 8
-	struct intel_engine_cs *signaller = signaller_req->ring;
+	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	int i, ret, num_rings;
@@ -1199,7 +1199,7 @@  static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 
 	for_each_ring(waiter, dev_priv, i) {
 		u32 seqno;
-		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+		u64 gtt_offset = signaller_req->ring->semaphore.signal_ggtt[i];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
@@ -1224,7 +1224,7 @@  static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 6
-	struct intel_engine_cs *signaller = signaller_req->ring;
+	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	int i, ret, num_rings;
@@ -1239,7 +1239,7 @@  static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 
 	for_each_ring(waiter, dev_priv, i) {
 		u32 seqno;
-		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+		u64 gtt_offset = signaller_req->ring->semaphore.signal_ggtt[i];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
@@ -1261,7 +1261,7 @@  static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		       unsigned int num_dwords)
 {
-	struct intel_engine_cs *signaller = signaller_req->ring;
+	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *useless;
 	int i, ret, num_rings;
@@ -1276,7 +1276,7 @@  static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		return ret;
 
 	for_each_ring(useless, dev_priv, i) {
-		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
+		u32 mbox_reg = signaller_req->ring->semaphore.mbox.signal[i];
 		if (mbox_reg != GEN6_NOSYNC) {
 			u32 seqno = i915_gem_request_get_seqno(signaller_req);
 			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
@@ -1303,11 +1303,11 @@  static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 static int
 gen6_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
-	if (ring->semaphore.signal)
-		ret = ring->semaphore.signal(req, 4);
+	if (req->ring->semaphore.signal)
+		ret = req->ring->semaphore.signal(req, 4);
 	else
 		ret = intel_ring_begin(req, 4);
 
@@ -1318,15 +1318,14 @@  gen6_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	__intel_ring_advance(ring);
+	__intel_ring_advance(req->ring);
 
 	return 0;
 }
 
-static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
 					      u32 seqno)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	return dev_priv->last_seqno < seqno;
 }
 
@@ -1343,7 +1342,7 @@  gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_engine_cs *waiter = waiter_req->ring;
+	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
 	struct drm_i915_private *dev_priv = waiter_req->i915;
 	int ret;
 
@@ -1357,9 +1356,11 @@  gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 				MI_SEMAPHORE_SAD_GTE_SDD);
 	intel_ring_emit(waiter, seqno);
 	intel_ring_emit(waiter,
-			lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+			lower_32_bits(GEN8_WAIT_OFFSET(waiter_req->ring,
+						       signaller->id)));
 	intel_ring_emit(waiter,
-			upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+			upper_32_bits(GEN8_WAIT_OFFSET(waiter_req->ring,
+						       signaller->id)));
 	intel_ring_advance(waiter);
 	return 0;
 }
@@ -1369,11 +1370,11 @@  gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_engine_cs *waiter = waiter_req->ring;
+	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
-	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
+	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter_req->ring->id];
 	int ret;
 
 	/* Throughout all of the GEM code, seqno passed implies our current
@@ -1389,7 +1390,7 @@  gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
 		return ret;
 
 	/* If seqno wrap happened, omit the wait with no-ops */
-	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+	if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
 		intel_ring_emit(waiter, dw1 | wait_mbox);
 		intel_ring_emit(waiter, seqno);
 		intel_ring_emit(waiter, 0);
@@ -1417,8 +1418,8 @@  do {									\
 static int
 pc_render_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	struct intel_ringbuffer *ring = req->ringbuf;
+	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -1436,7 +1437,7 @@  pc_render_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
-	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, req->ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
 	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
@@ -1455,10 +1456,10 @@  pc_render_add_request(struct drm_i915_gem_request *req)
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
-	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, req->ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
 	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, 0);
-	__intel_ring_advance(ring);
+	__intel_ring_advance(req->ring);
 
 	return 0;
 }
@@ -1611,7 +1612,7 @@  bsd_ring_flush(struct drm_i915_gem_request *req,
 	       u32     invalidate_domains,
 	       u32     flush_domains)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -1627,7 +1628,7 @@  bsd_ring_flush(struct drm_i915_gem_request *req,
 static int
 i9xx_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
@@ -1638,7 +1639,7 @@  i9xx_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	__intel_ring_advance(ring);
+	__intel_ring_advance(req->ring);
 
 	return 0;
 }
@@ -1772,7 +1773,7 @@  i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 length,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -1799,8 +1800,8 @@  i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
-	u32 cs_offset = ring->scratch.gtt_offset;
+	struct intel_ringbuffer *ring = req->ringbuf;
+	u32 cs_offset = req->ring->scratch.gtt_offset;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -1862,7 +1863,7 @@  i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -2343,8 +2344,8 @@  int intel_ring_begin(struct drm_i915_gem_request *req,
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
-	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	struct intel_ringbuffer *ring = req->ringbuf;
+	int num_dwords = (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
 
 	if (num_dwords == 0)
@@ -2415,7 +2416,7 @@  static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 			       u32 invalidate, u32 flush)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	uint32_t cmd;
 	int ret;
 
@@ -2424,7 +2425,7 @@  static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
-	if (INTEL_INFO(ring->dev)->gen >= 8)
+	if (INTEL_INFO(req->i915)->gen >= 8)
 		cmd += 1;
 
 	/* We always require a command barrier so that subsequent
@@ -2445,7 +2446,7 @@  static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-	if (INTEL_INFO(ring->dev)->gen >= 8) {
+	if (INTEL_INFO(req->i915)->gen >= 8) {
 		intel_ring_emit(ring, 0); /* upper addr */
 		intel_ring_emit(ring, 0); /* value */
 	} else  {
@@ -2461,7 +2462,7 @@  gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	bool ppgtt = USES_PPGTT(req->i915) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
@@ -2487,7 +2488,7 @@  hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			     u64 offset, u32 len,
 			     unsigned dispatch_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -2512,7 +2513,7 @@  gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -2535,7 +2536,7 @@  gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	uint32_t cmd;
 	int ret;
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 10f80df5f121..212b402818f9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -427,25 +427,16 @@  int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
 int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
-static inline void intel_ringbuffer_emit(struct intel_ringbuffer *rb,
-					 u32 data)
+static inline void intel_ring_emit(struct intel_ringbuffer *rb,
+				   u32 data)
 {
 	*(uint32_t *)(rb->virtual_start + rb->tail) = data;
 	rb->tail += 4;
 }
-static inline void intel_ringbuffer_advance(struct intel_ringbuffer *rb)
+static inline void intel_ring_advance(struct intel_ringbuffer *rb)
 {
 	rb->tail &= rb->size - 1;
 }
-static inline void intel_ring_emit(struct intel_engine_cs *ring,
-				   u32 data)
-{
-	intel_ringbuffer_emit(ring->buffer, data);
-}
-static inline void intel_ring_advance(struct intel_engine_cs *ring)
-{
-	intel_ringbuffer_advance(ring->buffer);
-}
 int __intel_ring_space(int head, int tail, int size);
 void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
 int intel_ring_space(struct intel_ringbuffer *ringbuf);