
[06/12] drm/i915: Rename request->ring to request->engine

Message ID 1448023432-10726-6-git-send-email-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson Nov. 20, 2015, 12:43 p.m. UTC
In order to disambiguate between the pointer to the intel_engine_cs
(called ring) and the intel_ringbuffer (called ringbuf), rename
s/ring/engine/.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c          |  11 ++-
 drivers/gpu/drm/i915/i915_drv.h              |  12 +--
 drivers/gpu/drm/i915/i915_gem.c              |  76 ++++++++--------
 drivers/gpu/drm/i915/i915_gem_context.c      |  63 ++++++-------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   |   8 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c          |  49 +++++-----
 drivers/gpu/drm/i915/i915_gem_render_state.c |  18 ++--
 drivers/gpu/drm/i915/i915_gpu_error.c        |   3 +-
 drivers/gpu/drm/i915/i915_trace.h            |  34 +++----
 drivers/gpu/drm/i915/intel_display.c         |  12 +--
 drivers/gpu/drm/i915/intel_lrc.c             | 128 +++++++++++++--------------
 drivers/gpu/drm/i915/intel_mocs.c            |  10 +--
 drivers/gpu/drm/i915/intel_ringbuffer.c      |  66 +++++++-------
 13 files changed, 239 insertions(+), 251 deletions(-)
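
A minimal sketch of the naming collision the rename resolves, using simplified stand-in structs (hypothetical field layouts, not the actual i915 definitions):

/*
 * Sketch only: simplified stand-ins, not the real i915 structures.
 */
struct intel_engine_cs { int id; const char *name; };   /* hardware engine  */
struct intel_ringbuffer { unsigned int tail; };          /* command buffer   */

/* Before: the engine pointer was called "ring", colliding with "ringbuf". */
struct request_before {
	struct intel_engine_cs *ring;       /* actually the engine, not a buffer */
	struct intel_ringbuffer *ringbuf;
};

/* After s/ring/engine/: each field name matches what it points at. */
struct request_after {
	struct intel_engine_cs *engine;
	struct intel_ringbuffer *ringbuf;
};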

Comments

Daniel Vetter Nov. 24, 2015, 2:48 p.m. UTC | #1
On Fri, Nov 20, 2015 at 12:43:46PM +0000, Chris Wilson wrote:
> In order to disambiguate between the pointer to the intel_engine_cs
> (called ring) and the intel_ringbuffer (called ringbuf), rename
> s/ring/engine/.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

Ah, here we go. Slight amend for the commit message:

"Where known that req is non-NULL replace get_ring() with req->engine too
while at it. And also use dev_priv more while touching code."

With that Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
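
A minimal sketch of the two changes that amendment describes (simplified, hypothetical struct definitions rather than the real i915 headers): dropping the NULL-tolerant helper where req is known non-NULL, and reaching dev_priv directly through req->i915:

#include <stddef.h>

/* Simplified stand-ins for illustration only. */
struct intel_engine_cs { int id; };
struct drm_i915_private { int dummy; };
struct drm_i915_gem_request {
	struct drm_i915_private *i915;
	struct intel_engine_cs *engine;
};

/* NULL-tolerant helper, still useful for callers that may pass NULL. */
static inline struct intel_engine_cs *
i915_gem_request_get_engine(struct drm_i915_gem_request *req)
{
	return req ? req->engine : NULL;
}

/* Hypothetical caller where req is known to be non-NULL. */
static int example_known_non_null(struct drm_i915_gem_request *req)
{
	/* before (sketch): engine = i915_gem_request_get_engine(req); */
	struct intel_engine_cs *engine = req->engine;   /* direct dereference     */
	struct drm_i915_private *dev_priv = req->i915;  /* dev_priv straight from req */

	return engine->id + dev_priv->dummy;
}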

> ---
>  drivers/gpu/drm/i915/i915_debugfs.c          |  11 ++-
>  drivers/gpu/drm/i915/i915_drv.h              |  12 +--
>  drivers/gpu/drm/i915/i915_gem.c              |  76 ++++++++--------
>  drivers/gpu/drm/i915/i915_gem_context.c      |  63 ++++++-------
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c   |   8 +-
>  drivers/gpu/drm/i915/i915_gem_gtt.c          |  49 +++++-----
>  drivers/gpu/drm/i915/i915_gem_render_state.c |  18 ++--
>  drivers/gpu/drm/i915/i915_gpu_error.c        |   3 +-
>  drivers/gpu/drm/i915/i915_trace.h            |  34 +++----
>  drivers/gpu/drm/i915/intel_display.c         |  12 +--
>  drivers/gpu/drm/i915/intel_lrc.c             | 128 +++++++++++++--------------
>  drivers/gpu/drm/i915/intel_mocs.c            |  10 +--
>  drivers/gpu/drm/i915/intel_ringbuffer.c      |  66 +++++++-------
>  13 files changed, 239 insertions(+), 251 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 359436162f3d..56375c36b381 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -190,8 +190,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>  		seq_printf(m, " (%s mappable)", s);
>  	}
>  	if (obj->last_write_req != NULL)
> -		seq_printf(m, " (%s)",
> -			   i915_gem_request_get_ring(obj->last_write_req)->name);
> +		seq_printf(m, " (%s)", obj->last_write_req->engine->name);
>  	if (obj->frontbuffer_bits)
>  		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
>  }
> @@ -594,14 +593,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
>  					   pipe, plane);
>  			}
>  			if (work->flip_queued_req) {
> -				struct intel_engine_cs *ring =
> -					i915_gem_request_get_ring(work->flip_queued_req);
> +				struct intel_engine_cs *engine =
> +					work->flip_queued_req->engine;
>  
>  				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
> -					   ring->name,
> +					   engine->name,
>  					   i915_gem_request_get_seqno(work->flip_queued_req),
>  					   dev_priv->next_seqno,
> -					   ring->get_seqno(ring, true),
> +					   engine->get_seqno(engine, true),
>  					   i915_gem_request_completed(work->flip_queued_req, true));
>  			} else
>  				seq_printf(m, "Flip not associated with any ring\n");
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index fadf2ceb1f72..9ce8b3fcb3a0 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2156,7 +2156,7 @@ struct drm_i915_gem_request {
>  
>  	/** On Which ring this request was generated */
>  	struct drm_i915_private *i915;
> -	struct intel_engine_cs *ring;
> +	struct intel_engine_cs *engine;
>  
>  	/** GEM sequence number associated with this request. */
>  	uint32_t seqno;
> @@ -2240,9 +2240,9 @@ i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
>  }
>  
>  static inline struct intel_engine_cs *
> -i915_gem_request_get_ring(struct drm_i915_gem_request *req)
> +i915_gem_request_get_engine(struct drm_i915_gem_request *req)
>  {
> -	return req ? req->ring : NULL;
> +	return req ? req->engine : NULL;
>  }
>  
>  static inline struct drm_i915_gem_request *
> @@ -2256,7 +2256,7 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
>  static inline void
>  i915_gem_request_unreference(struct drm_i915_gem_request *req)
>  {
> -	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
> +	WARN_ON(!mutex_is_locked(&req->i915->dev->struct_mutex));
>  	kref_put(&req->ref, i915_gem_request_free);
>  }
>  
> @@ -2268,7 +2268,7 @@ i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
>  	if (!req)
>  		return;
>  
> -	dev = req->ring->dev;
> +	dev = req->i915->dev;
>  	if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
>  		mutex_unlock(&dev->struct_mutex);
>  }
> @@ -2877,7 +2877,7 @@ static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
>  
>  	BUG_ON(req == NULL);
>  
> -	seqno = req->ring->get_seqno(req->ring, lazy_coherency);
> +	seqno = req->engine->get_seqno(req->engine, lazy_coherency);
>  
>  	return i915_seqno_passed(seqno, req->seqno);
>  }
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 030fc9d14385..5a1b51a27fe3 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -1174,7 +1174,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
>  	u64 timeout;
>  	unsigned cpu;
>  
> -	if (i915_gem_request_get_ring(req)->irq_refcount)
> +	if (req->engine->irq_refcount)
>  		return -EBUSY;
>  
>  	timeout = local_clock_us(&cpu) + 2;
> @@ -1220,10 +1220,10 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
>  			s64 *timeout,
>  			struct intel_rps_client *rps)
>  {
> -	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
> +	struct intel_engine_cs *engine = req->engine;
>  	struct drm_i915_private *dev_priv = req->i915;
>  	const bool irq_test_in_progress =
> -		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
> +		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(engine);
>  	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
>  	DEFINE_WAIT(wait);
>  	unsigned long timeout_expire;
> @@ -1252,7 +1252,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
>  	if (ret == 0)
>  		goto out;
>  
> -	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
> +	if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
>  		ret = -ENODEV;
>  		goto out;
>  	}
> @@ -1260,7 +1260,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
>  	for (;;) {
>  		struct timer_list timer;
>  
> -		prepare_to_wait(&ring->irq_queue, &wait, state);
> +		prepare_to_wait(&engine->irq_queue, &wait, state);
>  
>  		/* We need to check whether any gpu reset happened in between
>  		 * the caller grabbing the seqno and now ... */
> @@ -1289,11 +1289,11 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
>  		}
>  
>  		timer.function = NULL;
> -		if (timeout || missed_irq(dev_priv, ring)) {
> +		if (timeout || missed_irq(dev_priv, engine)) {
>  			unsigned long expire;
>  
>  			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
> -			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
> +			expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
>  			mod_timer(&timer, expire);
>  		}
>  
> @@ -1305,9 +1305,9 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
>  		}
>  	}
>  	if (!irq_test_in_progress)
> -		ring->irq_put(ring);
> +		engine->irq_put(engine);
>  
> -	finish_wait(&ring->irq_queue, &wait);
> +	finish_wait(&engine->irq_queue, &wait);
>  
>  out:
>  	now = ktime_get_raw_ns();
> @@ -1397,7 +1397,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
>  static void
>  __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
>  {
> -	struct intel_engine_cs *engine = req->ring;
> +	struct intel_engine_cs *engine = req->engine;
>  	struct drm_i915_gem_request *tmp;
>  
>  	lockdep_assert_held(&engine->dev->struct_mutex);
> @@ -1466,7 +1466,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
>  			if (ret)
>  				return ret;
>  
> -			i = obj->last_write_req->ring->id;
> +			i = obj->last_write_req->engine->id;
>  			if (obj->last_read_req[i] == obj->last_write_req)
>  				i915_gem_object_retire__read(obj, i);
>  			else
> @@ -1493,7 +1493,7 @@ static void
>  i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
>  			       struct drm_i915_gem_request *req)
>  {
> -	int ring = req->ring->id;
> +	int ring = req->engine->id;
>  
>  	if (obj->last_read_req[ring] == req)
>  		i915_gem_object_retire__read(obj, ring);
> @@ -2415,17 +2415,15 @@ void i915_vma_move_to_active(struct i915_vma *vma,
>  			     struct drm_i915_gem_request *req)
>  {
>  	struct drm_i915_gem_object *obj = vma->obj;
> -	struct intel_engine_cs *ring;
> -
> -	ring = i915_gem_request_get_ring(req);
> +	struct intel_engine_cs *engine = req->engine;
>  
>  	/* Add a reference if we're newly entering the active list. */
>  	if (obj->active == 0)
>  		drm_gem_object_reference(&obj->base);
> -	obj->active |= intel_ring_flag(ring);
> +	obj->active |= intel_ring_flag(engine);
>  
> -	list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
> -	i915_gem_request_assign(&obj->last_read_req[ring->id], req);
> +	list_move_tail(&obj->ring_list[engine->id], &engine->active_list);
> +	i915_gem_request_assign(&obj->last_read_req[engine->id], req);
>  
>  	list_move_tail(&vma->mm_list, &vma->vm->active_list);
>  }
> @@ -2434,7 +2432,7 @@ static void
>  i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
>  {
>  	RQ_BUG_ON(obj->last_write_req == NULL);
> -	RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
> +	RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->engine)));
>  
>  	i915_gem_request_assign(&obj->last_write_req, NULL);
>  	intel_fb_obj_flush(obj, true, ORIGIN_CS);
> @@ -2451,7 +2449,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
>  	list_del_init(&obj->ring_list[ring]);
>  	i915_gem_request_assign(&obj->last_read_req[ring], NULL);
>  
> -	if (obj->last_write_req && obj->last_write_req->ring->id == ring)
> +	if (obj->last_write_req && obj->last_write_req->engine->id == ring)
>  		i915_gem_object_retire__write(obj);
>  
>  	obj->active &= ~(1 << ring);
> @@ -2553,7 +2551,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>  			struct drm_i915_gem_object *obj,
>  			bool flush_caches)
>  {
> -	struct intel_engine_cs *ring;
> +	struct intel_engine_cs *engine;
>  	struct drm_i915_private *dev_priv;
>  	struct intel_ringbuffer *ringbuf;
>  	u32 request_start;
> @@ -2562,7 +2560,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>  	if (WARN_ON(request == NULL))
>  		return;
>  
> -	ring = request->ring;
> +	engine = request->engine;
>  	dev_priv = request->i915;
>  	ringbuf = request->ringbuf;
>  
> @@ -2598,9 +2596,9 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>  	request->postfix = intel_ring_get_tail(ringbuf);
>  
>  	if (i915.enable_execlists)
> -		ret = ring->emit_request(request);
> +		ret = engine->emit_request(request);
>  	else {
> -		ret = ring->add_request(request);
> +		ret = engine->add_request(request);
>  
>  		request->tail = intel_ring_get_tail(ringbuf);
>  	}
> @@ -2618,12 +2616,12 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>  	request->batch_obj = obj;
>  
>  	request->emitted_jiffies = jiffies;
> -	ring->last_submitted_seqno = request->seqno;
> -	list_add_tail(&request->list, &ring->request_list);
> +	engine->last_submitted_seqno = request->seqno;
> +	list_add_tail(&request->list, &engine->request_list);
>  
>  	trace_i915_gem_request_add(request);
>  
> -	i915_queue_hangcheck(ring->dev);
> +	i915_queue_hangcheck(engine->dev);
>  
>  	queue_delayed_work(dev_priv->wq,
>  			   &dev_priv->mm.retire_work,
> @@ -2690,7 +2688,7 @@ void i915_gem_request_free(struct kref *req_ref)
>  
>  	if (ctx) {
>  		if (i915.enable_execlists) {
> -			if (ctx != req->ring->default_context)
> +			if (ctx != req->engine->default_context)
>  				intel_lr_context_unpin(req);
>  		}
>  
> @@ -2700,11 +2698,11 @@ void i915_gem_request_free(struct kref *req_ref)
>  	kmem_cache_free(req->i915->requests, req);
>  }
>  
> -int i915_gem_request_alloc(struct intel_engine_cs *ring,
> +int i915_gem_request_alloc(struct intel_engine_cs *engine,
>  			   struct intel_context *ctx,
>  			   struct drm_i915_gem_request **req_out)
>  {
> -	struct drm_i915_private *dev_priv = to_i915(ring->dev);
> +	struct drm_i915_private *dev_priv = to_i915(engine->dev);
>  	struct drm_i915_gem_request *req;
>  	int ret;
>  
> @@ -2717,13 +2715,13 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring,
>  	if (req == NULL)
>  		return -ENOMEM;
>  
> -	ret = i915_gem_get_seqno(ring->dev, &req->seqno);
> +	ret = i915_gem_get_seqno(engine->dev, &req->seqno);
>  	if (ret)
>  		goto err;
>  
>  	kref_init(&req->ref);
>  	req->i915 = dev_priv;
> -	req->ring = ring;
> +	req->engine = engine;
>  	req->ctx  = ctx;
>  	i915_gem_context_reference(req->ctx);
>  
> @@ -3137,7 +3135,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
>  	struct intel_engine_cs *from;
>  	int ret;
>  
> -	from = i915_gem_request_get_ring(from_req);
> +	from = from_req->engine;
>  	if (to == from)
>  		return 0;
>  
> @@ -4308,7 +4306,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
>  	BUILD_BUG_ON(I915_NUM_RINGS > 16);
>  	args->busy = obj->active << 16;
>  	if (obj->last_write_req)
> -		args->busy |= obj->last_write_req->ring->id;
> +		args->busy |= obj->last_write_req->engine->id;
>  
>  unref:
>  	drm_gem_object_unreference(&obj->base);
> @@ -4641,7 +4639,6 @@ err:
>  
>  int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
>  {
> -	struct intel_ringbuffer *ring = req->ringbuf;
>  	struct drm_i915_private *dev_priv = req->i915;
>  	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
>  	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
> @@ -4660,12 +4657,11 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
>  	 * at initialization time.
>  	 */
>  	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
> -		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
> -		intel_ring_emit(ring, reg_base + i);
> -		intel_ring_emit(ring, remap_info[i/4]);
> +		intel_ring_emit(req->ringbuf, MI_LOAD_REGISTER_IMM(1));
> +		intel_ring_emit(req->ringbuf, reg_base + i);
> +		intel_ring_emit(req->ringbuf, remap_info[i/4]);
>  	}
> -
> -	intel_ring_advance(ring);
> +	intel_ring_advance(req->ringbuf);
>  
>  	return ret;
>  }
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index c3adc121aab4..047c2f94bd22 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -410,14 +410,14 @@ void i915_gem_context_fini(struct drm_device *dev)
>  
>  int i915_gem_context_enable(struct drm_i915_gem_request *req)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_engine_cs *engine = req->engine;
>  	int ret;
>  
>  	if (i915.enable_execlists) {
> -		if (ring->init_context == NULL)
> +		if (engine->init_context == NULL)
>  			return 0;
>  
> -		ret = ring->init_context(req);
> +		ret = engine->init_context(req);
>  	} else
>  		ret = i915_switch_context(req);
>  
> @@ -494,7 +494,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
>  	 * itlb_before_ctx_switch.
>  	 */
>  	if (IS_GEN6(req->i915)) {
> -		ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
> +		ret = req->engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
>  		if (ret)
>  			return ret;
>  	}
> @@ -522,7 +522,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
>  
>  			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
>  			for_each_ring(signaller, req->i915, i) {
> -				if (signaller == req->ring)
> +				if (signaller == req->engine)
>  					continue;
>  
>  				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
> @@ -547,7 +547,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
>  
>  			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
>  			for_each_ring(signaller, req->i915, i) {
> -				if (signaller == req->ring)
> +				if (signaller == req->engine)
>  					continue;
>  
>  				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
> @@ -618,19 +618,19 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
>  static int do_switch(struct drm_i915_gem_request *req)
>  {
>  	struct intel_context *to = req->ctx;
> -	struct intel_engine_cs *ring = req->ring;
> -	struct intel_context *from = ring->last_context;
> +	struct intel_engine_cs *engine = req->engine;
> +	struct intel_context *from = engine->last_context;
>  	u32 hw_flags = 0;
>  	bool uninitialized = false;
>  	int ret, i;
>  
> -	if (should_skip_switch(ring, from, to))
> +	if (should_skip_switch(engine, from, to))
>  		return 0;
>  
>  	/* Trying to pin first makes error handling easier. */
> -	if (ring->id == RCS) {
> +	if (engine->id == RCS) {
>  		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
> -					    get_context_alignment(ring->dev), 0);
> +					    get_context_alignment(engine->dev), 0);
>  		if (ret)
>  			return ret;
>  	}
> @@ -640,23 +640,23 @@ static int do_switch(struct drm_i915_gem_request *req)
>  	 * evict_everything - as a last ditch gtt defrag effort that also
>  	 * switches to the default context. Hence we need to reload from here.
>  	 */
> -	from = ring->last_context;
> +	from = engine->last_context;
>  
> -	if (needs_pd_load_pre(ring, to)) {
> +	if (needs_pd_load_pre(engine, to)) {
>  		/* Older GENs and non render rings still want the load first,
>  		 * "PP_DCLV followed by PP_DIR_BASE register through Load
>  		 * Register Immediate commands in Ring Buffer before submitting
>  		 * a context."*/
> -		trace_switch_mm(ring, to);
> +		trace_switch_mm(engine, to);
>  		ret = to->ppgtt->switch_mm(to->ppgtt, req);
>  		if (ret)
>  			goto unpin_out;
>  
>  		/* Doing a PD load always reloads the page dirs */
> -		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
> +		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
>  	}
>  
> -	if (ring->id != RCS) {
> +	if (engine->id != RCS) {
>  		if (from)
>  			i915_gem_context_unreference(from);
>  		goto done;
> @@ -681,14 +681,14 @@ static int do_switch(struct drm_i915_gem_request *req)
>  		 * space. This means we must enforce that a page table load
>  		 * occur when this occurs. */
>  	} else if (to->ppgtt &&
> -		   (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
> +		   (intel_ring_flag(engine) & to->ppgtt->pd_dirty_rings)) {
>  		hw_flags |= MI_FORCE_RESTORE;
> -		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
> +		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
>  	}
>  
>  	/* We should never emit switch_mm more than once */
> -	WARN_ON(needs_pd_load_pre(ring, to) &&
> -		needs_pd_load_post(ring, to, hw_flags));
> +	WARN_ON(needs_pd_load_pre(engine, to) &&
> +		needs_pd_load_post(engine, to, hw_flags));
>  
>  	ret = mi_set_context(req, hw_flags);
>  	if (ret)
> @@ -697,8 +697,8 @@ static int do_switch(struct drm_i915_gem_request *req)
>  	/* GEN8 does *not* require an explicit reload if the PDPs have been
>  	 * setup, and we do not wish to move them.
>  	 */
> -	if (needs_pd_load_post(ring, to, hw_flags)) {
> -		trace_switch_mm(ring, to);
> +	if (needs_pd_load_post(engine, to, hw_flags)) {
> +		trace_switch_mm(engine, to);
>  		ret = to->ppgtt->switch_mm(to->ppgtt, req);
>  		/* The hardware context switch is emitted, but we haven't
>  		 * actually changed the state - so it's probably safe to bail
> @@ -751,11 +751,11 @@ static int do_switch(struct drm_i915_gem_request *req)
>  
>  done:
>  	i915_gem_context_reference(to);
> -	ring->last_context = to;
> +	engine->last_context = to;
>  
>  	if (uninitialized) {
> -		if (ring->init_context) {
> -			ret = ring->init_context(req);
> +		if (engine->init_context) {
> +			ret = engine->init_context(req);
>  			if (ret)
>  				DRM_ERROR("ring init context: %d\n", ret);
>  		}
> @@ -764,7 +764,7 @@ done:
>  	return 0;
>  
>  unpin_out:
> -	if (ring->id == RCS)
> +	if (engine->id == RCS)
>  		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
>  	return ret;
>  }
> @@ -784,17 +784,18 @@ unpin_out:
>   */
>  int i915_switch_context(struct drm_i915_gem_request *req)
>  {
> -	struct intel_engine_cs *ring = req->ring;
>  
>  	WARN_ON(i915.enable_execlists);
>  	WARN_ON(!mutex_is_locked(&req->i915->dev->struct_mutex));
>  
>  	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
> -		if (req->ctx != ring->last_context) {
> +		struct intel_engine_cs *engine = req->engine;
> +
> +		if (req->ctx != engine->last_context) {
>  			i915_gem_context_reference(req->ctx);
> -			if (ring->last_context)
> -				i915_gem_context_unreference(ring->last_context);
> -			ring->last_context = req->ctx;
> +			if (engine->last_context)
> +				i915_gem_context_unreference(engine->last_context);
> +			engine->last_context = req->ctx;
>  		}
>  		return 0;
>  	}
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 5f1b9b7df051..32c2d08bdd4c 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -922,7 +922,7 @@ static int
>  i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
>  				struct list_head *vmas)
>  {
> -	const unsigned other_rings = ~intel_ring_flag(req->ring);
> +	const unsigned other_rings = ~intel_ring_flag(req->engine);
>  	struct i915_vma *vma;
>  	uint32_t flush_domains = 0;
>  	bool flush_chipset = false;
> @@ -932,7 +932,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
>  		struct drm_i915_gem_object *obj = vma->obj;
>  
>  		if (obj->active & other_rings) {
> -			ret = i915_gem_object_sync(obj, req->ring, &req);
> +			ret = i915_gem_object_sync(obj, req->engine, &req);
>  			if (ret)
>  				return ret;
>  		}
> @@ -944,7 +944,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
>  	}
>  
>  	if (flush_chipset)
> -		i915_gem_chipset_flush(req->ring->dev);
> +		i915_gem_chipset_flush(req->engine->dev);
>  
>  	if (flush_domains & I915_GEM_DOMAIN_GTT)
>  		wmb();
> @@ -1118,7 +1118,7 @@ i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
>  	struct intel_ringbuffer *ring = req->ringbuf;
>  	int ret, i;
>  
> -	if (!IS_GEN7(req->i915) || req->ring->id != RCS) {
> +	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
>  		DRM_DEBUG("sol reset is gen7/rcs only\n");
>  		return -EINVAL;
>  	}
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 4608884adfc8..c222456961fb 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -661,10 +661,10 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
>  		return ret;
>  
>  	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
> -	intel_ring_emit(ring, GEN8_RING_PDP_UDW(req->ring, entry));
> +	intel_ring_emit(ring, GEN8_RING_PDP_UDW(req->engine, entry));
>  	intel_ring_emit(ring, upper_32_bits(addr));
>  	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
> -	intel_ring_emit(ring, GEN8_RING_PDP_LDW(req->ring, entry));
> +	intel_ring_emit(ring, GEN8_RING_PDP_LDW(req->engine, entry));
>  	intel_ring_emit(ring, lower_32_bits(addr));
>  	intel_ring_advance(ring);
>  
> @@ -1592,7 +1592,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  	int ret;
>  
>  	/* NB: TLBs must be flushed and invalidated before a switch */
> -	ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
> +	ret = req->engine->flush(req,
> +				 I915_GEM_GPU_DOMAINS,
> +				 I915_GEM_GPU_DOMAINS);
>  	if (ret)
>  		return ret;
>  
> @@ -1601,9 +1603,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  		return ret;
>  
>  	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
> -	intel_ring_emit(ring, RING_PP_DIR_DCLV(req->ring));
> +	intel_ring_emit(ring, RING_PP_DIR_DCLV(req->engine));
>  	intel_ring_emit(ring, PP_DIR_DCLV_2G);
> -	intel_ring_emit(ring, RING_PP_DIR_BASE(req->ring));
> +	intel_ring_emit(ring, RING_PP_DIR_BASE(req->engine));
>  	intel_ring_emit(ring, get_pd_offset(ppgtt));
>  	intel_ring_emit(ring, MI_NOOP);
>  	intel_ring_advance(ring);
> @@ -1614,11 +1616,10 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  			  struct drm_i915_gem_request *req)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> -	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
> +	struct drm_i915_private *dev_priv = req->i915;
>  
> -	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
> -	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
> +	I915_WRITE(RING_PP_DIR_DCLV(req->engine), PP_DIR_DCLV_2G);
> +	I915_WRITE(RING_PP_DIR_BASE(req->engine), get_pd_offset(ppgtt));
>  	return 0;
>  }
>  
> @@ -1629,7 +1630,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  	int ret;
>  
>  	/* NB: TLBs must be flushed and invalidated before a switch */
> -	ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
> +	ret = req->engine->flush(req,
> +				 I915_GEM_GPU_DOMAINS,
> +				 I915_GEM_GPU_DOMAINS);
>  	if (ret)
>  		return ret;
>  
> @@ -1638,16 +1641,18 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  		return ret;
>  
>  	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
> -	intel_ring_emit(ring, RING_PP_DIR_DCLV(req->ring));
> +	intel_ring_emit(ring, RING_PP_DIR_DCLV(req->engine));
>  	intel_ring_emit(ring, PP_DIR_DCLV_2G);
> -	intel_ring_emit(ring, RING_PP_DIR_BASE(req->ring));
> +	intel_ring_emit(ring, RING_PP_DIR_BASE(req->engine));
>  	intel_ring_emit(ring, get_pd_offset(ppgtt));
>  	intel_ring_emit(ring, MI_NOOP);
>  	intel_ring_advance(ring);
>  
>  	/* XXX: RCS is the only one to auto invalidate the TLBs? */
> -	if (req->ring->id != RCS) {
> -		ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
> +	if (req->engine->id != RCS) {
> +		ret = req->engine->flush(req,
> +					 I915_GEM_GPU_DOMAINS,
> +					 I915_GEM_GPU_DOMAINS);
>  		if (ret)
>  			return ret;
>  	}
> @@ -1658,15 +1663,12 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  			  struct drm_i915_gem_request *req)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> -	struct drm_device *dev = ppgtt->base.dev;
> -	struct drm_i915_private *dev_priv = dev->dev_private;
> -
> +	struct drm_i915_private *dev_priv = req->i915;
>  
> -	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
> -	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
> +	I915_WRITE(RING_PP_DIR_DCLV(req->engine), PP_DIR_DCLV_2G);
> +	I915_WRITE(RING_PP_DIR_BASE(req->engine), get_pd_offset(ppgtt));
>  
> -	POSTING_READ(RING_PP_DIR_DCLV(ring));
> +	POSTING_READ(RING_PP_DIR_DCLV(req->engine));
>  
>  	return 0;
>  }
> @@ -2101,8 +2103,7 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
>  
>  int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
>  {
> -	struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
> -	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
> +	struct i915_hw_ppgtt *ppgtt = req->i915->mm.aliasing_ppgtt;
>  	int ret;
>  
>  	if (i915.enable_execlists)
> @@ -2115,7 +2116,7 @@ int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
>  	if (ret)
>  		return ret;
>  
> -	ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
> +	ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->engine);
>  	return 0;
>  }
>  
> diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
> index 5026a6267a88..f11e8685b1af 100644
> --- a/drivers/gpu/drm/i915/i915_gem_render_state.c
> +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
> @@ -198,25 +198,25 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
>  	struct render_state so;
>  	int ret;
>  
> -	ret = i915_gem_render_state_prepare(req->ring, &so);
> +	ret = i915_gem_render_state_prepare(req->engine, &so);
>  	if (ret)
>  		return ret;
>  
>  	if (so.rodata == NULL)
>  		return 0;
>  
> -	ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
> -					     so.rodata->batch_items * 4,
> -					     I915_DISPATCH_SECURE);
> +	ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
> +					       so.rodata->batch_items * 4,
> +					       I915_DISPATCH_SECURE);
>  	if (ret)
>  		goto out;
>  
>  	if (so.aux_batch_size > 8) {
> -		ret = req->ring->dispatch_execbuffer(req,
> -						     (so.ggtt_offset +
> -						      so.aux_batch_offset),
> -						     so.aux_batch_size,
> -						     I915_DISPATCH_SECURE);
> +		ret = req->engine->dispatch_execbuffer(req,
> +						       (so.ggtt_offset +
> +							so.aux_batch_offset),
> +						       so.aux_batch_size,
> +						       I915_DISPATCH_SECURE);
>  		if (ret)
>  			goto out;
>  	}
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index d8e035e126d7..974e3481e449 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -709,8 +709,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
>  	err->dirty = obj->dirty;
>  	err->purgeable = obj->madv != I915_MADV_WILLNEED;
>  	err->userptr = obj->userptr.mm != NULL;
> -	err->ring = obj->last_write_req ?
> -			i915_gem_request_get_ring(obj->last_write_req)->id : -1;
> +	err->ring = obj->last_write_req ?  obj->last_write_req->engine->id : -1;
>  	err->cache_level = obj->cache_level;
>  }
>  
> diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
> index ad64d6ba13a2..533b6a87fd1d 100644
> --- a/drivers/gpu/drm/i915/i915_trace.h
> +++ b/drivers/gpu/drm/i915/i915_trace.h
> @@ -498,7 +498,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
>  	    TP_fast_assign(
>  			   __entry->dev = from->dev->primary->index;
>  			   __entry->sync_from = from->id;
> -			   __entry->sync_to = to_req->ring->id;
> +			   __entry->sync_to = to_req->engine->id;
>  			   __entry->seqno = i915_gem_request_get_seqno(req);
>  			   ),
>  
> @@ -520,13 +520,11 @@ TRACE_EVENT(i915_gem_ring_dispatch,
>  			     ),
>  
>  	    TP_fast_assign(
> -			   struct intel_engine_cs *ring =
> -						i915_gem_request_get_ring(req);
> -			   __entry->dev = ring->dev->primary->index;
> -			   __entry->ring = ring->id;
> -			   __entry->seqno = i915_gem_request_get_seqno(req);
> +			   __entry->dev = req->i915->dev->primary->index;
> +			   __entry->ring = req->engine->id;
> +			   __entry->seqno = req->seqno;
>  			   __entry->flags = flags;
> -			   i915_trace_irq_get(ring, req);
> +			   i915_trace_irq_get(req->engine, req);
>  			   ),
>  
>  	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
> @@ -545,8 +543,8 @@ TRACE_EVENT(i915_gem_ring_flush,
>  			     ),
>  
>  	    TP_fast_assign(
> -			   __entry->dev = req->ring->dev->primary->index;
> -			   __entry->ring = req->ring->id;
> +			   __entry->dev = req->engine->dev->primary->index;
> +			   __entry->ring = req->engine->id;
>  			   __entry->invalidate = invalidate;
>  			   __entry->flush = flush;
>  			   ),
> @@ -567,11 +565,9 @@ DECLARE_EVENT_CLASS(i915_gem_request,
>  			     ),
>  
>  	    TP_fast_assign(
> -			   struct intel_engine_cs *ring =
> -						i915_gem_request_get_ring(req);
> -			   __entry->dev = ring->dev->primary->index;
> -			   __entry->ring = ring->id;
> -			   __entry->seqno = i915_gem_request_get_seqno(req);
> +			   __entry->dev = req->i915->dev->primary->index;
> +			   __entry->ring = req->engine->id;
> +			   __entry->seqno = req->seqno;
>  			   ),
>  
>  	    TP_printk("dev=%u, ring=%u, seqno=%u",
> @@ -631,13 +627,11 @@ TRACE_EVENT(i915_gem_request_wait_begin,
>  	     * less desirable.
>  	     */
>  	    TP_fast_assign(
> -			   struct intel_engine_cs *ring =
> -						i915_gem_request_get_ring(req);
> -			   __entry->dev = ring->dev->primary->index;
> -			   __entry->ring = ring->id;
> -			   __entry->seqno = i915_gem_request_get_seqno(req);
> +			   __entry->dev = req->i915->dev->primary->index;
> +			   __entry->ring = req->engine->id;
> +			   __entry->seqno = req->seqno;
>  			   __entry->blocking =
> -				     mutex_is_locked(&ring->dev->struct_mutex);
> +				     mutex_is_locked(&req->i915->dev->struct_mutex);
>  			   ),
>  
>  	    TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index f6dae2cfd9c9..2447f1a36fb0 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -10987,7 +10987,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
>  	}
>  
>  	len = 4;
> -	if (req->ring->id == RCS) {
> +	if (req->engine->id == RCS) {
>  		len += 6;
>  		/*
>  		 * On Gen 8, SRM is now taking an extra dword to accommodate
> @@ -11025,7 +11025,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
>  	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
>  	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
>  	 */
> -	if (req->ring->id == RCS) {
> +	if (req->engine->id == RCS) {
>  		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
>  		intel_ring_emit(ring, DERRMR);
>  		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
> @@ -11038,7 +11038,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
>  			intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
>  					      MI_SRM_LRM_GLOBAL_GTT);
>  		intel_ring_emit(ring, DERRMR);
> -		intel_ring_emit(ring, req->ring->scratch.gtt_offset + 256);
> +		intel_ring_emit(ring, req->engine->scratch.gtt_offset + 256);
>  		if (IS_GEN8(req->i915)) {
>  			intel_ring_emit(ring, 0);
>  			intel_ring_emit(ring, MI_NOOP);
> @@ -11078,7 +11078,7 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
>  	else if (i915.enable_execlists)
>  		return true;
>  	else
> -		return ring != i915_gem_request_get_ring(obj->last_write_req);
> +		return ring != i915_gem_request_get_engine(obj->last_write_req);
>  }
>  
>  static void skl_do_mmio_flip(struct intel_crtc *intel_crtc)
> @@ -11395,7 +11395,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
>  	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
>  		ring = &dev_priv->ring[BCS];
>  	} else if (INTEL_INFO(dev)->gen >= 7) {
> -		ring = i915_gem_request_get_ring(obj->last_write_req);
> +		ring = i915_gem_request_get_engine(obj->last_write_req);
>  		if (ring == NULL || ring->id != RCS)
>  			ring = &dev_priv->ring[BCS];
>  	} else {
> @@ -11411,7 +11411,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
>  	 */
>  	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
>  					 crtc->primary->state,
> -					 mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request);
> +					 mmio_flip ? i915_gem_request_get_engine(obj->last_write_req) : ring, &request);
>  	if (ret)
>  		goto cleanup_pending;
>  
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 2458804c521d..1502e53d2ad6 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -272,17 +272,16 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
>  
>  static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq)
>  {
> -	struct intel_engine_cs *ring = rq->ring;
> -	struct drm_device *dev = ring->dev;
> -	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
> -	uint64_t desc;
> +	int ring = rq->engine->id;
> +	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring].state;
>  	uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
> +	uint64_t desc;
>  
>  	WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
>  
>  	desc = GEN8_CTX_VALID;
> -	desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
> -	if (IS_GEN8(ctx_obj->base.dev))
> +	desc |= GEN8_CTX_ADDRESSING_MODE(rq->i915) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
> +	if (IS_GEN8(rq->i915))
>  		desc |= GEN8_CTX_L3LLC_COHERENT;
>  	desc |= GEN8_CTX_PRIVILEGE;
>  	desc |= lrca;
> @@ -293,10 +292,10 @@ static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq)
>  	/* desc |= GEN8_CTX_FORCE_RESTORE; */
>  
>  	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
> -	if (IS_GEN9(dev) &&
> -	    INTEL_REVID(dev) <= SKL_REVID_B0 &&
> -	    (ring->id == BCS || ring->id == VCS ||
> -	    ring->id == VECS || ring->id == VCS2))
> +	if (IS_GEN9(rq->i915) &&
> +	    INTEL_REVID(rq->i915) <= SKL_REVID_B0 &&
> +	    (ring == BCS || ring == VCS ||
> +	     ring == VECS || ring == VCS2))
>  		desc |= GEN8_CTX_FORCE_RESTORE;
>  
>  	return desc;
> @@ -306,7 +305,7 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
>  				 struct drm_i915_gem_request *rq1)
>  {
>  
> -	struct intel_engine_cs *ring = rq0->ring;
> +	struct intel_engine_cs *engine = rq0->engine;
>  	struct drm_i915_private *dev_priv = rq0->i915;
>  	uint64_t desc[2];
>  
> @@ -323,24 +322,23 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
>  	/* You must always write both descriptors in the order below. */
>  	spin_lock(&dev_priv->uncore.lock);
>  	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
> -	I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
> -	I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));
> +	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
> +	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
>  
> -	I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
> +	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
>  	/* The context is automatically loaded after the following */
> -	I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
> +	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));
>  
>  	/* ELSP is a wo register, use another nearby reg for posting */
> -	POSTING_READ_FW(RING_EXECLIST_STATUS(ring));
> +	POSTING_READ_FW(RING_EXECLIST_STATUS(engine));
>  	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
>  	spin_unlock(&dev_priv->uncore.lock);
>  }
>  
>  static int execlists_update_context(struct drm_i915_gem_request *rq)
>  {
> -	struct intel_engine_cs *ring = rq->ring;
>  	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
> -	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
> +	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[rq->engine->id].state;
>  	struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
>  	struct page *page;
>  	uint32_t *reg_state;
> @@ -355,7 +353,7 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
>  	reg_state[CTX_RING_TAIL+1] = rq->tail;
>  	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
>  
> -	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
> +	if (ppgtt && !USES_FULL_48BIT_PPGTT(rq->i915)) {
>  		/* True 32b PPGTT with dynamic page allocation: update PDP
>  		 * registers and point the unallocated PDPs to scratch page.
>  		 * PML4 is allocated during ppgtt init, so this is not needed
> @@ -538,27 +536,27 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
>  
>  static int execlists_context_queue(struct drm_i915_gem_request *request)
>  {
> -	struct intel_engine_cs *ring = request->ring;
> +	struct intel_engine_cs *engine = request->engine;
>  	struct drm_i915_gem_request *cursor;
>  	int num_elements = 0;
>  
> -	if (request->ctx != ring->default_context)
> +	if (request->ctx != engine->default_context)
>  		intel_lr_context_pin(request);
>  
>  	i915_gem_request_reference(request);
>  
>  	request->tail = request->ringbuf->tail;
>  
> -	spin_lock_irq(&ring->execlist_lock);
> +	spin_lock_irq(&engine->execlist_lock);
>  
> -	list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
> +	list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
>  		if (++num_elements > 2)
>  			break;
>  
>  	if (num_elements > 2) {
>  		struct drm_i915_gem_request *tail_req;
>  
> -		tail_req = list_last_entry(&ring->execlist_queue,
> +		tail_req = list_last_entry(&engine->execlist_queue,
>  					   struct drm_i915_gem_request,
>  					   execlist_link);
>  
> @@ -567,41 +565,41 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
>  				"More than 2 already-submitted reqs queued\n");
>  			list_del(&tail_req->execlist_link);
>  			list_add_tail(&tail_req->execlist_link,
> -				&ring->execlist_retired_req_list);
> +				&engine->execlist_retired_req_list);
>  		}
>  	}
>  
> -	list_add_tail(&request->execlist_link, &ring->execlist_queue);
> +	list_add_tail(&request->execlist_link, &engine->execlist_queue);
>  	if (num_elements == 0)
> -		execlists_context_unqueue(ring);
> +		execlists_context_unqueue(engine);
>  
> -	spin_unlock_irq(&ring->execlist_lock);
> +	spin_unlock_irq(&engine->execlist_lock);
>  
>  	return 0;
>  }
>  
>  static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_engine_cs *engine = req->engine;
>  	uint32_t flush_domains;
>  	int ret;
>  
>  	flush_domains = 0;
> -	if (ring->gpu_caches_dirty)
> +	if (engine->gpu_caches_dirty)
>  		flush_domains = I915_GEM_GPU_DOMAINS;
>  
> -	ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
> +	ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
>  	if (ret)
>  		return ret;
>  
> -	ring->gpu_caches_dirty = false;
> +	engine->gpu_caches_dirty = false;
>  	return 0;
>  }
>  
>  static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
>  				 struct list_head *vmas)
>  {
> -	const unsigned other_rings = ~intel_ring_flag(req->ring);
> +	const unsigned other_rings = ~intel_ring_flag(req->engine);
>  	struct i915_vma *vma;
>  	uint32_t flush_domains = 0;
>  	bool flush_chipset = false;
> @@ -611,7 +609,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
>  		struct drm_i915_gem_object *obj = vma->obj;
>  
>  		if (obj->active & other_rings) {
> -			ret = i915_gem_object_sync(obj, req->ring, &req);
> +			ret = i915_gem_object_sync(obj, req->engine, &req);
>  			if (ret)
>  				return ret;
>  		}
> @@ -635,9 +633,9 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
>  {
>  	int ret;
>  
> -	request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
> +	request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
>  
> -	if (request->ctx != request->ring->default_context) {
> +	if (request->ctx != request->engine->default_context) {
>  		ret = intel_lr_context_pin(request);
>  		if (ret)
>  			return ret;
> @@ -660,7 +658,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
>  {
>  	intel_ring_advance(request->ringbuf);
>  
> -	if (intel_ring_stopped(request->ring))
> +	if (intel_ring_stopped(request->engine))
>  		return;
>  
>  	execlists_context_queue(request);
> @@ -811,35 +809,35 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
>  
>  int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_engine_cs *engine = req->engine;
>  	int ret;
>  
> -	if (!ring->gpu_caches_dirty)
> +	if (!engine->gpu_caches_dirty)
>  		return 0;
>  
> -	ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
> +	ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
>  	if (ret)
>  		return ret;
>  
> -	ring->gpu_caches_dirty = false;
> +	engine->gpu_caches_dirty = false;
>  	return 0;
>  }
>  
>  static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
>  {
> -	struct intel_engine_cs *ring = rq->ring;
> -	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
> +	int engine = rq->engine->id;
> +	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[engine].state;
>  	struct intel_ringbuffer *ringbuf = rq->ringbuf;
>  	int ret = 0;
>  
> -	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
> -	if (rq->ctx->engine[ring->id].pin_count++ == 0) {
> +	WARN_ON(!mutex_is_locked(&rq->i915->dev->struct_mutex));
> +	if (rq->ctx->engine[engine].pin_count++ == 0) {
>  		ret = i915_gem_obj_ggtt_pin(ctx_obj,
>  				GEN8_LR_CONTEXT_ALIGN, 0);
>  		if (ret)
>  			goto reset_pin_count;
>  
> -		ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
> +		ret = intel_pin_and_map_ringbuffer_obj(rq->i915->dev, ringbuf);
>  		if (ret)
>  			goto unpin_ctx_obj;
>  
> @@ -851,20 +849,20 @@ static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
>  unpin_ctx_obj:
>  	i915_gem_object_ggtt_unpin(ctx_obj);
>  reset_pin_count:
> -	rq->ctx->engine[ring->id].pin_count = 0;
> +	rq->ctx->engine[engine].pin_count = 0;
>  
>  	return ret;
>  }
>  
>  void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
>  {
> -	struct intel_engine_cs *ring = rq->ring;
> -	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
> +	int engine = rq->engine->id;
> +	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[engine].state;
>  	struct intel_ringbuffer *ringbuf = rq->ringbuf;
>  
>  	if (ctx_obj) {
> -		WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
> -		if (--rq->ctx->engine[ring->id].pin_count == 0) {
> +		WARN_ON(!mutex_is_locked(&rq->i915->dev->struct_mutex));
> +		if (--rq->ctx->engine[engine].pin_count == 0) {
>  			intel_unpin_ringbuffer_obj(ringbuf);
>  			i915_gem_object_ggtt_unpin(ctx_obj);
>  		}
> @@ -874,7 +872,7 @@ void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
>  static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
>  {
>  	int ret, i;
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_engine_cs *engine = req->engine;
>  	struct intel_ringbuffer *ringbuf = req->ringbuf;
>  	struct drm_i915_private *dev_priv = req->i915;
>  	struct i915_workarounds *w = &dev_priv->workarounds;
> @@ -882,7 +880,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
>  	if (WARN_ON_ONCE(w->count == 0))
>  		return 0;
>  
> -	ring->gpu_caches_dirty = true;
> +	engine->gpu_caches_dirty = true;
>  	ret = logical_ring_flush_all_caches(req);
>  	if (ret)
>  		return ret;
> @@ -900,7 +898,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
>  
>  	intel_ring_advance(ringbuf);
>  
> -	ring->gpu_caches_dirty = true;
> +	engine->gpu_caches_dirty = true;
>  	ret = logical_ring_flush_all_caches(req);
>  	if (ret)
>  		return ret;
> @@ -1319,7 +1317,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *ring)
>  static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
>  {
>  	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_engine_cs *engine = req->engine;
>  	struct intel_ringbuffer *ringbuf = req->ringbuf;
>  	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
>  	int i, ret;
> @@ -1332,9 +1330,9 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
>  	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
>  		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
>  
> -		intel_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i));
> +		intel_ring_emit(ringbuf, GEN8_RING_PDP_UDW(engine, i));
>  		intel_ring_emit(ringbuf, upper_32_bits(pd_daddr));
> -		intel_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i));
> +		intel_ring_emit(ringbuf, GEN8_RING_PDP_LDW(engine, i));
>  		intel_ring_emit(ringbuf, lower_32_bits(pd_daddr));
>  	}
>  
> @@ -1358,14 +1356,14 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
>  	 * not idle). PML4 is allocated during ppgtt init so this is
>  	 * not needed in 48-bit.*/
>  	if (req->ctx->ppgtt &&
> -	    (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
> +	    (intel_ring_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
>  		if (!USES_FULL_48BIT_PPGTT(req->i915)) {
>  			ret = intel_logical_ring_emit_pdps(req);
>  			if (ret)
>  				return ret;
>  		}
>  
> -		req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
> +		req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->engine);
>  	}
>  
>  	ret = intel_ring_begin(req, 4);
> @@ -1575,21 +1573,21 @@ static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
>  	struct render_state so;
>  	int ret;
>  
> -	ret = i915_gem_render_state_prepare(req->ring, &so);
> +	ret = i915_gem_render_state_prepare(req->engine, &so);
>  	if (ret)
>  		return ret;
>  
>  	if (so.rodata == NULL)
>  		return 0;
>  
> -	ret = req->ring->emit_bb_start(req, so.ggtt_offset,
> -				       I915_DISPATCH_SECURE);
> +	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
> +					 I915_DISPATCH_SECURE);
>  	if (ret)
>  		goto out;
>  
> -	ret = req->ring->emit_bb_start(req,
> -				       (so.ggtt_offset + so.aux_batch_offset),
> -				       I915_DISPATCH_SECURE);
> +	ret = req->engine->emit_bb_start(req,
> +					 (so.ggtt_offset + so.aux_batch_offset),
> +					 I915_DISPATCH_SECURE);
>  	if (ret)
>  		goto out;
>  
> diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
> index ac0a982bbf55..d79c9c0bbffb 100644
> --- a/drivers/gpu/drm/i915/intel_mocs.c
> +++ b/drivers/gpu/drm/i915/intel_mocs.c
> @@ -138,21 +138,21 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
>   *
>   * Return: true if there are applicable MOCS settings for the device.
>   */
> -static bool get_mocs_settings(struct drm_device *dev,
> +static bool get_mocs_settings(struct drm_i915_private *dev_priv,
>  			      struct drm_i915_mocs_table *table)
>  {
>  	bool result = false;
>  
> -	if (IS_SKYLAKE(dev)) {
> +	if (IS_SKYLAKE(dev_priv)) {
>  		table->size  = ARRAY_SIZE(skylake_mocs_table);
>  		table->table = skylake_mocs_table;
>  		result = true;
> -	} else if (IS_BROXTON(dev)) {
> +	} else if (IS_BROXTON(dev_priv)) {
>  		table->size  = ARRAY_SIZE(broxton_mocs_table);
>  		table->table = broxton_mocs_table;
>  		result = true;
>  	} else {
> -		WARN_ONCE(INTEL_INFO(dev)->gen >= 9,
> +		WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
>  			  "Platform that should have a MOCS table does not.\n");
>  	}
>  
> @@ -297,7 +297,7 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
>  	struct drm_i915_mocs_table t;
>  	int ret;
>  
> -	if (get_mocs_settings(req->ring->dev, &t)) {
> +	if (get_mocs_settings(req->i915, &t)) {
>  		/* Program the control registers */
>  		ret = emit_mocs_control_table(req, &t, GEN9_GFX_MOCS_0);
>  		if (ret)
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 9821c2a8074a..cc060588a287 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -216,7 +216,7 @@ static int
>  intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
>  {
>  	struct intel_ringbuffer *ring = req->ringbuf;
> -	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> +	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 6);
> @@ -253,7 +253,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
>  {
>  	struct intel_ringbuffer *ring = req->ringbuf;
>  	u32 flags = 0;
> -	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> +	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>  	int ret;
>  
>  	/* Force SNB workarounds for PIPE_CONTROL flushes */
> @@ -326,7 +326,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
>  {
>  	struct intel_ringbuffer *ring = req->ringbuf;
>  	u32 flags = 0;
> -	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> +	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>  	int ret;
>  
>  	/*
> @@ -410,7 +410,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
>  		       u32 invalidate_domains, u32 flush_domains)
>  {
>  	u32 flags = 0;
> -	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> +	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>  	int ret;
>  
>  	flags |= PIPE_CONTROL_CS_STALL;
> @@ -720,7 +720,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
>  	if (WARN_ON_ONCE(w->count == 0))
>  		return 0;
>  
> -	req->ring->gpu_caches_dirty = true;
> +	req->engine->gpu_caches_dirty = true;
>  	ret = intel_ring_flush_all_caches(req);
>  	if (ret)
>  		return ret;
> @@ -738,7 +738,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
>  
>  	intel_ring_advance(ring);
>  
> -	req->ring->gpu_caches_dirty = true;
> +	req->engine->gpu_caches_dirty = true;
>  	ret = intel_ring_flush_all_caches(req);
>  	if (ret)
>  		return ret;
> @@ -1199,7 +1199,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
>  
>  	for_each_ring(waiter, dev_priv, i) {
>  		u32 seqno;
> -		u64 gtt_offset = signaller_req->ring->semaphore.signal_ggtt[i];
> +		u64 gtt_offset = signaller_req->engine->semaphore.signal_ggtt[i];
>  		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
>  			continue;
>  
> @@ -1239,7 +1239,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
>  
>  	for_each_ring(waiter, dev_priv, i) {
>  		u32 seqno;
> -		u64 gtt_offset = signaller_req->ring->semaphore.signal_ggtt[i];
> +		u64 gtt_offset = signaller_req->engine->semaphore.signal_ggtt[i];
>  		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
>  			continue;
>  
> @@ -1276,7 +1276,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
>  		return ret;
>  
>  	for_each_ring(useless, dev_priv, i) {
> -		u32 mbox_reg = signaller_req->ring->semaphore.mbox.signal[i];
> +		u32 mbox_reg = signaller_req->engine->semaphore.mbox.signal[i];
>  		if (mbox_reg != GEN6_NOSYNC) {
>  			u32 seqno = i915_gem_request_get_seqno(signaller_req);
>  			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
> @@ -1306,8 +1306,8 @@ gen6_add_request(struct drm_i915_gem_request *req)
>  	struct intel_ringbuffer *ring = req->ringbuf;
>  	int ret;
>  
> -	if (req->ring->semaphore.signal)
> -		ret = req->ring->semaphore.signal(req, 4);
> +	if (req->engine->semaphore.signal)
> +		ret = req->engine->semaphore.signal(req, 4);
>  	else
>  		ret = intel_ring_begin(req, 4);
>  
> @@ -1318,7 +1318,7 @@ gen6_add_request(struct drm_i915_gem_request *req)
>  	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
>  	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
>  	intel_ring_emit(ring, MI_USER_INTERRUPT);
> -	__intel_ring_advance(req->ring);
> +	__intel_ring_advance(req->engine);
>  
>  	return 0;
>  }
> @@ -1356,10 +1356,10 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
>  				MI_SEMAPHORE_SAD_GTE_SDD);
>  	intel_ring_emit(waiter, seqno);
>  	intel_ring_emit(waiter,
> -			lower_32_bits(GEN8_WAIT_OFFSET(waiter_req->ring,
> +			lower_32_bits(GEN8_WAIT_OFFSET(waiter_req->engine,
>  						       signaller->id)));
>  	intel_ring_emit(waiter,
> -			upper_32_bits(GEN8_WAIT_OFFSET(waiter_req->ring,
> +			upper_32_bits(GEN8_WAIT_OFFSET(waiter_req->engine,
>  						       signaller->id)));
>  	intel_ring_advance(waiter);
>  	return 0;
> @@ -1374,7 +1374,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
>  	u32 dw1 = MI_SEMAPHORE_MBOX |
>  		  MI_SEMAPHORE_COMPARE |
>  		  MI_SEMAPHORE_REGISTER;
> -	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter_req->ring->id];
> +	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter_req->engine->id];
>  	int ret;
>  
>  	/* Throughout all of the GEM code, seqno passed implies our current
> @@ -1419,7 +1419,7 @@ static int
>  pc_render_add_request(struct drm_i915_gem_request *req)
>  {
>  	struct intel_ringbuffer *ring = req->ringbuf;
> -	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> +	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>  	int ret;
>  
>  	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
> @@ -1437,7 +1437,7 @@ pc_render_add_request(struct drm_i915_gem_request *req)
>  	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
>  			PIPE_CONTROL_WRITE_FLUSH |
>  			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
> -	intel_ring_emit(ring, req->ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
> +	intel_ring_emit(ring, req->engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
>  	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
>  	intel_ring_emit(ring, 0);
>  	PIPE_CONTROL_FLUSH(ring, scratch_addr);
> @@ -1456,10 +1456,10 @@ pc_render_add_request(struct drm_i915_gem_request *req)
>  			PIPE_CONTROL_WRITE_FLUSH |
>  			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
>  			PIPE_CONTROL_NOTIFY);
> -	intel_ring_emit(ring, req->ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
> +	intel_ring_emit(ring, req->engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
>  	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
>  	intel_ring_emit(ring, 0);
> -	__intel_ring_advance(req->ring);
> +	__intel_ring_advance(req->engine);
>  
>  	return 0;
>  }
> @@ -1639,7 +1639,7 @@ i9xx_add_request(struct drm_i915_gem_request *req)
>  	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
>  	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
>  	intel_ring_emit(ring, MI_USER_INTERRUPT);
> -	__intel_ring_advance(req->ring);
> +	__intel_ring_advance(req->engine);
>  
>  	return 0;
>  }
> @@ -1801,7 +1801,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  			 unsigned dispatch_flags)
>  {
>  	struct intel_ringbuffer *ring = req->ringbuf;
> -	u32 cs_offset = req->ring->scratch.gtt_offset;
> +	u32 cs_offset = req->engine->scratch.gtt_offset;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 6);
> @@ -2164,7 +2164,7 @@ int intel_ring_idle(struct intel_engine_cs *ring)
>  
>  int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
>  {
> -	request->ringbuf = request->ring->buffer;
> +	request->ringbuf = request->engine->buffer;
>  	return 0;
>  }
>  
> @@ -2218,7 +2218,7 @@ void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
>  static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
>  {
>  	struct intel_ringbuffer *ringbuf = req->ringbuf;
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_engine_cs *engine = req->engine;
>  	struct drm_i915_gem_request *target;
>  	unsigned space;
>  	int ret;
> @@ -2229,7 +2229,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
>  	/* The whole point of reserving space is to not wait! */
>  	WARN_ON(ringbuf->reserved_in_use);
>  
> -	list_for_each_entry(target, &ring->request_list, list) {
> +	list_for_each_entry(target, &engine->request_list, list) {
>  		/*
>  		 * The request queue is per-engine, so can contain requests
>  		 * from multiple ringbuffers. Here, we must ignore any that
> @@ -2245,7 +2245,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
>  			break;
>  	}
>  
> -	if (WARN_ON(&target->list == &ring->request_list))
> +	if (WARN_ON(&target->list == &engine->request_list))
>  		return -ENOSPC;
>  
>  	ret = i915_wait_request(target);
> @@ -2931,40 +2931,40 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
>  int
>  intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_engine_cs *engine = req->engine;
>  	int ret;
>  
> -	if (!ring->gpu_caches_dirty)
> +	if (!engine->gpu_caches_dirty)
>  		return 0;
>  
> -	ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
> +	ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
>  	if (ret)
>  		return ret;
>  
>  	trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
>  
> -	ring->gpu_caches_dirty = false;
> +	engine->gpu_caches_dirty = false;
>  	return 0;
>  }
>  
>  int
>  intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
>  {
> -	struct intel_engine_cs *ring = req->ring;
> +	struct intel_engine_cs *engine = req->engine;
>  	uint32_t flush_domains;
>  	int ret;
>  
>  	flush_domains = 0;
> -	if (ring->gpu_caches_dirty)
> +	if (engine->gpu_caches_dirty)
>  		flush_domains = I915_GEM_GPU_DOMAINS;
>  
> -	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
> +	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
>  	if (ret)
>  		return ret;
>  
>  	trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
>  
> -	ring->gpu_caches_dirty = false;
> +	engine->gpu_caches_dirty = false;
>  	return 0;
>  }
>  
> -- 
> 2.6.2
> 

Patch
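
For orientation before the full diff: the substantive change is the rename of the engine pointer inside struct drm_i915_gem_request (ring -> engine) together with the matching accessor rename (i915_gem_request_get_ring() -> i915_gem_request_get_engine()); call sites that already know the request is non-NULL drop the accessor and dereference req->engine directly. Below is a condensed, compilable sketch of that pattern using stand-in struct definitions, not the real i915 types:

#include <stdio.h>

/* Condensed stand-ins for the kernel types; the real structures carry far
 * more state. Only the fields needed to illustrate the rename are shown. */
struct intel_engine_cs {
	const char *name;
	int id;
};

struct drm_i915_gem_request {
	/* Previously "ring"; renamed so it can no longer be confused with
	 * the intel_ringbuffer pointer (request->ringbuf). */
	struct intel_engine_cs *engine;
};

/* NULL-tolerant accessor, renamed from i915_gem_request_get_ring(). */
static inline struct intel_engine_cs *
i915_gem_request_get_engine(struct drm_i915_gem_request *req)
{
	return req ? req->engine : NULL;
}

int main(void)
{
	struct intel_engine_cs rcs = { .name = "render ring", .id = 0 };
	struct drm_i915_gem_request req = { .engine = &rcs };

	/* Call sites that may legitimately see a NULL request keep using
	 * the accessor ... */
	struct intel_engine_cs *engine = i915_gem_request_get_engine(&req);
	if (engine)
		printf("via accessor: %s\n", engine->name);

	/* ... while sites that already hold a valid request dereference
	 * req->engine directly, as the patch does throughout. */
	printf("direct: %s (id %d)\n", req.engine->name, req.engine->id);
	return 0;
}

The accessor survives for the few users, such as obj->last_write_req in intel_display.c, where the request pointer may legitimately be NULL.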

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 359436162f3d..56375c36b381 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -190,8 +190,7 @@  describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		seq_printf(m, " (%s mappable)", s);
 	}
 	if (obj->last_write_req != NULL)
-		seq_printf(m, " (%s)",
-			   i915_gem_request_get_ring(obj->last_write_req)->name);
+		seq_printf(m, " (%s)", obj->last_write_req->engine->name);
 	if (obj->frontbuffer_bits)
 		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
 }
@@ -594,14 +593,14 @@  static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 					   pipe, plane);
 			}
 			if (work->flip_queued_req) {
-				struct intel_engine_cs *ring =
-					i915_gem_request_get_ring(work->flip_queued_req);
+				struct intel_engine_cs *engine =
+					work->flip_queued_req->engine;
 
 				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
-					   ring->name,
+					   engine->name,
 					   i915_gem_request_get_seqno(work->flip_queued_req),
 					   dev_priv->next_seqno,
-					   ring->get_seqno(ring, true),
+					   engine->get_seqno(engine, true),
 					   i915_gem_request_completed(work->flip_queued_req, true));
 			} else
 				seq_printf(m, "Flip not associated with any ring\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fadf2ceb1f72..9ce8b3fcb3a0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2156,7 +2156,7 @@  struct drm_i915_gem_request {
 
 	/** On Which ring this request was generated */
 	struct drm_i915_private *i915;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 
 	/** GEM sequence number associated with this request. */
 	uint32_t seqno;
@@ -2240,9 +2240,9 @@  i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
 }
 
 static inline struct intel_engine_cs *
-i915_gem_request_get_ring(struct drm_i915_gem_request *req)
+i915_gem_request_get_engine(struct drm_i915_gem_request *req)
 {
-	return req ? req->ring : NULL;
+	return req ? req->engine : NULL;
 }
 
 static inline struct drm_i915_gem_request *
@@ -2256,7 +2256,7 @@  i915_gem_request_reference(struct drm_i915_gem_request *req)
 static inline void
 i915_gem_request_unreference(struct drm_i915_gem_request *req)
 {
-	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&req->i915->dev->struct_mutex));
 	kref_put(&req->ref, i915_gem_request_free);
 }
 
@@ -2268,7 +2268,7 @@  i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
 	if (!req)
 		return;
 
-	dev = req->ring->dev;
+	dev = req->i915->dev;
 	if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
 		mutex_unlock(&dev->struct_mutex);
 }
@@ -2877,7 +2877,7 @@  static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
 
 	BUG_ON(req == NULL);
 
-	seqno = req->ring->get_seqno(req->ring, lazy_coherency);
+	seqno = req->engine->get_seqno(req->engine, lazy_coherency);
 
 	return i915_seqno_passed(seqno, req->seqno);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 030fc9d14385..5a1b51a27fe3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1174,7 +1174,7 @@  static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
 	u64 timeout;
 	unsigned cpu;
 
-	if (i915_gem_request_get_ring(req)->irq_refcount)
+	if (req->engine->irq_refcount)
 		return -EBUSY;
 
 	timeout = local_clock_us(&cpu) + 2;
@@ -1220,10 +1220,10 @@  int __i915_wait_request(struct drm_i915_gem_request *req,
 			s64 *timeout,
 			struct intel_rps_client *rps)
 {
-	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_private *dev_priv = req->i915;
 	const bool irq_test_in_progress =
-		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
+		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(engine);
 	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
 	DEFINE_WAIT(wait);
 	unsigned long timeout_expire;
@@ -1252,7 +1252,7 @@  int __i915_wait_request(struct drm_i915_gem_request *req,
 	if (ret == 0)
 		goto out;
 
-	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
+	if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
 		ret = -ENODEV;
 		goto out;
 	}
@@ -1260,7 +1260,7 @@  int __i915_wait_request(struct drm_i915_gem_request *req,
 	for (;;) {
 		struct timer_list timer;
 
-		prepare_to_wait(&ring->irq_queue, &wait, state);
+		prepare_to_wait(&engine->irq_queue, &wait, state);
 
 		/* We need to check whether any gpu reset happened in between
 		 * the caller grabbing the seqno and now ... */
@@ -1289,11 +1289,11 @@  int __i915_wait_request(struct drm_i915_gem_request *req,
 		}
 
 		timer.function = NULL;
-		if (timeout || missed_irq(dev_priv, ring)) {
+		if (timeout || missed_irq(dev_priv, engine)) {
 			unsigned long expire;
 
 			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
-			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
+			expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
 			mod_timer(&timer, expire);
 		}
 
@@ -1305,9 +1305,9 @@  int __i915_wait_request(struct drm_i915_gem_request *req,
 		}
 	}
 	if (!irq_test_in_progress)
-		ring->irq_put(ring);
+		engine->irq_put(engine);
 
-	finish_wait(&ring->irq_queue, &wait);
+	finish_wait(&engine->irq_queue, &wait);
 
 out:
 	now = ktime_get_raw_ns();
@@ -1397,7 +1397,7 @@  static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 static void
 __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_gem_request *tmp;
 
 	lockdep_assert_held(&engine->dev->struct_mutex);
@@ -1466,7 +1466,7 @@  i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 			if (ret)
 				return ret;
 
-			i = obj->last_write_req->ring->id;
+			i = obj->last_write_req->engine->id;
 			if (obj->last_read_req[i] == obj->last_write_req)
 				i915_gem_object_retire__read(obj, i);
 			else
@@ -1493,7 +1493,7 @@  static void
 i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
 			       struct drm_i915_gem_request *req)
 {
-	int ring = req->ring->id;
+	int ring = req->engine->id;
 
 	if (obj->last_read_req[ring] == req)
 		i915_gem_object_retire__read(obj, ring);
@@ -2415,17 +2415,15 @@  void i915_vma_move_to_active(struct i915_vma *vma,
 			     struct drm_i915_gem_request *req)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
-	struct intel_engine_cs *ring;
-
-	ring = i915_gem_request_get_ring(req);
+	struct intel_engine_cs *engine = req->engine;
 
 	/* Add a reference if we're newly entering the active list. */
 	if (obj->active == 0)
 		drm_gem_object_reference(&obj->base);
-	obj->active |= intel_ring_flag(ring);
+	obj->active |= intel_ring_flag(engine);
 
-	list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
-	i915_gem_request_assign(&obj->last_read_req[ring->id], req);
+	list_move_tail(&obj->ring_list[engine->id], &engine->active_list);
+	i915_gem_request_assign(&obj->last_read_req[engine->id], req);
 
 	list_move_tail(&vma->mm_list, &vma->vm->active_list);
 }
@@ -2434,7 +2432,7 @@  static void
 i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
 {
 	RQ_BUG_ON(obj->last_write_req == NULL);
-	RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
+	RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->engine)));
 
 	i915_gem_request_assign(&obj->last_write_req, NULL);
 	intel_fb_obj_flush(obj, true, ORIGIN_CS);
@@ -2451,7 +2449,7 @@  i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 	list_del_init(&obj->ring_list[ring]);
 	i915_gem_request_assign(&obj->last_read_req[ring], NULL);
 
-	if (obj->last_write_req && obj->last_write_req->ring->id == ring)
+	if (obj->last_write_req && obj->last_write_req->engine->id == ring)
 		i915_gem_object_retire__write(obj);
 
 	obj->active &= ~(1 << ring);
@@ -2553,7 +2551,7 @@  void __i915_add_request(struct drm_i915_gem_request *request,
 			struct drm_i915_gem_object *obj,
 			bool flush_caches)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct drm_i915_private *dev_priv;
 	struct intel_ringbuffer *ringbuf;
 	u32 request_start;
@@ -2562,7 +2560,7 @@  void __i915_add_request(struct drm_i915_gem_request *request,
 	if (WARN_ON(request == NULL))
 		return;
 
-	ring = request->ring;
+	engine = request->engine;
 	dev_priv = request->i915;
 	ringbuf = request->ringbuf;
 
@@ -2598,9 +2596,9 @@  void __i915_add_request(struct drm_i915_gem_request *request,
 	request->postfix = intel_ring_get_tail(ringbuf);
 
 	if (i915.enable_execlists)
-		ret = ring->emit_request(request);
+		ret = engine->emit_request(request);
 	else {
-		ret = ring->add_request(request);
+		ret = engine->add_request(request);
 
 		request->tail = intel_ring_get_tail(ringbuf);
 	}
@@ -2618,12 +2616,12 @@  void __i915_add_request(struct drm_i915_gem_request *request,
 	request->batch_obj = obj;
 
 	request->emitted_jiffies = jiffies;
-	ring->last_submitted_seqno = request->seqno;
-	list_add_tail(&request->list, &ring->request_list);
+	engine->last_submitted_seqno = request->seqno;
+	list_add_tail(&request->list, &engine->request_list);
 
 	trace_i915_gem_request_add(request);
 
-	i915_queue_hangcheck(ring->dev);
+	i915_queue_hangcheck(engine->dev);
 
 	queue_delayed_work(dev_priv->wq,
 			   &dev_priv->mm.retire_work,
@@ -2690,7 +2688,7 @@  void i915_gem_request_free(struct kref *req_ref)
 
 	if (ctx) {
 		if (i915.enable_execlists) {
-			if (ctx != req->ring->default_context)
+			if (ctx != req->engine->default_context)
 				intel_lr_context_unpin(req);
 		}
 
@@ -2700,11 +2698,11 @@  void i915_gem_request_free(struct kref *req_ref)
 	kmem_cache_free(req->i915->requests, req);
 }
 
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
+int i915_gem_request_alloc(struct intel_engine_cs *engine,
 			   struct intel_context *ctx,
 			   struct drm_i915_gem_request **req_out)
 {
-	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+	struct drm_i915_private *dev_priv = to_i915(engine->dev);
 	struct drm_i915_gem_request *req;
 	int ret;
 
@@ -2717,13 +2715,13 @@  int i915_gem_request_alloc(struct intel_engine_cs *ring,
 	if (req == NULL)
 		return -ENOMEM;
 
-	ret = i915_gem_get_seqno(ring->dev, &req->seqno);
+	ret = i915_gem_get_seqno(engine->dev, &req->seqno);
 	if (ret)
 		goto err;
 
 	kref_init(&req->ref);
 	req->i915 = dev_priv;
-	req->ring = ring;
+	req->engine = engine;
 	req->ctx  = ctx;
 	i915_gem_context_reference(req->ctx);
 
@@ -3137,7 +3135,7 @@  __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	struct intel_engine_cs *from;
 	int ret;
 
-	from = i915_gem_request_get_ring(from_req);
+	from = from_req->engine;
 	if (to == from)
 		return 0;
 
@@ -4308,7 +4306,7 @@  i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	BUILD_BUG_ON(I915_NUM_RINGS > 16);
 	args->busy = obj->active << 16;
 	if (obj->last_write_req)
-		args->busy |= obj->last_write_req->ring->id;
+		args->busy |= obj->last_write_req->engine->id;
 
 unref:
 	drm_gem_object_unreference(&obj->base);
@@ -4641,7 +4639,6 @@  err:
 
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
 	struct drm_i915_private *dev_priv = req->i915;
 	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
 	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
@@ -4660,12 +4657,11 @@  int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 	 * at initialization time.
 	 */
 	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(ring, reg_base + i);
-		intel_ring_emit(ring, remap_info[i/4]);
+		intel_ring_emit(req->ringbuf, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(req->ringbuf, reg_base + i);
+		intel_ring_emit(req->ringbuf, remap_info[i/4]);
 	}
-
-	intel_ring_advance(ring);
+	intel_ring_advance(req->ringbuf);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index c3adc121aab4..047c2f94bd22 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -410,14 +410,14 @@  void i915_gem_context_fini(struct drm_device *dev)
 
 int i915_gem_context_enable(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	if (i915.enable_execlists) {
-		if (ring->init_context == NULL)
+		if (engine->init_context == NULL)
 			return 0;
 
-		ret = ring->init_context(req);
+		ret = engine->init_context(req);
 	} else
 		ret = i915_switch_context(req);
 
@@ -494,7 +494,7 @@  mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 * itlb_before_ctx_switch.
 	 */
 	if (IS_GEN6(req->i915)) {
-		ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
+		ret = req->engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
@@ -522,7 +522,7 @@  mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 
 			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
 			for_each_ring(signaller, req->i915, i) {
-				if (signaller == req->ring)
+				if (signaller == req->engine)
 					continue;
 
 				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
@@ -547,7 +547,7 @@  mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 
 			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
 			for_each_ring(signaller, req->i915, i) {
-				if (signaller == req->ring)
+				if (signaller == req->engine)
 					continue;
 
 				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
@@ -618,19 +618,19 @@  needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
 static int do_switch(struct drm_i915_gem_request *req)
 {
 	struct intel_context *to = req->ctx;
-	struct intel_engine_cs *ring = req->ring;
-	struct intel_context *from = ring->last_context;
+	struct intel_engine_cs *engine = req->engine;
+	struct intel_context *from = engine->last_context;
 	u32 hw_flags = 0;
 	bool uninitialized = false;
 	int ret, i;
 
-	if (should_skip_switch(ring, from, to))
+	if (should_skip_switch(engine, from, to))
 		return 0;
 
 	/* Trying to pin first makes error handling easier. */
-	if (ring->id == RCS) {
+	if (engine->id == RCS) {
 		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
-					    get_context_alignment(ring->dev), 0);
+					    get_context_alignment(engine->dev), 0);
 		if (ret)
 			return ret;
 	}
@@ -640,23 +640,23 @@  static int do_switch(struct drm_i915_gem_request *req)
 	 * evict_everything - as a last ditch gtt defrag effort that also
 	 * switches to the default context. Hence we need to reload from here.
 	 */
-	from = ring->last_context;
+	from = engine->last_context;
 
-	if (needs_pd_load_pre(ring, to)) {
+	if (needs_pd_load_pre(engine, to)) {
 		/* Older GENs and non render rings still want the load first,
 		 * "PP_DCLV followed by PP_DIR_BASE register through Load
 		 * Register Immediate commands in Ring Buffer before submitting
 		 * a context."*/
-		trace_switch_mm(ring, to);
+		trace_switch_mm(engine, to);
 		ret = to->ppgtt->switch_mm(to->ppgtt, req);
 		if (ret)
 			goto unpin_out;
 
 		/* Doing a PD load always reloads the page dirs */
-		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
+		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
 	}
 
-	if (ring->id != RCS) {
+	if (engine->id != RCS) {
 		if (from)
 			i915_gem_context_unreference(from);
 		goto done;
@@ -681,14 +681,14 @@  static int do_switch(struct drm_i915_gem_request *req)
 		 * space. This means we must enforce that a page table load
 		 * occur when this occurs. */
 	} else if (to->ppgtt &&
-		   (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
+		   (intel_ring_flag(engine) & to->ppgtt->pd_dirty_rings)) {
 		hw_flags |= MI_FORCE_RESTORE;
-		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
+		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
 	}
 
 	/* We should never emit switch_mm more than once */
-	WARN_ON(needs_pd_load_pre(ring, to) &&
-		needs_pd_load_post(ring, to, hw_flags));
+	WARN_ON(needs_pd_load_pre(engine, to) &&
+		needs_pd_load_post(engine, to, hw_flags));
 
 	ret = mi_set_context(req, hw_flags);
 	if (ret)
@@ -697,8 +697,8 @@  static int do_switch(struct drm_i915_gem_request *req)
 	/* GEN8 does *not* require an explicit reload if the PDPs have been
 	 * setup, and we do not wish to move them.
 	 */
-	if (needs_pd_load_post(ring, to, hw_flags)) {
-		trace_switch_mm(ring, to);
+	if (needs_pd_load_post(engine, to, hw_flags)) {
+		trace_switch_mm(engine, to);
 		ret = to->ppgtt->switch_mm(to->ppgtt, req);
 		/* The hardware context switch is emitted, but we haven't
 		 * actually changed the state - so it's probably safe to bail
@@ -751,11 +751,11 @@  static int do_switch(struct drm_i915_gem_request *req)
 
 done:
 	i915_gem_context_reference(to);
-	ring->last_context = to;
+	engine->last_context = to;
 
 	if (uninitialized) {
-		if (ring->init_context) {
-			ret = ring->init_context(req);
+		if (engine->init_context) {
+			ret = engine->init_context(req);
 			if (ret)
 				DRM_ERROR("ring init context: %d\n", ret);
 		}
@@ -764,7 +764,7 @@  done:
 	return 0;
 
 unpin_out:
-	if (ring->id == RCS)
+	if (engine->id == RCS)
 		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
 	return ret;
 }
@@ -784,17 +784,18 @@  unpin_out:
  */
 int i915_switch_context(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
 
 	WARN_ON(i915.enable_execlists);
 	WARN_ON(!mutex_is_locked(&req->i915->dev->struct_mutex));
 
 	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
-		if (req->ctx != ring->last_context) {
+		struct intel_engine_cs *engine = req->engine;
+
+		if (req->ctx != engine->last_context) {
 			i915_gem_context_reference(req->ctx);
-			if (ring->last_context)
-				i915_gem_context_unreference(ring->last_context);
-			ring->last_context = req->ctx;
+			if (engine->last_context)
+				i915_gem_context_unreference(engine->last_context);
+			engine->last_context = req->ctx;
 		}
 		return 0;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 5f1b9b7df051..32c2d08bdd4c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -922,7 +922,7 @@  static int
 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 				struct list_head *vmas)
 {
-	const unsigned other_rings = ~intel_ring_flag(req->ring);
+	const unsigned other_rings = ~intel_ring_flag(req->engine);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -932,7 +932,7 @@  i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, req->ring, &req);
+			ret = i915_gem_object_sync(obj, req->engine, &req);
 			if (ret)
 				return ret;
 		}
@@ -944,7 +944,7 @@  i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 	}
 
 	if (flush_chipset)
-		i915_gem_chipset_flush(req->ring->dev);
+		i915_gem_chipset_flush(req->engine->dev);
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
@@ -1118,7 +1118,7 @@  i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret, i;
 
-	if (!IS_GEN7(req->i915) || req->ring->id != RCS) {
+	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
 		DRM_DEBUG("sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 4608884adfc8..c222456961fb 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -661,10 +661,10 @@  static int gen8_write_pdp(struct drm_i915_gem_request *req,
 		return ret;
 
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit(ring, GEN8_RING_PDP_UDW(req->ring, entry));
+	intel_ring_emit(ring, GEN8_RING_PDP_UDW(req->engine, entry));
 	intel_ring_emit(ring, upper_32_bits(addr));
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit(ring, GEN8_RING_PDP_LDW(req->ring, entry));
+	intel_ring_emit(ring, GEN8_RING_PDP_LDW(req->engine, entry));
 	intel_ring_emit(ring, lower_32_bits(addr));
 	intel_ring_advance(ring);
 
@@ -1592,7 +1592,9 @@  static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = req->engine->flush(req,
+				 I915_GEM_GPU_DOMAINS,
+				 I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1601,9 +1603,9 @@  static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 		return ret;
 
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit(ring, RING_PP_DIR_DCLV(req->ring));
+	intel_ring_emit(ring, RING_PP_DIR_DCLV(req->engine));
 	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit(ring, RING_PP_DIR_BASE(req->ring));
+	intel_ring_emit(ring, RING_PP_DIR_BASE(req->engine));
 	intel_ring_emit(ring, get_pd_offset(ppgtt));
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
@@ -1614,11 +1616,10 @@  static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+	struct drm_i915_private *dev_priv = req->i915;
 
-	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+	I915_WRITE(RING_PP_DIR_DCLV(req->engine), PP_DIR_DCLV_2G);
+	I915_WRITE(RING_PP_DIR_BASE(req->engine), get_pd_offset(ppgtt));
 	return 0;
 }
 
@@ -1629,7 +1630,9 @@  static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = req->engine->flush(req,
+				 I915_GEM_GPU_DOMAINS,
+				 I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1638,16 +1641,18 @@  static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 		return ret;
 
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit(ring, RING_PP_DIR_DCLV(req->ring));
+	intel_ring_emit(ring, RING_PP_DIR_DCLV(req->engine));
 	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit(ring, RING_PP_DIR_BASE(req->ring));
+	intel_ring_emit(ring, RING_PP_DIR_BASE(req->engine));
 	intel_ring_emit(ring, get_pd_offset(ppgtt));
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
-	if (req->ring->id != RCS) {
-		ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	if (req->engine->id != RCS) {
+		ret = req->engine->flush(req,
+					 I915_GEM_GPU_DOMAINS,
+					 I915_GEM_GPU_DOMAINS);
 		if (ret)
 			return ret;
 	}
@@ -1658,15 +1663,12 @@  static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_device *dev = ppgtt->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
+	struct drm_i915_private *dev_priv = req->i915;
 
-	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+	I915_WRITE(RING_PP_DIR_DCLV(req->engine), PP_DIR_DCLV_2G);
+	I915_WRITE(RING_PP_DIR_BASE(req->engine), get_pd_offset(ppgtt));
 
-	POSTING_READ(RING_PP_DIR_DCLV(ring));
+	POSTING_READ(RING_PP_DIR_DCLV(req->engine));
 
 	return 0;
 }
@@ -2101,8 +2103,7 @@  int i915_ppgtt_init_hw(struct drm_device *dev)
 
 int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
 {
-	struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
-	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	struct i915_hw_ppgtt *ppgtt = req->i915->mm.aliasing_ppgtt;
 	int ret;
 
 	if (i915.enable_execlists)
@@ -2115,7 +2116,7 @@  int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
+	ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->engine);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 5026a6267a88..f11e8685b1af 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -198,25 +198,25 @@  int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 	struct render_state so;
 	int ret;
 
-	ret = i915_gem_render_state_prepare(req->ring, &so);
+	ret = i915_gem_render_state_prepare(req->engine, &so);
 	if (ret)
 		return ret;
 
 	if (so.rodata == NULL)
 		return 0;
 
-	ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
-					     so.rodata->batch_items * 4,
-					     I915_DISPATCH_SECURE);
+	ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
+					       so.rodata->batch_items * 4,
+					       I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
 	if (so.aux_batch_size > 8) {
-		ret = req->ring->dispatch_execbuffer(req,
-						     (so.ggtt_offset +
-						      so.aux_batch_offset),
-						     so.aux_batch_size,
-						     I915_DISPATCH_SECURE);
+		ret = req->engine->dispatch_execbuffer(req,
+						       (so.ggtt_offset +
+							so.aux_batch_offset),
+						       so.aux_batch_size,
+						       I915_DISPATCH_SECURE);
 		if (ret)
 			goto out;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d8e035e126d7..974e3481e449 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -709,8 +709,7 @@  static void capture_bo(struct drm_i915_error_buffer *err,
 	err->dirty = obj->dirty;
 	err->purgeable = obj->madv != I915_MADV_WILLNEED;
 	err->userptr = obj->userptr.mm != NULL;
-	err->ring = obj->last_write_req ?
-			i915_gem_request_get_ring(obj->last_write_req)->id : -1;
+	err->ring = obj->last_write_req ? obj->last_write_req->engine->id : -1;
 	err->cache_level = obj->cache_level;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index ad64d6ba13a2..533b6a87fd1d 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -498,7 +498,7 @@  TRACE_EVENT(i915_gem_ring_sync_to,
 	    TP_fast_assign(
 			   __entry->dev = from->dev->primary->index;
 			   __entry->sync_from = from->id;
-			   __entry->sync_to = to_req->ring->id;
+			   __entry->sync_to = to_req->engine->id;
 			   __entry->seqno = i915_gem_request_get_seqno(req);
 			   ),
 
@@ -520,13 +520,11 @@  TRACE_EVENT(i915_gem_ring_dispatch,
 			     ),
 
 	    TP_fast_assign(
-			   struct intel_engine_cs *ring =
-						i915_gem_request_get_ring(req);
-			   __entry->dev = ring->dev->primary->index;
-			   __entry->ring = ring->id;
-			   __entry->seqno = i915_gem_request_get_seqno(req);
+			   __entry->dev = req->i915->dev->primary->index;
+			   __entry->ring = req->engine->id;
+			   __entry->seqno = req->seqno;
 			   __entry->flags = flags;
-			   i915_trace_irq_get(ring, req);
+			   i915_trace_irq_get(req->engine, req);
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -545,8 +543,8 @@  TRACE_EVENT(i915_gem_ring_flush,
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = req->ring->dev->primary->index;
-			   __entry->ring = req->ring->id;
+			   __entry->dev = req->engine->dev->primary->index;
+			   __entry->ring = req->engine->id;
 			   __entry->invalidate = invalidate;
 			   __entry->flush = flush;
 			   ),
@@ -567,11 +565,9 @@  DECLARE_EVENT_CLASS(i915_gem_request,
 			     ),
 
 	    TP_fast_assign(
-			   struct intel_engine_cs *ring =
-						i915_gem_request_get_ring(req);
-			   __entry->dev = ring->dev->primary->index;
-			   __entry->ring = ring->id;
-			   __entry->seqno = i915_gem_request_get_seqno(req);
+			   __entry->dev = req->i915->dev->primary->index;
+			   __entry->ring = req->engine->id;
+			   __entry->seqno = req->seqno;
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -631,13 +627,11 @@  TRACE_EVENT(i915_gem_request_wait_begin,
 	     * less desirable.
 	     */
 	    TP_fast_assign(
-			   struct intel_engine_cs *ring =
-						i915_gem_request_get_ring(req);
-			   __entry->dev = ring->dev->primary->index;
-			   __entry->ring = ring->id;
-			   __entry->seqno = i915_gem_request_get_seqno(req);
+			   __entry->dev = req->i915->dev->primary->index;
+			   __entry->ring = req->engine->id;
+			   __entry->seqno = req->seqno;
 			   __entry->blocking =
-				     mutex_is_locked(&ring->dev->struct_mutex);
+				     mutex_is_locked(&req->i915->dev->struct_mutex);
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f6dae2cfd9c9..2447f1a36fb0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10987,7 +10987,7 @@  static int intel_gen7_queue_flip(struct drm_device *dev,
 	}
 
 	len = 4;
-	if (req->ring->id == RCS) {
+	if (req->engine->id == RCS) {
 		len += 6;
 		/*
 		 * On Gen 8, SRM is now taking an extra dword to accommodate
@@ -11025,7 +11025,7 @@  static int intel_gen7_queue_flip(struct drm_device *dev,
 	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
 	 */
-	if (req->ring->id == RCS) {
+	if (req->engine->id == RCS) {
 		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 		intel_ring_emit(ring, DERRMR);
 		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
@@ -11038,7 +11038,7 @@  static int intel_gen7_queue_flip(struct drm_device *dev,
 			intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
 					      MI_SRM_LRM_GLOBAL_GTT);
 		intel_ring_emit(ring, DERRMR);
-		intel_ring_emit(ring, req->ring->scratch.gtt_offset + 256);
+		intel_ring_emit(ring, req->engine->scratch.gtt_offset + 256);
 		if (IS_GEN8(req->i915)) {
 			intel_ring_emit(ring, 0);
 			intel_ring_emit(ring, MI_NOOP);
@@ -11078,7 +11078,7 @@  static bool use_mmio_flip(struct intel_engine_cs *ring,
 	else if (i915.enable_execlists)
 		return true;
 	else
-		return ring != i915_gem_request_get_ring(obj->last_write_req);
+		return ring != i915_gem_request_get_engine(obj->last_write_req);
 }
 
 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc)
@@ -11395,7 +11395,7 @@  static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
 		ring = &dev_priv->ring[BCS];
 	} else if (INTEL_INFO(dev)->gen >= 7) {
-		ring = i915_gem_request_get_ring(obj->last_write_req);
+		ring = i915_gem_request_get_engine(obj->last_write_req);
 		if (ring == NULL || ring->id != RCS)
 			ring = &dev_priv->ring[BCS];
 	} else {
@@ -11411,7 +11411,7 @@  static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	 */
 	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
 					 crtc->primary->state,
-					 mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request);
+					 mmio_flip ? i915_gem_request_get_engine(obj->last_write_req) : ring, &request);
 	if (ret)
 		goto cleanup_pending;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 2458804c521d..1502e53d2ad6 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -272,17 +272,16 @@  u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
 
 static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq)
 {
-	struct intel_engine_cs *ring = rq->ring;
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-	uint64_t desc;
+	int ring = rq->engine->id;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring].state;
 	uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
+	uint64_t desc;
 
 	WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
 
 	desc = GEN8_CTX_VALID;
-	desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
-	if (IS_GEN8(ctx_obj->base.dev))
+	desc |= GEN8_CTX_ADDRESSING_MODE(rq->i915) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
+	if (IS_GEN8(rq->i915))
 		desc |= GEN8_CTX_L3LLC_COHERENT;
 	desc |= GEN8_CTX_PRIVILEGE;
 	desc |= lrca;
@@ -293,10 +292,10 @@  static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq)
 	/* desc |= GEN8_CTX_FORCE_RESTORE; */
 
 	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
-	if (IS_GEN9(dev) &&
-	    INTEL_REVID(dev) <= SKL_REVID_B0 &&
-	    (ring->id == BCS || ring->id == VCS ||
-	    ring->id == VECS || ring->id == VCS2))
+	if (IS_GEN9(rq->i915) &&
+	    INTEL_REVID(rq->i915) <= SKL_REVID_B0 &&
+	    (ring == BCS || ring == VCS ||
+	     ring == VECS || ring == VCS2))
 		desc |= GEN8_CTX_FORCE_RESTORE;
 
 	return desc;
@@ -306,7 +305,7 @@  static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
 				 struct drm_i915_gem_request *rq1)
 {
 
-	struct intel_engine_cs *ring = rq0->ring;
+	struct intel_engine_cs *engine = rq0->engine;
 	struct drm_i915_private *dev_priv = rq0->i915;
 	uint64_t desc[2];
 
@@ -323,24 +322,23 @@  static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
 	/* You must always write both descriptors in the order below. */
 	spin_lock(&dev_priv->uncore.lock);
 	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
-	I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
-	I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));
+	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
+	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
 
-	I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
+	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
 	/* The context is automatically loaded after the following */
-	I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
+	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));
 
 	/* ELSP is a wo register, use another nearby reg for posting */
-	POSTING_READ_FW(RING_EXECLIST_STATUS(ring));
+	POSTING_READ_FW(RING_EXECLIST_STATUS(engine));
 	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 	spin_unlock(&dev_priv->uncore.lock);
 }
 
 static int execlists_update_context(struct drm_i915_gem_request *rq)
 {
-	struct intel_engine_cs *ring = rq->ring;
 	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[rq->engine->id].state;
 	struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
 	struct page *page;
 	uint32_t *reg_state;
@@ -355,7 +353,7 @@  static int execlists_update_context(struct drm_i915_gem_request *rq)
 	reg_state[CTX_RING_TAIL+1] = rq->tail;
 	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
 
-	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+	if (ppgtt && !USES_FULL_48BIT_PPGTT(rq->i915)) {
 		/* True 32b PPGTT with dynamic page allocation: update PDP
 		 * registers and point the unallocated PDPs to scratch page.
 		 * PML4 is allocated during ppgtt init, so this is not needed
@@ -538,27 +536,27 @@  void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 
 static int execlists_context_queue(struct drm_i915_gem_request *request)
 {
-	struct intel_engine_cs *ring = request->ring;
+	struct intel_engine_cs *engine = request->engine;
 	struct drm_i915_gem_request *cursor;
 	int num_elements = 0;
 
-	if (request->ctx != ring->default_context)
+	if (request->ctx != engine->default_context)
 		intel_lr_context_pin(request);
 
 	i915_gem_request_reference(request);
 
 	request->tail = request->ringbuf->tail;
 
-	spin_lock_irq(&ring->execlist_lock);
+	spin_lock_irq(&engine->execlist_lock);
 
-	list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
+	list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
 		if (++num_elements > 2)
 			break;
 
 	if (num_elements > 2) {
 		struct drm_i915_gem_request *tail_req;
 
-		tail_req = list_last_entry(&ring->execlist_queue,
+		tail_req = list_last_entry(&engine->execlist_queue,
 					   struct drm_i915_gem_request,
 					   execlist_link);
 
@@ -567,41 +565,41 @@  static int execlists_context_queue(struct drm_i915_gem_request *request)
 				"More than 2 already-submitted reqs queued\n");
 			list_del(&tail_req->execlist_link);
 			list_add_tail(&tail_req->execlist_link,
-				&ring->execlist_retired_req_list);
+				&engine->execlist_retired_req_list);
 		}
 	}
 
-	list_add_tail(&request->execlist_link, &ring->execlist_queue);
+	list_add_tail(&request->execlist_link, &engine->execlist_queue);
 	if (num_elements == 0)
-		execlists_context_unqueue(ring);
+		execlists_context_unqueue(engine);
 
-	spin_unlock_irq(&ring->execlist_lock);
+	spin_unlock_irq(&engine->execlist_lock);
 
 	return 0;
 }
 
 static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	uint32_t flush_domains;
 	int ret;
 
 	flush_domains = 0;
-	if (ring->gpu_caches_dirty)
+	if (engine->gpu_caches_dirty)
 		flush_domains = I915_GEM_GPU_DOMAINS;
 
-	ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
+	ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 	if (ret)
 		return ret;
 
-	ring->gpu_caches_dirty = false;
+	engine->gpu_caches_dirty = false;
 	return 0;
 }
 
 static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 				 struct list_head *vmas)
 {
-	const unsigned other_rings = ~intel_ring_flag(req->ring);
+	const unsigned other_rings = ~intel_ring_flag(req->engine);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -611,7 +609,7 @@  static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, req->ring, &req);
+			ret = i915_gem_object_sync(obj, req->engine, &req);
 			if (ret)
 				return ret;
 		}
@@ -635,9 +633,9 @@  int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 {
 	int ret;
 
-	request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
+	request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
 
-	if (request->ctx != request->ring->default_context) {
+	if (request->ctx != request->engine->default_context) {
 		ret = intel_lr_context_pin(request);
 		if (ret)
 			return ret;
@@ -660,7 +658,7 @@  intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
 	intel_ring_advance(request->ringbuf);
 
-	if (intel_ring_stopped(request->ring))
+	if (intel_ring_stopped(request->engine))
 		return;
 
 	execlists_context_queue(request);
@@ -811,35 +809,35 @@  void intel_logical_ring_stop(struct intel_engine_cs *ring)
 
 int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
-	if (!ring->gpu_caches_dirty)
+	if (!engine->gpu_caches_dirty)
 		return 0;
 
-	ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
+	ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
-	ring->gpu_caches_dirty = false;
+	engine->gpu_caches_dirty = false;
 	return 0;
 }
 
 static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 {
-	struct intel_engine_cs *ring = rq->ring;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	int engine = rq->engine->id;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[engine].state;
 	struct intel_ringbuffer *ringbuf = rq->ringbuf;
 	int ret = 0;
 
-	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-	if (rq->ctx->engine[ring->id].pin_count++ == 0) {
+	WARN_ON(!mutex_is_locked(&rq->i915->dev->struct_mutex));
+	if (rq->ctx->engine[engine].pin_count++ == 0) {
 		ret = i915_gem_obj_ggtt_pin(ctx_obj,
 				GEN8_LR_CONTEXT_ALIGN, 0);
 		if (ret)
 			goto reset_pin_count;
 
-		ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
+		ret = intel_pin_and_map_ringbuffer_obj(rq->i915->dev, ringbuf);
 		if (ret)
 			goto unpin_ctx_obj;
 
@@ -851,20 +849,20 @@  static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 unpin_ctx_obj:
 	i915_gem_object_ggtt_unpin(ctx_obj);
 reset_pin_count:
-	rq->ctx->engine[ring->id].pin_count = 0;
+	rq->ctx->engine[engine].pin_count = 0;
 
 	return ret;
 }
 
 void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
 {
-	struct intel_engine_cs *ring = rq->ring;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	int engine = rq->engine->id;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[engine].state;
 	struct intel_ringbuffer *ringbuf = rq->ringbuf;
 
 	if (ctx_obj) {
-		WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-		if (--rq->ctx->engine[ring->id].pin_count == 0) {
+		WARN_ON(!mutex_is_locked(&rq->i915->dev->struct_mutex));
+		if (--rq->ctx->engine[engine].pin_count == 0) {
 			intel_unpin_ringbuffer_obj(ringbuf);
 			i915_gem_object_ggtt_unpin(ctx_obj);
 		}
@@ -874,7 +872,7 @@  void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	struct drm_i915_private *dev_priv = req->i915;
 	struct i915_workarounds *w = &dev_priv->workarounds;
@@ -882,7 +880,7 @@  static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (WARN_ON_ONCE(w->count == 0))
 		return 0;
 
-	ring->gpu_caches_dirty = true;
+	engine->gpu_caches_dirty = true;
 	ret = logical_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
@@ -900,7 +898,7 @@  static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_advance(ringbuf);
 
-	ring->gpu_caches_dirty = true;
+	engine->gpu_caches_dirty = true;
 	ret = logical_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
@@ -1319,7 +1317,7 @@  static int gen9_init_render_ring(struct intel_engine_cs *ring)
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
 	int i, ret;
@@ -1332,9 +1330,9 @@  static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-		intel_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+		intel_ring_emit(ringbuf, GEN8_RING_PDP_UDW(engine, i));
 		intel_ring_emit(ringbuf, upper_32_bits(pd_daddr));
-		intel_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+		intel_ring_emit(ringbuf, GEN8_RING_PDP_LDW(engine, i));
 		intel_ring_emit(ringbuf, lower_32_bits(pd_daddr));
 	}
 
@@ -1358,14 +1356,14 @@  static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 	 * not idle). PML4 is allocated during ppgtt init so this is
 	 * not needed in 48-bit.*/
 	if (req->ctx->ppgtt &&
-	    (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
+	    (intel_ring_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
 		if (!USES_FULL_48BIT_PPGTT(req->i915)) {
 			ret = intel_logical_ring_emit_pdps(req);
 			if (ret)
 				return ret;
 		}
 
-		req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
+		req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->engine);
 	}
 
 	ret = intel_ring_begin(req, 4);
@@ -1575,21 +1573,21 @@  static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
 	struct render_state so;
 	int ret;
 
-	ret = i915_gem_render_state_prepare(req->ring, &so);
+	ret = i915_gem_render_state_prepare(req->engine, &so);
 	if (ret)
 		return ret;
 
 	if (so.rodata == NULL)
 		return 0;
 
-	ret = req->ring->emit_bb_start(req, so.ggtt_offset,
-				       I915_DISPATCH_SECURE);
+	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
+					 I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
-	ret = req->ring->emit_bb_start(req,
-				       (so.ggtt_offset + so.aux_batch_offset),
-				       I915_DISPATCH_SECURE);
+	ret = req->engine->emit_bb_start(req,
+					 (so.ggtt_offset + so.aux_batch_offset),
+					 I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index ac0a982bbf55..d79c9c0bbffb 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -138,21 +138,21 @@  static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
  *
  * Return: true if there are applicable MOCS settings for the device.
  */
-static bool get_mocs_settings(struct drm_device *dev,
+static bool get_mocs_settings(struct drm_i915_private *dev_priv,
 			      struct drm_i915_mocs_table *table)
 {
 	bool result = false;
 
-	if (IS_SKYLAKE(dev)) {
+	if (IS_SKYLAKE(dev_priv)) {
 		table->size  = ARRAY_SIZE(skylake_mocs_table);
 		table->table = skylake_mocs_table;
 		result = true;
-	} else if (IS_BROXTON(dev)) {
+	} else if (IS_BROXTON(dev_priv)) {
 		table->size  = ARRAY_SIZE(broxton_mocs_table);
 		table->table = broxton_mocs_table;
 		result = true;
 	} else {
-		WARN_ONCE(INTEL_INFO(dev)->gen >= 9,
+		WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
 			  "Platform that should have a MOCS table does not.\n");
 	}
 
@@ -297,7 +297,7 @@  int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
 	struct drm_i915_mocs_table t;
 	int ret;
 
-	if (get_mocs_settings(req->ring->dev, &t)) {
+	if (get_mocs_settings(req->i915, &t)) {
 		/* Program the control registers */
 		ret = emit_mocs_control_table(req, &t, GEN9_GFX_MOCS_0);
 		if (ret)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9821c2a8074a..cc060588a287 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -216,7 +216,7 @@  static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
 	struct intel_ringbuffer *ring = req->ringbuf;
-	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -253,7 +253,7 @@  gen6_render_ring_flush(struct drm_i915_gem_request *req,
 {
 	struct intel_ringbuffer *ring = req->ringbuf;
 	u32 flags = 0;
-	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -326,7 +326,7 @@  gen7_render_ring_flush(struct drm_i915_gem_request *req,
 {
 	struct intel_ringbuffer *ring = req->ringbuf;
 	u32 flags = 0;
-	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/*
@@ -410,7 +410,7 @@  gen8_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	flags |= PIPE_CONTROL_CS_STALL;
@@ -720,7 +720,7 @@  static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (WARN_ON_ONCE(w->count == 0))
 		return 0;
 
-	req->ring->gpu_caches_dirty = true;
+	req->engine->gpu_caches_dirty = true;
 	ret = intel_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
@@ -738,7 +738,7 @@  static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_advance(ring);
 
-	req->ring->gpu_caches_dirty = true;
+	req->engine->gpu_caches_dirty = true;
 	ret = intel_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
@@ -1199,7 +1199,7 @@  static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 
 	for_each_ring(waiter, dev_priv, i) {
 		u32 seqno;
-		u64 gtt_offset = signaller_req->ring->semaphore.signal_ggtt[i];
+		u64 gtt_offset = signaller_req->engine->semaphore.signal_ggtt[i];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
@@ -1239,7 +1239,7 @@  static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 
 	for_each_ring(waiter, dev_priv, i) {
 		u32 seqno;
-		u64 gtt_offset = signaller_req->ring->semaphore.signal_ggtt[i];
+		u64 gtt_offset = signaller_req->engine->semaphore.signal_ggtt[i];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
@@ -1276,7 +1276,7 @@  static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		return ret;
 
 	for_each_ring(useless, dev_priv, i) {
-		u32 mbox_reg = signaller_req->ring->semaphore.mbox.signal[i];
+		u32 mbox_reg = signaller_req->engine->semaphore.mbox.signal[i];
 		if (mbox_reg != GEN6_NOSYNC) {
 			u32 seqno = i915_gem_request_get_seqno(signaller_req);
 			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
@@ -1306,8 +1306,8 @@  gen6_add_request(struct drm_i915_gem_request *req)
 	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
-	if (req->ring->semaphore.signal)
-		ret = req->ring->semaphore.signal(req, 4);
+	if (req->engine->semaphore.signal)
+		ret = req->engine->semaphore.signal(req, 4);
 	else
 		ret = intel_ring_begin(req, 4);
 
@@ -1318,7 +1318,7 @@  gen6_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	__intel_ring_advance(req->ring);
+	__intel_ring_advance(req->engine);
 
 	return 0;
 }
@@ -1356,10 +1356,10 @@  gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 				MI_SEMAPHORE_SAD_GTE_SDD);
 	intel_ring_emit(waiter, seqno);
 	intel_ring_emit(waiter,
-			lower_32_bits(GEN8_WAIT_OFFSET(waiter_req->ring,
+			lower_32_bits(GEN8_WAIT_OFFSET(waiter_req->engine,
 						       signaller->id)));
 	intel_ring_emit(waiter,
-			upper_32_bits(GEN8_WAIT_OFFSET(waiter_req->ring,
+			upper_32_bits(GEN8_WAIT_OFFSET(waiter_req->engine,
 						       signaller->id)));
 	intel_ring_advance(waiter);
 	return 0;
@@ -1374,7 +1374,7 @@  gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
-	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter_req->ring->id];
+	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter_req->engine->id];
 	int ret;
 
 	/* Throughout all of the GEM code, seqno passed implies our current
@@ -1419,7 +1419,7 @@  static int
 pc_render_add_request(struct drm_i915_gem_request *req)
 {
 	struct intel_ringbuffer *ring = req->ringbuf;
-	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -1437,7 +1437,7 @@ pc_render_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
-	intel_ring_emit(ring, req->ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, req->engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
 	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
@@ -1456,10 +1456,10 @@ pc_render_add_request(struct drm_i915_gem_request *req)
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
-	intel_ring_emit(ring, req->ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, req->engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
 	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, 0);
-	__intel_ring_advance(req->ring);
+	__intel_ring_advance(req->engine);
 
 	return 0;
 }
@@ -1639,7 +1639,7 @@ i9xx_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	__intel_ring_advance(req->ring);
+	__intel_ring_advance(req->engine);
 
 	return 0;
 }
@@ -1801,7 +1801,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 unsigned dispatch_flags)
 {
 	struct intel_ringbuffer *ring = req->ringbuf;
-	u32 cs_offset = req->ring->scratch.gtt_offset;
+	u32 cs_offset = req->engine->scratch.gtt_offset;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -2164,7 +2164,7 @@ int intel_ring_idle(struct intel_engine_cs *ring)
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
-	request->ringbuf = request->ring->buffer;
+	request->ringbuf = request->engine->buffer;
 	return 0;
 }
 
@@ -2218,7 +2218,7 @@ void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_gem_request *target;
 	unsigned space;
 	int ret;
@@ -2229,7 +2229,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	/* The whole point of reserving space is to not wait! */
 	WARN_ON(ringbuf->reserved_in_use);
 
-	list_for_each_entry(target, &ring->request_list, list) {
+	list_for_each_entry(target, &engine->request_list, list) {
 		/*
 		 * The request queue is per-engine, so can contain requests
 		 * from multiple ringbuffers. Here, we must ignore any that
@@ -2245,7 +2245,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 			break;
 	}
 
-	if (WARN_ON(&target->list == &ring->request_list))
+	if (WARN_ON(&target->list == &engine->request_list))
 		return -ENOSPC;
 
 	ret = i915_wait_request(target);
@@ -2931,40 +2931,40 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 int
 intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
-	if (!ring->gpu_caches_dirty)
+	if (!engine->gpu_caches_dirty)
 		return 0;
 
-	ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
+	ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
 	trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
 
-	ring->gpu_caches_dirty = false;
+	engine->gpu_caches_dirty = false;
 	return 0;
 }
 
 int
 intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	uint32_t flush_domains;
 	int ret;
 
 	flush_domains = 0;
-	if (ring->gpu_caches_dirty)
+	if (engine->gpu_caches_dirty)
 		flush_domains = I915_GEM_GPU_DOMAINS;
 
-	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
+	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 	if (ret)
 		return ret;
 
 	trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 
-	ring->gpu_caches_dirty = false;
+	engine->gpu_caches_dirty = false;
 	return 0;
 }