[1/5] drm/i915: Rename local struct intel_engine_cs variables

Message ID 1458126040-33105-1-git-send-email-tvrtko.ursulin@linux.intel.com (mailing list archive)
State New, archived

Commit Message

Tvrtko Ursulin March 16, 2016, 11 a.m. UTC
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Done by the Coccinelle script below plus a manual
intervention to GEN8_RING_SEMAPHORE_INIT.

@@
expression E;
@@
- struct intel_engine_cs *ring = E;
+ struct intel_engine_cs *engine = E;
<+...
- ring
+ engine
...+>
@@
@@
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
<+...
- ring
+ engine
...+>
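
For illustration, a minimal sketch of the effect on a hypothetical
engine loop follows; the helper below is made up for this example, and
the spatch invocation in the comment assumes the script above is saved
as rename.cocci. (The manual intervention was presumably needed because
GEN8_RING_SEMAPHORE_INIT is a macro whose body refers to the variable
by name, which a semantic patch cannot match.)

/*
 * Typical invocation, assuming the script above is saved as
 * rename.cocci:
 *
 *   spatch --sp-file rename.cocci --in-place --dir drivers/gpu/drm/i915/
 *
 * Before the rename:
 */
static int count_busy_rings(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i, busy = 0;

	for_each_ring(ring, dev_priv, i)
		if (!list_empty(&ring->request_list))
			busy++;

	return busy;
}

/* After the rename, the declaration and every use become "engine": */
static int count_busy_rings(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	int i, busy = 0;

	for_each_ring(engine, dev_priv, i)
		if (!list_empty(&engine->request_list))
			busy++;

	return busy;
}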

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c        | 203 ++++----
 drivers/gpu/drm/i915/i915_gem.c            | 136 ++---
 drivers/gpu/drm/i915/i915_gem_context.c    | 140 ++---
 drivers/gpu/drm/i915/i915_gem_debug.c      |  15 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  74 +--
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  90 ++--
 drivers/gpu/drm/i915/i915_gpu_error.c      |  35 +-
 drivers/gpu/drm/i915/i915_guc_submission.c |  30 +-
 drivers/gpu/drm/i915/i915_irq.c            |  84 +--
 drivers/gpu/drm/i915/intel_display.c       | 106 ++--
 drivers/gpu/drm/i915/intel_guc_loader.c    |  12 +-
 drivers/gpu/drm/i915/intel_lrc.c           | 216 ++++----
 drivers/gpu/drm/i915/intel_mocs.c          |   4 +-
 drivers/gpu/drm/i915/intel_overlay.c       |  60 +--
 drivers/gpu/drm/i915/intel_pm.c            |  36 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 785 +++++++++++++++--------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  14 +-
 17 files changed, 1035 insertions(+), 1005 deletions(-)

Comments

Tvrtko Ursulin March 16, 2016, 3:35 p.m. UTC | #1
On 16/03/16 15:01, Patchwork wrote:
> == Series Details ==
>
> Series: series starting with [1/5] drm/i915: Rename local struct intel_engine_cs variables
> URL   : https://patchwork.freedesktop.org/series/4508/
> State : failure
>
> == Summary ==
>
> Series 4508v1 Series without cover letter
> http://patchwork.freedesktop.org/api/1.0/series/4508/revisions/1/mbox/
>
> Test drv_module_reload_basic:
>                  skip       -> PASS       (bdw-nuci7)
> Test kms_flip:
>          Subgroup basic-flip-vs-wf_vblank:
>                  pass       -> FAIL       (ivb-t430s)

Unrelated https://bugs.freedesktop.org/show_bug.cgi?id=94294

>          Subgroup basic-plain-flip:
>                  pass       -> DMESG-WARN (hsw-brixbox)

Unrelated https://bugs.freedesktop.org/show_bug.cgi?id=94349

> Test kms_frontbuffer_tracking:
>          Subgroup basic:
>                  dmesg-warn -> PASS       (hsw-brixbox)
> Test kms_pipe_crc_basic:
>          Subgroup nonblocking-crc-pipe-b-frame-sequence:
>                  pass       -> DMESG-WARN (hsw-brixbox)

Unrelated https://bugs.freedesktop.org/show_bug.cgi?id=94384

>                  dmesg-warn -> PASS       (bdw-ultra)
>          Subgroup read-crc-pipe-b:
>                  dmesg-warn -> PASS       (hsw-gt2)
>          Subgroup read-crc-pipe-b-frame-sequence:
>                  dmesg-warn -> PASS       (hsw-brixbox)
> Test pm_rpm:
>          Subgroup basic-pci-d3-state:
>                  fail       -> DMESG-FAIL (snb-x220t)

Unrelated https://bugs.freedesktop.org/show_bug.cgi?id=94349

>
> bdw-nuci7        total:194  pass:182  dwarn:0   dfail:0   fail:0   skip:12
> bdw-ultra        total:194  pass:173  dwarn:0   dfail:0   fail:0   skip:21
> byt-nuc          total:194  pass:155  dwarn:4   dfail:0   fail:0   skip:35
> hsw-brixbox      total:194  pass:170  dwarn:2   dfail:0   fail:0   skip:22
> hsw-gt2          total:194  pass:177  dwarn:0   dfail:0   fail:0   skip:17
> ivb-t430s        total:194  pass:168  dwarn:0   dfail:0   fail:1   skip:25
> skl-i5k-2        total:194  pass:171  dwarn:0   dfail:0   fail:0   skip:23
> skl-i7k-2        total:194  pass:171  dwarn:0   dfail:0   fail:0   skip:23
> skl-nuci5        total:194  pass:183  dwarn:0   dfail:0   fail:0   skip:11
> snb-x220t        total:194  pass:159  dwarn:1   dfail:1   fail:0   skip:33
>
> Results at /archive/results/CI_IGT_test/Patchwork_1616/
>
> a5c43f5d1b4968a370f54bdda5387ce213aca785 drm-intel-nightly: 2016y-03m-16d-14h-09m-03s UTC integration manifest
> 12b10c002d2107d9c67685b4b94008145fe5e4e4 drm/i915: More renaming of rings to engines
> 913a88b2b22fbc640542c5ce70128d6293063123 drm/i915: More intel_engine_cs renaming
> 216f2de3f6d87d725cc437dc0b73f38bd4831c9e drm/i915: Rename intel_engine_cs struct members
> 262439daabb6ce89701667f970b5179e5f487836 drm/i915: Rename intel_engine_cs function parameters
> 4734cd9fd06a323fb7fd8468eaef529bb6dba052 drm/i915: Rename local struct intel_engine_cs variables

Series merged, thanks for the patch :) and review!

Regards,

Tvrtko
Ville Syrjälä March 16, 2016, 3:40 p.m. UTC | #2
On Wed, Mar 16, 2016 at 03:35:13PM +0000, Tvrtko Ursulin wrote:
> 
> On 16/03/16 15:01, Patchwork wrote:
> > == Series Details ==
> >
> > Series: series starting with [1/5] drm/i915: Rename local struct intel_engine_cs variables
> > URL   : https://patchwork.freedesktop.org/series/4508/
> > State : failure
> >
> > == Summary ==
> >
> > Series 4508v1 Series without cover letter
> > http://patchwork.freedesktop.org/api/1.0/series/4508/revisions/1/mbox/
> >
> > Test drv_module_reload_basic:
> >                  skip       -> PASS       (bdw-nuci7)
> > Test kms_flip:
> >          Subgroup basic-flip-vs-wf_vblank:
> >                  pass       -> FAIL       (ivb-t430s)
> 
> Unrelated https://bugs.freedesktop.org/show_bug.cgi?id=94294

I'd like people to add a bit more information to these analysis mails.
If there's just the bugzilla link, I actually have to open it before
I can see what the problem was. That makes it harder to keep on top of
what the most common BAT problems are.

> 
> >          Subgroup basic-plain-flip:
> >                  pass       -> DMESG-WARN (hsw-brixbox)
> 
> Unrelated https://bugs.freedesktop.org/show_bug.cgi?id=94349
> 
> > Test kms_frontbuffer_tracking:
> >          Subgroup basic:
> >                  dmesg-warn -> PASS       (hsw-brixbox)
> > Test kms_pipe_crc_basic:
> >          Subgroup nonblocking-crc-pipe-b-frame-sequence:
> >                  pass       -> DMESG-WARN (hsw-brixbox)
> 
> Unrelated https://bugs.freedesktop.org/show_bug.cgi?id=94384
> 
> >                  dmesg-warn -> PASS       (bdw-ultra)
> >          Subgroup read-crc-pipe-b:
> >                  dmesg-warn -> PASS       (hsw-gt2)
> >          Subgroup read-crc-pipe-b-frame-sequence:
> >                  dmesg-warn -> PASS       (hsw-brixbox)
> > Test pm_rpm:
> >          Subgroup basic-pci-d3-state:
> >                  fail       -> DMESG-FAIL (snb-x220t)
> 
> Unrelated https://bugs.freedesktop.org/show_bug.cgi?id=94349
> 
> >
> > bdw-nuci7        total:194  pass:182  dwarn:0   dfail:0   fail:0   skip:12
> > bdw-ultra        total:194  pass:173  dwarn:0   dfail:0   fail:0   skip:21
> > byt-nuc          total:194  pass:155  dwarn:4   dfail:0   fail:0   skip:35
> > hsw-brixbox      total:194  pass:170  dwarn:2   dfail:0   fail:0   skip:22
> > hsw-gt2          total:194  pass:177  dwarn:0   dfail:0   fail:0   skip:17
> > ivb-t430s        total:194  pass:168  dwarn:0   dfail:0   fail:1   skip:25
> > skl-i5k-2        total:194  pass:171  dwarn:0   dfail:0   fail:0   skip:23
> > skl-i7k-2        total:194  pass:171  dwarn:0   dfail:0   fail:0   skip:23
> > skl-nuci5        total:194  pass:183  dwarn:0   dfail:0   fail:0   skip:11
> > snb-x220t        total:194  pass:159  dwarn:1   dfail:1   fail:0   skip:33
> >
> > Results at /archive/results/CI_IGT_test/Patchwork_1616/
> >
> > a5c43f5d1b4968a370f54bdda5387ce213aca785 drm-intel-nightly: 2016y-03m-16d-14h-09m-03s UTC integration manifest
> > 12b10c002d2107d9c67685b4b94008145fe5e4e4 drm/i915: More renaming of rings to engines
> > 913a88b2b22fbc640542c5ce70128d6293063123 drm/i915: More intel_engine_cs renaming
> > 216f2de3f6d87d725cc437dc0b73f38bd4831c9e drm/i915: Rename intel_engine_cs struct members
> > 262439daabb6ce89701667f970b5179e5f487836 drm/i915: Rename intel_engine_cs function parameters
> > 4734cd9fd06a323fb7fd8468eaef529bb6dba052 drm/i915: Rename local struct intel_engine_cs variables
> 
> Series merged, thanks for the patch :) and review!
> 
> Regards,
> 
> Tvrtko
Tvrtko Ursulin March 16, 2016, 3:45 p.m. UTC | #3
On 16/03/16 15:40, Ville Syrjälä wrote:
> On Wed, Mar 16, 2016 at 03:35:13PM +0000, Tvrtko Ursulin wrote:
>>
>> On 16/03/16 15:01, Patchwork wrote:
>>> == Series Details ==
>>>
>>> Series: series starting with [1/5] drm/i915: Rename local struct intel_engine_cs variables
>>> URL   : https://patchwork.freedesktop.org/series/4508/
>>> State : failure
>>>
>>> == Summary ==
>>>
>>> Series 4508v1 Series without cover letter
>>> http://patchwork.freedesktop.org/api/1.0/series/4508/revisions/1/mbox/
>>>
>>> Test drv_module_reload_basic:
>>>                   skip       -> PASS       (bdw-nuci7)
>>> Test kms_flip:
>>>           Subgroup basic-flip-vs-wf_vblank:
>>>                   pass       -> FAIL       (ivb-t430s)
>>
>> Unrelated https://bugs.freedesktop.org/show_bug.cgi?id=94294
>
> I'd like people to add a bit more information to these analysis mails.
> If there's just the bugzilla link, I actually have to open it before
> I can see what the problem was. That makes it harder to keep on top of
> what the most common BAT problems are.

Yeah, in principle I agree, but some of these are the usual suspects
which have been dragging on for so long that it would take an iron will
(or something nastier) to keep repeating the same analysis over and
over again.

Sometimes I remember to add a link to the new CI logs to the relevant
BZ entry, which is probably more useful for people looking at the
BAT-marked bugs (which are all critical anyway) to gather enough data
points when debugging.

Regards,

Tvrtko
Daniel Vetter March 16, 2016, 3:56 p.m. UTC | #4
On Wed, Mar 16, 2016 at 4:45 PM, Tvrtko Ursulin
<tvrtko.ursulin@linux.intel.com> wrote:
> On 16/03/16 15:40, Ville Syrjälä wrote:
>>
>> On Wed, Mar 16, 2016 at 03:35:13PM +0000, Tvrtko Ursulin wrote:
>>>
>>>
>>> On 16/03/16 15:01, Patchwork wrote:
>>>>
>>>> == Series Details ==
>>>>
>>>> Series: series starting with [1/5] drm/i915: Rename local struct
>>>> intel_engine_cs variables
>>>> URL   : https://patchwork.freedesktop.org/series/4508/
>>>> State : failure
>>>>
>>>> == Summary ==
>>>>
>>>> Series 4508v1 Series without cover letter
>>>> http://patchwork.freedesktop.org/api/1.0/series/4508/revisions/1/mbox/
>>>>
>>>> Test drv_module_reload_basic:
>>>>                   skip       -> PASS       (bdw-nuci7)
>>>> Test kms_flip:
>>>>           Subgroup basic-flip-vs-wf_vblank:
>>>>                   pass       -> FAIL       (ivb-t430s)
>>>
>>>
>>> Unrelated https://bugs.freedesktop.org/show_bug.cgi?id=94294
>>
>>
>> I'd like people to add a bit more information to these analysis mails.
>> If there's just the bugzilla link, I actually have to open it before
>> I can see what the problem was. That makes it harder to keep on top of
>> what the most common BAT problems are.
>
>
> Yeah, in principle I agree, but some of these are the usual suspects
> which have been dragging on for so long that it would take an iron will
> (or something nastier) to keep repeating the same analysis over and
> over again.

Someone is asking for a really nasty maintainer to provide motivation?

More seriously and honestly, I'm not entirely sure how we can get out
of this "CI seems constantly broken" place :(
-Daniel
Ville Syrjälä March 16, 2016, 4:14 p.m. UTC | #5
On Wed, Mar 16, 2016 at 03:45:08PM +0000, Tvrtko Ursulin wrote:
> 
> On 16/03/16 15:40, Ville Syrjälä wrote:
> > On Wed, Mar 16, 2016 at 03:35:13PM +0000, Tvrtko Ursulin wrote:
> >>
> >> On 16/03/16 15:01, Patchwork wrote:
> >>> == Series Details ==
> >>>
> >>> Series: series starting with [1/5] drm/i915: Rename local struct intel_engine_cs variables
> >>> URL   : https://patchwork.freedesktop.org/series/4508/
> >>> State : failure
> >>>
> >>> == Summary ==
> >>>
> >>> Series 4508v1 Series without cover letter
> >>> http://patchwork.freedesktop.org/api/1.0/series/4508/revisions/1/mbox/
> >>>
> >>> Test drv_module_reload_basic:
> >>>                   skip       -> PASS       (bdw-nuci7)
> >>> Test kms_flip:
> >>>           Subgroup basic-flip-vs-wf_vblank:
> >>>                   pass       -> FAIL       (ivb-t430s)
> >>
> >> Unrelated https://bugs.freedesktop.org/show_bug.cgi?id=94294
> >
> > I'd like people to add a bit more information to these analysis mails.
> > If there's just the bugzilla link, I actually have to open it before
> > I can see what the problem was. That makes it harder to keep on top of
> > what the most common BAT problems are.
> 
> Yeah, in principle I agree, but some of these are the usual suspects
> which have been dragging on for so long that it would take an iron will
> (or something nastier) to keep repeating the same analysis over and
> over again.

Just copy-pasting a few relevant lines is enough. You already had to
read those lines anyway to look up the bug. You may know it's the same
thing all over again, but no one else will unless they have memorized
the bugzilla IDs or click on every link in these mails. So I think just
putting up the links without further information masks the scope of the
problem, and thus makes it less likely that someone will step up and go
fix things.

> 
> Sometimes I remember to add a link to the new CI logs to the relevant
> BZ entry, which is probably more useful for people looking at the
> BAT-marked bugs (which are all critical anyway) to gather enough data
> points when debugging.
> 
> Regards,
> 
> Tvrtko
Tvrtko Ursulin March 16, 2016, 4:40 p.m. UTC | #6
On 16/03/16 15:56, Daniel Vetter wrote:
> On Wed, Mar 16, 2016 at 4:45 PM, Tvrtko Ursulin
> <tvrtko.ursulin@linux.intel.com> wrote:
>> On 16/03/16 15:40, Ville Syrjälä wrote:
>>>
>>> On Wed, Mar 16, 2016 at 03:35:13PM +0000, Tvrtko Ursulin wrote:
>>>>
>>>>
>>>> On 16/03/16 15:01, Patchwork wrote:
>>>>>
>>>>> == Series Details ==
>>>>>
>>>>> Series: series starting with [1/5] drm/i915: Rename local struct
>>>>> intel_engine_cs variables
>>>>> URL   : https://patchwork.freedesktop.org/series/4508/
>>>>> State : failure
>>>>>
>>>>> == Summary ==
>>>>>
>>>>> Series 4508v1 Series without cover letter
>>>>> http://patchwork.freedesktop.org/api/1.0/series/4508/revisions/1/mbox/
>>>>>
>>>>> Test drv_module_reload_basic:
>>>>>                    skip       -> PASS       (bdw-nuci7)
>>>>> Test kms_flip:
>>>>>            Subgroup basic-flip-vs-wf_vblank:
>>>>>                    pass       -> FAIL       (ivb-t430s)
>>>>
>>>>
>>>> Unrelated https://bugs.freedesktop.org/show_bug.cgi?id=94294
>>>
>>>
>>> I'd like people to add a bit more information to these analysis mails.
>>> If there's just the bugzilla link, I actually have to open it before
>>> I can see what the problem was. That makes it harder to keep on top of
>>> what the most common BAT problems are.
>>
>>
>> Yeah, in principle I agree, but some of these are the usual suspects
>> which have been dragging on for so long that it would take an iron will
>> (or something nastier) to keep repeating the same analysis over and
>> over again.
>
> Someone is asking for a really nasty maintainer to provide motivation?
>
> More seriously and honestly, I'm not entirely sure how we can get out
> of this "CI seems constantly broken" place :(

I've noticed people are working on some of those constant naggers, so 
perhaps there is light at the end of the tunnel.

The key is having the right hardware and, IMHO, assigning people to the 
critical BAT-tagged Bugzilla entries. There are plenty of those, with 
more or fewer links to CI failure examples, so it looks to me like there 
is certainly enough material already.

Regards,

Tvrtko

Patch

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 15aacd0ee66f..5037ccb18e77 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -129,7 +129,7 @@  static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct i915_vma *vma;
 	int pin_count = 0;
 	int i;
@@ -143,7 +143,7 @@  describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->base.size / 1024,
 		   obj->base.read_domains,
 		   obj->base.write_domain);
-	for_each_ring(ring, dev_priv, i)
+	for_each_ring(engine, dev_priv, i)
 		seq_printf(m, "%x ",
 				i915_gem_request_get_seqno(obj->last_read_req[i]));
 	seq_printf(m, "] %x %x%s%s%s",
@@ -397,15 +397,15 @@  static void print_batch_pool_stats(struct seq_file *m,
 {
 	struct drm_i915_gem_object *obj;
 	struct file_stats stats;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i, j;
 
 	memset(&stats, 0, sizeof(stats));
 
-	for_each_ring(ring, dev_priv, i) {
-		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
+	for_each_ring(engine, dev_priv, i) {
+		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
 			list_for_each_entry(obj,
-					    &ring->batch_pool.cache_list[j],
+					    &engine->batch_pool.cache_list[j],
 					    batch_pool_link)
 				per_file_stats(0, obj, &stats);
 		}
@@ -591,14 +591,13 @@  static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 					   pipe, plane);
 			}
 			if (work->flip_queued_req) {
-				struct intel_engine_cs *ring =
-					i915_gem_request_get_ring(work->flip_queued_req);
+				struct intel_engine_cs *engine = i915_gem_request_get_ring(work->flip_queued_req);
 
 				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
-					   ring->name,
+					   engine->name,
 					   i915_gem_request_get_seqno(work->flip_queued_req),
 					   dev_priv->next_seqno,
-					   ring->get_seqno(ring, true),
+					   engine->get_seqno(engine, true),
 					   i915_gem_request_completed(work->flip_queued_req, true));
 			} else
 				seq_printf(m, "Flip not associated with any ring\n");
@@ -637,7 +636,7 @@  static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int total = 0;
 	int ret, i, j;
 
@@ -645,20 +644,20 @@  static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 	if (ret)
 		return ret;
 
-	for_each_ring(ring, dev_priv, i) {
-		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
+	for_each_ring(engine, dev_priv, i) {
+		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
 			int count;
 
 			count = 0;
 			list_for_each_entry(obj,
-					    &ring->batch_pool.cache_list[j],
+					    &engine->batch_pool.cache_list[j],
 					    batch_pool_link)
 				count++;
 			seq_printf(m, "%s cache[%d]: %d objects\n",
-				   ring->name, j, count);
+				   engine->name, j, count);
 
 			list_for_each_entry(obj,
-					    &ring->batch_pool.cache_list[j],
+					    &engine->batch_pool.cache_list[j],
 					    batch_pool_link) {
 				seq_puts(m, "   ");
 				describe_obj(m, obj);
@@ -681,7 +680,7 @@  static int i915_gem_request_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct drm_i915_gem_request *req;
 	int ret, any, i;
 
@@ -690,17 +689,17 @@  static int i915_gem_request_info(struct seq_file *m, void *data)
 		return ret;
 
 	any = 0;
-	for_each_ring(ring, dev_priv, i) {
+	for_each_ring(engine, dev_priv, i) {
 		int count;
 
 		count = 0;
-		list_for_each_entry(req, &ring->request_list, list)
+		list_for_each_entry(req, &engine->request_list, list)
 			count++;
 		if (count == 0)
 			continue;
 
-		seq_printf(m, "%s requests: %d\n", ring->name, count);
-		list_for_each_entry(req, &ring->request_list, list) {
+		seq_printf(m, "%s requests: %d\n", engine->name, count);
+		list_for_each_entry(req, &engine->request_list, list) {
 			struct task_struct *task;
 
 			rcu_read_lock();
@@ -739,7 +738,7 @@  static int i915_gem_seqno_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int ret, i;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -747,8 +746,8 @@  static int i915_gem_seqno_info(struct seq_file *m, void *data)
 		return ret;
 	intel_runtime_pm_get(dev_priv);
 
-	for_each_ring(ring, dev_priv, i)
-		i915_ring_seqno_info(m, ring);
+	for_each_ring(engine, dev_priv, i)
+		i915_ring_seqno_info(m, engine);
 
 	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
@@ -762,7 +761,7 @@  static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int ret, i, pipe;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -934,13 +933,13 @@  static int i915_interrupt_info(struct seq_file *m, void *data)
 		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
 			   I915_READ(GTIMR));
 	}
-	for_each_ring(ring, dev_priv, i) {
+	for_each_ring(engine, dev_priv, i) {
 		if (INTEL_INFO(dev)->gen >= 6) {
 			seq_printf(m,
 				   "Graphics Interrupt mask (%s):	%08x\n",
-				   ring->name, I915_READ_IMR(ring));
+				   engine->name, I915_READ_IMR(engine));
 		}
-		i915_ring_seqno_info(m, ring);
+		i915_ring_seqno_info(m, engine);
 	}
 	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
@@ -981,12 +980,12 @@  static int i915_hws_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	const u32 *hws;
 	int i;
 
-	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
-	hws = ring->status_page.page_addr;
+	engine = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+	hws = engine->status_page.page_addr;
 	if (hws == NULL)
 		return 0;
 
@@ -1331,7 +1330,7 @@  static int i915_hangcheck_info(struct seq_file *m, void *unused)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	u64 acthd[I915_NUM_RINGS];
 	u32 seqno[I915_NUM_RINGS];
 	u32 instdone[I915_NUM_INSTDONE_REG];
@@ -1344,9 +1343,9 @@  static int i915_hangcheck_info(struct seq_file *m, void *unused)
 
 	intel_runtime_pm_get(dev_priv);
 
-	for_each_ring(ring, dev_priv, i) {
-		seqno[i] = ring->get_seqno(ring, false);
-		acthd[i] = intel_ring_get_active_head(ring);
+	for_each_ring(engine, dev_priv, i) {
+		seqno[i] = engine->get_seqno(engine, false);
+		acthd[i] = intel_ring_get_active_head(engine);
 	}
 
 	i915_get_extra_instdone(dev, instdone);
@@ -1360,17 +1359,17 @@  static int i915_hangcheck_info(struct seq_file *m, void *unused)
 	} else
 		seq_printf(m, "Hangcheck inactive\n");
 
-	for_each_ring(ring, dev_priv, i) {
-		seq_printf(m, "%s:\n", ring->name);
+	for_each_ring(engine, dev_priv, i) {
+		seq_printf(m, "%s:\n", engine->name);
 		seq_printf(m, "\tseqno = %x [current %x]\n",
-			   ring->hangcheck.seqno, seqno[i]);
+			   engine->hangcheck.seqno, seqno[i]);
 		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
-			   (long long)ring->hangcheck.acthd,
+			   (long long)engine->hangcheck.acthd,
 			   (long long)acthd[i]);
-		seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
-		seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
+		seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
+		seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
 
-		if (ring->id == RCS) {
+		if (engine->id == RCS) {
 			seq_puts(m, "\tinstdone read =");
 
 			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
@@ -1380,7 +1379,7 @@  static int i915_hangcheck_info(struct seq_file *m, void *unused)
 
 			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
 				seq_printf(m, " 0x%08x",
-					   ring->hangcheck.instdone[j]);
+					   engine->hangcheck.instdone[j]);
 
 			seq_puts(m, "\n");
 		}
@@ -1946,7 +1945,7 @@  static int i915_context_status(struct seq_file *m, void *unused)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct intel_context *ctx;
 	int ret, i;
 
@@ -1966,13 +1965,13 @@  static int i915_context_status(struct seq_file *m, void *unused)
 
 		if (i915.enable_execlists) {
 			seq_putc(m, '\n');
-			for_each_ring(ring, dev_priv, i) {
+			for_each_ring(engine, dev_priv, i) {
 				struct drm_i915_gem_object *ctx_obj =
 					ctx->engine[i].state;
 				struct intel_ringbuffer *ringbuf =
 					ctx->engine[i].ringbuf;
 
-				seq_printf(m, "%s: ", ring->name);
+				seq_printf(m, "%s: ", engine->name);
 				if (ctx_obj)
 					describe_obj(m, ctx_obj);
 				if (ringbuf)
@@ -2041,7 +2040,7 @@  static int i915_dump_lrc(struct seq_file *m, void *unused)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct intel_context *ctx;
 	int ret, i;
 
@@ -2056,8 +2055,8 @@  static int i915_dump_lrc(struct seq_file *m, void *unused)
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link)
 		if (ctx != dev_priv->kernel_context)
-			for_each_ring(ring, dev_priv, i)
-				i915_dump_lrc_obj(m, ctx, ring);
+			for_each_ring(engine, dev_priv, i)
+				i915_dump_lrc_obj(m, ctx, engine);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -2069,7 +2068,7 @@  static int i915_execlists(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *)m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	u32 status_pointer;
 	u8 read_pointer;
 	u8 write_pointer;
@@ -2090,22 +2089,22 @@  static int i915_execlists(struct seq_file *m, void *data)
 
 	intel_runtime_pm_get(dev_priv);
 
-	for_each_ring(ring, dev_priv, ring_id) {
+	for_each_ring(engine, dev_priv, ring_id) {
 		struct drm_i915_gem_request *head_req = NULL;
 		int count = 0;
 		unsigned long flags;
 
-		seq_printf(m, "%s\n", ring->name);
+		seq_printf(m, "%s\n", engine->name);
 
-		status = I915_READ(RING_EXECLIST_STATUS_LO(ring));
-		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring));
+		status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
+		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
 		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
 			   status, ctx_id);
 
-		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
 		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
 
-		read_pointer = ring->next_context_status_buffer;
+		read_pointer = engine->next_context_status_buffer;
 		write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
 		if (read_pointer > write_pointer)
 			write_pointer += GEN8_CSB_ENTRIES;
@@ -2113,24 +2112,25 @@  static int i915_execlists(struct seq_file *m, void *data)
 			   read_pointer, write_pointer);
 
 		for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
-			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
-			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
+			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
+			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));
 
 			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
 				   i, status, ctx_id);
 		}
 
-		spin_lock_irqsave(&ring->execlist_lock, flags);
-		list_for_each(cursor, &ring->execlist_queue)
+		spin_lock_irqsave(&engine->execlist_lock, flags);
+		list_for_each(cursor, &engine->execlist_queue)
 			count++;
-		head_req = list_first_entry_or_null(&ring->execlist_queue,
-				struct drm_i915_gem_request, execlist_link);
-		spin_unlock_irqrestore(&ring->execlist_lock, flags);
+		head_req = list_first_entry_or_null(&engine->execlist_queue,
+						    struct drm_i915_gem_request,
+						    execlist_link);
+		spin_unlock_irqrestore(&engine->execlist_lock, flags);
 
 		seq_printf(m, "\t%d requests in queue\n", count);
 		if (head_req) {
 			seq_printf(m, "\tHead request id: %u\n",
-				   intel_execlists_ctx_id(head_req->ctx, ring));
+				   intel_execlists_ctx_id(head_req->ctx, engine));
 			seq_printf(m, "\tHead request tail: %u\n",
 				   head_req->tail);
 		}
@@ -2246,19 +2246,19 @@  static int per_file_ctx(int id, void *ptr, void *data)
 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 	int unused, i;
 
 	if (!ppgtt)
 		return;
 
-	for_each_ring(ring, dev_priv, unused) {
-		seq_printf(m, "%s\n", ring->name);
+	for_each_ring(engine, dev_priv, unused) {
+		seq_printf(m, "%s\n", engine->name);
 		for (i = 0; i < 4; i++) {
-			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i));
+			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
 			pdp <<= 32;
-			pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i));
+			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
 			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
 		}
 	}
@@ -2267,19 +2267,23 @@  static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i;
 
 	if (INTEL_INFO(dev)->gen == 6)
 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
 
-	for_each_ring(ring, dev_priv, i) {
-		seq_printf(m, "%s\n", ring->name);
+	for_each_ring(engine, dev_priv, i) {
+		seq_printf(m, "%s\n", engine->name);
 		if (INTEL_INFO(dev)->gen == 7)
-			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
-		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
-		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
-		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
+			seq_printf(m, "GFX_MODE: 0x%08x\n",
+				   I915_READ(RING_MODE_GEN7(engine)));
+		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
+			   I915_READ(RING_PP_DIR_BASE(engine)));
+		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
+			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
+		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
+			   I915_READ(RING_PP_DIR_DCLV(engine)));
 	}
 	if (dev_priv->mm.aliasing_ppgtt) {
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
@@ -2334,12 +2338,12 @@  out_put:
 
 static int count_irq_waiters(struct drm_i915_private *i915)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int count = 0;
 	int i;
 
-	for_each_ring(ring, i915, i)
-		count += ring->irq_refcount;
+	for_each_ring(engine, i915, i)
+		count += engine->irq_refcount;
 
 	return count;
 }
@@ -2447,7 +2451,7 @@  static void i915_guc_client_info(struct seq_file *m,
 				 struct drm_i915_private *dev_priv,
 				 struct i915_guc_client *client)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	uint64_t tot = 0;
 	uint32_t i;
 
@@ -2462,11 +2466,11 @@  static void i915_guc_client_info(struct seq_file *m,
 	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
 	seq_printf(m, "\tLast submission result: %d\n", client->retcode);
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_ring(engine, dev_priv, i) {
 		seq_printf(m, "\tSubmissions: %llu %s\n",
-				client->submissions[ring->guc_id],
-				ring->name);
-		tot += client->submissions[ring->guc_id];
+				client->submissions[engine->guc_id],
+				engine->name);
+		tot += client->submissions[engine->guc_id];
 	}
 	seq_printf(m, "\tTotal: %llu\n", tot);
 }
@@ -2478,7 +2482,7 @@  static int i915_guc_info(struct seq_file *m, void *data)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc guc;
 	struct i915_guc_client client = {};
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	enum intel_ring_id i;
 	u64 total = 0;
 
@@ -2502,11 +2506,11 @@  static int i915_guc_info(struct seq_file *m, void *data)
 	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
 
 	seq_printf(m, "\nGuC submissions:\n");
-	for_each_ring(ring, dev_priv, i) {
+	for_each_ring(engine, dev_priv, i) {
 		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
-			ring->name, guc.submissions[ring->guc_id],
-			guc.last_seqno[ring->guc_id]);
-		total += guc.submissions[ring->guc_id];
+			engine->name, guc.submissions[engine->guc_id],
+			guc.last_seqno[engine->guc_id]);
+		total += guc.submissions[engine->guc_id];
 	}
 	seq_printf(m, "\t%s: %llu\n", "Total", total);
 
@@ -3128,7 +3132,7 @@  static int i915_semaphore_status(struct seq_file *m, void *unused)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
 	int i, j, ret;
 
@@ -3149,10 +3153,10 @@  static int i915_semaphore_status(struct seq_file *m, void *unused)
 		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
 
 		seqno = (uint64_t *)kmap_atomic(page);
-		for_each_ring(ring, dev_priv, i) {
+		for_each_ring(engine, dev_priv, i) {
 			uint64_t offset;
 
-			seq_printf(m, "%s\n", ring->name);
+			seq_printf(m, "%s\n", engine->name);
 
 			seq_puts(m, "  Last signal:");
 			for (j = 0; j < num_rings; j++) {
@@ -3174,17 +3178,18 @@  static int i915_semaphore_status(struct seq_file *m, void *unused)
 		kunmap_atomic(seqno);
 	} else {
 		seq_puts(m, "  Last signal:");
-		for_each_ring(ring, dev_priv, i)
+		for_each_ring(engine, dev_priv, i)
 			for (j = 0; j < num_rings; j++)
 				seq_printf(m, "0x%08x\n",
-					   I915_READ(ring->semaphore.mbox.signal[j]));
+					   I915_READ(engine->semaphore.mbox.signal[j]));
 		seq_putc(m, '\n');
 	}
 
 	seq_puts(m, "\nSync seqno:\n");
-	for_each_ring(ring, dev_priv, i) {
+	for_each_ring(engine, dev_priv, i) {
 		for (j = 0; j < num_rings; j++) {
-			seq_printf(m, "  0x%08x ", ring->semaphore.sync_seqno[j]);
+			seq_printf(m, "  0x%08x ",
+				   engine->semaphore.sync_seqno[j]);
 		}
 		seq_putc(m, '\n');
 	}
@@ -3226,7 +3231,7 @@  static int i915_wa_registers(struct seq_file *m, void *unused)
 {
 	int i;
 	int ret;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3239,9 +3244,9 @@  static int i915_wa_registers(struct seq_file *m, void *unused)
 	intel_runtime_pm_get(dev_priv);
 
 	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
-	for_each_ring(ring, dev_priv, i)
+	for_each_ring(engine, dev_priv, i)
 		seq_printf(m, "HW whitelist count for %s: %d\n",
-			   ring->name, workarounds->hw_whitelist_count[i]);
+			   engine->name, workarounds->hw_whitelist_count[i]);
 	for (i = 0; i < workarounds->count; ++i) {
 		i915_reg_t addr;
 		u32 mask, value, read;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b854af2c4141..5a7f6032f066 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1243,11 +1243,11 @@  int __i915_wait_request(struct drm_i915_gem_request *req,
 			s64 *timeout,
 			struct intel_rps_client *rps)
 {
-	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
-	struct drm_device *dev = ring->dev;
+	struct intel_engine_cs *engine = i915_gem_request_get_ring(req);
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const bool irq_test_in_progress =
-		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
+		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(engine);
 	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
 	DEFINE_WAIT(wait);
 	unsigned long timeout_expire;
@@ -1288,7 +1288,7 @@  int __i915_wait_request(struct drm_i915_gem_request *req,
 	if (ret == 0)
 		goto out;
 
-	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
+	if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
 		ret = -ENODEV;
 		goto out;
 	}
@@ -1296,7 +1296,7 @@  int __i915_wait_request(struct drm_i915_gem_request *req,
 	for (;;) {
 		struct timer_list timer;
 
-		prepare_to_wait(&ring->irq_queue, &wait, state);
+		prepare_to_wait(&engine->irq_queue, &wait, state);
 
 		/* We need to check whether any gpu reset happened in between
 		 * the caller grabbing the seqno and now ... */
@@ -1325,11 +1325,11 @@  int __i915_wait_request(struct drm_i915_gem_request *req,
 		}
 
 		timer.function = NULL;
-		if (timeout || missed_irq(dev_priv, ring)) {
+		if (timeout || missed_irq(dev_priv, engine)) {
 			unsigned long expire;
 
 			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
-			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
+			expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
 			mod_timer(&timer, expire);
 		}
 
@@ -1341,9 +1341,9 @@  int __i915_wait_request(struct drm_i915_gem_request *req,
 		}
 	}
 	if (!irq_test_in_progress)
-		ring->irq_put(ring);
+		engine->irq_put(engine);
 
-	finish_wait(&ring->irq_queue, &wait);
+	finish_wait(&engine->irq_queue, &wait);
 
 out:
 	trace_i915_gem_request_wait_end(req);
@@ -2404,17 +2404,17 @@  void i915_vma_move_to_active(struct i915_vma *vma,
 			     struct drm_i915_gem_request *req)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 
-	ring = i915_gem_request_get_ring(req);
+	engine = i915_gem_request_get_ring(req);
 
 	/* Add a reference if we're newly entering the active list. */
 	if (obj->active == 0)
 		drm_gem_object_reference(&obj->base);
-	obj->active |= intel_ring_flag(ring);
+	obj->active |= intel_ring_flag(engine);
 
-	list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
-	i915_gem_request_assign(&obj->last_read_req[ring->id], req);
+	list_move_tail(&obj->ring_list[engine->id], &engine->active_list);
+	i915_gem_request_assign(&obj->last_read_req[engine->id], req);
 
 	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
@@ -2467,23 +2467,23 @@  static int
 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int ret, i, j;
 
 	/* Carefully retire all requests without writing to the rings */
-	for_each_ring(ring, dev_priv, i) {
-		ret = intel_ring_idle(ring);
+	for_each_ring(engine, dev_priv, i) {
+		ret = intel_ring_idle(engine);
 		if (ret)
 			return ret;
 	}
 	i915_gem_retire_requests(dev);
 
 	/* Finally reset hw state */
-	for_each_ring(ring, dev_priv, i) {
-		intel_ring_init_seqno(ring, seqno);
+	for_each_ring(engine, dev_priv, i) {
+		intel_ring_init_seqno(engine, seqno);
 
-		for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
-			ring->semaphore.sync_seqno[j] = 0;
+		for (j = 0; j < ARRAY_SIZE(engine->semaphore.sync_seqno); j++)
+			engine->semaphore.sync_seqno[j] = 0;
 	}
 
 	return 0;
@@ -2542,7 +2542,7 @@  void __i915_add_request(struct drm_i915_gem_request *request,
 			struct drm_i915_gem_object *obj,
 			bool flush_caches)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct drm_i915_private *dev_priv;
 	struct intel_ringbuffer *ringbuf;
 	u32 request_start;
@@ -2551,8 +2551,8 @@  void __i915_add_request(struct drm_i915_gem_request *request,
 	if (WARN_ON(request == NULL))
 		return;
 
-	ring = request->ring;
-	dev_priv = ring->dev->dev_private;
+	engine = request->ring;
+	dev_priv = engine->dev->dev_private;
 	ringbuf = request->ringbuf;
 
 	/*
@@ -2587,9 +2587,9 @@  void __i915_add_request(struct drm_i915_gem_request *request,
 	request->postfix = intel_ring_get_tail(ringbuf);
 
 	if (i915.enable_execlists)
-		ret = ring->emit_request(request);
+		ret = engine->emit_request(request);
 	else {
-		ret = ring->add_request(request);
+		ret = engine->add_request(request);
 
 		request->tail = intel_ring_get_tail(ringbuf);
 	}
@@ -2607,13 +2607,13 @@  void __i915_add_request(struct drm_i915_gem_request *request,
 	request->batch_obj = obj;
 
 	request->emitted_jiffies = jiffies;
-	request->previous_seqno = ring->last_submitted_seqno;
-	ring->last_submitted_seqno = request->seqno;
-	list_add_tail(&request->list, &ring->request_list);
+	request->previous_seqno = engine->last_submitted_seqno;
+	engine->last_submitted_seqno = request->seqno;
+	list_add_tail(&request->list, &engine->request_list);
 
 	trace_i915_gem_request_add(request);
 
-	i915_queue_hangcheck(ring->dev);
+	i915_queue_hangcheck(engine->dev);
 
 	queue_delayed_work(dev_priv->wq,
 			   &dev_priv->mm.retire_work,
@@ -2885,7 +2885,7 @@  static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 void i915_gem_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i;
 
 	/*
@@ -2893,11 +2893,11 @@  void i915_gem_reset(struct drm_device *dev)
 	 * them for finding the guilty party. As the requests only borrow
 	 * their reference to the objects, the inspection must be done first.
 	 */
-	for_each_ring(ring, dev_priv, i)
-		i915_gem_reset_ring_status(dev_priv, ring);
+	for_each_ring(engine, dev_priv, i)
+		i915_gem_reset_ring_status(dev_priv, engine);
 
-	for_each_ring(ring, dev_priv, i)
-		i915_gem_reset_ring_cleanup(dev_priv, ring);
+	for_each_ring(engine, dev_priv, i)
+		i915_gem_reset_ring_cleanup(dev_priv, engine);
 
 	i915_gem_context_reset(dev);
 
@@ -2962,19 +2962,19 @@  bool
 i915_gem_retire_requests(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	bool idle = true;
 	int i;
 
-	for_each_ring(ring, dev_priv, i) {
-		i915_gem_retire_requests_ring(ring);
-		idle &= list_empty(&ring->request_list);
+	for_each_ring(engine, dev_priv, i) {
+		i915_gem_retire_requests_ring(engine);
+		idle &= list_empty(&engine->request_list);
 		if (i915.enable_execlists) {
-			spin_lock_irq(&ring->execlist_lock);
-			idle &= list_empty(&ring->execlist_queue);
-			spin_unlock_irq(&ring->execlist_lock);
+			spin_lock_irq(&engine->execlist_lock);
+			idle &= list_empty(&engine->execlist_queue);
+			spin_unlock_irq(&engine->execlist_lock);
 
-			intel_execlists_retire_requests(ring);
+			intel_execlists_retire_requests(engine);
 		}
 	}
 
@@ -3025,11 +3025,11 @@  i915_gem_idle_work_handler(struct work_struct *work)
 	intel_mark_idle(dev);
 
 	if (mutex_trylock(&dev->struct_mutex)) {
-		struct intel_engine_cs *ring;
+		struct intel_engine_cs *engine;
 		int i;
 
-		for_each_ring(ring, dev_priv, i)
-			i915_gem_batch_pool_fini(&ring->batch_pool);
+		for_each_ring(engine, dev_priv, i)
+			i915_gem_batch_pool_fini(&engine->batch_pool);
 
 		mutex_unlock(&dev->struct_mutex);
 	}
@@ -3391,15 +3391,15 @@  int __i915_vma_unbind_no_wait(struct i915_vma *vma)
 int i915_gpu_idle(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int ret, i;
 
 	/* Flush everything onto the inactive list. */
-	for_each_ring(ring, dev_priv, i) {
+	for_each_ring(engine, dev_priv, i) {
 		if (!i915.enable_execlists) {
 			struct drm_i915_gem_request *req;
 
-			req = i915_gem_request_alloc(ring, NULL);
+			req = i915_gem_request_alloc(engine, NULL);
 			if (IS_ERR(req))
 				return PTR_ERR(req);
 
@@ -3412,7 +3412,7 @@  int i915_gpu_idle(struct drm_device *dev)
 			i915_add_request_no_flush(req);
 		}
 
-		ret = intel_ring_idle(ring);
+		ret = intel_ring_idle(engine);
 		if (ret)
 			return ret;
 	}
@@ -4656,11 +4656,11 @@  static void
 i915_gem_stop_ringbuffers(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i;
 
-	for_each_ring(ring, dev_priv, i)
-		dev_priv->gt.stop_ring(ring);
+	for_each_ring(engine, dev_priv, i)
+		dev_priv->gt.stop_ring(engine);
 }
 
 int
@@ -4697,8 +4697,8 @@  err:
 
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 {
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_device *dev = ring->dev;
+	struct intel_engine_cs *engine = req->ring;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
 	int i, ret;
@@ -4716,12 +4716,12 @@  int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 	 * at initialization time.
 	 */
 	for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
-		intel_ring_emit(ring, remap_info[i]);
+		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
+		intel_ring_emit(engine, remap_info[i]);
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
 	return ret;
 }
@@ -4829,7 +4829,7 @@  int
 i915_gem_init_hw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int ret, i, j;
 
 	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
@@ -4876,8 +4876,8 @@  i915_gem_init_hw(struct drm_device *dev)
 	}
 
 	/* Need to do basic initialisation of all rings first: */
-	for_each_ring(ring, dev_priv, i) {
-		ret = ring->init_hw(ring);
+	for_each_ring(engine, dev_priv, i) {
+		ret = engine->init_hw(engine);
 		if (ret)
 			goto out;
 	}
@@ -4901,17 +4901,17 @@  i915_gem_init_hw(struct drm_device *dev)
 		goto out;
 
 	/* Now it is safe to go back round and do everything else: */
-	for_each_ring(ring, dev_priv, i) {
+	for_each_ring(engine, dev_priv, i) {
 		struct drm_i915_gem_request *req;
 
-		req = i915_gem_request_alloc(ring, NULL);
+		req = i915_gem_request_alloc(engine, NULL);
 		if (IS_ERR(req)) {
 			ret = PTR_ERR(req);
 			i915_gem_cleanup_ringbuffer(dev);
 			goto out;
 		}
 
-		if (ring->id == RCS) {
+		if (engine->id == RCS) {
 			for (j = 0; j < NUM_L3_SLICES(dev); j++)
 				i915_gem_l3_remap(req, j);
 		}
@@ -5006,11 +5006,11 @@  void
 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i;
 
-	for_each_ring(ring, dev_priv, i)
-		dev_priv->gt.cleanup_ring(ring);
+	for_each_ring(engine, dev_priv, i)
+		dev_priv->gt.cleanup_ring(engine);
 
     if (i915.enable_execlists)
             /*
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 5dd84e148bba..cc07666c2d91 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -346,11 +346,11 @@  void i915_gem_context_reset(struct drm_device *dev)
 	}
 
 	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_engine_cs *ring = &dev_priv->ring[i];
+		struct intel_engine_cs *engine = &dev_priv->ring[i];
 
-		if (ring->last_context) {
-			i915_gem_context_unpin(ring->last_context, ring);
-			ring->last_context = NULL;
+		if (engine->last_context) {
+			i915_gem_context_unpin(engine->last_context, engine);
+			engine->last_context = NULL;
 		}
 	}
 
@@ -427,11 +427,11 @@  void i915_gem_context_fini(struct drm_device *dev)
 	}
 
 	for (i = I915_NUM_RINGS; --i >= 0;) {
-		struct intel_engine_cs *ring = &dev_priv->ring[i];
+		struct intel_engine_cs *engine = &dev_priv->ring[i];
 
-		if (ring->last_context) {
-			i915_gem_context_unpin(ring->last_context, ring);
-			ring->last_context = NULL;
+		if (engine->last_context) {
+			i915_gem_context_unpin(engine->last_context, engine);
+			engine->last_context = NULL;
 		}
 	}
 
@@ -441,14 +441,14 @@  void i915_gem_context_fini(struct drm_device *dev)
 
 int i915_gem_context_enable(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	int ret;
 
 	if (i915.enable_execlists) {
-		if (ring->init_context == NULL)
+		if (engine->init_context == NULL)
 			return 0;
 
-		ret = ring->init_context(req);
+		ret = engine->init_context(req);
 	} else
 		ret = i915_switch_context(req);
 
@@ -510,12 +510,12 @@  i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
-		i915_semaphore_is_enabled(ring->dev) ?
-		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
+		i915_semaphore_is_enabled(engine->dev) ?
+		hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
 		0;
 	int len, i, ret;
 
@@ -524,21 +524,21 @@  mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 * explicitly, so we rely on the value at ring init, stored in
 	 * itlb_before_ctx_switch.
 	 */
-	if (IS_GEN6(ring->dev)) {
-		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
+	if (IS_GEN6(engine->dev)) {
+		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
 
 	/* These flags are for resource streamer on HSW+ */
-	if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
+	if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
 		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
-	else if (INTEL_INFO(ring->dev)->gen < 8)
+	else if (INTEL_INFO(engine->dev)->gen < 8)
 		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 
 	len = 4;
-	if (INTEL_INFO(ring->dev)->gen >= 7)
+	if (INTEL_INFO(engine->dev)->gen >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 2 : 0);
 
 	ret = intel_ring_begin(req, len);
@@ -546,49 +546,56 @@  mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 		return ret;
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-	if (INTEL_INFO(ring->dev)->gen >= 7) {
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+	if (INTEL_INFO(engine->dev)->gen >= 7) {
+		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
-			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_ring(signaller, to_i915(ring->dev), i) {
-				if (signaller == ring)
+			intel_ring_emit(engine,
+					MI_LOAD_REGISTER_IMM(num_rings));
+			for_each_ring(signaller, to_i915(engine->dev), i) {
+				if (signaller == engine)
 					continue;
 
-				intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
-				intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				intel_ring_emit_reg(engine,
+						    RING_PSMI_CTL(signaller->mmio_base));
+				intel_ring_emit(engine,
+						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
 			}
 		}
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(engine, MI_SET_CONTEXT);
+	intel_ring_emit(engine,
+			i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
 			flags);
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
 	 */
-	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(engine, MI_NOOP);
 
-	if (INTEL_INFO(ring->dev)->gen >= 7) {
+	if (INTEL_INFO(engine->dev)->gen >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
-			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_ring(signaller, to_i915(ring->dev), i) {
-				if (signaller == ring)
+			intel_ring_emit(engine,
+					MI_LOAD_REGISTER_IMM(num_rings));
+			for_each_ring(signaller, to_i915(engine->dev), i) {
+				if (signaller == engine)
 					continue;
 
-				intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
-				intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				intel_ring_emit_reg(engine,
+						    RING_PSMI_CTL(signaller->mmio_base));
+				intel_ring_emit(engine,
+						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
 			}
 		}
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
 	return ret;
 }
@@ -648,25 +655,26 @@  needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
 static int do_switch(struct drm_i915_gem_request *req)
 {
 	struct intel_context *to = req->ctx;
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	struct intel_context *from = ring->last_context;
+	struct intel_engine_cs *engine = req->ring;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct intel_context *from = engine->last_context;
 	u32 hw_flags = 0;
 	bool uninitialized = false;
 	int ret, i;
 
-	if (from != NULL && ring == &dev_priv->ring[RCS]) {
+	if (from != NULL && engine == &dev_priv->ring[RCS]) {
 		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
 		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
 	}
 
-	if (should_skip_switch(ring, from, to))
+	if (should_skip_switch(engine, from, to))
 		return 0;
 
 	/* Trying to pin first makes error handling easier. */
-	if (ring == &dev_priv->ring[RCS]) {
+	if (engine == &dev_priv->ring[RCS]) {
 		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
-					    get_context_alignment(ring->dev), 0);
+					    get_context_alignment(engine->dev),
+					    0);
 		if (ret)
 			return ret;
 	}
@@ -676,23 +684,23 @@  static int do_switch(struct drm_i915_gem_request *req)
 	 * evict_everything - as a last ditch gtt defrag effort that also
 	 * switches to the default context. Hence we need to reload from here.
 	 */
-	from = ring->last_context;
+	from = engine->last_context;
 
-	if (needs_pd_load_pre(ring, to)) {
+	if (needs_pd_load_pre(engine, to)) {
 		/* Older GENs and non render rings still want the load first,
 		 * "PP_DCLV followed by PP_DIR_BASE register through Load
 		 * Register Immediate commands in Ring Buffer before submitting
 		 * a context."*/
-		trace_switch_mm(ring, to);
+		trace_switch_mm(engine, to);
 		ret = to->ppgtt->switch_mm(to->ppgtt, req);
 		if (ret)
 			goto unpin_out;
 
 		/* Doing a PD load always reloads the page dirs */
-		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
+		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
 	}
 
-	if (ring != &dev_priv->ring[RCS]) {
+	if (engine != &dev_priv->ring[RCS]) {
 		if (from)
 			i915_gem_context_unreference(from);
 		goto done;
@@ -717,14 +725,14 @@  static int do_switch(struct drm_i915_gem_request *req)
 		 * space. This means we must enforce that a page table load
 		 * occur when this occurs. */
 	} else if (to->ppgtt &&
-		   (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
+		   (intel_ring_flag(engine) & to->ppgtt->pd_dirty_rings)) {
 		hw_flags |= MI_FORCE_RESTORE;
-		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
+		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
 	}
 
 	/* We should never emit switch_mm more than once */
-	WARN_ON(needs_pd_load_pre(ring, to) &&
-		needs_pd_load_post(ring, to, hw_flags));
+	WARN_ON(needs_pd_load_pre(engine, to) &&
+		needs_pd_load_post(engine, to, hw_flags));
 
 	ret = mi_set_context(req, hw_flags);
 	if (ret)
@@ -733,8 +741,8 @@  static int do_switch(struct drm_i915_gem_request *req)
 	/* GEN8 does *not* require an explicit reload if the PDPs have been
 	 * setup, and we do not wish to move them.
 	 */
-	if (needs_pd_load_post(ring, to, hw_flags)) {
-		trace_switch_mm(ring, to);
+	if (needs_pd_load_post(engine, to, hw_flags)) {
+		trace_switch_mm(engine, to);
 		ret = to->ppgtt->switch_mm(to->ppgtt, req);
 		/* The hardware context switch is emitted, but we haven't
 		 * actually changed the state - so it's probably safe to bail
@@ -787,11 +795,11 @@  static int do_switch(struct drm_i915_gem_request *req)
 
 done:
 	i915_gem_context_reference(to);
-	ring->last_context = to;
+	engine->last_context = to;
 
 	if (uninitialized) {
-		if (ring->init_context) {
-			ret = ring->init_context(req);
+		if (engine->init_context) {
+			ret = engine->init_context(req);
 			if (ret)
 				DRM_ERROR("ring init context: %d\n", ret);
 		}
@@ -800,7 +808,7 @@  done:
 	return 0;
 
 unpin_out:
-	if (ring->id == RCS)
+	if (engine->id == RCS)
 		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
 	return ret;
 }
@@ -820,18 +828,18 @@  unpin_out:
  */
 int i915_switch_context(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct intel_engine_cs *engine = req->ring;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
 	WARN_ON(i915.enable_execlists);
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
 	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
-		if (req->ctx != ring->last_context) {
+		if (req->ctx != engine->last_context) {
 			i915_gem_context_reference(req->ctx);
-			if (ring->last_context)
-				i915_gem_context_unreference(ring->last_context);
-			ring->last_context = req->ctx;
+			if (engine->last_context)
+				i915_gem_context_unreference(engine->last_context);
+			engine->last_context = req->ctx;
 		}
 		return 0;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 17299d04189f..202a7e6ae295 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -36,29 +36,30 @@  i915_verify_lists(struct drm_device *dev)
 	static int warned;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int err = 0;
 	int i;
 
 	if (warned)
 		return 0;
 
-	for_each_ring(ring, dev_priv, i) {
-		list_for_each_entry(obj, &ring->active_list, ring_list[ring->id]) {
+	for_each_ring(engine, dev_priv, i) {
+		list_for_each_entry(obj, &engine->active_list,
+				    ring_list[engine->id]) {
 			if (obj->base.dev != dev ||
 			    !atomic_read(&obj->base.refcount.refcount)) {
 				DRM_ERROR("%s: freed active obj %p\n",
-					  ring->name, obj);
+					  engine->name, obj);
 				err++;
 				break;
 			} else if (!obj->active ||
-				   obj->last_read_req[ring->id] == NULL) {
+				   obj->last_read_req[engine->id] == NULL) {
 				DRM_ERROR("%s: invalid active obj %p\n",
-					  ring->name, obj);
+					  engine->name, obj);
 				err++;
 			} else if (obj->base.write_domain) {
 				DRM_ERROR("%s: invalid write obj %p (w %x)\n",
-					  ring->name,
+					  engine->name,
 					  obj, obj->base.write_domain);
 				err++;
 			}
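
Note on the naming mix visible above: this series renames only the local
variables, so the for_each_ring() macro, the dev_priv->ring[] array and
req->ring keep their old names for now, and "engine" iterates over "rings".
For reference, a sketch of roughly how the iterator was defined in
i915_drv.h at this point (quoted from memory, details may differ):

	#define for_each_ring(ring__, dev_priv__, i__) \
		for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
			if (((ring__) = &(dev_priv__)->ring[(i__)]), \
			    intel_ring_initialized((ring__)))

so a post-rename caller simply reads:

	struct intel_engine_cs *engine;
	int i;

	for_each_ring(engine, dev_priv, i)
		engine->hangcheck.deadlock = 0;
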
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1328bc5021b4..b73496ea5583 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1095,7 +1095,7 @@  void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
+	struct intel_engine_cs *engine = i915_gem_request_get_ring(req);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, vmas, exec_list) {
@@ -1122,7 +1122,7 @@  i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			i915_gem_request_assign(&obj->last_fenced_req, req);
 			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				struct drm_i915_private *dev_priv = to_i915(ring->dev);
+				struct drm_i915_private *dev_priv = to_i915(engine->dev);
 				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
 					       &dev_priv->mm.fence_list);
 			}
@@ -1146,11 +1146,11 @@  static int
 i915_reset_gen7_sol_offsets(struct drm_device *dev,
 			    struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret, i;
 
-	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
+	if (!IS_GEN7(dev) || engine != &dev_priv->ring[RCS]) {
 		DRM_DEBUG("sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
@@ -1160,12 +1160,12 @@  i915_reset_gen7_sol_offsets(struct drm_device *dev,
 		return ret;
 
 	for (i = 0; i < 4; i++) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
-		intel_ring_emit(ring, 0);
+		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
+		intel_ring_emit(engine, 0);
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -1229,7 +1229,7 @@  i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 			       struct list_head *vmas)
 {
 	struct drm_device *dev = params->dev;
-	struct intel_engine_cs *ring = params->ring;
+	struct intel_engine_cs *engine = params->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u64 exec_start, exec_len;
 	int instp_mode;
@@ -1244,8 +1244,8 @@  i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	if (ret)
 		return ret;
 
-	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
-	     "%s didn't clear reload\n", ring->name);
+	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
+	     "%s didn't clear reload\n", engine->name);
 
 	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
 	instp_mask = I915_EXEC_CONSTANTS_MASK;
@@ -1253,7 +1253,7 @@  i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+		if (instp_mode != 0 && engine != &dev_priv->ring[RCS]) {
 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
 			return -EINVAL;
 		}
@@ -1280,17 +1280,17 @@  i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 		return -EINVAL;
 	}
 
-	if (ring == &dev_priv->ring[RCS] &&
+	if (engine == &dev_priv->ring[RCS] &&
 	    instp_mode != dev_priv->relative_constants_mode) {
 		ret = intel_ring_begin(params->request, 4);
 		if (ret)
 			return ret;
 
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, INSTPM);
-		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
-		intel_ring_advance(ring);
+		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(engine, INSTPM);
+		intel_ring_emit(engine, instp_mask << 16 | instp_mode);
+		intel_ring_advance(engine);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
@@ -1308,7 +1308,7 @@  i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	if (exec_len == 0)
 		exec_len = params->batch_obj->base.size;
 
-	ret = ring->dispatch_execbuffer(params->request,
-					exec_start, exec_len,
-					params->dispatch_flags);
+	ret = engine->dispatch_execbuffer(params->request,
+					  exec_start, exec_len,
+					  params->dispatch_flags);
 	if (ret)
@@ -1432,7 +1432,7 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_i915_gem_exec_object2 shadow_exec_entry;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct intel_context *ctx;
 	struct i915_address_space *vm;
 	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
@@ -1459,7 +1459,7 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (args->flags & I915_EXEC_IS_PINNED)
 		dispatch_flags |= I915_DISPATCH_PINNED;
 
-	ret = eb_select_ring(dev_priv, file, args, &ring);
+	ret = eb_select_ring(dev_priv, file, args, &engine);
 	if (ret)
 		return ret;
 
@@ -1473,9 +1473,9 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
 			return -EINVAL;
 		}
-		if (ring->id != RCS) {
+		if (engine->id != RCS) {
 			DRM_DEBUG("RS is not available on %s\n",
-				 ring->name);
+				 engine->name);
 			return -EINVAL;
 		}
 
@@ -1488,7 +1488,7 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		goto pre_mutex_err;
 
-	ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
+	ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
 	if (IS_ERR(ctx)) {
 		mutex_unlock(&dev->struct_mutex);
 		ret = PTR_ERR(ctx);
@@ -1522,7 +1522,8 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
+					  &need_relocs);
 	if (ret)
 		goto err;
 
@@ -1531,7 +1532,8 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		ret = i915_gem_execbuffer_relocate(eb);
 	if (ret) {
 		if (ret == -EFAULT) {
-			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
+			ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
+								engine,
 								eb, exec, ctx);
 			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 		}
@@ -1547,16 +1549,16 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	params->args_batch_start_offset = args->batch_start_offset;
-	if (i915_needs_cmd_parser(ring) && args->batch_len) {
+	if (i915_needs_cmd_parser(engine) && args->batch_len) {
 		struct drm_i915_gem_object *parsed_batch_obj;
 
-		parsed_batch_obj = i915_gem_execbuffer_parse(ring,
-						      &shadow_exec_entry,
-						      eb,
-						      batch_obj,
-						      args->batch_start_offset,
-						      args->batch_len,
-						      file->is_master);
+		parsed_batch_obj = i915_gem_execbuffer_parse(engine,
+							     &shadow_exec_entry,
+							     eb,
+							     batch_obj,
+							     args->batch_start_offset,
+							     args->batch_len,
+							     file->is_master);
 		if (IS_ERR(parsed_batch_obj)) {
 			ret = PTR_ERR(parsed_batch_obj);
 			goto err;
@@ -1608,7 +1610,7 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
 	/* Allocate a request for this batch buffer nice and early. */
-	req = i915_gem_request_alloc(ring, ctx);
+	req = i915_gem_request_alloc(engine, ctx);
 	if (IS_ERR(req)) {
 		ret = PTR_ERR(req);
 		goto err_batch_unpin;
@@ -1626,7 +1628,7 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 */
 	params->dev                     = dev;
 	params->file                    = file;
-	params->ring                    = ring;
+	params->ring                    = engine;
 	params->dispatch_flags          = dispatch_flags;
 	params->batch_obj               = batch_obj;
 	params->ctx                     = ctx;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7b8de85c5f76..1bc77791bc96 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -658,7 +658,7 @@  static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	int ret;
 
 	BUG_ON(entry >= 4);
@@ -667,13 +667,13 @@  static int gen8_write_pdp(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
-	intel_ring_emit(ring, upper_32_bits(addr));
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
-	intel_ring_emit(ring, lower_32_bits(addr));
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit_reg(engine, GEN8_RING_PDP_UDW(engine, entry));
+	intel_ring_emit(engine, upper_32_bits(addr));
+	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit_reg(engine, GEN8_RING_PDP_LDW(engine, entry));
+	intel_ring_emit(engine, lower_32_bits(addr));
+	intel_ring_advance(engine);
 
 	return 0;
 }
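
As a reading aid for the gen8_write_pdp() hunk above: each
intel_ring_emit() call adds one dword to the ring, so per PDP entry the
function emits the following six-dword stream, upper half strictly before
lower half:

	MI_LOAD_REGISTER_IMM(1)
	GEN8_RING_PDP_UDW(engine, entry)	/* register offset */
	upper_32_bits(addr)			/* value */
	MI_LOAD_REGISTER_IMM(1)
	GEN8_RING_PDP_LDW(engine, entry)
	lower_32_bits(addr)
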
@@ -1650,11 +1650,11 @@  static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			 struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1662,13 +1662,13 @@  static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
+	intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
+	intel_ring_emit(engine, PP_DIR_DCLV_2G);
+	intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
+	intel_ring_emit(engine, get_pd_offset(ppgtt));
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -1676,22 +1676,22 @@  static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
 
-	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+	I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+	I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
 	return 0;
 }
 
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1699,17 +1699,17 @@  static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
+	intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
+	intel_ring_emit(engine, PP_DIR_DCLV_2G);
+	intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
+	intel_ring_emit(engine, get_pd_offset(ppgtt));
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
-	if (ring->id != RCS) {
-		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	if (engine->id != RCS) {
+		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 		if (ret)
 			return ret;
 	}
@@ -1720,15 +1720,15 @@  static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 
-	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+	I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+	I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
 
-	POSTING_READ(RING_PP_DIR_DCLV(ring));
+	POSTING_READ(RING_PP_DIR_DCLV(engine));
 
 	return 0;
 }
@@ -1736,12 +1736,12 @@  static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static void gen8_ppgtt_enable(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int j;
 
-	for_each_ring(ring, dev_priv, j) {
+	for_each_ring(engine, dev_priv, j) {
 		u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
-		I915_WRITE(RING_MODE_GEN7(ring),
+		I915_WRITE(RING_MODE_GEN7(engine),
 			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
 	}
 }
@@ -1749,7 +1749,7 @@  static void gen8_ppgtt_enable(struct drm_device *dev)
 static void gen7_ppgtt_enable(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	uint32_t ecochk, ecobits;
 	int i;
 
@@ -1765,9 +1765,9 @@  static void gen7_ppgtt_enable(struct drm_device *dev)
 	}
 	I915_WRITE(GAM_ECOCHK, ecochk);
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_ring(engine, dev_priv, i) {
 		/* GFX_MODE is per-ring on gen7+ */
-		I915_WRITE(RING_MODE_GEN7(ring),
+		I915_WRITE(RING_MODE_GEN7(engine),
 			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 	}
 }
@@ -2286,15 +2286,15 @@  static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
 void i915_check_and_clear_faults(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i;
 
 	if (INTEL_INFO(dev)->gen < 6)
 		return;
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_ring(engine, dev_priv, i) {
 		u32 fault_reg;
-		fault_reg = I915_READ(RING_FAULT_REG(ring));
+		fault_reg = I915_READ(RING_FAULT_REG(engine));
 		if (fault_reg & RING_FAULT_VALID) {
 			DRM_DEBUG_DRIVER("Unexpected fault\n"
 					 "\tAddr: 0x%08lx\n"
@@ -2305,7 +2305,7 @@  void i915_check_and_clear_faults(struct drm_device *dev)
 					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
 					 RING_FAULT_SRCID(fault_reg),
 					 RING_FAULT_FAULT_TYPE(fault_reg));
-			I915_WRITE(RING_FAULT_REG(ring),
+			I915_WRITE(RING_FAULT_REG(engine),
 				   fault_reg & ~RING_FAULT_VALID);
 		}
 	}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 13b5f3aed01c..d97cadcfccb1 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -495,9 +495,9 @@  int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 		if (obj) {
 			u64 wa_ctx_offset = obj->gtt_offset;
 			u32 *wa_ctx_page = &obj->pages[0][0];
-			struct intel_engine_cs *ring = &dev_priv->ring[RCS];
-			u32 wa_ctx_size = (ring->wa_ctx.indirect_ctx.size +
-					   ring->wa_ctx.per_ctx.size);
+			struct intel_engine_cs *engine = &dev_priv->ring[RCS];
+			u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
+					   engine->wa_ctx.per_ctx.size);
 
 			err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
 				   dev_priv->ring[i].name, wa_ctx_offset);
@@ -1019,19 +1019,19 @@  static void i915_gem_record_rings(struct drm_device *dev,
 	int i, count;
 
 	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_engine_cs *ring = &dev_priv->ring[i];
+		struct intel_engine_cs *engine = &dev_priv->ring[i];
 		struct intel_ringbuffer *rbuf;
 
 		error->ring[i].pid = -1;
 
-		if (ring->dev == NULL)
+		if (engine->dev == NULL)
 			continue;
 
 		error->ring[i].valid = true;
 
-		i915_record_ring_state(dev, error, ring, &error->ring[i]);
+		i915_record_ring_state(dev, error, engine, &error->ring[i]);
 
-		request = i915_gem_find_active_request(ring);
+		request = i915_gem_find_active_request(engine);
 		if (request) {
 			struct i915_address_space *vm;
 
@@ -1051,7 +1051,7 @@  static void i915_gem_record_rings(struct drm_device *dev,
 			if (HAS_BROKEN_CS_TLB(dev_priv->dev))
 				error->ring[i].wa_batchbuffer =
 					i915_error_ggtt_object_create(dev_priv,
-							     ring->scratch.obj);
+							     engine->scratch.obj);
 
 			if (request->pid) {
 				struct task_struct *task;
@@ -1073,11 +1073,11 @@  static void i915_gem_record_rings(struct drm_device *dev,
 			 * executed).
 			 */
 			if (request)
-				rbuf = request->ctx->engine[ring->id].ringbuf;
+				rbuf = request->ctx->engine[engine->id].ringbuf;
 			else
-				rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
+				rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf;
 		} else
-			rbuf = ring->buffer;
+			rbuf = engine->buffer;
 
 		error->ring[i].cpu_ring_head = rbuf->head;
 		error->ring[i].cpu_ring_tail = rbuf->tail;
@@ -1086,18 +1086,19 @@  static void i915_gem_record_rings(struct drm_device *dev,
 			i915_error_ggtt_object_create(dev_priv, rbuf->obj);
 
 		error->ring[i].hws_page =
-			i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
+			i915_error_ggtt_object_create(dev_priv,
+						      engine->status_page.obj);
 
-		if (ring->wa_ctx.obj) {
+		if (engine->wa_ctx.obj) {
 			error->ring[i].wa_ctx =
 				i915_error_ggtt_object_create(dev_priv,
-							      ring->wa_ctx.obj);
+							      engine->wa_ctx.obj);
 		}
 
-		i915_gem_record_active_context(ring, error, &error->ring[i]);
+		i915_gem_record_active_context(engine, error, &error->ring[i]);
 
 		count = 0;
-		list_for_each_entry(request, &ring->request_list, list)
+		list_for_each_entry(request, &engine->request_list, list)
 			count++;
 
 		error->ring[i].num_requests = count;
@@ -1110,7 +1111,7 @@  static void i915_gem_record_rings(struct drm_device *dev,
 		}
 
 		count = 0;
-		list_for_each_entry(request, &ring->request_list, list) {
+		list_for_each_entry(request, &engine->request_list, list) {
 			struct drm_i915_error_request *erq;
 
 			if (count >= error->ring[i].num_requests) {
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index d7543efc8a5e..15a4beb387d4 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -377,7 +377,7 @@  static void guc_init_ctx_desc(struct intel_guc *guc,
 			      struct i915_guc_client *client)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct intel_context *ctx = client->owner;
 	struct guc_context_desc desc;
 	struct sg_table *sg;
@@ -390,8 +390,8 @@  static void guc_init_ctx_desc(struct intel_guc *guc,
 	desc.priority = client->priority;
 	desc.db_id = client->doorbell_id;
 
-	for_each_ring(ring, dev_priv, i) {
-		struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
+	for_each_ring(engine, dev_priv, i) {
+		struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
 		struct drm_i915_gem_object *obj;
 		uint64_t ctx_desc;
 
@@ -406,14 +406,14 @@  static void guc_init_ctx_desc(struct intel_guc *guc,
 		if (!obj)
 			break;	/* XXX: continue? */
 
-		ctx_desc = intel_lr_context_descriptor(ctx, ring);
+		ctx_desc = intel_lr_context_descriptor(ctx, engine);
 		lrc->context_desc = (u32)ctx_desc;
 
 		/* The state page is after PPHWSP */
 		lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
 				LRC_STATE_PN * PAGE_SIZE;
 		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
-				(ring->guc_id << GUC_ELC_ENGINE_OFFSET);
+				(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
 
 		obj = ctx->engine[i].ringbuf->obj;
 
@@ -422,7 +422,7 @@  static void guc_init_ctx_desc(struct intel_guc *guc,
 		lrc->ring_next_free_location = lrc->ring_begin;
 		lrc->ring_current_tail_pointer_value = 0;
 
-		desc.engines_used |= (1 << ring->guc_id);
+		desc.engines_used |= (1 << engine->guc_id);
 	}
 
 	WARN_ON(desc.engines_used == 0);
@@ -839,7 +839,7 @@  static void guc_create_ads(struct intel_guc *guc)
 	struct guc_ads *ads;
 	struct guc_policies *policies;
 	struct guc_mmio_reg_state *reg_state;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct page *page;
 	u32 size, i;
 
@@ -867,11 +867,11 @@  static void guc_create_ads(struct intel_guc *guc)
 	 * so its address won't change after we've told the GuC where
 	 * to find it.
 	 */
-	ring = &dev_priv->ring[RCS];
-	ads->golden_context_lrca = ring->status_page.gfx_addr;
+	engine = &dev_priv->ring[RCS];
+	ads->golden_context_lrca = engine->status_page.gfx_addr;
 
-	for_each_ring(ring, dev_priv, i)
-		ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);
+	for_each_ring(engine, dev_priv, i)
+		ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
 
 	/* GuC scheduling policies */
 	policies = (void *)ads + sizeof(struct guc_ads);
@@ -883,12 +883,12 @@  static void guc_create_ads(struct intel_guc *guc)
 	/* MMIO reg state */
 	reg_state = (void *)policies + sizeof(struct guc_policies);
 
-	for_each_ring(ring, dev_priv, i) {
-		reg_state->mmio_white_list[ring->guc_id].mmio_start =
-			ring->mmio_base + GUC_MMIO_WHITE_LIST_START;
+	for_each_ring(engine, dev_priv, i) {
+		reg_state->mmio_white_list[engine->guc_id].mmio_start =
+			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
 
 		/* Nothing to be saved or restored for now. */
-		reg_state->mmio_white_list[ring->guc_id].count = 0;
+		reg_state->mmio_white_list[engine->guc_id].count = 0;
 	}
 
 	ads->reg_state_addr = ads->scheduler_policies +
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 53e5104964b3..f172de0a61bf 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1079,11 +1079,11 @@  static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
 
 static bool any_waiters(struct drm_i915_private *dev_priv)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i;
 
-	for_each_ring(ring, dev_priv, i)
-		if (ring->irq_refcount)
+	for_each_ring(engine, dev_priv, i)
+		if (engine->irq_refcount)
 			return true;
 
 	return false;
@@ -2449,7 +2449,7 @@  static irqreturn_t gen8_irq_handler(int irq, void *arg)
 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
 			       bool reset_completed)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i;
 
 	/*
@@ -2460,8 +2460,8 @@  static void i915_error_wake_up(struct drm_i915_private *dev_priv,
 	 */
 
 	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
-	for_each_ring(ring, dev_priv, i)
-		wake_up_all(&ring->irq_queue);
+	for_each_ring(engine, dev_priv, i)
+		wake_up_all(&engine->irq_queue);
 
 	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
 	wake_up_all(&dev_priv->pending_flip_queue);
@@ -2956,11 +2956,11 @@  static int semaphore_passed(struct intel_engine_cs *ring)
 
 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i;
 
-	for_each_ring(ring, dev_priv, i)
-		ring->hangcheck.deadlock = 0;
+	for_each_ring(engine, dev_priv, i)
+		engine->hangcheck.deadlock = 0;
 }
 
 static bool subunits_stuck(struct intel_engine_cs *ring)
@@ -3071,7 +3071,7 @@  static void i915_hangcheck_elapsed(struct work_struct *work)
 		container_of(work, typeof(*dev_priv),
 			     gpu_error.hangcheck_work.work);
 	struct drm_device *dev = dev_priv->dev;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i;
 	int busy_count = 0, rings_hung = 0;
 	bool stuck[I915_NUM_RINGS] = { 0 };
@@ -3096,33 +3096,33 @@  static void i915_hangcheck_elapsed(struct work_struct *work)
 	 */
 	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_ring(engine, dev_priv, i) {
 		u64 acthd;
 		u32 seqno;
 		bool busy = true;
 
 		semaphore_clear_deadlocks(dev_priv);
 
-		seqno = ring->get_seqno(ring, false);
-		acthd = intel_ring_get_active_head(ring);
+		seqno = engine->get_seqno(engine, false);
+		acthd = intel_ring_get_active_head(engine);
 
-		if (ring->hangcheck.seqno == seqno) {
-			if (ring_idle(ring, seqno)) {
-				ring->hangcheck.action = HANGCHECK_IDLE;
+		if (engine->hangcheck.seqno == seqno) {
+			if (ring_idle(engine, seqno)) {
+				engine->hangcheck.action = HANGCHECK_IDLE;
 
-				if (waitqueue_active(&ring->irq_queue)) {
+				if (waitqueue_active(&engine->irq_queue)) {
 					/* Issue a wake-up to catch stuck h/w. */
-					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
-						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
+					if (!test_and_set_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings)) {
+						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(engine)))
 							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
-								  ring->name);
+								  engine->name);
 						else
 							DRM_INFO("Fake missed irq on %s\n",
-								 ring->name);
-						wake_up_all(&ring->irq_queue);
+								 engine->name);
+						wake_up_all(&engine->irq_queue);
 					}
 					/* Safeguard against driver failure */
-					ring->hangcheck.score += BUSY;
+					engine->hangcheck.score += BUSY;
 				} else
 					busy = false;
 			} else {
@@ -3141,53 +3141,53 @@  static void i915_hangcheck_elapsed(struct work_struct *work)
 				 * being repeatedly kicked and so responsible
 				 * for stalling the machine.
 				 */
-				ring->hangcheck.action = ring_stuck(ring,
-								    acthd);
+				engine->hangcheck.action = ring_stuck(engine,
+								      acthd);
 
-				switch (ring->hangcheck.action) {
+				switch (engine->hangcheck.action) {
 				case HANGCHECK_IDLE:
 				case HANGCHECK_WAIT:
 					break;
 				case HANGCHECK_ACTIVE:
-					ring->hangcheck.score += BUSY;
+					engine->hangcheck.score += BUSY;
 					break;
 				case HANGCHECK_KICK:
-					ring->hangcheck.score += KICK;
+					engine->hangcheck.score += KICK;
 					break;
 				case HANGCHECK_HUNG:
-					ring->hangcheck.score += HUNG;
+					engine->hangcheck.score += HUNG;
 					stuck[i] = true;
 					break;
 				}
 			}
 		} else {
-			ring->hangcheck.action = HANGCHECK_ACTIVE;
+			engine->hangcheck.action = HANGCHECK_ACTIVE;
 
 			/* Gradually reduce the count so that we catch DoS
 			 * attempts across multiple batches.
 			 */
-			if (ring->hangcheck.score > 0)
-				ring->hangcheck.score -= ACTIVE_DECAY;
-			if (ring->hangcheck.score < 0)
-				ring->hangcheck.score = 0;
+			if (engine->hangcheck.score > 0)
+				engine->hangcheck.score -= ACTIVE_DECAY;
+			if (engine->hangcheck.score < 0)
+				engine->hangcheck.score = 0;
 
 			/* Clear head and subunit states on seqno movement */
-			ring->hangcheck.acthd = 0;
+			engine->hangcheck.acthd = 0;
 
-			memset(ring->hangcheck.instdone, 0,
-			       sizeof(ring->hangcheck.instdone));
+			memset(engine->hangcheck.instdone, 0,
+			       sizeof(engine->hangcheck.instdone));
 		}
 
-		ring->hangcheck.seqno = seqno;
-		ring->hangcheck.acthd = acthd;
+		engine->hangcheck.seqno = seqno;
+		engine->hangcheck.acthd = acthd;
 		busy_count += busy;
 	}
 
-	for_each_ring(ring, dev_priv, i) {
-		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
+	for_each_ring(engine, dev_priv, i) {
+		if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
 			DRM_INFO("%s on %s\n",
 				 stuck[i] ? "stuck" : "no progress",
-				 ring->name);
+				 engine->name);
 			rings_hung++;
 		}
 	}
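
The hangcheck hunks above are easier to follow as a per-engine score
machine: each sample where the seqno has not advanced adds BUSY, KICK or
HUNG to engine->hangcheck.score depending on how stuck ACTHD looks, seqno
progress decays the score, and a score reaching HANGCHECK_SCORE_RING_HUNG
declares the engine hung. Compressed sketch (no_seqno_progress() is a
made-up helper standing in for the seqno comparison; the constants are
defined earlier in i915_irq.c):

	if (no_seqno_progress(engine)) {
		switch (engine->hangcheck.action) {
		case HANGCHECK_ACTIVE:	engine->hangcheck.score += BUSY; break;
		case HANGCHECK_KICK:	engine->hangcheck.score += KICK; break;
		case HANGCHECK_HUNG:	engine->hangcheck.score += HUNG; break;
		default:		break;
		}
	} else if (engine->hangcheck.score > 0) {
		engine->hangcheck.score -= ACTIVE_DECAY;
		if (engine->hangcheck.score < 0)
			engine->hangcheck.score = 0;
	}
	/* engine is declared hung once score >= HANGCHECK_SCORE_RING_HUNG */
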
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ce55f0b683c6..d7f8674bc87a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10976,7 +10976,7 @@  static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -10992,13 +10992,13 @@  static int intel_gen2_queue_flip(struct drm_device *dev,
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(engine, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
-	intel_ring_emit(ring, 0); /* aux display base address, unused */
+	intel_ring_emit(engine, fb->pitches[0]);
+	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+	intel_ring_emit(engine, 0); /* aux display base address, unused */
 
 	intel_mark_page_flip_active(intel_crtc->unpin_work);
 	return 0;
@@ -11011,7 +11011,7 @@  static int intel_gen3_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -11024,13 +11024,13 @@  static int intel_gen3_queue_flip(struct drm_device *dev,
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
+	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
-	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(engine, fb->pitches[0]);
+	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+	intel_ring_emit(engine, MI_NOOP);
 
 	intel_mark_page_flip_active(intel_crtc->unpin_work);
 	return 0;
@@ -11043,7 +11043,7 @@  static int intel_gen4_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11057,10 +11057,10 @@  static int intel_gen4_queue_flip(struct drm_device *dev,
 	 * Display Registers (which do not change across a page-flip)
 	 * so we need only reprogram the base address.
 	 */
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+	intel_ring_emit(engine, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
+	intel_ring_emit(engine, fb->pitches[0]);
+	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset |
 			obj->tiling_mode);
 
 	/* XXX Enabling the panel-fitter across page-flip is so far
@@ -11069,7 +11069,7 @@  static int intel_gen4_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	intel_ring_emit(engine, pf | pipesrc);
 
 	intel_mark_page_flip_active(intel_crtc->unpin_work);
 	return 0;
@@ -11082,7 +11082,7 @@  static int intel_gen6_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11092,10 +11092,10 @@  static int intel_gen6_queue_flip(struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+	intel_ring_emit(engine, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
-	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
+	intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
+	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
 
 	/* Contrary to the suggestions in the documentation,
 	 * "Enable Panel Fitter" does not seem to be required when page
@@ -11105,7 +11105,7 @@  static int intel_gen6_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	intel_ring_emit(engine, pf | pipesrc);
 
 	intel_mark_page_flip_active(intel_crtc->unpin_work);
 	return 0;
@@ -11118,7 +11118,7 @@  static int intel_gen7_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t plane_bit = 0;
 	int len, ret;
@@ -11139,7 +11139,7 @@  static int intel_gen7_queue_flip(struct drm_device *dev,
 	}
 
 	len = 4;
-	if (ring->id == RCS) {
+	if (engine->id == RCS) {
 		len += 6;
 		/*
 		 * On Gen 8, SRM is now taking an extra dword to accommodate
@@ -11177,30 +11177,30 @@  static int intel_gen7_queue_flip(struct drm_device *dev,
 	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
 	 */
-	if (ring->id == RCS) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
-					DERRMR_PIPEB_PRI_FLIP_DONE |
-					DERRMR_PIPEC_PRI_FLIP_DONE));
+	if (engine->id == RCS) {
+		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(engine, DERRMR);
+		intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+					  DERRMR_PIPEB_PRI_FLIP_DONE |
+					  DERRMR_PIPEC_PRI_FLIP_DONE));
 		if (IS_GEN8(dev))
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
-					      MI_SRM_LRM_GLOBAL_GTT);
+			intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
+						MI_SRM_LRM_GLOBAL_GTT);
 		else
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
-					      MI_SRM_LRM_GLOBAL_GTT);
+			intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
+						MI_SRM_LRM_GLOBAL_GTT);
-		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+		intel_ring_emit_reg(engine, DERRMR);
+		intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
 		if (IS_GEN8(dev)) {
-			intel_ring_emit(ring, 0);
-			intel_ring_emit(ring, MI_NOOP);
+			intel_ring_emit(engine, 0);
+			intel_ring_emit(engine, MI_NOOP);
 		}
 	}
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
-	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
-	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
-	intel_ring_emit(ring, (MI_NOOP));
+	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
+	intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
+	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+	intel_ring_emit(engine, (MI_NOOP));
 
 	intel_mark_page_flip_active(intel_crtc->unpin_work);
 	return 0;
@@ -11480,7 +11480,7 @@  static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct drm_plane *primary = crtc->primary;
 	enum pipe pipe = intel_crtc->pipe;
 	struct intel_unpin_work *work;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	bool mmio_flip;
 	struct drm_i915_gem_request *request = NULL;
 	int ret;
@@ -11567,21 +11567,21 @@  static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
 
 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
-		ring = &dev_priv->ring[BCS];
+		engine = &dev_priv->ring[BCS];
 		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
 			/* vlv: DISPLAY_FLIP fails to change tiling */
-			ring = NULL;
+			engine = NULL;
 	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
-		ring = &dev_priv->ring[BCS];
+		engine = &dev_priv->ring[BCS];
 	} else if (INTEL_INFO(dev)->gen >= 7) {
-		ring = i915_gem_request_get_ring(obj->last_write_req);
-		if (ring == NULL || ring->id != RCS)
-			ring = &dev_priv->ring[BCS];
+		engine = i915_gem_request_get_ring(obj->last_write_req);
+		if (engine == NULL || engine->id != RCS)
+			engine = &dev_priv->ring[BCS];
 	} else {
-		ring = &dev_priv->ring[RCS];
+		engine = &dev_priv->ring[RCS];
 	}
 
-	mmio_flip = use_mmio_flip(ring, obj);
+	mmio_flip = use_mmio_flip(engine, obj);
 
 	/* When using CS flips, we want to emit semaphores between rings.
 	 * However, when using mmio flips we will create a task to do the
@@ -11589,7 +11589,7 @@  static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	 * into the display plane and skip any waits.
 	 */
 	if (!mmio_flip) {
-		ret = i915_gem_object_sync(obj, ring, &request);
+		ret = i915_gem_object_sync(obj, engine, &request);
 		if (ret)
 			goto cleanup_pending;
 	}
@@ -11611,7 +11611,7 @@  static int intel_crtc_page_flip(struct drm_crtc *crtc,
 					obj->last_write_req);
 	} else {
 		if (!request) {
-			request = i915_gem_request_alloc(ring, NULL);
+			request = i915_gem_request_alloc(engine, NULL);
 			if (IS_ERR(request)) {
 				ret = PTR_ERR(request);
 				goto cleanup_unpin;
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 82a3c03fbc0e..fc2c5188b095 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -81,14 +81,14 @@  const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
 
 static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i, irqs;
 
 	/* tell all command streamers NOT to forward interrupts and vblank to GuC */
 	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
 	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
-	for_each_ring(ring, dev_priv, i)
-		I915_WRITE(RING_MODE_GEN7(ring), irqs);
+	for_each_ring(engine, dev_priv, i)
+		I915_WRITE(RING_MODE_GEN7(engine), irqs);
 
 	/* route all GT interrupts to the host */
 	I915_WRITE(GUC_BCS_RCS_IER, 0);
@@ -98,14 +98,14 @@  static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
 
 static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i, irqs;
 
 	/* tell all command streamers to forward interrupts and vblank to GuC */
 	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
 	irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
-	for_each_ring(ring, dev_priv, i)
-		I915_WRITE(RING_MODE_GEN7(ring), irqs);
+	for_each_ring(engine, dev_priv, i)
+		I915_WRITE(RING_MODE_GEN7(engine), irqs);
 
 	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
 	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6fcbf6bb0479..448c68e69194 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -360,8 +360,8 @@  static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
 				 struct drm_i915_gem_request *rq1)
 {
 
-	struct intel_engine_cs *ring = rq0->ring;
-	struct drm_device *dev = ring->dev;
+	struct intel_engine_cs *engine = rq0->ring;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint64_t desc[2];
 
@@ -376,15 +376,15 @@  static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
 	rq0->elsp_submitted++;
 
 	/* You must always write both descriptors in the order below. */
-	I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
-	I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));
+	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
+	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
 
-	I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
+	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
 	/* The context is automatically loaded after the following */
-	I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
+	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));
 
 	/* ELSP is a wo register, use another nearby reg for posting */
-	POSTING_READ_FW(RING_EXECLIST_STATUS_LO(ring));
+	POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
 }
 
 static void
@@ -398,9 +398,9 @@  execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 
 static void execlists_update_context(struct drm_i915_gem_request *rq)
 {
-	struct intel_engine_cs *ring = rq->ring;
+	struct intel_engine_cs *engine = rq->ring;
 	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
-	uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;
+	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
 
 	reg_state[CTX_RING_TAIL+1] = rq->tail;
 
@@ -609,25 +609,25 @@  void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 
 static void execlists_context_queue(struct drm_i915_gem_request *request)
 {
-	struct intel_engine_cs *ring = request->ring;
+	struct intel_engine_cs *engine = request->ring;
 	struct drm_i915_gem_request *cursor;
 	int num_elements = 0;
 
 	if (request->ctx != request->i915->kernel_context)
-		intel_lr_context_pin(request->ctx, ring);
+		intel_lr_context_pin(request->ctx, engine);
 
 	i915_gem_request_reference(request);
 
-	spin_lock_irq(&ring->execlist_lock);
+	spin_lock_irq(&engine->execlist_lock);
 
-	list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
+	list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
 		if (++num_elements > 2)
 			break;
 
 	if (num_elements > 2) {
 		struct drm_i915_gem_request *tail_req;
 
-		tail_req = list_last_entry(&ring->execlist_queue,
+		tail_req = list_last_entry(&engine->execlist_queue,
 					   struct drm_i915_gem_request,
 					   execlist_link);
 
@@ -635,32 +635,32 @@  static void execlists_context_queue(struct drm_i915_gem_request *request)
 			WARN(tail_req->elsp_submitted != 0,
 				"More than 2 already-submitted reqs queued\n");
 			list_move_tail(&tail_req->execlist_link,
-				       &ring->execlist_retired_req_list);
+				       &engine->execlist_retired_req_list);
 		}
 	}
 
-	list_add_tail(&request->execlist_link, &ring->execlist_queue);
+	list_add_tail(&request->execlist_link, &engine->execlist_queue);
 	if (num_elements == 0)
-		execlists_context_unqueue(ring);
+		execlists_context_unqueue(engine);
 
-	spin_unlock_irq(&ring->execlist_lock);
+	spin_unlock_irq(&engine->execlist_lock);
 }
 
 static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	uint32_t flush_domains;
 	int ret;
 
 	flush_domains = 0;
-	if (ring->gpu_caches_dirty)
+	if (engine->gpu_caches_dirty)
 		flush_domains = I915_GEM_GPU_DOMAINS;
 
-	ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
+	ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 	if (ret)
 		return ret;
 
-	ring->gpu_caches_dirty = false;
+	engine->gpu_caches_dirty = false;
 	return 0;
 }
 
@@ -726,7 +726,7 @@  static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
 				       int bytes)
 {
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	struct drm_i915_gem_request *target;
 	unsigned space;
 	int ret;
@@ -737,7 +737,7 @@  static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
 	/* The whole point of reserving space is to not wait! */
 	WARN_ON(ringbuf->reserved_in_use);
 
-	list_for_each_entry(target, &ring->request_list, list) {
+	list_for_each_entry(target, &engine->request_list, list) {
 		/*
 		 * The request queue is per-engine, so can contain requests
 		 * from multiple ringbuffers. Here, we must ignore any that
@@ -753,7 +753,7 @@  static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
 			break;
 	}
 
-	if (WARN_ON(&target->list == &ring->request_list))
+	if (WARN_ON(&target->list == &engine->request_list))
 		return -ENOSPC;
 
 	ret = i915_wait_request(target);
@@ -947,9 +947,9 @@  int intel_execlists_submission(struct i915_execbuffer_params *params,
 			       struct list_head *vmas)
 {
 	struct drm_device       *dev = params->dev;
-	struct intel_engine_cs  *ring = params->ring;
+	struct intel_engine_cs  *engine = params->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
+	struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
 	u64 exec_start;
 	int instp_mode;
 	u32 instp_mask;
@@ -961,7 +961,7 @@  int intel_execlists_submission(struct i915_execbuffer_params *params,
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+		if (instp_mode != 0 && engine != &dev_priv->ring[RCS]) {
 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
 			return -EINVAL;
 		}
@@ -990,7 +990,7 @@  int intel_execlists_submission(struct i915_execbuffer_params *params,
 	if (ret)
 		return ret;
 
-	if (ring == &dev_priv->ring[RCS] &&
+	if (engine == &dev_priv->ring[RCS] &&
 	    instp_mode != dev_priv->relative_constants_mode) {
 		ret = intel_logical_ring_begin(params->request, 4);
 		if (ret)
@@ -1008,7 +1008,7 @@  int intel_execlists_submission(struct i915_execbuffer_params *params,
 	exec_start = params->batch_obj_vm_offset +
 		     args->batch_start_offset;
 
-	ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
+	ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
 	if (ret)
 		return ret;
 
@@ -1071,17 +1071,17 @@  void intel_logical_ring_stop(struct intel_engine_cs *ring)
 
 int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	int ret;
 
-	if (!ring->gpu_caches_dirty)
+	if (!engine->gpu_caches_dirty)
 		return 0;
 
-	ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
+	ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
-	ring->gpu_caches_dirty = false;
+	engine->gpu_caches_dirty = false;
 	return 0;
 }
 
@@ -1172,16 +1172,16 @@  void intel_lr_context_unpin(struct intel_context *ctx,
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_workarounds *w = &dev_priv->workarounds;
 
 	if (w->count == 0)
 		return 0;
 
-	ring->gpu_caches_dirty = true;
+	engine->gpu_caches_dirty = true;
 	ret = logical_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
@@ -1199,7 +1199,7 @@  static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_logical_ring_advance(ringbuf);
 
-	ring->gpu_caches_dirty = true;
+	engine->gpu_caches_dirty = true;
 	ret = logical_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
@@ -1643,7 +1643,7 @@  static int gen9_init_render_ring(struct intel_engine_cs *ring)
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
 	int i, ret;
@@ -1656,9 +1656,11 @@  static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-		intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+		intel_logical_ring_emit_reg(ringbuf,
+					    GEN8_RING_PDP_UDW(engine, i));
 		intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
-		intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+		intel_logical_ring_emit_reg(ringbuf,
+					    GEN8_RING_PDP_LDW(engine, i));
 		intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
 	}
 
@@ -1748,8 +1750,8 @@  static int gen8_emit_flush(struct drm_i915_gem_request *request,
 			   u32 unused)
 {
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
-	struct intel_engine_cs *ring = ringbuf->ring;
-	struct drm_device *dev = ring->dev;
+	struct intel_engine_cs *engine = ringbuf->ring;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t cmd;
 	int ret;
@@ -1769,7 +1771,7 @@  static int gen8_emit_flush(struct drm_i915_gem_request *request,
 
 	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
 		cmd |= MI_INVALIDATE_TLB;
-		if (ring == &dev_priv->ring[VCS])
+		if (engine == &dev_priv->ring[VCS])
 			cmd |= MI_INVALIDATE_BSD;
 	}
 
@@ -1789,8 +1791,8 @@  static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 				  u32 flush_domains)
 {
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
-	struct intel_engine_cs *ring = ringbuf->ring;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	struct intel_engine_cs *engine = ringbuf->ring;
+	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false;
 	u32 flags = 0;
 	int ret;
@@ -1818,7 +1820,7 @@  static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
 		 * pipe control.
 		 */
-		if (IS_GEN9(ring->dev))
+		if (IS_GEN9(engine->dev))
 			vf_flush_wa = true;
 	}
 
@@ -2109,38 +2111,38 @@  error:
 static int logical_render_ring_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->ring[RCS];
 	int ret;
 
-	ring->name = "render ring";
-	ring->id = RCS;
-	ring->exec_id = I915_EXEC_RENDER;
-	ring->guc_id = GUC_RENDER_ENGINE;
-	ring->mmio_base = RENDER_RING_BASE;
+	engine->name = "render ring";
+	engine->id = RCS;
+	engine->exec_id = I915_EXEC_RENDER;
+	engine->guc_id = GUC_RENDER_ENGINE;
+	engine->mmio_base = RENDER_RING_BASE;
 
-	logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
+	logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT);
 	if (HAS_L3_DPF(dev))
-		ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+		engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
-	logical_ring_default_vfuncs(dev, ring);
+	logical_ring_default_vfuncs(dev, engine);
 
 	/* Override some for render ring. */
 	if (INTEL_INFO(dev)->gen >= 9)
-		ring->init_hw = gen9_init_render_ring;
+		engine->init_hw = gen9_init_render_ring;
 	else
-		ring->init_hw = gen8_init_render_ring;
-	ring->init_context = gen8_init_rcs_context;
-	ring->cleanup = intel_fini_pipe_control;
-	ring->emit_flush = gen8_emit_flush_render;
-	ring->emit_request = gen8_emit_request_render;
+		engine->init_hw = gen8_init_render_ring;
+	engine->init_context = gen8_init_rcs_context;
+	engine->cleanup = intel_fini_pipe_control;
+	engine->emit_flush = gen8_emit_flush_render;
+	engine->emit_request = gen8_emit_request_render;
 
-	ring->dev = dev;
+	engine->dev = dev;
 
-	ret = intel_init_pipe_control(ring);
+	ret = intel_init_pipe_control(engine);
 	if (ret)
 		return ret;
 
-	ret = intel_init_workaround_bb(ring);
+	ret = intel_init_workaround_bb(engine);
 	if (ret) {
 		/*
 		 * We continue even if we fail to initialize WA batch
@@ -2151,9 +2153,9 @@  static int logical_render_ring_init(struct drm_device *dev)
 			  ret);
 	}
 
-	ret = logical_ring_init(dev, ring);
+	ret = logical_ring_init(dev, engine);
 	if (ret) {
-		lrc_destroy_wa_ctx_obj(ring);
+		lrc_destroy_wa_ctx_obj(engine);
 	}
 
 	return ret;
@@ -2162,69 +2164,69 @@  static int logical_render_ring_init(struct drm_device *dev)
 static int logical_bsd_ring_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[VCS];
+	struct intel_engine_cs *engine = &dev_priv->ring[VCS];
 
-	ring->name = "bsd ring";
-	ring->id = VCS;
-	ring->exec_id = I915_EXEC_BSD;
-	ring->guc_id = GUC_VIDEO_ENGINE;
-	ring->mmio_base = GEN6_BSD_RING_BASE;
+	engine->name = "bsd ring";
+	engine->id = VCS;
+	engine->exec_id = I915_EXEC_BSD;
+	engine->guc_id = GUC_VIDEO_ENGINE;
+	engine->mmio_base = GEN6_BSD_RING_BASE;
 
-	logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
-	logical_ring_default_vfuncs(dev, ring);
+	logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT);
+	logical_ring_default_vfuncs(dev, engine);
 
-	return logical_ring_init(dev, ring);
+	return logical_ring_init(dev, engine);
 }
 
 static int logical_bsd2_ring_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
+	struct intel_engine_cs *engine = &dev_priv->ring[VCS2];
 
-	ring->name = "bsd2 ring";
-	ring->id = VCS2;
-	ring->exec_id = I915_EXEC_BSD;
-	ring->guc_id = GUC_VIDEO_ENGINE2;
-	ring->mmio_base = GEN8_BSD2_RING_BASE;
+	engine->name = "bsd2 ring";
+	engine->id = VCS2;
+	engine->exec_id = I915_EXEC_BSD;
+	engine->guc_id = GUC_VIDEO_ENGINE2;
+	engine->mmio_base = GEN8_BSD2_RING_BASE;
 
-	logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT);
-	logical_ring_default_vfuncs(dev, ring);
+	logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT);
+	logical_ring_default_vfuncs(dev, engine);
 
-	return logical_ring_init(dev, ring);
+	return logical_ring_init(dev, engine);
 }
 
 static int logical_blt_ring_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[BCS];
+	struct intel_engine_cs *engine = &dev_priv->ring[BCS];
 
-	ring->name = "blitter ring";
-	ring->id = BCS;
-	ring->exec_id = I915_EXEC_BLT;
-	ring->guc_id = GUC_BLITTER_ENGINE;
-	ring->mmio_base = BLT_RING_BASE;
+	engine->name = "blitter ring";
+	engine->id = BCS;
+	engine->exec_id = I915_EXEC_BLT;
+	engine->guc_id = GUC_BLITTER_ENGINE;
+	engine->mmio_base = BLT_RING_BASE;
 
-	logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
-	logical_ring_default_vfuncs(dev, ring);
+	logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT);
+	logical_ring_default_vfuncs(dev, engine);
 
-	return logical_ring_init(dev, ring);
+	return logical_ring_init(dev, engine);
 }
 
 static int logical_vebox_ring_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[VECS];
+	struct intel_engine_cs *engine = &dev_priv->ring[VECS];
 
-	ring->name = "video enhancement ring";
-	ring->id = VECS;
-	ring->exec_id = I915_EXEC_VEBOX;
-	ring->guc_id = GUC_VIDEOENHANCE_ENGINE;
-	ring->mmio_base = VEBOX_RING_BASE;
+	engine->name = "video enhancement ring";
+	engine->id = VECS;
+	engine->exec_id = I915_EXEC_VEBOX;
+	engine->guc_id = GUC_VIDEOENHANCE_ENGINE;
+	engine->mmio_base = VEBOX_RING_BASE;
 
-	logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
-	logical_ring_default_vfuncs(dev, ring);
+	logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT);
+	logical_ring_default_vfuncs(dev, engine);
 
-	return logical_ring_init(dev, ring);
+	return logical_ring_init(dev, engine);
 }
 
 /**
@@ -2639,14 +2641,14 @@  void intel_lr_context_reset(struct drm_device *dev,
 			struct intel_context *ctx)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i;
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_ring(engine, dev_priv, i) {
 		struct drm_i915_gem_object *ctx_obj =
-				ctx->engine[ring->id].state;
+				ctx->engine[engine->id].state;
 		struct intel_ringbuffer *ringbuf =
-				ctx->engine[ring->id].ringbuf;
+				ctx->engine[engine->id].ringbuf;
 		uint32_t *reg_state;
 		struct page *page;
 
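
For readers outside i915, the for_each_ring() loop above walks dev_priv->ring[], skipping engines that were never initialized on the platform. A minimal sketch of such an iterator, assuming intel_ring_initialized() as the liveness test (the real definition lives in i915_drv.h and may differ):

	/* Sketch only: iterate initialized engines; the empty-if/else
	 * form keeps the macro safe against dangling-else surprises. */
	#define for_each_ring(engine__, dev_priv__, i__) \
		for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
			if (!(((engine__) = &(dev_priv__)->ring[(i__)]), \
			      intel_ring_initialized((engine__)))) {} else
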
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index fed7bea19cc9..d55925987ebf 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -324,11 +324,11 @@  int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
 
 	if (get_mocs_settings(req->ring->dev, &t)) {
 		struct drm_i915_private *dev_priv = req->i915;
-		struct intel_engine_cs *ring;
+		struct intel_engine_cs *engine;
 		enum intel_ring_id ring_id;
 
 		/* Program the control registers */
-		for_each_ring(ring, dev_priv, ring_id) {
+		for_each_ring(engine, dev_priv, ring_id) {
 			ret = emit_mocs_control_table(req, &t, ring_id);
 			if (ret)
 				return ret;
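
The loop index here is an enum intel_ring_id rather than a plain int; the ids used throughout this patch, in sketch form (ordering is an assumption inferred from the ring[] usages above; the driver defines the actual ring count separately):

	enum intel_ring_id {
		RCS = 0,	/* render */
		VCS,		/* bsd / video decode */
		BCS,		/* blitter */
		VECS,		/* video enhancement */
		VCS2,		/* second bsd engine, gen8+ */
	};
	#define I915_NUM_RINGS 5	/* assumed count used by for_each_ring */
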
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 9168413fe204..13b27632636e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -233,14 +233,14 @@  static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->ring[RCS];
 	struct drm_i915_gem_request *req;
 	int ret;
 
 	WARN_ON(overlay->active);
 	WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
-	req = i915_gem_request_alloc(ring, NULL);
+	req = i915_gem_request_alloc(engine, NULL);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -252,11 +252,11 @@  static int intel_overlay_on(struct intel_overlay *overlay)
 
 	overlay->active = true;
 
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+	intel_ring_emit(engine, overlay->flip_addr | OFC_UPDATE);
+	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	return intel_overlay_do_wait_request(overlay, req, NULL);
 }
@@ -267,7 +267,7 @@  static int intel_overlay_continue(struct intel_overlay *overlay,
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->ring[RCS];
 	struct drm_i915_gem_request *req;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
@@ -283,7 +283,7 @@  static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	req = i915_gem_request_alloc(ring, NULL);
+	req = i915_gem_request_alloc(engine, NULL);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -293,9 +293,9 @@  static int intel_overlay_continue(struct intel_overlay *overlay,
 		return ret;
 	}
 
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(engine, flip_addr);
+	intel_ring_advance(engine);
 
 	WARN_ON(overlay->last_flip_req);
 	i915_gem_request_assign(&overlay->last_flip_req, req);
@@ -336,7 +336,7 @@  static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->ring[RCS];
 	struct drm_i915_gem_request *req;
 	u32 flip_addr = overlay->flip_addr;
 	int ret;
@@ -349,7 +349,7 @@  static int intel_overlay_off(struct intel_overlay *overlay)
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	req = i915_gem_request_alloc(ring, NULL);
+	req = i915_gem_request_alloc(engine, NULL);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -360,22 +360,23 @@  static int intel_overlay_off(struct intel_overlay *overlay)
 	}
 
 	/* wait for overlay to go idle */
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(engine, flip_addr);
+	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	/* turn overlay off */
 	if (IS_I830(dev)) {
 		/* Workaround: Don't disable the overlay fully, since otherwise
 		 * it dies on the next OVERLAY_ON cmd. */
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(engine, MI_NOOP);
 	} else {
-		intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-		intel_ring_emit(ring, flip_addr);
-		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+		intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+		intel_ring_emit(engine, flip_addr);
+		intel_ring_emit(engine,
+				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
 	return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
 }
@@ -408,7 +409,7 @@  static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->ring[RCS];
 	int ret;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -423,7 +424,7 @@  static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 		/* synchronous slowpath */
 		struct drm_i915_gem_request *req;
 
-		req = i915_gem_request_alloc(ring, NULL);
+		req = i915_gem_request_alloc(engine, NULL);
 		if (IS_ERR(req))
 			return PTR_ERR(req);
 
@@ -433,9 +434,10 @@  static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 			return ret;
 		}
 
-		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		intel_ring_emit(engine,
+				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_advance(engine);
 
 		ret = intel_overlay_do_wait_request(overlay, req,
 						    intel_overlay_release_old_vid_tail);
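
Every overlay hunk follows the same emission contract: intel_ring_begin() reserves a fixed number of dwords on the request's ring, each intel_ring_emit() consumes exactly one, and intel_ring_advance() publishes the new tail. A self-contained sketch of the pattern (the helper name is hypothetical; the reservation count must match the emit count):

	static int emit_wait_for_flip(struct drm_i915_gem_request *req)
	{
		struct intel_engine_cs *engine = req->ring;
		int ret;

		ret = intel_ring_begin(req, 2);		/* reserve 2 dwords */
		if (ret)
			return ret;

		intel_ring_emit(engine, MI_WAIT_FOR_EVENT |
					MI_WAIT_FOR_OVERLAY_FLIP);
		intel_ring_emit(engine, MI_NOOP);	/* pad to even length */
		intel_ring_advance(engine);		/* expose the new tail */

		return 0;
	}
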
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d7aef17bf0f9..c54a7df7c2c9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4815,7 +4815,7 @@  static void gen9_enable_rps(struct drm_device *dev)
 static void gen9_enable_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	uint32_t rc6_mask = 0;
 	int unused;
 
@@ -4838,8 +4838,8 @@  static void gen9_enable_rc6(struct drm_device *dev)
 		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-	for_each_ring(ring, dev_priv, unused)
-		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	for_each_ring(engine, dev_priv, unused)
+		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 
 	if (HAS_GUC_UCODE(dev))
 		I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
@@ -4885,7 +4885,7 @@  static void gen9_enable_rc6(struct drm_device *dev)
 static void gen8_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	uint32_t rc6_mask = 0;
 	int unused;
 
@@ -4906,8 +4906,8 @@  static void gen8_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-	for_each_ring(ring, dev_priv, unused)
-		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	for_each_ring(engine, dev_priv, unused)
+		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 	if (IS_BROADWELL(dev))
 		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
@@ -4967,7 +4967,7 @@  static void gen8_enable_rps(struct drm_device *dev)
 static void gen6_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
 	u32 gtfifodbg;
 	int rc6_mode;
@@ -5003,8 +5003,8 @@  static void gen6_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 
-	for_each_ring(ring, dev_priv, i)
-		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	for_each_ring(engine, dev_priv, i)
+		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
@@ -5495,7 +5495,7 @@  static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
 static void cherryview_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	u32 gtfifodbg, val, rc6_mode = 0, pcbr;
 	int i;
 
@@ -5522,8 +5522,8 @@  static void cherryview_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
 
-	for_each_ring(ring, dev_priv, i)
-		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	for_each_ring(engine, dev_priv, i)
+		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 
 	/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
@@ -5593,7 +5593,7 @@  static void cherryview_enable_rps(struct drm_device *dev)
 static void valleyview_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	u32 gtfifodbg, val, rc6_mode = 0;
 	int i;
 
@@ -5633,8 +5633,8 @@  static void valleyview_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 
-	for_each_ring(ring, dev_priv, i)
-		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	for_each_ring(engine, dev_priv, i)
+		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 
 	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
 
@@ -6010,7 +6010,7 @@  EXPORT_SYMBOL_GPL(i915_gpu_lower);
 bool i915_gpu_busy(void)
 {
 	struct drm_i915_private *dev_priv;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	bool ret = false;
 	int i;
 
@@ -6019,8 +6019,8 @@  bool i915_gpu_busy(void)
 		goto out_unlock;
 	dev_priv = i915_mch_dev;
 
-	for_each_ring(ring, dev_priv, i)
-		ret |= !list_empty(&ring->request_list);
+	for_each_ring(engine, dev_priv, i)
+		ret |= !list_empty(&engine->request_list);
 
 out_unlock:
 	spin_unlock_irq(&mchdev_lock);
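
Each RPS/RC6 path above programs the same per-engine idle limit; the register macro is parameterized by the engine's mmio base so one loop covers every engine. A hedged sketch (the 0x54 offset is an assumption for illustration; see i915_reg.h for the authoritative value):

	/* Hypothetical shape: one idle-limit register per engine, at a
	 * fixed offset from that engine's mmio base. */
	#define RING_MAX_IDLE(base)	_MMIO((base) + 0x54)

	for_each_ring(engine, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
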
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 45ce45a5e122..688773aaa5e5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -79,7 +79,7 @@  gen2_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	u32 cmd;
 	int ret;
 
@@ -94,9 +94,9 @@  gen2_render_ring_flush(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, cmd);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -106,8 +106,8 @@  gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_device *dev = ring->dev;
+	struct intel_engine_cs *engine = req->ring;
+	struct drm_device *dev = engine->dev;
 	u32 cmd;
 	int ret;
 
@@ -153,9 +153,9 @@  gen4_render_ring_flush(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, cmd);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -200,34 +200,34 @@  gen4_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	struct intel_engine_cs *engine = req->ring;
+	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
 			PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
-	intel_ring_emit(ring, 0); /* low dword */
-	intel_ring_emit(ring, 0); /* high dword */
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+	intel_ring_emit(engine, 0); /* low dword */
+	intel_ring_emit(engine, 0); /* high dword */
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(engine, PIPE_CONTROL_QW_WRITE);
+	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+	intel_ring_emit(engine, 0);
+	intel_ring_emit(engine, 0);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -236,9 +236,9 @@  static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	u32 flags = 0;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -276,11 +276,11 @@  gen6_render_ring_flush(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(engine, flags);
+	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(engine, 0);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -288,19 +288,19 @@  gen6_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
 			      PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, 0);
+	intel_ring_emit(engine, 0);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -309,9 +309,9 @@  static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	u32 flags = 0;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/*
@@ -360,11 +360,11 @@  gen7_render_ring_flush(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(engine, flags);
+	intel_ring_emit(engine, scratch_addr);
+	intel_ring_emit(engine, 0);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -373,20 +373,20 @@  static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(engine, flags);
+	intel_ring_emit(engine, scratch_addr);
+	intel_ring_emit(engine, 0);
+	intel_ring_emit(engine, 0);
+	intel_ring_emit(engine, 0);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -700,15 +700,15 @@  err:
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_device *dev = ring->dev;
+	struct intel_engine_cs *engine = req->ring;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_workarounds *w = &dev_priv->workarounds;
 
 	if (w->count == 0)
 		return 0;
 
-	ring->gpu_caches_dirty = true;
+	engine->gpu_caches_dirty = true;
 	ret = intel_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
@@ -717,16 +717,16 @@  static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(w->count));
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(ring, w->reg[i].addr);
-		intel_ring_emit(ring, w->reg[i].value);
+		intel_ring_emit_reg(engine, w->reg[i].addr);
+		intel_ring_emit(engine, w->reg[i].value);
 	}
-	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(engine, MI_NOOP);
 
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
-	ring->gpu_caches_dirty = true;
+	engine->gpu_caches_dirty = true;
 	ret = intel_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
@@ -1388,22 +1388,23 @@  static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 static int
 gen6_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	int ret;
 
-	if (ring->semaphore.signal)
-		ret = ring->semaphore.signal(req, 4);
+	if (engine->semaphore.signal)
+		ret = engine->semaphore.signal(req, 4);
 	else
 		ret = intel_ring_begin(req, 4);
 
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
-	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	__intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(engine,
+			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+	intel_ring_emit(engine, MI_USER_INTERRUPT);
+	__intel_ring_advance(engine);
 
 	return 0;
 }
@@ -1502,8 +1503,8 @@  do {									\
 static int
 pc_render_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	struct intel_engine_cs *engine = req->ring;
+	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -1518,32 +1519,36 @@  pc_render_add_request(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+	intel_ring_emit(engine,
+			GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
-	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
-	intel_ring_emit(ring, 0);
-	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	intel_ring_emit(engine,
+			engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+	intel_ring_emit(engine, 0);
+	PIPE_CONTROL_FLUSH(engine, scratch_addr);
 	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
-	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	PIPE_CONTROL_FLUSH(engine, scratch_addr);
 	scratch_addr += 2 * CACHELINE_BYTES;
-	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	PIPE_CONTROL_FLUSH(engine, scratch_addr);
 	scratch_addr += 2 * CACHELINE_BYTES;
-	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	PIPE_CONTROL_FLUSH(engine, scratch_addr);
 	scratch_addr += 2 * CACHELINE_BYTES;
-	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	PIPE_CONTROL_FLUSH(engine, scratch_addr);
 	scratch_addr += 2 * CACHELINE_BYTES;
-	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	PIPE_CONTROL_FLUSH(engine, scratch_addr);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+	intel_ring_emit(engine,
+			GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
-	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
-	intel_ring_emit(ring, 0);
-	__intel_ring_advance(ring);
+	intel_ring_emit(engine,
+			engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+	intel_ring_emit(engine, 0);
+	__intel_ring_advance(engine);
 
 	return 0;
 }
@@ -1696,34 +1701,35 @@  bsd_ring_flush(struct drm_i915_gem_request *req,
 	       u32     invalidate_domains,
 	       u32     flush_domains)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_FLUSH);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_FLUSH);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 	return 0;
 }
 
 static int
 i9xx_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
-	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	__intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(engine,
+			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+	intel_ring_emit(engine, MI_USER_INTERRUPT);
+	__intel_ring_advance(engine);
 
 	return 0;
 }
@@ -1857,20 +1863,20 @@  i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 length,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring,
+	intel_ring_emit(engine,
 			MI_BATCH_BUFFER_START |
 			MI_BATCH_GTT |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
 			 0 : MI_BATCH_NON_SECURE_I965));
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, offset);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -1884,8 +1890,8 @@  i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
-	u32 cs_offset = ring->scratch.gtt_offset;
+	struct intel_engine_cs *engine = req->ring;
+	u32 cs_offset = engine->scratch.gtt_offset;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -1893,13 +1899,13 @@  i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 		return ret;
 
 	/* Evict the invalid PTE TLBs */
-	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
-	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
-	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
-	intel_ring_emit(ring, cs_offset);
-	intel_ring_emit(ring, 0xdeadbeef);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+	intel_ring_emit(engine, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+	intel_ring_emit(engine, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+	intel_ring_emit(engine, cs_offset);
+	intel_ring_emit(engine, 0xdeadbeef);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
 		if (len > I830_BATCH_LIMIT)
@@ -1913,16 +1919,17 @@  i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 		 * stable batch scratch bo area (so that the CS never
 		 * stumbles over its tlb invalidation bug) ...
 		 */
-		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
-		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
-		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
-		intel_ring_emit(ring, cs_offset);
-		intel_ring_emit(ring, 4096);
-		intel_ring_emit(ring, offset);
-
-		intel_ring_emit(ring, MI_FLUSH);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		intel_ring_emit(engine, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+		intel_ring_emit(engine,
+				BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
+		intel_ring_emit(engine, DIV_ROUND_UP(len, 4096) << 16 | 4096);
+		intel_ring_emit(engine, cs_offset);
+		intel_ring_emit(engine, 4096);
+		intel_ring_emit(engine, offset);
+
+		intel_ring_emit(engine, MI_FLUSH);
+		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_advance(engine);
 
 		/* ... and execute it. */
 		offset = cs_offset;
@@ -1932,10 +1939,10 @@  i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+	intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+					  0 : MI_BATCH_NON_SECURE));
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -1945,17 +1952,17 @@  i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+	intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+					  0 : MI_BATCH_NON_SECURE));
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -2480,32 +2487,32 @@  static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
 int intel_ring_begin(struct drm_i915_gem_request *req,
 		     int num_dwords)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct drm_i915_private *dev_priv;
 	int ret;
 
 	WARN_ON(req == NULL);
-	ring = req->ring;
-	dev_priv = ring->dev->dev_private;
+	engine = req->ring;
+	dev_priv = engine->dev->dev_private;
 
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
 				   dev_priv->mm.interruptible);
 	if (ret)
 		return ret;
 
-	ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
+	ret = __intel_ring_prepare(engine, num_dwords * sizeof(uint32_t));
 	if (ret)
 		return ret;
 
-	ring->buffer->space -= num_dwords * sizeof(uint32_t);
+	engine->buffer->space -= num_dwords * sizeof(uint32_t);
 	return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
-	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	struct intel_engine_cs *engine = req->ring;
+	int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
 
 	if (num_dwords == 0)
@@ -2517,9 +2524,9 @@  int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 		return ret;
 
 	while (num_dwords--)
-		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(engine, MI_NOOP);
 
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -2576,7 +2583,7 @@  static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 			       u32 invalidate, u32 flush)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	uint32_t cmd;
 	int ret;
 
@@ -2585,7 +2592,7 @@  static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
-	if (INTEL_INFO(ring->dev)->gen >= 8)
+	if (INTEL_INFO(engine->dev)->gen >= 8)
 		cmd += 1;
 
 	/* We always require a command barrier so that subsequent
@@ -2604,16 +2611,17 @@  static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 	if (invalidate & I915_GEM_GPU_DOMAINS)
 		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-	if (INTEL_INFO(ring->dev)->gen >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
+	intel_ring_emit(engine, cmd);
+	intel_ring_emit(engine,
+			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	if (INTEL_INFO(engine->dev)->gen >= 8) {
+		intel_ring_emit(engine, 0); /* upper addr */
+		intel_ring_emit(engine, 0); /* value */
 	} else  {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(engine, 0);
+		intel_ring_emit(engine, MI_NOOP);
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 	return 0;
 }
 
@@ -2622,8 +2630,8 @@  gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
-	bool ppgtt = USES_PPGTT(ring->dev) &&
+	struct intel_engine_cs *engine = req->ring;
+	bool ppgtt = USES_PPGTT(engine->dev) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
@@ -2632,13 +2640,13 @@  gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 		return ret;
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+	intel_ring_emit(engine, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
 			(dispatch_flags & I915_DISPATCH_RS ?
 			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, lower_32_bits(offset));
+	intel_ring_emit(engine, upper_32_bits(offset));
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -2648,22 +2656,22 @@  hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			     u64 offset, u32 len,
 			     unsigned dispatch_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring,
+	intel_ring_emit(engine,
 			MI_BATCH_BUFFER_START |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
 			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
 			(dispatch_flags & I915_DISPATCH_RS ?
 			 MI_BATCH_RESOURCE_STREAMER : 0));
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, offset);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -2673,20 +2681,20 @@  gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring,
+	intel_ring_emit(engine,
 			MI_BATCH_BUFFER_START |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
 			 0 : MI_BATCH_NON_SECURE_I965));
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, offset);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -2696,8 +2704,8 @@  gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_device *dev = ring->dev;
+	struct intel_engine_cs *engine = req->ring;
+	struct drm_device *dev = engine->dev;
 	uint32_t cmd;
 	int ret;
 
@@ -2724,16 +2732,17 @@  static int gen6_ring_flush(struct drm_i915_gem_request *req,
 	 */
 	if (invalidate & I915_GEM_DOMAIN_RENDER)
 		cmd |= MI_INVALIDATE_TLB;
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	intel_ring_emit(engine, cmd);
+	intel_ring_emit(engine,
+			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
 	if (INTEL_INFO(dev)->gen >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
+		intel_ring_emit(engine, 0); /* upper addr */
+		intel_ring_emit(engine, 0); /* value */
 	} else  {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(engine, 0);
+		intel_ring_emit(engine, MI_NOOP);
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -2741,14 +2750,14 @@  static int gen6_ring_flush(struct drm_i915_gem_request *req,
 int intel_init_render_ring_buffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->ring[RCS];
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	ring->name = "render ring";
-	ring->id = RCS;
-	ring->exec_id = I915_EXEC_RENDER;
-	ring->mmio_base = RENDER_RING_BASE;
+	engine->name = "render ring";
+	engine->id = RCS;
+	engine->exec_id = I915_EXEC_RENDER;
+	engine->mmio_base = RENDER_RING_BASE;
 
 	if (INTEL_INFO(dev)->gen >= 8) {
 		if (i915_semaphore_is_enabled(dev)) {
@@ -2768,34 +2777,34 @@  int intel_init_render_ring_buffer(struct drm_device *dev)
 			}
 		}
 
-		ring->init_context = intel_rcs_ctx_init;
-		ring->add_request = gen6_add_request;
-		ring->flush = gen8_render_ring_flush;
-		ring->irq_get = gen8_ring_get_irq;
-		ring->irq_put = gen8_ring_put_irq;
-		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
-		ring->get_seqno = gen6_ring_get_seqno;
-		ring->set_seqno = ring_set_seqno;
+		engine->init_context = intel_rcs_ctx_init;
+		engine->add_request = gen6_add_request;
+		engine->flush = gen8_render_ring_flush;
+		engine->irq_get = gen8_ring_get_irq;
+		engine->irq_put = gen8_ring_put_irq;
+		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+		engine->get_seqno = gen6_ring_get_seqno;
+		engine->set_seqno = ring_set_seqno;
 		if (i915_semaphore_is_enabled(dev)) {
 			WARN_ON(!dev_priv->semaphore_obj);
-			ring->semaphore.sync_to = gen8_ring_sync;
-			ring->semaphore.signal = gen8_rcs_signal;
-			GEN8_RING_SEMAPHORE_INIT;
+			engine->semaphore.sync_to = gen8_ring_sync;
+			engine->semaphore.signal = gen8_rcs_signal;
+			GEN8_RING_SEMAPHORE_INIT(engine);
 		}
 	} else if (INTEL_INFO(dev)->gen >= 6) {
-		ring->init_context = intel_rcs_ctx_init;
-		ring->add_request = gen6_add_request;
-		ring->flush = gen7_render_ring_flush;
+		engine->init_context = intel_rcs_ctx_init;
+		engine->add_request = gen6_add_request;
+		engine->flush = gen7_render_ring_flush;
 		if (INTEL_INFO(dev)->gen == 6)
-			ring->flush = gen6_render_ring_flush;
-		ring->irq_get = gen6_ring_get_irq;
-		ring->irq_put = gen6_ring_put_irq;
-		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
-		ring->get_seqno = gen6_ring_get_seqno;
-		ring->set_seqno = ring_set_seqno;
+			engine->flush = gen6_render_ring_flush;
+		engine->irq_get = gen6_ring_get_irq;
+		engine->irq_put = gen6_ring_put_irq;
+		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+		engine->get_seqno = gen6_ring_get_seqno;
+		engine->set_seqno = ring_set_seqno;
 		if (i915_semaphore_is_enabled(dev)) {
-			ring->semaphore.sync_to = gen6_ring_sync;
-			ring->semaphore.signal = gen6_signal;
+			engine->semaphore.sync_to = gen6_ring_sync;
+			engine->semaphore.signal = gen6_signal;
 			/*
 			 * The current semaphore is only applied on pre-gen8
 			 * platform.  And there is no VCS2 ring on the pre-gen8
@@ -2803,59 +2812,59 @@  int intel_init_render_ring_buffer(struct drm_device *dev)
 			 * initialized as INVALID.  Gen8 will initialize the
 			 * sema between VCS2 and RCS later.
 			 */
-			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
-			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
-			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
-			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
-			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-			ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
-			ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
-			ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
-			ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
-			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+			engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+			engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
+			engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
+			engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
+			engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+			engine->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+			engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
+			engine->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
+			engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
+			engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 		}
 	} else if (IS_GEN5(dev)) {
-		ring->add_request = pc_render_add_request;
-		ring->flush = gen4_render_ring_flush;
-		ring->get_seqno = pc_render_get_seqno;
-		ring->set_seqno = pc_render_set_seqno;
-		ring->irq_get = gen5_ring_get_irq;
-		ring->irq_put = gen5_ring_put_irq;
-		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
+		engine->add_request = pc_render_add_request;
+		engine->flush = gen4_render_ring_flush;
+		engine->get_seqno = pc_render_get_seqno;
+		engine->set_seqno = pc_render_set_seqno;
+		engine->irq_get = gen5_ring_get_irq;
+		engine->irq_put = gen5_ring_put_irq;
+		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
 					GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
 	} else {
-		ring->add_request = i9xx_add_request;
+		engine->add_request = i9xx_add_request;
 		if (INTEL_INFO(dev)->gen < 4)
-			ring->flush = gen2_render_ring_flush;
+			engine->flush = gen2_render_ring_flush;
 		else
-			ring->flush = gen4_render_ring_flush;
-		ring->get_seqno = ring_get_seqno;
-		ring->set_seqno = ring_set_seqno;
+			engine->flush = gen4_render_ring_flush;
+		engine->get_seqno = ring_get_seqno;
+		engine->set_seqno = ring_set_seqno;
 		if (IS_GEN2(dev)) {
-			ring->irq_get = i8xx_ring_get_irq;
-			ring->irq_put = i8xx_ring_put_irq;
+			engine->irq_get = i8xx_ring_get_irq;
+			engine->irq_put = i8xx_ring_put_irq;
 		} else {
-			ring->irq_get = i9xx_ring_get_irq;
-			ring->irq_put = i9xx_ring_put_irq;
+			engine->irq_get = i9xx_ring_get_irq;
+			engine->irq_put = i9xx_ring_put_irq;
 		}
-		ring->irq_enable_mask = I915_USER_INTERRUPT;
+		engine->irq_enable_mask = I915_USER_INTERRUPT;
 	}
-	ring->write_tail = ring_write_tail;
+	engine->write_tail = ring_write_tail;
 
 	if (IS_HASWELL(dev))
-		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+		engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
 	else if (IS_GEN8(dev))
-		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 6)
-		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 4)
-		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+		engine->dispatch_execbuffer = i965_dispatch_execbuffer;
 	else if (IS_I830(dev) || IS_845G(dev))
-		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+		engine->dispatch_execbuffer = i830_dispatch_execbuffer;
 	else
-		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
-	ring->init_hw = init_render_ring;
-	ring->cleanup = render_ring_cleanup;
+		engine->dispatch_execbuffer = i915_dispatch_execbuffer;
+	engine->init_hw = init_render_ring;
+	engine->cleanup = render_ring_cleanup;
 
 	/* Workaround batchbuffer to combat CS tlb bug. */
 	if (HAS_BROKEN_CS_TLB(dev)) {
@@ -2872,16 +2881,16 @@  int intel_init_render_ring_buffer(struct drm_device *dev)
 			return ret;
 		}
 
-		ring->scratch.obj = obj;
-		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
+		engine->scratch.obj = obj;
+		engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
 	}
 
-	ret = intel_init_ring_buffer(dev, ring);
+	ret = intel_init_ring_buffer(dev, engine);
 	if (ret)
 		return ret;
 
 	if (INTEL_INFO(dev)->gen >= 5) {
-		ret = intel_init_pipe_control(ring);
+		ret = intel_init_pipe_control(engine);
 		if (ret)
 			return ret;
 	}
@@ -2892,75 +2901,75 @@  int intel_init_render_ring_buffer(struct drm_device *dev)
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[VCS];
+	struct intel_engine_cs *engine = &dev_priv->ring[VCS];
 
-	ring->name = "bsd ring";
-	ring->id = VCS;
-	ring->exec_id = I915_EXEC_BSD;
+	engine->name = "bsd ring";
+	engine->id = VCS;
+	engine->exec_id = I915_EXEC_BSD;
 
-	ring->write_tail = ring_write_tail;
+	engine->write_tail = ring_write_tail;
 	if (INTEL_INFO(dev)->gen >= 6) {
-		ring->mmio_base = GEN6_BSD_RING_BASE;
+		engine->mmio_base = GEN6_BSD_RING_BASE;
 		/* gen6 bsd needs a special wa for tail updates */
 		if (IS_GEN6(dev))
-			ring->write_tail = gen6_bsd_ring_write_tail;
-		ring->flush = gen6_bsd_ring_flush;
-		ring->add_request = gen6_add_request;
-		ring->get_seqno = gen6_ring_get_seqno;
-		ring->set_seqno = ring_set_seqno;
+			engine->write_tail = gen6_bsd_ring_write_tail;
+		engine->flush = gen6_bsd_ring_flush;
+		engine->add_request = gen6_add_request;
+		engine->get_seqno = gen6_ring_get_seqno;
+		engine->set_seqno = ring_set_seqno;
 		if (INTEL_INFO(dev)->gen >= 8) {
-			ring->irq_enable_mask =
+			engine->irq_enable_mask =
 				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
-			ring->irq_get = gen8_ring_get_irq;
-			ring->irq_put = gen8_ring_put_irq;
-			ring->dispatch_execbuffer =
+			engine->irq_get = gen8_ring_get_irq;
+			engine->irq_put = gen8_ring_put_irq;
+			engine->dispatch_execbuffer =
 				gen8_ring_dispatch_execbuffer;
 			if (i915_semaphore_is_enabled(dev)) {
-				ring->semaphore.sync_to = gen8_ring_sync;
-				ring->semaphore.signal = gen8_xcs_signal;
-				GEN8_RING_SEMAPHORE_INIT;
+				engine->semaphore.sync_to = gen8_ring_sync;
+				engine->semaphore.signal = gen8_xcs_signal;
+				GEN8_RING_SEMAPHORE_INIT(engine);
 			}
 		} else {
-			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-			ring->irq_get = gen6_ring_get_irq;
-			ring->irq_put = gen6_ring_put_irq;
-			ring->dispatch_execbuffer =
+			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+			engine->irq_get = gen6_ring_get_irq;
+			engine->irq_put = gen6_ring_put_irq;
+			engine->dispatch_execbuffer =
 				gen6_ring_dispatch_execbuffer;
 			if (i915_semaphore_is_enabled(dev)) {
-				ring->semaphore.sync_to = gen6_ring_sync;
-				ring->semaphore.signal = gen6_signal;
-				ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
-				ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
-				ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
-				ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
-				ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-				ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
-				ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
-				ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
-				ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
-				ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+				engine->semaphore.sync_to = gen6_ring_sync;
+				engine->semaphore.signal = gen6_signal;
+				engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
+				engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+				engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
+				engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
+				engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+				engine->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
+				engine->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+				engine->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
+				engine->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
+				engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 			}
 		}
 	} else {
-		ring->mmio_base = BSD_RING_BASE;
-		ring->flush = bsd_ring_flush;
-		ring->add_request = i9xx_add_request;
-		ring->get_seqno = ring_get_seqno;
-		ring->set_seqno = ring_set_seqno;
+		engine->mmio_base = BSD_RING_BASE;
+		engine->flush = bsd_ring_flush;
+		engine->add_request = i9xx_add_request;
+		engine->get_seqno = ring_get_seqno;
+		engine->set_seqno = ring_set_seqno;
 		if (IS_GEN5(dev)) {
-			ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
-			ring->irq_get = gen5_ring_get_irq;
-			ring->irq_put = gen5_ring_put_irq;
+			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
+			engine->irq_get = gen5_ring_get_irq;
+			engine->irq_put = gen5_ring_put_irq;
 		} else {
-			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
-			ring->irq_get = i9xx_ring_get_irq;
-			ring->irq_put = i9xx_ring_put_irq;
+			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+			engine->irq_get = i9xx_ring_get_irq;
+			engine->irq_put = i9xx_ring_put_irq;
 		}
-		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+		engine->dispatch_execbuffer = i965_dispatch_execbuffer;
 	}
-	ring->init_hw = init_ring_common;
+	engine->init_hw = init_ring_common;
 
-	return intel_init_ring_buffer(dev, ring);
+	return intel_init_ring_buffer(dev, engine);
 }
 
 /**
@@ -2969,68 +2978,68 @@  int intel_init_bsd_ring_buffer(struct drm_device *dev)
 int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
-
-	ring->name = "bsd2 ring";
-	ring->id = VCS2;
-	ring->exec_id = I915_EXEC_BSD;
-
-	ring->write_tail = ring_write_tail;
-	ring->mmio_base = GEN8_BSD2_RING_BASE;
-	ring->flush = gen6_bsd_ring_flush;
-	ring->add_request = gen6_add_request;
-	ring->get_seqno = gen6_ring_get_seqno;
-	ring->set_seqno = ring_set_seqno;
-	ring->irq_enable_mask =
+	struct intel_engine_cs *engine = &dev_priv->ring[VCS2];
+
+	engine->name = "bsd2 ring";
+	engine->id = VCS2;
+	engine->exec_id = I915_EXEC_BSD;
+
+	engine->write_tail = ring_write_tail;
+	engine->mmio_base = GEN8_BSD2_RING_BASE;
+	engine->flush = gen6_bsd_ring_flush;
+	engine->add_request = gen6_add_request;
+	engine->get_seqno = gen6_ring_get_seqno;
+	engine->set_seqno = ring_set_seqno;
+	engine->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
-	ring->irq_get = gen8_ring_get_irq;
-	ring->irq_put = gen8_ring_put_irq;
-	ring->dispatch_execbuffer =
+	engine->irq_get = gen8_ring_get_irq;
+	engine->irq_put = gen8_ring_put_irq;
+	engine->dispatch_execbuffer =
 			gen8_ring_dispatch_execbuffer;
 	if (i915_semaphore_is_enabled(dev)) {
-		ring->semaphore.sync_to = gen8_ring_sync;
-		ring->semaphore.signal = gen8_xcs_signal;
-		GEN8_RING_SEMAPHORE_INIT;
+		engine->semaphore.sync_to = gen8_ring_sync;
+		engine->semaphore.signal = gen8_xcs_signal;
+		GEN8_RING_SEMAPHORE_INIT(engine);
 	}
-	ring->init_hw = init_ring_common;
+	engine->init_hw = init_ring_common;
 
-	return intel_init_ring_buffer(dev, ring);
+	return intel_init_ring_buffer(dev, engine);
 }
 
 int intel_init_blt_ring_buffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[BCS];
-
-	ring->name = "blitter ring";
-	ring->id = BCS;
-	ring->exec_id = I915_EXEC_BLT;
-
-	ring->mmio_base = BLT_RING_BASE;
-	ring->write_tail = ring_write_tail;
-	ring->flush = gen6_ring_flush;
-	ring->add_request = gen6_add_request;
-	ring->get_seqno = gen6_ring_get_seqno;
-	ring->set_seqno = ring_set_seqno;
+	struct intel_engine_cs *engine = &dev_priv->ring[BCS];
+
+	engine->name = "blitter ring";
+	engine->id = BCS;
+	engine->exec_id = I915_EXEC_BLT;
+
+	engine->mmio_base = BLT_RING_BASE;
+	engine->write_tail = ring_write_tail;
+	engine->flush = gen6_ring_flush;
+	engine->add_request = gen6_add_request;
+	engine->get_seqno = gen6_ring_get_seqno;
+	engine->set_seqno = ring_set_seqno;
 	if (INTEL_INFO(dev)->gen >= 8) {
-		ring->irq_enable_mask =
+		engine->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
-		ring->irq_get = gen8_ring_get_irq;
-		ring->irq_put = gen8_ring_put_irq;
-		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+		engine->irq_get = gen8_ring_get_irq;
+		engine->irq_put = gen8_ring_put_irq;
+		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev)) {
-			ring->semaphore.sync_to = gen8_ring_sync;
-			ring->semaphore.signal = gen8_xcs_signal;
-			GEN8_RING_SEMAPHORE_INIT;
+			engine->semaphore.sync_to = gen8_ring_sync;
+			engine->semaphore.signal = gen8_xcs_signal;
+			GEN8_RING_SEMAPHORE_INIT(engine);
 		}
 	} else {
-		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
-		ring->irq_get = gen6_ring_get_irq;
-		ring->irq_put = gen6_ring_put_irq;
-		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+		engine->irq_get = gen6_ring_get_irq;
+		engine->irq_put = gen6_ring_put_irq;
+		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev)) {
-			ring->semaphore.signal = gen6_signal;
-			ring->semaphore.sync_to = gen6_ring_sync;
+			engine->semaphore.signal = gen6_signal;
+			engine->semaphore.sync_to = gen6_ring_sync;
 			/*
 			 * The current semaphore is only applied on pre-gen8
 			 * platform.  And there is no VCS2 ring on the pre-gen8
@@ -3038,112 +3047,112 @@  int intel_init_blt_ring_buffer(struct drm_device *dev)
 			 * initialized as INVALID.  Gen8 will initialize the
 			 * sema between BCS and VCS2 later.
 			 */
-			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
-			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
-			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
-			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
-			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-			ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
-			ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
-			ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
-			ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
-			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+			engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
+			engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
+			engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+			engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
+			engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+			engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
+			engine->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
+			engine->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+			engine->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
+			engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 		}
 	}
-	ring->init_hw = init_ring_common;
+	engine->init_hw = init_ring_common;
 
-	return intel_init_ring_buffer(dev, ring);
+	return intel_init_ring_buffer(dev, engine);
 }
 
 int intel_init_vebox_ring_buffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[VECS];
+	struct intel_engine_cs *engine = &dev_priv->ring[VECS];
 
-	ring->name = "video enhancement ring";
-	ring->id = VECS;
-	ring->exec_id = I915_EXEC_VEBOX;
+	engine->name = "video enhancement ring";
+	engine->id = VECS;
+	engine->exec_id = I915_EXEC_VEBOX;
 
-	ring->mmio_base = VEBOX_RING_BASE;
-	ring->write_tail = ring_write_tail;
-	ring->flush = gen6_ring_flush;
-	ring->add_request = gen6_add_request;
-	ring->get_seqno = gen6_ring_get_seqno;
-	ring->set_seqno = ring_set_seqno;
+	engine->mmio_base = VEBOX_RING_BASE;
+	engine->write_tail = ring_write_tail;
+	engine->flush = gen6_ring_flush;
+	engine->add_request = gen6_add_request;
+	engine->get_seqno = gen6_ring_get_seqno;
+	engine->set_seqno = ring_set_seqno;
 
 	if (INTEL_INFO(dev)->gen >= 8) {
-		ring->irq_enable_mask =
+		engine->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
-		ring->irq_get = gen8_ring_get_irq;
-		ring->irq_put = gen8_ring_put_irq;
-		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+		engine->irq_get = gen8_ring_get_irq;
+		engine->irq_put = gen8_ring_put_irq;
+		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev)) {
-			ring->semaphore.sync_to = gen8_ring_sync;
-			ring->semaphore.signal = gen8_xcs_signal;
-			GEN8_RING_SEMAPHORE_INIT;
+			engine->semaphore.sync_to = gen8_ring_sync;
+			engine->semaphore.signal = gen8_xcs_signal;
+			GEN8_RING_SEMAPHORE_INIT(engine);
 		}
 	} else {
-		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
-		ring->irq_get = hsw_vebox_get_irq;
-		ring->irq_put = hsw_vebox_put_irq;
-		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+		engine->irq_get = hsw_vebox_get_irq;
+		engine->irq_put = hsw_vebox_put_irq;
+		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev)) {
-			ring->semaphore.sync_to = gen6_ring_sync;
-			ring->semaphore.signal = gen6_signal;
-			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
-			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
-			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
-			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
-			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-			ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
-			ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
-			ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
-			ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
-			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+			engine->semaphore.sync_to = gen6_ring_sync;
+			engine->semaphore.signal = gen6_signal;
+			engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
+			engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
+			engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
+			engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+			engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+			engine->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
+			engine->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
+			engine->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
+			engine->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+			engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 		}
 	}
-	ring->init_hw = init_ring_common;
+	engine->init_hw = init_ring_common;
 
-	return intel_init_ring_buffer(dev, ring);
+	return intel_init_ring_buffer(dev, engine);
 }
 
 int
 intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	int ret;
 
-	if (!ring->gpu_caches_dirty)
+	if (!engine->gpu_caches_dirty)
 		return 0;
 
-	ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
+	ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
 	trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
 
-	ring->gpu_caches_dirty = false;
+	engine->gpu_caches_dirty = false;
 	return 0;
 }
 
 int
 intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->ring;
 	uint32_t flush_domains;
 	int ret;
 
 	flush_domains = 0;
-	if (ring->gpu_caches_dirty)
+	if (engine->gpu_caches_dirty)
 		flush_domains = I915_GEM_GPU_DOMAINS;
 
-	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
+	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 	if (ret)
 		return ret;
 
 	trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 
-	ring->gpu_caches_dirty = false;
+	engine->gpu_caches_dirty = false;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 4b1439deb7fe..24efb57dcd7d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -63,16 +63,16 @@  struct  intel_hw_status_page {
 	((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
 	(i915_semaphore_seqno_size * (__ring)->id))
 
-#define GEN8_RING_SEMAPHORE_INIT do { \
+#define GEN8_RING_SEMAPHORE_INIT(e) do { \
 	if (!dev_priv->semaphore_obj) { \
 		break; \
 	} \
-	ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
-	ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
-	ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
-	ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
-	ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
-	ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
+	(e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
+	(e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
+	(e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
+	(e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \
+	(e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \
+	(e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \
 	} while(0)
 
 enum intel_ring_hangcheck_action {