
[v2,2/2] drm/i915: Store a i915 backpointer from engine, and use it

Message ID: 1461958968-1946-2-git-send-email-chris@chris-wilson.co.uk
State: New, archived

Commit Message

Chris Wilson April 29, 2016, 7:42 p.m. UTC
text	   data	    bss	    dec	    hex	filename
6309351	3578714	 696320	10584385	 a18141	vmlinux
6308391	3578714	 696320	10583425	 a17d81	vmlinux

Almost 1KiB of code reduction: storing the drm_i915_private backpointer
in the engine (and using the req->i915 the request already carries) lets
the helpers take dev_priv directly instead of re-deriving it from
dev->dev_private at every callsite.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_cmd_parser.c       |  12 +-
 drivers/gpu/drm/i915/i915_debugfs.c          |   8 +-
 drivers/gpu/drm/i915/i915_dma.c              |   9 +-
 drivers/gpu/drm/i915/i915_drv.c              |  10 +-
 drivers/gpu/drm/i915/i915_drv.h              |  35 ++--
 drivers/gpu/drm/i915/i915_gem.c              |  47 ++---
 drivers/gpu/drm/i915/i915_gem_context.c      |  48 ++---
 drivers/gpu/drm/i915/i915_gem_evict.c        |   4 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   |   6 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c          |  32 +--
 drivers/gpu/drm/i915/i915_gem_render_state.c |  13 +-
 drivers/gpu/drm/i915/i915_gem_shrinker.c     |   4 +-
 drivers/gpu/drm/i915/i915_gpu_error.c        |  79 ++++----
 drivers/gpu/drm/i915/i915_irq.c              |  80 ++++----
 drivers/gpu/drm/i915/i915_trace.h            |  36 ++--
 drivers/gpu/drm/i915/intel_display.c         |  49 +++--
 drivers/gpu/drm/i915/intel_drv.h             |   4 +-
 drivers/gpu/drm/i915/intel_fbc.c             |   2 +-
 drivers/gpu/drm/i915/intel_lrc.c             | 138 ++++++-------
 drivers/gpu/drm/i915/intel_lrc.h             |   3 +-
 drivers/gpu/drm/i915/intel_mocs.c            |   2 +-
 drivers/gpu/drm/i915/intel_overlay.c         |   3 +-
 drivers/gpu/drm/i915/intel_pm.c              |   5 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c      | 290 ++++++++++++---------------
 drivers/gpu/drm/i915/intel_ringbuffer.h      |   8 +-
 drivers/gpu/drm/i915/intel_uncore.c          |  14 +-
 26 files changed, 442 insertions(+), 499 deletions(-)
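
The mechanical pattern behind the reduction, as a standalone model
rather than kernel code (all types and names below are simplified
stand-ins): the engine caches the drm_i915_private pointer, so feature
checks stop going through the drm_device, while the private struct keeps
its own dev backpointer for the few places that still need one.

#include <stdio.h>

struct drm_device;
struct drm_i915_private {
	int gen;
	struct drm_device *dev;		/* backpointer for the few legacy users */
};
struct drm_device { struct drm_i915_private *dev_private; };

struct engine_old { struct drm_device *dev; };
struct engine_new { struct drm_i915_private *i915; };	/* this patch */

static int gen_old(const struct engine_old *e)
{
	return e->dev->dev_private->gen;	/* dev -> dev_private -> gen */
}

static int gen_new(const struct engine_new *e)
{
	return e->i915->gen;			/* one hop fewer per check */
}

int main(void)
{
	struct drm_i915_private i915 = { .gen = 8 };
	struct drm_device dev = { .dev_private = &i915 };
	struct engine_old eo = { .dev = &dev };
	struct engine_new en = { .i915 = &i915 };

	i915.dev = &dev;	/* i915_reset() now recovers dev this way */
	printf("old=%d new=%d\n", gen_old(&eo), gen_new(&en));
	return 0;
}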

Patch

diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a337f33bec5b..e4664fea22af 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -750,12 +750,12 @@  int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 	int cmd_table_count;
 	int ret;
 
-	if (!IS_GEN7(engine->dev))
+	if (!IS_GEN7(engine->i915))
 		return 0;
 
 	switch (engine->id) {
 	case RCS:
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine->i915)) {
 			cmd_tables = hsw_render_ring_cmds;
 			cmd_table_count =
 				ARRAY_SIZE(hsw_render_ring_cmds);
@@ -764,7 +764,7 @@  int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 			cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
 		}
 
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine->i915)) {
 			engine->reg_tables = hsw_render_reg_tables;
 			engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
 		} else {
@@ -780,7 +780,7 @@  int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
 	case BCS:
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine->i915)) {
 			cmd_tables = hsw_blt_ring_cmds;
 			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
 		} else {
@@ -788,7 +788,7 @@  int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 			cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
 		}
 
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine->i915)) {
 			engine->reg_tables = hsw_blt_reg_tables;
 			engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
 		} else {
@@ -1035,7 +1035,7 @@  bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
 	if (!engine->needs_cmd_parser)
 		return false;
 
-	if (!USES_PPGTT(engine->dev))
+	if (!USES_PPGTT(engine->i915))
 		return false;
 
 	return (i915.enable_cmd_parser == 1);
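
These hunks can flip the macro argument from engine->dev to engine->i915
without touching IS_GEN7()/IS_HASWELL() because, at this point in the
driver's history, INTEL_INFO() accepted either pointer type via the
__I915__() helper. A standalone model of that idea using C11 _Generic
(the kernel used __builtin_types_compatible_p instead, and the types
here are stand-ins):

#include <stdio.h>

struct drm_i915_private { int gen; };
struct drm_device { struct drm_i915_private *dev_private; };

/* accept either pointer type, in the spirit of the kernel's __I915__() */
#define TO_I915(p) _Generic((p),					\
	struct drm_device *:						\
		((struct drm_device *)(void *)(p))->dev_private,	\
	struct drm_i915_private *:					\
		(struct drm_i915_private *)(void *)(p))

#define IS_GEN7(p)	(TO_I915(p)->gen == 7)

int main(void)
{
	struct drm_i915_private i915 = { .gen = 7 };
	struct drm_device dev = { .dev_private = &i915 };

	printf("%d %d\n", IS_GEN7(&dev), IS_GEN7(&i915));	/* 1 1 */
	return 0;
}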
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 6ee69b17e8fb..91a16c72a034 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1383,7 +1383,7 @@  static int i915_hangcheck_info(struct seq_file *m, void *unused)
 		seqno[id] = engine->get_seqno(engine);
 	}
 
-	i915_get_extra_instdone(dev, instdone);
+	i915_get_extra_instdone(dev_priv, instdone);
 
 	intel_runtime_pm_put(dev_priv);
 
@@ -3181,7 +3181,7 @@  static int i915_semaphore_status(struct seq_file *m, void *unused)
 	enum intel_engine_id id;
 	int j, ret;
 
-	if (!i915_semaphore_is_enabled(dev)) {
+	if (!i915_semaphore_is_enabled(dev_priv)) {
 		seq_puts(m, "Semaphores are disabled\n");
 		return 0;
 	}
@@ -4782,7 +4782,7 @@  i915_wedged_set(void *data, u64 val)
 
 	intel_runtime_pm_get(dev_priv);
 
-	i915_handle_error(dev, val,
+	i915_handle_error(dev_priv, val,
 			  "Manually setting wedged to %llu", val);
 
 	intel_runtime_pm_put(dev_priv);
@@ -4932,7 +4932,7 @@  i915_drop_caches_set(void *data, u64 val)
 	}
 
 	if (val & (DROP_RETIRE | DROP_ACTIVE))
-		i915_gem_retire_requests(dev);
+		i915_gem_retire_requests(dev_priv);
 
 	if (val & DROP_BOUND)
 		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index c91387f1aedd..3fba4d08787e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -186,7 +186,7 @@  static int i915_getparam(struct drm_device *dev, void *data,
 		value = 1;
 		break;
 	case I915_PARAM_HAS_SEMAPHORES:
-		value = i915_semaphore_is_enabled(dev);
+		value = i915_semaphore_is_enabled(dev_priv);
 		break;
 	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
 		value = 1;
@@ -970,7 +970,8 @@  static void intel_device_info_runtime_init(struct drm_device *dev)
 			 info->has_eu_pg ? "y" : "n");
 
 	i915.enable_execlists =
-		intel_sanitize_enable_execlists(dev, i915.enable_execlists);
+		intel_sanitize_enable_execlists(dev_priv,
+					       	i915.enable_execlists);
 
 	/*
 	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
@@ -979,7 +980,7 @@  static void intel_device_info_runtime_init(struct drm_device *dev)
 	 * than every time we check intel_enable_ppgtt().
 	 */
 	i915.enable_ppgtt =
-		intel_sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
+		intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
 	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
 }
 
@@ -1345,7 +1346,7 @@  static void i915_driver_register(struct drm_i915_private *dev_priv)
 	 * Notify a valid surface after modesetting,
 	 * when running inside a VM.
 	 */
-	if (intel_vgpu_active(dev))
+	if (intel_vgpu_active(dev_priv))
 		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
 
 	i915_setup_sysfs(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9fd221c97275..17aef1e92770 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -530,9 +530,9 @@  void intel_detect_pch(struct drm_device *dev)
 	pci_dev_put(pch);
 }
 
-bool i915_semaphore_is_enabled(struct drm_device *dev)
+bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
 {
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_GEN(dev_priv) < 6)
 		return false;
 
 	if (i915.semaphores >= 0)
@@ -544,7 +544,7 @@  bool i915_semaphore_is_enabled(struct drm_device *dev)
 
 #ifdef CONFIG_INTEL_IOMMU
 	/* Enable semaphores on SNB when IO remapping is off */
-	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+	if (INTEL_GEN(dev_priv) == 6 && intel_iommu_gfx_mapped)
 		return false;
 #endif
 
@@ -914,9 +914,9 @@  int i915_resume_switcheroo(struct drm_device *dev)
  *   - re-init interrupt state
  *   - re-init display
  */
-int i915_reset(struct drm_device *dev)
+int i915_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_device *dev = dev_priv->dev;
 	struct i915_gpu_error *error = &dev_priv->gpu_error;
 	unsigned reset_counter;
 	int ret;
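
The semaphore policy converted above reads naturally as a pure function;
a condensed, standalone model of the checks visible in the hunk (the
module parameter and IOMMU state are passed in explicitly, and the names
are stand-ins, not the kernel's):

#include <stdio.h>
#include <stdbool.h>

static bool semaphores_enabled(int gen, int modparam, bool iommu_active)
{
	if (gen < 6)
		return false;
	if (modparam >= 0)		/* explicit user override */
		return modparam != 0;
	if (gen == 6 && iommu_active)	/* SNB + VT-d: keep them off */
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", semaphores_enabled(6, -1, true));	/* 0 */
	printf("%d\n", semaphores_enabled(7, -1, true));	/* 1 */
	return 0;
}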
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 02c619e7af05..1a6a27b9a9d6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2753,7 +2753,8 @@  extern int i915_max_ioctl;
 extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
 extern int i915_resume_switcheroo(struct drm_device *dev);
 
-int intel_sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt);
+int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
+			       	int enable_ppgtt);
 
 /* i915_dma.c */
 void __printf(3, 4)
@@ -2777,7 +2778,7 @@  extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 #endif
 extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask);
 extern bool intel_has_gpu_reset(struct drm_device *dev);
-extern int i915_reset(struct drm_device *dev);
+extern int i915_reset(struct drm_i915_private *dev_priv);
 extern int intel_guc_reset(struct drm_i915_private *dev_priv);
 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2794,9 +2795,10 @@  void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
 
 /* i915_irq.c */
-void i915_queue_hangcheck(struct drm_device *dev);
+void i915_queue_hangcheck(struct drm_i915_private *dev_priv);
 __printf(3, 4)
-void i915_handle_error(struct drm_device *dev, u32 engine_mask,
+void i915_handle_error(struct drm_i915_private *dev_priv,
+		       u32 engine_mask,
 		       const char *fmt, ...);
 
 extern void intel_irq_init(struct drm_i915_private *dev_priv);
@@ -2826,9 +2828,9 @@  void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
 
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
-static inline bool intel_vgpu_active(struct drm_device *dev)
+static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
 {
-	return to_i915(dev)->vgpu.active;
+	return dev_priv->vgpu.active;
 }
 
 void
@@ -3096,13 +3098,13 @@  static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
 				 req->seqno);
 }
 
-int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine);
 
-bool i915_gem_retire_requests(struct drm_device *dev);
+bool i915_gem_retire_requests(struct drm_i915_private *dev_priv);
 void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
 
 static inline u32 i915_reset_counter(struct i915_gpu_error *error)
@@ -3355,9 +3357,9 @@  int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 
 /* belongs in i915_gem_gtt.h */
-static inline void i915_gem_chipset_flush(struct drm_device *dev)
+static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
 {
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_INFO(dev_priv)->gen < 6)
 		intel_gtt_chipset_flush();
 }
 
@@ -3436,14 +3438,15 @@  static inline void i915_error_state_buf_release(
 {
 	kfree(eb->buf);
 }
-void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+			      u32 engine_mask,
 			      const char *error_msg);
 void i915_error_state_get(struct drm_device *dev,
 			  struct i915_error_state_file_priv *error_priv);
 void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
 void i915_destroy_error_state(struct drm_device *dev);
 
-void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
 const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 
 /* i915_cmd_parser.c */
@@ -3551,18 +3554,20 @@  extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_enable_rc6(const struct drm_device *dev);
 
-extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file);
 int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *file);
 
 /* overlay */
-extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
 					    struct intel_overlay_error_state *error);
 
-extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_i915_private *dev_priv);
 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 					    struct drm_device *dev,
 					    struct intel_display_error_state *error);
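
Many hunks in this patch replace INTEL_INFO(dev)->gen with
INTEL_GEN(dev_priv); in this era INTEL_GEN() was simply shorthand for
INTEL_INFO(p)->gen. A compilable model with simplified stand-in types:

#include <stdio.h>

struct intel_device_info { int gen; };
struct drm_i915_private { struct intel_device_info info; };

#define INTEL_INFO(p)	(&(p)->info)		/* model: direct member */
#define INTEL_GEN(p)	(INTEL_INFO(p)->gen)	/* the shorthand used above */
#define IS_GEN6(p)	(INTEL_GEN(p) == 6)

int main(void)
{
	struct drm_i915_private i915 = { .info = { .gen = 6 } };

	printf("gen=%d is_gen6=%d\n", INTEL_GEN(&i915), IS_GEN6(&i915));
	return 0;
}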
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bd6061f177fe..9616d72a1ac5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -177,7 +177,7 @@  i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 		vaddr += PAGE_SIZE;
 	}
 
-	i915_gem_chipset_flush(obj->base.dev);
+	i915_gem_chipset_flush(to_i915(obj->base.dev));
 
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (st == NULL)
@@ -347,7 +347,7 @@  i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 	}
 
 	drm_clflush_virt_range(vaddr, args->size);
-	i915_gem_chipset_flush(dev);
+	i915_gem_chipset_flush(to_i915(dev));
 
 out:
 	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
@@ -1006,7 +1006,7 @@  out:
 	}
 
 	if (needs_clflush_after)
-		i915_gem_chipset_flush(dev);
+		i915_gem_chipset_flush(to_i915(dev));
 	else
 		obj->cache_dirty = true;
 
@@ -1230,8 +1230,7 @@  int __i915_wait_request(struct drm_i915_gem_request *req,
 			struct intel_rps_client *rps)
 {
 	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = req->i915;
 	const bool irq_test_in_progress =
 		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
 	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
@@ -1429,7 +1428,7 @@  __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
 	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_gem_request *tmp;
 
-	lockdep_assert_held(&engine->dev->struct_mutex);
+	lockdep_assert_held(&engine->i915->dev->struct_mutex);
 
 	if (list_empty(&req->list))
 		return;
@@ -2502,9 +2501,8 @@  i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 }
 
 static int
-i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
+i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
 	int ret;
 
@@ -2514,7 +2512,7 @@  i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 		if (ret)
 			return ret;
 	}
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(dev_priv);
 
 	/* Finally reset hw state */
 	for_each_engine(engine, dev_priv)
@@ -2534,7 +2532,7 @@  int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
 	/* HWS page needs to be set less than what we
 	 * will inject to ring
 	 */
-	ret = i915_gem_init_seqno(dev, seqno - 1);
+	ret = i915_gem_init_seqno(dev_priv, seqno - 1);
 	if (ret)
 		return ret;
 
@@ -2550,13 +2548,11 @@  int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
 }
 
 int
-i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
+i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	/* reserve 0 for non-seqno */
 	if (dev_priv->next_seqno == 0) {
-		int ret = i915_gem_init_seqno(dev, 0);
+		int ret = i915_gem_init_seqno(dev_priv, 0);
 		if (ret)
 			return ret;
 
@@ -2654,7 +2650,7 @@  void __i915_add_request(struct drm_i915_gem_request *request,
 	/* Not allowed to fail! */
 	WARN(ret, "emit|add_request failed: %d!\n", ret);
 
-	i915_queue_hangcheck(engine->dev);
+	i915_queue_hangcheck(engine->i915);
 
 	queue_delayed_work(dev_priv->wq,
 			   &dev_priv->mm.retire_work,
@@ -2728,7 +2724,7 @@  __i915_gem_request_alloc(struct intel_engine_cs *engine,
 			 struct intel_context *ctx,
 			 struct drm_i915_gem_request **req_out)
 {
-	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 	struct drm_i915_gem_request *req;
 	int ret;
@@ -2750,7 +2746,7 @@  __i915_gem_request_alloc(struct intel_engine_cs *engine,
 	if (req == NULL)
 		return -ENOMEM;
 
-	ret = i915_gem_get_seqno(engine->dev, &req->seqno);
+	ret = i915_gem_get_seqno(engine->i915, &req->seqno);
 	if (ret)
 		goto err;
 
@@ -2807,7 +2803,7 @@  i915_gem_request_alloc(struct intel_engine_cs *engine,
 	int err;
 
 	if (ctx == NULL)
-		ctx = to_i915(engine->dev)->kernel_context;
+		ctx = engine->i915->kernel_context;
 	err = __i915_gem_request_alloc(engine, ctx, &req);
 	return err ? ERR_PTR(err) : req;
 }
@@ -2982,9 +2978,8 @@  i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 }
 
 bool
-i915_gem_retire_requests(struct drm_device *dev)
+i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
 	bool idle = true;
 
@@ -3017,7 +3012,7 @@  i915_gem_retire_work_handler(struct work_struct *work)
 	/* Come back later if the device is busy... */
 	idle = false;
 	if (mutex_trylock(&dev->struct_mutex)) {
-		idle = i915_gem_retire_requests(dev);
+		idle = i915_gem_retire_requests(dev_priv);
 		mutex_unlock(&dev->struct_mutex);
 	}
 	if (!idle)
@@ -3186,7 +3181,7 @@  __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	if (i915_gem_request_completed(from_req, true))
 		return 0;
 
-	if (!i915_semaphore_is_enabled(obj->base.dev)) {
+	if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
 		struct drm_i915_private *i915 = to_i915(obj->base.dev);
 		ret = __i915_wait_request(from_req,
 					  i915->mm.interruptible,
@@ -3719,7 +3714,7 @@  i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 		return;
 
 	if (i915_gem_clflush_object(obj, obj->pin_display))
-		i915_gem_chipset_flush(obj->base.dev);
+		i915_gem_chipset_flush(to_i915(obj->base.dev));
 
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
@@ -3917,7 +3912,7 @@  out:
 	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
 	    cpu_write_needs_clflush(obj)) {
 		if (i915_gem_clflush_object(obj, true))
-			i915_gem_chipset_flush(obj->base.dev);
+			i915_gem_chipset_flush(to_i915(obj->base.dev));
 	}
 
 	return 0;
@@ -4695,7 +4690,7 @@  i915_gem_suspend(struct drm_device *dev)
 	if (ret)
 		goto err;
 
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(dev_priv);
 
 	i915_gem_stop_engines(dev);
 	i915_gem_context_lost(dev_priv);
@@ -4986,7 +4981,7 @@  i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
 	else
 		dev_priv->num_fence_regs = 8;
 
-	if (intel_vgpu_active(dev))
+	if (intel_vgpu_active(dev_priv))
 		dev_priv->num_fence_regs =
 				I915_READ(vgtif_reg(avail_rs.fence_num));
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index b1b704c2c001..2dcf18cfc809 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -99,28 +99,27 @@ 
 #define GEN6_CONTEXT_ALIGN (64<<10)
 #define GEN7_CONTEXT_ALIGN 4096
 
-static size_t get_context_alignment(struct drm_device *dev)
+static size_t get_context_alignment(struct drm_i915_private *dev_priv)
 {
-	if (IS_GEN6(dev))
+	if (IS_GEN6(dev_priv))
 		return GEN6_CONTEXT_ALIGN;
 
 	return GEN7_CONTEXT_ALIGN;
 }
 
-static int get_context_size(struct drm_device *dev)
+static int get_context_size(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 	u32 reg;
 
-	switch (INTEL_INFO(dev)->gen) {
+	switch (INTEL_INFO(dev_priv)->gen) {
 	case 6:
 		reg = I915_READ(CXT_SIZE);
 		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
 		break;
 	case 7:
 		reg = I915_READ(GEN7_CXT_SIZE);
-		if (IS_HASWELL(dev))
+		if (IS_HASWELL(dev_priv))
 			ret = HSW_CXT_TOTAL_SIZE;
 		else
 			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
@@ -224,7 +223,7 @@  static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
 		 * Flush any pending retires to hopefully release some
 		 * stale contexts and try again.
 		 */
-		i915_gem_retire_requests(dev_priv->dev);
+		i915_gem_retire_requests(dev_priv);
 		ret = ida_simple_get(&dev_priv->context_hw_ida,
 				     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
 		if (ret < 0)
@@ -320,7 +319,7 @@  i915_gem_create_context(struct drm_device *dev,
 		 * context.
 		 */
 		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
-					    get_context_alignment(dev), 0);
+					    get_context_alignment(to_i915(dev)), 0);
 		if (ret) {
 			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
 			goto err_destroy;
@@ -389,7 +388,8 @@  int i915_gem_context_init(struct drm_device *dev)
 	if (WARN_ON(dev_priv->kernel_context))
 		return 0;
 
-	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
+	if (intel_vgpu_active(dev_priv) &&
+	    HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
 		if (!i915.enable_execlists) {
 			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
 			return -EINVAL;
@@ -404,8 +404,9 @@  int i915_gem_context_init(struct drm_device *dev)
 		/* NB: intentionally left blank. We will allocate our own
 		 * backing objects as we need them, thank you very much */
 		dev_priv->hw_context_size = 0;
-	} else if (HAS_HW_CONTEXTS(dev)) {
-		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
+	} else if (HAS_HW_CONTEXTS(dev_priv)) {
+		dev_priv->hw_context_size =
+			round_up(get_context_size(dev_priv), 4096);
 		if (dev_priv->hw_context_size > (1<<20)) {
 			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
 					 dev_priv->hw_context_size);
@@ -509,12 +510,13 @@  i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
+	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *engine = req->engine;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
-		i915_semaphore_is_enabled(engine->dev) ?
-		hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
+		i915_semaphore_is_enabled(dev_priv) ?
+		hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
 		0;
 	int len, ret;
 
@@ -523,21 +525,21 @@  mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 * explicitly, so we rely on the value at ring init, stored in
 	 * itlb_before_ctx_switch.
 	 */
-	if (IS_GEN6(engine->dev)) {
+	if (IS_GEN6(dev_priv)) {
 		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
 
 	/* These flags are for resource streamer on HSW+ */
-	if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
+	if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
 		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
-	else if (INTEL_INFO(engine->dev)->gen < 8)
+	else if (INTEL_GEN(dev_priv) < 8)
 		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 
 	len = 4;
-	if (INTEL_INFO(engine->dev)->gen >= 7)
+	if (INTEL_GEN(dev_priv) >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
 
 	ret = intel_ring_begin(req, len);
@@ -545,14 +547,14 @@  mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 		return ret;
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-	if (INTEL_INFO(engine->dev)->gen >= 7) {
+	if (INTEL_GEN(dev_priv) >= 7) {
 		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
 			intel_ring_emit(engine,
 					MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_engine(signaller, to_i915(engine->dev)) {
+			for_each_engine(signaller, dev_priv) {
 				if (signaller == engine)
 					continue;
 
@@ -575,14 +577,14 @@  mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 */
 	intel_ring_emit(engine, MI_NOOP);
 
-	if (INTEL_INFO(engine->dev)->gen >= 7) {
+	if (INTEL_GEN(dev_priv) >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 			i915_reg_t last_reg = {}; /* keep gcc quiet */
 
 			intel_ring_emit(engine,
 					MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_engine(signaller, to_i915(engine->dev)) {
+			for_each_engine(signaller, dev_priv) {
 				if (signaller == engine)
 					continue;
 
@@ -673,7 +675,7 @@  needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
 	if (engine->id != RCS)
 		return true;
 
-	if (INTEL_INFO(engine->dev)->gen < 8)
+	if (INTEL_GEN(engine->i915) < 8)
 		return true;
 
 	return false;
@@ -710,7 +712,7 @@  static int do_rcs_switch(struct drm_i915_gem_request *req)
 
 	/* Trying to pin first makes error handling easier. */
 	ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
-				    get_context_alignment(engine->dev),
+				    get_context_alignment(engine->i915),
 				    0);
 	if (ret)
 		return ret;
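
mi_set_context() above hoists req->i915 into a dev_priv local once and
reuses it for every feature check and for the signaller iteration.
for_each_engine() walks engines owned by the private struct; a
simplified standalone model (the kernel's macro also skips engines
missing from the hardware's engine mask):

#include <stdio.h>

struct intel_engine_cs { int id; };
struct drm_i915_private {
	struct intel_engine_cs engine[4];
	int num_engines;
};

#define for_each_engine(e, i915)					\
	for ((e) = (i915)->engine;					\
	     (e) < (i915)->engine + (i915)->num_engines; (e)++)

int main(void)
{
	struct drm_i915_private i915 = {
		.engine = { {0}, {1}, {2} }, .num_engines = 3,
	};
	struct intel_engine_cs *engine;

	for_each_engine(engine, &i915)
		printf("engine %d\n", engine->id);
	return 0;
}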
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index ea1f8d1bd228..b144c3f5c650 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -154,7 +154,7 @@  none:
 		if (ret)
 			return ret;
 
-		i915_gem_retire_requests(dev);
+		i915_gem_retire_requests(to_i915(dev));
 		goto search_again;
 	}
 
@@ -265,7 +265,7 @@  int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 		if (ret)
 			return ret;
 
-		i915_gem_retire_requests(vm->dev);
+		i915_gem_retire_requests(to_i915(vm->dev));
 
 		WARN_ON(!list_empty(&vm->active_list));
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index e0ee5d1ac372..a54a243ccaac 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -724,7 +724,7 @@  i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 	struct i915_address_space *vm;
 	struct list_head ordered_vmas;
 	struct list_head pinned_vmas;
-	bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
+	bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
 	int retry;
 
 	i915_gem_retire_requests_ring(engine);
@@ -965,7 +965,7 @@  i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 	}
 
 	if (flush_chipset)
-		i915_gem_chipset_flush(req->engine->dev);
+		i915_gem_chipset_flush(req->engine->i915);
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
@@ -1119,7 +1119,7 @@  i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			i915_gem_request_assign(&obj->last_fenced_req, req);
 			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				struct drm_i915_private *dev_priv = to_i915(engine->dev);
+				struct drm_i915_private *dev_priv = engine->i915;
 				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
 					       &dev_priv->mm.fence_list);
 			}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 364cf8236021..4e344707cebc 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -110,17 +110,19 @@  const struct i915_ggtt_view i915_ggtt_view_rotated = {
 	.type = I915_GGTT_VIEW_ROTATED,
 };
 
-int intel_sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
+int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
+			       	int enable_ppgtt)
 {
 	bool has_aliasing_ppgtt;
 	bool has_full_ppgtt;
 	bool has_full_48bit_ppgtt;
 
-	has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
-	has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
-	has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
+	has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
+	has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
+	has_full_48bit_ppgtt =
+	       	IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
 
-	if (intel_vgpu_active(dev))
+	if (intel_vgpu_active(dev_priv))
 		has_full_ppgtt = false; /* emulation is too hard */
 
 	if (!has_aliasing_ppgtt)
@@ -130,7 +132,7 @@  int intel_sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
 	 * execlists, the sole mechanism available to submit work.
 	 */
-	if (enable_ppgtt == 0 && INTEL_INFO(dev)->gen < 9)
+	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
 		return 0;
 
 	if (enable_ppgtt == 1)
@@ -144,19 +146,19 @@  int intel_sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 
 #ifdef CONFIG_INTEL_IOMMU
 	/* Disable ppgtt on SNB if VT-d is on. */
-	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
+	if (INTEL_GEN(dev_priv) == 6 && intel_iommu_gfx_mapped) {
 		DRM_INFO("Disabling PPGTT because VT-d is on\n");
 		return 0;
 	}
 #endif
 
 	/* Early VLV doesn't have this */
-	if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
+	if (IS_VALLEYVIEW(dev_priv) && dev_priv->dev->pdev->revision < 0xb) {
 		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
 		return 0;
 	}
 
-	if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
+	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
 		return has_full_48bit_ppgtt ? 3 : 2;
 	else
 		return has_aliasing_ppgtt ? 1 : 0;
@@ -994,7 +996,7 @@  static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 {
 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 
-	if (intel_vgpu_active(vm->dev))
+	if (intel_vgpu_active(to_i915(vm->dev)))
 		gen8_ppgtt_notify_vgt(ppgtt, false);
 
 	if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
@@ -1545,14 +1547,14 @@  static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 							      0, 0,
 							      GEN8_PML4E_SHIFT);
 
-		if (intel_vgpu_active(ppgtt->base.dev)) {
+		if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
 			ret = gen8_preallocate_top_level_pdps(ppgtt);
 			if (ret)
 				goto free_scratch;
 		}
 	}
 
-	if (intel_vgpu_active(ppgtt->base.dev))
+	if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
 		gen8_ppgtt_notify_vgt(ppgtt, true);
 
 	return 0;
@@ -2080,7 +2082,7 @@  static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	} else
 		BUG();
 
-	if (intel_vgpu_active(dev))
+	if (intel_vgpu_active(dev_priv))
 		ppgtt->switch_mm = vgpu_mm_switch;
 
 	ret = gen6_ppgtt_alloc(ppgtt);
@@ -2729,7 +2731,7 @@  static int i915_gem_setup_global_gtt(struct drm_device *dev,
 	i915_address_space_init(&ggtt->base, dev_priv);
 	ggtt->base.total += PAGE_SIZE;
 
-	if (intel_vgpu_active(dev)) {
+	if (intel_vgpu_active(dev_priv)) {
 		ret = intel_vgt_balloon(dev);
 		if (ret)
 			return ret;
@@ -2833,7 +2835,7 @@  void i915_ggtt_cleanup_hw(struct drm_device *dev)
 	i915_gem_cleanup_stolen(dev);
 
 	if (drm_mm_initialized(&ggtt->base.mm)) {
-		if (intel_vgpu_active(dev))
+		if (intel_vgpu_active(dev_priv))
 			intel_vgt_deballoon();
 
 		drm_mm_takedown(&ggtt->base.mm);
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 423cf5144bcb..7c93327b70fe 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -29,7 +29,7 @@ 
 #include "intel_renderstate.h"
 
 static const struct intel_renderstate_rodata *
-render_state_get_rodata(struct drm_device *dev, const int gen)
+render_state_get_rodata(const int gen)
 {
 	switch (gen) {
 	case 6:
@@ -45,19 +45,20 @@  render_state_get_rodata(struct drm_device *dev, const int gen)
 	return NULL;
 }
 
-static int render_state_init(struct render_state *so, struct drm_device *dev)
+static int render_state_init(struct render_state *so,
+			     struct drm_i915_private *dev_priv)
 {
 	int ret;
 
-	so->gen = INTEL_INFO(dev)->gen;
-	so->rodata = render_state_get_rodata(dev, so->gen);
+	so->gen = INTEL_GEN(dev_priv);
+	so->rodata = render_state_get_rodata(so->gen);
 	if (so->rodata == NULL)
 		return 0;
 
 	if (so->rodata->batch_items * 4 > 4096)
 		return -EINVAL;
 
-	so->obj = i915_gem_object_create(dev, 4096);
+	so->obj = i915_gem_object_create(dev_priv->dev, 4096);
 	if (IS_ERR(so->obj))
 		return PTR_ERR(so->obj);
 
@@ -177,7 +178,7 @@  int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
 	if (WARN_ON(engine->id != RCS))
 		return -ENOENT;
 
-	ret = render_state_init(so, engine->dev);
+	ret = render_state_init(so, engine->i915);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 2643d18e41f5..2cde8147c255 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -131,7 +131,7 @@  i915_gem_shrink(struct drm_i915_private *dev_priv,
 	unsigned long count = 0;
 
 	trace_i915_gem_shrink(dev_priv, target, flags);
-	i915_gem_retire_requests(dev_priv->dev);
+	i915_gem_retire_requests(dev_priv);
 
 	/*
 	 * As we may completely rewrite the (un)bound list whilst unbinding
@@ -197,7 +197,7 @@  i915_gem_shrink(struct drm_i915_private *dev_priv,
 		list_splice(&still_in_list, phase->list);
 	}
 
-	i915_gem_retire_requests(dev_priv->dev);
+	i915_gem_retire_requests(dev_priv);
 
 	return count;
 }
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 89725c9efc25..22d926839b68 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -824,19 +824,18 @@  static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
 	return error_code;
 }
 
-static void i915_gem_record_fences(struct drm_device *dev,
+static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
 				   struct drm_i915_error_state *error)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	if (IS_GEN3(dev) || IS_GEN2(dev)) {
+	if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
 		for (i = 0; i < dev_priv->num_fence_regs; i++)
 			error->fence[i] = I915_READ(FENCE_REG(i));
-	} else if (IS_GEN5(dev) || IS_GEN4(dev)) {
+	} else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
 		for (i = 0; i < dev_priv->num_fence_regs; i++)
 			error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
-	} else if (INTEL_INFO(dev)->gen >= 6) {
+	} else if (INTEL_GEN(dev_priv) >= 6) {
 		for (i = 0; i < dev_priv->num_fence_regs; i++)
 			error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
 	}
@@ -851,7 +850,7 @@  static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 	struct intel_engine_cs *to;
 	enum intel_engine_id id;
 
-	if (!i915_semaphore_is_enabled(dev_priv->dev))
+	if (!i915_semaphore_is_enabled(dev_priv))
 		return;
 
 	if (!error->semaphore_obj)
@@ -893,31 +892,29 @@  static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
 	}
 }
 
-static void i915_record_ring_state(struct drm_device *dev,
+static void i915_record_ring_state(struct drm_i915_private *dev_priv,
 				   struct drm_i915_error_state *error,
 				   struct intel_engine_cs *engine,
 				   struct drm_i915_error_ring *ering)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (INTEL_INFO(dev)->gen >= 6) {
+	if (INTEL_GEN(dev_priv) >= 6) {
 		ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
 		ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
-		if (INTEL_INFO(dev)->gen >= 8)
+		if (INTEL_GEN(dev_priv) >= 8)
 			gen8_record_semaphore_state(dev_priv, error, engine,
 						    ering);
 		else
 			gen6_record_semaphore_state(dev_priv, engine, ering);
 	}
 
-	if (INTEL_INFO(dev)->gen >= 4) {
+	if (INTEL_GEN(dev_priv) >= 4) {
 		ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
 		ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
 		ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
 		ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
 		ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
 		ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
-		if (INTEL_INFO(dev)->gen >= 8) {
+		if (INTEL_GEN(dev_priv) >= 8) {
 			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
 			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
 		}
@@ -939,10 +936,10 @@  static void i915_record_ring_state(struct drm_device *dev,
 	ering->tail = I915_READ_TAIL(engine);
 	ering->ctl = I915_READ_CTL(engine);
 
-	if (I915_NEED_GFX_HWS(dev)) {
+	if (I915_NEED_GFX_HWS(dev_priv)) {
 		i915_reg_t mmio;
 
-		if (IS_GEN7(dev)) {
+		if (IS_GEN7(dev_priv)) {
 			switch (engine->id) {
 			default:
 			case RCS:
@@ -958,7 +955,7 @@  static void i915_record_ring_state(struct drm_device *dev,
 				mmio = VEBOX_HWS_PGA_GEN7;
 				break;
 			}
-		} else if (IS_GEN6(engine->dev)) {
+		} else if (IS_GEN6(engine->i915)) {
 			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
 		} else {
 			/* XXX: gen8 returns to sanity */
@@ -971,18 +968,18 @@  static void i915_record_ring_state(struct drm_device *dev,
 	ering->hangcheck_score = engine->hangcheck.score;
 	ering->hangcheck_action = engine->hangcheck.action;
 
-	if (USES_PPGTT(dev)) {
+	if (USES_PPGTT(dev_priv)) {
 		int i;
 
 		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
 
-		if (IS_GEN6(dev))
+		if (IS_GEN6(dev_priv))
 			ering->vm_info.pp_dir_base =
 				I915_READ(RING_PP_DIR_BASE_READ(engine));
-		else if (IS_GEN7(dev))
+		else if (IS_GEN7(dev_priv))
 			ering->vm_info.pp_dir_base =
 				I915_READ(RING_PP_DIR_BASE(engine));
-		else if (INTEL_INFO(dev)->gen >= 8)
+		else if (INTEL_GEN(dev_priv) >= 8)
 			for (i = 0; i < 4; i++) {
 				ering->vm_info.pdp[i] =
 					I915_READ(GEN8_RING_PDP_UDW(engine, i));
@@ -998,7 +995,7 @@  static void i915_gem_record_active_context(struct intel_engine_cs *engine,
 					   struct drm_i915_error_state *error,
 					   struct drm_i915_error_ring *ering)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	struct drm_i915_gem_object *obj;
 
 	/* Currently render ring is the only HW context user */
@@ -1016,10 +1013,9 @@  static void i915_gem_record_active_context(struct intel_engine_cs *engine,
 	}
 }
 
-static void i915_gem_record_rings(struct drm_device *dev,
+static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 				  struct drm_i915_error_state *error)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_i915_gem_request *request;
 	int i, count;
@@ -1030,12 +1026,12 @@  static void i915_gem_record_rings(struct drm_device *dev,
 
 		error->ring[i].pid = -1;
 
-		if (engine->dev == NULL)
+		if (engine->i915 == NULL)
 			continue;
 
 		error->ring[i].valid = true;
 
-		i915_record_ring_state(dev, error, engine, &error->ring[i]);
+		i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
 
 		request = i915_gem_find_active_request(engine);
 		if (request) {
@@ -1301,15 +1297,14 @@  static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
 
-	i915_get_extra_instdone(dev, error->extra_instdone);
+	i915_get_extra_instdone(dev_priv, error->extra_instdone);
 }
 
-static void i915_error_capture_msg(struct drm_device *dev,
+static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
 				   struct drm_i915_error_state *error,
 				   u32 engine_mask,
 				   const char *error_msg)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 ecode;
 	int ring_id = -1, len;
 
@@ -1317,7 +1312,7 @@  static void i915_error_capture_msg(struct drm_device *dev,
 
 	len = scnprintf(error->error_msg, sizeof(error->error_msg),
 			"GPU HANG: ecode %d:%d:0x%08x",
-			INTEL_INFO(dev)->gen, ring_id, ecode);
+			INTEL_INFO(dev_priv)->gen, ring_id, ecode);
 
 	if (ring_id != -1 && error->ring[ring_id].pid != -1)
 		len += scnprintf(error->error_msg + len,
@@ -1352,11 +1347,11 @@  static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
  * out a structure which becomes available in debugfs for user level tools
  * to pick up.
  */
-void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+			      u32 engine_mask,
 			      const char *error_msg)
 {
 	static bool warned;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_error_state *error;
 	unsigned long flags;
 
@@ -1372,15 +1367,15 @@  void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
 	i915_capture_gen_state(dev_priv, error);
 	i915_capture_reg_state(dev_priv, error);
 	i915_gem_capture_buffers(dev_priv, error);
-	i915_gem_record_fences(dev, error);
-	i915_gem_record_rings(dev, error);
+	i915_gem_record_fences(dev_priv, error);
+	i915_gem_record_rings(dev_priv, error);
 
 	do_gettimeofday(&error->time);
 
-	error->overlay = intel_overlay_capture_error_state(dev);
-	error->display = intel_display_capture_error_state(dev);
+	error->overlay = intel_overlay_capture_error_state(dev_priv);
+	error->display = intel_display_capture_error_state(dev_priv);
 
-	i915_error_capture_msg(dev, error, engine_mask, error_msg);
+	i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
 	DRM_INFO("%s\n", error->error_msg);
 
 	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
@@ -1400,7 +1395,7 @@  void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
 		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
 		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
 		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
-		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
+		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index);
 		warned = true;
 	}
 }
@@ -1450,17 +1445,17 @@  const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
 }
 
 /* NB: please notice the memset */
-void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
+void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
+			     uint32_t *instdone)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
 
-	if (IS_GEN2(dev) || IS_GEN3(dev))
+	if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
 		instdone[0] = I915_READ(GEN2_INSTDONE);
-	else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
+	else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
 		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
 		instdone[1] = I915_READ(GEN4_INSTDONE1);
-	} else if (INTEL_INFO(dev)->gen >= 7) {
+	} else if (INTEL_INFO(dev_priv)->gen >= 7) {
 		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
 		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
 		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
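
A recurring reason these error-capture helpers want a dev_priv argument:
register accessors like I915_READ() expand to an expression that names a
dev_priv variable in scope, so taking drm_i915_private directly removes
the derivation line at the top of each function. A simplified standalone
model (the real macro routes through the uncore mmio hooks):

#include <stdio.h>

struct drm_i915_private { unsigned int regs[16]; };

/* model: the expansion refers to whatever dev_priv is in scope */
#define I915_READ(reg)	(dev_priv->regs[(reg)])

static unsigned int read_fence(struct drm_i915_private *dev_priv, int i)
{
	return I915_READ(i);	/* uses the dev_priv parameter implicitly */
}

int main(void)
{
	struct drm_i915_private i915 = { .regs = { [3] = 0xdead } };

	printf("fence3=0x%x\n", read_fence(&i915, 3));
	return 0;
}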
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2f6fd33c07ba..8864ee19154f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2555,15 +2555,15 @@  static void i915_error_wake_up(struct drm_i915_private *dev_priv,
  * Fire an error uevent so userspace can see that a hang or error
  * was detected.
  */
-static void i915_reset_and_wakeup(struct drm_device *dev)
+static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj;
 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
 	int ret;
 
-	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
+	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
 
 	/*
 	 * Note that there's only one work item which does gpu resets, so we
@@ -2577,8 +2577,7 @@  static void i915_reset_and_wakeup(struct drm_device *dev)
 	 */
 	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
 		DRM_DEBUG_DRIVER("resetting chip\n");
-		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
-				   reset_event);
+		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
 
 		/*
 		 * In most cases it's guaranteed that we get here with an RPM
@@ -2589,7 +2588,7 @@  static void i915_reset_and_wakeup(struct drm_device *dev)
 		 */
 		intel_runtime_pm_get(dev_priv);
 
-		intel_prepare_reset(dev);
+		intel_prepare_reset(dev_priv);
 
 		/*
 		 * All state reset _must_ be completed before we update the
@@ -2597,14 +2596,14 @@  static void i915_reset_and_wakeup(struct drm_device *dev)
 		 * pending state and not properly drop locks, resulting in
 		 * deadlocks with the reset work.
 		 */
-		ret = i915_reset(dev);
+		ret = i915_reset(dev_priv);
 
-		intel_finish_reset(dev);
+		intel_finish_reset(dev_priv);
 
 		intel_runtime_pm_put(dev_priv);
 
 		if (ret == 0)
-			kobject_uevent_env(&dev->primary->kdev->kobj,
+			kobject_uevent_env(kobj,
 					   KOBJ_CHANGE, reset_done_event);
 
 		/*
@@ -2615,9 +2614,8 @@  static void i915_reset_and_wakeup(struct drm_device *dev)
 	}
 }
 
-static void i915_report_and_clear_eir(struct drm_device *dev)
+static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t instdone[I915_NUM_INSTDONE_REG];
 	u32 eir = I915_READ(EIR);
 	int pipe, i;
@@ -2627,9 +2625,9 @@  static void i915_report_and_clear_eir(struct drm_device *dev)
 
 	pr_err("render error detected, EIR: 0x%08x\n", eir);
 
-	i915_get_extra_instdone(dev, instdone);
+	i915_get_extra_instdone(dev_priv, instdone);
 
-	if (IS_G4X(dev)) {
+	if (IS_G4X(dev_priv)) {
 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
 			u32 ipeir = I915_READ(IPEIR_I965);
 
@@ -2651,7 +2649,7 @@  static void i915_report_and_clear_eir(struct drm_device *dev)
 		}
 	}
 
-	if (!IS_GEN2(dev)) {
+	if (!IS_GEN2(dev_priv)) {
 		if (eir & I915_ERROR_PAGE_TABLE) {
 			u32 pgtbl_err = I915_READ(PGTBL_ER);
 			pr_err("page table error\n");
@@ -2673,7 +2671,7 @@  static void i915_report_and_clear_eir(struct drm_device *dev)
 		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
 		for (i = 0; i < ARRAY_SIZE(instdone); i++)
 			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
-		if (INTEL_INFO(dev)->gen < 4) {
+		if (INTEL_GEN(dev_priv) < 4) {
 			u32 ipeir = I915_READ(IPEIR);
 
 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
@@ -2717,10 +2715,10 @@  static void i915_report_and_clear_eir(struct drm_device *dev)
  * so userspace knows something bad happened (should trigger collection
  * of a ring dump etc.).
  */
-void i915_handle_error(struct drm_device *dev, u32 engine_mask,
+void i915_handle_error(struct drm_i915_private *dev_priv,
+		       u32 engine_mask,
 		       const char *fmt, ...)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	va_list args;
 	char error_msg[80];
 
@@ -2728,8 +2726,8 @@  void i915_handle_error(struct drm_device *dev, u32 engine_mask,
 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
 	va_end(args);
 
-	i915_capture_error_state(dev, engine_mask, error_msg);
-	i915_report_and_clear_eir(dev);
+	i915_capture_error_state(dev_priv, engine_mask, error_msg);
+	i915_report_and_clear_eir(dev_priv);
 
 	if (engine_mask) {
 		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
@@ -2751,7 +2749,7 @@  void i915_handle_error(struct drm_device *dev, u32 engine_mask,
 		i915_error_wake_up(dev_priv, false);
 	}
 
-	i915_reset_and_wakeup(dev);
+	i915_reset_and_wakeup(dev_priv);
 }
 
 /* Called from drm generic code, passed 'crtc' which
@@ -2869,9 +2867,9 @@  ring_idle(struct intel_engine_cs *engine, u32 seqno)
 }
 
 static bool
-ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
+ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr)
 {
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (INTEL_GEN(dev_priv) >= 8) {
 		return (ipehr >> 23) == 0x1c;
 	} else {
 		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
@@ -2884,10 +2882,10 @@  static struct intel_engine_cs *
 semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
 				 u64 offset)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	struct intel_engine_cs *signaller;
 
-	if (INTEL_INFO(dev_priv)->gen >= 8) {
+	if (INTEL_GEN(dev_priv) >= 8) {
 		for_each_engine(signaller, dev_priv) {
 			if (engine == signaller)
 				continue;
@@ -2916,7 +2914,7 @@  semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
 static struct intel_engine_cs *
 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	u32 cmd, ipehr, head;
 	u64 offset = 0;
 	int i, backwards;
@@ -2942,7 +2940,7 @@  semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 		return NULL;
 
 	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
-	if (!ipehr_is_semaphore_wait(engine->dev, ipehr))
+	if (!ipehr_is_semaphore_wait(engine->i915, ipehr))
 		return NULL;
 
 	/*
@@ -2954,7 +2952,7 @@  semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 	 * ringbuffer itself.
 	 */
 	head = I915_READ_HEAD(engine) & HEAD_ADDR;
-	backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
+	backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
 
 	for (i = backwards; i; --i) {
 		/*
@@ -2976,7 +2974,7 @@  semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 		return NULL;
 
 	*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
-	if (INTEL_INFO(engine->dev)->gen >= 8) {
+	if (INTEL_GEN(dev_priv) >= 8) {
 		offset = ioread32(engine->buffer->virtual_start + head + 12);
 		offset <<= 32;
 		offset = ioread32(engine->buffer->virtual_start + head + 8);
@@ -2986,7 +2984,7 @@  semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 
 static int semaphore_passed(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	struct intel_engine_cs *signaller;
 	u32 seqno;
 
@@ -3028,7 +3026,7 @@  static bool subunits_stuck(struct intel_engine_cs *engine)
 	if (engine->id != RCS)
 		return true;
 
-	i915_get_extra_instdone(engine->dev, instdone);
+	i915_get_extra_instdone(engine->i915, instdone);
 
 	/* There might be unstable subunit states even when
 	 * actual head is not moving. Filter out the unstable ones by
@@ -3069,8 +3067,7 @@  head_stuck(struct intel_engine_cs *engine, u64 acthd)
 static enum intel_ring_hangcheck_action
 ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	enum intel_ring_hangcheck_action ha;
 	u32 tmp;
 
@@ -3078,7 +3075,7 @@  ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 	if (ha != HANGCHECK_HUNG)
 		return ha;
 
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		return HANGCHECK_HUNG;
 
 	/* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -3088,19 +3085,19 @@  ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 	 */
 	tmp = I915_READ_CTL(engine);
 	if (tmp & RING_WAIT) {
-		i915_handle_error(dev, 0,
+		i915_handle_error(dev_priv, 0,
 				  "Kicking stuck wait on %s",
 				  engine->name);
 		I915_WRITE_CTL(engine, tmp);
 		return HANGCHECK_KICK;
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+	if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
 		switch (semaphore_passed(engine)) {
 		default:
 			return HANGCHECK_HUNG;
 		case 1:
-			i915_handle_error(dev, 0,
+			i915_handle_error(dev_priv, 0,
 					  "Kicking stuck semaphore on %s",
 					  engine->name);
 			I915_WRITE_CTL(engine, tmp);
@@ -3115,7 +3112,7 @@  ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 
 static unsigned kick_waiters(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *i915 = to_i915(engine->dev);
+	struct drm_i915_private *i915 = engine->i915;
 	unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
 
 	if (engine->hangcheck.user_interrupts == user_interrupts &&
@@ -3144,7 +3141,6 @@  static void i915_hangcheck_elapsed(struct work_struct *work)
 	struct drm_i915_private *dev_priv =
 		container_of(work, typeof(*dev_priv),
 			     gpu_error.hangcheck_work.work);
-	struct drm_device *dev = dev_priv->dev;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int busy_count = 0, rings_hung = 0;
@@ -3272,22 +3268,22 @@  static void i915_hangcheck_elapsed(struct work_struct *work)
 	}
 
 	if (rings_hung) {
-		i915_handle_error(dev, rings_hung, "Engine(s) hung");
+		i915_handle_error(dev_priv, rings_hung, "Engine(s) hung");
 		goto out;
 	}
 
 	if (busy_count)
 		/* Reset timer case chip hangs without another request
 		 * being added */
-		i915_queue_hangcheck(dev);
+		i915_queue_hangcheck(dev_priv);
 
 out:
 	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
 }
 
-void i915_queue_hangcheck(struct drm_device *dev)
+void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
 {
-	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
+	struct i915_gpu_error *e = &dev_priv->gpu_error;
 
 	if (!i915.enable_hangcheck)
 		return;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index dc0def210097..20b2e4039792 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -462,7 +462,7 @@  TRACE_EVENT(i915_gem_ring_sync_to,
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = from->dev->primary->index;
+			   __entry->dev = from->i915->dev->primary->index;
 			   __entry->sync_from = from->id;
 			   __entry->sync_to = to_req->engine->id;
 			   __entry->seqno = i915_gem_request_get_seqno(req);
@@ -486,13 +486,11 @@  TRACE_EVENT(i915_gem_ring_dispatch,
 			     ),
 
 	    TP_fast_assign(
-			   struct intel_engine_cs *engine =
-						i915_gem_request_get_engine(req);
-			   __entry->dev = engine->dev->primary->index;
-			   __entry->ring = engine->id;
-			   __entry->seqno = i915_gem_request_get_seqno(req);
+			   __entry->dev = req->i915->dev->primary->index;
+			   __entry->ring = req->engine->id;
+			   __entry->seqno = req->seqno;
 			   __entry->flags = flags;
-			   i915_trace_irq_get(engine, req);
+			   i915_trace_irq_get(req->engine, req);
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -511,7 +509,7 @@  TRACE_EVENT(i915_gem_ring_flush,
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = req->engine->dev->primary->index;
+			   __entry->dev = req->i915->dev->primary->index;
 			   __entry->ring = req->engine->id;
 			   __entry->invalidate = invalidate;
 			   __entry->flush = flush;
@@ -533,11 +531,9 @@  DECLARE_EVENT_CLASS(i915_gem_request,
 			     ),
 
 	    TP_fast_assign(
-			   struct intel_engine_cs *engine =
-						i915_gem_request_get_engine(req);
-			   __entry->dev = engine->dev->primary->index;
-			   __entry->ring = engine->id;
-			   __entry->seqno = i915_gem_request_get_seqno(req);
+			   __entry->dev = req->i915->dev->primary->index;
+			   __entry->ring = req->engine->id;
+			   __entry->seqno = req->seqno;
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -560,7 +556,7 @@  TRACE_EVENT(i915_gem_request_notify,
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = engine->dev->primary->index;
+			   __entry->dev = engine->i915->dev->primary->index;
 			   __entry->ring = engine->id;
 			   __entry->seqno = engine->get_seqno(engine);
 			   ),
@@ -597,13 +593,11 @@  TRACE_EVENT(i915_gem_request_wait_begin,
 	     * less desirable.
 	     */
 	    TP_fast_assign(
-			   struct intel_engine_cs *engine =
-						i915_gem_request_get_engine(req);
-			   __entry->dev = engine->dev->primary->index;
-			   __entry->ring = engine->id;
-			   __entry->seqno = i915_gem_request_get_seqno(req);
+			   __entry->dev = req->i915->dev->primary->index;
+			   __entry->ring = req->engine->id;
+			   __entry->seqno = req->seqno;
 			   __entry->blocking =
-				     mutex_is_locked(&engine->dev->struct_mutex);
+				     mutex_is_locked(&req->i915->dev->struct_mutex);
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -792,7 +786,7 @@  TRACE_EVENT(switch_mm,
 			__entry->ring = engine->id;
 			__entry->to = to;
 			__entry->vm = to->ppgtt ? &to->ppgtt->base : NULL;
-			__entry->dev = engine->dev->primary->index;
+			__entry->dev = engine->i915->dev->primary->index;
 	),
 
 	TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5ffccf6e7f36..8a08385dec7f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3144,41 +3144,39 @@  static void intel_update_primary_planes(struct drm_device *dev)
 	}
 }
 
-void intel_prepare_reset(struct drm_device *dev)
+void intel_prepare_reset(struct drm_i915_private *dev_priv)
 {
 	/* no reset support for gen2 */
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		return;
 
 	/* reset doesn't touch the display */
-	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
 		return;
 
-	drm_modeset_lock_all(dev);
+	drm_modeset_lock_all(dev_priv->dev);
 	/*
 	 * Disabling the crtcs gracefully seems nicer. Also the
 	 * g33 docs say we should at least disable all the planes.
 	 */
-	intel_display_suspend(dev);
+	intel_display_suspend(dev_priv->dev);
 }
 
-void intel_finish_reset(struct drm_device *dev)
+void intel_finish_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	/*
 	 * Flips in the rings will be nuked by the reset,
 	 * so complete all pending flips so that user space
 	 * will get its events and not get stuck.
 	 */
-	intel_complete_page_flips(dev);
+	intel_complete_page_flips(dev_priv->dev);
 
 	/* no reset support for gen2 */
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		return;
 
 	/* reset doesn't touch the display */
-	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
+	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
 		/*
 		 * Flips in the rings have been nuked by the reset,
 		 * so update the base address of all primary
@@ -3188,7 +3186,7 @@  void intel_finish_reset(struct drm_device *dev)
 		 * FIXME: Atomic will make this obsolete since we won't schedule
 		 * CS-based flips (which might get lost in gpu resets) any more.
 		 */
-		intel_update_primary_planes(dev);
+		intel_update_primary_planes(dev_priv->dev);
 		return;
 	}
 
@@ -3199,18 +3197,18 @@  void intel_finish_reset(struct drm_device *dev)
 	intel_runtime_pm_disable_interrupts(dev_priv);
 	intel_runtime_pm_enable_interrupts(dev_priv);
 
-	intel_modeset_init_hw(dev);
+	intel_modeset_init_hw(dev_priv->dev);
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	if (dev_priv->display.hpd_irq_setup)
-		dev_priv->display.hpd_irq_setup(dev);
+		dev_priv->display.hpd_irq_setup(dev_priv->dev);
 	spin_unlock_irq(&dev_priv->irq_lock);
 
-	intel_display_resume(dev);
+	intel_display_resume(dev_priv->dev);
 
 	intel_hpd_init(dev_priv);
 
-	drm_modeset_unlock_all(dev);
+	drm_modeset_unlock_all(dev_priv->dev);
 }
 
 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
@@ -11263,7 +11261,7 @@  static bool use_mmio_flip(struct intel_engine_cs *engine,
 	if (engine == NULL)
 		return true;
 
-	if (INTEL_INFO(engine->dev)->gen < 5)
+	if (INTEL_GEN(engine->i915) < 5)
 		return false;
 
 	if (i915.use_mmio_flip < 0)
@@ -16192,9 +16190,8 @@  struct intel_display_error_state {
 };
 
 struct intel_display_error_state *
-intel_display_capture_error_state(struct drm_device *dev)
+intel_display_capture_error_state(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_display_error_state *error;
 	int transcoders[] = {
 		TRANSCODER_A,
@@ -16204,14 +16201,14 @@  intel_display_capture_error_state(struct drm_device *dev)
 	};
 	int i;
 
-	if (INTEL_INFO(dev)->num_pipes == 0)
+	if (INTEL_INFO(dev_priv)->num_pipes == 0)
 		return NULL;
 
 	error = kzalloc(sizeof(*error), GFP_ATOMIC);
 	if (error == NULL)
 		return NULL;
 
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
 
 	for_each_pipe(dev_priv, i) {
@@ -16227,25 +16224,25 @@  intel_display_capture_error_state(struct drm_device *dev)
 
 		error->plane[i].control = I915_READ(DSPCNTR(i));
 		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
-		if (INTEL_INFO(dev)->gen <= 3) {
+		if (INTEL_GEN(dev_priv) <= 3) {
 			error->plane[i].size = I915_READ(DSPSIZE(i));
 			error->plane[i].pos = I915_READ(DSPPOS(i));
 		}
-		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
 			error->plane[i].addr = I915_READ(DSPADDR(i));
-		if (INTEL_INFO(dev)->gen >= 4) {
+		if (INTEL_GEN(dev_priv) >= 4) {
 			error->plane[i].surface = I915_READ(DSPSURF(i));
 			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
 		}
 
 		error->pipe[i].source = I915_READ(PIPESRC(i));
 
-		if (HAS_GMCH_DISPLAY(dev))
+		if (HAS_GMCH_DISPLAY(dev_priv))
 			error->pipe[i].stat = I915_READ(PIPESTAT(i));
 	}
 
 	/* Note: this does not include DSI transcoders. */
-	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+	error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
 	if (HAS_DDI(dev_priv))
 		error->num_transcoders++; /* Account for eDP. */
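
The display changes also swap INTEL_INFO(dev)->gen for INTEL_GEN(dev_priv); the two read the same device-info field, INTEL_GEN() being the shorthand that takes the private pointer directly. A sketch of the resulting style, using a hypothetical helper that mirrors the check in intel_prepare_reset():

static bool example_reset_skips_display(struct drm_i915_private *dev_priv)
{
	/* gen5+ and g4x resets do not touch the display, so the
	 * prepare/finish paths above bail out early for them */
	return INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv);
}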
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 21dee3f89e84..46afac7daa28 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1228,8 +1228,8 @@  u32 intel_compute_tile_offset(int *x, int *y,
 			      const struct drm_framebuffer *fb, int plane,
 			      unsigned int pitch,
 			      unsigned int rotation);
-void intel_prepare_reset(struct drm_device *dev);
-void intel_finish_reset(struct drm_device *dev);
+void intel_prepare_reset(struct drm_i915_private *dev_priv);
+void intel_finish_reset(struct drm_i915_private *dev_priv);
 void hsw_enable_pc8(struct drm_i915_private *dev_priv);
 void hsw_disable_pc8(struct drm_i915_private *dev_priv);
 void broxton_init_cdclk(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index d5a7cfec589b..4a527d3cf026 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -827,7 +827,7 @@  static bool intel_fbc_can_choose(struct intel_crtc *crtc)
 	bool enable_by_default = IS_HASWELL(dev_priv) ||
 				 IS_BROADWELL(dev_priv);
 
-	if (intel_vgpu_active(dev_priv->dev)) {
+	if (intel_vgpu_active(dev_priv)) {
 		fbc->no_fbc_reason = "VGPU is active";
 		return false;
 	}
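
intel_vgpu_active() is one of the helpers whose signature flips from struct drm_device to struct drm_i915_private in this series, so callers such as intel_fbc_can_choose() that already hold dev_priv drop the ->dev hop. The call-site pattern, sketched with a hypothetical fbc_example_allowed() helper:

static bool fbc_example_allowed(struct drm_i915_private *dev_priv)
{
	/* inside a VM the host owns FBC, so back off early */
	if (intel_vgpu_active(dev_priv))
		return false;

	return true;
}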
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 8106316ce56f..e76280d5cdff 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -246,21 +246,22 @@  static int intel_lr_context_pin(struct intel_context *ctx,
  *
  * Return: 1 if Execlists is supported and has to be enabled.
  */
-int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
+int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
 {
 	/* On platforms with execlist available, vGPU will only
 	 * support execlist mode, no ring buffer mode.
 	 */
-	if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev))
+	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
 		return 1;
 
-	if (INTEL_INFO(dev)->gen >= 9)
+	if (INTEL_GEN(dev_priv) >= 9)
 		return 1;
 
 	if (enable_execlists == 0)
 		return 0;
 
-	if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
+	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
+	    USES_PPGTT(dev_priv) &&
 	    i915.use_mmio_flip >= 0)
 		return 1;
 
@@ -270,19 +271,19 @@  int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
 static void
 logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
+	struct drm_i915_private *dev_priv = engine->i915;
 
-	if (IS_GEN8(dev) || IS_GEN9(dev))
+	if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
 		engine->idle_lite_restore_wa = ~0;
 
-	engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-					IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
+	engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+					IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
 					(engine->id == VCS || engine->id == VCS2);
 
 	engine->ctx_desc_template = GEN8_CTX_VALID;
-	engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
+	engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
 				   GEN8_CTX_ADDRESSING_MODE_SHIFT;
-	if (IS_GEN8(dev))
+	if (IS_GEN8(dev_priv))
 		engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
 	engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
 
@@ -342,8 +343,7 @@  static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
 {
 
 	struct intel_engine_cs *engine = rq0->engine;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = rq0->i915;
 	uint64_t desc[2];
 
 	if (rq1) {
@@ -425,7 +425,7 @@  static void execlists_context_unqueue(struct intel_engine_cs *engine)
 	 * If irqs are not active generate a warning as batches that finish
 	 * without the irqs may get lost and a GPU Hang may occur.
 	 */
-	WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));
+	WARN_ON(!intel_irqs_enabled(engine->i915));
 
 	/* Try to read in pairs */
 	list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
@@ -497,7 +497,7 @@  static u32
 get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
 		   u32 *context_id)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	u32 status;
 
 	read_pointer %= GEN8_CSB_ENTRIES;
@@ -523,7 +523,7 @@  get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
 static void intel_lrc_irq_handler(unsigned long data)
 {
 	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	u32 status_pointer;
 	unsigned int read_pointer, write_pointer;
 	u32 csb[GEN8_CSB_ENTRIES][2];
@@ -884,7 +884,7 @@  void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
 	struct drm_i915_gem_request *req, *tmp;
 	LIST_HEAD(cancel_list);
 
-	WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex));
 
 	spin_lock_bh(&engine->execlist_lock);
 	list_replace_init(&engine->execlist_queue, &cancel_list);
@@ -898,7 +898,7 @@  void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
 
 void intel_logical_ring_stop(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	int ret;
 
 	if (!intel_engine_initialized(engine))
@@ -964,7 +964,7 @@  static int intel_lr_context_pin(struct intel_context *ctx,
 	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 
 	ringbuf = ctx->engine[engine->id].ringbuf;
-	ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
+	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
 	if (ret)
 		goto unpin_map;
 
@@ -1019,9 +1019,7 @@  static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	int ret, i;
 	struct intel_engine_cs *engine = req->engine;
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_workarounds *w = &dev_priv->workarounds;
+	struct i915_workarounds *w = &req->i915->workarounds;
 
 	if (w->count == 0)
 		return 0;
@@ -1092,7 +1090,7 @@  static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
 	 * this batch updates GEN8_L3SQCREG4 with default value we need to
 	 * set this bit here to retain the WA during flush.
 	 */
-	if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
+	if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0))
 		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
 
 	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1181,7 +1179,7 @@  static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
 	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
 	/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
-	if (IS_BROADWELL(engine->dev)) {
+	if (IS_BROADWELL(engine->i915)) {
 		int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
 		if (rc < 0)
 			return rc;
@@ -1253,12 +1251,11 @@  static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
 				    uint32_t *offset)
 {
 	int ret;
-	struct drm_device *dev = engine->dev;
 	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
 	/* WaDisableCtxRestoreArbitration:skl,bxt */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+	if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
+	    IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
 		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
 	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1279,12 +1276,11 @@  static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
 			       uint32_t *const batch,
 			       uint32_t *offset)
 {
-	struct drm_device *dev = engine->dev;
 	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
 	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+	if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
 		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
 		wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
 		wa_ctx_emit(batch, index,
@@ -1293,7 +1289,7 @@  static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
 	}
 
 	/* WaClearTdlStateAckDirtyBits:bxt */
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+	if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
 		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
 
 		wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
@@ -1312,8 +1308,8 @@  static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
 	}
 
 	/* WaDisableCtxRestoreArbitration:skl,bxt */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+	if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
+	    IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
 		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 
 	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1325,7 +1321,7 @@  static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
 {
 	int ret;
 
-	engine->wa_ctx.obj = i915_gem_object_create(engine->dev,
+	engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev,
 						   PAGE_ALIGN(size));
 	if (IS_ERR(engine->wa_ctx.obj)) {
 		DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
@@ -1365,9 +1361,9 @@  static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 	WARN_ON(engine->id != RCS);
 
 	/* update this when WA for higher Gen are added */
-	if (INTEL_INFO(engine->dev)->gen > 9) {
+	if (INTEL_GEN(engine->i915) > 9) {
 		DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
-			  INTEL_INFO(engine->dev)->gen);
+			  INTEL_GEN(engine->i915));
 		return 0;
 	}
 
@@ -1387,7 +1383,7 @@  static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 	batch = kmap_atomic(page);
 	offset = 0;
 
-	if (INTEL_INFO(engine->dev)->gen == 8) {
+	if (INTEL_GEN(engine->i915) == 8) {
 		ret = gen8_init_indirectctx_bb(engine,
 					       &wa_ctx->indirect_ctx,
 					       batch,
@@ -1401,7 +1397,7 @@  static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 					  &offset);
 		if (ret)
 			goto out;
-	} else if (INTEL_INFO(engine->dev)->gen == 9) {
+	} else if (INTEL_GEN(engine->i915) == 9) {
 		ret = gen9_init_indirectctx_bb(engine,
 					       &wa_ctx->indirect_ctx,
 					       batch,
@@ -1427,7 +1423,7 @@  out:
 
 static void lrc_init_hws(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 
 	I915_WRITE(RING_HWS_PGA(engine->mmio_base),
 		   (u32)engine->status_page.gfx_addr);
@@ -1436,8 +1432,7 @@  static void lrc_init_hws(struct intel_engine_cs *engine)
 
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned int next_context_status_buffer_hw;
 
 	lrc_init_hws(engine);
@@ -1484,8 +1479,7 @@  static int gen8_init_common_ring(struct intel_engine_cs *engine)
 
 static int gen8_init_render_ring(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	int ret;
 
 	ret = gen8_init_common_ring(engine);
@@ -1562,7 +1556,7 @@  static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 	if (req->ctx->ppgtt &&
 	    (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
 		if (!USES_FULL_48BIT_PPGTT(req->i915) &&
-		    !intel_vgpu_active(req->i915->dev)) {
+		    !intel_vgpu_active(req->i915)) {
 			ret = intel_logical_ring_emit_pdps(req);
 			if (ret)
 				return ret;
@@ -1590,8 +1584,7 @@  static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 
 static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1610,8 +1603,7 @@  static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
 
 static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1628,8 +1620,7 @@  static int gen8_emit_flush(struct drm_i915_gem_request *request,
 {
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	struct intel_engine_cs *engine = ringbuf->engine;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = request->i915;
 	uint32_t cmd;
 	int ret;
 
@@ -1697,7 +1688,7 @@  static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
 		 * pipe control.
 		 */
-		if (IS_GEN9(engine->dev))
+		if (IS_GEN9(request->i915))
 			vf_flush_wa = true;
 	}
 
@@ -1890,7 +1881,7 @@  void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 	if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
 		tasklet_kill(&engine->irq_tasklet);
 
-	dev_priv = engine->dev->dev_private;
+	dev_priv = engine->i915;
 
 	if (engine->buffer) {
 		intel_logical_ring_stop(engine);
@@ -1914,7 +1905,7 @@  void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 	engine->ctx_desc_template = 0;
 
 	lrc_destroy_wa_ctx_obj(engine);
-	engine->dev = NULL;
+	engine->i915 = NULL;
 }
 
 static void
@@ -1929,7 +1920,7 @@  logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 	engine->emit_bb_start = gen8_emit_bb_start;
 	engine->get_seqno = gen8_get_seqno;
 	engine->set_seqno = gen8_set_seqno;
-	if (IS_BXT_REVID(engine->dev, 0, BXT_REVID_A1)) {
+	if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
 		engine->irq_seqno_barrier = bxt_a_seqno_barrier;
 		engine->set_seqno = bxt_a_set_seqno;
 	}
@@ -2023,7 +2014,7 @@  logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
 	I915_WRITE_IMR(engine, ~0);
 	POSTING_READ(RING_IMR(engine->mmio_base));
 
-	engine->dev = dev;
+	engine->i915 = dev_priv;
 
 	/* Intentionally left blank. */
 	engine->buffer = NULL;
@@ -2056,7 +2047,7 @@  logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
 	logical_ring_default_irqs(engine, info->irq_shift);
 
 	intel_engine_init_hangcheck(engine);
-	i915_gem_batch_pool_init(engine->dev, &engine->batch_pool);
+	i915_gem_batch_pool_init(dev, &engine->batch_pool);
 
 	return engine;
 }
@@ -2064,7 +2055,7 @@  logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
 static int
 logical_ring_init(struct intel_engine_cs *engine)
 {
-	struct intel_context *dctx = to_i915(engine->dev)->kernel_context;
+	struct intel_context *dctx = engine->i915->kernel_context;
 	int ret;
 
 	ret = i915_cmd_parser_init_ring(engine);
@@ -2224,7 +2215,7 @@  cleanup_render_ring:
 }
 
 static u32
-make_rpcs(struct drm_device *dev)
+make_rpcs(struct drm_i915_private *dev_priv)
 {
 	u32 rpcs = 0;
 
@@ -2232,7 +2223,7 @@  make_rpcs(struct drm_device *dev)
 	 * No explicit RPCS request is needed to ensure full
 	 * slice/subslice/EU enablement prior to Gen9.
 	*/
-	if (INTEL_INFO(dev)->gen < 9)
+	if (INTEL_GEN(dev_priv) < 9)
 		return 0;
 
 	/*
@@ -2241,24 +2232,24 @@  make_rpcs(struct drm_device *dev)
 	 * must make an explicit request through RPCS for full
 	 * enablement.
 	*/
-	if (INTEL_INFO(dev)->has_slice_pg) {
+	if (INTEL_INFO(dev_priv)->has_slice_pg) {
 		rpcs |= GEN8_RPCS_S_CNT_ENABLE;
-		rpcs |= INTEL_INFO(dev)->slice_total <<
+		rpcs |= INTEL_INFO(dev_priv)->slice_total <<
 			GEN8_RPCS_S_CNT_SHIFT;
 		rpcs |= GEN8_RPCS_ENABLE;
 	}
 
-	if (INTEL_INFO(dev)->has_subslice_pg) {
+	if (INTEL_INFO(dev_priv)->has_subslice_pg) {
 		rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
-		rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
+		rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
 			GEN8_RPCS_SS_CNT_SHIFT;
 		rpcs |= GEN8_RPCS_ENABLE;
 	}
 
-	if (INTEL_INFO(dev)->has_eu_pg) {
-		rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+	if (INTEL_INFO(dev_priv)->has_eu_pg) {
+		rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
 			GEN8_RPCS_EU_MIN_SHIFT;
-		rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+		rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
 			GEN8_RPCS_EU_MAX_SHIFT;
 		rpcs |= GEN8_RPCS_ENABLE;
 	}
@@ -2270,9 +2261,9 @@  static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 {
 	u32 indirect_ctx_offset;
 
-	switch (INTEL_INFO(engine->dev)->gen) {
+	switch (INTEL_GEN(engine->i915)) {
 	default:
-		MISSING_CASE(INTEL_INFO(engine->dev)->gen);
+		MISSING_CASE(INTEL_GEN(engine->i915));
 		/* fall through */
 	case 9:
 		indirect_ctx_offset =
@@ -2293,8 +2284,7 @@  populate_lr_context(struct intel_context *ctx,
 		    struct intel_engine_cs *engine,
 		    struct intel_ringbuffer *ringbuf)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = ctx->i915;
 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
 	void *vaddr;
 	u32 *reg_state;
@@ -2332,7 +2322,7 @@  populate_lr_context(struct intel_context *ctx,
 		       RING_CONTEXT_CONTROL(engine),
 		       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
 					  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
-					  (HAS_RESOURCE_STREAMER(dev) ?
+					  (HAS_RESOURCE_STREAMER(dev_priv) ?
 					    CTX_CTRL_RS_CTX_ENABLE : 0)));
 	ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
 		       0);
@@ -2421,7 +2411,7 @@  populate_lr_context(struct intel_context *ctx,
 	if (engine->id == RCS) {
 		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
 		ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
-			       make_rpcs(dev));
+			       make_rpcs(dev_priv));
 	}
 
 	i915_gem_object_unpin_map(ctx_obj);
@@ -2472,11 +2462,11 @@  uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
 {
 	int ret = 0;
 
-	WARN_ON(INTEL_INFO(engine->dev)->gen < 8);
+	WARN_ON(INTEL_GEN(engine->i915) < 8);
 
 	switch (engine->id) {
 	case RCS:
-		if (INTEL_INFO(engine->dev)->gen >= 9)
+		if (INTEL_GEN(engine->i915) >= 9)
 			ret = GEN9_LR_CONTEXT_RENDER_SIZE;
 		else
 			ret = GEN8_LR_CONTEXT_RENDER_SIZE;
@@ -2508,7 +2498,7 @@  uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
 static int execlists_context_deferred_alloc(struct intel_context *ctx,
 					    struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
+	struct drm_device *dev = engine->i915->dev;
 	struct drm_i915_gem_object *ctx_obj;
 	uint32_t context_size;
 	struct intel_ringbuffer *ringbuf;
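
Note that intel_logical_ring_cleanup() now clears engine->i915 where it used to clear engine->dev, and logical_ring_setup() sets it; the backpointer doubles as the "this engine was brought up" marker. Presumably intel_engine_initialized() reduces to a NULL test against it, along these lines (a sketch; the real helper lives in intel_ringbuffer.h, outside this section):

static inline bool example_engine_initialized(struct intel_engine_cs *engine)
{
	/* assumption: a non-NULL i915 backpointer means setup ran and
	 * cleanup has not yet torn the engine down */
	return engine->i915 != NULL;
}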
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 229b8a974262..1afba0331dc6 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -112,7 +112,8 @@  uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
 				     struct intel_engine_cs *engine);
 
 /* Execlists */
-int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
+int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
+				    int enable_execlists);
 struct i915_execbuffer_params;
 int intel_execlists_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
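
Given the new prototype above, the early sanitization of the module parameter would be invoked with the private pointer as soon as it exists, roughly as below (a sketch; the actual call site in the driver load code is outside this section):

static void example_sanitize_params(struct drm_i915_private *dev_priv)
{
	/* clamp the user's enable_execlists request against what the
	 * hardware (and vGPU mode) actually supports */
	i915.enable_execlists =
		intel_sanitize_enable_execlists(dev_priv,
						i915.enable_execlists);
}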
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 6ba4bf7f2a89..b765c75f3fcd 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -189,7 +189,7 @@  static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
  */
 int intel_mocs_init_engine(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	struct drm_i915_private *dev_priv = engine->i915;
 	struct drm_i915_mocs_table table;
 	unsigned int index;
 
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 8570c60c6fc0..4a1e774ba8cc 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1508,9 +1508,8 @@  static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
 
 
 struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_device *dev)
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_overlay *overlay = dev_priv->overlay;
 	struct intel_overlay_error_state *error;
 	struct overlay_registers __iomem *regs;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2422ac38ce5d..99add0dbea41 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6349,7 +6349,7 @@  void intel_enable_gt_powersave(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* Powersaving is controlled by the host when inside a VM */
-	if (intel_vgpu_active(dev))
+	if (intel_vgpu_active(dev_priv))
 		return;
 
 	if (IS_IRONLAKE_M(dev)) {
@@ -7382,8 +7382,7 @@  static void __intel_rps_boost_work(struct work_struct *work)
 	struct drm_i915_gem_request *req = boost->req;
 
 	if (!i915_gem_request_completed(req, true))
-		gen6_rps_boost(to_i915(req->engine->dev), NULL,
-			       req->emitted_jiffies);
+		gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
 
 	i915_gem_request_unreference(req);
 	kfree(boost);
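
The __intel_rps_boost_work() hunk shows why the backpointer pays off: to_i915(req->engine->dev) was two extra pointer hops to reach a structure the request can carry itself. A sketch of the preferred style (boost_example() is hypothetical):

static void boost_example(struct drm_i915_gem_request *req)
{
	/* requests, engines and contexts all carry the i915 backpointer
	 * after this patch; prefer the pointer closest to hand */
	struct drm_i915_private *dev_priv = req->i915;

	gen6_rps_boost(dev_priv, NULL, req->emitted_jiffies);
}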
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 70738a54ed0f..a723dc40baf5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -60,7 +60,7 @@  void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
 
 bool intel_engine_stopped(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
 }
 
@@ -106,7 +106,6 @@  gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	flush_domains)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct drm_device *dev = engine->dev;
 	u32 cmd;
 	int ret;
 
@@ -145,7 +144,7 @@  gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		cmd |= MI_EXE_FLUSH;
 
 	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
-	    (IS_G4X(dev) || IS_GEN5(dev)))
+	    (IS_G4X(req->i915) || IS_GEN5(req->i915)))
 		cmd |= MI_INVALIDATE_ISP;
 
 	ret = intel_ring_begin(req, 2);
@@ -431,19 +430,19 @@  gen8_render_ring_flush(struct drm_i915_gem_request *req,
 static void ring_write_tail(struct intel_engine_cs *engine,
 			    u32 value)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	I915_WRITE_TAIL(engine, value);
 }
 
 u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	u64 acthd;
 
-	if (INTEL_INFO(engine->dev)->gen >= 8)
+	if (INTEL_GEN(dev_priv) >= 8)
 		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
 					 RING_ACTHD_UDW(engine->mmio_base));
-	else if (INTEL_INFO(engine->dev)->gen >= 4)
+	else if (INTEL_GEN(dev_priv) >= 4)
 		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
 	else
 		acthd = I915_READ(ACTHD);
@@ -453,25 +452,24 @@  u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
 
 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	u32 addr;
 
 	addr = dev_priv->status_page_dmah->busaddr;
-	if (INTEL_INFO(engine->dev)->gen >= 4)
+	if (INTEL_GEN(dev_priv) >= 4)
 		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
 	I915_WRITE(HWS_PGA, addr);
 }
 
 static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	i915_reg_t mmio;
 
 	/* The ring status page addresses are no longer next to the rest of
 	 * the ring registers as of gen7.
 	 */
-	if (IS_GEN7(dev)) {
+	if (IS_GEN7(dev_priv)) {
 		switch (engine->id) {
 		case RCS:
 			mmio = RENDER_HWS_PGA_GEN7;
@@ -491,7 +489,7 @@  static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 			mmio = VEBOX_HWS_PGA_GEN7;
 			break;
 		}
-	} else if (IS_GEN6(engine->dev)) {
+	} else if (IS_GEN6(dev_priv)) {
 		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
 	} else {
 		/* XXX: gen8 returns to sanity */
@@ -508,7 +506,7 @@  static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 	 * arises: do we still need this and if so how should we go about
 	 * invalidating the TLB?
 	 */
-	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
+	if (INTEL_GEN(dev_priv) >= 6 && INTEL_GEN(dev_priv) < 8) {
 		i915_reg_t reg = RING_INSTPM(engine->mmio_base);
 
 		/* ring should be idle before issuing a sync flush */
@@ -526,9 +524,9 @@  static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 
 static bool stop_ring(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	struct drm_i915_private *dev_priv = engine->i915;
 
-	if (!IS_GEN2(engine->dev)) {
+	if (!IS_GEN2(dev_priv)) {
 		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
 		if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
 			DRM_ERROR("%s : timed out trying to stop ring\n",
@@ -546,7 +544,7 @@  static bool stop_ring(struct intel_engine_cs *engine)
 	I915_WRITE_HEAD(engine, 0);
 	engine->write_tail(engine, 0);
 
-	if (!IS_GEN2(engine->dev)) {
+	if (!IS_GEN2(dev_priv)) {
 		(void)I915_READ_CTL(engine);
 		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 	}
@@ -561,8 +559,7 @@  void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
 
 static int init_ring_common(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	struct intel_ringbuffer *ringbuf = engine->buffer;
 	struct drm_i915_gem_object *obj = ringbuf->obj;
 	int ret = 0;
@@ -592,7 +589,7 @@  static int init_ring_common(struct intel_engine_cs *engine)
 		}
 	}
 
-	if (I915_NEED_GFX_HWS(dev))
+	if (I915_NEED_GFX_HWS(dev_priv))
 		intel_ring_setup_status_page(engine);
 	else
 		ring_setup_phys_status_page(engine);
@@ -649,12 +646,10 @@  out:
 void
 intel_fini_pipe_control(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-
 	if (engine->scratch.obj == NULL)
 		return;
 
-	if (INTEL_INFO(dev)->gen >= 5) {
+	if (INTEL_GEN(engine->i915) >= 5) {
 		kunmap(sg_page(engine->scratch.obj->pages->sgl));
 		i915_gem_object_ggtt_unpin(engine->scratch.obj);
 	}
@@ -670,7 +665,7 @@  intel_init_pipe_control(struct intel_engine_cs *engine)
 
 	WARN_ON(engine->scratch.obj);
 
-	engine->scratch.obj = i915_gem_object_create(engine->dev, 4096);
+	engine->scratch.obj = i915_gem_object_create(engine->i915->dev, 4096);
 	if (IS_ERR(engine->scratch.obj)) {
 		DRM_ERROR("Failed to allocate seqno page\n");
 		ret = PTR_ERR(engine->scratch.obj);
@@ -708,11 +703,9 @@  err:
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	int ret, i;
 	struct intel_engine_cs *engine = req->engine;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_workarounds *w = &dev_priv->workarounds;
+	struct i915_workarounds *w = &req->i915->workarounds;
+	int ret, i;
 
 	if (w->count == 0)
 		return 0;
@@ -801,7 +794,7 @@  static int wa_add(struct drm_i915_private *dev_priv,
 static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
 				 i915_reg_t reg)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	struct i915_workarounds *wa = &dev_priv->workarounds;
 	const uint32_t index = wa->hw_whitelist_count[engine->id];
 
@@ -817,8 +810,7 @@  static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
 
 static int gen8_init_workarounds(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 
 	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
 
@@ -869,9 +861,8 @@  static int gen8_init_workarounds(struct intel_engine_cs *engine)
 
 static int bdw_init_workarounds(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = engine->i915;
 	int ret;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	ret = gen8_init_workarounds(engine);
 	if (ret)
@@ -891,16 +882,15 @@  static int bdw_init_workarounds(struct intel_engine_cs *engine)
 			  /* WaForceContextSaveRestoreNonCoherent:bdw */
 			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
 			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
-			  (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+			  (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
 
 	return 0;
 }
 
 static int chv_init_workarounds(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = engine->i915;
 	int ret;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	ret = gen8_init_workarounds(engine);
 	if (ret)
@@ -917,8 +907,7 @@  static int chv_init_workarounds(struct intel_engine_cs *engine)
 
 static int gen9_init_workarounds(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	uint32_t tmp;
 	int ret;
 
@@ -941,14 +930,14 @@  static int gen9_init_workarounds(struct intel_engine_cs *engine)
 			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
 
 	/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
 		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
 				  GEN9_DG_MIRROR_FIX_ENABLE);
 
 	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
 		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
 				  GEN9_RHWO_OPTIMIZATION_DISABLE);
 		/*
@@ -974,20 +963,20 @@  static int gen9_init_workarounds(struct intel_engine_cs *engine)
 			  GEN9_CCS_TLB_PREFETCH_ENABLE);
 
 	/* WaDisableMaskBasedCammingInRCC:skl,bxt */
-	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+	if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
+	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
 		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
 				  PIXEL_MASK_CAMMING_DISABLE);
 
 	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
 	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-	if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
-	    IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
+	if (IS_SKL_REVID(dev_priv, SKL_REVID_F0, REVID_FOREVER) ||
+	    IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
 		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
 	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
 
 	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
-	if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+	if (IS_SKYLAKE(dev_priv) || IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
 		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
 				  GEN8_SAMPLER_POWER_BYPASS_DIS);
 
@@ -1013,8 +1002,7 @@  static int gen9_init_workarounds(struct intel_engine_cs *engine)
 
 static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	u8 vals[3] = { 0, 0, 0 };
 	unsigned int i;
 
@@ -1055,9 +1043,8 @@  static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
 
 static int skl_init_workarounds(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = engine->i915;
 	int ret;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	ret = gen9_init_workarounds(engine);
 	if (ret)
@@ -1068,12 +1055,12 @@  static int skl_init_workarounds(struct intel_engine_cs *engine)
 	 * until D0 which is the default case so this is equivalent to
 	 * !WaDisablePerCtxtPreemptionGranularityControl:skl
 	 */
-	if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) {
+	if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
 		I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
 			   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
 	}
 
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
+	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0)) {
 		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
 		I915_WRITE(FF_SLICE_CS_CHICKEN2,
 			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
@@ -1082,24 +1069,24 @@  static int skl_init_workarounds(struct intel_engine_cs *engine)
 	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
 	 * involving this register should also be added to WA batch as required.
 	 */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
+	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
 		/* WaDisableLSQCROPERFforOCL:skl */
 		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
 			   GEN8_LQSC_RO_PERF_DIS);
 
 	/* WaEnableGapsTsvCreditFix:skl */
-	if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
+	if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
 		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
 					   GEN9_GAPS_TSV_CREDIT_DISABLE));
 	}
 
 	/* WaDisablePowerCompilerClockGating:skl */
-	if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
+	if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
 		WA_SET_BIT_MASKED(HIZ_CHICKEN,
 				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
 	/* This is tied to WaForceContextSaveRestoreNonCoherent */
-	if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
+	if (IS_SKL_REVID(dev_priv, 0, REVID_FOREVER)) {
 		/*
 		 * Use Force Non-Coherent whenever executing a 3D context. This
 		 * is a workaround for a possible hang in the unlikely event
@@ -1115,13 +1102,13 @@  static int skl_init_workarounds(struct intel_engine_cs *engine)
 	}
 
 	/* WaBarrierPerformanceFixDisable:skl */
-	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
+	if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
 		WA_SET_BIT_MASKED(HDC_CHICKEN0,
 				  HDC_FENCE_DEST_SLM_DISABLE |
 				  HDC_BARRIER_PERFORMANCE_DISABLE);
 
 	/* WaDisableSbeCacheDispatchPortSharing:skl */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
+	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
 		WA_SET_BIT_MASKED(
 			GEN7_HALF_SLICE_CHICKEN1,
 			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1136,9 +1123,8 @@  static int skl_init_workarounds(struct intel_engine_cs *engine)
 
 static int bxt_init_workarounds(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = engine->i915;
 	int ret;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	ret = gen9_init_workarounds(engine);
 	if (ret)
@@ -1146,11 +1132,11 @@  static int bxt_init_workarounds(struct intel_engine_cs *engine)
 
 	/* WaStoreMultiplePTEenable:bxt */
 	/* This is a requirement according to Hardware specification */
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
 		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
 
 	/* WaSetClckGatingDisableMedia:bxt */
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
 		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
 					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
 	}
@@ -1160,7 +1146,7 @@  static int bxt_init_workarounds(struct intel_engine_cs *engine)
 			  STALL_DOP_GATING_DISABLE);
 
 	/* WaDisableSbeCacheDispatchPortSharing:bxt */
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
 		WA_SET_BIT_MASKED(
 			GEN7_HALF_SLICE_CHICKEN1,
 			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1170,7 +1156,7 @@  static int bxt_init_workarounds(struct intel_engine_cs *engine)
 	/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
 	/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
 	/* WaDisableLSQCROPERFforOCL:bxt */
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
 		ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
 		if (ret)
 			return ret;
@@ -1181,7 +1167,7 @@  static int bxt_init_workarounds(struct intel_engine_cs *engine)
 	}
 
 	/* WaProgramL3SqcReg1DefaultForPerf:bxt */
-	if (IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
+	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
 		I915_WRITE(GEN8_L3SQCREG1, BXT_WA_L3SQCREG1_DEFAULT);
 
 	return 0;
@@ -1189,24 +1175,23 @@  static int bxt_init_workarounds(struct intel_engine_cs *engine)
 
 int init_workarounds_ring(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 
 	WARN_ON(engine->id != RCS);
 
 	dev_priv->workarounds.count = 0;
 	dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
 
-	if (IS_BROADWELL(dev))
+	if (IS_BROADWELL(dev_priv))
 		return bdw_init_workarounds(engine);
 
-	if (IS_CHERRYVIEW(dev))
+	if (IS_CHERRYVIEW(dev_priv))
 		return chv_init_workarounds(engine);
 
-	if (IS_SKYLAKE(dev))
+	if (IS_SKYLAKE(dev_priv))
 		return skl_init_workarounds(engine);
 
-	if (IS_BROXTON(dev))
+	if (IS_BROXTON(dev_priv))
 		return bxt_init_workarounds(engine);
 
 	return 0;
@@ -1214,14 +1199,13 @@  int init_workarounds_ring(struct intel_engine_cs *engine)
 
 static int init_render_ring(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	int ret = init_ring_common(engine);
 	if (ret)
 		return ret;
 
 	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
-	if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
+	if (INTEL_GEN(dev_priv) >= 4 && INTEL_GEN(dev_priv) < 7)
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
 
 	/* We need to disable the AsyncFlip performance optimisations in order
@@ -1230,22 +1214,22 @@  static int init_render_ring(struct intel_engine_cs *engine)
 	 *
 	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
 	 */
-	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+	if (INTEL_GEN(dev_priv) >= 6 && INTEL_GEN(dev_priv) < 8)
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
 	/* Required for the hardware to program scanline values for waiting */
 	/* WaEnableFlushTlbInvalidationMode:snb */
-	if (INTEL_INFO(dev)->gen == 6)
+	if (INTEL_GEN(dev_priv) == 6)
 		I915_WRITE(GFX_MODE,
 			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
 
 	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
-	if (IS_GEN7(dev))
+	if (IS_GEN7(dev_priv))
 		I915_WRITE(GFX_MODE_GEN7,
 			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
 			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
-	if (IS_GEN6(dev)) {
+	if (IS_GEN6(dev_priv)) {
 		/* From the Sandybridge PRM, volume 1 part 3, page 24:
 		 * "If this bit is set, STCunit will have LRA as replacement
 		 *  policy. [...] This bit must be reset.  LRA replacement
@@ -1255,19 +1239,18 @@  static int init_render_ring(struct intel_engine_cs *engine)
 			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+	if (INTEL_GEN(dev_priv) >= 6 && INTEL_GEN(dev_priv) < 8)
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-	if (HAS_L3_DPF(dev))
-		I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
+	if (HAS_L3_DPF(dev_priv))
+		I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
 
 	return init_workarounds_ring(engine);
 }
 
 static void render_ring_cleanup(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 
 	if (dev_priv->semaphore_obj) {
 		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
@@ -1283,13 +1266,12 @@  static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 8
 	struct intel_engine_cs *signaller = signaller_req->engine;
-	struct drm_device *dev = signaller->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
 	int ret, num_rings;
 
-	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1325,13 +1307,12 @@  static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 6
 	struct intel_engine_cs *signaller = signaller_req->engine;
-	struct drm_device *dev = signaller->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
 	int ret, num_rings;
 
-	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1364,14 +1345,13 @@  static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		       unsigned int num_dwords)
 {
 	struct intel_engine_cs *signaller = signaller_req->engine;
-	struct drm_device *dev = signaller->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *useless;
 	enum intel_engine_id id;
 	int ret, num_rings;
 
 #define MBOX_UPDATE_DWORDS 3
-	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
 	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
 #undef MBOX_UPDATE_DWORDS
 
@@ -1459,10 +1439,9 @@  gen8_render_add_request(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
 					      u32 seqno)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	return dev_priv->last_seqno < seqno;
 }
 
@@ -1480,7 +1459,7 @@  gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       u32 seqno)
 {
 	struct intel_engine_cs *waiter = waiter_req->engine;
-	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+	struct drm_i915_private *dev_priv = waiter_req->i915;
 	struct i915_hw_ppgtt *ppgtt;
 	int ret;
 
@@ -1534,7 +1513,7 @@  gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
 		return ret;
 
 	/* If seqno wrap happened, omit the wait with no-ops */
-	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+	if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
 		intel_ring_emit(waiter, dw1 | wait_mbox);
 		intel_ring_emit(waiter, seqno);
 		intel_ring_emit(waiter, 0);
@@ -1615,7 +1594,7 @@  pc_render_add_request(struct drm_i915_gem_request *req)
 static void
 gen6_seqno_barrier(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 
 	/* Workaround to force correct ordering between irq and seqno writes on
 	 * ivb (and maybe also on snb) by reading from a CS register (like
@@ -1664,8 +1643,7 @@  pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
 static bool
 gen5_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1682,8 +1660,7 @@  gen5_ring_get_irq(struct intel_engine_cs *engine)
 static void
 gen5_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1695,8 +1672,7 @@  gen5_ring_put_irq(struct intel_engine_cs *engine)
 static bool
 i9xx_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	if (!intel_irqs_enabled(dev_priv))
@@ -1716,8 +1692,7 @@  i9xx_ring_get_irq(struct intel_engine_cs *engine)
 static void
 i9xx_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1732,8 +1707,7 @@  i9xx_ring_put_irq(struct intel_engine_cs *engine)
 static bool
 i8xx_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	if (!intel_irqs_enabled(dev_priv))
@@ -1753,8 +1727,7 @@  i8xx_ring_get_irq(struct intel_engine_cs *engine)
 static void
 i8xx_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1807,8 +1780,7 @@  i9xx_add_request(struct drm_i915_gem_request *req)
 static bool
 gen6_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1816,10 +1788,10 @@  gen6_ring_get_irq(struct intel_engine_cs *engine)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (engine->irq_refcount++ == 0) {
-		if (HAS_L3_DPF(dev) && engine->id == RCS)
+		if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
 			I915_WRITE_IMR(engine,
 				       ~(engine->irq_enable_mask |
-					 GT_PARITY_ERROR(dev)));
+					 GT_PARITY_ERROR(dev_priv)));
 		else
 			I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
 		gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
@@ -1832,14 +1804,13 @@  gen6_ring_get_irq(struct intel_engine_cs *engine)
 static void
 gen6_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--engine->irq_refcount == 0) {
-		if (HAS_L3_DPF(dev) && engine->id == RCS)
-			I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
+		if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+			I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
 		else
 			I915_WRITE_IMR(engine, ~0);
 		gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
@@ -1850,8 +1821,7 @@  gen6_ring_put_irq(struct intel_engine_cs *engine)
 static bool
 hsw_vebox_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1870,8 +1840,7 @@  hsw_vebox_get_irq(struct intel_engine_cs *engine)
 static void
 hsw_vebox_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1885,8 +1854,7 @@  hsw_vebox_put_irq(struct intel_engine_cs *engine)
 static bool
 gen8_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1894,7 +1862,7 @@  gen8_ring_get_irq(struct intel_engine_cs *engine)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (engine->irq_refcount++ == 0) {
-		if (HAS_L3_DPF(dev) && engine->id == RCS) {
+		if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
 			I915_WRITE_IMR(engine,
 				       ~(engine->irq_enable_mask |
 					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
@@ -1911,13 +1879,12 @@  gen8_ring_get_irq(struct intel_engine_cs *engine)
 static void
 gen8_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--engine->irq_refcount == 0) {
-		if (HAS_L3_DPF(dev) && engine->id == RCS) {
+		if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
 			I915_WRITE_IMR(engine,
 				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
 		} else {
@@ -2039,12 +2006,12 @@  i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 
 static void cleanup_phys_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	struct drm_i915_private *dev_priv = engine->i915;
 
 	if (!dev_priv->status_page_dmah)
 		return;
 
-	drm_pci_free(engine->dev, dev_priv->status_page_dmah);
+	drm_pci_free(dev_priv->dev, dev_priv->status_page_dmah);
 	engine->status_page.page_addr = NULL;
 }
 
@@ -2070,7 +2037,7 @@  static int init_status_page(struct intel_engine_cs *engine)
 		unsigned flags;
 		int ret;
 
-		obj = i915_gem_object_create(engine->dev, 4096);
+		obj = i915_gem_object_create(engine->i915->dev, 4096);
 		if (IS_ERR(obj)) {
 			DRM_ERROR("Failed to allocate status page\n");
 			return PTR_ERR(obj);
@@ -2081,7 +2048,7 @@  static int init_status_page(struct intel_engine_cs *engine)
 			goto err_unref;
 
 		flags = 0;
-		if (!HAS_LLC(engine->dev))
+		if (!HAS_LLC(engine->i915))
 			/* On g33, we cannot place HWS above 256MiB, so
 			 * restrict its pinning to the low mappable arena.
 			 * Though this restriction is not documented for
@@ -2115,11 +2082,11 @@  err_unref:
 
 static int init_phys_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 
 	if (!dev_priv->status_page_dmah) {
 		dev_priv->status_page_dmah =
-			drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE);
+			drm_pci_alloc(dev_priv->dev, PAGE_SIZE, PAGE_SIZE);
 		if (!dev_priv->status_page_dmah)
 			return -ENOMEM;
 	}
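
The phys status page pair above also marks the API boundary cleanly: the
engine code now holds only dev_priv, and the legacy DRM PCI helpers that
still expect a drm_device reach it through dev_priv->dev. A condensed,
illustrative sketch of the resulting alloc/free pairing -- the field and
helper names come from the hunks above, but these wrapper functions
themselves are hypothetical:

static int phys_hws_alloc(struct drm_i915_private *dev_priv)
{
	/* drm_pci_alloc() is a legacy DRM interface and wants the
	 * drm_device, which the i915 private keeps at dev_priv->dev.
	 */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev_priv->dev, PAGE_SIZE, PAGE_SIZE);

	return dev_priv->status_page_dmah ? 0 : -ENOMEM;
}

static void phys_hws_free(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->status_page_dmah)
		return;

	drm_pci_free(dev_priv->dev, dev_priv->status_page_dmah);
}
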
@@ -2145,10 +2112,9 @@  void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 	ringbuf->vma = NULL;
 }
 
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
 				     struct intel_ringbuffer *ringbuf)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj = ringbuf->obj;
 	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
 	unsigned flags = PIN_OFFSET_BIAS | 4096;
@@ -2247,13 +2213,13 @@  intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
 	 * of the buffer.
 	 */
 	ring->effective_size = size;
-	if (IS_I830(engine->dev) || IS_845G(engine->dev))
+	if (IS_I830(engine->i915) || IS_845G(engine->i915))
 		ring->effective_size -= 2 * CACHELINE_BYTES;
 
 	ring->last_retired_head = -1;
 	intel_ring_update_space(ring);
 
-	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
+	ret = intel_alloc_ringbuffer_obj(engine->i915->dev, ring);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
 				 engine->name, ret);
@@ -2276,12 +2242,13 @@  intel_ringbuffer_free(struct intel_ringbuffer *ring)
 static int intel_init_ring_buffer(struct drm_device *dev,
 				  struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_ringbuffer *ringbuf;
 	int ret;
 
 	WARN_ON(engine->buffer);
 
-	engine->dev = dev;
+	engine->i915 = dev_priv;
 	INIT_LIST_HEAD(&engine->active_list);
 	INIT_LIST_HEAD(&engine->request_list);
 	INIT_LIST_HEAD(&engine->execlist_queue);
@@ -2299,7 +2266,7 @@  static int intel_init_ring_buffer(struct drm_device *dev,
 	}
 	engine->buffer = ringbuf;
 
-	if (I915_NEED_GFX_HWS(dev)) {
+	if (I915_NEED_GFX_HWS(dev_priv)) {
 		ret = init_status_page(engine);
 		if (ret)
 			goto error;
@@ -2310,7 +2277,7 @@  static int intel_init_ring_buffer(struct drm_device *dev,
 			goto error;
 	}
 
-	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
 	if (ret) {
 		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
 				engine->name, ret);
@@ -2336,11 +2303,11 @@  void intel_cleanup_engine(struct intel_engine_cs *engine)
 	if (!intel_engine_initialized(engine))
 		return;
 
-	dev_priv = to_i915(engine->dev);
+	dev_priv = engine->i915;
 
 	if (engine->buffer) {
 		intel_stop_engine(engine);
-		WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+		WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
 		intel_unpin_ringbuffer_obj(engine->buffer);
 		intel_ringbuffer_free(engine->buffer);
@@ -2350,7 +2317,7 @@  void intel_cleanup_engine(struct intel_engine_cs *engine)
 	if (engine->cleanup)
 		engine->cleanup(engine);
 
-	if (I915_NEED_GFX_HWS(engine->dev)) {
+	if (I915_NEED_GFX_HWS(dev_priv)) {
 		cleanup_status_page(engine);
 	} else {
 		WARN_ON(engine->id != RCS);
@@ -2359,7 +2326,7 @@  void intel_cleanup_engine(struct intel_engine_cs *engine)
 
 	i915_cmd_parser_fini_ring(engine);
 	i915_gem_batch_pool_fini(&engine->batch_pool);
-	engine->dev = NULL;
+	engine->i915 = NULL;
 }
 
 int intel_engine_idle(struct intel_engine_cs *engine)
@@ -2525,7 +2492,7 @@  int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 
 void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	struct drm_i915_private *dev_priv = engine->i915;
 
 	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
 	 * so long as the semaphore value in the register/page is greater
@@ -2561,7 +2528,7 @@  void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
 				     u32 value)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 
        /* Every tail move must follow the sequence below */
 
@@ -2603,7 +2570,7 @@  static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
-	if (INTEL_INFO(engine->dev)->gen >= 8)
+	if (INTEL_GEN(req->i915) >= 8)
 		cmd += 1;
 
 	/* We always require a command barrier so that subsequent
@@ -2625,7 +2592,7 @@  static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 	intel_ring_emit(engine, cmd);
 	intel_ring_emit(engine,
 			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-	if (INTEL_INFO(engine->dev)->gen >= 8) {
+	if (INTEL_GEN(req->i915) >= 8) {
 		intel_ring_emit(engine, 0); /* upper addr */
 		intel_ring_emit(engine, 0); /* value */
 	} else  {
@@ -2716,7 +2683,6 @@  static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct drm_device *dev = engine->dev;
 	uint32_t cmd;
 	int ret;
 
@@ -2725,7 +2691,7 @@  static int gen6_ring_flush(struct drm_i915_gem_request *req,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
-	if (INTEL_INFO(dev)->gen >= 8)
+	if (INTEL_GEN(req->i915) >= 8)
 		cmd += 1;
 
 	/* We always require a command barrier so that subsequent
@@ -2746,7 +2712,7 @@  static int gen6_ring_flush(struct drm_i915_gem_request *req,
 	intel_ring_emit(engine, cmd);
 	intel_ring_emit(engine,
 			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (INTEL_GEN(req->i915) >= 8) {
 		intel_ring_emit(engine, 0); /* upper addr */
 		intel_ring_emit(engine, 0); /* value */
 	} else  {
@@ -2772,7 +2738,7 @@  int intel_init_render_ring_buffer(struct drm_device *dev)
 	engine->mmio_base = RENDER_RING_BASE;
 
 	if (INTEL_INFO(dev)->gen >= 8) {
-		if (i915_semaphore_is_enabled(dev)) {
+		if (i915_semaphore_is_enabled(dev_priv)) {
 			obj = i915_gem_object_create(dev, 4096);
 			if (IS_ERR(obj)) {
 				DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
@@ -2797,7 +2763,7 @@  int intel_init_render_ring_buffer(struct drm_device *dev)
 		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		engine->get_seqno = ring_get_seqno;
 		engine->set_seqno = ring_set_seqno;
-		if (i915_semaphore_is_enabled(dev)) {
+		if (i915_semaphore_is_enabled(dev_priv)) {
 			WARN_ON(!dev_priv->semaphore_obj);
 			engine->semaphore.sync_to = gen8_ring_sync;
 			engine->semaphore.signal = gen8_rcs_signal;
@@ -2815,7 +2781,7 @@  int intel_init_render_ring_buffer(struct drm_device *dev)
 		engine->irq_seqno_barrier = gen6_seqno_barrier;
 		engine->get_seqno = ring_get_seqno;
 		engine->set_seqno = ring_set_seqno;
-		if (i915_semaphore_is_enabled(dev)) {
+		if (i915_semaphore_is_enabled(dev_priv)) {
 			engine->semaphore.sync_to = gen6_ring_sync;
 			engine->semaphore.signal = gen6_signal;
 			/*
@@ -2939,7 +2905,7 @@  int intel_init_bsd_ring_buffer(struct drm_device *dev)
 			engine->irq_put = gen8_ring_put_irq;
 			engine->dispatch_execbuffer =
 				gen8_ring_dispatch_execbuffer;
-			if (i915_semaphore_is_enabled(dev)) {
+			if (i915_semaphore_is_enabled(dev_priv)) {
 				engine->semaphore.sync_to = gen8_ring_sync;
 				engine->semaphore.signal = gen8_xcs_signal;
 				GEN8_RING_SEMAPHORE_INIT(engine);
@@ -2950,7 +2916,7 @@  int intel_init_bsd_ring_buffer(struct drm_device *dev)
 			engine->irq_put = gen6_ring_put_irq;
 			engine->dispatch_execbuffer =
 				gen6_ring_dispatch_execbuffer;
-			if (i915_semaphore_is_enabled(dev)) {
+			if (i915_semaphore_is_enabled(dev_priv)) {
 				engine->semaphore.sync_to = gen6_ring_sync;
 				engine->semaphore.signal = gen6_signal;
 				engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
@@ -3013,7 +2979,7 @@  int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 	engine->irq_put = gen8_ring_put_irq;
 	engine->dispatch_execbuffer =
 			gen8_ring_dispatch_execbuffer;
-	if (i915_semaphore_is_enabled(dev)) {
+	if (i915_semaphore_is_enabled(dev_priv)) {
 		engine->semaphore.sync_to = gen8_ring_sync;
 		engine->semaphore.signal = gen8_xcs_signal;
 		GEN8_RING_SEMAPHORE_INIT(engine);
@@ -3046,7 +3012,7 @@  int intel_init_blt_ring_buffer(struct drm_device *dev)
 		engine->irq_get = gen8_ring_get_irq;
 		engine->irq_put = gen8_ring_put_irq;
 		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-		if (i915_semaphore_is_enabled(dev)) {
+		if (i915_semaphore_is_enabled(dev_priv)) {
 			engine->semaphore.sync_to = gen8_ring_sync;
 			engine->semaphore.signal = gen8_xcs_signal;
 			GEN8_RING_SEMAPHORE_INIT(engine);
@@ -3056,7 +3022,7 @@  int intel_init_blt_ring_buffer(struct drm_device *dev)
 		engine->irq_get = gen6_ring_get_irq;
 		engine->irq_put = gen6_ring_put_irq;
 		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-		if (i915_semaphore_is_enabled(dev)) {
+		if (i915_semaphore_is_enabled(dev_priv)) {
 			engine->semaphore.signal = gen6_signal;
 			engine->semaphore.sync_to = gen6_ring_sync;
 			/*
@@ -3107,7 +3073,7 @@  int intel_init_vebox_ring_buffer(struct drm_device *dev)
 		engine->irq_get = gen8_ring_get_irq;
 		engine->irq_put = gen8_ring_put_irq;
 		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-		if (i915_semaphore_is_enabled(dev)) {
+		if (i915_semaphore_is_enabled(dev_priv)) {
 			engine->semaphore.sync_to = gen8_ring_sync;
 			engine->semaphore.signal = gen8_xcs_signal;
 			GEN8_RING_SEMAPHORE_INIT(engine);
@@ -3117,7 +3083,7 @@  int intel_init_vebox_ring_buffer(struct drm_device *dev)
 		engine->irq_get = hsw_vebox_get_irq;
 		engine->irq_put = hsw_vebox_put_irq;
 		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-		if (i915_semaphore_is_enabled(dev)) {
+		if (i915_semaphore_is_enabled(dev_priv)) {
 			engine->semaphore.sync_to = gen6_ring_sync;
 			engine->semaphore.signal = gen6_signal;
 			engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
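
Every intel_ringbuffer.c hunk above is the same mechanical conversion:
drop the engine->dev + dev->dev_private two-step and load the cached
engine->i915 backpointer instead. An illustrative before/after sketch of
the shape -- the function names here are hypothetical, and irq_enable_mask
stands in for whatever each real call site touches:

/* Before: two dependent pointer loads on every call. */
static void example_irq_old(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	/* dev_priv looks unused, but the register macros below expand
	 * to I915_WRITE() and pick up the local dev_priv from scope.
	 */
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
}

/* After: a single load from the engine itself. */
static void example_irq_new(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
}

And since the feature macros shown in these hunks (IS_GEN2(), HAS_L3_DPF(),
i915_semaphore_is_enabled(), intel_vgpu_active(), ...) now accept dev_priv
directly, most call sites shrink to a one-word change.
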
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 723ff6160fbb..929e7b4af2a4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -141,7 +141,8 @@  struct  i915_ctx_workarounds {
 	struct drm_i915_gem_object *obj;
 };
 
-struct  intel_engine_cs {
+struct intel_engine_cs {
+	struct drm_i915_private *i915;
 	const char	*name;
 	enum intel_engine_id {
 		RCS = 0,
@@ -156,7 +157,6 @@  struct  intel_engine_cs {
 	unsigned int hw_id;
 	unsigned int guc_id; /* XXX same as hw_id? */
 	u32		mmio_base;
-	struct		drm_device *dev;
 	struct intel_ringbuffer *buffer;
 	struct list_head buffers;
 
@@ -350,7 +350,7 @@  struct  intel_engine_cs {
 static inline bool
 intel_engine_initialized(struct intel_engine_cs *engine)
 {
-	return engine->dev != NULL;
+	return engine->i915 != NULL;
 }
 
 static inline unsigned
@@ -425,7 +425,7 @@  intel_write_status_page(struct intel_engine_cs *engine,
 
 struct intel_ringbuffer *
 intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
 				     struct intel_ringbuffer *ringbuf);
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
 void intel_ringbuffer_free(struct intel_ringbuffer *ring);
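
The header hunk also documents the lifecycle rule that lets the backpointer
double as a liveness flag: intel_init_ring_buffer() assigns engine->i915
before any allocation can fail, intel_cleanup_engine() clears it last, so
intel_engine_initialized() stays a plain NULL test. A hedged usage sketch
(a hypothetical debug helper, not part of the patch):

static void engine_debug_dump(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv;

	/* engine->i915 is NULL before intel_init_ring_buffer() and
	 * NULL again after intel_cleanup_engine().
	 */
	if (!intel_engine_initialized(engine))
		return;

	dev_priv = engine->i915;
	DRM_DEBUG_DRIVER("%s: gen%d engine\n",
			 engine->name, INTEL_GEN(dev_priv));
}
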
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4f1dfe616856..4ea2bf2c2a4a 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1381,7 +1381,7 @@  void intel_uncore_init(struct drm_device *dev)
 		break;
 	}
 
-	if (intel_vgpu_active(dev)) {
+	if (intel_vgpu_active(dev_priv)) {
 		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
 		ASSIGN_READ_MMIO_VFUNCS(vgpu);
 	}
@@ -1663,8 +1663,8 @@  static int wait_for_register_fw(struct drm_i915_private *dev_priv,
 
 static int gen8_request_engine_reset(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = engine->i915;
 	int ret;
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
 	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
 		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
@@ -1682,7 +1682,7 @@  static int gen8_request_engine_reset(struct intel_engine_cs *engine)
 
 static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 
 	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
 		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
@@ -1802,10 +1802,10 @@  intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
 {
 	enum forcewake_domains fw_domains;
 
-	if (intel_vgpu_active(dev_priv->dev))
+	if (intel_vgpu_active(dev_priv))
 		return 0;
 
-	switch (INTEL_INFO(dev_priv)->gen) {
+	switch (INTEL_GEN(dev_priv)) {
 	case 9:
 		fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
 		break;
@@ -1842,10 +1842,10 @@  intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
 {
 	enum forcewake_domains fw_domains;
 
-	if (intel_vgpu_active(dev_priv->dev))
+	if (intel_vgpu_active(dev_priv))
 		return 0;
 
-	switch (INTEL_INFO(dev_priv)->gen) {
+	switch (INTEL_GEN(dev_priv)) {
 	case 9:
 		fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
 		break;