diff mbox

[01/11] drm/i915/gtt: Rename i915_hw_ppgtt base member

Message ID 20180605071949.14159-2-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Headers show

Commit Message

Chris Wilson June 5, 2018, 7:19 a.m. UTC
In the near future, I want to subclass gen6_hw_ppgtt as it contains a
few specialised members and I wish to add more. To avoid the ugliness of
using ppgtt->base.base, rename the i915_hw_ppgtt base member
(i915_address_space) as vm, which is our common shorthand for an
i915_address_space local.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
---
 drivers/gpu/drm/i915/gvt/aperture_gm.c        |   2 +-
 drivers/gpu/drm/i915/gvt/gvt.h                |   4 +-
 drivers/gpu/drm/i915/i915_debugfs.c           |   4 +-
 drivers/gpu/drm/i915/i915_drv.h               |   2 +-
 drivers/gpu/drm/i915/i915_gem.c               |  30 +-
 drivers/gpu/drm/i915/i915_gem_context.c       |  10 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c    |  16 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c           | 329 +++++++++---------
 drivers/gpu/drm/i915/i915_gem_gtt.h           |   8 +-
 drivers/gpu/drm/i915/i915_gem_render_state.c  |   2 +-
 drivers/gpu/drm/i915/i915_gem_shrinker.c      |   2 +-
 drivers/gpu/drm/i915/i915_gem_stolen.c        |   6 +-
 drivers/gpu/drm/i915/i915_gpu_error.c         |   9 +-
 drivers/gpu/drm/i915/i915_trace.h             |   4 +-
 drivers/gpu/drm/i915/i915_vgpu.c              |   8 +-
 drivers/gpu/drm/i915/i915_vma.c               |   2 +-
 drivers/gpu/drm/i915/intel_engine_cs.c        |   4 +-
 drivers/gpu/drm/i915/intel_guc.c              |   2 +-
 drivers/gpu/drm/i915/intel_guc_submission.c   |   2 +-
 drivers/gpu/drm/i915/intel_lrc.c              |  10 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c       |   4 +-
 drivers/gpu/drm/i915/selftests/huge_pages.c   |  50 +--
 .../gpu/drm/i915/selftests/i915_gem_context.c |   6 +-
 .../gpu/drm/i915/selftests/i915_gem_evict.c   |  34 +-
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 110 +++---
 .../gpu/drm/i915/selftests/i915_gem_object.c  |   6 +-
 drivers/gpu/drm/i915/selftests/i915_request.c |   5 +-
 drivers/gpu/drm/i915/selftests/i915_vma.c     |  31 +-
 .../gpu/drm/i915/selftests/intel_hangcheck.c  |   4 +-
 drivers/gpu/drm/i915/selftests/intel_lrc.c    |   2 +-
 .../drm/i915/selftests/intel_workarounds.c    |   2 +-
 drivers/gpu/drm/i915/selftests/mock_gtt.c     |  66 ++--
 32 files changed, 388 insertions(+), 388 deletions(-)

Comments

Mika Kuoppala June 5, 2018, 2:38 p.m. UTC | #1
Chris Wilson <chris@chris-wilson.co.uk> writes:

> In the near future, I want to subclass gen6_hw_ppgtt as it contains a
> few specialised members and I wish to add more. To avoid the ugliness of
> using ppgtt->base.base, rename the i915_hw_ppgtt base member
> (i915_address_space) as vm, which is our common shorthand for an
> i915_address_space local.
>

Scrolled through it. A couple of formatting fixes and
getting dev_priv through vm. Didn't notice anything
out of the ordinary.

For me it reads better now, and we should have done this
prior to doing 32bit ppgtts :P

Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>

> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> Cc: Matthew Auld <matthew.william.auld@gmail.com>
> ---
>  drivers/gpu/drm/i915/gvt/aperture_gm.c        |   2 +-
>  drivers/gpu/drm/i915/gvt/gvt.h                |   4 +-
>  drivers/gpu/drm/i915/i915_debugfs.c           |   4 +-
>  drivers/gpu/drm/i915/i915_drv.h               |   2 +-
>  drivers/gpu/drm/i915/i915_gem.c               |  30 +-
>  drivers/gpu/drm/i915/i915_gem_context.c       |  10 +-
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c    |  16 +-
>  drivers/gpu/drm/i915/i915_gem_gtt.c           | 329 +++++++++---------
>  drivers/gpu/drm/i915/i915_gem_gtt.h           |   8 +-
>  drivers/gpu/drm/i915/i915_gem_render_state.c  |   2 +-
>  drivers/gpu/drm/i915/i915_gem_shrinker.c      |   2 +-
>  drivers/gpu/drm/i915/i915_gem_stolen.c        |   6 +-
>  drivers/gpu/drm/i915/i915_gpu_error.c         |   9 +-
>  drivers/gpu/drm/i915/i915_trace.h             |   4 +-
>  drivers/gpu/drm/i915/i915_vgpu.c              |   8 +-
>  drivers/gpu/drm/i915/i915_vma.c               |   2 +-
>  drivers/gpu/drm/i915/intel_engine_cs.c        |   4 +-
>  drivers/gpu/drm/i915/intel_guc.c              |   2 +-
>  drivers/gpu/drm/i915/intel_guc_submission.c   |   2 +-
>  drivers/gpu/drm/i915/intel_lrc.c              |  10 +-
>  drivers/gpu/drm/i915/intel_ringbuffer.c       |   4 +-
>  drivers/gpu/drm/i915/selftests/huge_pages.c   |  50 +--
>  .../gpu/drm/i915/selftests/i915_gem_context.c |   6 +-
>  .../gpu/drm/i915/selftests/i915_gem_evict.c   |  34 +-
>  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 110 +++---
>  .../gpu/drm/i915/selftests/i915_gem_object.c  |   6 +-
>  drivers/gpu/drm/i915/selftests/i915_request.c |   5 +-
>  drivers/gpu/drm/i915/selftests/i915_vma.c     |  31 +-
>  .../gpu/drm/i915/selftests/intel_hangcheck.c  |   4 +-
>  drivers/gpu/drm/i915/selftests/intel_lrc.c    |   2 +-
>  .../drm/i915/selftests/intel_workarounds.c    |   2 +-
>  drivers/gpu/drm/i915/selftests/mock_gtt.c     |  66 ++--
>  32 files changed, 388 insertions(+), 388 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
> index 7c9ec4f4f36c..380eeb2a0e83 100644
> --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
> +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
> @@ -61,7 +61,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
>  	}
>  
>  	mutex_lock(&dev_priv->drm.struct_mutex);
> -	ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
> +	ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
>  				  size, I915_GTT_PAGE_SIZE,
>  				  I915_COLOR_UNEVICTABLE,
>  				  start, end, flags);
> diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
> index 05d15a095310..2ff0d40281a9 100644
> --- a/drivers/gpu/drm/i915/gvt/gvt.h
> +++ b/drivers/gpu/drm/i915/gvt/gvt.h
> @@ -361,9 +361,9 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
>  #define gvt_aperture_sz(gvt)	  (gvt->dev_priv->ggtt.mappable_end)
>  #define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
>  
> -#define gvt_ggtt_gm_sz(gvt)	  (gvt->dev_priv->ggtt.base.total)
> +#define gvt_ggtt_gm_sz(gvt)	  (gvt->dev_priv->ggtt.vm.total)
>  #define gvt_ggtt_sz(gvt) \
> -	((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
> +	((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
>  #define gvt_hidden_sz(gvt)	  (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
>  
>  #define gvt_aperture_gmadr_base(gvt) (0)
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 15e86d34a81c..698af45e229c 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -328,7 +328,7 @@ static int per_file_stats(int id, void *ptr, void *data)
>  		} else {
>  			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
>  
> -			if (ppgtt->base.file != stats->file_priv)
> +			if (ppgtt->vm.file != stats->file_priv)
>  				continue;
>  		}
>  
> @@ -508,7 +508,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
>  		   dpy_count, dpy_size);
>  
>  	seq_printf(m, "%llu [%pa] gtt total\n",
> -		   ggtt->base.total, &ggtt->mappable_end);
> +		   ggtt->vm.total, &ggtt->mappable_end);
>  	seq_printf(m, "Supported page sizes: %s\n",
>  		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
>  					buf, sizeof(buf)));
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 06ecac4c3253..a4bb30c32a52 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -3213,7 +3213,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
>  static inline struct i915_hw_ppgtt *
>  i915_vm_to_ppgtt(struct i915_address_space *vm)
>  {
> -	return container_of(vm, struct i915_hw_ppgtt, base);
> +	return container_of(vm, struct i915_hw_ppgtt, vm);
>  }
>  
>  /* i915_gem_fence_reg.c */
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index cb680ddafa0c..6ce29d1c20be 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -65,7 +65,7 @@ insert_mappable_node(struct i915_ggtt *ggtt,
>                       struct drm_mm_node *node, u32 size)
>  {
>  	memset(node, 0, sizeof(*node));
> -	return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
> +	return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
>  					   size, 0, I915_COLOR_UNEVICTABLE,
>  					   0, ggtt->mappable_end,
>  					   DRM_MM_INSERT_LOW);
> @@ -249,17 +249,17 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
>  	struct i915_vma *vma;
>  	u64 pinned;
>  
> -	pinned = ggtt->base.reserved;
> +	pinned = ggtt->vm.reserved;
>  	mutex_lock(&dev->struct_mutex);
> -	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
> +	list_for_each_entry(vma, &ggtt->vm.active_list, vm_link)
>  		if (i915_vma_is_pinned(vma))
>  			pinned += vma->node.size;
> -	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
> +	list_for_each_entry(vma, &ggtt->vm.inactive_list, vm_link)
>  		if (i915_vma_is_pinned(vma))
>  			pinned += vma->node.size;
>  	mutex_unlock(&dev->struct_mutex);
>  
> -	args->aper_size = ggtt->base.total;
> +	args->aper_size = ggtt->vm.total;
>  	args->aper_available_size = args->aper_size - pinned;
>  
>  	return 0;
> @@ -1223,9 +1223,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
>  		page_length = remain < page_length ? remain : page_length;
>  		if (node.allocated) {
>  			wmb();
> -			ggtt->base.insert_page(&ggtt->base,
> -					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
> -					       node.start, I915_CACHE_NONE, 0);
> +			ggtt->vm.insert_page(&ggtt->vm,
> +					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
> +					     node.start, I915_CACHE_NONE, 0);
>  			wmb();
>  		} else {
>  			page_base += offset & PAGE_MASK;
> @@ -1246,8 +1246,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
>  out_unpin:
>  	if (node.allocated) {
>  		wmb();
> -		ggtt->base.clear_range(&ggtt->base,
> -				       node.start, node.size);
> +		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
>  		remove_mappable_node(&node);
>  	} else {
>  		i915_vma_unpin(vma);
> @@ -1426,9 +1425,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
>  		page_length = remain < page_length ? remain : page_length;
>  		if (node.allocated) {
>  			wmb(); /* flush the write before we modify the GGTT */
> -			ggtt->base.insert_page(&ggtt->base,
> -					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
> -					       node.start, I915_CACHE_NONE, 0);
> +			ggtt->vm.insert_page(&ggtt->vm,
> +					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
> +					     node.start, I915_CACHE_NONE, 0);
>  			wmb(); /* flush modifications to the GGTT (insert_page) */
>  		} else {
>  			page_base += offset & PAGE_MASK;
> @@ -1455,8 +1454,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
>  out_unpin:
>  	if (node.allocated) {
>  		wmb();
> -		ggtt->base.clear_range(&ggtt->base,
> -				       node.start, node.size);
> +		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
>  		remove_mappable_node(&node);
>  	} else {
>  		i915_vma_unpin(vma);
> @@ -4374,7 +4372,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
>  			 u64 flags)
>  {
>  	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
> -	struct i915_address_space *vm = &dev_priv->ggtt.base;
> +	struct i915_address_space *vm = &dev_priv->ggtt.vm;
>  	struct i915_vma *vma;
>  	int ret;
>  
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index 38c6e9e4e91b..b2c7ac1b074d 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -197,7 +197,7 @@ static void context_close(struct i915_gem_context *ctx)
>  	 */
>  	lut_close(ctx);
>  	if (ctx->ppgtt)
> -		i915_ppgtt_close(&ctx->ppgtt->base);
> +		i915_ppgtt_close(&ctx->ppgtt->vm);
>  
>  	ctx->file_priv = ERR_PTR(-EBADF);
>  	i915_gem_context_put(ctx);
> @@ -249,7 +249,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
>  	desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
>  
>  	address_mode = INTEL_LEGACY_32B_CONTEXT;
> -	if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
> +	if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
>  		address_mode = INTEL_LEGACY_64B_CONTEXT;
>  	desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
>  
> @@ -810,11 +810,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
>  		break;
>  	case I915_CONTEXT_PARAM_GTT_SIZE:
>  		if (ctx->ppgtt)
> -			args->value = ctx->ppgtt->base.total;
> +			args->value = ctx->ppgtt->vm.total;
>  		else if (to_i915(dev)->mm.aliasing_ppgtt)
> -			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
> +			args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
>  		else
> -			args->value = to_i915(dev)->ggtt.base.total;
> +			args->value = to_i915(dev)->ggtt.vm.total;
>  		break;
>  	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
>  		args->value = i915_gem_context_no_error_capture(ctx);
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index f627a8c47c58..eefd449502e2 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -703,7 +703,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
>  		return -ENOENT;
>  
>  	eb->ctx = ctx;
> -	eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;
> +	eb->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &eb->i915->ggtt.vm;
>  
>  	eb->context_flags = 0;
>  	if (ctx->flags & CONTEXT_NO_ZEROMAP)
> @@ -943,9 +943,9 @@ static void reloc_cache_reset(struct reloc_cache *cache)
>  		if (cache->node.allocated) {
>  			struct i915_ggtt *ggtt = cache_to_ggtt(cache);
>  
> -			ggtt->base.clear_range(&ggtt->base,
> -					       cache->node.start,
> -					       cache->node.size);
> +			ggtt->vm.clear_range(&ggtt->vm,
> +					     cache->node.start,
> +					     cache->node.size);
>  			drm_mm_remove_node(&cache->node);
>  		} else {
>  			i915_vma_unpin((struct i915_vma *)cache->node.mm);
> @@ -1016,7 +1016,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
>  		if (IS_ERR(vma)) {
>  			memset(&cache->node, 0, sizeof(cache->node));
>  			err = drm_mm_insert_node_in_range
> -				(&ggtt->base.mm, &cache->node,
> +				(&ggtt->vm.mm, &cache->node,
>  				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
>  				 0, ggtt->mappable_end,
>  				 DRM_MM_INSERT_LOW);
> @@ -1037,9 +1037,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
>  	offset = cache->node.start;
>  	if (cache->node.allocated) {
>  		wmb();
> -		ggtt->base.insert_page(&ggtt->base,
> -				       i915_gem_object_get_dma_address(obj, page),
> -				       offset, I915_CACHE_NONE, 0);
> +		ggtt->vm.insert_page(&ggtt->vm,
> +				     i915_gem_object_get_dma_address(obj, page),
> +				     offset, I915_CACHE_NONE, 0);
>  	} else {
>  		offset += page << PAGE_SHIFT;
>  	}
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index de9180516308..12b1386e47e9 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -780,7 +780,7 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
>   */
>  static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
>  {
> -	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
> +	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask;
>  }
>  
>  /* Removes entries from a single page table, releasing it if it's empty.
> @@ -973,7 +973,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
>  	gen8_pte_t *vaddr;
>  	bool ret;
>  
> -	GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
> +	GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
>  	pd = pdp->page_directory[idx->pdpe];
>  	vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
>  	do {
> @@ -1004,7 +1004,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
>  					break;
>  				}
>  
> -				GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
> +				GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
>  				pd = pdp->page_directory[idx->pdpe];
>  			}
>  
> @@ -1233,7 +1233,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
>  
>  static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
>  {
> -	struct i915_address_space *vm = &ppgtt->base;
> +	struct i915_address_space *vm = &ppgtt->vm;
>  	struct drm_i915_private *dev_priv = vm->i915;
>  	enum vgt_g2v_type msg;
>  	int i;
> @@ -1294,13 +1294,13 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
>  	int i;
>  
>  	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
> -		if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
> +		if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
>  			continue;
>  
> -		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
> +		gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
>  	}
>  
> -	cleanup_px(&ppgtt->base, &ppgtt->pml4);
> +	cleanup_px(&ppgtt->vm, &ppgtt->pml4);
>  }
>  
>  static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
> @@ -1314,7 +1314,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
>  	if (use_4lvl(vm))
>  		gen8_ppgtt_cleanup_4lvl(ppgtt);
>  	else
> -		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);
> +		gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
>  
>  	gen8_free_scratch(vm);
>  }
> @@ -1450,7 +1450,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
>  			  gen8_pte_t scratch_pte,
>  			  struct seq_file *m)
>  {
> -	struct i915_address_space *vm = &ppgtt->base;
> +	struct i915_address_space *vm = &ppgtt->vm;
>  	struct i915_page_directory *pd;
>  	u32 pdpe;
>  
> @@ -1460,7 +1460,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
>  		u64 pd_start = start;
>  		u32 pde;
>  
> -		if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
> +		if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd)
>  			continue;
>  
>  		seq_printf(m, "\tPDPE #%d\n", pdpe);
> @@ -1468,7 +1468,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
>  			u32 pte;
>  			gen8_pte_t *pt_vaddr;
>  
> -			if (pd->page_table[pde] == ppgtt->base.scratch_pt)
> +			if (pd->page_table[pde] == ppgtt->vm.scratch_pt)
>  				continue;
>  
>  			pt_vaddr = kmap_atomic_px(pt);
> @@ -1501,10 +1501,10 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
>  
>  static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
>  {
> -	struct i915_address_space *vm = &ppgtt->base;
> +	struct i915_address_space *vm = &ppgtt->vm;
>  	const gen8_pte_t scratch_pte =
>  		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
> -	u64 start = 0, length = ppgtt->base.total;
> +	u64 start = 0, length = ppgtt->vm.total;
>  
>  	if (use_4lvl(vm)) {
>  		u64 pml4e;
> @@ -1512,7 +1512,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
>  		struct i915_page_directory_pointer *pdp;
>  
>  		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
> -			if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
> +			if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp)
>  				continue;
>  
>  			seq_printf(m, "    PML4E #%llu\n", pml4e);
> @@ -1525,10 +1525,10 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
>  
>  static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
>  {
> -	struct i915_address_space *vm = &ppgtt->base;
> +	struct i915_address_space *vm = &ppgtt->vm;
>  	struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
>  	struct i915_page_directory *pd;
> -	u64 start = 0, length = ppgtt->base.total;
> +	u64 start = 0, length = ppgtt->vm.total;
>  	u64 from = start;
>  	unsigned int pdpe;
>  
> @@ -1564,11 +1564,11 @@ static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
>   */
>  static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
>  {
> -	struct i915_address_space *vm = &ppgtt->base;
> +	struct i915_address_space *vm = &ppgtt->vm;
>  	struct drm_i915_private *dev_priv = vm->i915;
>  	int ret;
>  
> -	ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
> +	ppgtt->vm.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
>  		1ULL << 48 :
>  		1ULL << 32;
>  
> @@ -1576,26 +1576,26 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
>  	 * And we are not sure about the latter so play safe for now.
>  	 */
>  	if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
> -		ppgtt->base.pt_kmap_wc = true;
> +		ppgtt->vm.pt_kmap_wc = true;
>  
> -	ret = gen8_init_scratch(&ppgtt->base);
> +	ret = gen8_init_scratch(&ppgtt->vm);
>  	if (ret) {
> -		ppgtt->base.total = 0;
> +		ppgtt->vm.total = 0;
>  		return ret;
>  	}
>  
>  	if (use_4lvl(vm)) {
> -		ret = setup_px(&ppgtt->base, &ppgtt->pml4);
> +		ret = setup_px(&ppgtt->vm, &ppgtt->pml4);
>  		if (ret)
>  			goto free_scratch;
>  
> -		gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
> +		gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4);
>  
> -		ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
> -		ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
> -		ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
> +		ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
> +		ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
> +		ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
>  	} else {
> -		ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
> +		ret = __pdp_init(&ppgtt->vm, &ppgtt->pdp);
>  		if (ret)
>  			goto free_scratch;
>  
> @@ -1607,35 +1607,35 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
>  			}
>  		}
>  
> -		ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
> -		ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
> -		ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
> +		ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
> +		ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
> +		ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
>  	}
>  
>  	if (intel_vgpu_active(dev_priv))
>  		gen8_ppgtt_notify_vgt(ppgtt, true);
>  
> -	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
> -	ppgtt->base.bind_vma = gen8_ppgtt_bind_vma;
> -	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
> -	ppgtt->base.set_pages = ppgtt_set_pages;
> -	ppgtt->base.clear_pages = clear_pages;
> +	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
> +	ppgtt->vm.bind_vma = gen8_ppgtt_bind_vma;
> +	ppgtt->vm.unbind_vma = ppgtt_unbind_vma;
> +	ppgtt->vm.set_pages = ppgtt_set_pages;
> +	ppgtt->vm.clear_pages = clear_pages;
>  	ppgtt->debug_dump = gen8_dump_ppgtt;
>  
>  	return 0;
>  
>  free_scratch:
> -	gen8_free_scratch(&ppgtt->base);
> +	gen8_free_scratch(&ppgtt->vm);
>  	return ret;
>  }
>  
>  static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
>  {
> -	struct i915_address_space *vm = &ppgtt->base;
> +	struct i915_address_space *vm = &ppgtt->vm;
>  	struct i915_page_table *unused;
>  	gen6_pte_t scratch_pte;
>  	u32 pd_entry, pte, pde;
> -	u32 start = 0, length = ppgtt->base.total;
> +	u32 start = 0, length = ppgtt->vm.total;
>  
>  	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
>  				     I915_CACHE_LLC, 0);
> @@ -1972,8 +1972,8 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
>  
>  static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
>  {
> -	struct i915_address_space *vm = &ppgtt->base;
> -	struct drm_i915_private *dev_priv = ppgtt->base.i915;
> +	struct i915_address_space *vm = &ppgtt->vm;
> +	struct drm_i915_private *dev_priv = ppgtt->vm.i915;
>  	struct i915_ggtt *ggtt = &dev_priv->ggtt;
>  	int ret;
>  
> @@ -1981,16 +1981,16 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
>  	 * allocator works in address space sizes, so it's multiplied by page
>  	 * size. We allocate at the top of the GTT to avoid fragmentation.
>  	 */
> -	BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
> +	BUG_ON(!drm_mm_initialized(&ggtt->vm.mm));
>  
>  	ret = gen6_init_scratch(vm);
>  	if (ret)
>  		return ret;
>  
> -	ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
> +	ret = i915_gem_gtt_insert(&ggtt->vm, &ppgtt->node,
>  				  GEN6_PD_SIZE, GEN6_PD_ALIGN,
>  				  I915_COLOR_UNEVICTABLE,
> -				  0, ggtt->base.total,
> +				  0, ggtt->vm.total,
>  				  PIN_HIGH);
>  	if (ret)
>  		goto err_out;
> @@ -2023,16 +2023,16 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
>  	u32 pde;
>  
>  	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
> -		ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
> +		ppgtt->pd.page_table[pde] = ppgtt->vm.scratch_pt;
>  }
>  
>  static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
>  {
> -	struct drm_i915_private *dev_priv = ppgtt->base.i915;
> +	struct drm_i915_private *dev_priv = ppgtt->vm.i915;
>  	struct i915_ggtt *ggtt = &dev_priv->ggtt;
>  	int ret;
>  
> -	ppgtt->base.pte_encode = ggtt->base.pte_encode;
> +	ppgtt->vm.pte_encode = ggtt->vm.pte_encode;
>  	if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
>  		ppgtt->switch_mm = gen6_mm_switch;
>  	else if (IS_HASWELL(dev_priv))
> @@ -2046,24 +2046,24 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
>  	if (ret)
>  		return ret;
>  
> -	ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
> +	ppgtt->vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
>  
> -	gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
> -	gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
> +	gen6_scratch_va_range(ppgtt, 0, ppgtt->vm.total);
> +	gen6_write_page_range(ppgtt, 0, ppgtt->vm.total);
>  
> -	ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
> +	ret = gen6_alloc_va_range(&ppgtt->vm, 0, ppgtt->vm.total);
>  	if (ret) {
> -		gen6_ppgtt_cleanup(&ppgtt->base);
> +		gen6_ppgtt_cleanup(&ppgtt->vm);
>  		return ret;
>  	}
>  
> -	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
> -	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
> -	ppgtt->base.bind_vma = gen6_ppgtt_bind_vma;
> -	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
> -	ppgtt->base.set_pages = ppgtt_set_pages;
> -	ppgtt->base.clear_pages = clear_pages;
> -	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
> +	ppgtt->vm.clear_range = gen6_ppgtt_clear_range;
> +	ppgtt->vm.insert_entries = gen6_ppgtt_insert_entries;
> +	ppgtt->vm.bind_vma = gen6_ppgtt_bind_vma;
> +	ppgtt->vm.unbind_vma = ppgtt_unbind_vma;
> +	ppgtt->vm.set_pages = ppgtt_set_pages;
> +	ppgtt->vm.clear_pages = clear_pages;
> +	ppgtt->vm.cleanup = gen6_ppgtt_cleanup;
>  	ppgtt->debug_dump = gen6_dump_ppgtt;
>  
>  	DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
> @@ -2079,8 +2079,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
>  static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
>  			   struct drm_i915_private *dev_priv)
>  {
> -	ppgtt->base.i915 = dev_priv;
> -	ppgtt->base.dma = &dev_priv->drm.pdev->dev;
> +	ppgtt->vm.i915 = dev_priv;
> +	ppgtt->vm.dma = &dev_priv->drm.pdev->dev;
>  
>  	if (INTEL_GEN(dev_priv) < 8)
>  		return gen6_ppgtt_init(ppgtt);
> @@ -2190,10 +2190,10 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
>  	}
>  
>  	kref_init(&ppgtt->ref);
> -	i915_address_space_init(&ppgtt->base, dev_priv, name);
> -	ppgtt->base.file = fpriv;
> +	i915_address_space_init(&ppgtt->vm, dev_priv, name);
> +	ppgtt->vm.file = fpriv;
>  
> -	trace_i915_ppgtt_create(&ppgtt->base);
> +	trace_i915_ppgtt_create(&ppgtt->vm);
>  
>  	return ppgtt;
>  }
> @@ -2227,16 +2227,16 @@ void i915_ppgtt_release(struct kref *kref)
>  	struct i915_hw_ppgtt *ppgtt =
>  		container_of(kref, struct i915_hw_ppgtt, ref);
>  
> -	trace_i915_ppgtt_release(&ppgtt->base);
> +	trace_i915_ppgtt_release(&ppgtt->vm);
>  
> -	ppgtt_destroy_vma(&ppgtt->base);
> +	ppgtt_destroy_vma(&ppgtt->vm);
>  
> -	GEM_BUG_ON(!list_empty(&ppgtt->base.active_list));
> -	GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list));
> -	GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list));
> +	GEM_BUG_ON(!list_empty(&ppgtt->vm.active_list));
> +	GEM_BUG_ON(!list_empty(&ppgtt->vm.inactive_list));
> +	GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
>  
> -	ppgtt->base.cleanup(&ppgtt->base);
> -	i915_address_space_fini(&ppgtt->base);
> +	ppgtt->vm.cleanup(&ppgtt->vm);
> +	i915_address_space_fini(&ppgtt->vm);
>  	kfree(ppgtt);
>  }
>  
> @@ -2332,7 +2332,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
>  
>  	i915_check_and_clear_faults(dev_priv);
>  
> -	ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
> +	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
>  
>  	i915_ggtt_invalidate(dev_priv);
>  }
> @@ -2675,16 +2675,16 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
>  		struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
>  
>  		if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
> -		    appgtt->base.allocate_va_range) {
> -			ret = appgtt->base.allocate_va_range(&appgtt->base,
> -							     vma->node.start,
> -							     vma->size);
> +		    appgtt->vm.allocate_va_range) {
> +			ret = appgtt->vm.allocate_va_range(&appgtt->vm,
> +							   vma->node.start,
> +							   vma->size);
>  			if (ret)
>  				return ret;
>  		}
>  
> -		appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
> -					    pte_flags);
> +		appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level,
> +					  pte_flags);
>  	}
>  
>  	if (flags & I915_VMA_GLOBAL_BIND) {
> @@ -2707,7 +2707,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
>  	}
>  
>  	if (vma->flags & I915_VMA_LOCAL_BIND) {
> -		struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
> +		struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;
>  
>  		vm->clear_range(vm, vma->node.start, vma->size);
>  	}
> @@ -2774,30 +2774,30 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
>  	if (IS_ERR(ppgtt))
>  		return PTR_ERR(ppgtt);
>  
> -	if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
> +	if (WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
>  		err = -ENODEV;
>  		goto err_ppgtt;
>  	}
>  
> -	if (ppgtt->base.allocate_va_range) {
> +	if (ppgtt->vm.allocate_va_range) {
>  		/* Note we only pre-allocate as far as the end of the global
>  		 * GTT. On 48b / 4-level page-tables, the difference is very,
>  		 * very significant! We have to preallocate as GVT/vgpu does
>  		 * not like the page directory disappearing.
>  		 */
> -		err = ppgtt->base.allocate_va_range(&ppgtt->base,
> -						    0, ggtt->base.total);
> +		err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
> +						  0, ggtt->vm.total);
>  		if (err)
>  			goto err_ppgtt;
>  	}
>  
>  	i915->mm.aliasing_ppgtt = ppgtt;
>  
> -	GEM_BUG_ON(ggtt->base.bind_vma != ggtt_bind_vma);
> -	ggtt->base.bind_vma = aliasing_gtt_bind_vma;
> +	GEM_BUG_ON(ggtt->vm.bind_vma != ggtt_bind_vma);
> +	ggtt->vm.bind_vma = aliasing_gtt_bind_vma;
>  
> -	GEM_BUG_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
> -	ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
> +	GEM_BUG_ON(ggtt->vm.unbind_vma != ggtt_unbind_vma);
> +	ggtt->vm.unbind_vma = aliasing_gtt_unbind_vma;
>  
>  	return 0;
>  
> @@ -2817,8 +2817,8 @@ void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
>  
>  	i915_ppgtt_put(ppgtt);
>  
> -	ggtt->base.bind_vma = ggtt_bind_vma;
> -	ggtt->base.unbind_vma = ggtt_unbind_vma;
> +	ggtt->vm.bind_vma = ggtt_bind_vma;
> +	ggtt->vm.unbind_vma = ggtt_unbind_vma;
>  }
>  
>  int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
> @@ -2842,7 +2842,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
>  		return ret;
>  
>  	/* Reserve a mappable slot for our lockless error capture */
> -	ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
> +	ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
>  					  PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
>  					  0, ggtt->mappable_end,
>  					  DRM_MM_INSERT_LOW);
> @@ -2850,16 +2850,15 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
>  		return ret;
>  
>  	/* Clear any non-preallocated blocks */
> -	drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
> +	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
>  		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
>  			      hole_start, hole_end);
> -		ggtt->base.clear_range(&ggtt->base, hole_start,
> -				       hole_end - hole_start);
> +		ggtt->vm.clear_range(&ggtt->vm, hole_start,
> +				     hole_end - hole_start);
>  	}
>  
>  	/* And finally clear the reserved guard page */
> -	ggtt->base.clear_range(&ggtt->base,
> -			       ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
> +	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
>  
>  	if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
>  		ret = i915_gem_init_aliasing_ppgtt(dev_priv);
> @@ -2884,11 +2883,11 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
>  	struct i915_vma *vma, *vn;
>  	struct pagevec *pvec;
>  
> -	ggtt->base.closed = true;
> +	ggtt->vm.closed = true;
>  
>  	mutex_lock(&dev_priv->drm.struct_mutex);
> -	GEM_BUG_ON(!list_empty(&ggtt->base.active_list));
> -	list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
> +	GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
> +	list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link)
>  		WARN_ON(i915_vma_unbind(vma));
>  	mutex_unlock(&dev_priv->drm.struct_mutex);
>  
> @@ -2900,12 +2899,12 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
>  	if (drm_mm_node_allocated(&ggtt->error_capture))
>  		drm_mm_remove_node(&ggtt->error_capture);
>  
> -	if (drm_mm_initialized(&ggtt->base.mm)) {
> +	if (drm_mm_initialized(&ggtt->vm.mm)) {
>  		intel_vgt_deballoon(dev_priv);
> -		i915_address_space_fini(&ggtt->base);
> +		i915_address_space_fini(&ggtt->vm);
>  	}
>  
> -	ggtt->base.cleanup(&ggtt->base);
> +	ggtt->vm.cleanup(&ggtt->vm);
>  
>  	pvec = &dev_priv->mm.wc_stash;
>  	if (pvec->nr) {
> @@ -2955,7 +2954,7 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
>  
>  static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
>  {
> -	struct drm_i915_private *dev_priv = ggtt->base.i915;
> +	struct drm_i915_private *dev_priv = ggtt->vm.i915;
>  	struct pci_dev *pdev = dev_priv->drm.pdev;
>  	phys_addr_t phys_addr;
>  	int ret;
> @@ -2979,7 +2978,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
>  		return -ENOMEM;
>  	}
>  
> -	ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
> +	ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
>  	if (ret) {
>  		DRM_ERROR("Scratch setup failed\n");
>  		/* iounmap will also get called at remove, but meh */
> @@ -3285,7 +3284,7 @@ static void setup_private_pat(struct drm_i915_private *dev_priv)
>  
>  static int gen8_gmch_probe(struct i915_ggtt *ggtt)
>  {
> -	struct drm_i915_private *dev_priv = ggtt->base.i915;
> +	struct drm_i915_private *dev_priv = ggtt->vm.i915;
>  	struct pci_dev *pdev = dev_priv->drm.pdev;
>  	unsigned int size;
>  	u16 snb_gmch_ctl;
> @@ -3309,25 +3308,25 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
>  	else
>  		size = gen8_get_total_gtt_size(snb_gmch_ctl);
>  
> -	ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
> -	ggtt->base.cleanup = gen6_gmch_remove;
> -	ggtt->base.bind_vma = ggtt_bind_vma;
> -	ggtt->base.unbind_vma = ggtt_unbind_vma;
> -	ggtt->base.set_pages = ggtt_set_pages;
> -	ggtt->base.clear_pages = clear_pages;
> -	ggtt->base.insert_page = gen8_ggtt_insert_page;
> -	ggtt->base.clear_range = nop_clear_range;
> +	ggtt->vm.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
> +	ggtt->vm.cleanup = gen6_gmch_remove;
> +	ggtt->vm.bind_vma = ggtt_bind_vma;
> +	ggtt->vm.unbind_vma = ggtt_unbind_vma;
> +	ggtt->vm.set_pages = ggtt_set_pages;
> +	ggtt->vm.clear_pages = clear_pages;
> +	ggtt->vm.insert_page = gen8_ggtt_insert_page;
> +	ggtt->vm.clear_range = nop_clear_range;
>  	if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
> -		ggtt->base.clear_range = gen8_ggtt_clear_range;
> +		ggtt->vm.clear_range = gen8_ggtt_clear_range;
>  
> -	ggtt->base.insert_entries = gen8_ggtt_insert_entries;
> +	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
>  
>  	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
>  	if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
> -		ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
> -		ggtt->base.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
> -		if (ggtt->base.clear_range != nop_clear_range)
> -			ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
> +		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
> +		ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
> +		if (ggtt->vm.clear_range != nop_clear_range)
> +			ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
>  	}
>  
>  	ggtt->invalidate = gen6_ggtt_invalidate;
> @@ -3339,7 +3338,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
>  
>  static int gen6_gmch_probe(struct i915_ggtt *ggtt)
>  {
> -	struct drm_i915_private *dev_priv = ggtt->base.i915;
> +	struct drm_i915_private *dev_priv = ggtt->vm.i915;
>  	struct pci_dev *pdev = dev_priv->drm.pdev;
>  	unsigned int size;
>  	u16 snb_gmch_ctl;
> @@ -3366,29 +3365,29 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
>  	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
>  
>  	size = gen6_get_total_gtt_size(snb_gmch_ctl);
> -	ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
> +	ggtt->vm.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
>  
> -	ggtt->base.clear_range = gen6_ggtt_clear_range;
> -	ggtt->base.insert_page = gen6_ggtt_insert_page;
> -	ggtt->base.insert_entries = gen6_ggtt_insert_entries;
> -	ggtt->base.bind_vma = ggtt_bind_vma;
> -	ggtt->base.unbind_vma = ggtt_unbind_vma;
> -	ggtt->base.set_pages = ggtt_set_pages;
> -	ggtt->base.clear_pages = clear_pages;
> -	ggtt->base.cleanup = gen6_gmch_remove;
> +	ggtt->vm.clear_range = gen6_ggtt_clear_range;
> +	ggtt->vm.insert_page = gen6_ggtt_insert_page;
> +	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
> +	ggtt->vm.bind_vma = ggtt_bind_vma;
> +	ggtt->vm.unbind_vma = ggtt_unbind_vma;
> +	ggtt->vm.set_pages = ggtt_set_pages;
> +	ggtt->vm.clear_pages = clear_pages;
> +	ggtt->vm.cleanup = gen6_gmch_remove;
>  
>  	ggtt->invalidate = gen6_ggtt_invalidate;
>  
>  	if (HAS_EDRAM(dev_priv))
> -		ggtt->base.pte_encode = iris_pte_encode;
> +		ggtt->vm.pte_encode = iris_pte_encode;
>  	else if (IS_HASWELL(dev_priv))
> -		ggtt->base.pte_encode = hsw_pte_encode;
> +		ggtt->vm.pte_encode = hsw_pte_encode;
>  	else if (IS_VALLEYVIEW(dev_priv))
> -		ggtt->base.pte_encode = byt_pte_encode;
> +		ggtt->vm.pte_encode = byt_pte_encode;
>  	else if (INTEL_GEN(dev_priv) >= 7)
> -		ggtt->base.pte_encode = ivb_pte_encode;
> +		ggtt->vm.pte_encode = ivb_pte_encode;
>  	else
> -		ggtt->base.pte_encode = snb_pte_encode;
> +		ggtt->vm.pte_encode = snb_pte_encode;
>  
>  	return ggtt_probe_common(ggtt, size);
>  }
> @@ -3400,7 +3399,7 @@ static void i915_gmch_remove(struct i915_address_space *vm)
>  
>  static int i915_gmch_probe(struct i915_ggtt *ggtt)
>  {
> -	struct drm_i915_private *dev_priv = ggtt->base.i915;
> +	struct drm_i915_private *dev_priv = ggtt->vm.i915;
>  	phys_addr_t gmadr_base;
>  	int ret;
>  
> @@ -3410,23 +3409,21 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
>  		return -EIO;
>  	}
>  
> -	intel_gtt_get(&ggtt->base.total,
> -		      &gmadr_base,
> -		      &ggtt->mappable_end);
> +	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
>  
>  	ggtt->gmadr =
>  		(struct resource) DEFINE_RES_MEM(gmadr_base,
>  						 ggtt->mappable_end);
>  
>  	ggtt->do_idle_maps = needs_idle_maps(dev_priv);
> -	ggtt->base.insert_page = i915_ggtt_insert_page;
> -	ggtt->base.insert_entries = i915_ggtt_insert_entries;
> -	ggtt->base.clear_range = i915_ggtt_clear_range;
> -	ggtt->base.bind_vma = ggtt_bind_vma;
> -	ggtt->base.unbind_vma = ggtt_unbind_vma;
> -	ggtt->base.set_pages = ggtt_set_pages;
> -	ggtt->base.clear_pages = clear_pages;
> -	ggtt->base.cleanup = i915_gmch_remove;
> +	ggtt->vm.insert_page = i915_ggtt_insert_page;
> +	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
> +	ggtt->vm.clear_range = i915_ggtt_clear_range;
> +	ggtt->vm.bind_vma = ggtt_bind_vma;
> +	ggtt->vm.unbind_vma = ggtt_unbind_vma;
> +	ggtt->vm.set_pages = ggtt_set_pages;
> +	ggtt->vm.clear_pages = clear_pages;
> +	ggtt->vm.cleanup = i915_gmch_remove;
>  
>  	ggtt->invalidate = gmch_ggtt_invalidate;
>  
> @@ -3445,8 +3442,8 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
>  	struct i915_ggtt *ggtt = &dev_priv->ggtt;
>  	int ret;
>  
> -	ggtt->base.i915 = dev_priv;
> -	ggtt->base.dma = &dev_priv->drm.pdev->dev;
> +	ggtt->vm.i915 = dev_priv;
> +	ggtt->vm.dma = &dev_priv->drm.pdev->dev;
>  
>  	if (INTEL_GEN(dev_priv) <= 5)
>  		ret = i915_gmch_probe(ggtt);
> @@ -3463,27 +3460,29 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
>  	 * restriction!
>  	 */
>  	if (USES_GUC(dev_priv)) {
> -		ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
> -		ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
> +		ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP);
> +		ggtt->mappable_end =
> +			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
>  	}
>  
> -	if ((ggtt->base.total - 1) >> 32) {
> +	if ((ggtt->vm.total - 1) >> 32) {
>  		DRM_ERROR("We never expected a Global GTT with more than 32bits"
>  			  " of address space! Found %lldM!\n",
> -			  ggtt->base.total >> 20);
> -		ggtt->base.total = 1ULL << 32;
> -		ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
> +			  ggtt->vm.total >> 20);
> +		ggtt->vm.total = 1ULL << 32;
> +		ggtt->mappable_end =
> +			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
>  	}
>  
> -	if (ggtt->mappable_end > ggtt->base.total) {
> +	if (ggtt->mappable_end > ggtt->vm.total) {
>  		DRM_ERROR("mappable aperture extends past end of GGTT,"
>  			  " aperture=%pa, total=%llx\n",
> -			  &ggtt->mappable_end, ggtt->base.total);
> -		ggtt->mappable_end = ggtt->base.total;
> +			  &ggtt->mappable_end, ggtt->vm.total);
> +		ggtt->mappable_end = ggtt->vm.total;
>  	}
>  
>  	/* GMADR is the PCI mmio aperture into the global GTT. */
> -	DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->base.total >> 20);
> +	DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
>  	DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
>  	DRM_DEBUG_DRIVER("DSM size = %lluM\n",
>  			 (u64)resource_size(&intel_graphics_stolen_res) >> 20);
> @@ -3510,9 +3509,9 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
>  	 * and beyond the end of the GTT if we do not provide a guard.
>  	 */
>  	mutex_lock(&dev_priv->drm.struct_mutex);
> -	i915_address_space_init(&ggtt->base, dev_priv, "[global]");
> +	i915_address_space_init(&ggtt->vm, dev_priv, "[global]");
>  	if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
> -		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
> +		ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
>  	mutex_unlock(&dev_priv->drm.struct_mutex);
>  
>  	if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
> @@ -3535,7 +3534,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
>  	return 0;
>  
>  out_gtt_cleanup:
> -	ggtt->base.cleanup(&ggtt->base);
> +	ggtt->vm.cleanup(&ggtt->vm);
>  	return ret;
>  }
>  
> @@ -3574,9 +3573,9 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
>  	i915_check_and_clear_faults(dev_priv);
>  
>  	/* First fill our portion of the GTT with scratch pages */
> -	ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
> +	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
>  
> -	ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
> +	ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
>  
>  	/* clflush objects bound into the GGTT and rebind them. */
>  	list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
> @@ -3596,7 +3595,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
>  			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
>  	}
>  
> -	ggtt->base.closed = false;
> +	ggtt->vm.closed = false;
>  
>  	if (INTEL_GEN(dev_priv) >= 8) {
>  		struct intel_ppat *ppat = &dev_priv->ppat;
> @@ -3619,7 +3618,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
>  			if (!ppgtt)
>  				continue;
>  
> -			gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
> +			gen6_write_page_range(ppgtt, 0, ppgtt->vm.total);
>  		}
>  	}
>  
> @@ -3841,7 +3840,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
>  	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
>  	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
>  	GEM_BUG_ON(range_overflows(offset, size, vm->total));
> -	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
> +	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
>  	GEM_BUG_ON(drm_mm_node_allocated(node));
>  
>  	node->size = size;
> @@ -3938,7 +3937,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
>  	GEM_BUG_ON(start >= end);
>  	GEM_BUG_ON(start > 0  && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
>  	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
> -	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
> +	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
>  	GEM_BUG_ON(drm_mm_node_allocated(node));
>  
>  	if (unlikely(range_overflows(start, size, end)))
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
> index aec4f73574f4..197c2c06ecb7 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.h
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
> @@ -65,7 +65,7 @@ typedef u64 gen8_pde_t;
>  typedef u64 gen8_ppgtt_pdpe_t;
>  typedef u64 gen8_ppgtt_pml4e_t;
>  
> -#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
> +#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
>  
>  /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
>  #define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
> @@ -367,7 +367,7 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
>   * the spec.
>   */
>  struct i915_ggtt {
> -	struct i915_address_space base;
> +	struct i915_address_space vm;
>  
>  	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
>  	struct resource gmadr;          /* GMADR resource */
> @@ -385,7 +385,7 @@ struct i915_ggtt {
>  };
>  
>  struct i915_hw_ppgtt {
> -	struct i915_address_space base;
> +	struct i915_address_space vm;
>  	struct kref ref;
>  	struct drm_mm_node node;
>  	unsigned long pd_dirty_rings;
> @@ -543,7 +543,7 @@ static inline struct i915_ggtt *
>  i915_vm_to_ggtt(struct i915_address_space *vm)
>  {
>  	GEM_BUG_ON(!i915_is_ggtt(vm));
> -	return container_of(vm, struct i915_ggtt, base);
> +	return container_of(vm, struct i915_ggtt, vm);
>  }
>  
>  #define INTEL_MAX_PPAT_ENTRIES 8
> diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
> index 1036e8686916..3210cedfa46c 100644
> --- a/drivers/gpu/drm/i915/i915_gem_render_state.c
> +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
> @@ -194,7 +194,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
>  	if (IS_ERR(so.obj))
>  		return PTR_ERR(so.obj);
>  
> -	so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.base, NULL);
> +	so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
>  	if (IS_ERR(so.vma)) {
>  		err = PTR_ERR(so.vma);
>  		goto err_obj;
> diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> index 5757fb7c4b5a..55e84e71f526 100644
> --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
> +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> @@ -480,7 +480,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
>  
>  	/* We also want to clear any cached iomaps as they wrap vmap */
>  	list_for_each_entry_safe(vma, next,
> -				 &i915->ggtt.base.inactive_list, vm_link) {
> +				 &i915->ggtt.vm.inactive_list, vm_link) {
>  		unsigned long count = vma->node.size >> PAGE_SHIFT;
>  		if (vma->iomap && i915_vma_unbind(vma) == 0)
>  			freed_pages += count;
> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> index ad949cc30928..79a347295e00 100644
> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> @@ -642,7 +642,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
>  	if (ret)
>  		goto err;
>  
> -	vma = i915_vma_instance(obj, &ggtt->base, NULL);
> +	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
>  	if (IS_ERR(vma)) {
>  		ret = PTR_ERR(vma);
>  		goto err_pages;
> @@ -653,7 +653,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
>  	 * setting up the GTT space. The actual reservation will occur
>  	 * later.
>  	 */
> -	ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
> +	ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
>  				   size, gtt_offset, obj->cache_level,
>  				   0);
>  	if (ret) {
> @@ -666,7 +666,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
>  	vma->pages = obj->mm.pages;
>  	vma->flags |= I915_VMA_GLOBAL_BIND;
>  	__i915_vma_set_map_and_fenceable(vma);
> -	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
> +	list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list);
>  
>  	spin_lock(&dev_priv->mm.obj_lock);
>  	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index 47721437a4c5..cd09a1688192 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -973,8 +973,7 @@ i915_error_object_create(struct drm_i915_private *i915,
>  		void __iomem *s;
>  		int ret;
>  
> -		ggtt->base.insert_page(&ggtt->base, dma, slot,
> -				       I915_CACHE_NONE, 0);
> +		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
>  
>  		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
>  		ret = compress_page(&compress, (void  __force *)s, dst);
> @@ -993,7 +992,7 @@ i915_error_object_create(struct drm_i915_private *i915,
>  
>  out:
>  	compress_fini(&compress, dst);
> -	ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
> +	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
>  	return dst;
>  }
>  
> @@ -1466,7 +1465,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
>  			struct i915_gem_context *ctx = request->gem_context;
>  			struct intel_ring *ring;
>  
> -			ee->vm = ctx->ppgtt ? &ctx->ppgtt->base : &ggtt->base;
> +			ee->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt->vm;
>  
>  			record_context(&ee->context, ctx);
>  
> @@ -1564,7 +1563,7 @@ static void capture_active_buffers(struct i915_gpu_state *error)
>  
>  static void capture_pinned_buffers(struct i915_gpu_state *error)
>  {
> -	struct i915_address_space *vm = &error->i915->ggtt.base;
> +	struct i915_address_space *vm = &error->i915->ggtt.vm;
>  	struct drm_i915_error_buffer *bo;
>  	struct i915_vma *vma;
>  	int count_inactive, count_active;
> diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
> index 5d4f78765083..03299bae45a2 100644
> --- a/drivers/gpu/drm/i915/i915_trace.h
> +++ b/drivers/gpu/drm/i915/i915_trace.h
> @@ -936,7 +936,7 @@ DECLARE_EVENT_CLASS(i915_context,
>  			__entry->dev = ctx->i915->drm.primary->index;
>  			__entry->ctx = ctx;
>  			__entry->hw_id = ctx->hw_id;
> -			__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
> +			__entry->vm = ctx->ppgtt ? &ctx->ppgtt->vm : NULL;
>  	),
>  
>  	TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
> @@ -975,7 +975,7 @@ TRACE_EVENT(switch_mm,
>  	TP_fast_assign(
>  			__entry->ring = engine->id;
>  			__entry->to = to;
> -			__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
> +			__entry->vm = to->ppgtt? &to->ppgtt->vm : NULL;
>  			__entry->dev = engine->i915->drm.primary->index;
>  	),
>  
> diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
> index 5fe9f3f39467..869cf4a3b6de 100644
> --- a/drivers/gpu/drm/i915/i915_vgpu.c
> +++ b/drivers/gpu/drm/i915/i915_vgpu.c
> @@ -105,7 +105,7 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
>  			 node->start + node->size,
>  			 node->size / 1024);
>  
> -	ggtt->base.reserved -= node->size;
> +	ggtt->vm.reserved -= node->size;
>  	drm_mm_remove_node(node);
>  }
>  
> @@ -141,11 +141,11 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
>  
>  	DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
>  		 start, end, size / 1024);
> -	ret = i915_gem_gtt_reserve(&ggtt->base, node,
> +	ret = i915_gem_gtt_reserve(&ggtt->vm, node,
>  				   size, start, I915_COLOR_UNEVICTABLE,
>  				   0);
>  	if (!ret)
> -		ggtt->base.reserved += size;
> +		ggtt->vm.reserved += size;
>  
>  	return ret;
>  }
> @@ -197,7 +197,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
>  int intel_vgt_balloon(struct drm_i915_private *dev_priv)
>  {
>  	struct i915_ggtt *ggtt = &dev_priv->ggtt;
> -	unsigned long ggtt_end = ggtt->base.total;
> +	unsigned long ggtt_end = ggtt->vm.total;
>  
>  	unsigned long mappable_base, mappable_size, mappable_end;
>  	unsigned long unmappable_base, unmappable_size, unmappable_end;
> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> index 9324d476e0a7..e8f07cdca063 100644
> --- a/drivers/gpu/drm/i915/i915_vma.c
> +++ b/drivers/gpu/drm/i915/i915_vma.c
> @@ -85,7 +85,7 @@ vma_create(struct drm_i915_gem_object *obj,
>  	int i;
>  
>  	/* The aliasing_ppgtt should never be used directly! */
> -	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
> +	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
>  
>  	vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
>  	if (vma == NULL)
> diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
> index 13448ea76f57..2ec2e60dc670 100644
> --- a/drivers/gpu/drm/i915/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
> @@ -515,7 +515,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
>  		return PTR_ERR(obj);
>  	}
>  
> -	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
> +	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
>  	if (IS_ERR(vma)) {
>  		ret = PTR_ERR(vma);
>  		goto err_unref;
> @@ -585,7 +585,7 @@ static int init_status_page(struct intel_engine_cs *engine)
>  	if (ret)
>  		goto err;
>  
> -	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
> +	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
>  	if (IS_ERR(vma)) {
>  		ret = PTR_ERR(vma);
>  		goto err;
> diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
> index e28a996b9604..29fd95c1306b 100644
> --- a/drivers/gpu/drm/i915/intel_guc.c
> +++ b/drivers/gpu/drm/i915/intel_guc.c
> @@ -570,7 +570,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
>  	if (IS_ERR(obj))
>  		return ERR_CAST(obj);
>  
> -	vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
> +	vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
>  	if (IS_ERR(vma))
>  		goto err;
>  
> diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
> index 133367a17863..e271e296b9da 100644
> --- a/drivers/gpu/drm/i915/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/intel_guc_submission.c
> @@ -536,7 +536,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
>   */
>  static void flush_ggtt_writes(struct i915_vma *vma)
>  {
> -	struct drm_i915_private *dev_priv = to_i915(vma->obj->base.dev);
> +	struct drm_i915_private *dev_priv = vma->vm->i915;
>  
>  	if (i915_vma_is_map_and_fenceable(vma))
>  		POSTING_READ_FW(GUC_STATUS);
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index eb25afa9694f..091e28f0e024 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -431,7 +431,7 @@ static u64 execlists_update_context(struct i915_request *rq)
>  	 * PML4 is allocated during ppgtt init, so this is not needed
>  	 * in 48-bit mode.
>  	 */
> -	if (ppgtt && !i915_vm_is_48bit(&ppgtt->base))
> +	if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
>  		execlists_update_context_pdps(ppgtt, reg_state);
>  
>  	return ce->lrc_desc;
> @@ -1671,7 +1671,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
>  	if (IS_ERR(obj))
>  		return PTR_ERR(obj);
>  
> -	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
> +	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
>  	if (IS_ERR(vma)) {
>  		err = PTR_ERR(vma);
>  		goto err;
> @@ -2069,7 +2069,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
>  	 * not needed in 48-bit.*/
>  	if (rq->gem_context->ppgtt &&
>  	    (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
> -	    !i915_vm_is_48bit(&rq->gem_context->ppgtt->base) &&
> +	    !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
>  	    !intel_vgpu_active(rq->i915)) {
>  		ret = intel_logical_ring_emit_pdps(rq);
>  		if (ret)
> @@ -2667,7 +2667,7 @@ static void execlists_init_reg_state(u32 *regs,
>  	CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
>  	CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
>  
> -	if (ppgtt && i915_vm_is_48bit(&ppgtt->base)) {
> +	if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) {
>  		/* 64b PPGTT (48bit canonical)
>  		 * PDP0_DESCRIPTOR contains the base address to PML4 and
>  		 * other PDP Descriptors are ignored.
> @@ -2773,7 +2773,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
>  		goto error_deref_obj;
>  	}
>  
> -	vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
> +	vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
>  	if (IS_ERR(vma)) {
>  		ret = PTR_ERR(vma);
>  		goto error_deref_obj;
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 97b38bbb7ce2..fa517a3e3c25 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1123,7 +1123,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
>  	/* mark ring buffers as read-only from GPU side by default */
>  	obj->gt_ro = 1;
>  
> -	vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
> +	vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
>  	if (IS_ERR(vma))
>  		goto err;
>  
> @@ -1279,7 +1279,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
>  		i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
>  	}
>  
> -	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
> +	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
>  	if (IS_ERR(vma)) {
>  		err = PTR_ERR(vma);
>  		goto err_obj;
> diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
> index 91c72911be3c..01ab60f1a7e8 100644
> --- a/drivers/gpu/drm/i915/selftests/huge_pages.c
> +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
> @@ -338,7 +338,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
>  
>  static int igt_check_page_sizes(struct i915_vma *vma)
>  {
> -	struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
> +	struct drm_i915_private *i915 = vma->vm->i915;
>  	unsigned int supported = INTEL_INFO(i915)->page_sizes;
>  	struct drm_i915_gem_object *obj = vma->obj;
>  	int err = 0;
> @@ -379,7 +379,7 @@ static int igt_check_page_sizes(struct i915_vma *vma)
>  static int igt_mock_exhaust_device_supported_pages(void *arg)
>  {
>  	struct i915_hw_ppgtt *ppgtt = arg;
> -	struct drm_i915_private *i915 = ppgtt->base.i915;
> +	struct drm_i915_private *i915 = ppgtt->vm.i915;
>  	unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
>  	struct drm_i915_gem_object *obj;
>  	struct i915_vma *vma;
> @@ -415,7 +415,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
>  				goto out_put;
>  			}
>  
> -			vma = i915_vma_instance(obj, &ppgtt->base, NULL);
> +			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
>  			if (IS_ERR(vma)) {
>  				err = PTR_ERR(vma);
>  				goto out_put;
> @@ -458,7 +458,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
>  static int igt_mock_ppgtt_misaligned_dma(void *arg)
>  {
>  	struct i915_hw_ppgtt *ppgtt = arg;
> -	struct drm_i915_private *i915 = ppgtt->base.i915;
> +	struct drm_i915_private *i915 = ppgtt->vm.i915;
>  	unsigned long supported = INTEL_INFO(i915)->page_sizes;
>  	struct drm_i915_gem_object *obj;
>  	int bit;
> @@ -500,7 +500,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
>  		/* Force the page size for this object */
>  		obj->mm.page_sizes.sg = page_size;
>  
> -		vma = i915_vma_instance(obj, &ppgtt->base, NULL);
> +		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
>  		if (IS_ERR(vma)) {
>  			err = PTR_ERR(vma);
>  			goto out_unpin;
> @@ -591,7 +591,7 @@ static void close_object_list(struct list_head *objects,
>  	list_for_each_entry_safe(obj, on, objects, st_link) {
>  		struct i915_vma *vma;
>  
> -		vma = i915_vma_instance(obj, &ppgtt->base, NULL);
> +		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
>  		if (!IS_ERR(vma))
>  			i915_vma_close(vma);
>  
> @@ -604,8 +604,8 @@ static void close_object_list(struct list_head *objects,
>  static int igt_mock_ppgtt_huge_fill(void *arg)
>  {
>  	struct i915_hw_ppgtt *ppgtt = arg;
> -	struct drm_i915_private *i915 = ppgtt->base.i915;
> -	unsigned long max_pages = ppgtt->base.total >> PAGE_SHIFT;
> +	struct drm_i915_private *i915 = ppgtt->vm.i915;
> +	unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
>  	unsigned long page_num;
>  	bool single = false;
>  	LIST_HEAD(objects);
> @@ -641,7 +641,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
>  
>  		list_add(&obj->st_link, &objects);
>  
> -		vma = i915_vma_instance(obj, &ppgtt->base, NULL);
> +		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
>  		if (IS_ERR(vma)) {
>  			err = PTR_ERR(vma);
>  			break;
> @@ -725,7 +725,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
>  static int igt_mock_ppgtt_64K(void *arg)
>  {
>  	struct i915_hw_ppgtt *ppgtt = arg;
> -	struct drm_i915_private *i915 = ppgtt->base.i915;
> +	struct drm_i915_private *i915 = ppgtt->vm.i915;
>  	struct drm_i915_gem_object *obj;
>  	const struct object_info {
>  		unsigned int size;
> @@ -819,7 +819,7 @@ static int igt_mock_ppgtt_64K(void *arg)
>  			 */
>  			obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
>  
> -			vma = i915_vma_instance(obj, &ppgtt->base, NULL);
> +			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
>  			if (IS_ERR(vma)) {
>  				err = PTR_ERR(vma);
>  				goto out_object_unpin;
> @@ -887,8 +887,8 @@ static int igt_mock_ppgtt_64K(void *arg)
>  static struct i915_vma *
>  gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
>  {
> -	struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
> -	const int gen = INTEL_GEN(vma->vm->i915);
> +	struct drm_i915_private *i915 = vma->vm->i915;
> +	const int gen = INTEL_GEN(i915);
>  	unsigned int count = vma->size >> PAGE_SHIFT;
>  	struct drm_i915_gem_object *obj;
>  	struct i915_vma *batch;
> @@ -1047,7 +1047,8 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
>  			    u32 dword, u32 val)
>  {
>  	struct drm_i915_private *i915 = to_i915(obj->base.dev);
> -	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
> +	struct i915_address_space *vm =
> +		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
>  	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
>  	struct i915_vma *vma;
>  	int err;
> @@ -1100,7 +1101,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
>  			  struct drm_i915_gem_object *obj)
>  {
>  	struct drm_i915_private *i915 = to_i915(obj->base.dev);
> -	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
> +	struct i915_address_space *vm =
> +		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
>  	static struct intel_engine_cs *engines[I915_NUM_ENGINES];
>  	struct intel_engine_cs *engine;
>  	I915_RND_STATE(prng);
> @@ -1439,7 +1441,7 @@ static int igt_ppgtt_pin_update(void *arg)
>  		if (IS_ERR(obj))
>  			return PTR_ERR(obj);
>  
> -		vma = i915_vma_instance(obj, &ppgtt->base, NULL);
> +		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
>  		if (IS_ERR(vma)) {
>  			err = PTR_ERR(vma);
>  			goto out_put;
> @@ -1493,7 +1495,7 @@ static int igt_ppgtt_pin_update(void *arg)
>  	if (IS_ERR(obj))
>  		return PTR_ERR(obj);
>  
> -	vma = i915_vma_instance(obj, &ppgtt->base, NULL);
> +	vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
>  	if (IS_ERR(vma)) {
>  		err = PTR_ERR(vma);
>  		goto out_put;
> @@ -1531,7 +1533,8 @@ static int igt_tmpfs_fallback(void *arg)
>  	struct i915_gem_context *ctx = arg;
>  	struct drm_i915_private *i915 = ctx->i915;
>  	struct vfsmount *gemfs = i915->mm.gemfs;
> -	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
> +	struct i915_address_space *vm =
> +		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
>  	struct drm_i915_gem_object *obj;
>  	struct i915_vma *vma;
>  	u32 *vaddr;
> @@ -1587,7 +1590,8 @@ static int igt_shrink_thp(void *arg)
>  {
>  	struct i915_gem_context *ctx = arg;
>  	struct drm_i915_private *i915 = ctx->i915;
> -	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
> +	struct i915_address_space *vm =
> +	       	ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
>  	struct drm_i915_gem_object *obj;
>  	struct i915_vma *vma;
>  	unsigned int flags = PIN_USER;
> @@ -1696,14 +1700,14 @@ int i915_gem_huge_page_mock_selftests(void)
>  		goto out_unlock;
>  	}
>  
> -	if (!i915_vm_is_48bit(&ppgtt->base)) {
> +	if (!i915_vm_is_48bit(&ppgtt->vm)) {
>  		pr_err("failed to create 48b PPGTT\n");
>  		err = -EINVAL;
>  		goto out_close;
>  	}
>  
>  	/* If we were ever hit this then it's time to mock the 64K scratch */
> -	if (!i915_vm_has_scratch_64K(&ppgtt->base)) {
> +	if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
>  		pr_err("PPGTT missing 64K scratch page\n");
>  		err = -EINVAL;
>  		goto out_close;
> @@ -1712,7 +1716,7 @@ int i915_gem_huge_page_mock_selftests(void)
>  	err = i915_subtests(tests, ppgtt);
>  
>  out_close:
> -	i915_ppgtt_close(&ppgtt->base);
> +	i915_ppgtt_close(&ppgtt->vm);
>  	i915_ppgtt_put(ppgtt);
>  
>  out_unlock:
> @@ -1758,7 +1762,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
>  	}
>  
>  	if (ctx->ppgtt)
> -		ctx->ppgtt->base.scrub_64K = true;
> +		ctx->ppgtt->vm.scrub_64K = true;
>  
>  	err = i915_subtests(tests, ctx);
>  
> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
> index b39392a00a6f..708e8d721448 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
> @@ -115,7 +115,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
>  {
>  	struct drm_i915_private *i915 = to_i915(obj->base.dev);
>  	struct i915_address_space *vm =
> -		ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
> +		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
>  	struct i915_request *rq;
>  	struct i915_vma *vma;
>  	struct i915_vma *batch;
> @@ -290,7 +290,7 @@ create_test_object(struct i915_gem_context *ctx,
>  {
>  	struct drm_i915_gem_object *obj;
>  	struct i915_address_space *vm =
> -		ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
> +		ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
>  	u64 size;
>  	int err;
>  
> @@ -557,7 +557,7 @@ static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
>  	list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
>  		struct i915_vma *vma;
>  
> -		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
> +		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
>  		if (IS_ERR(vma))
>  			continue;
>  
> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
> index ab9d7bee0aae..2dc72a984d45 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
> @@ -35,7 +35,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
>  	u64 size;
>  
>  	for (size = 0;
> -	     size + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
> +	     size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
>  	     size += I915_GTT_PAGE_SIZE) {
>  		struct i915_vma *vma;
>  
> @@ -57,7 +57,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
>  		return -EINVAL;
>  	}
>  
> -	if (list_empty(&i915->ggtt.base.inactive_list)) {
> +	if (list_empty(&i915->ggtt.vm.inactive_list)) {
>  		pr_err("No objects on the GGTT inactive list!\n");
>  		return -EINVAL;
>  	}
> @@ -69,7 +69,7 @@ static void unpin_ggtt(struct drm_i915_private *i915)
>  {
>  	struct i915_vma *vma;
>  
> -	list_for_each_entry(vma, &i915->ggtt.base.inactive_list, vm_link)
> +	list_for_each_entry(vma, &i915->ggtt.vm.inactive_list, vm_link)
>  		i915_vma_unpin(vma);
>  }
>  
> @@ -103,7 +103,7 @@ static int igt_evict_something(void *arg)
>  		goto cleanup;
>  
>  	/* Everything is pinned, nothing should happen */
> -	err = i915_gem_evict_something(&ggtt->base,
> +	err = i915_gem_evict_something(&ggtt->vm,
>  				       I915_GTT_PAGE_SIZE, 0, 0,
>  				       0, U64_MAX,
>  				       0);
> @@ -116,7 +116,7 @@ static int igt_evict_something(void *arg)
>  	unpin_ggtt(i915);
>  
>  	/* Everything is unpinned, we should be able to evict something */
> -	err = i915_gem_evict_something(&ggtt->base,
> +	err = i915_gem_evict_something(&ggtt->vm,
>  				       I915_GTT_PAGE_SIZE, 0, 0,
>  				       0, U64_MAX,
>  				       0);
> @@ -181,7 +181,7 @@ static int igt_evict_for_vma(void *arg)
>  		goto cleanup;
>  
>  	/* Everything is pinned, nothing should happen */
> -	err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
> +	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
>  	if (err != -ENOSPC) {
>  		pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
>  		       err);
> @@ -191,7 +191,7 @@ static int igt_evict_for_vma(void *arg)
>  	unpin_ggtt(i915);
>  
>  	/* Everything is unpinned, we should be able to evict the node */
> -	err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
> +	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
>  	if (err) {
>  		pr_err("i915_gem_evict_for_node returned err=%d\n",
>  		       err);
> @@ -229,7 +229,7 @@ static int igt_evict_for_cache_color(void *arg)
>  	 * i915_gtt_color_adjust throughout our driver, so using a mock color
>  	 * adjust will work just fine for our purposes.
>  	 */
> -	ggtt->base.mm.color_adjust = mock_color_adjust;
> +	ggtt->vm.mm.color_adjust = mock_color_adjust;
>  
>  	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
>  	if (IS_ERR(obj)) {
> @@ -265,7 +265,7 @@ static int igt_evict_for_cache_color(void *arg)
>  	i915_vma_unpin(vma);
>  
>  	/* Remove just the second vma */
> -	err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
> +	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
>  	if (err) {
>  		pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
>  		goto cleanup;
> @@ -276,7 +276,7 @@ static int igt_evict_for_cache_color(void *arg)
>  	 */
>  	target.color = I915_CACHE_L3_LLC;
>  
> -	err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
> +	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
>  	if (!err) {
>  		pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
>  		err = -EINVAL;
> @@ -288,7 +288,7 @@ static int igt_evict_for_cache_color(void *arg)
>  cleanup:
>  	unpin_ggtt(i915);
>  	cleanup_objects(i915);
> -	ggtt->base.mm.color_adjust = NULL;
> +	ggtt->vm.mm.color_adjust = NULL;
>  	return err;
>  }
>  
> @@ -305,7 +305,7 @@ static int igt_evict_vm(void *arg)
>  		goto cleanup;
>  
>  	/* Everything is pinned, nothing should happen */
> -	err = i915_gem_evict_vm(&ggtt->base);
> +	err = i915_gem_evict_vm(&ggtt->vm);
>  	if (err) {
>  		pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
>  		       err);
> @@ -314,7 +314,7 @@ static int igt_evict_vm(void *arg)
>  
>  	unpin_ggtt(i915);
>  
> -	err = i915_gem_evict_vm(&ggtt->base);
> +	err = i915_gem_evict_vm(&ggtt->vm);
>  	if (err) {
>  		pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
>  		       err);
> @@ -359,9 +359,9 @@ static int igt_evict_contexts(void *arg)
>  
>  	/* Reserve a block so that we know we have enough to fit a few rq */
>  	memset(&hole, 0, sizeof(hole));
> -	err = i915_gem_gtt_insert(&i915->ggtt.base, &hole,
> +	err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
>  				  PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
> -				  0, i915->ggtt.base.total,
> +				  0, i915->ggtt.vm.total,
>  				  PIN_NOEVICT);
>  	if (err)
>  		goto out_locked;
> @@ -377,9 +377,9 @@ static int igt_evict_contexts(void *arg)
>  			goto out_locked;
>  		}
>  
> -		if (i915_gem_gtt_insert(&i915->ggtt.base, &r->node,
> +		if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
>  					1ul << 20, 0, I915_COLOR_UNEVICTABLE,
> -					0, i915->ggtt.base.total,
> +					0, i915->ggtt.vm.total,
>  					PIN_NOEVICT)) {
>  			kfree(r);
>  			break;
> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
> index f7dc926f4ef1..58ab5e84ceb7 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
> @@ -151,14 +151,14 @@ static int igt_ppgtt_alloc(void *arg)
>  	if (err)
>  		goto err_ppgtt;
>  
> -	if (!ppgtt->base.allocate_va_range)
> +	if (!ppgtt->vm.allocate_va_range)
>  		goto err_ppgtt_cleanup;
>  
>  	/* Check we can allocate the entire range */
>  	for (size = 4096;
> -	     size <= ppgtt->base.total;
> +	     size <= ppgtt->vm.total;
>  	     size <<= 2) {
> -		err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
> +		err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
>  		if (err) {
>  			if (err == -ENOMEM) {
>  				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
> @@ -168,15 +168,15 @@ static int igt_ppgtt_alloc(void *arg)
>  			goto err_ppgtt_cleanup;
>  		}
>  
> -		ppgtt->base.clear_range(&ppgtt->base, 0, size);
> +		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
>  	}
>  
>  	/* Check we can incrementally allocate the entire range */
>  	for (last = 0, size = 4096;
> -	     size <= ppgtt->base.total;
> +	     size <= ppgtt->vm.total;
>  	     last = size, size <<= 2) {
> -		err = ppgtt->base.allocate_va_range(&ppgtt->base,
> -						    last, size - last);
> +		err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
> +						  last, size - last);
>  		if (err) {
>  			if (err == -ENOMEM) {
>  				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
> @@ -188,7 +188,7 @@ static int igt_ppgtt_alloc(void *arg)
>  	}
>  
>  err_ppgtt_cleanup:
> -	ppgtt->base.cleanup(&ppgtt->base);
> +	ppgtt->vm.cleanup(&ppgtt->vm);
>  err_ppgtt:
>  	mutex_unlock(&dev_priv->drm.struct_mutex);
>  	kfree(ppgtt);
> @@ -987,12 +987,12 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
>  		err = PTR_ERR(ppgtt);
>  		goto out_unlock;
>  	}
> -	GEM_BUG_ON(offset_in_page(ppgtt->base.total));
> -	GEM_BUG_ON(ppgtt->base.closed);
> +	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
> +	GEM_BUG_ON(ppgtt->vm.closed);
>  
> -	err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);
> +	err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
>  
> -	i915_ppgtt_close(&ppgtt->base);
> +	i915_ppgtt_close(&ppgtt->vm);
>  	i915_ppgtt_put(ppgtt);
>  out_unlock:
>  	mutex_unlock(&dev_priv->drm.struct_mutex);
> @@ -1061,18 +1061,18 @@ static int exercise_ggtt(struct drm_i915_private *i915,
>  
>  	mutex_lock(&i915->drm.struct_mutex);
>  restart:
> -	list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
> -	drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
> +	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
> +	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
>  		if (hole_start < last)
>  			continue;
>  
> -		if (ggtt->base.mm.color_adjust)
> -			ggtt->base.mm.color_adjust(node, 0,
> -						   &hole_start, &hole_end);
> +		if (ggtt->vm.mm.color_adjust)
> +			ggtt->vm.mm.color_adjust(node, 0,
> +						 &hole_start, &hole_end);
>  		if (hole_start >= hole_end)
>  			continue;
>  
> -		err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
> +		err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
>  		if (err)
>  			break;
>  
> @@ -1134,7 +1134,7 @@ static int igt_ggtt_page(void *arg)
>  		goto out_free;
>  
>  	memset(&tmp, 0, sizeof(tmp));
> -	err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
> +	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
>  					  count * PAGE_SIZE, 0,
>  					  I915_COLOR_UNEVICTABLE,
>  					  0, ggtt->mappable_end,
> @@ -1147,9 +1147,9 @@ static int igt_ggtt_page(void *arg)
>  	for (n = 0; n < count; n++) {
>  		u64 offset = tmp.start + n * PAGE_SIZE;
>  
> -		ggtt->base.insert_page(&ggtt->base,
> -				       i915_gem_object_get_dma_address(obj, 0),
> -				       offset, I915_CACHE_NONE, 0);
> +		ggtt->vm.insert_page(&ggtt->vm,
> +				     i915_gem_object_get_dma_address(obj, 0),
> +				     offset, I915_CACHE_NONE, 0);
>  	}
>  
>  	order = i915_random_order(count, &prng);
> @@ -1188,7 +1188,7 @@ static int igt_ggtt_page(void *arg)
>  
>  	kfree(order);
>  out_remove:
> -	ggtt->base.clear_range(&ggtt->base, tmp.start, tmp.size);
> +	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
>  	intel_runtime_pm_put(i915);
>  	drm_mm_remove_node(&tmp);
>  out_unpin:
> @@ -1229,7 +1229,7 @@ static int exercise_mock(struct drm_i915_private *i915,
>  	ppgtt = ctx->ppgtt;
>  	GEM_BUG_ON(!ppgtt);
>  
> -	err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);
> +	err = func(i915, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
>  
>  	mock_context_close(ctx);
>  	return err;
> @@ -1270,7 +1270,7 @@ static int igt_gtt_reserve(void *arg)
>  
>  	/* Start by filling the GGTT */
>  	for (total = 0;
> -	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
> +	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
>  	     total += 2*I915_GTT_PAGE_SIZE) {
>  		struct i915_vma *vma;
>  
> @@ -1288,20 +1288,20 @@ static int igt_gtt_reserve(void *arg)
>  
>  		list_add(&obj->st_link, &objects);
>  
> -		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
> +		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
>  		if (IS_ERR(vma)) {
>  			err = PTR_ERR(vma);
>  			goto out;
>  		}
>  
> -		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
> +		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
>  					   obj->base.size,
>  					   total,
>  					   obj->cache_level,
>  					   0);
>  		if (err) {
>  			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
> -			       total, i915->ggtt.base.total, err);
> +			       total, i915->ggtt.vm.total, err);
>  			goto out;
>  		}
>  		track_vma_bind(vma);
> @@ -1319,7 +1319,7 @@ static int igt_gtt_reserve(void *arg)
>  
>  	/* Now we start forcing evictions */
>  	for (total = I915_GTT_PAGE_SIZE;
> -	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
> +	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
>  	     total += 2*I915_GTT_PAGE_SIZE) {
>  		struct i915_vma *vma;
>  
> @@ -1337,20 +1337,20 @@ static int igt_gtt_reserve(void *arg)
>  
>  		list_add(&obj->st_link, &objects);
>  
> -		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
> +		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
>  		if (IS_ERR(vma)) {
>  			err = PTR_ERR(vma);
>  			goto out;
>  		}
>  
> -		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
> +		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
>  					   obj->base.size,
>  					   total,
>  					   obj->cache_level,
>  					   0);
>  		if (err) {
>  			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
> -			       total, i915->ggtt.base.total, err);
> +			       total, i915->ggtt.vm.total, err);
>  			goto out;
>  		}
>  		track_vma_bind(vma);
> @@ -1371,7 +1371,7 @@ static int igt_gtt_reserve(void *arg)
>  		struct i915_vma *vma;
>  		u64 offset;
>  
> -		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
> +		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
>  		if (IS_ERR(vma)) {
>  			err = PTR_ERR(vma);
>  			goto out;
> @@ -1383,18 +1383,18 @@ static int igt_gtt_reserve(void *arg)
>  			goto out;
>  		}
>  
> -		offset = random_offset(0, i915->ggtt.base.total,
> +		offset = random_offset(0, i915->ggtt.vm.total,
>  				       2*I915_GTT_PAGE_SIZE,
>  				       I915_GTT_MIN_ALIGNMENT);
>  
> -		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
> +		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
>  					   obj->base.size,
>  					   offset,
>  					   obj->cache_level,
>  					   0);
>  		if (err) {
>  			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
> -			       total, i915->ggtt.base.total, err);
> +			       total, i915->ggtt.vm.total, err);
>  			goto out;
>  		}
>  		track_vma_bind(vma);
> @@ -1429,8 +1429,8 @@ static int igt_gtt_insert(void *arg)
>  		u64 start, end;
>  	} invalid_insert[] = {
>  		{
> -			i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
> -			0, i915->ggtt.base.total,
> +			i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0,
> +			0, i915->ggtt.vm.total,
>  		},
>  		{
>  			2*I915_GTT_PAGE_SIZE, 0,
> @@ -1460,7 +1460,7 @@ static int igt_gtt_insert(void *arg)
>  
>  	/* Check a couple of obviously invalid requests */
>  	for (ii = invalid_insert; ii->size; ii++) {
> -		err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
> +		err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp,
>  					  ii->size, ii->alignment,
>  					  I915_COLOR_UNEVICTABLE,
>  					  ii->start, ii->end,
> @@ -1475,7 +1475,7 @@ static int igt_gtt_insert(void *arg)
>  
>  	/* Start by filling the GGTT */
>  	for (total = 0;
> -	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
> +	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
>  	     total += I915_GTT_PAGE_SIZE) {
>  		struct i915_vma *vma;
>  
> @@ -1493,15 +1493,15 @@ static int igt_gtt_insert(void *arg)
>  
>  		list_add(&obj->st_link, &objects);
>  
> -		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
> +		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
>  		if (IS_ERR(vma)) {
>  			err = PTR_ERR(vma);
>  			goto out;
>  		}
>  
> -		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
> +		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
>  					  obj->base.size, 0, obj->cache_level,
> -					  0, i915->ggtt.base.total,
> +					  0, i915->ggtt.vm.total,
>  					  0);
>  		if (err == -ENOSPC) {
>  			/* maxed out the GGTT space */
> @@ -1510,7 +1510,7 @@ static int igt_gtt_insert(void *arg)
>  		}
>  		if (err) {
>  			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
> -			       total, i915->ggtt.base.total, err);
> +			       total, i915->ggtt.vm.total, err);
>  			goto out;
>  		}
>  		track_vma_bind(vma);
> @@ -1522,7 +1522,7 @@ static int igt_gtt_insert(void *arg)
>  	list_for_each_entry(obj, &objects, st_link) {
>  		struct i915_vma *vma;
>  
> -		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
> +		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
>  		if (IS_ERR(vma)) {
>  			err = PTR_ERR(vma);
>  			goto out;
> @@ -1542,7 +1542,7 @@ static int igt_gtt_insert(void *arg)
>  		struct i915_vma *vma;
>  		u64 offset;
>  
> -		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
> +		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
>  		if (IS_ERR(vma)) {
>  			err = PTR_ERR(vma);
>  			goto out;
> @@ -1557,13 +1557,13 @@ static int igt_gtt_insert(void *arg)
>  			goto out;
>  		}
>  
> -		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
> +		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
>  					  obj->base.size, 0, obj->cache_level,
> -					  0, i915->ggtt.base.total,
> +					  0, i915->ggtt.vm.total,
>  					  0);
>  		if (err) {
>  			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
> -			       total, i915->ggtt.base.total, err);
> +			       total, i915->ggtt.vm.total, err);
>  			goto out;
>  		}
>  		track_vma_bind(vma);
> @@ -1579,7 +1579,7 @@ static int igt_gtt_insert(void *arg)
>  
>  	/* And then force evictions */
>  	for (total = 0;
> -	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
> +	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
>  	     total += 2*I915_GTT_PAGE_SIZE) {
>  		struct i915_vma *vma;
>  
> @@ -1597,19 +1597,19 @@ static int igt_gtt_insert(void *arg)
>  
>  		list_add(&obj->st_link, &objects);
>  
> -		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
> +		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
>  		if (IS_ERR(vma)) {
>  			err = PTR_ERR(vma);
>  			goto out;
>  		}
>  
> -		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
> +		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
>  					  obj->base.size, 0, obj->cache_level,
> -					  0, i915->ggtt.base.total,
> +					  0, i915->ggtt.vm.total,
>  					  0);
>  		if (err) {
>  			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
> -			       total, i915->ggtt.base.total, err);
> +			       total, i915->ggtt.vm.total, err);
>  			goto out;
>  		}
>  		track_vma_bind(vma);
> @@ -1669,7 +1669,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
>  		SUBTEST(igt_ggtt_page),
>  	};
>  
> -	GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));
> +	GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
>  
>  	return i915_subtests(tests, i915);
>  }
> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
> index fbdb2419d418..2b2dde94526f 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
> @@ -113,7 +113,7 @@ static int igt_gem_huge(void *arg)
>  
>  	obj = huge_gem_object(i915,
>  			      nreal * PAGE_SIZE,
> -			      i915->ggtt.base.total + PAGE_SIZE);
> +			      i915->ggtt.vm.total + PAGE_SIZE);
>  	if (IS_ERR(obj))
>  		return PTR_ERR(obj);
>  
> @@ -311,7 +311,7 @@ static int igt_partial_tiling(void *arg)
>  
>  	obj = huge_gem_object(i915,
>  			      nreal << PAGE_SHIFT,
> -			      (1 + next_prime_number(i915->ggtt.base.total >> PAGE_SHIFT)) << PAGE_SHIFT);
> +			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
>  	if (IS_ERR(obj))
>  		return PTR_ERR(obj);
>  
> @@ -440,7 +440,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
>  	struct i915_vma *vma;
>  	int err;
>  
> -	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
> +	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
>  	if (IS_ERR(vma))
>  		return PTR_ERR(vma);
>  
> diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
> index 94bc2e1898a4..a3a89aadeccb 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_request.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_request.c
> @@ -430,7 +430,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
>  	if (err)
>  		goto err;
>  
> -	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
> +	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
>  	if (IS_ERR(vma)) {
>  		err = PTR_ERR(vma);
>  		goto err;
> @@ -555,7 +555,8 @@ static int live_empty_request(void *arg)
>  static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
>  {
>  	struct i915_gem_context *ctx = i915->kernel_context;
> -	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
> +	struct i915_address_space *vm =
> +		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
>  	struct drm_i915_gem_object *obj;
>  	const int gen = INTEL_GEN(i915);
>  	struct i915_vma *vma;
> diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
> index e90f97236e50..8400a8cc5cf2 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_vma.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
> @@ -35,7 +35,7 @@ static bool assert_vma(struct i915_vma *vma,
>  {
>  	bool ok = true;
>  
> -	if (vma->vm != &ctx->ppgtt->base) {
> +	if (vma->vm != &ctx->ppgtt->vm) {
>  		pr_err("VMA created with wrong VM\n");
>  		ok = false;
>  	}
> @@ -110,8 +110,7 @@ static int create_vmas(struct drm_i915_private *i915,
>  	list_for_each_entry(obj, objects, st_link) {
>  		for (pinned = 0; pinned <= 1; pinned++) {
>  			list_for_each_entry(ctx, contexts, link) {
> -				struct i915_address_space *vm =
> -					&ctx->ppgtt->base;
> +				struct i915_address_space *vm = &ctx->ppgtt->vm;
>  				struct i915_vma *vma;
>  				int err;
>  
> @@ -259,12 +258,12 @@ static int igt_vma_pin1(void *arg)
>  		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192),
>  		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
>  		VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
> -		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
> +		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
>  
>  		VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
>  		INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end),
> -		VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
> -		INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.base.total),
> +		VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
> +		INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.vm.total),
>  		INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),
>  
>  		VALID(4096, PIN_GLOBAL),
> @@ -272,12 +271,12 @@ static int igt_vma_pin1(void *arg)
>  		VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
>  		VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
>  		NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
> -		VALID(i915->ggtt.base.total - 4096, PIN_GLOBAL),
> -		VALID(i915->ggtt.base.total, PIN_GLOBAL),
> -		NOSPACE(i915->ggtt.base.total + 4096, PIN_GLOBAL),
> +		VALID(i915->ggtt.vm.total - 4096, PIN_GLOBAL),
> +		VALID(i915->ggtt.vm.total, PIN_GLOBAL),
> +		NOSPACE(i915->ggtt.vm.total + 4096, PIN_GLOBAL),
>  		NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
>  		INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
> -		INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
> +		INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
>  		INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),
>  
>  		VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
> @@ -289,9 +288,9 @@ static int igt_vma_pin1(void *arg)
>  		 * variable start, end and size.
>  		 */
>  		NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end),
> -		NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.base.total),
> +		NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.vm.total),
>  		NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
> -		NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
> +		NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
>  #endif
>  		{ },
>  #undef NOSPACE
> @@ -307,13 +306,13 @@ static int igt_vma_pin1(void *arg)
>  	 * focusing on error handling of boundary conditions.
>  	 */
>  
> -	GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.base.mm));
> +	GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.vm.mm));
>  
>  	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
>  	if (IS_ERR(obj))
>  		return PTR_ERR(obj);
>  
> -	vma = checked_vma_instance(obj, &i915->ggtt.base, NULL);
> +	vma = checked_vma_instance(obj, &i915->ggtt.vm, NULL);
>  	if (IS_ERR(vma))
>  		goto out;
>  
> @@ -405,7 +404,7 @@ static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
>  static int igt_vma_rotate(void *arg)
>  {
>  	struct drm_i915_private *i915 = arg;
> -	struct i915_address_space *vm = &i915->ggtt.base;
> +	struct i915_address_space *vm = &i915->ggtt.vm;
>  	struct drm_i915_gem_object *obj;
>  	const struct intel_rotation_plane_info planes[] = {
>  		{ .width = 1, .height = 1, .stride = 1 },
> @@ -604,7 +603,7 @@ static bool assert_pin(struct i915_vma *vma,
>  static int igt_vma_partial(void *arg)
>  {
>  	struct drm_i915_private *i915 = arg;
> -	struct i915_address_space *vm = &i915->ggtt.base;
> +	struct i915_address_space *vm = &i915->ggtt.vm;
>  	const unsigned int npages = 1021; /* prime! */
>  	struct drm_i915_gem_object *obj;
>  	const struct phase {
> diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
> index 2091e3a6a5be..390a157b37c3 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
> @@ -107,8 +107,8 @@ static int emit_recurse_batch(struct hang *h,
>  	struct drm_i915_private *i915 = h->i915;
>  	struct i915_address_space *vm =
>  		rq->gem_context->ppgtt ?
> -		&rq->gem_context->ppgtt->base :
> -		&i915->ggtt.base;
> +		&rq->gem_context->ppgtt->vm :
> +		&i915->ggtt.vm;
>  	struct i915_vma *hws, *vma;
>  	unsigned int flags;
>  	u32 *batch;
> diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> index 68cb9126b3e1..0b6da08c8cae 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> @@ -83,7 +83,7 @@ static int emit_recurse_batch(struct spinner *spin,
>  			      struct i915_request *rq,
>  			      u32 arbitration_command)
>  {
> -	struct i915_address_space *vm = &rq->gem_context->ppgtt->base;
> +	struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
>  	struct i915_vma *hws, *vma;
>  	u32 *batch;
>  	int err;
> diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
> index 17444a3abbb9..f1cfb0fb6bea 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
> @@ -33,7 +33,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
>  	memset(cs, 0xc5, PAGE_SIZE);
>  	i915_gem_object_unpin_map(result);
>  
> -	vma = i915_vma_instance(result, &engine->i915->ggtt.base, NULL);
> +	vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
>  	if (IS_ERR(vma)) {
>  		err = PTR_ERR(vma);
>  		goto err_obj;
> diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
> index 36c112088940..556c546f2715 100644
> --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
> +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
> @@ -66,25 +66,25 @@ mock_ppgtt(struct drm_i915_private *i915,
>  		return NULL;
>  
>  	kref_init(&ppgtt->ref);
> -	ppgtt->base.i915 = i915;
> -	ppgtt->base.total = round_down(U64_MAX, PAGE_SIZE);
> -	ppgtt->base.file = ERR_PTR(-ENODEV);
> -
> -	INIT_LIST_HEAD(&ppgtt->base.active_list);
> -	INIT_LIST_HEAD(&ppgtt->base.inactive_list);
> -	INIT_LIST_HEAD(&ppgtt->base.unbound_list);
> -
> -	INIT_LIST_HEAD(&ppgtt->base.global_link);
> -	drm_mm_init(&ppgtt->base.mm, 0, ppgtt->base.total);
> -
> -	ppgtt->base.clear_range = nop_clear_range;
> -	ppgtt->base.insert_page = mock_insert_page;
> -	ppgtt->base.insert_entries = mock_insert_entries;
> -	ppgtt->base.bind_vma = mock_bind_ppgtt;
> -	ppgtt->base.unbind_vma = mock_unbind_ppgtt;
> -	ppgtt->base.set_pages = ppgtt_set_pages;
> -	ppgtt->base.clear_pages = clear_pages;
> -	ppgtt->base.cleanup = mock_cleanup;
> +	ppgtt->vm.i915 = i915;
> +	ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
> +	ppgtt->vm.file = ERR_PTR(-ENODEV);
> +
> +	INIT_LIST_HEAD(&ppgtt->vm.active_list);
> +	INIT_LIST_HEAD(&ppgtt->vm.inactive_list);
> +	INIT_LIST_HEAD(&ppgtt->vm.unbound_list);
> +
> +	INIT_LIST_HEAD(&ppgtt->vm.global_link);
> +	drm_mm_init(&ppgtt->vm.mm, 0, ppgtt->vm.total);
> +
> +	ppgtt->vm.clear_range = nop_clear_range;
> +	ppgtt->vm.insert_page = mock_insert_page;
> +	ppgtt->vm.insert_entries = mock_insert_entries;
> +	ppgtt->vm.bind_vma = mock_bind_ppgtt;
> +	ppgtt->vm.unbind_vma = mock_unbind_ppgtt;
> +	ppgtt->vm.set_pages = ppgtt_set_pages;
> +	ppgtt->vm.clear_pages = clear_pages;
> +	ppgtt->vm.cleanup = mock_cleanup;
>  
>  	return ppgtt;
>  }
> @@ -107,27 +107,27 @@ void mock_init_ggtt(struct drm_i915_private *i915)
>  
>  	INIT_LIST_HEAD(&i915->vm_list);
>  
> -	ggtt->base.i915 = i915;
> +	ggtt->vm.i915 = i915;
>  
>  	ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
>  	ggtt->mappable_end = resource_size(&ggtt->gmadr);
> -	ggtt->base.total = 4096 * PAGE_SIZE;
> -
> -	ggtt->base.clear_range = nop_clear_range;
> -	ggtt->base.insert_page = mock_insert_page;
> -	ggtt->base.insert_entries = mock_insert_entries;
> -	ggtt->base.bind_vma = mock_bind_ggtt;
> -	ggtt->base.unbind_vma = mock_unbind_ggtt;
> -	ggtt->base.set_pages = ggtt_set_pages;
> -	ggtt->base.clear_pages = clear_pages;
> -	ggtt->base.cleanup = mock_cleanup;
> -
> -	i915_address_space_init(&ggtt->base, i915, "global");
> +	ggtt->vm.total = 4096 * PAGE_SIZE;
> +
> +	ggtt->vm.clear_range = nop_clear_range;
> +	ggtt->vm.insert_page = mock_insert_page;
> +	ggtt->vm.insert_entries = mock_insert_entries;
> +	ggtt->vm.bind_vma = mock_bind_ggtt;
> +	ggtt->vm.unbind_vma = mock_unbind_ggtt;
> +	ggtt->vm.set_pages = ggtt_set_pages;
> +	ggtt->vm.clear_pages = clear_pages;
> +	ggtt->vm.cleanup = mock_cleanup;
> +
> +	i915_address_space_init(&ggtt->vm, i915, "global");
>  }
>  
>  void mock_fini_ggtt(struct drm_i915_private *i915)
>  {
>  	struct i915_ggtt *ggtt = &i915->ggtt;
>  
> -	i915_address_space_fini(&ggtt->base);
> +	i915_address_space_fini(&ggtt->vm);
>  }
> -- 
> 2.17.1
Chris Wilson June 5, 2018, 2:41 p.m. UTC | #2
Quoting Mika Kuoppala (2018-06-05 15:38:09)
> Chris Wilson <chris@chris-wilson.co.uk> writes:
> 
> > In the near future, I want to subclass gen6_hw_ppgtt as it contains a
> > few specialised members and I wish to add more. To avoid the ugliness of
> > using ppgtt->base.base, rename the i915_hw_ppgtt base member
> > (i915_address_space) as vm, which is our common shorthand for an
> > i915_address_space local.
> >
> 
> Strolled it through. Couple of formatting fixes and
> getting dev_priv through vm. Didn't notice anything
> out of ordinary.
> 
> For me it reads better now, and we should have done this
> prior to doing 32bit ppgtts :P

The nuisance one for me is ggtt->vm.mm; I think we could do with calling
the drm_mm "mgr", i.e. ggtt->vm.mgr. We may also want to rename vma to
iova to follow other drivers.
-Chris
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 7c9ec4f4f36c..380eeb2a0e83 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -61,7 +61,7 @@  static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 	}
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
-	ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
+	ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
 				  size, I915_GTT_PAGE_SIZE,
 				  I915_COLOR_UNEVICTABLE,
 				  start, end, flags);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 05d15a095310..2ff0d40281a9 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -361,9 +361,9 @@  int intel_gvt_load_firmware(struct intel_gvt *gvt);
 #define gvt_aperture_sz(gvt)	  (gvt->dev_priv->ggtt.mappable_end)
 #define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
 
-#define gvt_ggtt_gm_sz(gvt)	  (gvt->dev_priv->ggtt.base.total)
+#define gvt_ggtt_gm_sz(gvt)	  (gvt->dev_priv->ggtt.vm.total)
 #define gvt_ggtt_sz(gvt) \
-	((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
+	((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
 #define gvt_hidden_sz(gvt)	  (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
 
 #define gvt_aperture_gmadr_base(gvt) (0)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 15e86d34a81c..698af45e229c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -328,7 +328,7 @@  static int per_file_stats(int id, void *ptr, void *data)
 		} else {
 			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
 
-			if (ppgtt->base.file != stats->file_priv)
+			if (ppgtt->vm.file != stats->file_priv)
 				continue;
 		}
 
@@ -508,7 +508,7 @@  static int i915_gem_object_info(struct seq_file *m, void *data)
 		   dpy_count, dpy_size);
 
 	seq_printf(m, "%llu [%pa] gtt total\n",
-		   ggtt->base.total, &ggtt->mappable_end);
+		   ggtt->vm.total, &ggtt->mappable_end);
 	seq_printf(m, "Supported page sizes: %s\n",
 		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
 					buf, sizeof(buf)));
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 06ecac4c3253..a4bb30c32a52 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3213,7 +3213,7 @@  struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
 {
-	return container_of(vm, struct i915_hw_ppgtt, base);
+	return container_of(vm, struct i915_hw_ppgtt, vm);
 }
 
 /* i915_gem_fence_reg.c */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cb680ddafa0c..6ce29d1c20be 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -65,7 +65,7 @@  insert_mappable_node(struct i915_ggtt *ggtt,
                      struct drm_mm_node *node, u32 size)
 {
 	memset(node, 0, sizeof(*node));
-	return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
+	return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
 					   size, 0, I915_COLOR_UNEVICTABLE,
 					   0, ggtt->mappable_end,
 					   DRM_MM_INSERT_LOW);
@@ -249,17 +249,17 @@  i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	struct i915_vma *vma;
 	u64 pinned;
 
-	pinned = ggtt->base.reserved;
+	pinned = ggtt->vm.reserved;
 	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
+	list_for_each_entry(vma, &ggtt->vm.active_list, vm_link)
 		if (i915_vma_is_pinned(vma))
 			pinned += vma->node.size;
-	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
+	list_for_each_entry(vma, &ggtt->vm.inactive_list, vm_link)
 		if (i915_vma_is_pinned(vma))
 			pinned += vma->node.size;
 	mutex_unlock(&dev->struct_mutex);
 
-	args->aper_size = ggtt->base.total;
+	args->aper_size = ggtt->vm.total;
 	args->aper_available_size = args->aper_size - pinned;
 
 	return 0;
@@ -1223,9 +1223,9 @@  i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 		page_length = remain < page_length ? remain : page_length;
 		if (node.allocated) {
 			wmb();
-			ggtt->base.insert_page(&ggtt->base,
-					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
-					       node.start, I915_CACHE_NONE, 0);
+			ggtt->vm.insert_page(&ggtt->vm,
+					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+					     node.start, I915_CACHE_NONE, 0);
 			wmb();
 		} else {
 			page_base += offset & PAGE_MASK;
@@ -1246,8 +1246,7 @@  i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 out_unpin:
 	if (node.allocated) {
 		wmb();
-		ggtt->base.clear_range(&ggtt->base,
-				       node.start, node.size);
+		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
 		remove_mappable_node(&node);
 	} else {
 		i915_vma_unpin(vma);
@@ -1426,9 +1425,9 @@  i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		page_length = remain < page_length ? remain : page_length;
 		if (node.allocated) {
 			wmb(); /* flush the write before we modify the GGTT */
-			ggtt->base.insert_page(&ggtt->base,
-					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
-					       node.start, I915_CACHE_NONE, 0);
+			ggtt->vm.insert_page(&ggtt->vm,
+					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+					     node.start, I915_CACHE_NONE, 0);
 			wmb(); /* flush modifications to the GGTT (insert_page) */
 		} else {
 			page_base += offset & PAGE_MASK;
@@ -1455,8 +1454,7 @@  i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 out_unpin:
 	if (node.allocated) {
 		wmb();
-		ggtt->base.clear_range(&ggtt->base,
-				       node.start, node.size);
+		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
 		remove_mappable_node(&node);
 	} else {
 		i915_vma_unpin(vma);
@@ -4374,7 +4372,7 @@  i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 u64 flags)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct i915_address_space *vm = &dev_priv->ggtt.base;
+	struct i915_address_space *vm = &dev_priv->ggtt.vm;
 	struct i915_vma *vma;
 	int ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 38c6e9e4e91b..b2c7ac1b074d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -197,7 +197,7 @@  static void context_close(struct i915_gem_context *ctx)
 	 */
 	lut_close(ctx);
 	if (ctx->ppgtt)
-		i915_ppgtt_close(&ctx->ppgtt->base);
+		i915_ppgtt_close(&ctx->ppgtt->vm);
 
 	ctx->file_priv = ERR_PTR(-EBADF);
 	i915_gem_context_put(ctx);
@@ -249,7 +249,7 @@  static u32 default_desc_template(const struct drm_i915_private *i915,
 	desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
 
 	address_mode = INTEL_LEGACY_32B_CONTEXT;
-	if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
+	if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
 		address_mode = INTEL_LEGACY_64B_CONTEXT;
 	desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
@@ -810,11 +810,11 @@  int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 		break;
 	case I915_CONTEXT_PARAM_GTT_SIZE:
 		if (ctx->ppgtt)
-			args->value = ctx->ppgtt->base.total;
+			args->value = ctx->ppgtt->vm.total;
 		else if (to_i915(dev)->mm.aliasing_ppgtt)
-			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
+			args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
 		else
-			args->value = to_i915(dev)->ggtt.base.total;
+			args->value = to_i915(dev)->ggtt.vm.total;
 		break;
 	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
 		args->value = i915_gem_context_no_error_capture(ctx);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f627a8c47c58..eefd449502e2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -703,7 +703,7 @@  static int eb_select_context(struct i915_execbuffer *eb)
 		return -ENOENT;
 
 	eb->ctx = ctx;
-	eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;
+	eb->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &eb->i915->ggtt.vm;
 
 	eb->context_flags = 0;
 	if (ctx->flags & CONTEXT_NO_ZEROMAP)
@@ -943,9 +943,9 @@  static void reloc_cache_reset(struct reloc_cache *cache)
 		if (cache->node.allocated) {
 			struct i915_ggtt *ggtt = cache_to_ggtt(cache);
 
-			ggtt->base.clear_range(&ggtt->base,
-					       cache->node.start,
-					       cache->node.size);
+			ggtt->vm.clear_range(&ggtt->vm,
+					     cache->node.start,
+					     cache->node.size);
 			drm_mm_remove_node(&cache->node);
 		} else {
 			i915_vma_unpin((struct i915_vma *)cache->node.mm);
@@ -1016,7 +1016,7 @@  static void *reloc_iomap(struct drm_i915_gem_object *obj,
 		if (IS_ERR(vma)) {
 			memset(&cache->node, 0, sizeof(cache->node));
 			err = drm_mm_insert_node_in_range
-				(&ggtt->base.mm, &cache->node,
+				(&ggtt->vm.mm, &cache->node,
 				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
 				 0, ggtt->mappable_end,
 				 DRM_MM_INSERT_LOW);
@@ -1037,9 +1037,9 @@  static void *reloc_iomap(struct drm_i915_gem_object *obj,
 	offset = cache->node.start;
 	if (cache->node.allocated) {
 		wmb();
-		ggtt->base.insert_page(&ggtt->base,
-				       i915_gem_object_get_dma_address(obj, page),
-				       offset, I915_CACHE_NONE, 0);
+		ggtt->vm.insert_page(&ggtt->vm,
+				     i915_gem_object_get_dma_address(obj, page),
+				     offset, I915_CACHE_NONE, 0);
 	} else {
 		offset += page << PAGE_SHIFT;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index de9180516308..12b1386e47e9 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -780,7 +780,7 @@  static void gen8_initialize_pml4(struct i915_address_space *vm,
  */
 static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
 {
-	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
+	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask;
 }
 
 /* Removes entries from a single page table, releasing it if it's empty.
@@ -973,7 +973,7 @@  gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
 	gen8_pte_t *vaddr;
 	bool ret;
 
-	GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
+	GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
 	pd = pdp->page_directory[idx->pdpe];
 	vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
 	do {
@@ -1004,7 +1004,7 @@  gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
 					break;
 				}
 
-				GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
+				GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
 				pd = pdp->page_directory[idx->pdpe];
 			}
 
@@ -1233,7 +1233,7 @@  static int gen8_init_scratch(struct i915_address_space *vm)
 
 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
 {
-	struct i915_address_space *vm = &ppgtt->base;
+	struct i915_address_space *vm = &ppgtt->vm;
 	struct drm_i915_private *dev_priv = vm->i915;
 	enum vgt_g2v_type msg;
 	int i;
@@ -1294,13 +1294,13 @@  static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
 	int i;
 
 	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
-		if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
+		if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
 			continue;
 
-		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
+		gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
 	}
 
-	cleanup_px(&ppgtt->base, &ppgtt->pml4);
+	cleanup_px(&ppgtt->vm, &ppgtt->pml4);
 }
 
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -1314,7 +1314,7 @@  static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 	if (use_4lvl(vm))
 		gen8_ppgtt_cleanup_4lvl(ppgtt);
 	else
-		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);
+		gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
 
 	gen8_free_scratch(vm);
 }
@@ -1450,7 +1450,7 @@  static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
 			  gen8_pte_t scratch_pte,
 			  struct seq_file *m)
 {
-	struct i915_address_space *vm = &ppgtt->base;
+	struct i915_address_space *vm = &ppgtt->vm;
 	struct i915_page_directory *pd;
 	u32 pdpe;
 
@@ -1460,7 +1460,7 @@  static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
 		u64 pd_start = start;
 		u32 pde;
 
-		if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
+		if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd)
 			continue;
 
 		seq_printf(m, "\tPDPE #%d\n", pdpe);
@@ -1468,7 +1468,7 @@  static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
 			u32 pte;
 			gen8_pte_t *pt_vaddr;
 
-			if (pd->page_table[pde] == ppgtt->base.scratch_pt)
+			if (pd->page_table[pde] == ppgtt->vm.scratch_pt)
 				continue;
 
 			pt_vaddr = kmap_atomic_px(pt);
@@ -1501,10 +1501,10 @@  static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
 
 static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 {
-	struct i915_address_space *vm = &ppgtt->base;
+	struct i915_address_space *vm = &ppgtt->vm;
 	const gen8_pte_t scratch_pte =
 		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
-	u64 start = 0, length = ppgtt->base.total;
+	u64 start = 0, length = ppgtt->vm.total;
 
 	if (use_4lvl(vm)) {
 		u64 pml4e;
@@ -1512,7 +1512,7 @@  static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 		struct i915_page_directory_pointer *pdp;
 
 		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
-			if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
+			if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp)
 				continue;
 
 			seq_printf(m, "    PML4E #%llu\n", pml4e);
@@ -1525,10 +1525,10 @@  static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 
 static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
 {
-	struct i915_address_space *vm = &ppgtt->base;
+	struct i915_address_space *vm = &ppgtt->vm;
 	struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
 	struct i915_page_directory *pd;
-	u64 start = 0, length = ppgtt->base.total;
+	u64 start = 0, length = ppgtt->vm.total;
 	u64 from = start;
 	unsigned int pdpe;
 
@@ -1564,11 +1564,11 @@  static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
  */
 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
-	struct i915_address_space *vm = &ppgtt->base;
+	struct i915_address_space *vm = &ppgtt->vm;
 	struct drm_i915_private *dev_priv = vm->i915;
 	int ret;
 
-	ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
+	ppgtt->vm.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
 		1ULL << 48 :
 		1ULL << 32;
 
@@ -1576,26 +1576,26 @@  static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	 * And we are not sure about the latter so play safe for now.
 	 */
 	if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
-		ppgtt->base.pt_kmap_wc = true;
+		ppgtt->vm.pt_kmap_wc = true;
 
-	ret = gen8_init_scratch(&ppgtt->base);
+	ret = gen8_init_scratch(&ppgtt->vm);
 	if (ret) {
-		ppgtt->base.total = 0;
+		ppgtt->vm.total = 0;
 		return ret;
 	}
 
 	if (use_4lvl(vm)) {
-		ret = setup_px(&ppgtt->base, &ppgtt->pml4);
+		ret = setup_px(&ppgtt->vm, &ppgtt->pml4);
 		if (ret)
 			goto free_scratch;
 
-		gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
+		gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4);
 
-		ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
-		ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
-		ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
+		ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
+		ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
+		ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
 	} else {
-		ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
+		ret = __pdp_init(&ppgtt->vm, &ppgtt->pdp);
 		if (ret)
 			goto free_scratch;
 
@@ -1607,35 +1607,35 @@  static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 			}
 		}
 
-		ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
-		ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
-		ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
+		ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
+		ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
+		ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
 	}
 
 	if (intel_vgpu_active(dev_priv))
 		gen8_ppgtt_notify_vgt(ppgtt, true);
 
-	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
-	ppgtt->base.bind_vma = gen8_ppgtt_bind_vma;
-	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
-	ppgtt->base.set_pages = ppgtt_set_pages;
-	ppgtt->base.clear_pages = clear_pages;
+	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
+	ppgtt->vm.bind_vma = gen8_ppgtt_bind_vma;
+	ppgtt->vm.unbind_vma = ppgtt_unbind_vma;
+	ppgtt->vm.set_pages = ppgtt_set_pages;
+	ppgtt->vm.clear_pages = clear_pages;
 	ppgtt->debug_dump = gen8_dump_ppgtt;
 
 	return 0;
 
 free_scratch:
-	gen8_free_scratch(&ppgtt->base);
+	gen8_free_scratch(&ppgtt->vm);
 	return ret;
 }
 
 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 {
-	struct i915_address_space *vm = &ppgtt->base;
+	struct i915_address_space *vm = &ppgtt->vm;
 	struct i915_page_table *unused;
 	gen6_pte_t scratch_pte;
 	u32 pd_entry, pte, pde;
-	u32 start = 0, length = ppgtt->base.total;
+	u32 start = 0, length = ppgtt->vm.total;
 
 	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
 				     I915_CACHE_LLC, 0);
@@ -1972,8 +1972,8 @@  static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 
 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 {
-	struct i915_address_space *vm = &ppgtt->base;
-	struct drm_i915_private *dev_priv = ppgtt->base.i915;
+	struct i915_address_space *vm = &ppgtt->vm;
+	struct drm_i915_private *dev_priv = ppgtt->vm.i915;
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	int ret;
 
@@ -1981,16 +1981,16 @@  static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 	 * allocator works in address space sizes, so it's multiplied by page
 	 * size. We allocate at the top of the GTT to avoid fragmentation.
 	 */
-	BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
+	BUG_ON(!drm_mm_initialized(&ggtt->vm.mm));
 
 	ret = gen6_init_scratch(vm);
 	if (ret)
 		return ret;
 
-	ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
+	ret = i915_gem_gtt_insert(&ggtt->vm, &ppgtt->node,
 				  GEN6_PD_SIZE, GEN6_PD_ALIGN,
 				  I915_COLOR_UNEVICTABLE,
-				  0, ggtt->base.total,
+				  0, ggtt->vm.total,
 				  PIN_HIGH);
 	if (ret)
 		goto err_out;
@@ -2023,16 +2023,16 @@  static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
 	u32 pde;
 
 	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
-		ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
+		ppgtt->pd.page_table[pde] = ppgtt->vm.scratch_pt;
 }
 
 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
-	struct drm_i915_private *dev_priv = ppgtt->base.i915;
+	struct drm_i915_private *dev_priv = ppgtt->vm.i915;
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	int ret;
 
-	ppgtt->base.pte_encode = ggtt->base.pte_encode;
+	ppgtt->vm.pte_encode = ggtt->vm.pte_encode;
 	if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
 		ppgtt->switch_mm = gen6_mm_switch;
 	else if (IS_HASWELL(dev_priv))
@@ -2046,24 +2046,24 @@  static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	if (ret)
 		return ret;
 
-	ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
+	ppgtt->vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
 
-	gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
-	gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
+	gen6_scratch_va_range(ppgtt, 0, ppgtt->vm.total);
+	gen6_write_page_range(ppgtt, 0, ppgtt->vm.total);
 
-	ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
+	ret = gen6_alloc_va_range(&ppgtt->vm, 0, ppgtt->vm.total);
 	if (ret) {
-		gen6_ppgtt_cleanup(&ppgtt->base);
+		gen6_ppgtt_cleanup(&ppgtt->vm);
 		return ret;
 	}
 
-	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
-	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
-	ppgtt->base.bind_vma = gen6_ppgtt_bind_vma;
-	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
-	ppgtt->base.set_pages = ppgtt_set_pages;
-	ppgtt->base.clear_pages = clear_pages;
-	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
+	ppgtt->vm.clear_range = gen6_ppgtt_clear_range;
+	ppgtt->vm.insert_entries = gen6_ppgtt_insert_entries;
+	ppgtt->vm.bind_vma = gen6_ppgtt_bind_vma;
+	ppgtt->vm.unbind_vma = ppgtt_unbind_vma;
+	ppgtt->vm.set_pages = ppgtt_set_pages;
+	ppgtt->vm.clear_pages = clear_pages;
+	ppgtt->vm.cleanup = gen6_ppgtt_cleanup;
 	ppgtt->debug_dump = gen6_dump_ppgtt;
 
 	DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
@@ -2079,8 +2079,8 @@  static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
 			   struct drm_i915_private *dev_priv)
 {
-	ppgtt->base.i915 = dev_priv;
-	ppgtt->base.dma = &dev_priv->drm.pdev->dev;
+	ppgtt->vm.i915 = dev_priv;
+	ppgtt->vm.dma = &dev_priv->drm.pdev->dev;
 
 	if (INTEL_GEN(dev_priv) < 8)
 		return gen6_ppgtt_init(ppgtt);
@@ -2190,10 +2190,10 @@  i915_ppgtt_create(struct drm_i915_private *dev_priv,
 	}
 
 	kref_init(&ppgtt->ref);
-	i915_address_space_init(&ppgtt->base, dev_priv, name);
-	ppgtt->base.file = fpriv;
+	i915_address_space_init(&ppgtt->vm, dev_priv, name);
+	ppgtt->vm.file = fpriv;
 
-	trace_i915_ppgtt_create(&ppgtt->base);
+	trace_i915_ppgtt_create(&ppgtt->vm);
 
 	return ppgtt;
 }
@@ -2227,16 +2227,16 @@  void i915_ppgtt_release(struct kref *kref)
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(kref, struct i915_hw_ppgtt, ref);
 
-	trace_i915_ppgtt_release(&ppgtt->base);
+	trace_i915_ppgtt_release(&ppgtt->vm);
 
-	ppgtt_destroy_vma(&ppgtt->base);
+	ppgtt_destroy_vma(&ppgtt->vm);
 
-	GEM_BUG_ON(!list_empty(&ppgtt->base.active_list));
-	GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list));
-	GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list));
+	GEM_BUG_ON(!list_empty(&ppgtt->vm.active_list));
+	GEM_BUG_ON(!list_empty(&ppgtt->vm.inactive_list));
+	GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
 
-	ppgtt->base.cleanup(&ppgtt->base);
-	i915_address_space_fini(&ppgtt->base);
+	ppgtt->vm.cleanup(&ppgtt->vm);
+	i915_address_space_fini(&ppgtt->vm);
 	kfree(ppgtt);
 }
 
@@ -2332,7 +2332,7 @@  void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
 
 	i915_check_and_clear_faults(dev_priv);
 
-	ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
+	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
 
 	i915_ggtt_invalidate(dev_priv);
 }
@@ -2675,16 +2675,16 @@  static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 		struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
 
 		if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
-		    appgtt->base.allocate_va_range) {
-			ret = appgtt->base.allocate_va_range(&appgtt->base,
-							     vma->node.start,
-							     vma->size);
+		    appgtt->vm.allocate_va_range) {
+			ret = appgtt->vm.allocate_va_range(&appgtt->vm,
+							   vma->node.start,
+							   vma->size);
 			if (ret)
 				return ret;
 		}
 
-		appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
-					    pte_flags);
+		appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level,
+					  pte_flags);
 	}
 
 	if (flags & I915_VMA_GLOBAL_BIND) {
@@ -2707,7 +2707,7 @@  static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
 	}
 
 	if (vma->flags & I915_VMA_LOCAL_BIND) {
-		struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
+		struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;
 
 		vm->clear_range(vm, vma->node.start, vma->size);
 	}
@@ -2774,30 +2774,30 @@  int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
 	if (IS_ERR(ppgtt))
 		return PTR_ERR(ppgtt);
 
-	if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
+	if (WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
 		err = -ENODEV;
 		goto err_ppgtt;
 	}
 
-	if (ppgtt->base.allocate_va_range) {
+	if (ppgtt->vm.allocate_va_range) {
 		/* Note we only pre-allocate as far as the end of the global
 		 * GTT. On 48b / 4-level page-tables, the difference is very,
 		 * very significant! We have to preallocate as GVT/vgpu does
 		 * not like the page directory disappearing.
 		 */
-		err = ppgtt->base.allocate_va_range(&ppgtt->base,
-						    0, ggtt->base.total);
+		err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
+						  0, ggtt->vm.total);
 		if (err)
 			goto err_ppgtt;
 	}
 
 	i915->mm.aliasing_ppgtt = ppgtt;
 
-	GEM_BUG_ON(ggtt->base.bind_vma != ggtt_bind_vma);
-	ggtt->base.bind_vma = aliasing_gtt_bind_vma;
+	GEM_BUG_ON(ggtt->vm.bind_vma != ggtt_bind_vma);
+	ggtt->vm.bind_vma = aliasing_gtt_bind_vma;
 
-	GEM_BUG_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
-	ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
+	GEM_BUG_ON(ggtt->vm.unbind_vma != ggtt_unbind_vma);
+	ggtt->vm.unbind_vma = aliasing_gtt_unbind_vma;
 
 	return 0;
 
@@ -2817,8 +2817,8 @@  void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
 
 	i915_ppgtt_put(ppgtt);
 
-	ggtt->base.bind_vma = ggtt_bind_vma;
-	ggtt->base.unbind_vma = ggtt_unbind_vma;
+	ggtt->vm.bind_vma = ggtt_bind_vma;
+	ggtt->vm.unbind_vma = ggtt_unbind_vma;
 }
 
 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
@@ -2842,7 +2842,7 @@  int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 		return ret;
 
 	/* Reserve a mappable slot for our lockless error capture */
-	ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
+	ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
 					  PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
 					  0, ggtt->mappable_end,
 					  DRM_MM_INSERT_LOW);
@@ -2850,16 +2850,15 @@  int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 		return ret;
 
 	/* Clear any non-preallocated blocks */
-	drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
+	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
 		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
 			      hole_start, hole_end);
-		ggtt->base.clear_range(&ggtt->base, hole_start,
-				       hole_end - hole_start);
+		ggtt->vm.clear_range(&ggtt->vm, hole_start,
+				     hole_end - hole_start);
 	}
 
 	/* And finally clear the reserved guard page */
-	ggtt->base.clear_range(&ggtt->base,
-			       ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
+	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
 
 	if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
 		ret = i915_gem_init_aliasing_ppgtt(dev_priv);
@@ -2884,11 +2883,11 @@  void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 	struct i915_vma *vma, *vn;
 	struct pagevec *pvec;
 
-	ggtt->base.closed = true;
+	ggtt->vm.closed = true;
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
-	GEM_BUG_ON(!list_empty(&ggtt->base.active_list));
-	list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
+	GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
+	list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link)
 		WARN_ON(i915_vma_unbind(vma));
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
@@ -2900,12 +2899,12 @@  void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 	if (drm_mm_node_allocated(&ggtt->error_capture))
 		drm_mm_remove_node(&ggtt->error_capture);
 
-	if (drm_mm_initialized(&ggtt->base.mm)) {
+	if (drm_mm_initialized(&ggtt->vm.mm)) {
 		intel_vgt_deballoon(dev_priv);
-		i915_address_space_fini(&ggtt->base);
+		i915_address_space_fini(&ggtt->vm);
 	}
 
-	ggtt->base.cleanup(&ggtt->base);
+	ggtt->vm.cleanup(&ggtt->vm);
 
 	pvec = &dev_priv->mm.wc_stash;
 	if (pvec->nr) {
@@ -2955,7 +2954,7 @@  static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
 
 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 {
-	struct drm_i915_private *dev_priv = ggtt->base.i915;
+	struct drm_i915_private *dev_priv = ggtt->vm.i915;
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	phys_addr_t phys_addr;
 	int ret;
@@ -2979,7 +2978,7 @@  static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 		return -ENOMEM;
 	}
 
-	ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
+	ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
 	if (ret) {
 		DRM_ERROR("Scratch setup failed\n");
 		/* iounmap will also get called at remove, but meh */
@@ -3285,7 +3284,7 @@  static void setup_private_pat(struct drm_i915_private *dev_priv)
 
 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 {
-	struct drm_i915_private *dev_priv = ggtt->base.i915;
+	struct drm_i915_private *dev_priv = ggtt->vm.i915;
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	unsigned int size;
 	u16 snb_gmch_ctl;
@@ -3309,25 +3308,25 @@  static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 	else
 		size = gen8_get_total_gtt_size(snb_gmch_ctl);
 
-	ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
-	ggtt->base.cleanup = gen6_gmch_remove;
-	ggtt->base.bind_vma = ggtt_bind_vma;
-	ggtt->base.unbind_vma = ggtt_unbind_vma;
-	ggtt->base.set_pages = ggtt_set_pages;
-	ggtt->base.clear_pages = clear_pages;
-	ggtt->base.insert_page = gen8_ggtt_insert_page;
-	ggtt->base.clear_range = nop_clear_range;
+	ggtt->vm.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
+	ggtt->vm.cleanup = gen6_gmch_remove;
+	ggtt->vm.bind_vma = ggtt_bind_vma;
+	ggtt->vm.unbind_vma = ggtt_unbind_vma;
+	ggtt->vm.set_pages = ggtt_set_pages;
+	ggtt->vm.clear_pages = clear_pages;
+	ggtt->vm.insert_page = gen8_ggtt_insert_page;
+	ggtt->vm.clear_range = nop_clear_range;
 	if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
-		ggtt->base.clear_range = gen8_ggtt_clear_range;
+		ggtt->vm.clear_range = gen8_ggtt_clear_range;
 
-	ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
 
 	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
 	if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
-		ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
-		ggtt->base.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
-		if (ggtt->base.clear_range != nop_clear_range)
-			ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+		ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
+		if (ggtt->vm.clear_range != nop_clear_range)
+			ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
 	}
 
 	ggtt->invalidate = gen6_ggtt_invalidate;
@@ -3339,7 +3338,7 @@  static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 
 static int gen6_gmch_probe(struct i915_ggtt *ggtt)
 {
-	struct drm_i915_private *dev_priv = ggtt->base.i915;
+	struct drm_i915_private *dev_priv = ggtt->vm.i915;
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	unsigned int size;
 	u16 snb_gmch_ctl;
@@ -3366,29 +3365,29 @@  static int gen6_gmch_probe(struct i915_ggtt *ggtt)
 	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
 	size = gen6_get_total_gtt_size(snb_gmch_ctl);
-	ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
+	ggtt->vm.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
 
-	ggtt->base.clear_range = gen6_ggtt_clear_range;
-	ggtt->base.insert_page = gen6_ggtt_insert_page;
-	ggtt->base.insert_entries = gen6_ggtt_insert_entries;
-	ggtt->base.bind_vma = ggtt_bind_vma;
-	ggtt->base.unbind_vma = ggtt_unbind_vma;
-	ggtt->base.set_pages = ggtt_set_pages;
-	ggtt->base.clear_pages = clear_pages;
-	ggtt->base.cleanup = gen6_gmch_remove;
+	ggtt->vm.clear_range = gen6_ggtt_clear_range;
+	ggtt->vm.insert_page = gen6_ggtt_insert_page;
+	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+	ggtt->vm.bind_vma = ggtt_bind_vma;
+	ggtt->vm.unbind_vma = ggtt_unbind_vma;
+	ggtt->vm.set_pages = ggtt_set_pages;
+	ggtt->vm.clear_pages = clear_pages;
+	ggtt->vm.cleanup = gen6_gmch_remove;
 
 	ggtt->invalidate = gen6_ggtt_invalidate;
 
 	if (HAS_EDRAM(dev_priv))
-		ggtt->base.pte_encode = iris_pte_encode;
+		ggtt->vm.pte_encode = iris_pte_encode;
 	else if (IS_HASWELL(dev_priv))
-		ggtt->base.pte_encode = hsw_pte_encode;
+		ggtt->vm.pte_encode = hsw_pte_encode;
 	else if (IS_VALLEYVIEW(dev_priv))
-		ggtt->base.pte_encode = byt_pte_encode;
+		ggtt->vm.pte_encode = byt_pte_encode;
 	else if (INTEL_GEN(dev_priv) >= 7)
-		ggtt->base.pte_encode = ivb_pte_encode;
+		ggtt->vm.pte_encode = ivb_pte_encode;
 	else
-		ggtt->base.pte_encode = snb_pte_encode;
+		ggtt->vm.pte_encode = snb_pte_encode;
 
 	return ggtt_probe_common(ggtt, size);
 }
@@ -3400,7 +3399,7 @@  static void i915_gmch_remove(struct i915_address_space *vm)
 
 static int i915_gmch_probe(struct i915_ggtt *ggtt)
 {
-	struct drm_i915_private *dev_priv = ggtt->base.i915;
+	struct drm_i915_private *dev_priv = ggtt->vm.i915;
 	phys_addr_t gmadr_base;
 	int ret;
 
@@ -3410,23 +3409,21 @@  static int i915_gmch_probe(struct i915_ggtt *ggtt)
 		return -EIO;
 	}
 
-	intel_gtt_get(&ggtt->base.total,
-		      &gmadr_base,
-		      &ggtt->mappable_end);
+	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
 
 	ggtt->gmadr =
 		(struct resource) DEFINE_RES_MEM(gmadr_base,
 						 ggtt->mappable_end);
 
 	ggtt->do_idle_maps = needs_idle_maps(dev_priv);
-	ggtt->base.insert_page = i915_ggtt_insert_page;
-	ggtt->base.insert_entries = i915_ggtt_insert_entries;
-	ggtt->base.clear_range = i915_ggtt_clear_range;
-	ggtt->base.bind_vma = ggtt_bind_vma;
-	ggtt->base.unbind_vma = ggtt_unbind_vma;
-	ggtt->base.set_pages = ggtt_set_pages;
-	ggtt->base.clear_pages = clear_pages;
-	ggtt->base.cleanup = i915_gmch_remove;
+	ggtt->vm.insert_page = i915_ggtt_insert_page;
+	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
+	ggtt->vm.clear_range = i915_ggtt_clear_range;
+	ggtt->vm.bind_vma = ggtt_bind_vma;
+	ggtt->vm.unbind_vma = ggtt_unbind_vma;
+	ggtt->vm.set_pages = ggtt_set_pages;
+	ggtt->vm.clear_pages = clear_pages;
+	ggtt->vm.cleanup = i915_gmch_remove;
 
 	ggtt->invalidate = gmch_ggtt_invalidate;
 
@@ -3445,8 +3442,8 @@  int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	int ret;
 
-	ggtt->base.i915 = dev_priv;
-	ggtt->base.dma = &dev_priv->drm.pdev->dev;
+	ggtt->vm.i915 = dev_priv;
+	ggtt->vm.dma = &dev_priv->drm.pdev->dev;
 
 	if (INTEL_GEN(dev_priv) <= 5)
 		ret = i915_gmch_probe(ggtt);
@@ -3463,27 +3460,29 @@  int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
 	 * restriction!
 	 */
 	if (USES_GUC(dev_priv)) {
-		ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
-		ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
+		ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP);
+		ggtt->mappable_end =
+			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
 	}
 
-	if ((ggtt->base.total - 1) >> 32) {
+	if ((ggtt->vm.total - 1) >> 32) {
 		DRM_ERROR("We never expected a Global GTT with more than 32bits"
 			  " of address space! Found %lldM!\n",
-			  ggtt->base.total >> 20);
-		ggtt->base.total = 1ULL << 32;
-		ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
+			  ggtt->vm.total >> 20);
+		ggtt->vm.total = 1ULL << 32;
+		ggtt->mappable_end =
+			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
 	}
 
-	if (ggtt->mappable_end > ggtt->base.total) {
+	if (ggtt->mappable_end > ggtt->vm.total) {
 		DRM_ERROR("mappable aperture extends past end of GGTT,"
 			  " aperture=%pa, total=%llx\n",
-			  &ggtt->mappable_end, ggtt->base.total);
-		ggtt->mappable_end = ggtt->base.total;
+			  &ggtt->mappable_end, ggtt->vm.total);
+		ggtt->mappable_end = ggtt->vm.total;
 	}
 
 	/* GMADR is the PCI mmio aperture into the global GTT. */
-	DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->base.total >> 20);
+	DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
 	DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
 	DRM_DEBUG_DRIVER("DSM size = %lluM\n",
 			 (u64)resource_size(&intel_graphics_stolen_res) >> 20);
@@ -3510,9 +3509,9 @@  int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
 	 * and beyond the end of the GTT if we do not provide a guard.
 	 */
 	mutex_lock(&dev_priv->drm.struct_mutex);
-	i915_address_space_init(&ggtt->base, dev_priv, "[global]");
+	i915_address_space_init(&ggtt->vm, dev_priv, "[global]");
 	if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
-		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
+		ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
@@ -3535,7 +3534,7 @@  int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
 	return 0;
 
 out_gtt_cleanup:
-	ggtt->base.cleanup(&ggtt->base);
+	ggtt->vm.cleanup(&ggtt->vm);
 	return ret;
 }
 
@@ -3574,9 +3573,9 @@  void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 	i915_check_and_clear_faults(dev_priv);
 
 	/* First fill our portion of the GTT with scratch pages */
-	ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
+	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
 
-	ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
+	ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
 
 	/* clflush objects bound into the GGTT and rebind them. */
 	list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
@@ -3596,7 +3595,7 @@  void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
 	}
 
-	ggtt->base.closed = false;
+	ggtt->vm.closed = false;
 
 	if (INTEL_GEN(dev_priv) >= 8) {
 		struct intel_ppat *ppat = &dev_priv->ppat;
@@ -3619,7 +3618,7 @@  void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 			if (!ppgtt)
 				continue;
 
-			gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
+			gen6_write_page_range(ppgtt, 0, ppgtt->vm.total);
 		}
 	}
 
@@ -3841,7 +3840,7 @@  int i915_gem_gtt_reserve(struct i915_address_space *vm,
 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
 	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
 	GEM_BUG_ON(range_overflows(offset, size, vm->total));
-	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
 	GEM_BUG_ON(drm_mm_node_allocated(node));
 
 	node->size = size;
@@ -3938,7 +3937,7 @@  int i915_gem_gtt_insert(struct i915_address_space *vm,
 	GEM_BUG_ON(start >= end);
 	GEM_BUG_ON(start > 0  && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
 	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
-	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
 	GEM_BUG_ON(drm_mm_node_allocated(node));
 
 	if (unlikely(range_overflows(start, size, end)))
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index aec4f73574f4..197c2c06ecb7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -65,7 +65,7 @@  typedef u64 gen8_pde_t;
 typedef u64 gen8_ppgtt_pdpe_t;
 typedef u64 gen8_ppgtt_pml4e_t;
 
-#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
+#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
 
 /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
 #define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
@@ -367,7 +367,7 @@  i915_vm_has_scratch_64K(struct i915_address_space *vm)
  * the spec.
  */
 struct i915_ggtt {
-	struct i915_address_space base;
+	struct i915_address_space vm;
 
 	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
 	struct resource gmadr;          /* GMADR resource */
@@ -385,7 +385,7 @@  struct i915_ggtt {
 };
 
 struct i915_hw_ppgtt {
-	struct i915_address_space base;
+	struct i915_address_space vm;
 	struct kref ref;
 	struct drm_mm_node node;
 	unsigned long pd_dirty_rings;
@@ -543,7 +543,7 @@  static inline struct i915_ggtt *
 i915_vm_to_ggtt(struct i915_address_space *vm)
 {
 	GEM_BUG_ON(!i915_is_ggtt(vm));
-	return container_of(vm, struct i915_ggtt, base);
+	return container_of(vm, struct i915_ggtt, vm);
 }
 
 #define INTEL_MAX_PPAT_ENTRIES 8
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 1036e8686916..3210cedfa46c 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -194,7 +194,7 @@  int i915_gem_render_state_emit(struct i915_request *rq)
 	if (IS_ERR(so.obj))
 		return PTR_ERR(so.obj);
 
-	so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.base, NULL);
+	so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
 	if (IS_ERR(so.vma)) {
 		err = PTR_ERR(so.vma);
 		goto err_obj;
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 5757fb7c4b5a..55e84e71f526 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -480,7 +480,7 @@  i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 
 	/* We also want to clear any cached iomaps as they wrap vmap */
 	list_for_each_entry_safe(vma, next,
-				 &i915->ggtt.base.inactive_list, vm_link) {
+				 &i915->ggtt.vm.inactive_list, vm_link) {
 		unsigned long count = vma->node.size >> PAGE_SHIFT;
 		if (vma->iomap && i915_vma_unbind(vma) == 0)
 			freed_pages += count;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index ad949cc30928..79a347295e00 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -642,7 +642,7 @@  i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	if (ret)
 		goto err;
 
-	vma = i915_vma_instance(obj, &ggtt->base, NULL);
+	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto err_pages;
@@ -653,7 +653,7 @@  i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	 * setting up the GTT space. The actual reservation will occur
 	 * later.
 	 */
-	ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
+	ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
 				   size, gtt_offset, obj->cache_level,
 				   0);
 	if (ret) {
@@ -666,7 +666,7 @@  i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	vma->pages = obj->mm.pages;
 	vma->flags |= I915_VMA_GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
-	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+	list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list);
 
 	spin_lock(&dev_priv->mm.obj_lock);
 	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 47721437a4c5..cd09a1688192 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -973,8 +973,7 @@  i915_error_object_create(struct drm_i915_private *i915,
 		void __iomem *s;
 		int ret;
 
-		ggtt->base.insert_page(&ggtt->base, dma, slot,
-				       I915_CACHE_NONE, 0);
+		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
 
 		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
 		ret = compress_page(&compress, (void  __force *)s, dst);
@@ -993,7 +992,7 @@  i915_error_object_create(struct drm_i915_private *i915,
 
 out:
 	compress_fini(&compress, dst);
-	ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
+	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
 	return dst;
 }
 
@@ -1466,7 +1465,7 @@  static void gem_record_rings(struct i915_gpu_state *error)
 			struct i915_gem_context *ctx = request->gem_context;
 			struct intel_ring *ring;
 
-			ee->vm = ctx->ppgtt ? &ctx->ppgtt->base : &ggtt->base;
+			ee->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt->vm;
 
 			record_context(&ee->context, ctx);
 
@@ -1564,7 +1563,7 @@  static void capture_active_buffers(struct i915_gpu_state *error)
 
 static void capture_pinned_buffers(struct i915_gpu_state *error)
 {
-	struct i915_address_space *vm = &error->i915->ggtt.base;
+	struct i915_address_space *vm = &error->i915->ggtt.vm;
 	struct drm_i915_error_buffer *bo;
 	struct i915_vma *vma;
 	int count_inactive, count_active;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 5d4f78765083..03299bae45a2 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -936,7 +936,7 @@  DECLARE_EVENT_CLASS(i915_context,
 			__entry->dev = ctx->i915->drm.primary->index;
 			__entry->ctx = ctx;
 			__entry->hw_id = ctx->hw_id;
-			__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
+			__entry->vm = ctx->ppgtt ? &ctx->ppgtt->vm : NULL;
 	),
 
 	TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
@@ -975,7 +975,7 @@  TRACE_EVENT(switch_mm,
 	TP_fast_assign(
 			__entry->ring = engine->id;
 			__entry->to = to;
-			__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
+			__entry->vm = to->ppgtt? &to->ppgtt->vm : NULL;
 			__entry->dev = engine->i915->drm.primary->index;
 	),
 
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 5fe9f3f39467..869cf4a3b6de 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -105,7 +105,7 @@  static void vgt_deballoon_space(struct i915_ggtt *ggtt,
 			 node->start + node->size,
 			 node->size / 1024);
 
-	ggtt->base.reserved -= node->size;
+	ggtt->vm.reserved -= node->size;
 	drm_mm_remove_node(node);
 }
 
@@ -141,11 +141,11 @@  static int vgt_balloon_space(struct i915_ggtt *ggtt,
 
 	DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
 		 start, end, size / 1024);
-	ret = i915_gem_gtt_reserve(&ggtt->base, node,
+	ret = i915_gem_gtt_reserve(&ggtt->vm, node,
 				   size, start, I915_COLOR_UNEVICTABLE,
 				   0);
 	if (!ret)
-		ggtt->base.reserved += size;
+		ggtt->vm.reserved += size;
 
 	return ret;
 }
@@ -197,7 +197,7 @@  static int vgt_balloon_space(struct i915_ggtt *ggtt,
 int intel_vgt_balloon(struct drm_i915_private *dev_priv)
 {
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	unsigned long ggtt_end = ggtt->base.total;
+	unsigned long ggtt_end = ggtt->vm.total;
 
 	unsigned long mappable_base, mappable_size, mappable_end;
 	unsigned long unmappable_base, unmappable_size, unmappable_end;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 9324d476e0a7..e8f07cdca063 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -85,7 +85,7 @@  vma_create(struct drm_i915_gem_object *obj,
 	int i;
 
 	/* The aliasing_ppgtt should never be used directly! */
-	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
 
 	vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
 	if (vma == NULL)
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 13448ea76f57..2ec2e60dc670 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -515,7 +515,7 @@  int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
 		return PTR_ERR(obj);
 	}
 
-	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto err_unref;
@@ -585,7 +585,7 @@  static int init_status_page(struct intel_engine_cs *engine)
 	if (ret)
 		goto err;
 
-	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto err;
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index e28a996b9604..29fd95c1306b 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -570,7 +570,7 @@  struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
-	vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
+	vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
 	if (IS_ERR(vma))
 		goto err;
 
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 133367a17863..e271e296b9da 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -536,7 +536,7 @@  static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
  */
 static void flush_ggtt_writes(struct i915_vma *vma)
 {
-	struct drm_i915_private *dev_priv = to_i915(vma->obj->base.dev);
+	struct drm_i915_private *dev_priv = vma->vm->i915;
 
 	if (i915_vma_is_map_and_fenceable(vma))
 		POSTING_READ_FW(GUC_STATUS);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index eb25afa9694f..091e28f0e024 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -431,7 +431,7 @@  static u64 execlists_update_context(struct i915_request *rq)
 	 * PML4 is allocated during ppgtt init, so this is not needed
 	 * in 48-bit mode.
 	 */
-	if (ppgtt && !i915_vm_is_48bit(&ppgtt->base))
+	if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
 		execlists_update_context_pdps(ppgtt, reg_state);
 
 	return ce->lrc_desc;
@@ -1671,7 +1671,7 @@  static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto err;
@@ -2069,7 +2069,7 @@  static int gen8_emit_bb_start(struct i915_request *rq,
 	 * not needed in 48-bit.*/
 	if (rq->gem_context->ppgtt &&
 	    (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
-	    !i915_vm_is_48bit(&rq->gem_context->ppgtt->base) &&
+	    !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
 	    !intel_vgpu_active(rq->i915)) {
 		ret = intel_logical_ring_emit_pdps(rq);
 		if (ret)
@@ -2667,7 +2667,7 @@  static void execlists_init_reg_state(u32 *regs,
 	CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
 	CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
 
-	if (ppgtt && i915_vm_is_48bit(&ppgtt->base)) {
+	if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) {
 		/* 64b PPGTT (48bit canonical)
 		 * PDP0_DESCRIPTOR contains the base address to PML4 and
 		 * other PDP Descriptors are ignored.
@@ -2773,7 +2773,7 @@  static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 		goto error_deref_obj;
 	}
 
-	vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
+	vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto error_deref_obj;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 97b38bbb7ce2..fa517a3e3c25 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1123,7 +1123,7 @@  intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
 	/* mark ring buffers as read-only from GPU side by default */
 	obj->gt_ro = 1;
 
-	vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
+	vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
 	if (IS_ERR(vma))
 		goto err;
 
@@ -1279,7 +1279,7 @@  alloc_context_vma(struct intel_engine_cs *engine)
 		i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
 	}
 
-	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto err_obj;
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 91c72911be3c..01ab60f1a7e8 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -338,7 +338,7 @@  fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
 
 static int igt_check_page_sizes(struct i915_vma *vma)
 {
-	struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
+	struct drm_i915_private *i915 = vma->vm->i915;
 	unsigned int supported = INTEL_INFO(i915)->page_sizes;
 	struct drm_i915_gem_object *obj = vma->obj;
 	int err = 0;
@@ -379,7 +379,7 @@  static int igt_check_page_sizes(struct i915_vma *vma)
 static int igt_mock_exhaust_device_supported_pages(void *arg)
 {
 	struct i915_hw_ppgtt *ppgtt = arg;
-	struct drm_i915_private *i915 = ppgtt->base.i915;
+	struct drm_i915_private *i915 = ppgtt->vm.i915;
 	unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
@@ -415,7 +415,7 @@  static int igt_mock_exhaust_device_supported_pages(void *arg)
 				goto out_put;
 			}
 
-			vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
 			if (IS_ERR(vma)) {
 				err = PTR_ERR(vma);
 				goto out_put;
@@ -458,7 +458,7 @@  static int igt_mock_exhaust_device_supported_pages(void *arg)
 static int igt_mock_ppgtt_misaligned_dma(void *arg)
 {
 	struct i915_hw_ppgtt *ppgtt = arg;
-	struct drm_i915_private *i915 = ppgtt->base.i915;
+	struct drm_i915_private *i915 = ppgtt->vm.i915;
 	unsigned long supported = INTEL_INFO(i915)->page_sizes;
 	struct drm_i915_gem_object *obj;
 	int bit;
@@ -500,7 +500,7 @@  static int igt_mock_ppgtt_misaligned_dma(void *arg)
 		/* Force the page size for this object */
 		obj->mm.page_sizes.sg = page_size;
 
-		vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out_unpin;
@@ -591,7 +591,7 @@  static void close_object_list(struct list_head *objects,
 	list_for_each_entry_safe(obj, on, objects, st_link) {
 		struct i915_vma *vma;
 
-		vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
 		if (!IS_ERR(vma))
 			i915_vma_close(vma);
 
@@ -604,8 +604,8 @@  static void close_object_list(struct list_head *objects,
 static int igt_mock_ppgtt_huge_fill(void *arg)
 {
 	struct i915_hw_ppgtt *ppgtt = arg;
-	struct drm_i915_private *i915 = ppgtt->base.i915;
-	unsigned long max_pages = ppgtt->base.total >> PAGE_SHIFT;
+	struct drm_i915_private *i915 = ppgtt->vm.i915;
+	unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
 	unsigned long page_num;
 	bool single = false;
 	LIST_HEAD(objects);
@@ -641,7 +641,7 @@  static int igt_mock_ppgtt_huge_fill(void *arg)
 
 		list_add(&obj->st_link, &objects);
 
-		vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			break;
@@ -725,7 +725,7 @@  static int igt_mock_ppgtt_huge_fill(void *arg)
 static int igt_mock_ppgtt_64K(void *arg)
 {
 	struct i915_hw_ppgtt *ppgtt = arg;
-	struct drm_i915_private *i915 = ppgtt->base.i915;
+	struct drm_i915_private *i915 = ppgtt->vm.i915;
 	struct drm_i915_gem_object *obj;
 	const struct object_info {
 		unsigned int size;
@@ -819,7 +819,7 @@  static int igt_mock_ppgtt_64K(void *arg)
 			 */
 			obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
 
-			vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
 			if (IS_ERR(vma)) {
 				err = PTR_ERR(vma);
 				goto out_object_unpin;
@@ -887,8 +887,8 @@  static int igt_mock_ppgtt_64K(void *arg)
 static struct i915_vma *
 gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
 {
-	struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
-	const int gen = INTEL_GEN(vma->vm->i915);
+	struct drm_i915_private *i915 = vma->vm->i915;
+	const int gen = INTEL_GEN(i915);
 	unsigned int count = vma->size >> PAGE_SHIFT;
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *batch;
@@ -1047,7 +1047,8 @@  static int __igt_write_huge(struct i915_gem_context *ctx,
 			    u32 dword, u32 val)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+	struct i915_address_space *vm =
+		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
 	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
 	struct i915_vma *vma;
 	int err;
@@ -1100,7 +1101,8 @@  static int igt_write_huge(struct i915_gem_context *ctx,
 			  struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+	struct i915_address_space *vm =
+		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
 	static struct intel_engine_cs *engines[I915_NUM_ENGINES];
 	struct intel_engine_cs *engine;
 	I915_RND_STATE(prng);
@@ -1439,7 +1441,7 @@  static int igt_ppgtt_pin_update(void *arg)
 		if (IS_ERR(obj))
 			return PTR_ERR(obj);
 
-		vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out_put;
@@ -1493,7 +1495,7 @@  static int igt_ppgtt_pin_update(void *arg)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+	vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto out_put;
@@ -1531,7 +1533,8 @@  static int igt_tmpfs_fallback(void *arg)
 	struct i915_gem_context *ctx = arg;
 	struct drm_i915_private *i915 = ctx->i915;
 	struct vfsmount *gemfs = i915->mm.gemfs;
-	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+	struct i915_address_space *vm =
+		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 	u32 *vaddr;
@@ -1587,7 +1590,8 @@  static int igt_shrink_thp(void *arg)
 {
 	struct i915_gem_context *ctx = arg;
 	struct drm_i915_private *i915 = ctx->i915;
-	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+	struct i915_address_space *vm =
+		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 	unsigned int flags = PIN_USER;
@@ -1696,14 +1700,14 @@  int i915_gem_huge_page_mock_selftests(void)
 		goto out_unlock;
 	}
 
-	if (!i915_vm_is_48bit(&ppgtt->base)) {
+	if (!i915_vm_is_48bit(&ppgtt->vm)) {
 		pr_err("failed to create 48b PPGTT\n");
 		err = -EINVAL;
 		goto out_close;
 	}
 
 	/* If we were ever hit this then it's time to mock the 64K scratch */
-	if (!i915_vm_has_scratch_64K(&ppgtt->base)) {
+	if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
 		pr_err("PPGTT missing 64K scratch page\n");
 		err = -EINVAL;
 		goto out_close;
@@ -1712,7 +1716,7 @@  int i915_gem_huge_page_mock_selftests(void)
 	err = i915_subtests(tests, ppgtt);
 
 out_close:
-	i915_ppgtt_close(&ppgtt->base);
+	i915_ppgtt_close(&ppgtt->vm);
 	i915_ppgtt_put(ppgtt);
 
 out_unlock:
@@ -1758,7 +1762,7 @@  int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
 	}
 
 	if (ctx->ppgtt)
-		ctx->ppgtt->base.scrub_64K = true;
+		ctx->ppgtt->vm.scrub_64K = true;
 
 	err = i915_subtests(tests, ctx);
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index b39392a00a6f..708e8d721448 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -115,7 +115,7 @@  static int gpu_fill(struct drm_i915_gem_object *obj,
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_address_space *vm =
-		ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
 	struct i915_request *rq;
 	struct i915_vma *vma;
 	struct i915_vma *batch;
@@ -290,7 +290,7 @@  create_test_object(struct i915_gem_context *ctx,
 {
 	struct drm_i915_gem_object *obj;
 	struct i915_address_space *vm =
-		ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
+		ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
 	u64 size;
 	int err;
 
@@ -557,7 +557,7 @@  static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
 	list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
 		struct i915_vma *vma;
 
-		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 		if (IS_ERR(vma))
 			continue;
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index ab9d7bee0aae..2dc72a984d45 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -35,7 +35,7 @@  static int populate_ggtt(struct drm_i915_private *i915)
 	u64 size;
 
 	for (size = 0;
-	     size + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+	     size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
 	     size += I915_GTT_PAGE_SIZE) {
 		struct i915_vma *vma;
 
@@ -57,7 +57,7 @@  static int populate_ggtt(struct drm_i915_private *i915)
 		return -EINVAL;
 	}
 
-	if (list_empty(&i915->ggtt.base.inactive_list)) {
+	if (list_empty(&i915->ggtt.vm.inactive_list)) {
 		pr_err("No objects on the GGTT inactive list!\n");
 		return -EINVAL;
 	}
@@ -69,7 +69,7 @@  static void unpin_ggtt(struct drm_i915_private *i915)
 {
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &i915->ggtt.base.inactive_list, vm_link)
+	list_for_each_entry(vma, &i915->ggtt.vm.inactive_list, vm_link)
 		i915_vma_unpin(vma);
 }
 
@@ -103,7 +103,7 @@  static int igt_evict_something(void *arg)
 		goto cleanup;
 
 	/* Everything is pinned, nothing should happen */
-	err = i915_gem_evict_something(&ggtt->base,
+	err = i915_gem_evict_something(&ggtt->vm,
 				       I915_GTT_PAGE_SIZE, 0, 0,
 				       0, U64_MAX,
 				       0);
@@ -116,7 +116,7 @@  static int igt_evict_something(void *arg)
 	unpin_ggtt(i915);
 
 	/* Everything is unpinned, we should be able to evict something */
-	err = i915_gem_evict_something(&ggtt->base,
+	err = i915_gem_evict_something(&ggtt->vm,
 				       I915_GTT_PAGE_SIZE, 0, 0,
 				       0, U64_MAX,
 				       0);
@@ -181,7 +181,7 @@  static int igt_evict_for_vma(void *arg)
 		goto cleanup;
 
 	/* Everything is pinned, nothing should happen */
-	err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
 	if (err != -ENOSPC) {
 		pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
 		       err);
@@ -191,7 +191,7 @@  static int igt_evict_for_vma(void *arg)
 	unpin_ggtt(i915);
 
 	/* Everything is unpinned, we should be able to evict the node */
-	err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
 	if (err) {
 		pr_err("i915_gem_evict_for_node returned err=%d\n",
 		       err);
@@ -229,7 +229,7 @@  static int igt_evict_for_cache_color(void *arg)
 	 * i915_gtt_color_adjust throughout our driver, so using a mock color
 	 * adjust will work just fine for our purposes.
 	 */
-	ggtt->base.mm.color_adjust = mock_color_adjust;
+	ggtt->vm.mm.color_adjust = mock_color_adjust;
 
 	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
 	if (IS_ERR(obj)) {
@@ -265,7 +265,7 @@  static int igt_evict_for_cache_color(void *arg)
 	i915_vma_unpin(vma);
 
 	/* Remove just the second vma */
-	err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
 	if (err) {
 		pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
 		goto cleanup;
@@ -276,7 +276,7 @@  static int igt_evict_for_cache_color(void *arg)
 	 */
 	target.color = I915_CACHE_L3_LLC;
 
-	err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
 	if (!err) {
 		pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
 		err = -EINVAL;
@@ -288,7 +288,7 @@  static int igt_evict_for_cache_color(void *arg)
 cleanup:
 	unpin_ggtt(i915);
 	cleanup_objects(i915);
-	ggtt->base.mm.color_adjust = NULL;
+	ggtt->vm.mm.color_adjust = NULL;
 	return err;
 }
 
@@ -305,7 +305,7 @@  static int igt_evict_vm(void *arg)
 		goto cleanup;
 
 	/* Everything is pinned, nothing should happen */
-	err = i915_gem_evict_vm(&ggtt->base);
+	err = i915_gem_evict_vm(&ggtt->vm);
 	if (err) {
 		pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
 		       err);
@@ -314,7 +314,7 @@  static int igt_evict_vm(void *arg)
 
 	unpin_ggtt(i915);
 
-	err = i915_gem_evict_vm(&ggtt->base);
+	err = i915_gem_evict_vm(&ggtt->vm);
 	if (err) {
 		pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
 		       err);
@@ -359,9 +359,9 @@  static int igt_evict_contexts(void *arg)
 
 	/* Reserve a block so that we know we have enough to fit a few rq */
 	memset(&hole, 0, sizeof(hole));
-	err = i915_gem_gtt_insert(&i915->ggtt.base, &hole,
+	err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
 				  PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
-				  0, i915->ggtt.base.total,
+				  0, i915->ggtt.vm.total,
 				  PIN_NOEVICT);
 	if (err)
 		goto out_locked;
@@ -377,9 +377,9 @@  static int igt_evict_contexts(void *arg)
 			goto out_locked;
 		}
 
-		if (i915_gem_gtt_insert(&i915->ggtt.base, &r->node,
+		if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
 					1ul << 20, 0, I915_COLOR_UNEVICTABLE,
-					0, i915->ggtt.base.total,
+					0, i915->ggtt.vm.total,
 					PIN_NOEVICT)) {
 			kfree(r);
 			break;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index f7dc926f4ef1..58ab5e84ceb7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -151,14 +151,14 @@  static int igt_ppgtt_alloc(void *arg)
 	if (err)
 		goto err_ppgtt;
 
-	if (!ppgtt->base.allocate_va_range)
+	if (!ppgtt->vm.allocate_va_range)
 		goto err_ppgtt_cleanup;
 
 	/* Check we can allocate the entire range */
 	for (size = 4096;
-	     size <= ppgtt->base.total;
+	     size <= ppgtt->vm.total;
 	     size <<= 2) {
-		err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
+		err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
 		if (err) {
 			if (err == -ENOMEM) {
 				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
@@ -168,15 +168,15 @@  static int igt_ppgtt_alloc(void *arg)
 			goto err_ppgtt_cleanup;
 		}
 
-		ppgtt->base.clear_range(&ppgtt->base, 0, size);
+		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
 	}
 
 	/* Check we can incrementally allocate the entire range */
 	for (last = 0, size = 4096;
-	     size <= ppgtt->base.total;
+	     size <= ppgtt->vm.total;
 	     last = size, size <<= 2) {
-		err = ppgtt->base.allocate_va_range(&ppgtt->base,
-						    last, size - last);
+		err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
+						  last, size - last);
 		if (err) {
 			if (err == -ENOMEM) {
 				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
@@ -188,7 +188,7 @@  static int igt_ppgtt_alloc(void *arg)
 	}
 
 err_ppgtt_cleanup:
-	ppgtt->base.cleanup(&ppgtt->base);
+	ppgtt->vm.cleanup(&ppgtt->vm);
 err_ppgtt:
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	kfree(ppgtt);
@@ -987,12 +987,12 @@  static int exercise_ppgtt(struct drm_i915_private *dev_priv,
 		err = PTR_ERR(ppgtt);
 		goto out_unlock;
 	}
-	GEM_BUG_ON(offset_in_page(ppgtt->base.total));
-	GEM_BUG_ON(ppgtt->base.closed);
+	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
+	GEM_BUG_ON(ppgtt->vm.closed);
 
-	err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);
+	err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
 
-	i915_ppgtt_close(&ppgtt->base);
+	i915_ppgtt_close(&ppgtt->vm);
 	i915_ppgtt_put(ppgtt);
 out_unlock:
 	mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1061,18 +1061,18 @@  static int exercise_ggtt(struct drm_i915_private *i915,
 
 	mutex_lock(&i915->drm.struct_mutex);
 restart:
-	list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
-	drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
+	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
+	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
 		if (hole_start < last)
 			continue;
 
-		if (ggtt->base.mm.color_adjust)
-			ggtt->base.mm.color_adjust(node, 0,
-						   &hole_start, &hole_end);
+		if (ggtt->vm.mm.color_adjust)
+			ggtt->vm.mm.color_adjust(node, 0,
+						 &hole_start, &hole_end);
 		if (hole_start >= hole_end)
 			continue;
 
-		err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
+		err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
 		if (err)
 			break;
 
@@ -1134,7 +1134,7 @@  static int igt_ggtt_page(void *arg)
 		goto out_free;
 
 	memset(&tmp, 0, sizeof(tmp));
-	err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
+	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
 					  count * PAGE_SIZE, 0,
 					  I915_COLOR_UNEVICTABLE,
 					  0, ggtt->mappable_end,
@@ -1147,9 +1147,9 @@  static int igt_ggtt_page(void *arg)
 	for (n = 0; n < count; n++) {
 		u64 offset = tmp.start + n * PAGE_SIZE;
 
-		ggtt->base.insert_page(&ggtt->base,
-				       i915_gem_object_get_dma_address(obj, 0),
-				       offset, I915_CACHE_NONE, 0);
+		ggtt->vm.insert_page(&ggtt->vm,
+				     i915_gem_object_get_dma_address(obj, 0),
+				     offset, I915_CACHE_NONE, 0);
 	}
 
 	order = i915_random_order(count, &prng);
@@ -1188,7 +1188,7 @@  static int igt_ggtt_page(void *arg)
 
 	kfree(order);
 out_remove:
-	ggtt->base.clear_range(&ggtt->base, tmp.start, tmp.size);
+	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
 	intel_runtime_pm_put(i915);
 	drm_mm_remove_node(&tmp);
 out_unpin:
@@ -1229,7 +1229,7 @@  static int exercise_mock(struct drm_i915_private *i915,
 	ppgtt = ctx->ppgtt;
 	GEM_BUG_ON(!ppgtt);
 
-	err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);
+	err = func(i915, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
 
 	mock_context_close(ctx);
 	return err;
@@ -1270,7 +1270,7 @@  static int igt_gtt_reserve(void *arg)
 
 	/* Start by filling the GGTT */
 	for (total = 0;
-	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
 	     total += 2*I915_GTT_PAGE_SIZE) {
 		struct i915_vma *vma;
 
@@ -1288,20 +1288,20 @@  static int igt_gtt_reserve(void *arg)
 
 		list_add(&obj->st_link, &objects);
 
-		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
 		}
 
-		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
 					   obj->base.size,
 					   total,
 					   obj->cache_level,
 					   0);
 		if (err) {
 			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
-			       total, i915->ggtt.base.total, err);
+			       total, i915->ggtt.vm.total, err);
 			goto out;
 		}
 		track_vma_bind(vma);
@@ -1319,7 +1319,7 @@  static int igt_gtt_reserve(void *arg)
 
 	/* Now we start forcing evictions */
 	for (total = I915_GTT_PAGE_SIZE;
-	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
 	     total += 2*I915_GTT_PAGE_SIZE) {
 		struct i915_vma *vma;
 
@@ -1337,20 +1337,20 @@  static int igt_gtt_reserve(void *arg)
 
 		list_add(&obj->st_link, &objects);
 
-		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
 		}
 
-		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
 					   obj->base.size,
 					   total,
 					   obj->cache_level,
 					   0);
 		if (err) {
 			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
-			       total, i915->ggtt.base.total, err);
+			       total, i915->ggtt.vm.total, err);
 			goto out;
 		}
 		track_vma_bind(vma);
@@ -1371,7 +1371,7 @@  static int igt_gtt_reserve(void *arg)
 		struct i915_vma *vma;
 		u64 offset;
 
-		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
@@ -1383,18 +1383,18 @@  static int igt_gtt_reserve(void *arg)
 			goto out;
 		}
 
-		offset = random_offset(0, i915->ggtt.base.total,
+		offset = random_offset(0, i915->ggtt.vm.total,
 				       2*I915_GTT_PAGE_SIZE,
 				       I915_GTT_MIN_ALIGNMENT);
 
-		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
 					   obj->base.size,
 					   offset,
 					   obj->cache_level,
 					   0);
 		if (err) {
 			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
-			       total, i915->ggtt.base.total, err);
+			       total, i915->ggtt.vm.total, err);
 			goto out;
 		}
 		track_vma_bind(vma);
@@ -1429,8 +1429,8 @@  static int igt_gtt_insert(void *arg)
 		u64 start, end;
 	} invalid_insert[] = {
 		{
-			i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
-			0, i915->ggtt.base.total,
+			i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0,
+			0, i915->ggtt.vm.total,
 		},
 		{
 			2*I915_GTT_PAGE_SIZE, 0,
@@ -1460,7 +1460,7 @@  static int igt_gtt_insert(void *arg)
 
 	/* Check a couple of obviously invalid requests */
 	for (ii = invalid_insert; ii->size; ii++) {
-		err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
+		err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp,
 					  ii->size, ii->alignment,
 					  I915_COLOR_UNEVICTABLE,
 					  ii->start, ii->end,
@@ -1475,7 +1475,7 @@  static int igt_gtt_insert(void *arg)
 
 	/* Start by filling the GGTT */
 	for (total = 0;
-	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
 	     total += I915_GTT_PAGE_SIZE) {
 		struct i915_vma *vma;
 
@@ -1493,15 +1493,15 @@  static int igt_gtt_insert(void *arg)
 
 		list_add(&obj->st_link, &objects);
 
-		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
 		}
 
-		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
 					  obj->base.size, 0, obj->cache_level,
-					  0, i915->ggtt.base.total,
+					  0, i915->ggtt.vm.total,
 					  0);
 		if (err == -ENOSPC) {
 			/* maxed out the GGTT space */
@@ -1510,7 +1510,7 @@  static int igt_gtt_insert(void *arg)
 		}
 		if (err) {
 			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
-			       total, i915->ggtt.base.total, err);
+			       total, i915->ggtt.vm.total, err);
 			goto out;
 		}
 		track_vma_bind(vma);
@@ -1522,7 +1522,7 @@  static int igt_gtt_insert(void *arg)
 	list_for_each_entry(obj, &objects, st_link) {
 		struct i915_vma *vma;
 
-		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
@@ -1542,7 +1542,7 @@  static int igt_gtt_insert(void *arg)
 		struct i915_vma *vma;
 		u64 offset;
 
-		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
@@ -1557,13 +1557,13 @@  static int igt_gtt_insert(void *arg)
 			goto out;
 		}
 
-		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
 					  obj->base.size, 0, obj->cache_level,
-					  0, i915->ggtt.base.total,
+					  0, i915->ggtt.vm.total,
 					  0);
 		if (err) {
 			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
-			       total, i915->ggtt.base.total, err);
+			       total, i915->ggtt.vm.total, err);
 			goto out;
 		}
 		track_vma_bind(vma);
@@ -1579,7 +1579,7 @@  static int igt_gtt_insert(void *arg)
 
 	/* And then force evictions */
 	for (total = 0;
-	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
 	     total += 2*I915_GTT_PAGE_SIZE) {
 		struct i915_vma *vma;
 
@@ -1597,19 +1597,19 @@  static int igt_gtt_insert(void *arg)
 
 		list_add(&obj->st_link, &objects);
 
-		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
 		}
 
-		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
 					  obj->base.size, 0, obj->cache_level,
-					  0, i915->ggtt.base.total,
+					  0, i915->ggtt.vm.total,
 					  0);
 		if (err) {
 			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
-			       total, i915->ggtt.base.total, err);
+			       total, i915->ggtt.vm.total, err);
 			goto out;
 		}
 		track_vma_bind(vma);
@@ -1669,7 +1669,7 @@  int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_ggtt_page),
 	};
 
-	GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));
+	GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
 
 	return i915_subtests(tests, i915);
 }
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index fbdb2419d418..2b2dde94526f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -113,7 +113,7 @@  static int igt_gem_huge(void *arg)
 
 	obj = huge_gem_object(i915,
 			      nreal * PAGE_SIZE,
-			      i915->ggtt.base.total + PAGE_SIZE);
+			      i915->ggtt.vm.total + PAGE_SIZE);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
@@ -311,7 +311,7 @@  static int igt_partial_tiling(void *arg)
 
 	obj = huge_gem_object(i915,
 			      nreal << PAGE_SHIFT,
-			      (1 + next_prime_number(i915->ggtt.base.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
@@ -440,7 +440,7 @@  static int make_obj_busy(struct drm_i915_gem_object *obj)
 	struct i915_vma *vma;
 	int err;
 
-	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 94bc2e1898a4..a3a89aadeccb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -430,7 +430,7 @@  static struct i915_vma *empty_batch(struct drm_i915_private *i915)
 	if (err)
 		goto err;
 
-	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto err;
@@ -555,7 +555,8 @@  static int live_empty_request(void *arg)
 static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 {
 	struct i915_gem_context *ctx = i915->kernel_context;
-	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+	struct i915_address_space *vm =
+		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
 	struct drm_i915_gem_object *obj;
 	const int gen = INTEL_GEN(i915);
 	struct i915_vma *vma;
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index e90f97236e50..8400a8cc5cf2 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -35,7 +35,7 @@  static bool assert_vma(struct i915_vma *vma,
 {
 	bool ok = true;
 
-	if (vma->vm != &ctx->ppgtt->base) {
+	if (vma->vm != &ctx->ppgtt->vm) {
 		pr_err("VMA created with wrong VM\n");
 		ok = false;
 	}
@@ -110,8 +110,7 @@  static int create_vmas(struct drm_i915_private *i915,
 	list_for_each_entry(obj, objects, st_link) {
 		for (pinned = 0; pinned <= 1; pinned++) {
 			list_for_each_entry(ctx, contexts, link) {
-				struct i915_address_space *vm =
-					&ctx->ppgtt->base;
+				struct i915_address_space *vm = &ctx->ppgtt->vm;
 				struct i915_vma *vma;
 				int err;
 
@@ -259,12 +258,12 @@  static int igt_vma_pin1(void *arg)
 		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192),
 		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
 		VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
-		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
+		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
 
 		VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
 		INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end),
-		VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
-		INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.base.total),
+		VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
+		INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.vm.total),
 		INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),
 
 		VALID(4096, PIN_GLOBAL),
@@ -272,12 +271,12 @@  static int igt_vma_pin1(void *arg)
 		VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
 		VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
 		NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
-		VALID(i915->ggtt.base.total - 4096, PIN_GLOBAL),
-		VALID(i915->ggtt.base.total, PIN_GLOBAL),
-		NOSPACE(i915->ggtt.base.total + 4096, PIN_GLOBAL),
+		VALID(i915->ggtt.vm.total - 4096, PIN_GLOBAL),
+		VALID(i915->ggtt.vm.total, PIN_GLOBAL),
+		NOSPACE(i915->ggtt.vm.total + 4096, PIN_GLOBAL),
 		NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
 		INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
-		INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
+		INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
 		INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),
 
 		VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
@@ -289,9 +288,9 @@  static int igt_vma_pin1(void *arg)
 		 * variable start, end and size.
 		 */
 		NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end),
-		NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.base.total),
+		NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.vm.total),
 		NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
-		NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
+		NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
 #endif
 		{ },
 #undef NOSPACE
@@ -307,13 +306,13 @@  static int igt_vma_pin1(void *arg)
 	 * focusing on error handling of boundary conditions.
 	 */
 
-	GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.base.mm));
+	GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.vm.mm));
 
 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	vma = checked_vma_instance(obj, &i915->ggtt.base, NULL);
+	vma = checked_vma_instance(obj, &i915->ggtt.vm, NULL);
 	if (IS_ERR(vma))
 		goto out;
 
@@ -405,7 +404,7 @@  static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
 static int igt_vma_rotate(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct i915_address_space *vm = &i915->ggtt.base;
+	struct i915_address_space *vm = &i915->ggtt.vm;
 	struct drm_i915_gem_object *obj;
 	const struct intel_rotation_plane_info planes[] = {
 		{ .width = 1, .height = 1, .stride = 1 },
@@ -604,7 +603,7 @@  static bool assert_pin(struct i915_vma *vma,
 static int igt_vma_partial(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct i915_address_space *vm = &i915->ggtt.base;
+	struct i915_address_space *vm = &i915->ggtt.vm;
 	const unsigned int npages = 1021; /* prime! */
 	struct drm_i915_gem_object *obj;
 	const struct phase {
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 2091e3a6a5be..390a157b37c3 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -107,8 +107,8 @@  static int emit_recurse_batch(struct hang *h,
 	struct drm_i915_private *i915 = h->i915;
 	struct i915_address_space *vm =
 		rq->gem_context->ppgtt ?
-		&rq->gem_context->ppgtt->base :
-		&i915->ggtt.base;
+		&rq->gem_context->ppgtt->vm :
+		&i915->ggtt.vm;
 	struct i915_vma *hws, *vma;
 	unsigned int flags;
 	u32 *batch;
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 68cb9126b3e1..0b6da08c8cae 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -83,7 +83,7 @@  static int emit_recurse_batch(struct spinner *spin,
 			      struct i915_request *rq,
 			      u32 arbitration_command)
 {
-	struct i915_address_space *vm = &rq->gem_context->ppgtt->base;
+	struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
 	struct i915_vma *hws, *vma;
 	u32 *batch;
 	int err;
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index 17444a3abbb9..f1cfb0fb6bea 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -33,7 +33,7 @@  read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 	memset(cs, 0xc5, PAGE_SIZE);
 	i915_gem_object_unpin_map(result);
 
-	vma = i915_vma_instance(result, &engine->i915->ggtt.base, NULL);
+	vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto err_obj;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 36c112088940..556c546f2715 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -66,25 +66,25 @@  mock_ppgtt(struct drm_i915_private *i915,
 		return NULL;
 
 	kref_init(&ppgtt->ref);
-	ppgtt->base.i915 = i915;
-	ppgtt->base.total = round_down(U64_MAX, PAGE_SIZE);
-	ppgtt->base.file = ERR_PTR(-ENODEV);
-
-	INIT_LIST_HEAD(&ppgtt->base.active_list);
-	INIT_LIST_HEAD(&ppgtt->base.inactive_list);
-	INIT_LIST_HEAD(&ppgtt->base.unbound_list);
-
-	INIT_LIST_HEAD(&ppgtt->base.global_link);
-	drm_mm_init(&ppgtt->base.mm, 0, ppgtt->base.total);
-
-	ppgtt->base.clear_range = nop_clear_range;
-	ppgtt->base.insert_page = mock_insert_page;
-	ppgtt->base.insert_entries = mock_insert_entries;
-	ppgtt->base.bind_vma = mock_bind_ppgtt;
-	ppgtt->base.unbind_vma = mock_unbind_ppgtt;
-	ppgtt->base.set_pages = ppgtt_set_pages;
-	ppgtt->base.clear_pages = clear_pages;
-	ppgtt->base.cleanup = mock_cleanup;
+	ppgtt->vm.i915 = i915;
+	ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
+	ppgtt->vm.file = ERR_PTR(-ENODEV);
+
+	INIT_LIST_HEAD(&ppgtt->vm.active_list);
+	INIT_LIST_HEAD(&ppgtt->vm.inactive_list);
+	INIT_LIST_HEAD(&ppgtt->vm.unbound_list);
+
+	INIT_LIST_HEAD(&ppgtt->vm.global_link);
+	drm_mm_init(&ppgtt->vm.mm, 0, ppgtt->vm.total);
+
+	ppgtt->vm.clear_range = nop_clear_range;
+	ppgtt->vm.insert_page = mock_insert_page;
+	ppgtt->vm.insert_entries = mock_insert_entries;
+	ppgtt->vm.bind_vma = mock_bind_ppgtt;
+	ppgtt->vm.unbind_vma = mock_unbind_ppgtt;
+	ppgtt->vm.set_pages = ppgtt_set_pages;
+	ppgtt->vm.clear_pages = clear_pages;
+	ppgtt->vm.cleanup = mock_cleanup;
 
 	return ppgtt;
 }
@@ -107,27 +107,27 @@  void mock_init_ggtt(struct drm_i915_private *i915)
 
 	INIT_LIST_HEAD(&i915->vm_list);
 
-	ggtt->base.i915 = i915;
+	ggtt->vm.i915 = i915;
 
 	ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
 	ggtt->mappable_end = resource_size(&ggtt->gmadr);
-	ggtt->base.total = 4096 * PAGE_SIZE;
-
-	ggtt->base.clear_range = nop_clear_range;
-	ggtt->base.insert_page = mock_insert_page;
-	ggtt->base.insert_entries = mock_insert_entries;
-	ggtt->base.bind_vma = mock_bind_ggtt;
-	ggtt->base.unbind_vma = mock_unbind_ggtt;
-	ggtt->base.set_pages = ggtt_set_pages;
-	ggtt->base.clear_pages = clear_pages;
-	ggtt->base.cleanup = mock_cleanup;
-
-	i915_address_space_init(&ggtt->base, i915, "global");
+	ggtt->vm.total = 4096 * PAGE_SIZE;
+
+	ggtt->vm.clear_range = nop_clear_range;
+	ggtt->vm.insert_page = mock_insert_page;
+	ggtt->vm.insert_entries = mock_insert_entries;
+	ggtt->vm.bind_vma = mock_bind_ggtt;
+	ggtt->vm.unbind_vma = mock_unbind_ggtt;
+	ggtt->vm.set_pages = ggtt_set_pages;
+	ggtt->vm.clear_pages = clear_pages;
+	ggtt->vm.cleanup = mock_cleanup;
+
+	i915_address_space_init(&ggtt->vm, i915, "global");
 }
 
 void mock_fini_ggtt(struct drm_i915_private *i915)
 {
 	struct i915_ggtt *ggtt = &i915->ggtt;
 
-	i915_address_space_fini(&ggtt->base);
+	i915_address_space_fini(&ggtt->vm);
 }