[076/190] drm/i915: Rename vma->*_list to *_link for consistency
diff mbox

Message ID 1452503961-14837-76-git-send-email-chris@chris-wilson.co.uk
State New
Headers show

Commit Message

Chris Wilson Jan. 11, 2016, 9:17 a.m. UTC
Elsewhere we have adopted the convention of using '_link' to denote
elements in the list (and '_list' for the actual list_head itself), and
that the name should indicate which list the link belongs to (and
preferably not just where the link is being stored).

s/vma_link/obj_link/ (we iterate over obj->vma_list)
s/mm_list/vm_link/ (we iterate over vm->[in]active_list)

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c      | 17 +++++------
 drivers/gpu/drm/i915/i915_gem.c          | 50 ++++++++++++++++----------------
 drivers/gpu/drm/i915/i915_gem_context.c  |  2 +-
 drivers/gpu/drm/i915/i915_gem_evict.c    |  6 ++--
 drivers/gpu/drm/i915/i915_gem_gtt.c      | 10 +++----
 drivers/gpu/drm/i915/i915_gem_gtt.h      |  4 +--
 drivers/gpu/drm/i915/i915_gem_shrinker.c |  4 +--
 drivers/gpu/drm/i915/i915_gem_stolen.c   |  2 +-
 drivers/gpu/drm/i915/i915_gem_userptr.c  |  2 +-
 drivers/gpu/drm/i915/i915_gpu_error.c    |  8 ++---
 10 files changed, 52 insertions(+), 53 deletions(-)

Comments

Tvrtko Ursulin Jan. 12, 2016, 1:49 p.m. UTC | #1
On 11/01/16 09:17, Chris Wilson wrote:
> Elsewhere we have adopted the convention of using '_link' to denote
> elements in the list (and '_list' for the actual list_head itself), and
> that the name should indicate which list the link belongs to (and
> preferrably not just where the link is being stored).
>
> s/vma_link/obj_link/ (we iterate over obj->vma_list)
> s/mm_list/vm_link/ (we iterate over vm->[in]active_list)
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_debugfs.c      | 17 +++++------
>   drivers/gpu/drm/i915/i915_gem.c          | 50 ++++++++++++++++----------------
>   drivers/gpu/drm/i915/i915_gem_context.c  |  2 +-
>   drivers/gpu/drm/i915/i915_gem_evict.c    |  6 ++--
>   drivers/gpu/drm/i915/i915_gem_gtt.c      | 10 +++----
>   drivers/gpu/drm/i915/i915_gem_gtt.h      |  4 +--
>   drivers/gpu/drm/i915/i915_gem_shrinker.c |  4 +--
>   drivers/gpu/drm/i915/i915_gem_stolen.c   |  2 +-
>   drivers/gpu/drm/i915/i915_gem_userptr.c  |  2 +-
>   drivers/gpu/drm/i915/i915_gpu_error.c    |  8 ++---
>   10 files changed, 52 insertions(+), 53 deletions(-)

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko

> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index efa9572fc217..f311df758195 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -117,9 +117,8 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
>   	u64 size = 0;
>   	struct i915_vma *vma;
>
> -	list_for_each_entry(vma, &obj->vma_list, vma_link) {
> -		if (i915_is_ggtt(vma->vm) &&
> -		    drm_mm_node_allocated(&vma->node))
> +	list_for_each_entry(vma, &obj->vma_list, obj_link) {
> +		if (i915_is_ggtt(vma->vm) && drm_mm_node_allocated(&vma->node))
>   			size += vma->node.size;
>   	}
>
> @@ -155,7 +154,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>   		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
>   	if (obj->base.name)
>   		seq_printf(m, " (name: %d)", obj->base.name);
> -	list_for_each_entry(vma, &obj->vma_list, vma_link) {
> +	list_for_each_entry(vma, &obj->vma_list, obj_link) {
>   		if (vma->pin_count > 0)
>   			pin_count++;
>   	}
> @@ -164,7 +163,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>   		seq_printf(m, " (display)");
>   	if (obj->fence_reg != I915_FENCE_REG_NONE)
>   		seq_printf(m, " (fence: %d)", obj->fence_reg);
> -	list_for_each_entry(vma, &obj->vma_list, vma_link) {
> +	list_for_each_entry(vma, &obj->vma_list, obj_link) {
>   		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
>   			   i915_is_ggtt(vma->vm) ? "g" : "pp",
>   			   vma->node.start, vma->node.size);
> @@ -229,7 +228,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
>   	}
>
>   	total_obj_size = total_gtt_size = count = 0;
> -	list_for_each_entry(vma, head, mm_list) {
> +	list_for_each_entry(vma, head, vm_link) {
>   		seq_printf(m, "   ");
>   		describe_obj(m, vma->obj);
>   		seq_printf(m, "\n");
> @@ -341,7 +340,7 @@ static int per_file_stats(int id, void *ptr, void *data)
>   		stats->shared += obj->base.size;
>
>   	if (USES_FULL_PPGTT(obj->base.dev)) {
> -		list_for_each_entry(vma, &obj->vma_list, vma_link) {
> +		list_for_each_entry(vma, &obj->vma_list, obj_link) {
>   			struct i915_hw_ppgtt *ppgtt;
>
>   			if (!drm_mm_node_allocated(&vma->node))
> @@ -453,12 +452,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
>   		   count, mappable_count, size, mappable_size);
>
>   	size = count = mappable_size = mappable_count = 0;
> -	count_vmas(&vm->active_list, mm_list);
> +	count_vmas(&vm->active_list, vm_link);
>   	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
>   		   count, mappable_count, size, mappable_size);
>
>   	size = count = mappable_size = mappable_count = 0;
> -	count_vmas(&vm->inactive_list, mm_list);
> +	count_vmas(&vm->inactive_list, vm_link);
>   	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
>   		   count, mappable_count, size, mappable_size);
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 4eef13ebdaf3..e4d7c7f5aca2 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -128,10 +128,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
>
>   	pinned = 0;
>   	mutex_lock(&dev->struct_mutex);
> -	list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
> +	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
>   		if (vma->pin_count)
>   			pinned += vma->node.size;
> -	list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
> +	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
>   		if (vma->pin_count)
>   			pinned += vma->node.size;
>   	mutex_unlock(&dev->struct_mutex);
> @@ -261,7 +261,7 @@ drop_pages(struct drm_i915_gem_object *obj)
>   	int ret;
>
>   	drm_gem_object_reference(&obj->base);
> -	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
> +	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
>   		if (i915_vma_unbind(vma))
>   			break;
>
> @@ -2038,7 +2038,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
>   	obj->active |= intel_engine_flag(engine);
>
>   	i915_gem_request_mark_active(req, &obj->last_read[engine->id]);
> -	list_move_tail(&vma->mm_list, &vma->vm->active_list);
> +	list_move_tail(&vma->vm_link, &vma->vm->active_list);
>   }
>
>   static void
> @@ -2079,9 +2079,9 @@ i915_gem_object_retire__read(struct i915_gem_active *active,
>   	 */
>   	list_move_tail(&obj->global_list, &request->i915->mm.bound_list);
>
> -	list_for_each_entry(vma, &obj->vma_list, vma_link) {
> -		if (!list_empty(&vma->mm_list))
> -			list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
> +	list_for_each_entry(vma, &obj->vma_list, obj_link) {
> +		if (!list_empty(&vma->vm_link))
> +			list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
>   	}
>
>   	drm_gem_object_unreference(&obj->base);
> @@ -2576,7 +2576,7 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
>   	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>   	int ret;
>
> -	if (list_empty(&vma->vma_link))
> +	if (list_empty(&vma->obj_link))
>   		return 0;
>
>   	if (!drm_mm_node_allocated(&vma->node)) {
> @@ -2610,7 +2610,7 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
>   	vma->vm->unbind_vma(vma);
>   	vma->bound = 0;
>
> -	list_del_init(&vma->mm_list);
> +	list_del_init(&vma->vm_link);
>   	if (i915_is_ggtt(vma->vm)) {
>   		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
>   			obj->map_and_fenceable = false;
> @@ -2864,7 +2864,7 @@ search_free:
>   		goto err_remove_node;
>
>   	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
> -	list_add_tail(&vma->mm_list, &vm->inactive_list);
> +	list_add_tail(&vma->vm_link, &vm->inactive_list);
>
>   	return vma;
>
> @@ -3029,7 +3029,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
>   	/* And bump the LRU for this access */
>   	vma = i915_gem_obj_to_ggtt(obj);
>   	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
> -		list_move_tail(&vma->mm_list,
> +		list_move_tail(&vma->vm_link,
>   			       &to_i915(obj->base.dev)->gtt.base.inactive_list);
>
>   	return 0;
> @@ -3064,7 +3064,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
>   	 * catch the issue of the CS prefetch crossing page boundaries and
>   	 * reading an invalid PTE on older architectures.
>   	 */
> -	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
> +	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
>   		if (!drm_mm_node_allocated(&vma->node))
>   			continue;
>
> @@ -3127,7 +3127,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
>   			 */
>   		}
>
> -		list_for_each_entry(vma, &obj->vma_list, vma_link) {
> +		list_for_each_entry(vma, &obj->vma_list, obj_link) {
>   			if (!drm_mm_node_allocated(&vma->node))
>   				continue;
>
> @@ -3137,7 +3137,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
>   		}
>   	}
>
> -	list_for_each_entry(vma, &obj->vma_list, vma_link)
> +	list_for_each_entry(vma, &obj->vma_list, obj_link)
>   		vma->node.color = cache_level;
>   	obj->cache_level = cache_level;
>
> @@ -3797,7 +3797,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
>
>   	trace_i915_gem_object_destroy(obj);
>
> -	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
> +	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
>   		int ret;
>
>   		vma->pin_count = 0;
> @@ -3854,7 +3854,7 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
>   				     struct i915_address_space *vm)
>   {
>   	struct i915_vma *vma;
> -	list_for_each_entry(vma, &obj->vma_list, vma_link) {
> +	list_for_each_entry(vma, &obj->vma_list, obj_link) {
>   		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
>   		    vma->vm == vm)
>   			return vma;
> @@ -3871,7 +3871,7 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
>   	if (WARN_ONCE(!view, "no view specified"))
>   		return ERR_PTR(-EINVAL);
>
> -	list_for_each_entry(vma, &obj->vma_list, vma_link)
> +	list_for_each_entry(vma, &obj->vma_list, obj_link)
>   		if (vma->vm == ggtt &&
>   		    i915_ggtt_view_equal(&vma->ggtt_view, view))
>   			return vma;
> @@ -3892,7 +3892,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
>   	if (!i915_is_ggtt(vm))
>   		i915_ppgtt_put(i915_vm_to_ppgtt(vm));
>
> -	list_del(&vma->vma_link);
> +	list_del(&vma->obj_link);
>
>   	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
>   }
> @@ -4444,7 +4444,7 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
>
>   	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
>
> -	list_for_each_entry(vma, &o->vma_list, vma_link) {
> +	list_for_each_entry(vma, &o->vma_list, obj_link) {
>   		if (i915_is_ggtt(vma->vm) &&
>   		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
>   			continue;
> @@ -4463,7 +4463,7 @@ u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
>   	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
>   	struct i915_vma *vma;
>
> -	list_for_each_entry(vma, &o->vma_list, vma_link)
> +	list_for_each_entry(vma, &o->vma_list, obj_link)
>   		if (vma->vm == ggtt &&
>   		    i915_ggtt_view_equal(&vma->ggtt_view, view))
>   			return vma->node.start;
> @@ -4477,7 +4477,7 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
>   {
>   	struct i915_vma *vma;
>
> -	list_for_each_entry(vma, &o->vma_list, vma_link) {
> +	list_for_each_entry(vma, &o->vma_list, obj_link) {
>   		if (i915_is_ggtt(vma->vm) &&
>   		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
>   			continue;
> @@ -4494,7 +4494,7 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
>   	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
>   	struct i915_vma *vma;
>
> -	list_for_each_entry(vma, &o->vma_list, vma_link)
> +	list_for_each_entry(vma, &o->vma_list, obj_link)
>   		if (vma->vm == ggtt &&
>   		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
>   		    drm_mm_node_allocated(&vma->node))
> @@ -4507,7 +4507,7 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
>   {
>   	struct i915_vma *vma;
>
> -	list_for_each_entry(vma, &o->vma_list, vma_link)
> +	list_for_each_entry(vma, &o->vma_list, obj_link)
>   		if (drm_mm_node_allocated(&vma->node))
>   			return true;
>
> @@ -4524,7 +4524,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
>
>   	BUG_ON(list_empty(&o->vma_list));
>
> -	list_for_each_entry(vma, &o->vma_list, vma_link) {
> +	list_for_each_entry(vma, &o->vma_list, obj_link) {
>   		if (i915_is_ggtt(vma->vm) &&
>   		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
>   			continue;
> @@ -4537,7 +4537,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
>   bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
>   {
>   	struct i915_vma *vma;
> -	list_for_each_entry(vma, &obj->vma_list, vma_link)
> +	list_for_each_entry(vma, &obj->vma_list, obj_link)
>   		if (vma->pin_count > 0)
>   			return true;
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index 72b0875a95a4..05b4e0e85f24 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -142,7 +142,7 @@ static void i915_gem_context_clean(struct intel_context *ctx)
>   		return;
>
>   	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
> -				 mm_list) {
> +				 vm_link) {
>   		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
>   			break;
>   	}
> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index 07c6e4d320c9..ea1f8d1bd228 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -116,7 +116,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
>
>   search_again:
>   	/* First see if there is a large enough contiguous idle region... */
> -	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
> +	list_for_each_entry(vma, &vm->inactive_list, vm_link) {
>   		if (mark_free(vma, &unwind_list))
>   			goto found;
>   	}
> @@ -125,7 +125,7 @@ search_again:
>   		goto none;
>
>   	/* Now merge in the soon-to-be-expired objects... */
> -	list_for_each_entry(vma, &vm->active_list, mm_list) {
> +	list_for_each_entry(vma, &vm->active_list, vm_link) {
>   		if (mark_free(vma, &unwind_list))
>   			goto found;
>   	}
> @@ -270,7 +270,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
>   		WARN_ON(!list_empty(&vm->active_list));
>   	}
>
> -	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
> +	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
>   		if (vma->pin_count == 0)
>   			WARN_ON(i915_vma_unbind(vma));
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index cddbd8c00663..6168182a87d8 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -2736,7 +2736,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
>   		}
>   		vma->bound |= GLOBAL_BIND;
>   		__i915_vma_set_map_and_fenceable(vma);
> -		list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
> +		list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
>   	}
>
>   	/* Clear any non-preallocated blocks */
> @@ -3221,7 +3221,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
>   	vm = &dev_priv->gtt.base;
>   	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
>   		flush = false;
> -		list_for_each_entry(vma, &obj->vma_list, vma_link) {
> +		list_for_each_entry(vma, &obj->vma_list, obj_link) {
>   			if (vma->vm != vm)
>   				continue;
>
> @@ -3277,8 +3277,8 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
>   	if (vma == NULL)
>   		return ERR_PTR(-ENOMEM);
>
> -	INIT_LIST_HEAD(&vma->vma_link);
> -	INIT_LIST_HEAD(&vma->mm_list);
> +	INIT_LIST_HEAD(&vma->vm_link);
> +	INIT_LIST_HEAD(&vma->obj_link);
>   	INIT_LIST_HEAD(&vma->exec_list);
>   	vma->vm = vm;
>   	vma->obj = obj;
> @@ -3286,7 +3286,7 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
>   	if (i915_is_ggtt(vm))
>   		vma->ggtt_view = *ggtt_view;
>
> -	list_add_tail(&vma->vma_link, &obj->vma_list);
> +	list_add_tail(&vma->obj_link, &obj->vma_list);
>   	if (!i915_is_ggtt(vm))
>   		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
> index b448ad832dcf..2497671d1e1a 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.h
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
> @@ -195,9 +195,9 @@ struct i915_vma {
>   	struct i915_ggtt_view ggtt_view;
>
>   	/** This object's place on the active/inactive lists */
> -	struct list_head mm_list;
> +	struct list_head vm_link;
>
> -	struct list_head vma_link; /* Link in the object's VMA list */
> +	struct list_head obj_link; /* Link in the object's VMA list */
>
>   	/** This vma's place in the batchbuffer or on the eviction list */
>   	struct list_head exec_list;
> diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> index 16da9c1422cc..777959b47ccf 100644
> --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
> +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> @@ -52,7 +52,7 @@ static int num_vma_bound(struct drm_i915_gem_object *obj)
>   	struct i915_vma *vma;
>   	int count = 0;
>
> -	list_for_each_entry(vma, &obj->vma_list, vma_link) {
> +	list_for_each_entry(vma, &obj->vma_list, obj_link) {
>   		if (drm_mm_node_allocated(&vma->node))
>   			count++;
>   		if (vma->pin_count)
> @@ -176,7 +176,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
>
>   			/* For the unbound phase, this should be a no-op! */
>   			list_for_each_entry_safe(vma, v,
> -						 &obj->vma_list, vma_link)
> +						 &obj->vma_list, obj_link)
>   				if (i915_vma_unbind(vma))
>   					break;
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> index c384dc9c8a63..590e635cb65c 100644
> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> @@ -692,7 +692,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
>
>   		vma->bound |= GLOBAL_BIND;
>   		__i915_vma_set_map_and_fenceable(vma);
> -		list_add_tail(&vma->mm_list, &ggtt->inactive_list);
> +		list_add_tail(&vma->vm_link, &ggtt->inactive_list);
>   	}
>
>   	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
> diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
> index 251e81c4b0ea..2f3638d02bdd 100644
> --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
> +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
> @@ -81,7 +81,7 @@ static void __cancel_userptr__worker(struct work_struct *work)
>   		was_interruptible = dev_priv->mm.interruptible;
>   		dev_priv->mm.interruptible = false;
>
> -		list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link)
> +		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
>   			WARN_ON(i915_vma_unbind(vma));
>   		WARN_ON(i915_gem_object_put_pages(obj));
>
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index c812079bc25c..706d956b6eb3 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -731,7 +731,7 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
>   	struct i915_vma *vma;
>   	int i = 0;
>
> -	list_for_each_entry(vma, head, mm_list) {
> +	list_for_each_entry(vma, head, vm_link) {
>   		capture_bo(err++, vma);
>   		if (++i == count)
>   			break;
> @@ -754,7 +754,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
>   		if (err == last)
>   			break;
>
> -		list_for_each_entry(vma, &obj->vma_list, vma_link)
> +		list_for_each_entry(vma, &obj->vma_list, obj_link)
>   			if (vma->vm == vm && vma->pin_count > 0)
>   				capture_bo(err++, vma);
>   	}
> @@ -1113,12 +1113,12 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
>   	int i;
>
>   	i = 0;
> -	list_for_each_entry(vma, &vm->active_list, mm_list)
> +	list_for_each_entry(vma, &vm->active_list, vm_link)
>   		i++;
>   	error->active_bo_count[ndx] = i;
>
>   	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
> -		list_for_each_entry(vma, &obj->vma_list, vma_link)
> +		list_for_each_entry(vma, &obj->vma_list, obj_link)
>   			if (vma->vm == vm && vma->pin_count > 0)
>   				i++;
>   	}
>

Patch
diff mbox

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index efa9572fc217..f311df758195 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -117,9 +117,8 @@  static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
 	u64 size = 0;
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (i915_is_ggtt(vma->vm) &&
-		    drm_mm_node_allocated(&vma->node))
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (i915_is_ggtt(vma->vm) && drm_mm_node_allocated(&vma->node))
 			size += vma->node.size;
 	}
 
@@ -155,7 +154,7 @@  describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (vma->pin_count > 0)
 			pin_count++;
 	}
@@ -164,7 +163,7 @@  describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		seq_printf(m, " (display)");
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
 			   i915_is_ggtt(vma->vm) ? "g" : "pp",
 			   vma->node.start, vma->node.size);
@@ -229,7 +228,7 @@  static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	}
 
 	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(vma, head, mm_list) {
+	list_for_each_entry(vma, head, vm_link) {
 		seq_printf(m, "   ");
 		describe_obj(m, vma->obj);
 		seq_printf(m, "\n");
@@ -341,7 +340,7 @@  static int per_file_stats(int id, void *ptr, void *data)
 		stats->shared += obj->base.size;
 
 	if (USES_FULL_PPGTT(obj->base.dev)) {
-		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		list_for_each_entry(vma, &obj->vma_list, obj_link) {
 			struct i915_hw_ppgtt *ppgtt;
 
 			if (!drm_mm_node_allocated(&vma->node))
@@ -453,12 +452,12 @@  static int i915_gem_object_info(struct seq_file *m, void* data)
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_vmas(&vm->active_list, mm_list);
+	count_vmas(&vm->active_list, vm_link);
 	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_vmas(&vm->inactive_list, mm_list);
+	count_vmas(&vm->inactive_list, vm_link);
 	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4eef13ebdaf3..e4d7c7f5aca2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -128,10 +128,10 @@  i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 
 	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
+	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
 		if (vma->pin_count)
 			pinned += vma->node.size;
-	list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
+	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
 		if (vma->pin_count)
 			pinned += vma->node.size;
 	mutex_unlock(&dev->struct_mutex);
@@ -261,7 +261,7 @@  drop_pages(struct drm_i915_gem_object *obj)
 	int ret;
 
 	drm_gem_object_reference(&obj->base);
-	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
+	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
 		if (i915_vma_unbind(vma))
 			break;
 
@@ -2038,7 +2038,7 @@  void i915_vma_move_to_active(struct i915_vma *vma,
 	obj->active |= intel_engine_flag(engine);
 
 	i915_gem_request_mark_active(req, &obj->last_read[engine->id]);
-	list_move_tail(&vma->mm_list, &vma->vm->active_list);
+	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
 
 static void
@@ -2079,9 +2079,9 @@  i915_gem_object_retire__read(struct i915_gem_active *active,
 	 */
 	list_move_tail(&obj->global_list, &request->i915->mm.bound_list);
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (!list_empty(&vma->mm_list))
-			list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (!list_empty(&vma->vm_link))
+			list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 	}
 
 	drm_gem_object_unreference(&obj->base);
@@ -2576,7 +2576,7 @@  static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	int ret;
 
-	if (list_empty(&vma->vma_link))
+	if (list_empty(&vma->obj_link))
 		return 0;
 
 	if (!drm_mm_node_allocated(&vma->node)) {
@@ -2610,7 +2610,7 @@  static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 	vma->vm->unbind_vma(vma);
 	vma->bound = 0;
 
-	list_del_init(&vma->mm_list);
+	list_del_init(&vma->vm_link);
 	if (i915_is_ggtt(vma->vm)) {
 		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
 			obj->map_and_fenceable = false;
@@ -2864,7 +2864,7 @@  search_free:
 		goto err_remove_node;
 
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-	list_add_tail(&vma->mm_list, &vm->inactive_list);
+	list_add_tail(&vma->vm_link, &vm->inactive_list);
 
 	return vma;
 
@@ -3029,7 +3029,7 @@  i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	/* And bump the LRU for this access */
 	vma = i915_gem_obj_to_ggtt(obj);
 	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
-		list_move_tail(&vma->mm_list,
+		list_move_tail(&vma->vm_link,
 			       &to_i915(obj->base.dev)->gtt.base.inactive_list);
 
 	return 0;
@@ -3064,7 +3064,7 @@  int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	 * catch the issue of the CS prefetch crossing page boundaries and
 	 * reading an invalid PTE on older architectures.
 	 */
-	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
@@ -3127,7 +3127,7 @@  int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 			 */
 		}
 
-		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		list_for_each_entry(vma, &obj->vma_list, obj_link) {
 			if (!drm_mm_node_allocated(&vma->node))
 				continue;
 
@@ -3137,7 +3137,7 @@  int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		}
 	}
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	list_for_each_entry(vma, &obj->vma_list, obj_link)
 		vma->node.color = cache_level;
 	obj->cache_level = cache_level;
 
@@ -3797,7 +3797,7 @@  void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
 	trace_i915_gem_object_destroy(obj);
 
-	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
 		int ret;
 
 		vma->pin_count = 0;
@@ -3854,7 +3854,7 @@  struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm)
 {
 	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
 		    vma->vm == vm)
 			return vma;
@@ -3871,7 +3871,7 @@  struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
 	if (WARN_ONCE(!view, "no view specified"))
 		return ERR_PTR(-EINVAL);
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	list_for_each_entry(vma, &obj->vma_list, obj_link)
 		if (vma->vm == ggtt &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma;
@@ -3892,7 +3892,7 @@  void i915_gem_vma_destroy(struct i915_vma *vma)
 	if (!i915_is_ggtt(vm))
 		i915_ppgtt_put(i915_vm_to_ppgtt(vm));
 
-	list_del(&vma->vma_link);
+	list_del(&vma->obj_link);
 
 	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
 }
@@ -4444,7 +4444,7 @@  u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
 
 	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
+	list_for_each_entry(vma, &o->vma_list, obj_link) {
 		if (i915_is_ggtt(vma->vm) &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
@@ -4463,7 +4463,7 @@  u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
 	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link)
+	list_for_each_entry(vma, &o->vma_list, obj_link)
 		if (vma->vm == ggtt &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma->node.start;
@@ -4477,7 +4477,7 @@  bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 {
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
+	list_for_each_entry(vma, &o->vma_list, obj_link) {
 		if (i915_is_ggtt(vma->vm) &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
@@ -4494,7 +4494,7 @@  bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
 	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link)
+	list_for_each_entry(vma, &o->vma_list, obj_link)
 		if (vma->vm == ggtt &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
 		    drm_mm_node_allocated(&vma->node))
@@ -4507,7 +4507,7 @@  bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
 {
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link)
+	list_for_each_entry(vma, &o->vma_list, obj_link)
 		if (drm_mm_node_allocated(&vma->node))
 			return true;
 
@@ -4524,7 +4524,7 @@  unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 
 	BUG_ON(list_empty(&o->vma_list));
 
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
+	list_for_each_entry(vma, &o->vma_list, obj_link) {
 		if (i915_is_ggtt(vma->vm) &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
@@ -4537,7 +4537,7 @@  unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	list_for_each_entry(vma, &obj->vma_list, obj_link)
 		if (vma->pin_count > 0)
 			return true;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 72b0875a95a4..05b4e0e85f24 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -142,7 +142,7 @@  static void i915_gem_context_clean(struct intel_context *ctx)
 		return;
 
 	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
-				 mm_list) {
+				 vm_link) {
 		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
 			break;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 07c6e4d320c9..ea1f8d1bd228 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -116,7 +116,7 @@  i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 
 search_again:
 	/* First see if there is a large enough contiguous idle region... */
-	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
+	list_for_each_entry(vma, &vm->inactive_list, vm_link) {
 		if (mark_free(vma, &unwind_list))
 			goto found;
 	}
@@ -125,7 +125,7 @@  search_again:
 		goto none;
 
 	/* Now merge in the soon-to-be-expired objects... */
-	list_for_each_entry(vma, &vm->active_list, mm_list) {
+	list_for_each_entry(vma, &vm->active_list, vm_link) {
 		if (mark_free(vma, &unwind_list))
 			goto found;
 	}
@@ -270,7 +270,7 @@  int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 		WARN_ON(!list_empty(&vm->active_list));
 	}
 
-	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
 		if (vma->pin_count == 0)
 			WARN_ON(i915_vma_unbind(vma));
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index cddbd8c00663..6168182a87d8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2736,7 +2736,7 @@  static int i915_gem_setup_global_gtt(struct drm_device *dev,
 		}
 		vma->bound |= GLOBAL_BIND;
 		__i915_vma_set_map_and_fenceable(vma);
-		list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
+		list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
 	}
 
 	/* Clear any non-preallocated blocks */
@@ -3221,7 +3221,7 @@  void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	vm = &dev_priv->gtt.base;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		flush = false;
-		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		list_for_each_entry(vma, &obj->vma_list, obj_link) {
 			if (vma->vm != vm)
 				continue;
 
@@ -3277,8 +3277,8 @@  __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	INIT_LIST_HEAD(&vma->vma_link);
-	INIT_LIST_HEAD(&vma->mm_list);
+	INIT_LIST_HEAD(&vma->vm_link);
+	INIT_LIST_HEAD(&vma->obj_link);
 	INIT_LIST_HEAD(&vma->exec_list);
 	vma->vm = vm;
 	vma->obj = obj;
@@ -3286,7 +3286,7 @@  __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	if (i915_is_ggtt(vm))
 		vma->ggtt_view = *ggtt_view;
 
-	list_add_tail(&vma->vma_link, &obj->vma_list);
+	list_add_tail(&vma->obj_link, &obj->vma_list);
 	if (!i915_is_ggtt(vm))
 		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index b448ad832dcf..2497671d1e1a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -195,9 +195,9 @@  struct i915_vma {
 	struct i915_ggtt_view ggtt_view;
 
 	/** This object's place on the active/inactive lists */
-	struct list_head mm_list;
+	struct list_head vm_link;
 
-	struct list_head vma_link; /* Link in the object's VMA list */
+	struct list_head obj_link; /* Link in the object's VMA list */
 
 	/** This vma's place in the batchbuffer or on the eviction list */
 	struct list_head exec_list;
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 16da9c1422cc..777959b47ccf 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -52,7 +52,7 @@  static int num_vma_bound(struct drm_i915_gem_object *obj)
 	struct i915_vma *vma;
 	int count = 0;
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (drm_mm_node_allocated(&vma->node))
 			count++;
 		if (vma->pin_count)
@@ -176,7 +176,7 @@  i915_gem_shrink(struct drm_i915_private *dev_priv,
 
 			/* For the unbound phase, this should be a no-op! */
 			list_for_each_entry_safe(vma, v,
-						 &obj->vma_list, vma_link)
+						 &obj->vma_list, obj_link)
 				if (i915_vma_unbind(vma))
 					break;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index c384dc9c8a63..590e635cb65c 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -692,7 +692,7 @@  i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 
 		vma->bound |= GLOBAL_BIND;
 		__i915_vma_set_map_and_fenceable(vma);
-		list_add_tail(&vma->mm_list, &ggtt->inactive_list);
+		list_add_tail(&vma->vm_link, &ggtt->inactive_list);
 	}
 
 	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 251e81c4b0ea..2f3638d02bdd 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -81,7 +81,7 @@  static void __cancel_userptr__worker(struct work_struct *work)
 		was_interruptible = dev_priv->mm.interruptible;
 		dev_priv->mm.interruptible = false;
 
-		list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link)
+		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
 			WARN_ON(i915_vma_unbind(vma));
 		WARN_ON(i915_gem_object_put_pages(obj));
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index c812079bc25c..706d956b6eb3 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -731,7 +731,7 @@  static u32 capture_active_bo(struct drm_i915_error_buffer *err,
 	struct i915_vma *vma;
 	int i = 0;
 
-	list_for_each_entry(vma, head, mm_list) {
+	list_for_each_entry(vma, head, vm_link) {
 		capture_bo(err++, vma);
 		if (++i == count)
 			break;
@@ -754,7 +754,7 @@  static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
 		if (err == last)
 			break;
 
-		list_for_each_entry(vma, &obj->vma_list, vma_link)
+		list_for_each_entry(vma, &obj->vma_list, obj_link)
 			if (vma->vm == vm && vma->pin_count > 0)
 				capture_bo(err++, vma);
 	}
@@ -1113,12 +1113,12 @@  static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
 	int i;
 
 	i = 0;
-	list_for_each_entry(vma, &vm->active_list, mm_list)
+	list_for_each_entry(vma, &vm->active_list, vm_link)
 		i++;
 	error->active_bo_count[ndx] = i;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		list_for_each_entry(vma, &obj->vma_list, vma_link)
+		list_for_each_entry(vma, &obj->vma_list, obj_link)
 			if (vma->vm == vm && vma->pin_count > 0)
 				i++;
 	}