[6/6] drm/i915: Create VMAs

Message ID 1374088743-23250-2-git-send-email-ben@bwidawsk.net (mailing list archive)
State New, archived

Commit Message

Ben Widawsky July 17, 2013, 7:19 p.m. UTC
Formerly: "drm/i915: Create VMAs (part 1)"

In a previous patch, the notion of a VM was introduced. A VMA describes
a region of a VM's address space. A VMA is similar to the concept in the
Linux mm; however, instead of representing regular memory, a VMA is
backed by a GEM BO. There may be many VMAs for a given object, one for
each VM the object is to be used in. Multiple VMAs may arise through
flink, dma-buf, or a number of other transient states.

Currently the code depends on only one VMA per object, for the global
GTT (and aliasing PPGTT). The following patches will address this and
make the rest of the infrastructure more suited to multiple VMAs.
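
As a rough illustration (a sketch, not code from the diff below): a
lookup that used to read the drm_mm_node embedded in the object now
resolves the object's (currently sole) VMA first:

	/* before this patch: node embedded in the object */
	unsigned long offset = obj->gtt_space.start;

	/* with this patch: go through the object's VMA */
	struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
	unsigned long offset = vma->node.start;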

v2: s/i915_obj/i915_gem_obj (Chris)

v3: Only move an object to the now global unbound list if there are no
more VMAs for the object which are bound into a VM (ie. the list is
empty).

v4: killed obj->gtt_space
some reworks due to rebase

v5: Free vma on error path (Imre)

v6: Another missed vma free in i915_gem_object_bind_to_gtt error path
(Imre)
Fixed vma freeing in stolen preallocation (Imre)

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.h        | 48 +++++++++++++++++-----
 drivers/gpu/drm/i915/i915_gem.c        | 74 +++++++++++++++++++++++++++-------
 drivers/gpu/drm/i915/i915_gem_evict.c  | 12 ++++--
 drivers/gpu/drm/i915/i915_gem_gtt.c    |  5 ++-
 drivers/gpu/drm/i915/i915_gem_stolen.c | 15 +++++--
 5 files changed, 120 insertions(+), 34 deletions(-)

Comments

Daniel Vetter July 17, 2013, 8:27 p.m. UTC | #1
On Wed, Jul 17, 2013 at 12:19:03PM -0700, Ben Widawsky wrote:
> Formerly: "drm/i915: Create VMAs (part 1)"
> 
> In a previous patch, the notion of a VM was introduced. A VMA describes
> a region of a VM's address space. A VMA is similar to the concept in the
> Linux mm; however, instead of representing regular memory, a VMA is
> backed by a GEM BO. There may be many VMAs for a given object, one for
> each VM the object is to be used in. Multiple VMAs may arise through
> flink, dma-buf, or a number of other transient states.
> 
> Currently the code depends on only one VMA per object, for the global
> GTT (and aliasing PPGTT). The following patches will address this and
> make the rest of the infrastructure more suited to multiple VMAs.
> 
> v2: s/i915_obj/i915_gem_obj (Chris)
> 
> v3: Only move an object to the now global unbound list if there are no
> more VMAs for the object which are bound into a VM (ie. the list is
> empty).
> 
> v4: killed obj->gtt_space
> some reworks due to rebase
> 
> v5: Free vma on error path (Imre)
> 
> v6: Another missed vma free in i915_gem_object_bind_to_gtt error path
> (Imre)
> Fixed vma freeing in stolen preallocation (Imre)
> 
> Signed-off-by: Ben Widawsky <ben@bwidawsk.net>

Entire series merged to dinq, thanks a lot for the patches and review. On
to the next step in this journey then!

Cheers, Daniel

Chris Wilson July 18, 2013, 12:12 a.m. UTC | #2
On Wed, Jul 17, 2013 at 12:19:03PM -0700, Ben Widawsky wrote:
> Formerly: "drm/i915: Create VMAs (part 1)"
> 
> In a previous patch, the notion of a VM was introduced. A VMA describes
> a region of a VM's address space. A VMA is similar to the concept in the
> Linux mm; however, instead of representing regular memory, a VMA is
> backed by a GEM BO. There may be many VMAs for a given object, one for
> each VM the object is to be used in. Multiple VMAs may arise through
> flink, dma-buf, or a number of other transient states.
> 
> Currently the code depends on only one VMA per object, for the global
> GTT (and aliasing PPGTT). The following patches will address this and
> make the rest of the infrastructure more suited to multiple VMAs.
> 
> v2: s/i915_obj/i915_gem_obj (Chris)
> 
> v3: Only move an object to the now global unbound list if there are no
> more VMAs for the object which are bound into a VM (ie. the list is
> empty).
> 
> v4: killed obj->gtt_space
> some reworks due to rebase
> 
> v5: Free vma on error path (Imre)
> 
> v6: Another missed vma free in i915_gem_object_bind_to_gtt error path
> (Imre)
> Fixed vma freeing in stolen preallocation (Imre)

Big-bada-boom; set-cache-level needs to iterate over VMAs, and in
particular should not dereference a non-existent one. Or, if we decide
that set-cache-level is a ggtt-only property, it should just not explode
if there is no global vma.
-Chris
Ben Widawsky July 18, 2013, 2:31 a.m. UTC | #3
On Thu, Jul 18, 2013 at 01:12:17AM +0100, Chris Wilson wrote:
> Big-bada-boom;

Just from looking at the code I think I see a bug: one which didn't
exist in the original version of the code and doesn't exist after the
very next patch in the overall series.

Now I am terribly curious: why in the world (if that's indeed the bug)
can I not seem to hit this locally on my machine? I'll send the patch
for the fix now, but I'd really like to know what's different in our
setup. I've tried UXA, SNA, and the igt test suite...

> set-cache-level needs to iterate over VMAs, and in
> particular should not dereference a non-existent one. Or, if we decide
> that set-cache-level is a ggtt-only property, it should just not explode
> if there is no global vma.
> -Chris

The current state is that the cache level is still per-object, but this
function itself will iterate over all VMAs; as I said, that happens in
the very next patch in the series. In the original patch series cover
letter, I made per-VMA cache levels an optional TODO. For the time
being, I don't see much benefit.
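
Roughly, the shape after that patch (sketch only; the exact error
handling may differ):

	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_gem_valid_gtt_space(dev, &vma->node,
					      cache_level)) {
			ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
			break;
		}
	}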

Chris Wilson July 18, 2013, 8:19 a.m. UTC | #4
On Wed, Jul 17, 2013 at 07:31:37PM -0700, Ben Widawsky wrote:
> On Thu, Jul 18, 2013 at 01:12:17AM +0100, Chris Wilson wrote:
> > Big-bada-boom;
> 
> Just from looking at the code I think I see a bug: one which didn't
> exist in the original version of the code and doesn't exist after the
> very next patch in the overall series.

Indeed, that was the bug.
 
> Now I am terribly curious: why in the world (if that's indeed the bug)
> can I not seem to hit this locally on my machine? I'll send the patch
> for the fix now, but I'd really like to know what's different in our
> setup. I've tried UXA, SNA, and the igt test suite...

Just requires testing on the forgotten non-LLC generations. Or Baytrail.
-Chris
Imre Deak July 18, 2013, 12:08 p.m. UTC | #5
On Wed, 2013-07-17 at 12:19 -0700, Ben Widawsky wrote:
> Formerly: "drm/i915: Create VMAs (part 1)"
> 
> In a previous patch, the notion of a VM was introduced. A VMA describes
> a region of a VM's address space. A VMA is similar to the concept in the
> Linux mm; however, instead of representing regular memory, a VMA is
> backed by a GEM BO. There may be many VMAs for a given object, one for
> each VM the object is to be used in. Multiple VMAs may arise through
> flink, dma-buf, or a number of other transient states.
> 
> Currently the code depends on only one VMA per object, for the global
> GTT (and aliasing PPGTT). The following patches will address this and
> make the rest of the infrastructure more suited to multiple VMAs.
> 
> v2: s/i915_obj/i915_gem_obj (Chris)
> 
> v3: Only move an object to the now global unbound list if there are no
> more VMAs for the object which are bound into a VM (ie. the list is
> empty).
> 
> v4: killed obj->gtt_space
> some reworks due to rebase
> 
> v5: Free vma on error path (Imre)
> 
> v6: Another missed vma free in i915_gem_object_bind_to_gtt error path
> (Imre)
> Fixed vma freeing in stolen preallocation (Imre)
> 
> Signed-off-by: Ben Widawsky <ben@bwidawsk.net>

Looks ok, so on patches 5-6:
Reviewed-by: Imre Deak <imre.deak@intel.com>

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b3ba428..1a32412 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -533,6 +533,17 @@  struct i915_hw_ppgtt {
 	int (*enable)(struct drm_device *dev);
 };
 
+/* To make things as simple as possible (ie. no refcounting), a VMA's lifetime
+ * will always be <= an objects lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+	struct drm_mm_node node;
+	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm;
+
+	struct list_head vma_link; /* Link in the object's VMA list */
+};
+
 struct i915_ctx_hang_stats {
 	/* This context had batch pending when hang was declared */
 	unsigned batch_pending;
@@ -1229,8 +1240,9 @@  struct drm_i915_gem_object {
 
 	const struct drm_i915_gem_object_ops *ops;
 
-	/** Current space allocated to this object in the GTT, if any. */
-	struct drm_mm_node gtt_space;
+	/** List of VMAs backed by this object */
+	struct list_head vma_list;
+
 	/** Stolen memory for this object, instead of being backed by shmem. */
 	struct drm_mm_node *stolen;
 	struct list_head global_list;
@@ -1356,18 +1368,32 @@  struct drm_i915_gem_object {
 
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
-/* Offset of the first PTE pointing to this object */
-static inline unsigned long
-i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
+/* This is a temporary define to help transition us to real VMAs. If you see
+ * this, you're either reviewing code, or bisecting it. */
+static inline struct i915_vma *
+__i915_gem_obj_to_vma(struct drm_i915_gem_object *obj)
 {
-	return o->gtt_space.start;
+	if (list_empty(&obj->vma_list))
+		return NULL;
+	return list_first_entry(&obj->vma_list, struct i915_vma, vma_link);
 }
 
 /* Whether or not this object is currently mapped by the translation tables */
 static inline bool
 i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
 {
-	return drm_mm_node_allocated(&o->gtt_space);
+	struct i915_vma *vma = __i915_gem_obj_to_vma(o);
+	if (vma == NULL)
+		return false;
+	return drm_mm_node_allocated(&vma->node);
+}
+
+/* Offset of the first PTE pointing to this object */
+static inline unsigned long
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
+{
+	BUG_ON(list_empty(&o->vma_list));
+	return __i915_gem_obj_to_vma(o)->node.start;
 }
 
 /* The size used in the translation tables may be larger than the actual size of
@@ -1377,14 +1403,15 @@  i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
 static inline unsigned long
 i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
 {
-	return o->gtt_space.size;
+	BUG_ON(list_empty(&o->vma_list));
+	return __i915_gem_obj_to_vma(o)->node.size;
 }
 
 static inline void
 i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o,
 			    enum i915_cache_level color)
 {
-	o->gtt_space.color = color;
+	__i915_gem_obj_to_vma(o)->node.color = color;
 }
 
 /**
@@ -1691,6 +1718,9 @@  void i915_gem_object_init(struct drm_i915_gem_object *obj,
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+				     struct i915_address_space *vm);
+void i915_gem_vma_destroy(struct i915_vma *vma);
 
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				     uint32_t alignment,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 812275a..fe7ee32 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2580,6 +2580,7 @@  int
 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 {
 	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+	struct i915_vma *vma;
 	int ret;
 
 	if (!i915_gem_obj_ggtt_bound(obj))
@@ -2617,11 +2618,20 @@  i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	i915_gem_object_unpin_pages(obj);
 
 	list_del(&obj->mm_list);
-	list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 	/* Avoid an unnecessary call to unbind on rebind. */
 	obj->map_and_fenceable = true;
 
-	drm_mm_remove_node(&obj->gtt_space);
+	vma = __i915_gem_obj_to_vma(obj);
+	list_del(&vma->vma_link);
+	drm_mm_remove_node(&vma->node);
+	i915_gem_vma_destroy(vma);
+
+	/* Since the unbound list is global, only move to that list if
+	 * no more VMAs exist.
+	 * NB: Until we have real VMAs there will only ever be one */
+	WARN_ON(!list_empty(&obj->vma_list));
+	if (list_empty(&obj->vma_list))
+		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
 	return 0;
 }
@@ -3051,8 +3061,12 @@  i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 	bool mappable, fenceable;
 	size_t gtt_max = map_and_fenceable ?
 		dev_priv->gtt.mappable_end : dev_priv->gtt.base.total;
+	struct i915_vma *vma;
 	int ret;
 
+	if (WARN_ON(!list_empty(&obj->vma_list)))
+		return -EBUSY;
+
 	fence_size = i915_gem_get_gtt_size(dev,
 					   obj->base.size,
 					   obj->tiling_mode);
@@ -3091,9 +3105,15 @@  i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
 	i915_gem_object_pin_pages(obj);
 
+	vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
+	if (vma == NULL) {
+		i915_gem_object_unpin_pages(obj);
+		return -ENOMEM;
+	}
+
 search_free:
 	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
-						  &obj->gtt_space,
+						  &vma->node,
 						  size, alignment,
 						  obj->cache_level, 0, gtt_max);
 	if (ret) {
@@ -3104,25 +3124,21 @@  search_free:
 		if (ret == 0)
 			goto search_free;
 
-		i915_gem_object_unpin_pages(obj);
-		return ret;
+		goto err_out;
 	}
-	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &obj->gtt_space,
+	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
 					      obj->cache_level))) {
-		i915_gem_object_unpin_pages(obj);
-		drm_mm_remove_node(&obj->gtt_space);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_out;
 	}
 
 	ret = i915_gem_gtt_prepare_object(obj);
-	if (ret) {
-		i915_gem_object_unpin_pages(obj);
-		drm_mm_remove_node(&obj->gtt_space);
-		return ret;
-	}
+	if (ret)
+		goto err_out;
 
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&obj->mm_list, &vm->inactive_list);
+	list_add(&vma->vma_link, &obj->vma_list);
 
 	fenceable =
 		i915_gem_obj_ggtt_size(obj) == fence_size &&
@@ -3136,6 +3152,12 @@  search_free:
 	trace_i915_gem_object_bind(obj, map_and_fenceable);
 	i915_gem_verify_gtt(dev);
 	return 0;
+
+err_out:
+	i915_gem_vma_destroy(vma);
+	i915_gem_object_unpin_pages(obj);
+	drm_mm_remove_node(&vma->node);
+	return ret;
 }
 
 void
@@ -3281,6 +3303,7 @@  int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
 	int ret;
 
 	if (obj->cache_level == cache_level)
@@ -3291,7 +3314,7 @@  int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		return -EBUSY;
 	}
 
-	if (!i915_gem_valid_gtt_space(dev, &obj->gtt_space, cache_level)) {
+	if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
 		ret = i915_gem_object_unbind(obj);
 		if (ret)
 			return ret;
@@ -3836,6 +3859,7 @@  void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	INIT_LIST_HEAD(&obj->global_list);
 	INIT_LIST_HEAD(&obj->ring_list);
 	INIT_LIST_HEAD(&obj->exec_list);
+	INIT_LIST_HEAD(&obj->vma_list);
 
 	obj->ops = ops;
 
@@ -3956,6 +3980,26 @@  void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	i915_gem_object_free(obj);
 }
 
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+				     struct i915_address_space *vm)
+{
+	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+	if (vma == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&vma->vma_link);
+	vma->vm = vm;
+	vma->obj = obj;
+
+	return vma;
+}
+
+void i915_gem_vma_destroy(struct i915_vma *vma)
+{
+	WARN_ON(vma->node.allocated);
+	kfree(vma);
+}
+
 int
 i915_gem_idle(struct drm_device *dev)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 43b8235..df61f33 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -34,11 +34,13 @@ 
 static bool
 mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
 {
+	struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
+
 	if (obj->pin_count)
 		return false;
 
 	list_add(&obj->exec_list, unwind);
-	return drm_mm_scan_add_block(&obj->gtt_space);
+	return drm_mm_scan_add_block(&vma->node);
 }
 
 int
@@ -49,6 +51,7 @@  i915_gem_evict_something(struct drm_device *dev, int min_size,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct list_head eviction_list, unwind_list;
+	struct i915_vma *vma;
 	struct drm_i915_gem_object *obj;
 	int ret = 0;
 
@@ -106,8 +109,8 @@  none:
 		obj = list_first_entry(&unwind_list,
 				       struct drm_i915_gem_object,
 				       exec_list);
-
-		ret = drm_mm_scan_remove_block(&obj->gtt_space);
+		vma = __i915_gem_obj_to_vma(obj);
+		ret = drm_mm_scan_remove_block(&vma->node);
 		BUG_ON(ret);
 
 		list_del_init(&obj->exec_list);
@@ -127,7 +130,8 @@  found:
 		obj = list_first_entry(&unwind_list,
 				       struct drm_i915_gem_object,
 				       exec_list);
-		if (drm_mm_scan_remove_block(&obj->gtt_space)) {
+		vma = __i915_gem_obj_to_vma(obj);
+		if (drm_mm_scan_remove_block(&vma->node)) {
 			list_move(&obj->exec_list, &eviction_list);
 			drm_gem_object_reference(&obj->base);
 			continue;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 999ecfe..3b639a9 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -662,16 +662,17 @@  void i915_gem_setup_global_gtt(struct drm_device *dev,
 
 	/* Mark any preallocated objects as occupied */
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
 		int ret;
 		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
 			      i915_gem_obj_ggtt_offset(obj), obj->base.size);
 
 		WARN_ON(i915_gem_obj_ggtt_bound(obj));
-		ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm,
-					  &obj->gtt_space);
+		ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node);
 		if (ret)
 			DRM_DEBUG_KMS("Reservation failed\n");
 		obj->has_global_gtt_mapping = 1;
+		list_add(&vma->vma_link, &obj->vma_list);
 	}
 
 	dev_priv->gtt.base.start = start;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index a893834..f526136 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -354,6 +354,7 @@  i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *stolen;
+	struct i915_vma *vma;
 	int ret;
 
 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
@@ -393,18 +394,24 @@  i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	if (gtt_offset == I915_GTT_OFFSET_NONE)
 		return obj;
 
+	vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
+	if (!vma) {
+		ret = -ENOMEM;
+		goto err_out;
+	}
+
 	/* To simplify the initialisation sequence between KMS and GTT,
 	 * we allow construction of the stolen object prior to
 	 * setting up the GTT space. The actual reservation will occur
 	 * later.
 	 */
-	obj->gtt_space.start = gtt_offset;
-	obj->gtt_space.size = size;
+	vma->node.start = gtt_offset;
+	vma->node.size = size;
 	if (drm_mm_initialized(&dev_priv->gtt.base.mm)) {
-		ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm,
-					  &obj->gtt_space);
+		ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node);
 		if (ret) {
 			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
+			i915_gem_vma_destroy(vma);
 			goto err_out;
 		}
 	}