
[05/11] drm/i915: Create VMAs

Message ID 1373350122-5118-6-git-send-email-ben@bwidawsk.net (mailing list archive)
State New, archived

Commit Message

Ben Widawsky July 9, 2013, 6:08 a.m. UTC
Formerly: "drm/i915: Create VMAs (part 1)"

In a previous patch, the notion of a VM was introduced. A VMA describes
an area of part of the VM address space. A VMA is similar to the concept
in the linux mm. However, instead of representing regular memory, a VMA
is backed by a GEM BO. There may be many VMAs for a given object, one
for each VM the object is to be used in. This may occur through flink,
dma-buf, or a number of other transient states.

Currently the code depends on only 1 VMA per object, for the global GTT
(and aliasing PPGTT). The following patches will address this and make
the rest of the infrastructure better suited to handling multiple VMAs.

v2: s/i915_obj/i915_gem_obj (Chris)

v3: Only move an object to the now global unbound list if there are no
more VMAs for the object which are bound into a VM (ie. the list is
empty).

v4: killed obj->gtt_space
some reworks due to rebase

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.h        | 48 ++++++++++++++++++++++------
 drivers/gpu/drm/i915/i915_gem.c        | 57 +++++++++++++++++++++++++++++-----
 drivers/gpu/drm/i915/i915_gem_evict.c  | 12 ++++---
 drivers/gpu/drm/i915/i915_gem_gtt.c    |  5 +--
 drivers/gpu/drm/i915/i915_gem_stolen.c | 14 ++++++---
 5 files changed, 110 insertions(+), 26 deletions(-)
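
For orientation, the model this patch introduces is: each drm_i915_gem_object keeps a vma_list, and each i915_vma on that list binds the object into exactly one i915_address_space through its own drm_mm_node. Below is a minimal sketch of a per-VM lookup over those fields; the helper name is hypothetical and not part of this series, which still assumes a single global-GTT VMA per object.

/* Illustrative sketch only, not from the patch: find the VMA that
 * binds an object into a given address space. Uses only the fields
 * this patch adds (vma_list, vma_link, vm). */
static struct i915_vma *
lookup_vma_for_vm(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->vm == vm)
			return vma;

	return NULL;	/* object not bound into this VM */
}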

Comments

Imre Deak July 11, 2013, 11:20 a.m. UTC | #1
On Mon, 2013-07-08 at 23:08 -0700, Ben Widawsky wrote:
> [...]
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 525aa8f..058ad44 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2578,6 +2578,7 @@ int
>  i915_gem_object_unbind(struct drm_i915_gem_object *obj)
>  {
>  	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
> +	struct i915_vma *vma;
>  	int ret;
>  
>  	if (!i915_gem_obj_ggtt_bound(obj))
> @@ -2615,11 +2616,20 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
>  	i915_gem_object_unpin_pages(obj);
>  
>  	list_del(&obj->mm_list);
> -	list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
>  	/* Avoid an unnecessary call to unbind on rebind. */
>  	obj->map_and_fenceable = true;
>  
> -	drm_mm_remove_node(&obj->gtt_space);
> +	vma = __i915_gem_obj_to_vma(obj);
> +	list_del(&vma->vma_link);
> +	drm_mm_remove_node(&vma->node);
> +	i915_gem_vma_destroy(vma);
> +
> +	/* Since the unbound list is global, only move to that list if
> +	 * no more VMAs exist.
> +	 * NB: Until we have real VMAs there will only ever be one */
> +	WARN_ON(!list_empty(&obj->vma_list));
> +	if (list_empty(&obj->vma_list))
> +		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
>  
>  	return 0;
>  }
> @@ -3070,8 +3080,12 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
>  	bool mappable, fenceable;
>  	size_t gtt_max = map_and_fenceable ?
>  		dev_priv->gtt.mappable_end : dev_priv->gtt.base.total;
> +	struct i915_vma *vma;
>  	int ret;
>  
> +	if (WARN_ON(!list_empty(&obj->vma_list)))
> +		return -EBUSY;
> +
>  	fence_size = i915_gem_get_gtt_size(dev,
>  					   obj->base.size,
>  					   obj->tiling_mode);
> @@ -3110,9 +3124,15 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
>  
>  	i915_gem_object_pin_pages(obj);
>  
> +	vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
> +	if (vma == NULL) {
> +		i915_gem_object_unpin_pages(obj);
> +		return -ENOMEM;
> +	}
> +
>  search_free:
>  	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
> -						  &obj->gtt_space,
> +						  &vma->node,
>  						  size, alignment,
>  						  obj->cache_level, 0, gtt_max);
>  	if (ret) {
> @@ -3126,22 +3146,23 @@ search_free:
>  		i915_gem_object_unpin_pages(obj);
>  		return ret;
>  	}
> -	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &obj->gtt_space,
> +	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
>  					      obj->cache_level))) {
>  		i915_gem_object_unpin_pages(obj);
> -		drm_mm_remove_node(&obj->gtt_space);
> +		drm_mm_remove_node(&vma->node);
>  		return -EINVAL;
>  	}
>  
>  	ret = i915_gem_gtt_prepare_object(obj);
>  	if (ret) {
>  		i915_gem_object_unpin_pages(obj);
> -		drm_mm_remove_node(&obj->gtt_space);
> +		drm_mm_remove_node(&vma->node);
>  		return ret;
>  	}

Freeing vma on the error path is missing.
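
Concretely, every error return taken after i915_gem_vma_create() succeeds should also tear the VMA back down. A rough sketch of the missing cleanup on the last quoted path (illustrative only; the eventual fix may be structured differently, e.g. with goto labels):

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret) {
		i915_gem_object_unpin_pages(obj);
		drm_mm_remove_node(&vma->node);	/* node must be removed before the vma is freed */
		i915_gem_vma_destroy(vma);	/* the missing free */
		return ret;
	}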

With this and the issue in 1/5 addressed things look good to me, so on
1-5:

Reviewed-by: Imre Deak <imre.deak@intel.com>

--Imre
Ben Widawsky July 12, 2013, 2:23 a.m. UTC | #2
On Thu, Jul 11, 2013 at 02:20:50PM +0300, Imre Deak wrote:
> On Mon, 2013-07-08 at 23:08 -0700, Ben Widawsky wrote:
> > [...]
> > @@ -3070,8 +3080,12 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
> >  	bool mappable, fenceable;
> >  	size_t gtt_max = map_and_fenceable ?
> >  		dev_priv->gtt.mappable_end : dev_priv->gtt.base.total;
> > +	struct i915_vma *vma;
> >  	int ret;
> >  
> > +	if (WARN_ON(!list_empty(&obj->vma_list)))
> > +		return -EBUSY;
> > +
> >  	fence_size = i915_gem_get_gtt_size(dev,
> >  					   obj->base.size,
> >  					   obj->tiling_mode);
> > @@ -3110,9 +3124,15 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
> >  
> >  	i915_gem_object_pin_pages(obj);
> >  
> > +	vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
> > +	if (vma == NULL) {
> > +		i915_gem_object_unpin_pages(obj);
> > +		return -ENOMEM;
> > +	}
> > +
> >  search_free:
> >  	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
> > -						  &obj->gtt_space,
> > +						  &vma->node,
> >  						  size, alignment,
> >  						  obj->cache_level, 0, gtt_max);
> >  	if (ret) {
> > @@ -3126,22 +3146,23 @@ search_free:
> >  		i915_gem_object_unpin_pages(obj);
> >  		return ret;
> >  	}
> > -	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &obj->gtt_space,
> > +	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
> >  					      obj->cache_level))) {
> >  		i915_gem_object_unpin_pages(obj);
> > -		drm_mm_remove_node(&obj->gtt_space);
> > +		drm_mm_remove_node(&vma->node);
> >  		return -EINVAL;
> >  	}
> >  
> >  	ret = i915_gem_gtt_prepare_object(obj);
> >  	if (ret) {
> >  		i915_gem_object_unpin_pages(obj);
> > -		drm_mm_remove_node(&obj->gtt_space);
> > +		drm_mm_remove_node(&vma->node);
> >  		return ret;
> >  	}
> 
> Freeing vma on the error path is missing.
> 
> With this and the issue in 1/5 addressed things look good to me, so on
> 1-5:
> 
> Reviewed-by: Imre Deak <imre.deak@intel.com>
> 
> --Imre

Nice catch. Rebase fail. I feel no shame in making an excuse that it was
correct in the original series.

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3759c09..38cccc8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -533,6 +533,17 @@  struct i915_hw_ppgtt {
 	int (*enable)(struct drm_device *dev);
 };
 
+/* To make things as simple as possible (ie. no refcounting), a VMA's lifetime
+ * will always be <= an objects lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+	struct drm_mm_node node;
+	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm;
+
+	struct list_head vma_link; /* Link in the object's VMA list */
+};
+
 struct i915_ctx_hang_stats {
 	/* This context had batch pending when hang was declared */
 	unsigned batch_pending;
@@ -1224,8 +1235,9 @@  struct drm_i915_gem_object {
 
 	const struct drm_i915_gem_object_ops *ops;
 
-	/** Current space allocated to this object in the GTT, if any. */
-	struct drm_mm_node gtt_space;
+	/** List of VMAs backed by this object */
+	struct list_head vma_list;
+
 	/** Stolen memory for this object, instead of being backed by shmem. */
 	struct drm_mm_node *stolen;
 	struct list_head global_list;
@@ -1351,18 +1363,32 @@  struct drm_i915_gem_object {
 
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
-/* Offset of the first PTE pointing to this object */
-static inline unsigned long
-i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
+/* This is a temporary define to help transition us to real VMAs. If you see
+ * this, you're either reviewing code, or bisecting it. */
+static inline struct i915_vma *
+__i915_gem_obj_to_vma(struct drm_i915_gem_object *obj)
 {
-	return o->gtt_space.start;
+	if (list_empty(&obj->vma_list))
+		return NULL;
+	return list_first_entry(&obj->vma_list, struct i915_vma, vma_link);
 }
 
 /* Whether or not this object is currently mapped by the translation tables */
 static inline bool
 i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
 {
-	return drm_mm_node_allocated(&o->gtt_space);
+	struct i915_vma *vma = __i915_gem_obj_to_vma(o);
+	if (vma == NULL)
+		return false;
+	return drm_mm_node_allocated(&vma->node);
+}
+
+/* Offset of the first PTE pointing to this object */
+static inline unsigned long
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
+{
+	BUG_ON(list_empty(&o->vma_list));
+	return __i915_gem_obj_to_vma(o)->node.start;
 }
 
 /* The size used in the translation tables may be larger than the actual size of
@@ -1372,14 +1398,15 @@  i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
 static inline unsigned long
 i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
 {
-	return o->gtt_space.size;
+	BUG_ON(list_empty(&o->vma_list));
+	return __i915_gem_obj_to_vma(o)->node.size;
 }
 
 static inline void
 i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o,
 			    enum i915_cache_level color)
 {
-	o->gtt_space.color = color;
+	__i915_gem_obj_to_vma(o)->node.color = color;
 }
 
 /**
@@ -1694,6 +1721,9 @@  void i915_gem_object_init(struct drm_i915_gem_object *obj,
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+				     struct i915_address_space *vm);
+void i915_gem_vma_destroy(struct i915_vma *vma);
 
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				     uint32_t alignment,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 525aa8f..058ad44 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2578,6 +2578,7 @@  int
 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 {
 	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+	struct i915_vma *vma;
 	int ret;
 
 	if (!i915_gem_obj_ggtt_bound(obj))
@@ -2615,11 +2616,20 @@  i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	i915_gem_object_unpin_pages(obj);
 
 	list_del(&obj->mm_list);
-	list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 	/* Avoid an unnecessary call to unbind on rebind. */
 	obj->map_and_fenceable = true;
 
-	drm_mm_remove_node(&obj->gtt_space);
+	vma = __i915_gem_obj_to_vma(obj);
+	list_del(&vma->vma_link);
+	drm_mm_remove_node(&vma->node);
+	i915_gem_vma_destroy(vma);
+
+	/* Since the unbound list is global, only move to that list if
+	 * no more VMAs exist.
+	 * NB: Until we have real VMAs there will only ever be one */
+	WARN_ON(!list_empty(&obj->vma_list));
+	if (list_empty(&obj->vma_list))
+		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
 	return 0;
 }
@@ -3070,8 +3080,12 @@  i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 	bool mappable, fenceable;
 	size_t gtt_max = map_and_fenceable ?
 		dev_priv->gtt.mappable_end : dev_priv->gtt.base.total;
+	struct i915_vma *vma;
 	int ret;
 
+	if (WARN_ON(!list_empty(&obj->vma_list)))
+		return -EBUSY;
+
 	fence_size = i915_gem_get_gtt_size(dev,
 					   obj->base.size,
 					   obj->tiling_mode);
@@ -3110,9 +3124,15 @@  i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
 	i915_gem_object_pin_pages(obj);
 
+	vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
+	if (vma == NULL) {
+		i915_gem_object_unpin_pages(obj);
+		return -ENOMEM;
+	}
+
 search_free:
 	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
-						  &obj->gtt_space,
+						  &vma->node,
 						  size, alignment,
 						  obj->cache_level, 0, gtt_max);
 	if (ret) {
@@ -3126,22 +3146,23 @@  search_free:
 		i915_gem_object_unpin_pages(obj);
 		return ret;
 	}
-	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &obj->gtt_space,
+	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
 					      obj->cache_level))) {
 		i915_gem_object_unpin_pages(obj);
-		drm_mm_remove_node(&obj->gtt_space);
+		drm_mm_remove_node(&vma->node);
 		return -EINVAL;
 	}
 
 	ret = i915_gem_gtt_prepare_object(obj);
 	if (ret) {
 		i915_gem_object_unpin_pages(obj);
-		drm_mm_remove_node(&obj->gtt_space);
+		drm_mm_remove_node(&vma->node);
 		return ret;
 	}
 
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&obj->mm_list, &vm->inactive_list);
+	list_add(&vma->vma_link, &obj->vma_list);
 
 	fenceable =
 		i915_gem_obj_ggtt_size(obj) == fence_size &&
@@ -3300,6 +3321,7 @@  int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
 	int ret;
 
 	if (obj->cache_level == cache_level)
@@ -3310,7 +3332,7 @@  int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		return -EBUSY;
 	}
 
-	if (!i915_gem_valid_gtt_space(dev, &obj->gtt_space, cache_level)) {
+	if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
 		ret = i915_gem_object_unbind(obj);
 		if (ret)
 			return ret;
@@ -3855,6 +3877,7 @@  void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	INIT_LIST_HEAD(&obj->global_list);
 	INIT_LIST_HEAD(&obj->ring_list);
 	INIT_LIST_HEAD(&obj->exec_list);
+	INIT_LIST_HEAD(&obj->vma_list);
 
 	obj->ops = ops;
 
@@ -3975,6 +3998,26 @@  void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	i915_gem_object_free(obj);
 }
 
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+				     struct i915_address_space *vm)
+{
+	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+	if (vma == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&vma->vma_link);
+	vma->vm = vm;
+	vma->obj = obj;
+
+	return vma;
+}
+
+void i915_gem_vma_destroy(struct i915_vma *vma)
+{
+	WARN_ON(vma->node.allocated);
+	kfree(vma);
+}
+
 int
 i915_gem_idle(struct drm_device *dev)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 43b8235..df61f33 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -34,11 +34,13 @@ 
 static bool
 mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
 {
+	struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
+
 	if (obj->pin_count)
 		return false;
 
 	list_add(&obj->exec_list, unwind);
-	return drm_mm_scan_add_block(&obj->gtt_space);
+	return drm_mm_scan_add_block(&vma->node);
 }
 
 int
@@ -49,6 +51,7 @@  i915_gem_evict_something(struct drm_device *dev, int min_size,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct list_head eviction_list, unwind_list;
+	struct i915_vma *vma;
 	struct drm_i915_gem_object *obj;
 	int ret = 0;
 
@@ -106,8 +109,8 @@  none:
 		obj = list_first_entry(&unwind_list,
 				       struct drm_i915_gem_object,
 				       exec_list);
-
-		ret = drm_mm_scan_remove_block(&obj->gtt_space);
+		vma = __i915_gem_obj_to_vma(obj);
+		ret = drm_mm_scan_remove_block(&vma->node);
 		BUG_ON(ret);
 
 		list_del_init(&obj->exec_list);
@@ -127,7 +130,8 @@  found:
 		obj = list_first_entry(&unwind_list,
 				       struct drm_i915_gem_object,
 				       exec_list);
-		if (drm_mm_scan_remove_block(&obj->gtt_space)) {
+		vma = __i915_gem_obj_to_vma(obj);
+		if (drm_mm_scan_remove_block(&vma->node)) {
 			list_move(&obj->exec_list, &eviction_list);
 			drm_gem_object_reference(&obj->base);
 			continue;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b9400e9..298fc42 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -639,16 +639,17 @@  void i915_gem_setup_global_gtt(struct drm_device *dev,
 
 	/* Mark any preallocated objects as occupied */
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
 		int ret;
 		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
 			      i915_gem_obj_ggtt_offset(obj), obj->base.size);
 
 		WARN_ON(i915_gem_obj_ggtt_bound(obj));
-		ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm,
-					  &obj->gtt_space);
+		ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node);
 		if (ret)
 			DRM_DEBUG_KMS("Reservation failed\n");
 		obj->has_global_gtt_mapping = 1;
+		list_add(&vma->vma_link, &obj->vma_list);
 	}
 
 	dev_priv->gtt.base.start = start;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index a4c3136..245eb1d 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -351,6 +351,7 @@  i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *stolen;
+	struct i915_vma *vma;
 	int ret;
 
 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
@@ -390,16 +391,21 @@  i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	if (gtt_offset == I915_GTT_OFFSET_NONE)
 		return obj;
 
+	vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
+	if (!vma) {
+		drm_gem_object_unreference(&obj->base);
+		return NULL;
+	}
+
 	/* To simplify the initialisation sequence between KMS and GTT,
 	 * we allow construction of the stolen object prior to
 	 * setting up the GTT space. The actual reservation will occur
 	 * later.
 	 */
-	obj->gtt_space.start = gtt_offset;
-	obj->gtt_space.size = size;
+	vma->node.start = gtt_offset;
+	vma->node.size = size;
 	if (drm_mm_initialized(&dev_priv->gtt.base.mm)) {
-		ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm,
-					  &obj->gtt_space);
+		ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node);
 		if (ret) {
 			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
 			goto unref_out;