diff mbox

[09/12] drm/i915: create vmas at execbuf

Message ID 1374458899-8635-10-git-send-email-ben@bwidawsk.net (mailing list archive)
State New, archived
Headers show

Commit Message

Ben Widawsky July 22, 2013, 2:08 a.m. UTC
In order to transition more of our code over to using a VMA instead of
an <OBJ, VM> pair - we must have the vma accessible at execbuf time. Up
until now, we've only had a VMA when actually binding an object.

The previous patch helped handle the distinction between bound and unbound.
This patch will help us catch leaks, and other issues before we actually
shuffle a bunch of stuff around.

The subsequent patch to fix up the rest of execbuf should be mostly just
moving code around, and this is the major functional change.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.h            |  3 +++
 drivers/gpu/drm/i915/i915_gem.c            | 26 ++++++++++++++++++--------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 10 ++++++++--
 3 files changed, 29 insertions(+), 10 deletions(-)

Comments

Chris Wilson July 22, 2013, 1:32 p.m. UTC | #1
On Sun, Jul 21, 2013 at 07:08:16PM -0700, Ben Widawsky wrote:
> @@ -4054,7 +4051,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
>  struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
>  				     struct i915_address_space *vm)
>  {
> -	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
> +	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_ATOMIC);

Hmm, possibly best to eradicate the allocations from underneath the
spinlock as the number may be rather large.
-Chris
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8d6aa34..59a8c03 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1867,6 +1867,9 @@  void i915_gem_obj_set_color(struct drm_i915_gem_object *o,
 			    enum i915_cache_level color);
 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+				  struct i915_address_space *vm);
 /* Some GGTT VM helpers */
 #define obj_to_ggtt(obj) \
 	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a6dc653..0fa6667 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3111,9 +3111,6 @@  i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	struct i915_vma *vma;
 	int ret;
 
-	if (WARN_ON(!list_empty(&obj->vma_list)))
-		return -EBUSY;
-
 	BUG_ON(!i915_is_ggtt(vm));
 
 	fence_size = i915_gem_get_gtt_size(dev,
@@ -3154,15 +3151,15 @@  i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 
 	i915_gem_object_pin_pages(obj);
 
-	/* For now we only ever use 1 vma per object */
-	WARN_ON(!list_empty(&obj->vma_list));
-
-	vma = i915_gem_vma_create(obj, vm);
+	vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
 	if (IS_ERR(vma)) {
 		i915_gem_object_unpin_pages(obj);
 		return PTR_ERR(vma);
 	}
 
+	/* For now we only ever use 1 vma per object */
+	WARN_ON(!list_is_singular(&obj->vma_list));
+
 search_free:
 	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 						  size, alignment,
@@ -4054,7 +4051,7 @@  void i915_gem_free_object(struct drm_gem_object *gem_obj)
 struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm)
 {
-	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_ATOMIC);
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
@@ -4829,3 +4826,16 @@  struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 
 	return NULL;
 }
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+				  struct i915_address_space *vm)
+{
+	struct i915_vma *vma;
+
+	vma = i915_gem_obj_to_vma(obj, vm);
+	if (!vma)
+		vma = i915_gem_vma_create(obj, vm);
+
+	return vma;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 6359ef2..1f82a04 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -85,12 +85,14 @@  static int
 eb_lookup_objects(struct eb_objects *eb,
 		  struct drm_i915_gem_exec_object2 *exec,
 		  const struct drm_i915_gem_execbuffer2 *args,
+		  struct i915_address_space *vm,
 		  struct drm_file *file)
 {
 	int i;
 
 	spin_lock(&file->table_lock);
 	for (i = 0; i < args->buffer_count; i++) {
+		struct i915_vma *vma;
 		struct drm_i915_gem_object *obj;
 
 		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
@@ -111,6 +113,10 @@  eb_lookup_objects(struct eb_objects *eb,
 		drm_gem_object_reference(&obj->base);
 		list_add_tail(&obj->exec_list, &eb->objects);
 
+		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
+
 		obj->exec_entry = &exec[i];
 		if (eb->and < 0) {
 			eb->lut[i] = obj;
@@ -666,7 +672,7 @@  i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 
 	/* reacquire the objects */
 	eb_reset(eb);
-	ret = eb_lookup_objects(eb, exec, args, file);
+	ret = eb_lookup_objects(eb, exec, args, vm, file);
 	if (ret)
 		goto err;
 
@@ -1001,7 +1007,7 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	/* Look up object handles */
-	ret = eb_lookup_objects(eb, exec, args, file);
+	ret = eb_lookup_objects(eb, exec, args, vm, file);
 	if (ret)
 		goto err;