In order to transition more of our code over to using a VMA instead of
an <OBJ, VM> pair, we must have the VMA accessible at execbuf time. Up
until now, we've only had a VMA when actually binding an object.

The previous patch helped handle the distinction on bound vs. unbound.
This patch will help us catch leaks, and other issues before we
actually shuffle a bunch of stuff around.

Note that eb_lookup_objects() now looks up or creates the VMA while
holding file->table_lock, so i915_gem_vma_create() may no longer sleep;
its allocation therefore moves from GFP_KERNEL to GFP_ATOMIC.

The subsequent patch to fix up the rest of execbuf should be mostly
just moving code around; this is the major functional change.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.h            |  3 +++
 drivers/gpu/drm/i915/i915_gem.c            | 26 ++++++++++++++++++--------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 12 ++++++++++--
 3 files changed, 31 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1867,6 +1867,9 @@ void i915_gem_obj_set_color(struct drm_i915_gem_object *o,
enum i915_cache_level color);
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3111,9 +3111,6 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
- if (WARN_ON(!list_empty(&obj->vma_list)))
- return -EBUSY;
-
BUG_ON(!i915_is_ggtt(vm));
fence_size = i915_gem_get_gtt_size(dev,
@@ -3154,15 +3151,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
i915_gem_object_pin_pages(obj);
- /* For now we only ever use 1 vma per object */
- WARN_ON(!list_empty(&obj->vma_list));
-
- vma = i915_gem_vma_create(obj, vm);
+ vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
if (IS_ERR(vma)) {
i915_gem_object_unpin_pages(obj);
return PTR_ERR(vma);
}
+ /* For now we only ever use 1 vma per object */
+ WARN_ON(!list_is_singular(&obj->vma_list));
+
search_free:
ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
size, alignment,
@@ -4054,7 +4051,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
- struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_ATOMIC);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
@@ -4829,3 +4826,16 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
return NULL;
}
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+
+ vma = i915_gem_obj_to_vma(obj, vm);
+ if (!vma)
+ vma = i915_gem_vma_create(obj, vm);
+
+ return vma;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -85,12 +85,14 @@ static int
eb_lookup_objects(struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec,
const struct drm_i915_gem_execbuffer2 *args,
+ struct i915_address_space *vm,
struct drm_file *file)
{
int i;
spin_lock(&file->table_lock);
for (i = 0; i < args->buffer_count; i++) {
+ struct i915_vma *vma;
struct drm_i915_gem_object *obj;
obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
@@ -111,6 +113,12 @@ eb_lookup_objects(struct eb_objects *eb,
drm_gem_object_reference(&obj->base);
list_add_tail(&obj->exec_list, &eb->objects);
+		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+		if (IS_ERR(vma)) {
+			spin_unlock(&file->table_lock);
+			return PTR_ERR(vma);
+		}
+
obj->exec_entry = &exec[i];
if (eb->and < 0) {
eb->lut[i] = obj;
@@ -666,7 +672,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
/* reacquire the objects */
eb_reset(eb);
- ret = eb_lookup_objects(eb, exec, args, file);
+ ret = eb_lookup_objects(eb, exec, args, vm, file);
if (ret)
goto err;
@@ -1001,7 +1007,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
/* Look up object handles */
- ret = eb_lookup_objects(eb, exec, args, file);
+ ret = eb_lookup_objects(eb, exec, args, vm, file);
if (ret)
goto err;
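
A note for readers less familiar with the idiom: i915_gem_obj_lookup_or_create_vma()
makes "give me the VMA for this <obj, vm> pair" idempotent, so execbuf can call it
for every buffer without caring whether the object is already bound. Below is a
minimal, self-contained userspace sketch of that lookup-or-create shape; the
obj/vm/vma types and function names are hypothetical stand-ins, not the kernel
structures.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for drm_i915_gem_object and i915_address_space;
 * this is not the kernel API, only the same lookup-or-create shape. */
struct vm { int id; };

struct vma {
	struct vma *next;		/* toy analogue of obj->vma_list */
	const struct vm *vm;
};

struct obj {
	struct vma *vma_list;
};

/* Analogue of i915_gem_obj_to_vma(): return the existing mapping, if any. */
static struct vma *obj_to_vma(struct obj *obj, const struct vm *vm)
{
	struct vma *v;

	for (v = obj->vma_list; v != NULL; v = v->next)
		if (v->vm == vm)
			return v;
	return NULL;
}

/* Analogue of i915_gem_obj_lookup_or_create_vma(): idempotent per <obj, vm>.
 * (The kernel helper returns an ERR_PTR on allocation failure, not NULL.) */
static struct vma *obj_lookup_or_create_vma(struct obj *obj, const struct vm *vm)
{
	struct vma *v = obj_to_vma(obj, vm);

	if (v)				/* reuse the existing vma */
		return v;

	v = calloc(1, sizeof(*v));	/* create path: only runs on first use */
	if (!v)
		return NULL;
	v->vm = vm;
	v->next = obj->vma_list;
	obj->vma_list = v;
	return v;
}

int main(void)
{
	struct obj o = { NULL };
	struct vm ggtt = { 1 };

	struct vma *a = obj_lookup_or_create_vma(&o, &ggtt);
	struct vma *b = obj_lookup_or_create_vma(&o, &ggtt);

	/* The second call finds the vma created by the first. */
	printf("same vma: %s\n", (a && a == b) ? "yes" : "no");
	free(a);
	return 0;
}

In the patch itself the create path runs while eb_lookup_objects() holds
file->table_lock, so the allocation in i915_gem_vma_create() must not sleep,
hence GFP_ATOMIC; dropping the lock before looking up the VMAs would be an
alternative design that lets the allocation stay GFP_KERNEL.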