@@ -9,7 +9,6 @@
struct i915_sleeve {
struct i915_vma *vma;
- struct drm_i915_gem_object *obj;
struct sg_table *pages;
struct i915_page_sizes page_sizes;
};
@@ -72,7 +71,6 @@ static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
vma->ops = &proxy_vma_ops;
sleeve->vma = vma;
- sleeve->obj = i915_gem_object_get(obj);
sleeve->pages = pages;
sleeve->page_sizes = *page_sizes;
@@ -85,7 +83,7 @@ static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
static void destroy_sleeve(struct i915_sleeve *sleeve)
{
- i915_gem_object_put(sleeve->obj);
+ i915_vma_put(sleeve->vma);
kfree(sleeve);
}
@@ -155,7 +153,7 @@ static void clear_pages_worker(struct work_struct *work)
{
struct clear_pages_work *w = container_of(work, typeof(*w), work);
struct drm_i915_private *i915 = w->ce->gem_context->i915;
- struct drm_i915_gem_object *obj = w->sleeve->obj;
+ struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
struct i915_vma *vma = w->sleeve->vma;
struct i915_request *rq;
int err = w->dma.error;
@@ -127,10 +127,10 @@ static void lut_close(struct i915_gem_context *ctx)
if (&lut->obj_link != &obj->lut_list) {
i915_lut_handle_free(lut);
radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
- if (atomic_dec_and_test(&vma->open_count) &&
- !i915_vma_is_ggtt(vma))
+ GEM_BUG_ON(!atomic_read(&vma->open_count));
+ if (atomic_dec_and_test(&vma->open_count))
i915_vma_close(vma);
- i915_gem_object_put(obj);
+ i915_vma_put(vma);
}
i915_gem_object_put(obj);
@@ -797,8 +797,8 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb)
static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
- struct drm_i915_gem_object *obj;
unsigned int i, batch;
+ struct i915_vma *vma;
int err;
if (unlikely(i915_gem_context_is_banned(eb->gem_context)))
@@ -817,8 +817,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
for (i = 0; i < eb->buffer_count; i++) {
u32 handle = eb->exec[i].handle;
+ struct drm_i915_gem_object *obj;
struct i915_lut_handle *lut;
- struct i915_vma *vma;
vma = radix_tree_lookup(handles_vma, handle);
if (likely(vma))
@@ -831,21 +831,22 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
}
vma = i915_vma_instance(obj, eb->vm, NULL);
+ i915_gem_object_put(obj);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err_obj;
+ goto err_vma;
}
lut = i915_lut_handle_alloc();
if (unlikely(!lut)) {
err = -ENOMEM;
- goto err_obj;
+ goto err_put;
}
err = radix_tree_insert(handles_vma, handle, vma);
if (unlikely(err)) {
i915_lut_handle_free(lut);
- goto err_obj;
+ goto err_put;
}
/* transfer ref to lut */
@@ -874,8 +875,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
eb->args->flags |= __EXEC_VALIDATED;
return eb_reserve(eb);
-err_obj:
- i915_gem_object_put(obj);
+err_put:
+ i915_vma_put(vma);
err_vma:
eb->vma[i] = NULL;
err_ctx:
@@ -1226,7 +1227,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
if (err)
- goto err_unmap;
+ goto err_batch;
rq = i915_request_create(eb->context);
if (IS_ERR(rq)) {
@@ -1267,6 +1268,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
i915_request_add(rq);
err_unpin:
i915_vma_unpin(batch);
+err_batch:
+ i915_vma_put(batch);
err_unmap:
i915_gem_object_unpin_map(obj);
return err;
@@ -122,15 +122,14 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
if (vma) {
GEM_BUG_ON(vma->obj != obj);
GEM_BUG_ON(!atomic_read(&vma->open_count));
- if (atomic_dec_and_test(&vma->open_count) &&
- !i915_vma_is_ggtt(vma))
+ if (atomic_dec_and_test(&vma->open_count))
i915_vma_close(vma);
+ i915_vma_put(vma);
}
mutex_unlock(&ctx->mutex);
i915_gem_context_put(lut->ctx);
i915_lut_handle_free(lut);
- i915_gem_object_put(obj);
}
}
@@ -169,17 +168,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
wakeref = intel_runtime_pm_get(i915);
llist_for_each_entry_safe(obj, on, freed, freed) {
- struct i915_vma *vma, *vn;
-
trace_i915_gem_object_destroy(obj);
- mutex_lock(&i915->drm.struct_mutex);
-
- list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
- GEM_BUG_ON(i915_vma_is_active(vma));
- vma->flags &= ~I915_VMA_PIN_MASK;
- i915_vma_destroy(vma);
- }
GEM_BUG_ON(!list_empty(&obj->vma.list));
GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
@@ -199,8 +189,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
- mutex_unlock(&i915->drm.struct_mutex);
-
GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
GEM_BUG_ON(!list_empty(&obj->lut_list));
@@ -61,7 +61,7 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (unlikely(err))
- return err;
+ goto out_put;
if (obj->cache_dirty & ~obj->cache_coherent) {
i915_gem_object_lock(obj);
@@ -99,6 +99,8 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
i915_request_add(rq);
out_unpin:
i915_vma_unpin(vma);
+out_put:
+ i915_vma_put(vma);
return err;
}
@@ -413,7 +413,9 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err)
-		goto out_close;
+	if (err) {
+		i915_vma_put(vma);
+		goto out_put;
+	}
err = igt_check_page_sizes(vma);
@@ -424,7 +424,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
}
i915_vma_unpin(vma);
- i915_vma_close(vma);
+ i915_vma_put(vma);
i915_gem_object_put(obj);
@@ -435,8 +435,6 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
goto out_device;
-out_close:
- i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
out_device:
@@ -498,11 +496,8 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
 	err = i915_vma_pin(vma, 0, 0, flags);
-	if (err) {
-		i915_vma_close(vma);
+	if (err)
 		goto out_unpin;
-	}
-
err = igt_check_page_sizes(vma);
if (vma->page_sizes.gtt != page_size) {
@@ -514,7 +510,5 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
 	i915_vma_unpin(vma);
-	if (err) {
-		i915_vma_close(vma);
+	if (err)
 		goto out_unpin;
-	}
@@ -526,13 +521,9 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
 	for (offset = 4096; offset < page_size; offset += 4096) {
 		err = i915_vma_unbind(vma);
-		if (err) {
-			i915_vma_close(vma);
+		if (err)
 			goto out_unpin;
-		}
 
 		err = i915_vma_pin(vma, 0, 0, flags | offset);
-		if (err) {
-			i915_vma_close(vma);
+		if (err)
 			goto out_unpin;
-		}
@@ -547,7 +540,5 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
 	i915_vma_unpin(vma);
-	if (err) {
-		i915_vma_close(vma);
+	if (err)
 		goto out_unpin;
-	}
@@ -557,8 +549,6 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
break;
}
- i915_vma_close(vma);
-
i915_gem_object_unpin_pages(obj);
__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
i915_gem_object_put(obj);
@@ -583,8 +573,8 @@ static void close_object_list(struct list_head *objects,
 		struct i915_vma *vma;
 
 		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
-		if (!IS_ERR(vma))
-			i915_vma_close(vma);
+		if (!IS_ERR(vma))
+			i915_vma_put(vma);
list_del(&obj->st_link);
i915_gem_object_unpin_pages(obj);
@@ -855,7 +846,6 @@ static int igt_mock_ppgtt_64K(void *arg)
}
i915_vma_unpin(vma);
- i915_vma_close(vma);
i915_gem_object_unpin_pages(obj);
__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
@@ -868,7 +858,6 @@ static int igt_mock_ppgtt_64K(void *arg)
out_vma_unpin:
i915_vma_unpin(vma);
out_vma_close:
- i915_vma_close(vma);
out_object_unpin:
i915_gem_object_unpin_pages(obj);
out_object_put:
@@ -993,7 +982,6 @@ static int gpu_write(struct i915_vma *vma,
i915_request_add(rq);
err_batch:
i915_vma_unpin(batch);
- i915_vma_close(batch);
i915_vma_put(batch);
return err;
@@ -1082,7 +1070,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
out_vma_unpin:
i915_vma_unpin(vma);
out_vma_close:
- i915_vma_destroy(vma);
+ i915_vma_put(vma);
return err;
}
@@ -1492,7 +1480,7 @@ static int igt_ppgtt_pin_update(void *arg)
 			goto out_unpin;
 
 		i915_vma_unpin(vma);
-		i915_vma_close(vma);
+		i915_vma_put(vma);
 		i915_gem_object_put(obj);
 	}
@@ -1527,7 +1514,7 @@ static int igt_ppgtt_pin_update(void *arg)
 out_unpin:
 	i915_vma_unpin(vma);
 out_close:
-	i915_vma_close(vma);
+	i915_vma_put(vma);
 out_put:
 	i915_gem_object_put(obj);
@@ -1583,7 +1569,7 @@ static int igt_tmpfs_fallback(void *arg)
 	i915_vma_unpin(vma);
 out_close:
-	i915_vma_close(vma);
+	i915_vma_put(vma);
 out_put:
 	i915_gem_object_put(obj);
out_restore:
@@ -1667,7 +1652,7 @@ static int igt_shrink_thp(void *arg)
 out_unpin:
 	i915_vma_unpin(vma);
 out_close:
-	i915_vma_close(vma);
+	i915_vma_put(vma);
 out_put:
 	i915_gem_object_put(obj);
@@ -210,14 +210,15 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
i915_gem_object_unpin_map(obj);
vma = i915_vma_instance(obj, vma->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err;
- }
+ i915_gem_object_put(obj);
+ if (IS_ERR(vma))
+ return vma;
err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto err;
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
return vma;
@@ -314,7 +315,6 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
i915_request_add(rq);
i915_vma_unpin(batch);
- i915_vma_close(batch);
i915_vma_put(batch);
i915_vma_unpin(vma);
@@ -795,7 +795,6 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
goto skip_request;
i915_vma_unpin(batch);
- i915_vma_close(batch);
i915_vma_put(batch);
i915_vma_unpin(vma);
@@ -1359,7 +1358,6 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto skip_request;
i915_vma_unpin(vma);
- i915_vma_close(vma);
i915_vma_put(vma);
i915_request_add(rq);
@@ -1456,7 +1454,6 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto skip_request;
i915_vma_unpin(vma);
- i915_vma_close(vma);
i915_request_add(rq);
@@ -165,10 +165,10 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
*cpu = 0;
drm_clflush_virt_range(cpu, sizeof(*cpu));
kunmap(p);
- if (err)
- return err;
i915_vma_destroy(vma);
+ if (err)
+ return err;
}
return 0;
@@ -337,12 +337,12 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
- return err;
+ goto out_put;
rq = i915_request_create(i915->engine[RCS0]->kernel_context);
if (IS_ERR(rq)) {
- i915_vma_unpin(vma);
- return PTR_ERR(rq);
+ err = PTR_ERR(rq);
+ goto out_unpin;
}
i915_vma_lock(vma);
@@ -351,9 +351,10 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
i915_request_add(rq);
+out_unpin:
i915_vma_unpin(vma);
- i915_gem_object_put(obj); /* leave it only alive via its active ref */
-
+out_put:
+ i915_vma_put(vma); /* leave it only alive via its active ref */
return err;
}
@@ -531,7 +531,7 @@ static void cleanup_status_page(struct intel_engine_cs *engine)
i915_vma_unpin(vma);
i915_gem_object_unpin_map(vma->obj);
- i915_gem_object_put(vma->obj);
+ i915_vma_put(vma);
}
static int pin_ggtt_status_page(struct intel_engine_cs *engine,
@@ -590,7 +590,7 @@ static int init_status_page(struct intel_engine_cs *engine)
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
- goto err;
+ goto err_vma;
}
engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
@@ -602,10 +602,13 @@ static int init_status_page(struct intel_engine_cs *engine)
goto err_unpin;
}
+ i915_gem_object_put(obj);
return 0;
err_unpin:
i915_gem_object_unpin_map(obj);
+err_vma:
+ i915_vma_put(vma);
err:
i915_gem_object_put(obj);
return ret;
@@ -1981,10 +1981,9 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
return PTR_ERR(obj);
vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err;
- }
+ i915_gem_object_put(obj);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
@@ -1994,7 +1993,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
return 0;
err:
- i915_gem_object_put(obj);
+ i915_vma_put(vma);
return err;
}
@@ -3063,15 +3062,14 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
return PTR_ERR(ctx_obj);
vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto error_deref_obj;
- }
+ i915_gem_object_put(ctx_obj);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
timeline = get_timeline(ce->gem_context);
if (IS_ERR(timeline)) {
ret = PTR_ERR(timeline);
- goto error_deref_obj;
+ goto err_vma;
}
ring = intel_engine_create_ring(engine,
@@ -3080,13 +3078,13 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
i915_timeline_put(timeline);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
- goto error_deref_obj;
+ goto err_vma;
}
ret = populate_lr_context(ce, ctx_obj, engine, ring);
if (ret) {
DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
- goto error_ring_free;
+ goto err_ring;
}
ce->ring = ring;
@@ -3094,10 +3092,10 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
return 0;
-error_ring_free:
+err_ring:
intel_ring_put(ring);
-error_deref_obj:
- i915_gem_object_put(ctx_obj);
+err_vma:
+ i915_vma_put(vma);
return ret;
}
@@ -1246,13 +1246,8 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
i915_gem_object_set_readonly(obj);
vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma))
- goto err;
-
- return vma;
-
-err:
i915_gem_object_put(obj);
+
return vma;
}
@@ -1300,7 +1295,6 @@ void intel_ring_free(struct kref *ref)
{
struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
- i915_vma_close(ring->vma);
i915_vma_put(ring->vma);
i915_timeline_put(ring->timeline);
@@ -1404,10 +1398,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
}
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
+ i915_gem_object_put(obj);
return vma;
@@ -1310,20 +1310,19 @@ create_scratch(struct i915_address_space *vm, int count)
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
+ i915_gem_object_put(obj);
+ if (IS_ERR(vma))
+ return vma;
err = i915_vma_pin(vma, 0, 0,
i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
if (err)
- goto err_obj;
+ goto err_put;
return vma;
-err_obj:
- i915_gem_object_put(obj);
+err_put:
+ i915_vma_put(vma);
return ERR_PTR(err);
}
@@ -1403,8 +1402,7 @@ static int engine_wa_list_verify(struct intel_context *ce,
i915_gem_object_unpin_map(vma->obj);
err_vma:
- i915_vma_unpin(vma);
- i915_vma_put(vma);
+ i915_vma_destroy(vma);
return err;
}
@@ -110,7 +110,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
if (err)
- goto err_obj;
+ goto err_vma;
rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
@@ -144,6 +144,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
i915_request_add(rq);
i915_vma_unpin(vma);
+ i915_vma_put(vma);
return result;
@@ -151,6 +152,8 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
i915_request_add(rq);
err_pin:
i915_vma_unpin(vma);
+err_vma:
+ i915_vma_put(vma);
err_obj:
i915_gem_object_put(result);
return ERR_PTR(err);
@@ -359,19 +362,18 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx)
return ERR_CAST(obj);
vma = i915_vma_instance(obj, ctx->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
+ i915_gem_object_put(obj);
+ if (IS_ERR(vma))
+ return vma;
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
- goto err_obj;
+ goto err_put;
return vma;
-err_obj:
- i915_gem_object_put(obj);
+err_put:
+ i915_vma_put(vma);
return ERR_PTR(err);
}
@@ -595,7 +595,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
if (bb->vma && !IS_ERR(bb->vma)) {
i915_vma_unpin(bb->vma);
- i915_vma_close(bb->vma);
+ i915_vma_put(bb->vma);
}
i915_gem_object_put(bb->obj);
}
@@ -112,11 +112,14 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
struct i915_vma,
obj_link))) {
+ i915_vma_get(vma);
+
list_move_tail(&vma->obj_link, &still_in_list);
spin_unlock(&obj->vma.lock);
ret = i915_vma_unbind(vma);
+ i915_vma_put(vma);
spin_lock(&obj->vma.lock);
}
list_splice(&still_in_list, &obj->vma.list);
@@ -1095,11 +1098,14 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
!!(flags & PIN_MAPPABLE),
i915_vma_is_map_and_fenceable(vma));
ret = i915_vma_unbind(vma);
- if (ret)
+ if (ret) {
+ i915_vma_put(vma);
return ERR_PTR(ret);
+ }
}
ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+ i915_vma_put(vma);
if (ret)
return ERR_PTR(ret);
@@ -1476,20 +1482,19 @@ i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
}
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_unref;
- }
+ i915_gem_object_put(obj);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (ret)
- goto err_unref;
+ goto err_put;
i915->gt.scratch = vma;
return 0;
-err_unref:
- i915_gem_object_put(obj);
+err_put:
+ i915_vma_put(vma);
return ret;
}
@@ -1931,12 +1931,9 @@ static void gen6_ppgtt_cleanup_work(struct work_struct *wrk)
{
struct gen6_ppgtt_cleanup_work *work =
container_of(wrk, typeof(*work), base);
- /* Side note, vma->vm is the GGTT not the ppgtt we just destroyed! */
- struct drm_i915_private *i915 = work->vma->vm->i915;
- mutex_lock(&i915->drm.struct_mutex);
+ /* Side note, vma->vm is the GGTT not the ppgtt we just destroyed! */
i915_vma_destroy(work->vma);
- mutex_unlock(&i915->drm.struct_mutex);
kfree(work);
}
@@ -2060,6 +2057,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
if (!vma)
return ERR_PTR(-ENOMEM);
+ kref_init(&vma->ref);
i915_active_init(&vma->active, NULL, NULL);
vma->vm = &ggtt->vm;
@@ -2248,16 +2246,6 @@ i915_ppgtt_create(struct drm_i915_private *i915)
return ppgtt;
}
-static void ppgtt_destroy_vma(struct i915_address_space *vm)
-{
- struct i915_vma *vma, *vn;
-
- vm->closed = true;
- list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link)
- i915_vma_destroy(vma);
- GEM_BUG_ON(!list_empty(&vm->bound_list));
-}
-
void i915_vm_release(struct kref *kref)
{
struct i915_address_space *vm =
@@ -2266,8 +2254,6 @@ void i915_vm_release(struct kref *kref)
GEM_BUG_ON(i915_is_ggtt(vm));
trace_i915_ppgtt_release(vm);
- ppgtt_destroy_vma(vm);
-
vm->cleanup(vm);
i915_address_space_fini(vm);
@@ -228,7 +228,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
err_unpin:
i915_vma_unpin(so.vma);
err_vma:
- i915_vma_close(so.vma);
+ i915_vma_put(so.vma);
err_obj:
i915_gem_object_put(so.obj);
return err;
@@ -46,8 +46,7 @@ static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
- if (IS_ERR(vma))
- i915_gem_object_put(obj);
+ i915_gem_object_put(obj);
return vma;
}
@@ -108,9 +108,11 @@ vma_create(struct drm_i915_gem_object *obj,
if (vma == NULL)
return ERR_PTR(-ENOMEM);
- vma->vm = vm;
+ kref_init(&vma->ref);
+
+ vma->vm = i915_vm_get(vm);
vma->ops = &vm->vma_ops;
- vma->obj = obj;
+ vma->obj = i915_gem_object_get(obj);
vma->resv = obj->resv;
vma->size = obj->base.size;
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
@@ -182,6 +184,7 @@ vma_create(struct drm_i915_gem_object *obj,
*/
cmp = i915_vma_compare(pos, vm, view);
if (cmp == 0) {
+ i915_vma_get(pos);
spin_unlock(&obj->vma.lock);
i915_vma_free(vma);
return pos;
@@ -267,11 +270,14 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
spin_lock(&obj->vma.lock);
vma = vma_lookup(obj, vm, view);
+ if (likely(vma))
+ i915_vma_get(vma);
spin_unlock(&obj->vma.lock);
+ if (likely(vma))
+ return vma;
/* vma_create() will resolve the race if another creates the vma */
- if (unlikely(!vma))
- vma = vma_create(obj, vm, view);
+ vma = vma_create(obj, vm, view);
GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
return vma;
@@ -326,7 +332,10 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
if (ret)
return ret;
+ if (!vma_flags)
+ i915_vma_get(vma);
vma->flags |= bind_flags;
+
return 0;
}
@@ -400,22 +409,16 @@ void i915_vma_unpin_iomap(struct i915_vma *vma)
void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
struct i915_vma *vma;
- struct drm_i915_gem_object *obj;
vma = fetch_and_zero(p_vma);
if (!vma)
return;
- obj = vma->obj;
- GEM_BUG_ON(!obj);
-
i915_vma_unpin(vma);
- i915_vma_close(vma);
-
if (flags & I915_VMA_RELEASE_MAP)
- i915_gem_object_unpin_map(obj);
+ i915_gem_object_unpin_map(vma->obj);
- i915_gem_object_put(obj);
+ i915_vma_put(vma);
}
bool i915_vma_misplaced(const struct i915_vma *vma,
@@ -760,11 +763,11 @@ void i915_vma_close(struct i915_vma *vma)
* of wasted work for the steady state.
*/
spin_lock_irqsave(&i915->gt.closed_lock, flags);
- list_add(&vma->closed_link, &i915->gt.closed_vma);
+ list_add(&i915_vma_get(vma)->closed_link, &i915->gt.closed_vma);
spin_unlock_irqrestore(&i915->gt.closed_lock, flags);
}
-static void __i915_vma_remove_closed(struct i915_vma *vma)
+void i915_vma_reopen(struct i915_vma *vma)
{
struct drm_i915_private *i915 = vma->vm->i915;
@@ -774,15 +777,25 @@ static void __i915_vma_remove_closed(struct i915_vma *vma)
spin_lock_irq(&i915->gt.closed_lock);
list_del_init(&vma->closed_link);
spin_unlock_irq(&i915->gt.closed_lock);
+
+ i915_vma_put(vma);
}
-void i915_vma_reopen(struct i915_vma *vma)
+void i915_vma_destroy(struct i915_vma *vma)
{
- __i915_vma_remove_closed(vma);
+ mutex_lock(&vma->vm->i915->drm.struct_mutex);
+ vma->flags &= ~I915_VMA_PIN_MASK;
+ WARN_ON(i915_vma_unbind(vma));
+ mutex_unlock(&vma->vm->i915->drm.struct_mutex);
+
+ i915_vma_put(vma);
}
-static void __i915_vma_destroy(struct i915_vma *vma)
+void __i915_vma_release(struct kref *ref)
{
+ struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
+
+ GEM_BUG_ON(i915_vma_is_active(vma));
GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(vma->fence);
@@ -791,29 +804,18 @@ static void __i915_vma_destroy(struct i915_vma *vma)
spin_lock(&obj->vma.lock);
list_del(&vma->obj_link);
- rb_erase(&vma->obj_node, &vma->obj->vma.tree);
+ rb_erase(&vma->obj_node, &obj->vma.tree);
spin_unlock(&obj->vma.lock);
+
+ i915_gem_object_put(obj);
}
i915_active_fini(&vma->active);
+ i915_vm_put(vma->vm);
i915_vma_free(vma);
}
-void i915_vma_destroy(struct i915_vma *vma)
-{
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-
- GEM_BUG_ON(i915_vma_is_pinned(vma));
-
- __i915_vma_remove_closed(vma);
-
- WARN_ON(i915_vma_unbind(vma));
- GEM_BUG_ON(i915_vma_is_active(vma));
-
- __i915_vma_destroy(vma);
-}
-
void i915_vma_parked(struct drm_i915_private *i915)
{
struct i915_vma *vma, *next;
@@ -823,7 +825,8 @@ void i915_vma_parked(struct drm_i915_private *i915)
 		list_del_init(&vma->closed_link);
 		spin_unlock_irq(&i915->gt.closed_lock);
 
-		i915_vma_destroy(vma);
+		i915_vma_unbind(vma);
+		i915_vma_put(vma);
 
 		spin_lock_irq(&i915->gt.closed_lock);
 	}
@@ -975,6 +977,7 @@ int i915_vma_unbind(struct i915_vma *vma)
vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
i915_vma_remove(vma);
+ i915_vma_put(vma);
return 0;
}
@@ -51,14 +51,19 @@ enum i915_cache_level;
*/
struct i915_vma {
struct drm_mm_node node;
- struct drm_i915_gem_object *obj;
- struct i915_address_space *vm;
+
+ struct kref ref;
const struct i915_vma_ops *ops;
- struct i915_fence_reg *fence;
- struct reservation_object *resv; /** Alias of obj->resv */
+ struct i915_address_space *vm;
+ void *private; /* owned by creator */
+
struct sg_table *pages;
+ struct i915_fence_reg *fence;
void __iomem *iomap;
- void *private; /* owned by creator */
+
+ struct drm_i915_gem_object *obj;
+ struct reservation_object *resv; /** Alias of obj->resv */
+
u64 size;
u64 display_alignment;
struct i915_page_sizes page_sizes;
@@ -66,11 +71,6 @@ struct i915_vma {
u32 fence_size;
u32 fence_alignment;
- /**
- * Count of the number of times this vma has been opened by different
- * handles (but same file) for execbuf, i.e. the number of aliases
- * that exist in the ctx->handle_vmas LUT for this vma.
- */
atomic_t open_count;
unsigned long flags;
/**
@@ -206,6 +206,10 @@ static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
}
+void i915_vma_close(struct i915_vma *vma);
+void i915_vma_reopen(struct i915_vma *vma);
+void i915_vma_parked(struct drm_i915_private *i915);
+
static inline bool i915_vma_is_closed(const struct i915_vma *vma)
{
return !list_empty(&vma->closed_link);
@@ -227,15 +231,18 @@ static inline u32 i915_ggtt_pin_bias(struct i915_vma *vma)
static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
{
- i915_gem_object_get(vma->obj);
+ kref_get(&vma->ref);
return vma;
}
+void __i915_vma_release(struct kref *ref);
static inline void i915_vma_put(struct i915_vma *vma)
{
- i915_gem_object_put(vma->obj);
+ kref_put(&vma->ref, __i915_vma_release);
}
+void i915_vma_destroy(struct i915_vma *vma);
+
static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
{
return a - b;
@@ -292,11 +299,8 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
-int __must_check i915_vma_unbind(struct i915_vma *vma);
+int i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
-void i915_vma_close(struct i915_vma *vma);
-void i915_vma_reopen(struct i915_vma *vma);
-void i915_vma_destroy(struct i915_vma *vma);
#define assert_vma_held(vma) reservation_object_assert_held((vma)->resv)
@@ -438,8 +442,6 @@ i915_vma_unpin_fence(struct i915_vma *vma)
__i915_vma_unpin_fence(vma);
}
-void i915_vma_parked(struct drm_i915_private *i915);
-
#define for_each_until(cond) if (cond) break; else
/**
@@ -3080,12 +3080,14 @@ initial_plane_vma(struct drm_i915_private *i915,
break;
default:
MISSING_CASE(plane_config->tiling);
- goto err_obj;
+ i915_gem_object_put(obj);
+ return NULL;
}
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ i915_gem_object_put(obj);
if (IS_ERR(vma))
- goto err_obj;
+ return NULL;
mutex_lock(&i915->drm.struct_mutex);
err = i915_vma_pin(vma, 0, 0,
@@ -3093,16 +3095,16 @@ initial_plane_vma(struct drm_i915_private *i915,
base | PIN_OFFSET_FIXED);
mutex_unlock(&i915->drm.struct_mutex);
if (err)
- goto err_obj;
+ goto err_vma;
- if (i915_gem_object_is_tiled(obj) &&
+ if (i915_gem_object_is_tiled(vma->obj) &&
!i915_vma_is_map_and_fenceable(vma))
- goto err_obj;
+ goto err_vma;
return vma;
-err_obj:
- i915_gem_object_put(obj);
+err_vma:
+ i915_vma_put(vma);
return NULL;
}
@@ -669,19 +669,16 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
return ERR_CAST(obj);
vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
+ i915_gem_object_put(obj);
if (IS_ERR(vma))
- goto err;
+ return vma;
flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
ret = i915_vma_pin(vma, 0, 0, flags);
if (ret) {
+ i915_vma_put(vma);
vma = ERR_PTR(ret);
- goto err;
}
return vma;
-
-err:
- i915_gem_object_put(obj);
- return vma;
}
@@ -186,7 +186,7 @@ struct intel_overlay {
u32 brightness, contrast, saturation;
u32 old_xscale, old_yscale;
/* register access */
- struct drm_i915_gem_object *reg_bo;
+ struct i915_vma *reg_vma;
struct overlay_registers __iomem *regs;
u32 flip_addr;
/* flip handling */
@@ -1319,13 +1319,14 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ i915_gem_object_put(obj);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err_put_bo;
+ goto err_unlock;
}
if (use_phys)
- overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
+ overlay->flip_addr = sg_dma_address(vma->obj->mm.pages->sgl);
else
overlay->flip_addr = i915_ggtt_offset(vma);
overlay->regs = i915_vma_pin_iomap(vma);
@@ -1333,15 +1334,15 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
if (IS_ERR(overlay->regs)) {
err = PTR_ERR(overlay->regs);
- goto err_put_bo;
+ goto err_vma;
}
- overlay->reg_bo = obj;
+ overlay->reg_vma = vma;
mutex_unlock(&i915->drm.struct_mutex);
return 0;
-err_put_bo:
- i915_gem_object_put(obj);
+err_vma:
+ i915_vma_destroy(vma);
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -1401,7 +1402,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
*/
WARN_ON(overlay->active);
- i915_gem_object_put(overlay->reg_bo);
+ i915_vma_destroy(overlay->reg_vma);
i915_active_fini(&overlay->last_flip);
kfree(overlay);
@@ -332,9 +332,8 @@ static void close_object_list(struct list_head *objects,
vma = i915_vma_instance(obj, vm, NULL);
if (!IS_ERR(vma))
ignored = i915_vma_unbind(vma);
- /* Only ppgtt vma may be closed before the object is freed */
- if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
+ if (!IS_ERR(vma))
+ i915_vma_put(vma);
list_del(&obj->st_link);
i915_gem_object_put(obj);
@@ -624,8 +623,6 @@ static int walk_hole(struct drm_i915_private *i915,
}
err_close:
- if (!i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
err_put:
i915_gem_object_put(obj);
if (err)
@@ -706,8 +703,6 @@ static int pot_hole(struct drm_i915_private *i915,
}
err:
- if (!i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
err_obj:
i915_gem_object_put(obj);
return err;
@@ -809,8 +804,6 @@ static int drunk_hole(struct drm_i915_private *i915,
}
err:
- if (!i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
err_obj:
i915_gem_object_put(obj);
kfree(order);
@@ -317,6 +317,7 @@ static int igt_vma_pin1(void *arg)
 		return PTR_ERR(obj);
 
 	vma = checked_vma_instance(obj, &ggtt->vm, NULL);
+	i915_gem_object_put(obj);
 	if (IS_ERR(vma))
-		goto out;
+		return PTR_ERR(vma);
@@ -345,7 +346,7 @@ static int igt_vma_pin1(void *arg)
err = 0;
out:
- i915_gem_object_put(obj);
+ i915_vma_destroy(vma);
return err;
}