@@ -534,7 +534,6 @@ static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
struct vmw_user_buffer_object *vmw_user_bo;
struct ttm_base_object *base = *p_base;
- struct ttm_buffer_object *bo;
*p_base = NULL;
@@ -543,8 +542,7 @@ static void vmw_user_bo_release(struct ttm_base_object **p_base)
vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
prime.base);
- bo = &vmw_user_bo->vbo.base;
- ttm_bo_unref(&bo);
+ ttm_bo_put(&vmw_user_bo->vbo.base);
}
@@ -597,7 +595,6 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
struct ttm_base_object **p_base)
{
struct vmw_user_buffer_object *user_bo;
- struct ttm_buffer_object *tmp;
int ret;
user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
@@ -615,7 +612,6 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
return ret;
ttm_bo_get(&user_bo->vbo.base);
- tmp = &user_bo->vbo.base;
ret = ttm_prime_object_init(tfile,
size,
&user_bo->prime,
@@ -624,7 +620,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
&vmw_user_bo_release,
&vmw_user_bo_ref_obj_release);
if (unlikely(ret != 0)) {
- ttm_bo_unref(&tmp);
+ ttm_bo_put(&user_bo->vbo.base);
goto out_no_base_object;
}
@@ -765,7 +765,7 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
if (info->done)
return true;
-
+
memset(info->node, 0, sizeof(*info->node));
spin_lock(&man->lock);
ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
@@ -1276,8 +1276,10 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
return 0;
out_no_map:
- if (man->using_mob)
- ttm_bo_unref(&man->cmd_space);
+ if (man->using_mob) {
+ ttm_bo_put(man->cmd_space);
+ man->cmd_space = NULL;
+ }
return ret;
}
@@ -1380,7 +1382,8 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
(void) vmw_cmdbuf_idle(man, false, 10*HZ);
if (man->using_mob) {
(void) ttm_bo_kunmap(&man->map_obj);
- ttm_bo_unref(&man->cmd_space);
+ ttm_bo_put(man->cmd_space);
+ man->cmd_space = NULL;
} else {
dma_free_coherent(&man->dev_priv->dev->pdev->dev,
man->size, man->map, man->handle);
@@ -1337,9 +1337,7 @@ static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
*buf = NULL;
if (tmp_buf != NULL) {
- struct ttm_buffer_object *bo = &tmp_buf->base;
-
- ttm_bo_unref(&bo);
+ ttm_bo_put(&tmp_buf->base);
}
}
@@ -300,7 +300,8 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
&batch->otables[i]);
}
- ttm_bo_unref(&batch->otable_bo);
+ ttm_bo_put(batch->otable_bo);
+ batch->otable_bo = NULL;
out_no_bo:
return ret;
}
@@ -365,7 +366,8 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
vmw_bo_fence_single(bo, NULL);
ttm_bo_unreserve(bo);
- ttm_bo_unref(&batch->otable_bo);
+ ttm_bo_put(batch->otable_bo);
+ batch->otable_bo = NULL;
}
/*
@@ -463,7 +465,8 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
out_unreserve:
ttm_bo_unreserve(mob->pt_bo);
- ttm_bo_unref(&mob->pt_bo);
+ ttm_bo_put(mob->pt_bo);
+ mob->pt_bo = NULL;
return ret;
}
@@ -580,8 +583,10 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
*/
void vmw_mob_destroy(struct vmw_mob *mob)
{
- if (mob->pt_bo)
- ttm_bo_unref(&mob->pt_bo);
+ if (mob->pt_bo) {
+ ttm_bo_put(mob->pt_bo);
+ mob->pt_bo = NULL;
+ }
kfree(mob);
}
@@ -698,8 +703,10 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
out_no_cmd_space:
vmw_fifo_resource_dec(dev_priv);
- if (pt_set_up)
- ttm_bo_unref(&mob->pt_bo);
+ if (pt_set_up) {
+ ttm_bo_put(mob->pt_bo);
+ mob->pt_bo = NULL;
+ }
return -ENOMEM;
}
@@ -485,7 +485,8 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
out_no_validate:
ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
- ttm_bo_unref(&val_buf->bo);
+ ttm_bo_put(val_buf->bo);
+ val_buf->bo = NULL;
if (backup_dirty)
vmw_bo_unreference(&res->backup);
@@ -545,7 +546,8 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
ttm_eu_backoff_reservation(ticket, &val_list);
- ttm_bo_unref(&val_buf->bo);
+ ttm_bo_put(val_buf->bo);
+ val_buf->bo = NULL;
}
/**
@@ -628,8 +628,10 @@ void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
struct vmw_validation_bo_node *entry;
struct vmw_validation_res_node *val;
- list_for_each_entry(entry, &ctx->bo_list, base.head)
- ttm_bo_unref(&entry->base.bo);
+ list_for_each_entry(entry, &ctx->bo_list, base.head) {
+ ttm_bo_put(entry->base.bo);
+ entry->base.bo = NULL;
+ }
list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
list_for_each_entry(val, &ctx->resource_list, head)

The function ttm_bo_put releases a reference to a TTM buffer object. Its
name is better aligned with the Linux kernel convention of naming
ref-counting functions _get and _put.

A call to ttm_bo_unref takes the address of the TTM BO pointer and clears
the pointer's value to NULL. This is not necessary in most cases and is
sometimes even worked around by the calling code. A call to ttm_bo_put only
releases the reference without clearing the pointer. In places where it
might be necessary, the current behaviour of clearing the pointer is kept.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c         |  8 ++------
 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c     | 11 +++++++----
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h        |  4 +---
 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c        | 21 ++++++++++++++-------
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c   |  6 ++++--
 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c |  6 ++++--
 6 files changed, 32 insertions(+), 24 deletions(-)
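
For reference, a minimal sketch (not part of the patch) of the conversion
pattern applied throughout: the example_ctx structure and function are
hypothetical placeholders; only struct ttm_buffer_object and ttm_bo_put()
come from the TTM API.

    #include <drm/ttm/ttm_bo_api.h>

    struct example_ctx {
    	struct ttm_buffer_object *bo;	/* hypothetical owner of a BO reference */
    };

    static void example_ctx_release_bo(struct example_ctx *ctx)
    {
    	if (ctx->bo) {
    		/* Old style: ttm_bo_unref(&ctx->bo) dropped the reference
    		 * and cleared ctx->bo to NULL in one call.
    		 */
    		ttm_bo_put(ctx->bo);	/* new style: drop the reference only */
    		ctx->bo = NULL;		/* clear explicitly where it matters */
    	}
    }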