@@ -307,7 +307,7 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
 	ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
 	if (ret) {
 		kfree(virtio_gpu_fb);
-		drm_gem_object_put_unlocked(obj);
+		drm_gem_object_put(obj);
 		return NULL;
 	}
 
@@ -52,7 +52,7 @@ static int virtio_gpu_gem_create(struct drm_file *file,
 	*obj_p = &obj->base.base;
 
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_put_unlocked(&obj->base.base);
+	drm_gem_object_put(&obj->base.base);
 
 	*handle_p = handle;
 	return 0;
@@ -102,7 +102,7 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
 	if (gobj == NULL)
 		return -ENOENT;
 	*offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
-	drm_gem_object_put_unlocked(gobj);
+	drm_gem_object_put(gobj);
 	return 0;
 }
 
@@ -237,7 +237,7 @@ void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
 	u32 i;
 
 	for (i = 0; i < objs->nents; i++)
-		drm_gem_object_put_unlocked(objs->objs[i]);
+		drm_gem_object_put(objs->objs[i]);
 	virtio_gpu_array_free(objs);
 }
 
@@ -278,7 +278,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 		drm_gem_object_release(obj);
 		return ret;
 	}
-	drm_gem_object_put_unlocked(obj);
+	drm_gem_object_put(obj);
 
 	rc->res_handle = qobj->hw_res_handle; /* similiar to a VM address */
 	rc->bo_handle = handle;
@@ -300,7 +300,7 @@ static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
 
 	ri->size = qobj->base.base.size;
 	ri->res_handle = qobj->hw_res_handle;
-	drm_gem_object_put_unlocked(gobj);
+	drm_gem_object_put(gobj);
 	return 0;
 }
 
@@ -417,7 +417,7 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
 	else if (ret > 0)
 		ret = 0;
 
-	drm_gem_object_put_unlocked(obj);
+	drm_gem_object_put(obj);
 	return ret;
 }
 
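The rename is purely mechanical: drm_gem_object_put() is now the struct_mutex-free reference drop that used to be called drm_gem_object_put_unlocked(), so every hunk above substitutes one call for the other with no change in locking or refcounting behaviour. For readers less familiar with the pattern these hunks touch, below is a minimal sketch of the lookup/put idiom, modelled on virtio_gpu_mode_dumb_mmap() above; the helper name is hypothetical and is not part of this patch.

#include <linux/errno.h>
#include <linux/types.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_vma_manager.h>

/*
 * Hypothetical example, not part of the patch: resolve a GEM handle to its
 * fake mmap offset.  The dev argument is unused and only kept to mirror the
 * dumb_mmap callback signature.
 */
static int example_dumb_mmap_offset(struct drm_file *file_priv,
				    struct drm_device *dev,
				    u32 handle, u64 *offset_p)
{
	struct drm_gem_object *obj;

	/* drm_gem_object_lookup() takes a reference on success */
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj)
		return -ENOENT;

	*offset_p = drm_vma_node_offset_addr(&obj->vma_node);

	/* drop the lookup reference; no dev->struct_mutex required */
	drm_gem_object_put(obj);
	return 0;
}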