diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -590,7 +590,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (!bp->destroy)
bp->destroy = &amdgpu_bo_destroy;
- r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
+ r = ttm_bo_init_validate(&adev->mman.bdev, &bo->tbo, bp->type,
&bo->placement, page_align, &ctx, NULL,
bp->resv, bp->destroy);
if (unlikely(r != 0))
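The driver call-site changes in this patch are mechanical: drop the size
argument, keep everything else. The size is not lost; by the time these
functions run, every caller has already attached it to the embedded GEM
object, which is where TTM now reads it from (bo->base.size). For amdgpu,
for instance, that happens earlier in amdgpu_bo_create(), outside this
hunk; roughly the following, sketched from the surrounding code and not
part of this diff:

	/* The requested size is stored on the embedded GEM object up front,
	 * so ttm_bo_init_validate() can later recover it as bo->tbo.base.size.
	 */
	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);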
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -226,7 +226,7 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
- * A failing ttm_bo_init will call ttm_buffer_object_destroy
+ * A failing ttm_bo_init_validate will call ttm_buffer_object_destroy
* to release gbo->bo.base and kfree gbo.
*/
- ret = ttm_bo_init_reserved(bdev, &gbo->bo, size, ttm_bo_type_device,
+ ret = ttm_bo_init_validate(bdev, &gbo->bo, ttm_bo_type_device,
&gbo->placement, pg_align, &ctx, NULL, NULL,
ttm_buffer_object_destroy);
if (ret)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -1229,9 +1229,8 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
* Similarly, in delayed_destroy, we can't call ttm_bo_put()
* until successful initialization.
*/
- ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
- bo_type, &i915_sys_placement,
- page_size >> PAGE_SHIFT,
+ ret = ttm_bo_init_validate(&i915->bdev, i915_gem_to_ttm(obj), bo_type,
+ &i915_sys_placement, page_size >> PAGE_SHIFT,
&ctx, NULL, NULL, i915_ttm_bo_destroy);
if (ret)
return i915_ttm_err_to_gem(ret);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -308,7 +308,7 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
nouveau_bo_placement_set(nvbo, domain, 0);
INIT_LIST_HEAD(&nvbo->io_reserve_lru);
- ret = ttm_bo_init_reserved(nvbo->bo.bdev, &nvbo->bo, size, type,
+ ret = ttm_bo_init_validate(nvbo->bo.bdev, &nvbo->bo, type,
&nvbo->placement, align >> PAGE_SHIFT, &ctx,
sg, robj, nouveau_bo_del_ttm);
if (ret) {
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
@@ -141,7 +141,7 @@ int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
qxl_ttm_placement_from_domain(bo, domain);
bo->tbo.priority = priority;
- r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
+ r = ttm_bo_init_validate(&qdev->mman.bdev, &bo->tbo, type,
&bo->placement, 0, &ctx, NULL, NULL,
&qxl_ttm_bo_destroy);
if (unlikely(r != 0)) {
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
@@ -204,7 +204,7 @@ int radeon_bo_create(struct radeon_device *rdev,
radeon_ttm_placement_from_domain(bo, domain);
down_read(&rdev->pm.mclk_lock);
- r = ttm_bo_init_reserved(&rdev->mman.bdev, &bo->tbo, size, type,
+ r = ttm_bo_init_validate(&rdev->mman.bdev, &bo->tbo, type,
&bo->placement, page_align, &ctx, sg, resv,
&radeon_ttm_bo_destroy);
if (!r)
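All six call sites above follow the same signature change, reproduced here
side by side for reference (taken from the header hunk at the end of this
patch):

	/* Before: the caller passed the size explicitly. */
	int ttm_bo_init_reserved(struct ttm_device *bdev,
				 struct ttm_buffer_object *bo,
				 size_t size, enum ttm_bo_type type,
				 struct ttm_placement *placement,
				 uint32_t page_alignment,
				 struct ttm_operation_ctx *ctx,
				 struct sg_table *sg, struct dma_resv *resv,
				 void (*destroy) (struct ttm_buffer_object *));

	/* After: size is dropped and page_alignment becomes alignment. */
	int ttm_bo_init_validate(struct ttm_device *bdev,
				 struct ttm_buffer_object *bo,
				 enum ttm_bo_type type,
				 struct ttm_placement *placement,
				 uint32_t alignment,
				 struct ttm_operation_ctx *ctx,
				 struct sg_table *sg, struct dma_resv *resv,
				 void (*destroy) (struct ttm_buffer_object *));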
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -915,36 +915,61 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_validate);
-int ttm_bo_init_reserved(struct ttm_device *bdev,
- struct ttm_buffer_object *bo,
- size_t size,
- enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment,
- struct ttm_operation_ctx *ctx,
- struct sg_table *sg,
- struct dma_resv *resv,
+/**
+ * ttm_bo_init_validate
+ *
+ * @bdev: Pointer to a ttm_device struct.
+ * @bo: Pointer to a ttm_buffer_object to be initialized.
+ * @type: Requested type of buffer object.
+ * @placement: Initial placement for buffer object.
+ * @alignment: Data alignment in pages.
+ * @ctx: TTM operation context for memory allocation.
+ * @sg: Scatter-gather table.
+ * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
+ * @destroy: Destroy function. Use NULL for kfree().
+ *
+ * This function initializes a pre-allocated struct ttm_buffer_object.
+ * As this object may be part of a larger structure, this function,
+ * together with the @destroy function, enables driver-specific objects
+ * derived from a ttm_buffer_object.
+ *
+ * On successful return, the caller owns an object kref to @bo. The kref and
+ * list_kref are usually set to 1, but note that in some situations, other
+ * tasks may already be holding references to @bo as well.
+ * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
+ * and it is the caller's responsibility to call ttm_bo_unreserve.
+ *
+ * If a failure occurs, the function will call the @destroy function, or
+ * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
+ * illegal and will likely cause memory corruption.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
+ */
+int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, struct ttm_operation_ctx *ctx,
+ struct sg_table *sg, struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
{
static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
- bool locked;
int ret;
- bo->destroy = destroy;
kref_init(&bo->kref);
INIT_LIST_HEAD(&bo->ddestroy);
bo->bdev = bdev;
bo->type = type;
- bo->page_alignment = page_alignment;
+ bo->page_alignment = alignment;
+ bo->destroy = destroy;
bo->pin_count = 0;
bo->sg = sg;
bo->bulk_move = NULL;
- if (resv) {
+ if (resv)
bo->base.resv = resv;
- dma_resv_assert_held(bo->base.resv);
- } else {
+ else
bo->base.resv = &bo->base._resv;
- }
atomic_inc(&ttm_glob.bo_count);
ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
@@ -957,33 +982,36 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
* For ttm_bo_type_device buffers, allocate
* address space from the device.
*/
- if (bo->type == ttm_bo_type_device ||
- bo->type == ttm_bo_type_sg)
+ if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) {
ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
- bo->resource->num_pages);
+ PFN_UP(bo->base.size));
+ if (ret)
+ goto err_put;
+ }
/* passed reservation objects should already be locked,
* since otherwise lockdep will be angered in radeon.
*/
- if (!resv) {
- locked = dma_resv_trylock(bo->base.resv);
- WARN_ON(!locked);
- }
+ if (!resv)
+ WARN_ON(!dma_resv_trylock(bo->base.resv));
+ else
+ dma_resv_assert_held(resv);
- if (likely(!ret))
- ret = ttm_bo_validate(bo, placement, ctx);
+ ret = ttm_bo_validate(bo, placement, ctx);
+ if (unlikely(ret))
+ goto err_unlock;
- if (unlikely(ret)) {
- if (!resv)
- ttm_bo_unreserve(bo);
+ return 0;
- ttm_bo_put(bo);
- return ret;
- }
+err_unlock:
+ if (!resv)
+ dma_resv_unlock(bo->base.resv);
+err_put:
+ ttm_bo_put(bo);
return ret;
}
-EXPORT_SYMBOL(ttm_bo_init_reserved);
+EXPORT_SYMBOL(ttm_bo_init_validate);
/*
* buffer object vm functions.
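With the reservation semantics unchanged (per the kerneldoc above: when
resv == NULL the object comes back reserved and the caller must call
ttm_bo_unreserve()), a typical driver-side caller now looks roughly like
the sketch below. The mydrv_* names are hypothetical and not from this
patch; the one real requirement it illustrates is that the embedded GEM
object must already carry the size, since the explicit size parameter is
gone:

	/* Hypothetical driver object embedding a TTM BO. */
	static int mydrv_bo_create(struct mydrv_device *mdev, size_t size,
				   struct mydrv_bo **out)
	{
		struct ttm_operation_ctx ctx = { .interruptible = true };
		struct mydrv_bo *mbo;
		int ret;

		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
		if (!mbo)
			return -ENOMEM;

		/* Attach the size to the GEM base before initializing. */
		drm_gem_private_object_init(&mdev->drm, &mbo->tbo.base, size);

		ret = ttm_bo_init_validate(&mdev->bdev, &mbo->tbo,
					   ttm_bo_type_device, &mdev->placement,
					   0, &ctx, NULL, NULL, mydrv_bo_destroy);
		if (ret)
			return ret;	/* @destroy has already freed mbo */

		/* resv was NULL, so we hold the reservation lock here. */
		ttm_bo_unreserve(&mbo->tbo);

		*out = mbo;
		return 0;
	}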
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -429,9 +429,9 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
drm_gem_private_object_init(vdev, &bo->base, size);
- ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
- ttm_bo_type_kernel, placement, 0,
- &ctx, NULL, NULL, vmw_bo_default_destroy);
+ ret = ttm_bo_init_validate(&dev_priv->bdev, bo, ttm_bo_type_kernel,
+ placement, 0, &ctx, NULL, NULL,
+ vmw_bo_default_destroy);
if (unlikely(ret))
goto error_free;
@@ -515,10 +515,8 @@ int vmw_bo_init(struct vmw_private *dev_priv,
size = ALIGN(size, PAGE_SIZE);
drm_gem_private_object_init(vdev, &vmw_bo->base.base, size);
- ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
- ttm_bo_type_device,
- placement,
- 0, &ctx, NULL, NULL, bo_free);
+ ret = ttm_bo_init_validate(bdev, &vmw_bo->base, ttm_bo_type_device,
+ placement, 0, &ctx, NULL, NULL, bo_free);
if (unlikely(ret)) {
return ret;
}
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
@@ -317,47 +317,9 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place);
-/**
- * ttm_bo_init_reserved
- *
- * @bdev: Pointer to a ttm_device struct.
- * @bo: Pointer to a ttm_buffer_object to be initialized.
- * @size: Requested size of buffer object.
- * @type: Requested type of buffer object.
- * @placement: Initial placement for buffer object.
- * @page_alignment: Data alignment in pages.
- * @ctx: TTM operation context for memory allocation.
- * @sg: Scatter-gather table.
- * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
- * @destroy: Destroy function. Use NULL for kfree().
- *
- * This function initializes a pre-allocated struct ttm_buffer_object.
- * As this object may be part of a larger structure, this function,
- * together with the @destroy function,
- * enables driver-specific objects derived from a ttm_buffer_object.
- *
- * On successful return, the caller owns an object kref to @bo. The kref and
- * list_kref are usually set to 1, but note that in some situations, other
- * tasks may already be holding references to @bo as well.
- * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
- * and it is the caller's responsibility to call ttm_bo_unreserve.
- *
- * If a failure occurs, the function will call the @destroy function, or
- * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
- * illegal and will likely cause memory corruption.
- *
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid placement flags.
- * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
- */
-
-int ttm_bo_init_reserved(struct ttm_device *bdev,
- struct ttm_buffer_object *bo,
- size_t size, enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment,
- struct ttm_operation_ctx *ctx,
+int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, struct ttm_operation_ctx *ctx,
struct sg_table *sg, struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *));
Rename ttm_bo_init_reserved to ttm_bo_init_validate since that better matches what the function is actually doing. Remove the unused size parameter, move the function's kerneldoc to the implementation and cleanup the whole error handling. Signed-off-by: Christian König <christian.koenig@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +- drivers/gpu/drm/drm_gem_vram_helper.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 5 +- drivers/gpu/drm/nouveau/nouveau_bo.c | 2 +- drivers/gpu/drm/qxl/qxl_object.c | 2 +- drivers/gpu/drm/radeon/radeon_object.c | 2 +- drivers/gpu/drm/ttm/ttm_bo.c | 92 ++++++++++++++-------- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 12 ++- include/drm/ttm/ttm_bo_api.h | 44 +---------- 9 files changed, 75 insertions(+), 88 deletions(-)