@@ -115,7 +115,7 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
entry->priority = min(info[i].bo_priority,
AMDGPU_BO_LIST_MAX_PRIORITY);
entry->tv.bo = &entry->robj->tbo;
- entry->tv.shared = !entry->robj->prime_shared_count;
+ entry->tv.shared = true;
if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
list->gds_obj = entry->robj;
@@ -716,7 +716,8 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
break;
}
case AMDGPU_GEM_OP_SET_PLACEMENT:
- if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
+ if (robj->gem_base.import_attach &&
+ args->value & AMDGPU_GEM_DOMAIN_VRAM) {
r = -EINVAL;
amdgpu_bo_unreserve(robj);
break;
@@ -883,7 +883,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return -EINVAL;
/* A shared bo cannot be migrated to VRAM */
- if (bo->prime_shared_count) {
+ if (bo->gem_base.import_attach) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
domain = AMDGPU_GEM_DOMAIN_GTT;
else
@@ -88,7 +88,6 @@ struct amdgpu_bo {
u64 metadata_flags;
void *metadata;
u32 metadata_size;
- unsigned prime_shared_count;
/* list of all virtual address to which this bo is associated to */
struct list_head va;
/* Constant after initialization */
@@ -178,8 +178,6 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
bo->tbo.ttm->sg = sg;
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
- if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
- bo->prime_shared_count = 1;
ww_mutex_unlock(&resv->lock);
return &bo->gem_base;
@@ -206,7 +204,6 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
{
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
long r;
r = drm_gem_map_attach(dma_buf, attach);
@@ -217,29 +214,11 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
if (unlikely(r != 0))
goto error_detach;
-
- if (attach->dev->driver != adev->dev->driver) {
- /*
- * Wait for all shared fences to complete before we switch to future
- * use of exclusive fence on this prime shared bo.
- */
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
- true, false,
- MAX_SCHEDULE_TIMEOUT);
- if (unlikely(r < 0)) {
- DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
- goto error_unreserve;
- }
- }
-
/* pin buffer into GTT */
r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
if (r)
goto error_unreserve;
- if (attach->dev->driver != adev->dev->driver)
- bo->prime_shared_count++;
-
error_unreserve:
amdgpu_bo_unreserve(bo);
@@ -262,7 +241,6 @@ static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
{
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int ret = 0;
ret = amdgpu_bo_reserve(bo, true);
@@ -270,8 +248,6 @@ static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
goto error;
amdgpu_bo_unpin(bo);
- if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
- bo->prime_shared_count--;
amdgpu_bo_unreserve(bo);
error:
We now note that all fences are potential writers.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c     |  3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h  |  1 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c   | 24 ------------------------
 5 files changed, 4 insertions(+), 28 deletions(-)
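For context, here is a minimal sketch (not part of the patch) of the test that replaces the removed counter, using only the fields visible in the hunks above; the helper name amdgpu_bo_is_imported is hypothetical. A BO is treated as shared via PRIME exactly when its GEM object carries a dma-buf attachment:

static bool amdgpu_bo_is_imported(struct amdgpu_bo *bo)
{
	/*
	 * gem_base.import_attach is only set for BOs whose backing dma-buf
	 * was imported from another exporter, so a non-NULL value is what
	 * now keeps the BO out of VRAM in amdgpu_gem_op_ioctl() and
	 * amdgpu_bo_pin_restricted() above.
	 */
	return bo->gem_base.import_attach != NULL;
}

The design point is that the GEM layer already records whether a buffer was imported, so the driver no longer needs to mirror that information in its own prime_shared_count and keep it in sync across map_attach/map_detach.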