@@ -506,6 +506,9 @@ struct radeon_bo {
struct radeon_mn *mn;
struct interval_tree_node mn_it;
+
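+	/* fence of the last move, waited on in radeon_bo_kmap() */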
+ struct radeon_fence *last_move;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
@@ -263,6 +263,17 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
bool is_iomem;
int r;
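
+	/* wait for a pending buffer move to finish before CPU access */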
+	if (bo->last_move) {
+		r = radeon_fence_wait(bo->last_move, false);
+		if (r) {
+			DRM_ERROR("Failed waiting for BO move (%d)!\n", r);
+			return r;
+		}
+
+		radeon_fence_unref(&bo->last_move);
+	}
+
if (bo->kptr) {
if (ptr) {
*ptr = bo->kptr;
@@ -253,6 +253,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem,
struct ttm_mem_reg *old_mem)
{
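+	/* convert to the radeon BO so the move fence can be stored on it */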
+ struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
struct radeon_device *rdev;
uint64_t old_start, new_start;
struct radeon_fence *fence;
@@ -300,6 +302,9 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
r = ttm_bo_move_accel_cleanup(bo, &fence->base,
evict, no_wait_gpu, new_mem);
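+	/* remember the fence of this move so radeon_bo_kmap() can wait on it */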
+ radeon_fence_unref(&rbo->last_move);
+ rbo->last_move = radeon_fence_ref(fence);
radeon_fence_unref(&fence);
return r;
}
@@ -401,7 +401,6 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
{
int32_t *msg, msg_type, handle;
unsigned img_size = 0;
- struct fence *f;
void *ptr;
int i, r;
@@ -411,15 +410,6 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL;
}

-	f = reservation_object_get_excl(bo->tbo.resv);
- if (f) {
- r = radeon_fence_wait((struct radeon_fence *)f, false);
- if (r) {
- DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
- return r;
- }
- }
-
r = radeon_bo_kmap(bo, &ptr);
if (r) {
DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);