@@ -453,6 +453,7 @@ struct radeon_bo_va {
struct list_head bo_list;
uint32_t flags;
uint64_t addr;
+ struct radeon_fence *last_pt_update;
unsigned ref_count;
/* protected by vm mutex */
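These first hunks are against radeon.h. Each radeon_bo_va now carries a last_pt_update fence recording the most recent page-table update for that particular mapping. Together with the changes below, this replaces the single per-VM fence: a command submission can then wait on exactly the page-table updates it depends on instead of on the last update anywhere in the VM.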
@@ -907,6 +908,8 @@ struct radeon_vm_id {
};
struct radeon_vm {
+ struct mutex mutex;
+
struct rb_root va;
/* BOs moved, but not yet updated in the PT */
@@ -924,10 +927,6 @@ struct radeon_vm {
struct radeon_bo_va *ib_bo_va;
- struct mutex mutex;
- /* last fence for cs using this vm */
- struct radeon_fence *fence;
-
/* for id and flush management per ring */
struct radeon_vm_id ids[RADEON_NUM_RINGS];
};
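Still in radeon.h, the VM mutex moves to the head of struct radeon_vm and the per-VM fence disappears outright. Its two jobs are taken over by the page directory's reservation object (for directory updates) and by the new per-mapping last_pt_update (for page-table updates).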
@@ -494,6 +494,8 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
if (r)
return r;
+ radeon_sync_resv(p->rdev, &p->ib.sync, vm->page_directory->tbo.resv, true);
+
r = radeon_vm_clear_freed(rdev, vm);
if (r)
return r;
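In radeon_cs.c, radeon_bo_vm_update_pte makes the submission sync to the fences on the page directory's reservation object before the per-mapping updates run; the trailing true asks radeon_sync_resv to include shared fences in addition to the exclusive one.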
@@ -525,6 +527,8 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
if (r)
return r;
+
+ radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
}
return radeon_vm_clear_invalids(rdev, vm);
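After each buffer's page tables are brought up to date, the submission also syncs to that mapping's last_pt_update, so it waits only for the updates it actually depends on. To make the granularity win concrete, here is a toy userspace model (all names invented, not kernel code; sequence numbers stand in for fences, and "syncing" means choosing the number to wait for):

/* Toy model, not kernel code: fences reduced to plain sequence
 * numbers, "syncing" reduced to picking the number to wait for. */
#include <stdio.h>

#define NUM_BOS 3

int main(void)
{
	/* Sequence number of the last PT update of each mapping. */
	unsigned last_pt_update[NUM_BOS] = { 10, 50, 20 };
	unsigned vm_fence = 0;

	/* Old scheme: one fence per VM is effectively the maximum
	 * over all mappings, so every CS waits for seq 50. */
	for (int i = 0; i < NUM_BOS; i++)
		if (last_pt_update[i] > vm_fence)
			vm_fence = last_pt_update[i];
	printf("per-VM fence: every CS waits for seq %u\n", vm_fence);

	/* New scheme: a CS touching only BO 0 syncs to that
	 * mapping's own fence and may run after seq 10. */
	printf("per-mapping fence: CS on BO 0 waits for seq %u\n",
	       last_pt_update[0]);
	return 0;
}

With one VM-wide fence, every submission inherits the newest update in the VM; with per-mapping fences, a submission touching only BO 0 is not held back by unrelated updates.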
@@ -563,7 +567,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
goto out;
}
radeon_cs_sync_rings(parser);
- radeon_sync_fence(&parser->ib.sync, vm->fence);
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
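With those explicit syncs in place, the blanket dependency on vm->fence in radeon_cs_ib_vm_chunk is no longer needed.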
@@ -275,9 +275,6 @@ void radeon_vm_fence(struct radeon_device *rdev,
{
unsigned vm_id = vm->ids[fence->ring].id;
- radeon_fence_unref(&vm->fence);
- vm->fence = radeon_fence_ref(fence);
-
radeon_fence_unref(&rdev->vm_manager.active[vm_id]);
rdev->vm_manager.active[vm_id] = radeon_fence_ref(fence);
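The remaining hunks are against radeon_vm.c. radeon_vm_fence stops maintaining the per-VM fence and only keeps the VMID bookkeeping in vm_manager.active.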
@@ -706,8 +703,6 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
}
ib.fence->is_vm_update = true;
radeon_bo_fence(pd, ib.fence, false);
- radeon_fence_unref(&vm->fence);
- vm->fence = radeon_fence_ref(ib.fence);
}
radeon_ib_free(rdev, &ib);
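radeon_bo_fence(pd, ib.fence, false) already installs the page-directory update fence as the exclusive fence on the directory's reservation object, which the CS path now syncs to directly, so mirroring it into vm->fence was redundant.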
@@ -998,8 +993,8 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
}
ib.fence->is_vm_update = true;
radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence);
- radeon_fence_unref(&vm->fence);
- vm->fence = radeon_fence_ref(ib.fence);
+ radeon_fence_unref(&bo_va->last_pt_update);
+ bo_va->last_pt_update = radeon_fence_ref(ib.fence);
radeon_ib_free(rdev, &ib);
return 0;
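radeon_vm_bo_update now stores the update fence in the mapping itself, with the usual unref-then-ref swap so the reference to the previous fence is dropped before the new one is taken. A minimal userspace sketch of that lifetime pattern (toy types, not kernel code; the kernel's radeon_fence_unref likewise NULLs the pointer it is handed):

/* Toy userspace model of the unref-then-ref swap used for
 * bo_va->last_pt_update above. All names here are made up. */
#include <stdio.h>
#include <stdlib.h>

struct toy_fence {
	int refcount;
	unsigned seq;
};

static struct toy_fence *toy_fence_ref(struct toy_fence *f)
{
	if (f)
		f->refcount++;
	return f;
}

/* Takes a pointer-to-pointer so the slot is NULLed, mirroring
 * radeon_fence_unref(&bo_va->last_pt_update). */
static void toy_fence_unref(struct toy_fence **slot)
{
	struct toy_fence *f = *slot;

	*slot = NULL;
	if (f && --f->refcount == 0) {
		printf("fence %u freed\n", f->seq);
		free(f);
	}
}

struct toy_bo_va {
	struct toy_fence *last_pt_update;
};

int main(void)
{
	struct toy_bo_va bo_va = { NULL };
	struct toy_fence *a = malloc(sizeof(*a));
	struct toy_fence *b = malloc(sizeof(*b));

	a->refcount = 1; a->seq = 1;
	b->refcount = 1; b->seq = 2;

	/* First update: same shape as the kernel hunk above —
	 * unref the old fence, then take a reference on the new one. */
	toy_fence_unref(&bo_va.last_pt_update);
	bo_va.last_pt_update = toy_fence_ref(a);
	toy_fence_unref(&a);	/* drop the submitter's reference */

	/* Second update replaces the first; fence 1 is freed here. */
	toy_fence_unref(&bo_va.last_pt_update);
	bo_va.last_pt_update = toy_fence_ref(b);
	toy_fence_unref(&b);

	/* Teardown: the unrefs added to clear_freed/bo_rmv/vm_fini
	 * play this role; skipping it would leak fence 2. */
	toy_fence_unref(&bo_va.last_pt_update);
	return 0;
}

Compiled and run, this prints that fence 1 is freed by the second swap and fence 2 by the final teardown unref.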
@@ -1025,6 +1020,7 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
r = radeon_vm_bo_update(rdev, bo_va, NULL);
radeon_bo_unref(&bo_va->bo);
+ radeon_fence_unref(&bo_va->last_pt_update);
kfree(bo_va);
if (r)
return r;
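Since radeon_vm_clear_freed destroys the mapping, it must now also drop the last_pt_update reference before kfree(), otherwise that final fence reference would leak.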
@@ -1083,6 +1079,7 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
bo_va->bo = radeon_bo_ref(bo_va->bo);
list_add(&bo_va->vm_status, &vm->freed);
} else {
+ radeon_fence_unref(&bo_va->last_pt_update);
kfree(bo_va);
}
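The same rule applies in radeon_vm_bo_rmv for the immediate-free path; a mapping deferred onto vm->freed keeps its fence until radeon_vm_clear_freed or radeon_vm_fini releases it.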
@@ -1129,8 +1126,6 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
int i, r;
vm->ib_bo_va = NULL;
- vm->fence = NULL;
-
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
vm->ids[i].id = 0;
vm->ids[i].flushed_updates = NULL;
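radeon_vm_init correspondingly loses the vm->fence initialization.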
@@ -1191,11 +1186,13 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
if (!r) {
list_del_init(&bo_va->bo_list);
radeon_bo_unreserve(bo_va->bo);
+ radeon_fence_unref(&bo_va->last_pt_update);
kfree(bo_va);
}
}
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
radeon_bo_unref(&bo_va->bo);
+ radeon_fence_unref(&bo_va->last_pt_update);
kfree(bo_va);
}
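Teardown in radeon_vm_fini drops last_pt_update for both the still-bound and the already-freed mappings, mirroring the toy model's final unref; the vm->fence cleanup below goes away with the field itself.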
@@ -1205,8 +1202,6 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
radeon_bo_unref(&vm->page_directory);
- radeon_fence_unref(&vm->fence);
-
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
radeon_fence_unref(&vm->ids[i].flushed_updates);
radeon_fence_unref(&vm->ids[i].last_id_use);