--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -484,23 +484,11 @@ static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
const struct ttm_operation_ctx *ctx)
{
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
struct drm_gem_object *obj = &bo->ttm.base;
struct drm_gpuvm_bo *vm_bo;
bool idle = false;
int ret = 0;
- dma_resv_assert_held(bo->ttm.base.resv);
-
- if (!list_empty(&bo->ttm.base.gpuva.list)) {
- dma_resv_iter_begin(&cursor, bo->ttm.base.resv,
- DMA_RESV_USAGE_BOOKKEEP);
- dma_resv_for_each_fence_unlocked(&cursor, fence)
- dma_fence_enable_sw_signaling(fence);
- dma_resv_iter_end(&cursor);
- }
-
drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
struct drm_gpuva *gpuva;
@@ -515,11 +503,11 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
if (ctx->no_wait_gpu &&
!dma_resv_test_signaled(bo->ttm.base.resv,
- DMA_RESV_USAGE_BOOKKEEP))
+ DMA_RESV_USAGE_PREEMPT))
return -EBUSY;
timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
- DMA_RESV_USAGE_BOOKKEEP,
+ DMA_RESV_USAGE_PREEMPT,
ctx->interruptible,
MAX_SCHEDULE_TIMEOUT);
if (!timeout)
@@ -723,7 +711,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
if (old_mem_type == XE_PL_TT &&
new_mem->mem_type == XE_PL_SYSTEM) {
long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
- DMA_RESV_USAGE_BOOKKEEP,
+ DMA_RESV_USAGE_PREEMPT,
true,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
@@ -1056,7 +1044,7 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
* unbind.
*/
dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
- DMA_RESV_USAGE_BOOKKEEP, fence) {
+ DMA_RESV_USAGE_PREEMPT, fence) {
if (xe_fence_is_xe_preempt(fence) &&
!dma_fence_is_signaled(fence)) {
if (!replacement)
@@ -1065,7 +1053,7 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
dma_resv_replace_fences(ttm_bo->base.resv,
fence->context,
replacement,
- DMA_RESV_USAGE_BOOKKEEP);
+ DMA_RESV_USAGE_PREEMPT);
}
}
dma_fence_put(replacement);
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -895,10 +895,10 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
xe_sched_job_add_migrate_flush(job, flush_flags);
if (!fence) {
err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
- DMA_RESV_USAGE_BOOKKEEP);
+ DMA_RESV_USAGE_PREEMPT);
if (!err && src_bo != dst_bo)
err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
- DMA_RESV_USAGE_BOOKKEEP);
+ DMA_RESV_USAGE_PREEMPT);
if (err)
goto err_job;
}
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -1115,7 +1115,7 @@ static int xe_pt_vm_dependencies(struct xe_sched_job *job,
if (pt_update_ops->wait_vm_bookkeep || pt_update_ops->wait_vm_kernel) {
err = job_test_add_deps(job, xe_vm_resv(vm),
pt_update_ops->wait_vm_bookkeep ?
- DMA_RESV_USAGE_BOOKKEEP :
+ DMA_RESV_USAGE_PREEMPT :
DMA_RESV_USAGE_KERNEL);
if (err)
return err;
@@ -1231,18 +1231,10 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
spin_unlock(&vm->userptr.invalidated_lock);
if (xe_vm_in_preempt_fence_mode(vm)) {
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
long err;
- dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP);
- dma_resv_for_each_fence_unlocked(&cursor, fence)
- dma_fence_enable_sw_signaling(fence);
- dma_resv_iter_end(&cursor);
-
err = dma_resv_wait_timeout(xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP,
+ DMA_RESV_USAGE_PREEMPT,
false, MAX_SCHEDULE_TIMEOUT);
XE_WARN_ON(err <= 0);
}
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -195,7 +195,7 @@ static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
if (q->lr.pfence) {
dma_resv_add_fence(bo->ttm.base.resv,
q->lr.pfence,
- DMA_RESV_USAGE_BOOKKEEP);
+ DMA_RESV_USAGE_PREEMPT);
}
return 0;
@@ -213,7 +213,7 @@ static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
q->ops->resume(q);
drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence,
- DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
+ DMA_RESV_USAGE_PREEMPT, DMA_RESV_USAGE_PREEMPT);
}
}
@@ -250,7 +250,7 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
down_read(&vm->userptr.notifier_lock);
drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
- DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
+ DMA_RESV_USAGE_PREEMPT, DMA_RESV_USAGE_PREEMPT);
/*
* Check to see if a preemption on VM is in flight or userptr
@@ -588,8 +588,6 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
struct xe_vma *vma = &uvma->vma;
struct xe_vm *vm = xe_vma_vm(vma);
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
long err;
xe_assert(vm->xe, xe_vma_is_userptr(vma));
@@ -625,20 +623,8 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
up_write(&vm->userptr.notifier_lock);
- /*
- * Preempt fences turn into schedule disables, pipeline these.
- * Note that even in fault mode, we need to wait for binds and
- * unbinds to complete, and those are attached as BOOKKEEP fences
- * to the vm.
- */
- dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP);
- dma_resv_for_each_fence_unlocked(&cursor, fence)
- dma_fence_enable_sw_signaling(fence);
- dma_resv_iter_end(&cursor);
-
err = dma_resv_wait_timeout(xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP,
+ DMA_RESV_USAGE_PREEMPT,
false, MAX_SCHEDULE_TIMEOUT);
XE_WARN_ON(err <= 0);

Use the new DMA_RESV_USAGE_PREEMPT dma-resv slots in Xe for preempt
fences, and rely on dma-resv/scheduler to enable signaling rather than
open-coding the enabling of signaling before waiting.

Cc: Dave Airlie <airlied@redhat.com>
Cc: Simona Vetter <simona.vetter@ffwll.ch>
Cc: Christian Koenig <christian.koenig@amd.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/xe/xe_bo.c      | 22 +++++-----------------
 drivers/gpu/drm/xe/xe_migrate.c |  4 ++--
 drivers/gpu/drm/xe/xe_pt.c      | 12 ++----------
 drivers/gpu/drm/xe/xe_vm.c      | 22 ++++------------------
 4 files changed, 13 insertions(+), 47 deletions(-)
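
Not part of the patch, but for reference, a minimal sketch of the wait
pattern this series targets. The two helpers below are made up purely for
illustration; DMA_RESV_USAGE_PREEMPT is the usage slot introduced earlier
in this series, and the assumption is that dma-resv/scheduler enable fence
signaling for that slot internally, so callers no longer do it by hand.

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/sched.h>

/* Old pattern (removed by this patch): open-coded signaling enable + wait. */
static long wait_preempt_fences_old(struct dma_resv *resv, bool intr)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	/* Force software signaling on every bookkeep fence... */
	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence)
		dma_fence_enable_sw_signaling(fence);
	dma_resv_iter_end(&cursor);

	/* ...then wait on the same usage. */
	return dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
				     intr, MAX_SCHEDULE_TIMEOUT);
}

/* New pattern: a single wait on the dedicated preempt slot. */
static long wait_preempt_fences_new(struct dma_resv *resv, bool intr)
{
	return dma_resv_wait_timeout(resv, DMA_RESV_USAGE_PREEMPT,
				     intr, MAX_SCHEDULE_TIMEOUT);
}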