@@ -726,7 +726,7 @@ EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
*/
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
- static const char *usage[] = { "write", "read" };
+ static const char *usage[] = { "kernel", "write", "read" };
struct dma_resv_iter cursor;
struct dma_fence *fence;
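The new "kernel" entry has to come first so the table stays in enum order: the loop that consumes it (truncated from this hunk) indexes the array directly with each fence's usage. A minimal sketch of that loop, assuming the usage-aware iterator and the dma_resv_iter_usage() helper introduced earlier in this series:

    dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
            seq_printf(seq, "\t%s fence:",
                       usage[dma_resv_iter_usage(&cursor)]);
            dma_fence_describe(fence, seq);
    }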
@@ -296,7 +296,7 @@ int dma_resv(void)
int r;
spin_lock_init(&fence_lock);
- for (usage = DMA_RESV_USAGE_WRITE; usage <= DMA_RESV_USAGE_READ;
+ for (usage = DMA_RESV_USAGE_KERNEL; usage <= DMA_RESV_USAGE_READ;
++usage) {
r = subtests(tests, (void *)(unsigned long)usage);
if (r)
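Starting the loop at DMA_RESV_USAGE_KERNEL and stepping with ++usage only works because the enum values are consecutive and ordered KERNEL < WRITE < READ. A hypothetical compile-time guard (not part of the patch) that would pin that assumption down inside the selftest entry point:

    /* Hypothetical guard: the subtest loop relies on this ordering. */
    BUILD_BUG_ON(DMA_RESV_USAGE_KERNEL >= DMA_RESV_USAGE_WRITE);
    BUILD_BUG_ON(DMA_RESV_USAGE_WRITE >= DMA_RESV_USAGE_READ);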
@@ -117,7 +117,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
i915_fence_timeout(i915),
I915_FENCE_GFP);
dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
- DMA_RESV_USAGE_WRITE);
+ DMA_RESV_USAGE_KERNEL);
dma_fence_work_commit(&clflush->base);
/*
* We must have successfully populated the pages (since we are
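Clflushing is pure kernel housekeeping, so its fence moves from WRITE down to the new KERNEL level. Any driver-internal maintenance job follows the same shape; a hedged sketch, where my_driver_queue_clear() stands in for whatever queues the hardware work:

    /* Reserve the slot up front; dma_resv_add_fence() must not fail. */
    ret = dma_resv_reserve_fences(obj->base.resv, 1);
    if (ret)
            return ret;

    fence = my_driver_queue_clear(obj);     /* hypothetical helper */
    dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_KERNEL);
    dma_fence_put(fence);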
@@ -739,7 +739,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
return ret;
}
- dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE);
+ dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
ret = dma_resv_reserve_fences(bo->base.resv, 1);
if (unlikely(ret)) {
@@ -508,7 +508,7 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
return ret;
dma_resv_add_fence(&ghost_obj->base._resv, fence,
- DMA_RESV_USAGE_WRITE);
+ DMA_RESV_USAGE_KERNEL);
/**
* If we're not moving to fixed memory, the TTM object
@@ -562,7 +562,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
int ret = 0;
- dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE);
+ dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
if (!evict)
ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
else if (!from->use_tt && pipeline)
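These three TTM hunks all tag buffer-move fences with KERNEL usage. Per the new kernel-doc below, every consumer then has to wait for at least that level before touching the memory; a sketch of such a wait, assuming the usage-aware dma_resv_wait_timeout() signature from this series:

    long lret;

    /* Wait for (at least) all kernel fences before any access. */
    lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL,
                                 true, MAX_SCHEDULE_TIMEOUT);
    if (lret < 0)
            return lret;    /* interrupted */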
@@ -759,7 +759,7 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
ret = dma_resv_reserve_fences(bo->base.resv, 1);
if (!ret)
dma_resv_add_fence(bo->base.resv, &fence->base,
- DMA_RESV_USAGE_WRITE);
+ DMA_RESV_USAGE_KERNEL);
else
/* Last resort fallback when we are OOM */
dma_fence_wait(&fence->base, false);
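Reserving the fence slot before adding makes the out-of-memory path explicit: when no slot can be allocated, vmwgfx degrades to a synchronous dma_fence_wait() rather than losing track of the fence, keeping the buffer coherent at the cost of a stall.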
@@ -55,11 +55,29 @@ struct dma_resv_list;
* This enum describes the different use cases for a dma_resv object and
* controls which fences are returned when queried.
*
- * An important fact is that there is the order WRITE<READ and when the
- * dma_resv object is asked for fences for one use case the fences for the
- * lower use case are returned as well.
+ * An important fact is that there is the order KERNEL<WRITE<READ and
+ * when the dma_resv object is asked for fences of one use case the fences
+ * for the lower use cases are returned as well.
+ *
+ * For example, when asking for WRITE fences the KERNEL fences are returned
+ * as well. Similarly, when asking for READ fences both the WRITE and
+ * KERNEL fences are returned.
*/
enum dma_resv_usage {
+ /**
+ * @DMA_RESV_USAGE_KERNEL: For in-kernel memory management only.
+ *
+ * This should only be used for things like copying or clearing memory
+ * with a DMA hardware engine for the purpose of kernel memory
+ * management.
+ *
+ * Drivers must *always* wait for those fences before accessing the
+ * resource protected by the dma_resv object. The only exception is
+ * when the resource is known to have been locked down in place by
+ * pinning it previously.
+ */
+ DMA_RESV_USAGE_KERNEL,
+
/**
* @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
*
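To close the loop on the ordering rule documented above: a query at one usage level implicitly collects every lower level, so a single iterator walk suffices. A minimal reader-side sketch under these semantics:

    struct dma_resv_iter cursor;
    struct dma_fence *fence;

    /* READ also yields all WRITE and KERNEL fences (KERNEL<WRITE<READ). */
    dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
            /* e.g. add each fence to a job's dependency list */
    }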