[03/26] dma-buf: make fence mandatory for dma_resv_add_excl_fence

Message ID 20211123142111.3885-4-christian.koenig@amd.com (mailing list archive)
State New, archived
Series [01/26] drm/amdgpu: partially revert "svm bo enable_signal call condition"

Commit Message

Christian König Nov. 23, 2021, 2:20 p.m. UTC
Calling dma_resv_add_excl_fence() with the fence as NULL and expecting
that this frees up the fences is simply an abuse of the internals of
the dma_resv object.

Rework how fence pruning works and make the fence parameter mandatory.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 39 ++++++++++++++++++++++++++++++++++----
 1 file changed, 35 insertions(+), 4 deletions(-)
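
In caller terms, the change looks like this (illustrative sketch, not
part of the patch; dma_resv_test_signaled() and dma_resv_prune() are
the functions touched by the hunks below):

	/* Before: dropping signaled fences by abusing the NULL fence. */
	if (dma_resv_test_signaled(resv, true))
		dma_resv_add_excl_fence(resv, NULL);

	/* After: the dedicated helper does the pruning instead. */
	dma_resv_prune(resv);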

Patch

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index f6499e87963c..e627a4274ff6 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -96,6 +96,34 @@ static void dma_resv_list_free(struct dma_resv_list *list)
 	kfree_rcu(list, rcu);
 }
 
+/**
+ * dma_resv_list_prune - drop all signaled fences
+ * @list: list to check for signaled fences
+ * @obj: dma_resv object for lockdep
+ *
+ * Replace all the signaled fences with the stub fence to free them up.
+ */
+static void dma_resv_list_prune(struct dma_resv_list *list,
+				struct dma_resv *obj)
+{
+	unsigned int i;
+
+	if (!list)
+		return;
+
+	for (i = 0; i < list->shared_count; ++i) {
+		struct dma_fence *fence;
+
+		fence = rcu_dereference_protected(list->shared[i],
+						  dma_resv_held(obj));
+		if (!dma_fence_is_signaled(fence))
+			continue;
+
+		RCU_INIT_POINTER(list->shared[i], dma_fence_get_stub());
+		dma_fence_put(fence);
+	}
+}
+
 /**
  * dma_resv_init - initialize a reservation object
  * @obj: the reservation object
@@ -305,8 +333,7 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 	if (old)
 		i = old->shared_count;
 
-	if (fence)
-		dma_fence_get(fence);
+	dma_fence_get(fence);
 
 	write_seqcount_begin(&obj->seq);
 	/* write_seqcount_begin provides the necessary memory barrier */
@@ -334,8 +361,12 @@ void dma_resv_prune(struct dma_resv *obj)
 {
 	dma_resv_assert_held(obj);
 
-	if (dma_resv_test_signaled(obj, true))
-		dma_resv_add_excl_fence(obj, NULL);
+	write_seqcount_begin(&obj->seq);
+	if (obj->fence_excl && dma_fence_is_signaled(obj->fence_excl))
+		dma_fence_put(rcu_replace_pointer(obj->fence_excl, NULL,
+						  dma_resv_held(obj)));
+	dma_resv_list_prune(dma_resv_shared_list(obj), obj);
+	write_seqcount_end(&obj->seq);
 }
EXPORT_SYMBOL(dma_resv_prune);
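
As a usage sketch (hypothetical helper, not part of the patch): the
reworked dma_resv_prune() asserts that the reservation lock is held,
so a caller would wrap it like this:

	static void example_prune(struct dma_resv *resv)
	{
		/* With a NULL acquire context the lock cannot deadlock. */
		dma_resv_lock(resv, NULL);
		dma_resv_prune(resv);
		dma_resv_unlock(resv);
	}

Replacing each signaled fence with dma_fence_get_stub() rather than
NULL keeps every shared slot pointing at a valid fence, so concurrent
RCU readers never observe a hole while the real fence's memory is
released.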