@@ -316,6 +316,7 @@ EXPORT_SYMBOL(reservation_object_copy_fences);
* reservation_object_get_fences_rcu - Get an object's shared and exclusive
* fences without update side lock held
* @obj: the reservation object
+ * @writes_only: if true then only write operations are returned
* @pfence_excl: the returned exclusive fence (or NULL)
* @pshared_count: the number of shared fences returned
* @pshared: the array of shared fence ptrs returned (array is krealloc'd to
@@ -326,6 +327,7 @@ EXPORT_SYMBOL(reservation_object_copy_fences);
* shared fences as well. Returns either zero or -ENOMEM.
*/
int reservation_object_get_fences_rcu(struct reservation_object *obj,
+ bool writes_only,
struct dma_fence **pfence_excl,
unsigned *pshared_count,
struct dma_fence ***pshared)
@@ -358,6 +360,7 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
if (sz) {
struct dma_fence **nshared;
+ unsigned int j;
nshared = krealloc(shared, sz,
GFP_NOWAIT | __GFP_NOWARN);
@@ -374,13 +377,20 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
}
shared = nshared;
shared_count = fobj ? fobj->shared_count : 0;
- for (i = 0; i < shared_count; ++i) {
- void *e = rcu_dereference(fobj->shared[i]);
+ for (i = 0, j = 0; j < shared_count; ++j) {
+ void *e = rcu_dereference(fobj->shared[j]);
+
+ if (writes_only &&
+ !reservation_object_shared_is_write(e))
+ continue;
shared[i] = reservation_object_shared_fence(e);
if (!dma_fence_get_rcu(shared[i]))
- break;
+ goto drop_references;
+
+ i++;
}
+ shared_count = i;
if (!pfence_excl && fence_excl) {
shared[i] = fence_excl;
@@ -390,7 +400,8 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
}
}
- if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
+ if (read_seqcount_retry(&obj->seq, seq)) {
+drop_references:
while (i--)
dma_fence_put(shared[i]);
dma_fence_put(fence_excl);
@@ -200,7 +200,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto unpin;
}
- r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
+ r = reservation_object_get_fences_rcu(new_abo->tbo.resv, false,
+ &work->excl,
&work->shared_count,
&work->shared);
if (unlikely(r != 0)) {
@@ -112,7 +112,8 @@ void amdgpu_pasid_free_delayed(struct reservation_object *resv,
unsigned count;
int r;
- r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);
+ r = reservation_object_get_fences_rcu(resv, false, NULL,
+ &count, &fences);
if (r)
goto fallback;
@@ -1841,7 +1841,7 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
unsigned i, shared_count;
int r;
- r = reservation_object_get_fences_rcu(resv, &excl,
+ r = reservation_object_get_fences_rcu(resv, false, &excl,
&shared_count, &shared);
if (r) {
/* Not enough memory to grab the fence list, as last resort
@@ -188,7 +188,8 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
continue;
if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
- ret = reservation_object_get_fences_rcu(robj, &bo->excl,
+ ret = reservation_object_get_fences_rcu(robj, false,
+ &bo->excl,
&bo->nr_shared,
&bo->shared);
if (ret)
@@ -517,7 +517,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
unsigned int count, i;
int ret;
- ret = reservation_object_get_fences_rcu(resv,
+ ret = reservation_object_get_fences_rcu(resv, false,
&excl, &count, &shared);
if (ret)
return ret;
@@ -619,7 +619,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int count, i;
int ret;
- ret = reservation_object_get_fences_rcu(obj->resv,
+ ret = reservation_object_get_fences_rcu(obj->resv, false,
&excl, &count, &shared);
if (ret)
return ret;
@@ -981,7 +981,7 @@ i915_request_await_object(struct i915_request *to,
struct dma_fence **shared;
unsigned int count, i;
- ret = reservation_object_get_fences_rcu(obj->resv,
+ ret = reservation_object_get_fences_rcu(obj->resv, false,
&excl, &count, &shared);
if (ret)
return ret;
@@ -499,7 +499,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct dma_fence **shared;
unsigned int count, i;
- ret = reservation_object_get_fences_rcu(resv,
+ ret = reservation_object_get_fences_rcu(resv, false,
&excl, &count, &shared);
if (ret)
return ret;
@@ -296,6 +296,7 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
struct dma_fence *fence);
int reservation_object_get_fences_rcu(struct reservation_object *obj,
+ bool writes_only,
struct dma_fence **pfence_excl,
unsigned *pshared_count,
struct dma_fence ***pshared);
That allows us to retrieve only the fences of write operations. Signed-off-by: Christian König <christian.koenig@amd.com> --- drivers/dma-buf/reservation.c | 19 +++++++++++++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 3 ++- drivers/gpu/drm/i915/i915_gem.c | 4 ++-- drivers/gpu/drm/i915/i915_request.c | 2 +- drivers/gpu/drm/i915/i915_sw_fence.c | 2 +- include/linux/reservation.h | 1 + 9 files changed, 27 insertions(+), 12 deletions(-)