Message ID | 20211005113742.1101-26-christian.koenig@amd.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | [01/28] dma-buf: add dma_resv_for_each_fence_unlocked v8 | expand |
On Tue, Oct 05, 2021 at 01:37:39PM +0200, Christian König wrote: > Simplifying the code a bit. > > Signed-off-by: Christian König <christian.koenig@amd.com> A bit of a tricky conversion since the previous code was clever with the ret handling in the loop, but looks correct. Please mention in the commit message that this code now also waits for all shared fences in all cases. Previously if we found an exclusive fence, we bailed out. That needs to be recorded in the commit message, together with an explainer that de facto too many other drivers have broken this rule already, and so you have to always iterate all fences. With that added: Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch> > --- > drivers/gpu/drm/nouveau/nouveau_fence.c | 48 +++++++------------------ > 1 file changed, 12 insertions(+), 36 deletions(-) > > diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c > index 05d0b3eb3690..26f9299df881 100644 > --- a/drivers/gpu/drm/nouveau/nouveau_fence.c > +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c > @@ -339,14 +339,15 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr) > } > > int > -nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr) > +nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, > + bool exclusive, bool intr) > { > struct nouveau_fence_chan *fctx = chan->fence; > - struct dma_fence *fence; > struct dma_resv *resv = nvbo->bo.base.resv; > - struct dma_resv_list *fobj; > + struct dma_resv_iter cursor; > + struct dma_fence *fence; > struct nouveau_fence *f; > - int ret = 0, i; > + int ret; > > if (!exclusive) { > ret = dma_resv_reserve_shared(resv, 1); > @@ -355,10 +356,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e > return ret; > } > > - fobj = dma_resv_shared_list(resv); > - fence = dma_resv_excl_fence(resv); > - > - if (fence) { > + dma_resv_for_each_fence(&cursor, resv, exclusive, fence) { > struct nouveau_channel *prev = NULL; > bool must_wait = true; > > @@ -366,41 +364,19 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e > if (f) { > rcu_read_lock(); > prev = rcu_dereference(f->channel); > - if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0)) > + if (prev && (prev == chan || > + fctx->sync(f, prev, chan) == 0)) > must_wait = false; > rcu_read_unlock(); > } > > - if (must_wait) > + if (must_wait) { > ret = dma_fence_wait(fence, intr); > - > - return ret; > - } > - > - if (!exclusive || !fobj) > - return ret; > - > - for (i = 0; i < fobj->shared_count && !ret; ++i) { > struct nouveau_channel *prev = NULL; > bool must_wait = true; > > - fence = rcu_dereference_protected(fobj->shared[i], > - dma_resv_held(resv)); > - > - f = nouveau_local_fence(fence, chan->drm); > - if (f) { > rcu_read_lock(); > prev = rcu_dereference(f->channel); > - if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0)) > - must_wait = false; > - rcu_read_unlock(); > + if (ret) > + return ret; > } > - > - if (must_wait) > - ret = dma_fence_wait(fence, intr); > } > - > - return ret; > + return 0; > } > > void > -- > 2.25.1 >
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 05d0b3eb3690..26f9299df881 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -339,14 +339,15 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr) } int -nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr) +nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, + bool exclusive, bool intr) { struct nouveau_fence_chan *fctx = chan->fence; - struct dma_fence *fence; struct dma_resv *resv = nvbo->bo.base.resv; - struct dma_resv_list *fobj; + struct dma_resv_iter cursor; + struct dma_fence *fence; struct nouveau_fence *f; - int ret = 0, i; + int ret; if (!exclusive) { ret = dma_resv_reserve_shared(resv, 1); @@ -355,10 +356,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e return ret; } - fobj = dma_resv_shared_list(resv); - fence = dma_resv_excl_fence(resv); - - if (fence) { + dma_resv_for_each_fence(&cursor, resv, exclusive, fence) { struct nouveau_channel *prev = NULL; bool must_wait = true; @@ -366,41 +364,19 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e if (f) { rcu_read_lock(); prev = rcu_dereference(f->channel); - if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0)) + if (prev && (prev == chan || + fctx->sync(f, prev, chan) == 0)) must_wait = false; rcu_read_unlock(); } - if (must_wait) + if (must_wait) { ret = dma_fence_wait(fence, intr); - - return ret; - } - - if (!exclusive || !fobj) - return ret; - - for (i = 0; i < fobj->shared_count && !ret; ++i) { - struct nouveau_channel *prev = NULL; - bool must_wait = true; - - fence = rcu_dereference_protected(fobj->shared[i], - dma_resv_held(resv)); - - f = nouveau_local_fence(fence, chan->drm); - if (f) { - rcu_read_lock(); - prev = rcu_dereference(f->channel); - if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0)) - must_wait = false; - rcu_read_unlock(); + if (ret) + return ret; } - - if (must_wait) - ret = dma_fence_wait(fence, intr); } - - return ret; + return 0; } void
Simplifying the code a bit. Signed-off-by: Christian König <christian.koenig@amd.com> --- drivers/gpu/drm/nouveau/nouveau_fence.c | 48 +++++++------------------ 1 file changed, 12 insertions(+), 36 deletions(-)