[09/28] dma-buf: use the new iterator in dma_resv_poll

Message ID 20211001100610.2899-10-christian.koenig@amd.com (mailing list archive)
State New, archived
Series [01/28] dma-buf: add dma_resv_for_each_fence_unlocked v7

Commit Message

Christian König Oct. 1, 2021, 10:05 a.m. UTC
Simplify the code a bit.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-buf.c | 36 ++++++------------------------------
 1 file changed, 6 insertions(+), 30 deletions(-)
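
The patch replaces the open-coded walk over the shared fence list (plus a
separate helper for the exclusive fence) with the dma_resv_for_each_fence()
iterator introduced earlier in this series. Below is a minimal before/after
sketch of that pattern; it is not taken from the patch itself, the function
names are made up for illustration, the reservation lock is assumed to be
held by the caller, and the meaning of the "write" flag (true = all fences,
false = only the exclusive fence) is assumed from the call sites further
down:

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/* Old pattern: walk the shared fence list and the exclusive fence
 * separately, with explicit RCU dereferences under the reservation lock. */
static void walk_fences_old(struct dma_resv *resv)
{
	struct dma_resv_list *fobj = dma_resv_shared_list(resv);
	struct dma_fence *fence;
	unsigned int i;

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference_protected(fobj->shared[i],
						  dma_resv_held(resv));
		/* ... per-fence work ... */
	}

	fence = dma_resv_excl_fence(resv);
	if (fence) {
		/* ... per-fence work ... */
	}
}

/* New pattern: one iterator covers both cases; the flag selects whether
 * the shared fences are included in addition to the exclusive one. */
static void walk_fences_new(struct dma_resv *resv, bool write)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, resv, write, fence) {
		/* ... per-fence work ... */
	}
}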

Comments

Tvrtko Ursulin Oct. 5, 2021, 7:44 a.m. UTC | #1
On 01/10/2021 11:05, Christian König wrote:
> Simplify the code a bit.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/dma-buf/dma-buf.c | 36 ++++++------------------------------
>   1 file changed, 6 insertions(+), 30 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index 8242b5d9baeb..beb504a92d60 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -209,19 +209,14 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
>   	dma_fence_put(fence);
>   }
>   
> -static bool dma_buf_poll_shared(struct dma_resv *resv,
> +static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
>   				struct dma_buf_poll_cb_t *dcb)
>   {
> -	struct dma_resv_list *fobj = dma_resv_shared_list(resv);
> +	struct dma_resv_iter cursor;
>   	struct dma_fence *fence;
> -	int i, r;
> -
> -	if (!fobj)
> -		return false;
> +	int r;
>   
> -	for (i = 0; i < fobj->shared_count; ++i) {
> -		fence = rcu_dereference_protected(fobj->shared[i],
> -						  dma_resv_held(resv));
> +	dma_resv_for_each_fence(&cursor, resv, write, fence) {
>   		dma_fence_get(fence);
>   		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
>   		if (!r)

It is unchanged with this patch, but are the semantics supposed to be 
like this? Signal poll event if _any_ of the shared fences has been 
signaled?

Regards,

Tvrtko

> @@ -232,24 +227,6 @@ static bool dma_buf_poll_shared(struct dma_resv *resv,
>   	return false;
>   }
>   
> -static bool dma_buf_poll_excl(struct dma_resv *resv,
> -			      struct dma_buf_poll_cb_t *dcb)
> -{
> -	struct dma_fence *fence = dma_resv_excl_fence(resv);
> -	int r;
> -
> -	if (!fence)
> -		return false;
> -
> -	dma_fence_get(fence);
> -	r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
> -	if (!r)
> -		return true;
> -	dma_fence_put(fence);
> -
> -	return false;
> -}
> -
>   static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
>   {
>   	struct dma_buf *dmabuf;
> @@ -282,8 +259,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
>   		spin_unlock_irq(&dmabuf->poll.lock);
>   
>   		if (events & EPOLLOUT) {
> -			if (!dma_buf_poll_shared(resv, dcb) &&
> -			    !dma_buf_poll_excl(resv, dcb))
> +			if (!dma_buf_poll_add_cb(resv, true, dcb))
>   				/* No callback queued, wake up any other waiters */
>   				dma_buf_poll_cb(NULL, &dcb->cb);
>   			else
> @@ -303,7 +279,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
>   		spin_unlock_irq(&dmabuf->poll.lock);
>   
>   		if (events & EPOLLIN) {
> -			if (!dma_buf_poll_excl(resv, dcb))
> +			if (!dma_buf_poll_add_cb(resv, false, dcb))
>   				/* No callback queued, wake up any other waiters */
>   				dma_buf_poll_cb(NULL, &dcb->cb);
>   			else
>
Christian König Oct. 5, 2021, 8:16 a.m. UTC | #2
On 05.10.21 at 09:44, Tvrtko Ursulin wrote:
>
> On 01/10/2021 11:05, Christian König wrote:
>> Simplify the code a bit.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/dma-buf/dma-buf.c | 36 ++++++------------------------------
>>   1 file changed, 6 insertions(+), 30 deletions(-)
>>
>> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
>> index 8242b5d9baeb..beb504a92d60 100644
>> --- a/drivers/dma-buf/dma-buf.c
>> +++ b/drivers/dma-buf/dma-buf.c
>> @@ -209,19 +209,14 @@ static void dma_buf_poll_cb(struct dma_fence 
>> *fence, struct dma_fence_cb *cb)
>>       dma_fence_put(fence);
>>   }
>>   -static bool dma_buf_poll_shared(struct dma_resv *resv,
>> +static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
>>                   struct dma_buf_poll_cb_t *dcb)
>>   {
>> -    struct dma_resv_list *fobj = dma_resv_shared_list(resv);
>> +    struct dma_resv_iter cursor;
>>       struct dma_fence *fence;
>> -    int i, r;
>> -
>> -    if (!fobj)
>> -        return false;
>> +    int r;
>>   -    for (i = 0; i < fobj->shared_count; ++i) {
>> -        fence = rcu_dereference_protected(fobj->shared[i],
>> -                          dma_resv_held(resv));
>> +    dma_resv_for_each_fence(&cursor, resv, write, fence) {
>>           dma_fence_get(fence);
>>           r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
>>           if (!r)
>
> It is unchanged with this patch, but are the semantics supposed to be 
> like this? Signal poll event if _any_ of the shared fences has been 
> signaled?

That had Daniel and me confused for a moment as well.

We don't signal the poll when any of the shared fences has signaled, but 
rather install a callback on the first not-signaled fence.

This callback then triggers a re-test of the poll, and only when no 
more unsignaled fences can be found is the poll considered signaled 
(at least that's the idea, the code could of course still be broken).

Christian.

>
> Regards,
>
> Tvrtko
>
>> @@ -232,24 +227,6 @@ static bool dma_buf_poll_shared(struct dma_resv 
>> *resv,
>>       return false;
>>   }
>>   -static bool dma_buf_poll_excl(struct dma_resv *resv,
>> -                  struct dma_buf_poll_cb_t *dcb)
>> -{
>> -    struct dma_fence *fence = dma_resv_excl_fence(resv);
>> -    int r;
>> -
>> -    if (!fence)
>> -        return false;
>> -
>> -    dma_fence_get(fence);
>> -    r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
>> -    if (!r)
>> -        return true;
>> -    dma_fence_put(fence);
>> -
>> -    return false;
>> -}
>> -
>>   static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
>>   {
>>       struct dma_buf *dmabuf;
>> @@ -282,8 +259,7 @@ static __poll_t dma_buf_poll(struct file *file, 
>> poll_table *poll)
>>           spin_unlock_irq(&dmabuf->poll.lock);
>>             if (events & EPOLLOUT) {
>> -            if (!dma_buf_poll_shared(resv, dcb) &&
>> -                !dma_buf_poll_excl(resv, dcb))
>> +            if (!dma_buf_poll_add_cb(resv, true, dcb))
>>                   /* No callback queued, wake up any other waiters */
>>                   dma_buf_poll_cb(NULL, &dcb->cb);
>>               else
>> @@ -303,7 +279,7 @@ static __poll_t dma_buf_poll(struct file *file, 
>> poll_table *poll)
>>           spin_unlock_irq(&dmabuf->poll.lock);
>>             if (events & EPOLLIN) {
>> -            if (!dma_buf_poll_excl(resv, dcb))
>> +            if (!dma_buf_poll_add_cb(resv, false, dcb))
>>                   /* No callback queued, wake up any other waiters */
>>                   dma_buf_poll_cb(NULL, &dcb->cb);
>>               else
>>
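
To make the flow described in the reply above concrete, here is a minimal
sketch, paraphrased from the dma_buf_poll() hunks in this patch; the
dcb->active bookkeeping, locking and dma_buf reference counting are
simplified:

/* Try to arm a callback on the first fence that has not signaled yet. */
static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
				struct dma_buf_poll_cb_t *dcb)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, resv, write, fence) {
		dma_fence_get(fence);
		if (!dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb))
			return true;	/* armed, poll stays "not ready" for now */
		dma_fence_put(fence);	/* already signaled, try the next fence */
	}
	return false;			/* nothing left to wait for */
}

/* Caller side, EPOLLOUT case (EPOLLIN is the same with write = false): */
	if (!dma_buf_poll_add_cb(resv, true, dcb))
		/* every fence has signaled: wake any other waiters, the
		 * EPOLLOUT bit stays set in the returned mask */
		dma_buf_poll_cb(NULL, &dcb->cb);
	else
		/* a callback is armed: drop EPOLLOUT from the returned mask;
		 * when the fence signals, dma_buf_poll_cb() wakes the poll
		 * waitqueue, poll() re-runs and the walk starts over until
		 * no unsignaled fence is left */
		events &= ~EPOLLOUT;
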
Tvrtko Ursulin Oct. 5, 2021, 8:41 a.m. UTC | #3
On 05/10/2021 09:16, Christian König wrote:
> On 05.10.21 at 09:44, Tvrtko Ursulin wrote:
>>
>> On 01/10/2021 11:05, Christian König wrote:
>>> Simplify the code a bit.
>>>
>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>> ---
>>>   drivers/dma-buf/dma-buf.c | 36 ++++++------------------------------
>>>   1 file changed, 6 insertions(+), 30 deletions(-)
>>>
>>> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
>>> index 8242b5d9baeb..beb504a92d60 100644
>>> --- a/drivers/dma-buf/dma-buf.c
>>> +++ b/drivers/dma-buf/dma-buf.c
>>> @@ -209,19 +209,14 @@ static void dma_buf_poll_cb(struct dma_fence 
>>> *fence, struct dma_fence_cb *cb)
>>>       dma_fence_put(fence);
>>>   }
>>>   -static bool dma_buf_poll_shared(struct dma_resv *resv,
>>> +static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
>>>                   struct dma_buf_poll_cb_t *dcb)
>>>   {
>>> -    struct dma_resv_list *fobj = dma_resv_shared_list(resv);
>>> +    struct dma_resv_iter cursor;
>>>       struct dma_fence *fence;
>>> -    int i, r;
>>> -
>>> -    if (!fobj)
>>> -        return false;
>>> +    int r;
>>>   -    for (i = 0; i < fobj->shared_count; ++i) {
>>> -        fence = rcu_dereference_protected(fobj->shared[i],
>>> -                          dma_resv_held(resv));
>>> +    dma_resv_for_each_fence(&cursor, resv, write, fence) {
>>>           dma_fence_get(fence);
>>>           r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
>>>           if (!r)
>>
>> It is unchanged with this patch, but are the semantics supposed to be 
>> like this? Signal poll event if _any_ of the shared fences has been 
>> signaled?
> 
> That had Daniel and me confused for a moment as well.
> 
> We don't signal the poll when any of the shared fences has signaled, but 
> rather install a callback on the first not-signaled fence.
> 
> This callback then triggers a re-test of the poll, and only when no 
> more unsignaled fences can be found is the poll considered signaled 
> (at least that's the idea, the code could of course still be broken).

You are right, one too many boolean inversions for me not to get confused.

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko

> 
> Christian.
> 
>>
>> Regards,
>>
>> Tvrtko
>>
>>> @@ -232,24 +227,6 @@ static bool dma_buf_poll_shared(struct dma_resv 
>>> *resv,
>>>       return false;
>>>   }
>>>   -static bool dma_buf_poll_excl(struct dma_resv *resv,
>>> -                  struct dma_buf_poll_cb_t *dcb)
>>> -{
>>> -    struct dma_fence *fence = dma_resv_excl_fence(resv);
>>> -    int r;
>>> -
>>> -    if (!fence)
>>> -        return false;
>>> -
>>> -    dma_fence_get(fence);
>>> -    r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
>>> -    if (!r)
>>> -        return true;
>>> -    dma_fence_put(fence);
>>> -
>>> -    return false;
>>> -}
>>> -
>>>   static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
>>>   {
>>>       struct dma_buf *dmabuf;
>>> @@ -282,8 +259,7 @@ static __poll_t dma_buf_poll(struct file *file, 
>>> poll_table *poll)
>>>           spin_unlock_irq(&dmabuf->poll.lock);
>>>             if (events & EPOLLOUT) {
>>> -            if (!dma_buf_poll_shared(resv, dcb) &&
>>> -                !dma_buf_poll_excl(resv, dcb))
>>> +            if (!dma_buf_poll_add_cb(resv, true, dcb))
>>>                   /* No callback queued, wake up any other waiters */
>>>                   dma_buf_poll_cb(NULL, &dcb->cb);
>>>               else
>>> @@ -303,7 +279,7 @@ static __poll_t dma_buf_poll(struct file *file, 
>>> poll_table *poll)
>>>           spin_unlock_irq(&dmabuf->poll.lock);
>>>             if (events & EPOLLIN) {
>>> -            if (!dma_buf_poll_excl(resv, dcb))
>>> +            if (!dma_buf_poll_add_cb(resv, false, dcb))
>>>                   /* No callback queued, wake up any other waiters */
>>>                   dma_buf_poll_cb(NULL, &dcb->cb);
>>>               else
>>>
>

Patch

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 8242b5d9baeb..beb504a92d60 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -209,19 +209,14 @@  static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 	dma_fence_put(fence);
 }
 
-static bool dma_buf_poll_shared(struct dma_resv *resv,
+static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
 				struct dma_buf_poll_cb_t *dcb)
 {
-	struct dma_resv_list *fobj = dma_resv_shared_list(resv);
+	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
-	int i, r;
-
-	if (!fobj)
-		return false;
+	int r;
 
-	for (i = 0; i < fobj->shared_count; ++i) {
-		fence = rcu_dereference_protected(fobj->shared[i],
-						  dma_resv_held(resv));
+	dma_resv_for_each_fence(&cursor, resv, write, fence) {
 		dma_fence_get(fence);
 		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
 		if (!r)
@@ -232,24 +227,6 @@  static bool dma_buf_poll_shared(struct dma_resv *resv,
 	return false;
 }
 
-static bool dma_buf_poll_excl(struct dma_resv *resv,
-			      struct dma_buf_poll_cb_t *dcb)
-{
-	struct dma_fence *fence = dma_resv_excl_fence(resv);
-	int r;
-
-	if (!fence)
-		return false;
-
-	dma_fence_get(fence);
-	r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
-	if (!r)
-		return true;
-	dma_fence_put(fence);
-
-	return false;
-}
-
 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 {
 	struct dma_buf *dmabuf;
@@ -282,8 +259,7 @@  static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 		spin_unlock_irq(&dmabuf->poll.lock);
 
 		if (events & EPOLLOUT) {
-			if (!dma_buf_poll_shared(resv, dcb) &&
-			    !dma_buf_poll_excl(resv, dcb))
+			if (!dma_buf_poll_add_cb(resv, true, dcb))
 				/* No callback queued, wake up any other waiters */
 				dma_buf_poll_cb(NULL, &dcb->cb);
 			else
@@ -303,7 +279,7 @@  static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 		spin_unlock_irq(&dmabuf->poll.lock);
 
 		if (events & EPOLLIN) {
-			if (!dma_buf_poll_excl(resv, dcb))
+			if (!dma_buf_poll_add_cb(resv, false, dcb))
 				/* No callback queued, wake up any other waiters */
 				dma_buf_poll_cb(NULL, &dcb->cb);
 			else
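
From the userspace point of view this implements regular poll() support on
a dma-buf file descriptor: EPOLLIN becomes ready once the exclusive (write)
fence has signaled, EPOLLOUT once all fences, shared readers included, have
signaled. A small userspace sketch; the file descriptor is assumed to come
from somewhere else (e.g. a PRIME export from a GPU driver) and error
handling is minimal:

#include <poll.h>
#include <stdio.h>

/* Wait until the dma-buf behind "fd" is idle enough for the given access.
 * Returns 1 when ready, 0 on timeout, -1 on error. */
static int wait_dmabuf_idle(int fd, int for_write, int timeout_ms)
{
	struct pollfd pfd = {
		.fd = fd,
		/* POLLOUT: all fences must signal (safe to write).
		 * POLLIN:  only the exclusive fence must signal (safe to read). */
		.events = for_write ? POLLOUT : POLLIN,
	};
	int ret = poll(&pfd, 1, timeout_ms);

	if (ret < 0) {
		perror("poll");
		return -1;
	}
	return ret > 0;
}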