
[13/26] drm/i915: use the new iterator in i915_gem_busy_ioctl

Message ID 20210917123513.1106-14-christian.koenig@amd.com (mailing list archive)
State New, archived
Series [01/26] dma-buf: add dma_resv_for_each_fence_unlocked v2

Commit Message

Christian König Sept. 17, 2021, 12:35 p.m. UTC
This makes the function much simpler since the complex
retry logic is now handled elsewhere.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_busy.c | 32 ++++++++----------------
 1 file changed, 11 insertions(+), 21 deletions(-)
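
For context, the new interface replaces the open-coded seqcount retry
loop with a cursor-based iterator. A minimal usage sketch, based on the
hunk in this patch (the handler names are placeholders, and the boolean
passed to dma_resv_iter_begin() is assumed to select whether shared
fences are walked in addition to the exclusive one):

	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, resv, true);	/* true: also walk shared fences */
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* the cursor knows whether the current fence is the exclusive one */
		if (dma_resv_iter_is_exclusive(&cursor))
			handle_write_fence(fence);	/* placeholder */
		else
			handle_read_fence(fence);	/* placeholder */
	}
	dma_resv_iter_end(&cursor);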

Comments

Tvrtko Ursulin Sept. 20, 2021, 8:45 a.m. UTC | #1
On 17/09/2021 13:35, Christian König wrote:
> This makes the function much simpler since the complex
> retry logic is now handled else where.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/i915/gem/i915_gem_busy.c | 32 ++++++++----------------
>   1 file changed, 11 insertions(+), 21 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> index 6234e17259c1..b1cb7ba688da 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> @@ -82,8 +82,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
>   {
>   	struct drm_i915_gem_busy *args = data;
>   	struct drm_i915_gem_object *obj;
> -	struct dma_resv_list *list;
> -	unsigned int seq;
> +	struct dma_resv_iter cursor;
> +	struct dma_fence *fence;
>   	int err;
>   
>   	err = -ENOENT;
> @@ -109,27 +109,17 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
>   	 * to report the overall busyness. This is what the wait-ioctl does.
>   	 *
>   	 */
> -retry:
> -	seq = raw_read_seqcount(&obj->base.resv->seq);
> -
> -	/* Translate the exclusive fence to the READ *and* WRITE engine */
> -	args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv));
> -
> -	/* Translate shared fences to READ set of engines */
> -	list = dma_resv_shared_list(obj->base.resv);
> -	if (list) {
> -		unsigned int shared_count = list->shared_count, i;
> -
> -		for (i = 0; i < shared_count; ++i) {
> -			struct dma_fence *fence =
> -				rcu_dereference(list->shared[i]);
> -
> +	args->busy = false;
> +	dma_resv_iter_begin(&cursor, obj->base.resv, true);
> +	dma_resv_for_each_fence_unlocked(&cursor, fence) {

You did not agree with my suggestion to reset args->busy on restart and 
so preserve current behaviour?

Regards,

Tvrtko

> +		if (dma_resv_iter_is_exclusive(&cursor))
> +			/* Translate the exclusive fence to the READ *and* WRITE engine */
> +			args->busy = busy_check_writer(fence);
> +		else
> +			/* Translate shared fences to READ set of engines */
>   			args->busy |= busy_check_reader(fence);
> -		}
>   	}
> -
> -	if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
> -		goto retry;
> +	dma_resv_iter_end(&cursor);
>   
>   	err = 0;
>   out:
>
Christian König Sept. 20, 2021, 10:13 a.m. UTC | #2
Am 20.09.21 um 10:45 schrieb Tvrtko Ursulin:
>
> On 17/09/2021 13:35, Christian König wrote:
>> This makes the function much simpler since the complex
>> retry logic is now handled else where.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/gpu/drm/i915/gem/i915_gem_busy.c | 32 ++++++++----------------
>>   1 file changed, 11 insertions(+), 21 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c 
>> b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>> index 6234e17259c1..b1cb7ba688da 100644
>> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>> @@ -82,8 +82,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void 
>> *data,
>>   {
>>       struct drm_i915_gem_busy *args = data;
>>       struct drm_i915_gem_object *obj;
>> -    struct dma_resv_list *list;
>> -    unsigned int seq;
>> +    struct dma_resv_iter cursor;
>> +    struct dma_fence *fence;
>>       int err;
>>         err = -ENOENT;
>> @@ -109,27 +109,17 @@ i915_gem_busy_ioctl(struct drm_device *dev, 
>> void *data,
>>        * to report the overall busyness. This is what the wait-ioctl 
>> does.
>>        *
>>        */
>> -retry:
>> -    seq = raw_read_seqcount(&obj->base.resv->seq);
>> -
>> -    /* Translate the exclusive fence to the READ *and* WRITE engine */
>> -    args->busy = 
>> busy_check_writer(dma_resv_excl_fence(obj->base.resv));
>> -
>> -    /* Translate shared fences to READ set of engines */
>> -    list = dma_resv_shared_list(obj->base.resv);
>> -    if (list) {
>> -        unsigned int shared_count = list->shared_count, i;
>> -
>> -        for (i = 0; i < shared_count; ++i) {
>> -            struct dma_fence *fence =
>> -                rcu_dereference(list->shared[i]);
>> -
>> +    args->busy = false;
>> +    dma_resv_iter_begin(&cursor, obj->base.resv, true);
>> +    dma_resv_for_each_fence_unlocked(&cursor, fence) {
>
> You did not agree with my suggestion to reset args->busy on restart 
> and so preserve current behaviour?

No, I want to keep the restart behavior internal to the dma_resv 
object, and as far as I can see it should not make a difference here.

Regards,
Christian.

>
> Regards,
>
> Tvrtko
>
>> +        if (dma_resv_iter_is_exclusive(&cursor))
>> +            /* Translate the exclusive fence to the READ *and* WRITE 
>> engine */
>> +            args->busy = busy_check_writer(fence);
>> +        else
>> +            /* Translate shared fences to READ set of engines */
>>               args->busy |= busy_check_reader(fence);
>> -        }
>>       }
>> -
>> -    if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
>> -        goto retry;
>> +    dma_resv_iter_end(&cursor);
>>         err = 0;
>>   out:
>>
Tvrtko Ursulin Sept. 20, 2021, 10:33 a.m. UTC | #3
On 20/09/2021 11:13, Christian König wrote:
> Am 20.09.21 um 10:45 schrieb Tvrtko Ursulin:
>>
>> On 17/09/2021 13:35, Christian König wrote:
>>> This makes the function much simpler since the complex
>>> retry logic is now handled else where.
>>>
>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>> ---
>>>   drivers/gpu/drm/i915/gem/i915_gem_busy.c | 32 ++++++++----------------
>>>   1 file changed, 11 insertions(+), 21 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c 
>>> b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>>> index 6234e17259c1..b1cb7ba688da 100644
>>> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>>> @@ -82,8 +82,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void 
>>> *data,
>>>   {
>>>       struct drm_i915_gem_busy *args = data;
>>>       struct drm_i915_gem_object *obj;
>>> -    struct dma_resv_list *list;
>>> -    unsigned int seq;
>>> +    struct dma_resv_iter cursor;
>>> +    struct dma_fence *fence;
>>>       int err;
>>>         err = -ENOENT;
>>> @@ -109,27 +109,17 @@ i915_gem_busy_ioctl(struct drm_device *dev, 
>>> void *data,
>>>        * to report the overall busyness. This is what the wait-ioctl 
>>> does.
>>>        *
>>>        */
>>> -retry:
>>> -    seq = raw_read_seqcount(&obj->base.resv->seq);
>>> -
>>> -    /* Translate the exclusive fence to the READ *and* WRITE engine */
>>> -    args->busy = 
>>> busy_check_writer(dma_resv_excl_fence(obj->base.resv));
>>> -
>>> -    /* Translate shared fences to READ set of engines */
>>> -    list = dma_resv_shared_list(obj->base.resv);
>>> -    if (list) {
>>> -        unsigned int shared_count = list->shared_count, i;
>>> -
>>> -        for (i = 0; i < shared_count; ++i) {
>>> -            struct dma_fence *fence =
>>> -                rcu_dereference(list->shared[i]);
>>> -
>>> +    args->busy = false;
>>> +    dma_resv_iter_begin(&cursor, obj->base.resv, true);
>>> +    dma_resv_for_each_fence_unlocked(&cursor, fence) {
>>
>> You did not agree with my suggestion to reset args->busy on restart 
>> and so preserve current behaviour?
> 
> No, I want to keep the restart behavior internally to the dma_resv 
> object and as far as I can see it should not make a difference here.

To be clear, on paper the difference between the old and the new 
implementation shows up when a restart happens while processing the 
shared fences.

The old implementation unconditionally goes back to "args->busy = 
busy_check_writer(dma_resv_excl_fence(obj->base.resv));" and so 
overwrites the set of flags returned to userspace.

The new implementation can merge new read flags into the old set of 
flags and so return a composition of past and current fences.

Maybe it does not matter hugely in this case; it depends on whether 
userspace typically just restarts until the flags are clear. But I am 
not sure.
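
For illustration, a minimal sketch (in the context of 
i915_gem_busy_ioctl()) of what preserving the old behaviour could look 
like, assuming the iterator exposes a restart check along the lines of 
dma_resv_iter_is_restarted(); whether that exact helper is part of this 
series is an assumption here:

	args->busy = 0;
	dma_resv_iter_begin(&cursor, obj->base.resv, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* drop flags collected before the restart and start over */
		if (dma_resv_iter_is_restarted(&cursor))
			args->busy = 0;

		if (dma_resv_iter_is_exclusive(&cursor))
			args->busy = busy_check_writer(fence);
		else
			args->busy |= busy_check_reader(fence);
	}
	dma_resv_iter_end(&cursor);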

On the higher level - what do you mean by wanting to keep the restart 
behaviour internal? Not providing iterator users a means of detecting 
it? I think it has to be provided.

Regards,

Tvrtko

> Regards,
> Christian.
> 
>>
>> Regards,
>>
>> Tvrtko
>>
>>> +        if (dma_resv_iter_is_exclusive(&cursor))
>>> +            /* Translate the exclusive fence to the READ *and* WRITE 
>>> engine */
>>> +            args->busy = busy_check_writer(fence);
>>> +        else
>>> +            /* Translate shared fences to READ set of engines */
>>>               args->busy |= busy_check_reader(fence);
>>> -        }
>>>       }
>>> -
>>> -    if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
>>> -        goto retry;
>>> +    dma_resv_iter_end(&cursor);
>>>         err = 0;
>>>   out:
>>>
>
Christian König Sept. 21, 2021, 9:41 a.m. UTC | #4
Am 20.09.21 um 12:33 schrieb Tvrtko Ursulin:
> On 20/09/2021 11:13, Christian König wrote:
>> Am 20.09.21 um 10:45 schrieb Tvrtko Ursulin:
>>>
>>> On 17/09/2021 13:35, Christian König wrote:
>>>> This makes the function much simpler since the complex
>>>> retry logic is now handled else where.
>>>>
>>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>>> ---
>>>>   drivers/gpu/drm/i915/gem/i915_gem_busy.c | 32 
>>>> ++++++++----------------
>>>>   1 file changed, 11 insertions(+), 21 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c 
>>>> b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>>>> index 6234e17259c1..b1cb7ba688da 100644
>>>> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>>>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>>>> @@ -82,8 +82,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void 
>>>> *data,
>>>>   {
>>>>       struct drm_i915_gem_busy *args = data;
>>>>       struct drm_i915_gem_object *obj;
>>>> -    struct dma_resv_list *list;
>>>> -    unsigned int seq;
>>>> +    struct dma_resv_iter cursor;
>>>> +    struct dma_fence *fence;
>>>>       int err;
>>>>         err = -ENOENT;
>>>> @@ -109,27 +109,17 @@ i915_gem_busy_ioctl(struct drm_device *dev, 
>>>> void *data,
>>>>        * to report the overall busyness. This is what the 
>>>> wait-ioctl does.
>>>>        *
>>>>        */
>>>> -retry:
>>>> -    seq = raw_read_seqcount(&obj->base.resv->seq);
>>>> -
>>>> -    /* Translate the exclusive fence to the READ *and* WRITE 
>>>> engine */
>>>> -    args->busy = 
>>>> busy_check_writer(dma_resv_excl_fence(obj->base.resv));
>>>> -
>>>> -    /* Translate shared fences to READ set of engines */
>>>> -    list = dma_resv_shared_list(obj->base.resv);
>>>> -    if (list) {
>>>> -        unsigned int shared_count = list->shared_count, i;
>>>> -
>>>> -        for (i = 0; i < shared_count; ++i) {
>>>> -            struct dma_fence *fence =
>>>> -                rcu_dereference(list->shared[i]);
>>>> -
>>>> +    args->busy = false;
>>>> +    dma_resv_iter_begin(&cursor, obj->base.resv, true);
>>>> +    dma_resv_for_each_fence_unlocked(&cursor, fence) {
>>>
>>> You did not agree with my suggestion to reset args->busy on restart 
>>> and so preserve current behaviour?
>>
>> No, I want to keep the restart behavior internally to the dma_resv 
>> object and as far as I can see it should not make a difference here.
>
> To be clear, on paper difference between old and new implementation is 
> if the restart happens while processing the shared fences.
>
> Old implementation unconditionally goes to "args->busy =
> >>> busy_check_writer(dma_resv_excl_fence(obj->base.resv));" and so 
> overwrites the set of flags returned to userspace.
>
> New implementation can merge new read flags to the old set of flags 
> and so return a composition of past and current fences.
>
> Maybe it does not matter hugely in this case, depends if userspace 
> typically just restarts until flags are clear. But I am not sure.
>
> On the higher level - what do you mean with wanting to keep the 
> restart behaviour internal? Not providing iterators users means of 
> detecting it? I think it has to be provided.

Ok, I will adjust that for now to get the patch set upstream. But in 
general, when somebody outside of the dma_resv code base depends on the 
restart behavior, that's a bug in the design of that code.

The callers should only care about which unsignaled fences are inside 
the dma_resv container, and it shouldn't matter whether those fences 
are presented once or multiple times because of a restart.

When this makes a difference we have a bug in the handling and should 
probably consider taking the dma_resv.lock instead.
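
For comparison, a minimal sketch of what a locked walk could look like 
here, assuming a dma_resv_for_each_fence() variant that requires the 
reservation lock to be held (treat the exact helper names and signature 
as assumptions):

	/* error handling omitted in this sketch */
	dma_resv_lock(obj->base.resv, NULL);
	args->busy = 0;
	dma_resv_for_each_fence(&cursor, obj->base.resv, true, fence) {
		/* no restarts are possible while the lock is held */
		if (dma_resv_iter_is_exclusive(&cursor))
			args->busy = busy_check_writer(fence);
		else
			args->busy |= busy_check_reader(fence);
	}
	dma_resv_unlock(obj->base.resv);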

Regards,
Christian.

>
> Regards,
>
> Tvrtko
>
>> Regards,
>> Christian.
>>
>>>
>>> Regards,
>>>
>>> Tvrtko
>>>
>>>> +        if (dma_resv_iter_is_exclusive(&cursor))
>>>> +            /* Translate the exclusive fence to the READ *and* 
>>>> WRITE engine */
>>>> +            args->busy = busy_check_writer(fence);
>>>> +        else
>>>> +            /* Translate shared fences to READ set of engines */
>>>>               args->busy |= busy_check_reader(fence);
>>>> -        }
>>>>       }
>>>> -
>>>> -    if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
>>>> -        goto retry;
>>>> +    dma_resv_iter_end(&cursor);
>>>>         err = 0;
>>>>   out:
>>>>
>>
Tvrtko Ursulin Sept. 21, 2021, 1:10 p.m. UTC | #5
On 21/09/2021 10:41, Christian König wrote:
> Am 20.09.21 um 12:33 schrieb Tvrtko Ursulin:
>> On 20/09/2021 11:13, Christian König wrote:
>>> Am 20.09.21 um 10:45 schrieb Tvrtko Ursulin:
>>>>
>>>> On 17/09/2021 13:35, Christian König wrote:
>>>>> This makes the function much simpler since the complex
>>>>> retry logic is now handled else where.
>>>>>
>>>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>>>> ---
>>>>>   drivers/gpu/drm/i915/gem/i915_gem_busy.c | 32 
>>>>> ++++++++----------------
>>>>>   1 file changed, 11 insertions(+), 21 deletions(-)
>>>>>
>>>>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c 
>>>>> b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>>>>> index 6234e17259c1..b1cb7ba688da 100644
>>>>> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>>>>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>>>>> @@ -82,8 +82,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void 
>>>>> *data,
>>>>>   {
>>>>>       struct drm_i915_gem_busy *args = data;
>>>>>       struct drm_i915_gem_object *obj;
>>>>> -    struct dma_resv_list *list;
>>>>> -    unsigned int seq;
>>>>> +    struct dma_resv_iter cursor;
>>>>> +    struct dma_fence *fence;
>>>>>       int err;
>>>>>         err = -ENOENT;
>>>>> @@ -109,27 +109,17 @@ i915_gem_busy_ioctl(struct drm_device *dev, 
>>>>> void *data,
>>>>>        * to report the overall busyness. This is what the 
>>>>> wait-ioctl does.
>>>>>        *
>>>>>        */
>>>>> -retry:
>>>>> -    seq = raw_read_seqcount(&obj->base.resv->seq);
>>>>> -
>>>>> -    /* Translate the exclusive fence to the READ *and* WRITE 
>>>>> engine */
>>>>> -    args->busy = 
>>>>> busy_check_writer(dma_resv_excl_fence(obj->base.resv));
>>>>> -
>>>>> -    /* Translate shared fences to READ set of engines */
>>>>> -    list = dma_resv_shared_list(obj->base.resv);
>>>>> -    if (list) {
>>>>> -        unsigned int shared_count = list->shared_count, i;
>>>>> -
>>>>> -        for (i = 0; i < shared_count; ++i) {
>>>>> -            struct dma_fence *fence =
>>>>> -                rcu_dereference(list->shared[i]);
>>>>> -
>>>>> +    args->busy = false;
>>>>> +    dma_resv_iter_begin(&cursor, obj->base.resv, true);
>>>>> +    dma_resv_for_each_fence_unlocked(&cursor, fence) {
>>>>
>>>> You did not agree with my suggestion to reset args->busy on restart 
>>>> and so preserve current behaviour?
>>>
>>> No, I want to keep the restart behavior internally to the dma_resv 
>>> object and as far as I can see it should not make a difference here.
>>
>> To be clear, on paper difference between old and new implementation is 
>> if the restart happens while processing the shared fences.
>>
>> Old implementation unconditionally goes to "args->busy =
>> >>> busy_check_writer(dma_resv_excl_fence(obj->base.resv));" and so 
>> overwrites the set of flags returned to userspace.
>>
>> New implementation can merge new read flags to the old set of flags 
>> and so return a composition of past and current fences.
>>
>> Maybe it does not matter hugely in this case, depends if userspace 
>> typically just restarts until flags are clear. But I am not sure.
>>
>> On the higher level - what do you mean with wanting to keep the 
>> restart behaviour internal? Not providing iterators users means of 
>> detecting it? I think it has to be provided.
> 
> Ok I will adjust that for now to get the patch set upstream. But in 
> general when somebody outside of the dma_resv code base depends on the 
> restart behavior then that's a bug inside the design of that code.

Thanks, no change in behaviour makes for an easy r-b. :)

> The callers should only care about what unsignaled fences are inside the 
> dma_resv container and it shouldn't matter if those fences are presented 
> once or multiple times because of a reset..
> 
> When this makes a difference we have a bug in the handling and should 
> probably consider taking the dma_resv.lock instead.

I agree, which is why I mentioned earlier that it would be good to 
completely separate locked from unlocked iterators and avoid situations 
where the unlocked one is called from a path where the object is locked.

Unfortunately, for the display code path I cannot easily help with the 
audit of call paths. And I think there are at least two patches in your 
series which need KMS expertise.

Regards,

Tvrtko

Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 6234e17259c1..b1cb7ba688da 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -82,8 +82,8 @@  i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_busy *args = data;
 	struct drm_i915_gem_object *obj;
-	struct dma_resv_list *list;
-	unsigned int seq;
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
 	int err;
 
 	err = -ENOENT;
@@ -109,27 +109,17 @@  i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * to report the overall busyness. This is what the wait-ioctl does.
 	 *
 	 */
-retry:
-	seq = raw_read_seqcount(&obj->base.resv->seq);
-
-	/* Translate the exclusive fence to the READ *and* WRITE engine */
-	args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv));
-
-	/* Translate shared fences to READ set of engines */
-	list = dma_resv_shared_list(obj->base.resv);
-	if (list) {
-		unsigned int shared_count = list->shared_count, i;
-
-		for (i = 0; i < shared_count; ++i) {
-			struct dma_fence *fence =
-				rcu_dereference(list->shared[i]);
-
+	args->busy = false;
+	dma_resv_iter_begin(&cursor, obj->base.resv, true);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+		if (dma_resv_iter_is_exclusive(&cursor))
+			/* Translate the exclusive fence to the READ *and* WRITE engine */
+			args->busy = busy_check_writer(fence);
+		else
+			/* Translate shared fences to READ set of engines */
 			args->busy |= busy_check_reader(fence);
-		}
 	}
-
-	if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
-		goto retry;
+	dma_resv_iter_end(&cursor);
 
 	err = 0;
 out:
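
For readability, the busy-check section as it reads with this patch 
applied (reconstructed from the hunk above):

	args->busy = false;
	dma_resv_iter_begin(&cursor, obj->base.resv, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (dma_resv_iter_is_exclusive(&cursor))
			/* Translate the exclusive fence to the READ *and* WRITE engine */
			args->busy = busy_check_writer(fence);
		else
			/* Translate shared fences to READ set of engines */
			args->busy |= busy_check_reader(fence);
	}
	dma_resv_iter_end(&cursor);

	err = 0;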