
drm/ttm: enable eviction for Per-VM-BO

Message ID 1513239041-32734-1-git-send-email-Hongbo.He@amd.com (mailing list archive)
State New, archived

Commit Message

He, Hongbo Dec. 14, 2017, 8:10 a.m. UTC
Change-Id: I0c6ece0decd18d30ccc94e5c7ca106d351941c62
Signed-off-by: Roger He <Hongbo.He@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

Comments

Christian König Dec. 14, 2017, 8:18 a.m. UTC | #1
We need some commit message here. Something like this:

Allow eviction of BOs reserved by the caller when they are not part of 
the current working set.
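
For context, a caller opts in through the operation context. A minimal sketch, assuming the ctx->resv and ctx->allow_reserved_eviction fields referenced by this patch and the ttm_operation_ctx-based ttm_bo_validate() from earlier in the series; the placement variable and the choice of resv are hypothetical driver plumbing:

	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		/* reservation object shared by the caller's per-VM BOs;
		 * BOs using it may now be evicted even though the caller
		 * already holds the lock */
		.resv = bo->resv,
		.allow_reserved_eviction = true,
	};
	int r;

	/* placement: the driver's desired placement for this BO */
	r = ttm_bo_validate(bo, &placement, &ctx);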

With that fixed, the patch is Reviewed-by: Christian König 
<christian.koenig@amd.com>.

Thanks,
Christian.

Am 14.12.2017 um 09:10 schrieb Roger He:
> Change-Id: I0c6ece0decd18d30ccc94e5c7ca106d351941c62
> Signed-off-by: Roger He <Hongbo.He@amd.com>
> ---
>   drivers/gpu/drm/ttm/ttm_bo.c | 11 +++++------
>   1 file changed, 5 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index 098b22e..ba5b486 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -707,7 +707,6 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
>   EXPORT_SYMBOL(ttm_bo_eviction_valuable);
>   
>   static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
> -			       struct reservation_object *resv,
>   			       uint32_t mem_type,
>   			       const struct ttm_place *place,
>   			       struct ttm_operation_ctx *ctx)
> @@ -722,8 +721,9 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
>   	spin_lock(&glob->lru_lock);
>   	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>   		list_for_each_entry(bo, &man->lru[i], lru) {
> -			if (bo->resv == resv) {
> -				if (list_empty(&bo->ddestroy))
> +			if (bo->resv == ctx->resv) {
> +				if (!ctx->allow_reserved_eviction &&
> +				    list_empty(&bo->ddestroy))
>   					continue;
>   			} else {
>   				locked = reservation_object_trylock(bo->resv);
> @@ -835,7 +835,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
>   			return ret;
>   		if (mem->mm_node)
>   			break;
> -		ret = ttm_mem_evict_first(bdev, bo->resv, mem_type, place, ctx);
> +		ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
>   		if (unlikely(ret != 0))
>   			return ret;
>   	} while (1);
> @@ -1332,8 +1332,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
>   	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>   		while (!list_empty(&man->lru[i])) {
>   			spin_unlock(&glob->lru_lock);
> -			ret = ttm_mem_evict_first(bdev, NULL, mem_type,
> -						  NULL, &ctx);
> +			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
>   			if (ret)
>   				return ret;
>   			spin_lock(&glob->lru_lock);
Thomas Hellström (VMware) Dec. 15, 2017, 6:24 a.m. UTC | #2
Hi.

On 12/14/2017 09:10 AM, Roger He wrote:
> Change-Id: I0c6ece0decd18d30ccc94e5c7ca106d351941c62
> Signed-off-by: Roger He <Hongbo.He@amd.com>
> ---
>   drivers/gpu/drm/ttm/ttm_bo.c | 11 +++++------
>   1 file changed, 5 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index 098b22e..ba5b486 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -707,7 +707,6 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
>   EXPORT_SYMBOL(ttm_bo_eviction_valuable);
>   
>   static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
> -			       struct reservation_object *resv,
>   			       uint32_t mem_type,
>   			       const struct ttm_place *place,
>   			       struct ttm_operation_ctx *ctx)
> @@ -722,8 +721,9 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
>   	spin_lock(&glob->lru_lock);
>   	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>   		list_for_each_entry(bo, &man->lru[i], lru) {
> -			if (bo->resv == resv) {
> -				if (list_empty(&bo->ddestroy))
> +			if (bo->resv == ctx->resv) {
> +				if (!ctx->allow_reserved_eviction &&
> +				    list_empty(&bo->ddestroy))
>   					continue;

It looks to me like a value of locked==true could leak through from a 
previous loop iteration here, so that locked ends up being true if 
(bo->resv == resv && ctx->allow_reserved_eviction), even if no locking 
was taking place. Perhaps re-initialize locked to false on each loop 
iteration?
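
Something along those lines, i.e. declaring locked inside the inner loop so it starts out false for every candidate BO (just a sketch; the rest of the loop body is elided):

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &man->lru[i], lru) {
			bool locked = false;	/* reset for every candidate */

			if (bo->resv == ctx->resv) {
				if (!ctx->allow_reserved_eviction &&
				    list_empty(&bo->ddestroy))
					continue;
			} else {
				locked = reservation_object_trylock(bo->resv);
				if (!locked)
					continue;
			}
			/* ... eviction candidate handling ... */
		}
	}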

/Thomas

>   			} else {
>   				locked = reservation_object_trylock(bo->resv);
> @@ -835,7 +835,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
>   			return ret;
>   		if (mem->mm_node)
>   			break;
> -		ret = ttm_mem_evict_first(bdev, bo->resv, mem_type, place, ctx);
> +		ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
>   		if (unlikely(ret != 0))
>   			return ret;
>   	} while (1);
> @@ -1332,8 +1332,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
>   	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>   		while (!list_empty(&man->lru[i])) {
>   			spin_unlock(&glob->lru_lock);
> -			ret = ttm_mem_evict_first(bdev, NULL, mem_type,
> -						  NULL, &ctx);
> +			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
>   			if (ret)
>   				return ret;
>   			spin_lock(&glob->lru_lock);
Thomas Hellström (VMware) Dec. 15, 2017, 7:01 a.m. UTC | #3
Roger and Christian,

Correct me if I'm wrong, but it seems to me like a lot of the recent 
changes to ttm_bo.c are there to allow recursive reservation object 
locking in the case of shared reservation objects, but only in certain 
functions and with special arguments so that it doesn't look like 
recursive locking to the lockdep checker. Wouldn't it be a lot cleaner 
if we were to hide all this in a resurrected __ttm_bo_reserve, 
something along the lines of:

int __ttm_bo_reserve(struct ttm_buffer_object *bo,
		     struct ttm_operation_ctx *ctx)
{
	if (ctx && ctx->resv == bo->resv) {
		/* The caller already holds this (shared) reservation object. */
#ifdef CONFIG_LOCKDEP
		WARN_ON(bo->reserved);
		lockdep_assert_held(&ctx->resv->lock.base);
		ctx->reserve_count++;
		bo->reserved = true;
#endif
		return 0;
	} else {
		int ret = reservation_object_lock(bo->resv, NULL) ? -EBUSY : 0;

		if (ret)
			return ret;
#ifdef CONFIG_LOCKDEP
		WARN_ON(bo->reserved);
		bo->reserved = true;
#endif
		return 0;
	}
}

And similar for tryreserve and unreserve? Perhaps with a ww_acquire_ctx 
included somewhere as well...
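
For illustration, the unreserve side might then look something like this (again just a sketch; bo->reserved and ctx->reserve_count are the same hypothetical debug-only fields as above, not existing TTM state):

void __ttm_bo_unreserve(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx)
{
	if (ctx && ctx->resv == bo->resv) {
		/* Shared reservation object stays locked by the caller. */
#ifdef CONFIG_LOCKDEP
		WARN_ON(!bo->reserved);
		ctx->reserve_count--;
		bo->reserved = false;
#endif
		return;
	}
#ifdef CONFIG_LOCKDEP
	WARN_ON(!bo->reserved);
	bo->reserved = false;
#endif
	reservation_object_unlock(bo->resv);
}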

/Thomas




On 12/14/2017 09:10 AM, Roger He wrote:
> Change-Id: I0c6ece0decd18d30ccc94e5c7ca106d351941c62
> Signed-off-by: Roger He <Hongbo.He@amd.com>
> ---
>   drivers/gpu/drm/ttm/ttm_bo.c | 11 +++++------
>   1 file changed, 5 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index 098b22e..ba5b486 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -707,7 +707,6 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
>   EXPORT_SYMBOL(ttm_bo_eviction_valuable);
>   
>   static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
> -			       struct reservation_object *resv,
>   			       uint32_t mem_type,
>   			       const struct ttm_place *place,
>   			       struct ttm_operation_ctx *ctx)
> @@ -722,8 +721,9 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
>   	spin_lock(&glob->lru_lock);
>   	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>   		list_for_each_entry(bo, &man->lru[i], lru) {
> -			if (bo->resv == resv) {
> -				if (list_empty(&bo->ddestroy))
> +			if (bo->resv == ctx->resv) {
> +				if (!ctx->allow_reserved_eviction &&
> +				    list_empty(&bo->ddestroy))
>   					continue;
>   			} else {
>   				locked = reservation_object_trylock(bo->resv);
> @@ -835,7 +835,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
>   			return ret;
>   		if (mem->mm_node)
>   			break;
> -		ret = ttm_mem_evict_first(bdev, bo->resv, mem_type, place, ctx);
> +		ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
>   		if (unlikely(ret != 0))
>   			return ret;
>   	} while (1);
> @@ -1332,8 +1332,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
>   	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>   		while (!list_empty(&man->lru[i])) {
>   			spin_unlock(&glob->lru_lock);
> -			ret = ttm_mem_evict_first(bdev, NULL, mem_type,
> -						  NULL, &ctx);
> +			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
>   			if (ret)
>   				return ret;
>   			spin_lock(&glob->lru_lock);
Christian König Dec. 15, 2017, 9:13 a.m. UTC | #4
Hi Thomas,

actually I was very happy to get rid of that stuff.

In the long run I indeed wanted to replace ctx->resv with the 
ww_acquire_ctx to enable eviction of even more things, but that is a 
different story.
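
The rough idea would be to key the "already locked by this caller" check off the ww_acquire_ctx instead of a single shared reservation object, something like the following hypothetical helper (only a sketch of the direction, not part of this patch):

	static bool ttm_bo_locked_by_caller(struct ttm_buffer_object *bo,
					    struct ww_acquire_ctx *ticket)
	{
		/* bo->resv->lock is a ww_mutex and remembers the acquire
		 * context it was locked with, so any BO locked under the
		 * caller's ticket can be recognized, not only BOs sharing
		 * one reservation object. */
		return ticket && READ_ONCE(bo->resv->lock.ctx) == ticket;
	}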

Recursive locking is usually something we should try to avoid.

Regards,
Christian.

Am 15.12.2017 um 08:01 schrieb Thomas Hellstrom:
> Roger and Christian,
>
> Correct me if I'm wrong, but It seems to me like a lot of the recent 
> changes to ttm_bo.c are to allow recursive reservation object locking 
> in the case of shared reservation objects, but only in certain 
> functions and with special arguments so it doesn't look like recursive 
> locking to the lockdep checker.  Wouldn't it be a lot cleaner if we 
> were to hide all this in a resurrected __ttm_bo_reserve something 
> along the lines of
>
> int __ttm_bo_reserve(struct ttm_bo *bo, struct ttm_operation_ctx *ctx) {
>     if (ctx && ctx->resv == bo->resv) {
> #ifdef CONFIG_LOCKDEP
>         WARN_ON(bo->reserved);
> lockdep_assert_held(&ctx->resv);
>         ctx->reserve_count++;
> bo->reserved = true;
> #endif
>         return0;
>      } else {
>         int ret = reservation_object_lock(bo->resv, NULL) ? 0:-EBUSY;
>
>         if (ret)
>             return ret;
> #ifdef CONFIG_LOCKDEP
>         WARN_ON(bo->reserved);
>         bo->reserved = true;
> #endif
>         return 0;
> }
>
> And similar for tryreserve and unreserve? Perhaps with a 
> ww_acquire_ctx included somewhere as well...
>
> /Thomas
>
>
>
>
> On 12/14/2017 09:10 AM, Roger He wrote:
>> Change-Id: I0c6ece0decd18d30ccc94e5c7ca106d351941c62
>> Signed-off-by: Roger He <Hongbo.He@amd.com>
>> ---
>>   drivers/gpu/drm/ttm/ttm_bo.c | 11 +++++------
>>   1 file changed, 5 insertions(+), 6 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
>> index 098b22e..ba5b486 100644
>> --- a/drivers/gpu/drm/ttm/ttm_bo.c
>> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
>> @@ -707,7 +707,6 @@ bool ttm_bo_eviction_valuable(struct 
>> ttm_buffer_object *bo,
>>   EXPORT_SYMBOL(ttm_bo_eviction_valuable);
>>     static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
>> -                   struct reservation_object *resv,
>>                      uint32_t mem_type,
>>                      const struct ttm_place *place,
>>                      struct ttm_operation_ctx *ctx)
>> @@ -722,8 +721,9 @@ static int ttm_mem_evict_first(struct 
>> ttm_bo_device *bdev,
>>       spin_lock(&glob->lru_lock);
>>       for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>>           list_for_each_entry(bo, &man->lru[i], lru) {
>> -            if (bo->resv == resv) {
>> -                if (list_empty(&bo->ddestroy))
>> +            if (bo->resv == ctx->resv) {
>> +                if (!ctx->allow_reserved_eviction &&
>> +                    list_empty(&bo->ddestroy))
>>                       continue;
>>               } else {
>>                   locked = reservation_object_trylock(bo->resv);
>> @@ -835,7 +835,7 @@ static int ttm_bo_mem_force_space(struct 
>> ttm_buffer_object *bo,
>>               return ret;
>>           if (mem->mm_node)
>>               break;
>> -        ret = ttm_mem_evict_first(bdev, bo->resv, mem_type, place, 
>> ctx);
>> +        ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
>>           if (unlikely(ret != 0))
>>               return ret;
>>       } while (1);
>> @@ -1332,8 +1332,7 @@ static int ttm_bo_force_list_clean(struct 
>> ttm_bo_device *bdev,
>>       for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>>           while (!list_empty(&man->lru[i])) {
>>               spin_unlock(&glob->lru_lock);
>> -            ret = ttm_mem_evict_first(bdev, NULL, mem_type,
>> -                          NULL, &ctx);
>> +            ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
>>               if (ret)
>>                   return ret;
>>               spin_lock(&glob->lru_lock);
>
>
He, Hongbo Dec. 15, 2017, 9:14 a.m. UTC | #5
-----Original Message-----
From: Thomas Hellstrom [mailto:thomas@shipmail.org]
Sent: Friday, December 15, 2017 2:25 PM
To: He, Roger <Hongbo.He@amd.com>; amd-gfx@lists.freedesktop.org; dri-devel@lists.freedesktop.org
Cc: Koenig, Christian <Christian.Koenig@amd.com>
Subject: Re: [PATCH] drm/ttm: enable eviction for Per-VM-BO

Hi.

On 12/14/2017 09:10 AM, Roger He wrote:
> Change-Id: I0c6ece0decd18d30ccc94e5c7ca106d351941c62
> Signed-off-by: Roger He <Hongbo.He@amd.com>
> ---
>   drivers/gpu/drm/ttm/ttm_bo.c | 11 +++++------
>   1 file changed, 5 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index 098b22e..ba5b486 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -707,7 +707,6 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
>   EXPORT_SYMBOL(ttm_bo_eviction_valuable);
>   
>   static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
> -			       struct reservation_object *resv,
>   			       uint32_t mem_type,
>   			       const struct ttm_place *place,
>   			       struct ttm_operation_ctx *ctx)
> @@ -722,8 +721,9 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
>   	spin_lock(&glob->lru_lock);
>   	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>   		list_for_each_entry(bo, &man->lru[i], lru) {
> -			if (bo->resv == resv) {
> -				if (list_empty(&bo->ddestroy))
> +			if (bo->resv == ctx->resv) {
> +				if (!ctx->allow_reserved_eviction &&
> +				    list_empty(&bo->ddestroy))
>   					continue;


	It looks to me like a value of locked==true could leak through from a previous loop iteration here, so that locked ends up being true if (bo->resv == resv && ctx->allow_reserved_eviction), even if no locking was taking place. Perhaps re-initialize locked to false on each loop iteration?

At the beginning, I also had this concern about the incorrect use of locked, so I added a patch for it a few days ago:

"init locked again to prevent incorrect unlock"

Does it help?


Thanks
Roger(Hongbo.He)

/Thomas

>   			} else {
>   				locked = reservation_object_trylock(bo->resv);
> @@ -835,7 +835,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
>   			return ret;
>   		if (mem->mm_node)
>   			break;
> -		ret = ttm_mem_evict_first(bdev, bo->resv, mem_type, place, ctx);
> +		ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
>   		if (unlikely(ret != 0))
>   			return ret;
>   	} while (1);
> @@ -1332,8 +1332,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
>   	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>   		while (!list_empty(&man->lru[i])) {
>   			spin_unlock(&glob->lru_lock);
> -			ret = ttm_mem_evict_first(bdev, NULL, mem_type,
> -						  NULL, &ctx);
> +			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
>   			if (ret)
>   				return ret;
>   			spin_lock(&glob->lru_lock);
Christian König Dec. 15, 2017, 9:21 a.m. UTC | #6
Am 15.12.2017 um 07:24 schrieb Thomas Hellstrom:
> Hi.
>
> On 12/14/2017 09:10 AM, Roger He wrote:
>> Change-Id: I0c6ece0decd18d30ccc94e5c7ca106d351941c62
>> Signed-off-by: Roger He <Hongbo.He@amd.com>
>> ---
>>   drivers/gpu/drm/ttm/ttm_bo.c | 11 +++++------
>>   1 file changed, 5 insertions(+), 6 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
>> index 098b22e..ba5b486 100644
>> --- a/drivers/gpu/drm/ttm/ttm_bo.c
>> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
>> @@ -707,7 +707,6 @@ bool ttm_bo_eviction_valuable(struct 
>> ttm_buffer_object *bo,
>>   EXPORT_SYMBOL(ttm_bo_eviction_valuable);
>>     static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
>> -                   struct reservation_object *resv,
>>                      uint32_t mem_type,
>>                      const struct ttm_place *place,
>>                      struct ttm_operation_ctx *ctx)
>> @@ -722,8 +721,9 @@ static int ttm_mem_evict_first(struct 
>> ttm_bo_device *bdev,
>>       spin_lock(&glob->lru_lock);
>>       for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>>           list_for_each_entry(bo, &man->lru[i], lru) {
>> -            if (bo->resv == resv) {
>> -                if (list_empty(&bo->ddestroy))
>> +            if (bo->resv == ctx->resv) {
>> +                if (!ctx->allow_reserved_eviction &&
>> +                    list_empty(&bo->ddestroy))
>>                       continue;
>
> It looks to me like a value of locked==true could leak through from a 
> previous loop iteration here, so that locked ends up to be true if 
> (bo->resv == resv  && ctx->allow_reserved_eviction), even if no 
> locking was taking place. Perhaps re-initialize locked to false on 
> each loop iteration?

Yeah, that is correct. Roger had a patch for this as a prerequisite for 
this one.

No idea why that change isn't visible in this one.

Christian.

>
> /Thomas
>
>>               } else {
>>                   locked = reservation_object_trylock(bo->resv);
>> @@ -835,7 +835,7 @@ static int ttm_bo_mem_force_space(struct 
>> ttm_buffer_object *bo,
>>               return ret;
>>           if (mem->mm_node)
>>               break;
>> -        ret = ttm_mem_evict_first(bdev, bo->resv, mem_type, place, 
>> ctx);
>> +        ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
>>           if (unlikely(ret != 0))
>>               return ret;
>>       } while (1);
>> @@ -1332,8 +1332,7 @@ static int ttm_bo_force_list_clean(struct 
>> ttm_bo_device *bdev,
>>       for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>>           while (!list_empty(&man->lru[i])) {
>>               spin_unlock(&glob->lru_lock);
>> -            ret = ttm_mem_evict_first(bdev, NULL, mem_type,
>> -                          NULL, &ctx);
>> +            ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
>>               if (ret)
>>                   return ret;
>>               spin_lock(&glob->lru_lock);
>
>
Thomas Hellström (VMware) Dec. 15, 2017, 9:38 a.m. UTC | #7
On 12/15/2017 10:13 AM, Christian König wrote:
> Hi Thomas,
>
> actually I was very happy to get rid of that stuff.

Yes, wrappers that don't do anything don't make much sense, but this is 
a different story.

>
> In the long run I indeed wanted to replace ctx->resv with the 
> ww_acquire_ctx to enable eviction of even more things, but that is a 
> different story.
>
> Recursive locking is usually something we should try to avoid.

Well, this is more or less replicating what you are doing currently, but 
instead of spreading the checks and locking state all over the code, both 
as local variables and as parameters, it keeps them in a single place 
with correctness checks.

I agree recursive locking is generally frowned upon, but you're already 
doing it, not by using recursive locks, but by passing locking state 
around, which IMO is worse.

Collecting the state in the operation_ctx will make that usage pattern 
more obvious and will help make the code cleaner and less error-prone.

/Thomas



>
> Regards,
> Christian.
>
> Am 15.12.2017 um 08:01 schrieb Thomas Hellstrom:
>> Roger and Christian,
>>
>> Correct me if I'm wrong, but It seems to me like a lot of the recent 
>> changes to ttm_bo.c are to allow recursive reservation object locking 
>> in the case of shared reservation objects, but only in certain 
>> functions and with special arguments so it doesn't look like 
>> recursive locking to the lockdep checker. Wouldn't it be a lot 
>> cleaner if we were to hide all this in a resurrected __ttm_bo_reserve 
>> something along the lines of
>>
>> int __ttm_bo_reserve(struct ttm_bo *bo, struct ttm_operation_ctx *ctx) {
>>     if (ctx && ctx->resv == bo->resv) {
>> #ifdef CONFIG_LOCKDEP
>>         WARN_ON(bo->reserved);
>> lockdep_assert_held(&ctx->resv);
>>         ctx->reserve_count++;
>> bo->reserved = true;
>> #endif
>>         return0;
>>      } else {
>>         int ret = reservation_object_lock(bo->resv, NULL) ? 0:-EBUSY;
>>
>>         if (ret)
>>             return ret;
>> #ifdef CONFIG_LOCKDEP
>>         WARN_ON(bo->reserved);
>>         bo->reserved = true;
>> #endif
>>         return 0;
>> }
>>
>> And similar for tryreserve and unreserve? Perhaps with a 
>> ww_acquire_ctx included somewhere as well...
>>
>> /Thomas
>>
>>
>>
>>
>> On 12/14/2017 09:10 AM, Roger He wrote:
>>> Change-Id: I0c6ece0decd18d30ccc94e5c7ca106d351941c62
>>> Signed-off-by: Roger He <Hongbo.He@amd.com>
>>> ---
>>>   drivers/gpu/drm/ttm/ttm_bo.c | 11 +++++------
>>>   1 file changed, 5 insertions(+), 6 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c 
>>> b/drivers/gpu/drm/ttm/ttm_bo.c
>>> index 098b22e..ba5b486 100644
>>> --- a/drivers/gpu/drm/ttm/ttm_bo.c
>>> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
>>> @@ -707,7 +707,6 @@ bool ttm_bo_eviction_valuable(struct 
>>> ttm_buffer_object *bo,
>>>   EXPORT_SYMBOL(ttm_bo_eviction_valuable);
>>>     static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
>>> -                   struct reservation_object *resv,
>>>                      uint32_t mem_type,
>>>                      const struct ttm_place *place,
>>>                      struct ttm_operation_ctx *ctx)
>>> @@ -722,8 +721,9 @@ static int ttm_mem_evict_first(struct 
>>> ttm_bo_device *bdev,
>>>       spin_lock(&glob->lru_lock);
>>>       for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>>>           list_for_each_entry(bo, &man->lru[i], lru) {
>>> -            if (bo->resv == resv) {
>>> -                if (list_empty(&bo->ddestroy))
>>> +            if (bo->resv == ctx->resv) {
>>> +                if (!ctx->allow_reserved_eviction &&
>>> +                    list_empty(&bo->ddestroy))
>>>                       continue;
>>>               } else {
>>>                   locked = reservation_object_trylock(bo->resv);
>>> @@ -835,7 +835,7 @@ static int ttm_bo_mem_force_space(struct 
>>> ttm_buffer_object *bo,
>>>               return ret;
>>>           if (mem->mm_node)
>>>               break;
>>> -        ret = ttm_mem_evict_first(bdev, bo->resv, mem_type, place, 
>>> ctx);
>>> +        ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
>>>           if (unlikely(ret != 0))
>>>               return ret;
>>>       } while (1);
>>> @@ -1332,8 +1332,7 @@ static int ttm_bo_force_list_clean(struct 
>>> ttm_bo_device *bdev,
>>>       for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>>>           while (!list_empty(&man->lru[i])) {
>>>               spin_unlock(&glob->lru_lock);
>>> -            ret = ttm_mem_evict_first(bdev, NULL, mem_type,
>>> -                          NULL, &ctx);
>>> +            ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
>>>               if (ret)
>>>                   return ret;
>>>               spin_lock(&glob->lru_lock);
>>
>>
Christian König Dec. 15, 2017, 9:53 a.m. UTC | #8
Am 15.12.2017 um 10:38 schrieb Thomas Hellstrom:
> On 12/15/2017 10:13 AM, Christian König wrote:
>> Hi Thomas,
>>
>> actually I was very happy to get rid of that stuff.
>
> Yes, wrappers that don't do anything don't make much sense, but this 
> is a different story.

I was not talking about the wrappers, but rather about having the 
locking code in TTM.

>
>>
>> In the long run I indeed wanted to replace ctx->resv with the 
>> ww_acquire_ctx to enable eviction of even more things, but that is a 
>> different story.
>>
>> Recursive locking is usually something we should try to avoid.
>
> Well this is more or less replicating what you are doing currently but 
> instead of spreading the checks and locking state all over the code, 
> both as local variables and parameters this is keeping it in a single 
> place with correctness checks.

I don't see how that can be correct. As far as I can see it makes TTM 
responsible for locking again and, to be honest, TTM is already a bloody 
mess in that regard.

My intention in the long term is rather to remove logic from TTM and 
move it back into the drivers. The end result I have in mind is that TTM 
becomes a toolbox instead of the midlayer it is currently.

Regards,
Christian.

>
>
> I agree recursive locking is generally frowned upon, but you're 
> already doing it, not by using recursive locks, but by passing locking 
> state around which IMO is worse.
>
> Collecting the state in the operation_ctx will make that 
> usage-pattern more obvious but will help make the code cleaner and 
> less error prone.
>
> /Thomas
>
>
>
>>
>> Regards,
>> Christian.
>>
>> Am 15.12.2017 um 08:01 schrieb Thomas Hellstrom:
>>> Roger and Christian,
>>>
>>> Correct me if I'm wrong, but It seems to me like a lot of the recent 
>>> changes to ttm_bo.c are to allow recursive reservation object 
>>> locking in the case of shared reservation objects, but only in 
>>> certain functions and with special arguments so it doesn't look like 
>>> recursive locking to the lockdep checker. Wouldn't it be a lot 
>>> cleaner if we were to hide all this in a resurrected 
>>> __ttm_bo_reserve something along the lines of
>>>
>>> int __ttm_bo_reserve(struct ttm_bo *bo, struct ttm_operation_ctx 
>>> *ctx) {
>>>     if (ctx && ctx->resv == bo->resv) {
>>> #ifdef CONFIG_LOCKDEP
>>>         WARN_ON(bo->reserved);
>>> lockdep_assert_held(&ctx->resv);
>>>         ctx->reserve_count++;
>>> bo->reserved = true;
>>> #endif
>>>         return0;
>>>      } else {
>>>         int ret = reservation_object_lock(bo->resv, NULL) ? 0:-EBUSY;
>>>
>>>         if (ret)
>>>             return ret;
>>> #ifdef CONFIG_LOCKDEP
>>>         WARN_ON(bo->reserved);
>>>         bo->reserved = true;
>>> #endif
>>>         return 0;
>>> }
>>>
>>> And similar for tryreserve and unreserve? Perhaps with a 
>>> ww_acquire_ctx included somewhere as well...
>>>
>>> /Thomas
>>>
>>>
>>>
>>>
>>> On 12/14/2017 09:10 AM, Roger He wrote:
>>>> Change-Id: I0c6ece0decd18d30ccc94e5c7ca106d351941c62
>>>> Signed-off-by: Roger He <Hongbo.He@amd.com>
>>>> ---
>>>>   drivers/gpu/drm/ttm/ttm_bo.c | 11 +++++------
>>>>   1 file changed, 5 insertions(+), 6 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c 
>>>> b/drivers/gpu/drm/ttm/ttm_bo.c
>>>> index 098b22e..ba5b486 100644
>>>> --- a/drivers/gpu/drm/ttm/ttm_bo.c
>>>> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
>>>> @@ -707,7 +707,6 @@ bool ttm_bo_eviction_valuable(struct 
>>>> ttm_buffer_object *bo,
>>>>   EXPORT_SYMBOL(ttm_bo_eviction_valuable);
>>>>     static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
>>>> -                   struct reservation_object *resv,
>>>>                      uint32_t mem_type,
>>>>                      const struct ttm_place *place,
>>>>                      struct ttm_operation_ctx *ctx)
>>>> @@ -722,8 +721,9 @@ static int ttm_mem_evict_first(struct 
>>>> ttm_bo_device *bdev,
>>>>       spin_lock(&glob->lru_lock);
>>>>       for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>>>>           list_for_each_entry(bo, &man->lru[i], lru) {
>>>> -            if (bo->resv == resv) {
>>>> -                if (list_empty(&bo->ddestroy))
>>>> +            if (bo->resv == ctx->resv) {
>>>> +                if (!ctx->allow_reserved_eviction &&
>>>> +                    list_empty(&bo->ddestroy))
>>>>                       continue;
>>>>               } else {
>>>>                   locked = reservation_object_trylock(bo->resv);
>>>> @@ -835,7 +835,7 @@ static int ttm_bo_mem_force_space(struct 
>>>> ttm_buffer_object *bo,
>>>>               return ret;
>>>>           if (mem->mm_node)
>>>>               break;
>>>> -        ret = ttm_mem_evict_first(bdev, bo->resv, mem_type, place, 
>>>> ctx);
>>>> +        ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
>>>>           if (unlikely(ret != 0))
>>>>               return ret;
>>>>       } while (1);
>>>> @@ -1332,8 +1332,7 @@ static int ttm_bo_force_list_clean(struct 
>>>> ttm_bo_device *bdev,
>>>>       for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>>>>           while (!list_empty(&man->lru[i])) {
>>>>               spin_unlock(&glob->lru_lock);
>>>> -            ret = ttm_mem_evict_first(bdev, NULL, mem_type,
>>>> -                          NULL, &ctx);
>>>> +            ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
>>>>               if (ret)
>>>>                   return ret;
>>>>               spin_lock(&glob->lru_lock);
>>>
>>>
>

Patch

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 098b22e..ba5b486 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -707,7 +707,6 @@  bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 EXPORT_SYMBOL(ttm_bo_eviction_valuable);
 
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
-			       struct reservation_object *resv,
 			       uint32_t mem_type,
 			       const struct ttm_place *place,
 			       struct ttm_operation_ctx *ctx)
@@ -722,8 +721,9 @@  static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	spin_lock(&glob->lru_lock);
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 		list_for_each_entry(bo, &man->lru[i], lru) {
-			if (bo->resv == resv) {
-				if (list_empty(&bo->ddestroy))
+			if (bo->resv == ctx->resv) {
+				if (!ctx->allow_reserved_eviction &&
+				    list_empty(&bo->ddestroy))
 					continue;
 			} else {
 				locked = reservation_object_trylock(bo->resv);
@@ -835,7 +835,7 @@  static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 			return ret;
 		if (mem->mm_node)
 			break;
-		ret = ttm_mem_evict_first(bdev, bo->resv, mem_type, place, ctx);
+		ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
 		if (unlikely(ret != 0))
 			return ret;
 	} while (1);
@@ -1332,8 +1332,7 @@  static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 		while (!list_empty(&man->lru[i])) {
 			spin_unlock(&glob->lru_lock);
-			ret = ttm_mem_evict_first(bdev, NULL, mem_type,
-						  NULL, &ctx);
+			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
 			if (ret)
 				return ret;
 			spin_lock(&glob->lru_lock);