[2/3] drm/ttm: add input parameter force_alloc for ttm_bo_force_list_clean

Message ID 1518161419-27346-2-git-send-email-Hongbo.He@amd.com (mailing list archive)
State New, archived

Commit Message

He, Hongbo Feb. 9, 2018, 7:30 a.m. UTC
If true, allocate TTM pages regardless of the zone's global memory
accounting limit. During suspend, for example, we must avoid a TTM
memory allocation failure causing the whole suspend to fail.

Signed-off-by: Roger He <Hongbo.He@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)
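
For context, the failure mode the commit message describes arises in a
driver suspend path roughly like the following (a hedged sketch modeled
on amdgpu around this time; the helper shown is an illustration, not
part of this patch):

/* Sketch of a suspend-path caller, modeled on amdgpu: evicting
 * VRAM funnels into ttm_bo_force_list_clean(), where a TTM page
 * allocation can hit the zone accounting limit. If it does, the
 * eviction fails and the whole suspend fails with it, which is
 * what force_alloc is meant to prevent. */
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* Evict all buffer objects from VRAM before suspend. */
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}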

Comments

Christian König Feb. 9, 2018, 9:38 a.m. UTC | #1
On 09.02.2018 at 08:30, Roger He wrote:
> If true, allocate TTM pages regardless of the zone's global memory
> accounting limit. During suspend, for example, we must avoid a TTM
> memory allocation failure causing the whole suspend to fail.
>
> Signed-off-by: Roger He <Hongbo.He@amd.com>
> ---
>   drivers/gpu/drm/ttm/ttm_bo.c | 13 ++++++++-----
>   1 file changed, 8 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index a907311..685baad 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -1342,15 +1342,17 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
>   EXPORT_SYMBOL(ttm_bo_create);
>   
>   static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
> -				   unsigned mem_type)
> +			unsigned mem_type, bool force_alloc)
>   {
> -	struct ttm_operation_ctx ctx = { false, false };
> +	struct ttm_operation_ctx ttm_opt_ctx = { false, false };
>   	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
>   	struct ttm_bo_global *glob = bdev->glob;
>   	struct dma_fence *fence;
>   	int ret;
>   	unsigned i;
>   
> +	if (force_alloc)
> +		ttm_opt_ctx.flags = TTM_OPT_FLAG_FORCE_ALLOC;

Just unconditionally set that flag here as well. ttm_bo_force_list_clean() 
is only called on two occasions:
1. By ttm_bo_evict_mm() during suspend.
2. By ttm_bo_clean_mm() when the driver unloads.

In both cases we absolutely don't want any memory allocation failure.

Christian.
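
Taken to its conclusion, the suggestion removes the need for the
force_alloc parameter entirely. A minimal sketch of that unconditional
variant (illustrative only, assuming the TTM_OPT_FLAG_FORCE_ALLOC
semantics introduced earlier in this series):

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type)
{
	/* Both callers, suspend (ttm_bo_evict_mm) and driver unload
	 * (ttm_bo_clean_mm), must never fail on memory accounting,
	 * so allow allocations past the zone limit unconditionally. */
	struct ttm_operation_ctx ctx = { false, false };

	ctx.flags = TTM_OPT_FLAG_FORCE_ALLOC;

	/* ... eviction loop unchanged from the patch below ... */
}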

>   	/*
>   	 * Can't use standard list traversal since we're unlocking.
>   	 */
> @@ -1359,7 +1361,8 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
>   	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>   		while (!list_empty(&man->lru[i])) {
>   			spin_unlock(&glob->lru_lock);
> -			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
> +			ret = ttm_mem_evict_first(bdev, mem_type, NULL,
> +						  &ttm_opt_ctx);
>   			if (ret)
>   				return ret;
>   			spin_lock(&glob->lru_lock);
> @@ -1403,7 +1406,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
>   
>   	ret = 0;
>   	if (mem_type > 0) {
> -		ret = ttm_bo_force_list_clean(bdev, mem_type);
> +		ret = ttm_bo_force_list_clean(bdev, mem_type, true);
>   		if (ret) {
>   			pr_err("Cleanup eviction failed\n");
>   			return ret;
> @@ -1433,7 +1436,7 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
>   		return 0;
>   	}
>   
> -	return ttm_bo_force_list_clean(bdev, mem_type);
> +	return ttm_bo_force_list_clean(bdev, mem_type, true);
>   }
>   EXPORT_SYMBOL(ttm_bo_evict_mm);
>
He, Hongbo Feb. 9, 2018, 9:43 a.m. UTC | #2
OK, please ignore patch 3 since I have some minor changes.
I will send it out later.

Thanks
Roger(Hongbo.He)
Patch

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a907311..685baad 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1342,15 +1342,17 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
 EXPORT_SYMBOL(ttm_bo_create);
 
 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
-				   unsigned mem_type)
+			unsigned mem_type, bool force_alloc)
 {
-	struct ttm_operation_ctx ctx = { false, false };
+	struct ttm_operation_ctx ttm_opt_ctx = { false, false };
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_bo_global *glob = bdev->glob;
 	struct dma_fence *fence;
 	int ret;
 	unsigned i;
 
+	if (force_alloc)
+		ttm_opt_ctx.flags = TTM_OPT_FLAG_FORCE_ALLOC;
 	/*
 	 * Can't use standard list traversal since we're unlocking.
 	 */
@@ -1359,7 +1361,8 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 		while (!list_empty(&man->lru[i])) {
 			spin_unlock(&glob->lru_lock);
-			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
+			ret = ttm_mem_evict_first(bdev, mem_type, NULL,
+						  &ttm_opt_ctx);
 			if (ret)
 				return ret;
 			spin_lock(&glob->lru_lock);
@@ -1403,7 +1406,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 
 	ret = 0;
 	if (mem_type > 0) {
-		ret = ttm_bo_force_list_clean(bdev, mem_type);
+		ret = ttm_bo_force_list_clean(bdev, mem_type, true);
 		if (ret) {
 			pr_err("Cleanup eviction failed\n");
 			return ret;
@@ -1433,7 +1436,7 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 		return 0;
 	}
 
-	return ttm_bo_force_list_clean(bdev, mem_type);
+	return ttm_bo_force_list_clean(bdev, mem_type, true);
 }
 EXPORT_SYMBOL(ttm_bo_evict_mm);