[1/4] drm/ttm: add page order in page pool

Message ID 1511329016-552-1-git-send-email-Hongbo.He@amd.com (mailing list archive)
State New, archived

Commit Message

He, Hongbo Nov. 22, 2017, 5:36 a.m. UTC
Add an order field to struct ttm_page_pool to indicate the page
order of each element in the pool.

Change-Id: Ic609925ca5d2a5d4ad49d6becf505388ce3624cf
Signed-off-by: Roger He <Hongbo.He@amd.com>
---
 drivers/gpu/drm/ttm/ttm_page_alloc.c | 42 ++++++++++++++++++++++++++----------
 1 file changed, 31 insertions(+), 11 deletions(-)
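
In other words, each pool now records the allocation order of its
elements, and the shrinker converts between a page budget and whole
pool elements by shifting with that order. A standalone sketch of the
accounting, with invented numbers (illustration only, not part of the
patch):

	/* hypothetical: a huge pool with order 9, i.e. 512 pages per element */
	unsigned int order = 9;
	unsigned int nr_free = 1024;		/* shrink budget, in pages */
	unsigned int nr_free_pool = nr_free >> order;	/* 2 whole elements */
	/* ttm_page_pool_free() returns the number of elements it could
	 * NOT free; assume both were freed, so it returns 0 */
	unsigned long freed = (unsigned long)(nr_free_pool - 0) << order;	/* 1024 pages */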

Comments

Christian König Nov. 22, 2017, 7:47 a.m. UTC | #1
Am 22.11.2017 um 06:36 schrieb Roger He:
> Add an order field to struct ttm_page_pool to indicate the page
> order of each element in the pool.
>
> Change-Id: Ic609925ca5d2a5d4ad49d6becf505388ce3624cf
> Signed-off-by: Roger He <Hongbo.He@amd.com>
> ---
>   drivers/gpu/drm/ttm/ttm_page_alloc.c | 42 ++++++++++++++++++++++++++----------
>   1 file changed, 31 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
> index 72ea037..0a0c653 100644
> --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
> +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
> @@ -81,6 +81,7 @@ struct ttm_page_pool {
>   	char			*name;
>   	unsigned long		nfrees;
>   	unsigned long		nrefills;
> +	unsigned int		order;
>   };
>   
>   /**
> @@ -412,6 +413,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
>   	struct ttm_page_pool *pool;
>   	int shrink_pages = sc->nr_to_scan;
>   	unsigned long freed = 0;
> +	unsigned int nr_free_pool;
>   
>   	if (!mutex_trylock(&lock))
>   		return SHRINK_STOP;
> @@ -421,10 +423,15 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
>   		unsigned nr_free = shrink_pages;
>   		if (shrink_pages == 0)
>   			break;
> +
>   		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
>   		/* OK to use static buffer since global mutex is held. */
> -		shrink_pages = ttm_page_pool_free(pool, nr_free, true);
> -		freed += nr_free - shrink_pages;
> +		nr_free_pool = (nr_free >> pool->order);
> +		if (nr_free_pool == 0)
> +			continue;
> +
> +		shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
> +		freed += ((nr_free_pool - shrink_pages) << pool->order);
>   	}
>   	mutex_unlock(&lock);
>   	return freed;
> @@ -436,9 +443,12 @@ ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
>   {
>   	unsigned i;
>   	unsigned long count = 0;
> +	struct ttm_page_pool *pool;
>   
> -	for (i = 0; i < NUM_POOLS; ++i)
> -		count += _manager->pools[i].npages;
> +	for (i = 0; i < NUM_POOLS; ++i) {
> +		pool = &_manager->pools[i];
> +		count += (pool->npages << pool->order);
> +	}
>   
>   	return count;
>   }
> @@ -932,7 +942,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
>   }
>   
>   static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
> -		char *name)
> +		char *name, unsigned int order)
>   {
>   	spin_lock_init(&pool->lock);
>   	pool->fill_lock = false;
> @@ -940,8 +950,18 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
>   	pool->npages = pool->nfrees = 0;
>   	pool->gfp_flags = flags;
>   	pool->name = name;
> +	pool->order = order;
>   }
>   
> +/**
> + * If TRANSPARENT_HUGEPAGE is not enabled, wc_pool_huge and
> + * uc_pool_huge are never used, so the page order of those two
> + * pools does not matter.
> + */
> +#ifndef CONFIG_TRANSPARENT_HUGEPAGE
> +#define	HPAGE_PMD_ORDER	9
> +#endif
> +

That still won't work, and sorry I wasn't 100% clear in the last mail.

When CONFIG_TRANSPARENT_HUGEPAGE isn't set, HPAGE_PMD_ORDER is defined as 
BUILD_BUG().

So you will still run into problems when that config option isn't set.
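
For reference, this is roughly how include/linux/huge_mm.h defined those
macros at the time (paraphrased from that kernel era; check the exact tree):

	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	#define HPAGE_PMD_SHIFT	PMD_SHIFT
	#else
	/* expands to a compile-time error when THP is disabled */
	#define HPAGE_PMD_SHIFT	({ BUILD_BUG(); 0; })
	#endif

	#define HPAGE_PMD_ORDER	(HPAGE_PMD_SHIFT - PAGE_SHIFT)

So a !THP build either hits BUILD_BUG() on any use of HPAGE_PMD_ORDER,
or the patch's own #define collides with the header's definition.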

>   int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
>   {
>   	int ret;

I suggest just handling it here like this:

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
     unsigned order = HPAGE_PMD_ORDER;
#else
     unsigned order = 0;
#endif

Apart from that the patch looks good to me,
Christian.
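
For illustration, a minimal sketch of how that suggestion slots into
ttm_page_alloc_init() and the huge-pool init calls (a sketch of the
suggested direction only, not necessarily the final committed code):

	int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
	{
	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		unsigned order = HPAGE_PMD_ORDER;
	#else
		unsigned order = 0;	/* huge pools fall back to single pages */
	#endif
		int ret;
		...
		ttm_page_pool_init_locked(&_manager->wc_pool_huge,
					  GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
					  "wc huge", order);

		ttm_page_pool_init_locked(&_manager->uc_pool_huge,
					  GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
					  "uc huge", order);
		...
	}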

> @@ -952,23 +972,23 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
>   
>   	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
>   
> -	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
> +	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
>   
> -	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
> +	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);
>   
>   	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
> -				  GFP_USER | GFP_DMA32, "wc dma");
> +				  GFP_USER | GFP_DMA32, "wc dma", 0);
>   
>   	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
> -				  GFP_USER | GFP_DMA32, "uc dma");
> +				  GFP_USER | GFP_DMA32, "uc dma", 0);
>   
>   	ttm_page_pool_init_locked(&_manager->wc_pool_huge,
>   				  GFP_TRANSHUGE	& ~(__GFP_MOVABLE | __GFP_COMP),
> -				  "wc huge");
> +				  "wc huge", HPAGE_PMD_ORDER);
>   
>   	ttm_page_pool_init_locked(&_manager->uc_pool_huge,
>   				  GFP_TRANSHUGE	& ~(__GFP_MOVABLE | __GFP_COMP)
> -				  , "uc huge");
> +				  , "uc huge", HPAGE_PMD_ORDER);
>   
>   	_manager->options.max_size = max_pages;
>   	_manager->options.small = SMALL_ALLOCATION;
He, Hongbo Nov. 22, 2017, 8:10 a.m. UTC | #2
-----Original Message-----
From: Koenig, Christian
Sent: Wednesday, November 22, 2017 3:48 PM
To: He, Roger <Hongbo.He@amd.com>; amd-gfx@lists.freedesktop.org; dri-devel@lists.freedesktop.org
Subject: Re: [PATCH 1/4] drm/ttm: add page order in page pool

[snip]

That still won't work, and sorry I wasn't 100% clear in the last mail.

When CONFIG_TRANSPARENT_HUGEPAGE isn't set, HPAGE_PMD_ORDER is defined as BUILD_BUG().

So you will still run into problems when that config option isn't set.

>   int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
>   {
>   	int ret;

I suggest just handling it here like this:

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
     unsigned order = HPAGE_PMD_ORDER;
#else
     unsigned order = 0;
#endif

Apart from that the patch looks good to me, Christian.


Ok, going to modify it. Thanks!

Thanks
Roger(Hongbo.He)


Patch

diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 72ea037..0a0c653 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -81,6 +81,7 @@  struct ttm_page_pool {
 	char			*name;
 	unsigned long		nfrees;
 	unsigned long		nrefills;
+	unsigned int		order;
 };
 
 /**
@@ -412,6 +413,7 @@  ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	struct ttm_page_pool *pool;
 	int shrink_pages = sc->nr_to_scan;
 	unsigned long freed = 0;
+	unsigned int nr_free_pool;
 
 	if (!mutex_trylock(&lock))
 		return SHRINK_STOP;
@@ -421,10 +423,15 @@  ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		unsigned nr_free = shrink_pages;
 		if (shrink_pages == 0)
 			break;
+
 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
 		/* OK to use static buffer since global mutex is held. */
-		shrink_pages = ttm_page_pool_free(pool, nr_free, true);
-		freed += nr_free - shrink_pages;
+		nr_free_pool = (nr_free >> pool->order);
+		if (nr_free_pool == 0)
+			continue;
+
+		shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
+		freed += ((nr_free_pool - shrink_pages) << pool->order);
 	}
 	mutex_unlock(&lock);
 	return freed;
@@ -436,9 +443,12 @@  ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
 	unsigned i;
 	unsigned long count = 0;
+	struct ttm_page_pool *pool;
 
-	for (i = 0; i < NUM_POOLS; ++i)
-		count += _manager->pools[i].npages;
+	for (i = 0; i < NUM_POOLS; ++i) {
+		pool = &_manager->pools[i];
+		count += (pool->npages << pool->order);
+	}
 
 	return count;
 }
@@ -932,7 +942,7 @@  static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 }
 
 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
-		char *name)
+		char *name, unsigned int order)
 {
 	spin_lock_init(&pool->lock);
 	pool->fill_lock = false;
@@ -940,8 +950,18 @@  static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
 	pool->npages = pool->nfrees = 0;
 	pool->gfp_flags = flags;
 	pool->name = name;
+	pool->order = order;
 }
 
+/**
+ * If TRANSPARENT_HUGEPAGE is not enabled, wc_pool_huge and
+ * uc_pool_huge are never used, so the page order of those two
+ * pools does not matter.
+ */
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+#define	HPAGE_PMD_ORDER	9
+#endif
+
 int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 {
 	int ret;
@@ -952,23 +972,23 @@  int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 
 	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
 
-	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
+	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
 
-	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
+	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);
 
 	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
-				  GFP_USER | GFP_DMA32, "wc dma");
+				  GFP_USER | GFP_DMA32, "wc dma", 0);
 
 	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
-				  GFP_USER | GFP_DMA32, "uc dma");
+				  GFP_USER | GFP_DMA32, "uc dma", 0);
 
 	ttm_page_pool_init_locked(&_manager->wc_pool_huge,
 				  GFP_TRANSHUGE	& ~(__GFP_MOVABLE | __GFP_COMP),
-				  "wc huge");
+				  "wc huge", HPAGE_PMD_ORDER);
 
 	ttm_page_pool_init_locked(&_manager->uc_pool_huge,
 				  GFP_TRANSHUGE	& ~(__GFP_MOVABLE | __GFP_COMP)
-				  , "uc huge");
+				  , "uc huge", HPAGE_PMD_ORDER);
 
 	_manager->options.max_size = max_pages;
 	_manager->options.small = SMALL_ALLOCATION;