
[RFC,2/5] mm: shmem: add an 'order' parameter for shmem_alloc_hugefolio()

Message ID 2014bf7370d78bc1f5600731af5bf8f569e5868b.1713755580.git.baolin.wang@linux.alibaba.com (mailing list archive)
State New
Series add mTHP support for anonymous share pages

Commit Message

Baolin Wang April 22, 2024, 7:02 a.m. UTC
Add a new parameter to specify the huge page order for shmem_alloc_hugefolio(),
as a preparation to support mTHP.

Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
 mm/shmem.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
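
For context, a rough sketch of the kind of caller this change is preparing for once mTHP orders are wired up. The helper name and the order value below are hypothetical and only illustrate passing a non-PMD order through the new parameter; they are not part of this patch or the rest of the series.

```c
/*
 * Hypothetical illustration only: with the new 'order' parameter, a future
 * mTHP path could request a smaller-than-PMD folio, e.g. order 4 (64K with
 * 4K base pages), instead of always using HPAGE_PMD_ORDER.
 */
static struct folio *shmem_alloc_mthp_folio(gfp_t gfp,
		struct shmem_inode_info *info, pgoff_t index)
{
	int order = 4;	/* example mTHP order, chosen arbitrarily */

	index = round_down(index, 1UL << order);
	return shmem_alloc_hugefolio(gfp, info, index, order);
}
```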

Comments

Kefeng Wang April 24, 2024, 6:28 a.m. UTC | #1
On 2024/4/22 15:02, Baolin Wang wrote:
> Add a new parameter to specify the huge page order for shmem_alloc_hugefolio(),
> as a preparation to support mTHP.
> 
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> ---
>   mm/shmem.c | 11 ++++++-----
>   1 file changed, 6 insertions(+), 5 deletions(-)
> 
> diff --git a/mm/shmem.c b/mm/shmem.c
> index fa2a0ed97507..893c88efc45f 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -1604,14 +1604,14 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
>   }
>   
>   static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
> -		struct shmem_inode_info *info, pgoff_t index)
> +		struct shmem_inode_info *info, pgoff_t index, int order)
>   {
>   	struct mempolicy *mpol;
>   	pgoff_t ilx;
>   	struct page *page;
>   
> -	mpol = shmem_get_pgoff_policy(info, index, HPAGE_PMD_ORDER, &ilx);
> -	page = alloc_pages_mpol(gfp, HPAGE_PMD_ORDER, mpol, ilx, numa_node_id());
> +	mpol = shmem_get_pgoff_policy(info, index, order, &ilx);
> +	page = alloc_pages_mpol(gfp, order, mpol, ilx, numa_node_id());
>   	mpol_cond_put(mpol);
>   
>   	return page_rmappable_folio(page);
> @@ -1639,13 +1639,14 @@ static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
>   	struct shmem_inode_info *info = SHMEM_I(inode);
>   	struct folio *folio;
>   	long pages;
> -	int error;
> +	int error, order;
>   
>   	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
>   		huge = false;
>   
>   	if (huge) {
>   		pages = HPAGE_PMD_NR;
> +		order = HPAGE_PMD_ORDER;
>   		index = round_down(index, HPAGE_PMD_NR);
>   
>   		/*
> @@ -1660,7 +1661,7 @@ static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
>   				index + HPAGE_PMD_NR - 1, XA_PRESENT))
>   			return ERR_PTR(-E2BIG);
>   
> -		folio = shmem_alloc_hugefolio(gfp, info, index);
> +		folio = shmem_alloc_hugefolio(gfp, info, index, order);

Avoid the order variable; we can directly use HPAGE_PMD_ORDER here.

>   		if (!folio)
>   			count_vm_event(THP_FILE_FALLBACK);
>   	} else {
Baolin Wang April 24, 2024, 6:55 a.m. UTC | #2
On 2024/4/24 14:28, Kefeng Wang wrote:
> 
> 
> On 2024/4/22 15:02, Baolin Wang wrote:
>> Add a new parameter to specify the huge page order for 
>> shmem_alloc_hugefolio(),
>> as a preparation to support mTHP.
>>
>> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
>> ---
>>   mm/shmem.c | 11 ++++++-----
>>   1 file changed, 6 insertions(+), 5 deletions(-)
>>
>> diff --git a/mm/shmem.c b/mm/shmem.c
>> index fa2a0ed97507..893c88efc45f 100644
>> --- a/mm/shmem.c
>> +++ b/mm/shmem.c
>> @@ -1604,14 +1604,14 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, 
>> gfp_t limit_gfp)
>>   }
>>   static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
>> -        struct shmem_inode_info *info, pgoff_t index)
>> +        struct shmem_inode_info *info, pgoff_t index, int order)
>>   {
>>       struct mempolicy *mpol;
>>       pgoff_t ilx;
>>       struct page *page;
>> -    mpol = shmem_get_pgoff_policy(info, index, HPAGE_PMD_ORDER, &ilx);
>> -    page = alloc_pages_mpol(gfp, HPAGE_PMD_ORDER, mpol, ilx, 
>> numa_node_id());
>> +    mpol = shmem_get_pgoff_policy(info, index, order, &ilx);
>> +    page = alloc_pages_mpol(gfp, order, mpol, ilx, numa_node_id());
>>       mpol_cond_put(mpol);
>>       return page_rmappable_folio(page);
>> @@ -1639,13 +1639,14 @@ static struct folio 
>> *shmem_alloc_and_add_folio(gfp_t gfp,
>>       struct shmem_inode_info *info = SHMEM_I(inode);
>>       struct folio *folio;
>>       long pages;
>> -    int error;
>> +    int error, order;
>>       if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
>>           huge = false;
>>       if (huge) {
>>           pages = HPAGE_PMD_NR;
>> +        order = HPAGE_PMD_ORDER;
>>           index = round_down(index, HPAGE_PMD_NR);
>>           /*
>> @@ -1660,7 +1661,7 @@ static struct folio 
>> *shmem_alloc_and_add_folio(gfp_t gfp,
>>                   index + HPAGE_PMD_NR - 1, XA_PRESENT))
>>               return ERR_PTR(-E2BIG);
>> -        folio = shmem_alloc_hugefolio(gfp, info, index);
>> +        folio = shmem_alloc_hugefolio(gfp, info, index, order);
> 
> Avoid the order variable; we can directly use HPAGE_PMD_ORDER here.

Yes, sure. Thanks.
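
For reference, a minimal sketch of how the huge branch of shmem_alloc_and_add_folio() might look with the review comment applied, passing HPAGE_PMD_ORDER directly instead of going through a local variable. This is only an illustration of the suggestion, not the posted follow-up version, and the existing page-cache conflict check is elided.

```c
	if (huge) {
		pages = HPAGE_PMD_NR;
		index = round_down(index, HPAGE_PMD_NR);

		/* existing conflict check against the mapping elided here */

		folio = shmem_alloc_hugefolio(gfp, info, index, HPAGE_PMD_ORDER);
		if (!folio)
			count_vm_event(THP_FILE_FALLBACK);
	} else {
		/* order-0 path unchanged */
	}
```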

Patch

diff --git a/mm/shmem.c b/mm/shmem.c
index fa2a0ed97507..893c88efc45f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1604,14 +1604,14 @@  static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
 }
 
 static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
-		struct shmem_inode_info *info, pgoff_t index)
+		struct shmem_inode_info *info, pgoff_t index, int order)
 {
 	struct mempolicy *mpol;
 	pgoff_t ilx;
 	struct page *page;
 
-	mpol = shmem_get_pgoff_policy(info, index, HPAGE_PMD_ORDER, &ilx);
-	page = alloc_pages_mpol(gfp, HPAGE_PMD_ORDER, mpol, ilx, numa_node_id());
+	mpol = shmem_get_pgoff_policy(info, index, order, &ilx);
+	page = alloc_pages_mpol(gfp, order, mpol, ilx, numa_node_id());
 	mpol_cond_put(mpol);
 
 	return page_rmappable_folio(page);
@@ -1639,13 +1639,14 @@  static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct folio *folio;
 	long pages;
-	int error;
+	int error, order;
 
 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
 		huge = false;
 
 	if (huge) {
 		pages = HPAGE_PMD_NR;
+		order = HPAGE_PMD_ORDER;
 		index = round_down(index, HPAGE_PMD_NR);
 
 		/*
@@ -1660,7 +1661,7 @@  static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
 				index + HPAGE_PMD_NR - 1, XA_PRESENT))
 			return ERR_PTR(-E2BIG);
 
-		folio = shmem_alloc_hugefolio(gfp, info, index);
+		folio = shmem_alloc_hugefolio(gfp, info, index, order);
 		if (!folio)
 			count_vm_event(THP_FILE_FALLBACK);
 	} else {