
mm/hugetlb: Move page order check inside hugetlb_cma_reserve()

Message ID 20240209054221.1403364-1-anshuman.khandual@arm.com (mailing list archive)
State New
Series mm/hugetlb: Move page order check inside hugetlb_cma_reserve()

Commit Message

Anshuman Khandual Feb. 9, 2024, 5:42 a.m. UTC
All platforms could benefit from a page order check against MAX_PAGE_ORDER
before allocating a CMA area for gigantic hugetlb pages. Let's move this
check from the individual platforms into generic hugetlb code.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-mm@kvack.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
This applies on v6.8-rc3
 
 arch/arm64/mm/hugetlbpage.c   | 7 -------
 arch/powerpc/mm/hugetlbpage.c | 4 +---
 mm/hugetlb.c                  | 7 +++++++
 3 files changed, 8 insertions(+), 10 deletions(-)
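For context, a huge page is "gigantic" precisely when its order exceeds
MAX_PAGE_ORDER, i.e. when the buddy allocator cannot hand it out and a CMA
area is needed instead; that is the invariant the relocated warning asserts.
Below is a minimal standalone sketch of that check, using typical 4K-page
arm64 values as stand-ins for the kernel's real definitions (the constants
here are illustrative, not the kernel's):

/* Illustration only: stand-in constants, typical for arm64 with 4K pages. */
#include <stdio.h>

#define PAGE_SHIFT	12	/* 4K base pages */
#define PUD_SHIFT	30	/* a PUD maps 1G */
#define MAX_PAGE_ORDER	10	/* largest order the buddy allocator serves */

int main(void)
{
	int order = PUD_SHIFT - PAGE_SHIFT;	/* 30 - 12 = 18 */

	/*
	 * The check now centralized in hugetlb_cma_reserve(): a CMA
	 * reservation only makes sense for orders the page allocator
	 * cannot satisfy, so warn when order <= MAX_PAGE_ORDER.
	 */
	if (order <= MAX_PAGE_ORDER)
		printf("warn: order %d fits the buddy allocator, CMA not needed\n",
		       order);
	else
		printf("order %d is gigantic (%lu MiB), CMA reservation needed\n",
		       order, (1UL << order) >> (20 - PAGE_SHIFT));
	return 0;
}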

Comments

Jane Chu Feb. 9, 2024, 6:41 a.m. UTC | #1
On 2/8/2024 9:42 PM, Anshuman Khandual wrote:

> All platforms could benefit from a page order check against MAX_PAGE_ORDER
> before allocating a CMA area for gigantic hugetlb pages. Let's move this
> check from the individual platforms into generic hugetlb code.
>
> [...]

Looks straightforward to me.

Reviewed-by: Jane Chu <jane.chu@oracle.com>
David Hildenbrand Feb. 12, 2024, 2:19 p.m. UTC | #2
On 09.02.24 06:42, Anshuman Khandual wrote:
> All platforms could benefit from a page order check against MAX_PAGE_ORDER
> before allocating a CMA area for gigantic hugetlb pages. Let's move this
> check from the individual platforms into generic hugetlb code.
> 
> [...]

Reviewed-by: David Hildenbrand <david@redhat.com>

Patch

diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 8116ac599f80..6720ec8d50e7 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -45,13 +45,6 @@  void __init arm64_hugetlb_cma_reserve(void)
 	else
 		order = CONT_PMD_SHIFT - PAGE_SHIFT;
 
-	/*
-	 * HugeTLB CMA reservation is required for gigantic
-	 * huge pages which could not be allocated via the
-	 * page allocator. Just warn if there is any change
-	 * breaking this assumption.
-	 */
-	WARN_ON(order <= MAX_PAGE_ORDER);
 	hugetlb_cma_reserve(order);
 }
 #endif /* CONFIG_CMA */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0a540b37aab6..16557d008eef 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -614,8 +614,6 @@  void __init gigantic_hugetlb_cma_reserve(void)
 		 */
 		order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
 
-	if (order) {
-		VM_WARN_ON(order <= MAX_PAGE_ORDER);
+	if (order)
 		hugetlb_cma_reserve(order);
-	}
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cf9c9b2906ea..345b3524df35 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7699,6 +7699,13 @@  void __init hugetlb_cma_reserve(int order)
 	bool node_specific_cma_alloc = false;
 	int nid;
 
+	/*
+	 * HugeTLB CMA reservation is required for gigantic
+	 * huge pages which could not be allocated via the
+	 * page allocator. Just warn if there is any change
+	 * breaking this assumption.
+	 */
+	VM_WARN_ON(order <= MAX_PAGE_ORDER);
 	cma_reserve_called = true;
 
 	if (!hugetlb_cma_size)
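
With the warning gone from the arch side, the arm64 caller reduces to the
following (reconstructed from the hunk above; the pud_sect_supported()
branch is not visible in this diff's context lines and is quoted from the
kernel tree only for orientation):

#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
	int order;

	if (pud_sect_supported())
		order = PUD_SHIFT - PAGE_SHIFT;		/* 30 - 12 = 18 with 4K pages */
	else
		order = CONT_PMD_SHIFT - PAGE_SHIFT;	/* 25 - 12 = 13 with 4K pages */

	/*
	 * Either order is comfortably above MAX_PAGE_ORDER (10 on a
	 * default 4K kernel), so the VM_WARN_ON() that now lives in
	 * hugetlb_cma_reserve() stays silent here.
	 */
	hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */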