diff mbox series

mm: remove unused hugepage for vma_alloc_folio()

Message ID 20241010061556.1846751-1-wangkefeng.wang@huawei.com (mailing list archive)
State New
Headers show
Series mm: remove unused hugepage for vma_alloc_folio() | expand

Commit Message

Kefeng Wang Oct. 10, 2024, 6:15 a.m. UTC
The hugepage parameter has been unused since commit ddc1a5cbc05d
("mempolicy: alloc_pages_mpol() for NUMA policy without vma");
for PMD-sized THP, vma_alloc_folio() still tries only the preferred
node when possible, by checking the order of the folio allocation.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 arch/alpha/include/asm/page.h   |  2 +-
 arch/arm64/mm/fault.c           |  2 +-
 arch/m68k/include/asm/page_no.h |  2 +-
 arch/s390/include/asm/page.h    |  2 +-
 arch/x86/include/asm/page.h     |  2 +-
 include/linux/gfp.h             |  6 +++---
 include/linux/highmem.h         |  2 +-
 mm/huge_memory.c                |  2 +-
 mm/ksm.c                        |  2 +-
 mm/memory.c                     | 10 ++++------
 mm/mempolicy.c                  |  3 +--
 mm/userfaultfd.c                |  2 +-
 12 files changed, 17 insertions(+), 20 deletions(-)

Comments

David Hildenbrand Oct. 10, 2024, 12:42 p.m. UTC | #1
On 10.10.24 08:15, Kefeng Wang wrote:
> The hugepage parameter was deprecated since commit ddc1a5cbc05d
> ("mempolicy: alloc_pages_mpol() for NUMA policy without vma"),
> for PMD-sized THP, it still tries only preferred node if possible
> in vma_alloc_folio() by checking the order of the folio allocation.
> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>

Acked-by: David Hildenbrand <david@redhat.com>
Zi Yan Oct. 10, 2024, 3:09 p.m. UTC | #2
On 10 Oct 2024, at 2:15, Kefeng Wang wrote:

> The hugepage parameter was deprecated since commit ddc1a5cbc05d
> ("mempolicy: alloc_pages_mpol() for NUMA policy without vma"),
> for PMD-sized THP, it still tries only preferred node if possible
> in vma_alloc_folio() by checking the order of the folio allocation.
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>  arch/alpha/include/asm/page.h   |  2 +-
>  arch/arm64/mm/fault.c           |  2 +-
>  arch/m68k/include/asm/page_no.h |  2 +-
>  arch/s390/include/asm/page.h    |  2 +-
>  arch/x86/include/asm/page.h     |  2 +-
>  include/linux/gfp.h             |  6 +++---
>  include/linux/highmem.h         |  2 +-
>  mm/huge_memory.c                |  2 +-
>  mm/ksm.c                        |  2 +-
>  mm/memory.c                     | 10 ++++------
>  mm/mempolicy.c                  |  3 +--
>  mm/userfaultfd.c                |  2 +-
>  12 files changed, 17 insertions(+), 20 deletions(-)

LGTM. Reviewed-by: Zi Yan <ziy@nvidia.com>

Best Regards,
Yan, Zi
Barry Song Oct. 10, 2024, 3:35 p.m. UTC | #3
On Thu, Oct 10, 2024 at 2:16 PM Kefeng Wang <wangkefeng.wang@huawei.com> wrote:
>
> The hugepage parameter was deprecated since commit ddc1a5cbc05d
> ("mempolicy: alloc_pages_mpol() for NUMA policy without vma"),
> for PMD-sized THP, it still tries only preferred node if possible
> in vma_alloc_folio() by checking the order of the folio allocation.
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>

Reviewed-by: Barry Song <baohua@kernel.org>

> ---
>  arch/alpha/include/asm/page.h   |  2 +-
>  arch/arm64/mm/fault.c           |  2 +-
>  arch/m68k/include/asm/page_no.h |  2 +-
>  arch/s390/include/asm/page.h    |  2 +-
>  arch/x86/include/asm/page.h     |  2 +-
>  include/linux/gfp.h             |  6 +++---
>  include/linux/highmem.h         |  2 +-
>  mm/huge_memory.c                |  2 +-
>  mm/ksm.c                        |  2 +-
>  mm/memory.c                     | 10 ++++------
>  mm/mempolicy.c                  |  3 +--
>  mm/userfaultfd.c                |  2 +-
>  12 files changed, 17 insertions(+), 20 deletions(-)
>
> diff --git a/arch/alpha/include/asm/page.h b/arch/alpha/include/asm/page.h
> index 70419e6be1a3..3dffa2a461d7 100644
> --- a/arch/alpha/include/asm/page.h
> +++ b/arch/alpha/include/asm/page.h
> @@ -18,7 +18,7 @@ extern void clear_page(void *page);
>  #define clear_user_page(page, vaddr, pg)       clear_page(page)
>
>  #define vma_alloc_zeroed_movable_folio(vma, vaddr) \
> -       vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
> +       vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr)
>
>  extern void copy_page(void * _to, void * _from);
>  #define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
> diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
> index c2f89a678ac0..ef63651099a9 100644
> --- a/arch/arm64/mm/fault.c
> +++ b/arch/arm64/mm/fault.c
> @@ -1023,7 +1023,7 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
>         if (vma->vm_flags & VM_MTE)
>                 flags |= __GFP_ZEROTAGS;
>
> -       return vma_alloc_folio(flags, 0, vma, vaddr, false);
> +       return vma_alloc_folio(flags, 0, vma, vaddr);
>  }
>
>  void tag_clear_highpage(struct page *page)
> diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
> index af3a10973233..63c0e706084b 100644
> --- a/arch/m68k/include/asm/page_no.h
> +++ b/arch/m68k/include/asm/page_no.h
> @@ -14,7 +14,7 @@ extern unsigned long memory_end;
>  #define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
>
>  #define vma_alloc_zeroed_movable_folio(vma, vaddr) \
> -       vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
> +       vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr)
>
>  #define __pa(vaddr)            ((unsigned long)(vaddr))
>  #define __va(paddr)            ((void *)((unsigned long)(paddr)))
> diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
> index 73e1e03317b4..d02058f96bcf 100644
> --- a/arch/s390/include/asm/page.h
> +++ b/arch/s390/include/asm/page.h
> @@ -74,7 +74,7 @@ static inline void copy_page(void *to, void *from)
>  #define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
>
>  #define vma_alloc_zeroed_movable_folio(vma, vaddr) \
> -       vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
> +       vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr)
>
>  /*
>   * These are used to make use of C type-checking..
> diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
> index 1b93ff80b43b..c9fe207916f4 100644
> --- a/arch/x86/include/asm/page.h
> +++ b/arch/x86/include/asm/page.h
> @@ -35,7 +35,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
>  }
>
>  #define vma_alloc_zeroed_movable_folio(vma, vaddr) \
> -       vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
> +       vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr)
>
>  #ifndef __pa
>  #define __pa(x)                __phys_addr((unsigned long)(x))
> diff --git a/include/linux/gfp.h b/include/linux/gfp.h
> index a951de920e20..b65724c3427d 100644
> --- a/include/linux/gfp.h
> +++ b/include/linux/gfp.h
> @@ -306,7 +306,7 @@ struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
>  struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
>                 struct mempolicy *mpol, pgoff_t ilx, int nid);
>  struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
> -               unsigned long addr, bool hugepage);
> +               unsigned long addr);
>  #else
>  static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)
>  {
> @@ -326,7 +326,7 @@ static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int orde
>  {
>         return folio_alloc_noprof(gfp, order);
>  }
> -#define vma_alloc_folio_noprof(gfp, order, vma, addr, hugepage)                \
> +#define vma_alloc_folio_noprof(gfp, order, vma, addr)          \
>         folio_alloc_noprof(gfp, order)
>  #endif
>
> @@ -341,7 +341,7 @@ static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int orde
>  static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
>                 struct vm_area_struct *vma, unsigned long addr)
>  {
> -       struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr, false);
> +       struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr);
>
>         return &folio->page;
>  }
> diff --git a/include/linux/highmem.h b/include/linux/highmem.h
> index 930a591b9b61..bec9bd715acf 100644
> --- a/include/linux/highmem.h
> +++ b/include/linux/highmem.h
> @@ -226,7 +226,7 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
>  {
>         struct folio *folio;
>
> -       folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
> +       folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr);
>         if (folio)
>                 clear_user_highpage(&folio->page, vaddr);
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 30912a93f7dc..7f254fd2a3a0 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -1342,7 +1342,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
>                 return ret;
>         }
>         gfp = vma_thp_gfp_mask(vma);
> -       folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
> +       folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr);
>         if (unlikely(!folio)) {
>                 count_vm_event(THP_FAULT_FALLBACK);
>                 count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK);
> diff --git a/mm/ksm.c b/mm/ksm.c
> index eea5a426be2c..4d482d011745 100644
> --- a/mm/ksm.c
> +++ b/mm/ksm.c
> @@ -2970,7 +2970,7 @@ struct folio *ksm_might_need_to_copy(struct folio *folio,
>         if (!folio_test_uptodate(folio))
>                 return folio;           /* let do_swap_page report the error */
>
> -       new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
> +       new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);
>         if (new_folio &&
>             mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) {
>                 folio_put(new_folio);
> diff --git a/mm/memory.c b/mm/memory.c
> index fe21bd3beff5..9ba1fcdb9bb5 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -1059,8 +1059,7 @@ static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
>         if (need_zero)
>                 new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
>         else
> -               new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
> -                                           addr, false);
> +               new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);
>
>         if (!new_folio)
>                 return NULL;
> @@ -4017,8 +4016,7 @@ static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
>         struct folio *folio;
>         swp_entry_t entry;
>
> -       folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
> -                               vmf->address, false);
> +       folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address);
>         if (!folio)
>                 return NULL;
>
> @@ -4174,7 +4172,7 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
>         gfp = vma_thp_gfp_mask(vma);
>         while (orders) {
>                 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
> -               folio = vma_alloc_folio(gfp, order, vma, addr, true);
> +               folio = vma_alloc_folio(gfp, order, vma, addr);
>                 if (folio) {
>                         if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
>                                                             gfp, entry))
> @@ -4716,7 +4714,7 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
>         gfp = vma_thp_gfp_mask(vma);
>         while (orders) {
>                 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
> -               folio = vma_alloc_folio(gfp, order, vma, addr, true);
> +               folio = vma_alloc_folio(gfp, order, vma, addr);
>                 if (folio) {
>                         if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
>                                 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index a8aa83a97ad1..bb37cd1a51d8 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -2290,7 +2290,6 @@ struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
>   * @order: Order of the folio.
>   * @vma: Pointer to VMA.
>   * @addr: Virtual address of the allocation.  Must be inside @vma.
> - * @hugepage: Unused (was: For hugepages try only preferred node if possible).
>   *
>   * Allocate a folio for a specific address in @vma, using the appropriate
>   * NUMA policy.  The caller must hold the mmap_lock of the mm_struct of the
> @@ -2301,7 +2300,7 @@ struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
>   * Return: The folio on success or NULL if allocation fails.
>   */
>  struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
> -               unsigned long addr, bool hugepage)
> +               unsigned long addr)
>  {
>         struct mempolicy *pol;
>         pgoff_t ilx;
> diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> index 48b87c62fc3d..60a0be33766f 100644
> --- a/mm/userfaultfd.c
> +++ b/mm/userfaultfd.c
> @@ -251,7 +251,7 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
>         if (!*foliop) {
>                 ret = -ENOMEM;
>                 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
> -                                       dst_addr, false);
> +                                       dst_addr);
>                 if (!folio)
>                         goto out;
>
> --
> 2.27.0
>
diff mbox series

Patch

diff --git a/arch/alpha/include/asm/page.h b/arch/alpha/include/asm/page.h
index 70419e6be1a3..3dffa2a461d7 100644
--- a/arch/alpha/include/asm/page.h
+++ b/arch/alpha/include/asm/page.h
@@ -18,7 +18,7 @@  extern void clear_page(void *page);
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 
 #define vma_alloc_zeroed_movable_folio(vma, vaddr) \
-	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
+	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr)
 
 extern void copy_page(void * _to, void * _from);
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c2f89a678ac0..ef63651099a9 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -1023,7 +1023,7 @@  struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
 	if (vma->vm_flags & VM_MTE)
 		flags |= __GFP_ZEROTAGS;
 
-	return vma_alloc_folio(flags, 0, vma, vaddr, false);
+	return vma_alloc_folio(flags, 0, vma, vaddr);
 }
 
 void tag_clear_highpage(struct page *page)
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index af3a10973233..63c0e706084b 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -14,7 +14,7 @@  extern unsigned long memory_end;
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 
 #define vma_alloc_zeroed_movable_folio(vma, vaddr) \
-	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
+	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr)
 
 #define __pa(vaddr)		((unsigned long)(vaddr))
 #define __va(paddr)		((void *)((unsigned long)(paddr)))
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 73e1e03317b4..d02058f96bcf 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -74,7 +74,7 @@  static inline void copy_page(void *to, void *from)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 
 #define vma_alloc_zeroed_movable_folio(vma, vaddr) \
-	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
+	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr)
 
 /*
  * These are used to make use of C type-checking..
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 1b93ff80b43b..c9fe207916f4 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -35,7 +35,7 @@  static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 }
 
 #define vma_alloc_zeroed_movable_folio(vma, vaddr) \
-	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
+	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr)
 
 #ifndef __pa
 #define __pa(x)		__phys_addr((unsigned long)(x))
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index a951de920e20..b65724c3427d 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -306,7 +306,7 @@  struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
 struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
 		struct mempolicy *mpol, pgoff_t ilx, int nid);
 struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
-		unsigned long addr, bool hugepage);
+		unsigned long addr);
 #else
 static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)
 {
@@ -326,7 +326,7 @@  static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int orde
 {
 	return folio_alloc_noprof(gfp, order);
 }
-#define vma_alloc_folio_noprof(gfp, order, vma, addr, hugepage)		\
+#define vma_alloc_folio_noprof(gfp, order, vma, addr)		\
 	folio_alloc_noprof(gfp, order)
 #endif
 
@@ -341,7 +341,7 @@  static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int orde
 static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
 		struct vm_area_struct *vma, unsigned long addr)
 {
-	struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr, false);
+	struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr);
 
 	return &folio->page;
 }
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 930a591b9b61..bec9bd715acf 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -226,7 +226,7 @@  struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
 {
 	struct folio *folio;
 
-	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
+	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr);
 	if (folio)
 		clear_user_highpage(&folio->page, vaddr);
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 30912a93f7dc..7f254fd2a3a0 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1342,7 +1342,7 @@  vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 		return ret;
 	}
 	gfp = vma_thp_gfp_mask(vma);
-	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
+	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr);
 	if (unlikely(!folio)) {
 		count_vm_event(THP_FAULT_FALLBACK);
 		count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK);
diff --git a/mm/ksm.c b/mm/ksm.c
index eea5a426be2c..4d482d011745 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2970,7 +2970,7 @@  struct folio *ksm_might_need_to_copy(struct folio *folio,
 	if (!folio_test_uptodate(folio))
 		return folio;		/* let do_swap_page report the error */
 
-	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);
 	if (new_folio &&
 	    mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) {
 		folio_put(new_folio);
diff --git a/mm/memory.c b/mm/memory.c
index fe21bd3beff5..9ba1fcdb9bb5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1059,8 +1059,7 @@  static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
 	if (need_zero)
 		new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
 	else
-		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
-					    addr, false);
+		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);
 
 	if (!new_folio)
 		return NULL;
@@ -4017,8 +4016,7 @@  static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
 	struct folio *folio;
 	swp_entry_t entry;
 
-	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
-				vmf->address, false);
+	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address);
 	if (!folio)
 		return NULL;
 
@@ -4174,7 +4172,7 @@  static struct folio *alloc_swap_folio(struct vm_fault *vmf)
 	gfp = vma_thp_gfp_mask(vma);
 	while (orders) {
 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
-		folio = vma_alloc_folio(gfp, order, vma, addr, true);
+		folio = vma_alloc_folio(gfp, order, vma, addr);
 		if (folio) {
 			if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
 							    gfp, entry))
@@ -4716,7 +4714,7 @@  static struct folio *alloc_anon_folio(struct vm_fault *vmf)
 	gfp = vma_thp_gfp_mask(vma);
 	while (orders) {
 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
-		folio = vma_alloc_folio(gfp, order, vma, addr, true);
+		folio = vma_alloc_folio(gfp, order, vma, addr);
 		if (folio) {
 			if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
 				count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index a8aa83a97ad1..bb37cd1a51d8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2290,7 +2290,6 @@  struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
  * @order: Order of the folio.
  * @vma: Pointer to VMA.
  * @addr: Virtual address of the allocation.  Must be inside @vma.
- * @hugepage: Unused (was: For hugepages try only preferred node if possible).
  *
  * Allocate a folio for a specific address in @vma, using the appropriate
  * NUMA policy.  The caller must hold the mmap_lock of the mm_struct of the
@@ -2301,7 +2300,7 @@  struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
  * Return: The folio on success or NULL if allocation fails.
  */
 struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
-		unsigned long addr, bool hugepage)
+		unsigned long addr)
 {
 	struct mempolicy *pol;
 	pgoff_t ilx;
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 48b87c62fc3d..60a0be33766f 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -251,7 +251,7 @@  static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
 	if (!*foliop) {
 		ret = -ENOMEM;
 		folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
-					dst_addr, false);
+					dst_addr);
 		if (!folio)
 			goto out;