
[2/2] mm: Combine __alloc_pages and __alloc_pages_nodemask

Message ID: 20210124120357.701077-3-willy@infradead.org (mailing list archive)
State: New, archived
Series: Get rid of __alloc_pages wrapper

Commit Message

Matthew Wilcox Jan. 24, 2021, 12:03 p.m. UTC
There are only two callers of __alloc_pages() so prune the thicket of
alloc_pages variants by combining the two functions together.  Current
callers of __alloc_pages() simply add an extra 'NULL' parameter and
current callers of __alloc_pages_nodemask() call __alloc_pages() instead.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 Documentation/admin-guide/mm/transhuge.rst |  2 +-
 include/linux/gfp.h                        | 13 +++----------
 mm/hugetlb.c                               |  2 +-
 mm/internal.h                              |  4 ++--
 mm/mempolicy.c                             |  6 +++---
 mm/migrate.c                               |  2 +-
 mm/page_alloc.c                            |  5 ++---
 7 files changed, 13 insertions(+), 21 deletions(-)
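
As a sketch of the conversion described above (the callers are hypothetical,
the signatures are the old ones and the one this patch introduces):

	/* Before: two entry points, one a thin wrapper around the other. */
	page = __alloc_pages(gfp, order, nid);			/* no nodemask */
	page = __alloc_pages_nodemask(gfp, order, nid, nmask);	/* with nodemask */

	/* After: a single entry point; pass NULL when no nodemask is needed. */
	page = __alloc_pages(gfp, order, nid, NULL);
	page = __alloc_pages(gfp, order, nid, nmask);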

Comments

Vlastimil Babka Jan. 26, 2021, 1:47 p.m. UTC | #1
On 1/24/21 1:03 PM, Matthew Wilcox (Oracle) wrote:
> There are only two callers of __alloc_pages() so prune the thicket of
> alloc_pages variants by combining the two functions together.  Current
> callers of __alloc_pages() simply add an extra 'NULL' parameter and
> current callers of __alloc_pages_nodemask() call __alloc_pages() instead.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

Michal Hocko Jan. 27, 2021, 9:34 a.m. UTC | #2
On Sun 24-01-21 12:03:57, Matthew Wilcox wrote:
> There are only two callers of __alloc_pages() so prune the thicket of
> alloc_pages variants by combining the two functions together.  Current
> callers of __alloc_pages() simply add an extra 'NULL' parameter and
> current callers of __alloc_pages_nodemask() call __alloc_pages() instead.

Thanks, this is indeed a simplification. The allocation API zoo is a real
maze, and this will simplify it a bit. __alloc_pages_nodemask is also
quite a verbose name.

> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Acked-by: Michal Hocko <mhocko@suse.com>

Patch

diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index 3b8a336511a4..c9c37f16eef8 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -402,7 +402,7 @@  compact_fail
 	but failed.
 
 It is possible to establish how long the stalls were using the function
-tracer to record how long was spent in __alloc_pages_nodemask and
+tracer to record how long was spent in __alloc_pages() and
 using the mm_page_alloc tracepoint to identify which allocations were
 for huge pages.
 
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 53caa9846854..acca2c487da8 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -501,15 +501,8 @@  static inline int arch_make_page_accessible(struct page *page)
 }
 #endif
 
-struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
-							nodemask_t *nodemask);
-
-static inline struct page *
-__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
-{
-	return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
-}
+struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
+		nodemask_t *nodemask);
 
 /*
  * Allocate pages, preferring the node given as nid. The node must be valid and
@@ -521,7 +514,7 @@  __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
 	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
 	VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
 
-	return __alloc_pages(gfp_mask, order, nid);
+	return __alloc_pages(gfp_mask, order, nid, NULL);
 }
 
 /*
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a6bad1f686c5..604857289e02 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1644,7 +1644,7 @@  static struct page *alloc_buddy_huge_page(struct hstate *h,
 		gfp_mask |= __GFP_RETRY_MAYFAIL;
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
-	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
+	page = __alloc_pages(gfp_mask, order, nid, nmask);
 	if (page)
 		__count_vm_event(HTLB_BUDDY_PGALLOC);
 	else
diff --git a/mm/internal.h b/mm/internal.h
index 8e9c660f33ca..19aee773f6a8 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -126,10 +126,10 @@  extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
  * family of functions.
  *
  * nodemask, migratetype and highest_zoneidx are initialized only once in
- * __alloc_pages_nodemask() and then never change.
+ * __alloc_pages() and then never change.
  *
  * zonelist, preferred_zone and highest_zoneidx are set first in
- * __alloc_pages_nodemask() for the fast path, and might be later changed
+ * __alloc_pages() for the fast path, and might be later changed
  * in __alloc_pages_slowpath(). All other functions pass the whole structure
  * by a const pointer.
  */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 6961238c7ef5..addf0854d693 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2140,7 +2140,7 @@  static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
 {
 	struct page *page;
 
-	page = __alloc_pages(gfp, order, nid);
+	page = __alloc_pages(gfp, order, nid, NULL);
 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
 	if (!static_branch_likely(&vm_numa_stat_key))
 		return page;
@@ -2237,7 +2237,7 @@  alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 
 	nmask = policy_nodemask(gfp, pol);
 	preferred_nid = policy_node(gfp, pol, node);
-	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
+	page = __alloc_pages(gfp, order, preferred_nid, nmask);
 	mpol_cond_put(pol);
 out:
 	return page;
@@ -2274,7 +2274,7 @@  struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 	if (pol->mode == MPOL_INTERLEAVE)
 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
 	else
-		page = __alloc_pages_nodemask(gfp, order,
+		page = __alloc_pages(gfp, order,
 				policy_node(gfp, pol, numa_node_id()),
 				policy_nodemask(gfp, pol));
 
diff --git a/mm/migrate.c b/mm/migrate.c
index a3e1acc72ad7..f1ca50febfbe 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1617,7 +1617,7 @@  struct page *alloc_migration_target(struct page *page, unsigned long private)
 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
 		gfp_mask |= __GFP_HIGHMEM;
 
-	new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
+	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
 
 	if (new_page && PageTransHuge(new_page))
 		prep_transhuge_page(new_page);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d72ef706f6e6..90a1eb06c11b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4962,8 +4962,7 @@  static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
-struct page *
-__alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
+struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 							nodemask_t *nodemask)
 {
 	struct page *page;
@@ -5025,7 +5024,7 @@  __alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
 
 	return page;
 }
-EXPORT_SYMBOL(__alloc_pages_nodemask);
+EXPORT_SYMBOL(__alloc_pages);
 
 /*
  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
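
For callers that do want to constrain an allocation, a minimal sketch of
using the combined entry point with an explicit nodemask (the helper name is
hypothetical; the nodemask helpers are the existing ones from
<linux/nodemask.h>):

	#include <linux/gfp.h>
	#include <linux/nodemask.h>

	/* Hypothetical helper: allocate 2^order pages from a single node. */
	static struct page *alloc_on_node_sketch(int nid, unsigned int order)
	{
		nodemask_t nmask = NODE_MASK_NONE;

		/* Restrict the allocation to nid only. */
		node_set(nid, nmask);
		return __alloc_pages(GFP_KERNEL, order, nid, &nmask);
	}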