
[1/7] mm/page_alloc: Move gfp_allowed_mask enforcement to prepare_alloc_pages

Message ID 20210312154331.32229-2-mgorman@techsingularity.net (mailing list archive)
State New, archived
Series Introduce a bulk order-0 page allocator with two in-tree users

Commit Message

Mel Gorman March 12, 2021, 3:43 p.m. UTC
__alloc_pages() updates the GFP flags to enforce which flags are allowed
during a global context such as boot or suspend. This patch moves the
enforcement from __alloc_pages() to prepare_alloc_pages() so the code can
be shared between the single page allocator and a new bulk page allocator.

While moving the code, it became obvious that __alloc_pages() and
prepare_alloc_pages() use different names for the same variable. This is
an unnecessary complication, so rename gfp_mask to gfp in
prepare_alloc_pages() so the name is consistent.

No functional change.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
---
 mm/page_alloc.c | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)
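
For context: gfp_allowed_mask is a global mask that boot and suspend code
tighten so allocations cannot use flags such as __GFP_IO or __GFP_FS while,
for example, block devices are quiesced. A minimal sketch of the mechanism
(simplified from mm/page_alloc.c; illustrative, not the literal code):

    /* Global mask restricting allocations in special contexts; starts
     * as GFP_BOOT_MASK so early boot cannot trigger I/O or FS reclaim. */
    gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

    /* On suspend entry, additionally forbid I/O and FS reclaim while
     * devices may already be asleep. */
    void pm_restrict_gfp_mask(void)
    {
            saved_gfp_mask = gfp_allowed_mask;
            gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
    }

    /* Each allocator entry point then clamps the caller's flags: */
    gfp &= gfp_allowed_mask;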

Comments

Vlastimil Babka March 19, 2021, 4:11 p.m. UTC | #1
On 3/12/21 4:43 PM, Mel Gorman wrote:
> __alloc_pages() updates the GFP flags to enforce which flags are allowed
> during a global context such as boot or suspend. This patch moves the
> enforcement from __alloc_pages() to prepare_alloc_pages() so the code can
> be shared between the single page allocator and a new bulk page allocator.
> 
> While moving the code, it became obvious that __alloc_pages() and
> prepare_alloc_pages() use different names for the same variable. This is
> an unnecessary complication, so rename gfp_mask to gfp in
> prepare_alloc_pages() so the name is consistent.
> 
> No functional change.

Hm, I have some doubts.

> Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
> ---
>  mm/page_alloc.c | 25 +++++++++++++------------
>  1 file changed, 13 insertions(+), 12 deletions(-)
> 
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 00b67c47ad87..f0c1d74ead6f 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -4914,15 +4914,18 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>  	return page;
>  }
>  
> -static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
> +static inline bool prepare_alloc_pages(gfp_t gfp, unsigned int order,
>  		int preferred_nid, nodemask_t *nodemask,
>  		struct alloc_context *ac, gfp_t *alloc_gfp,
>  		unsigned int *alloc_flags)
>  {
> -	ac->highest_zoneidx = gfp_zone(gfp_mask);
> -	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
> +	gfp &= gfp_allowed_mask;
> +	*alloc_gfp = gfp;
> +

...

> @@ -4980,8 +4983,6 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
>  		return NULL;
>  	}
>  
> -	gfp &= gfp_allowed_mask;
> -	alloc_gfp = gfp;
>  	if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
>  			&alloc_gfp, &alloc_flags))
>  		return NULL;

As a result, "gfp" no longer has the gfp_allowed_mask restrictions
applied; only alloc_gfp does. But if we end up in the slowpath, we throw
away the current alloc_gfp before going there:

    alloc_gfp = current_gfp_context(gfp);
    ...
    page = __alloc_pages_slowpath(alloc_gfp, ...);

So we lose the gfp_allowed_mask restrictions here?
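
Spelled out, the post-patch flow is roughly (paraphrased, not the exact
code):

    gfp = <caller's flags>;                 /* no longer masked here */
    prepare_alloc_pages(gfp, order, ..., &alloc_gfp, &alloc_flags);
                                            /* masks only *alloc_gfp */
    page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
    if (!page) {
            /* recomputed from the unmasked gfp, dropping the mask */
            alloc_gfp = current_gfp_context(gfp);
            page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
    }

i.e. the slowpath can run with flags that gfp_allowed_mask was supposed
to clear.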
Mel Gorman March 19, 2021, 5:49 p.m. UTC | #2
On Fri, Mar 19, 2021 at 05:11:39PM +0100, Vlastimil Babka wrote:
> On 3/12/21 4:43 PM, Mel Gorman wrote:
> > __alloc_pages() updates the GFP flags to enforce which flags are allowed
> > during a global context such as boot or suspend. This patch moves the
> > enforcement from __alloc_pages() to prepare_alloc_pages() so the code can
> > be shared between the single page allocator and a new bulk page allocator.
> > 
> > While moving the code, it became obvious that __alloc_pages() and
> > prepare_alloc_pages() use different names for the same variable. This is
> > an unnecessary complication, so rename gfp_mask to gfp in
> > prepare_alloc_pages() so the name is consistent.
> > 
> > No functional change.
> 
> Hm, I have some doubts.
> 

And you were right. I'll drop this patch and apply the same mask in the
bulk allocator.
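
Concretely: keep the masking in __alloc_pages() and do the equivalent at
the top of the bulk allocator. A sketch of the direction (the bulk
allocator's name and signature here are assumptions, not part of this
thread):

    unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
                                     nodemask_t *nodemask, int nr_pages,
                                     struct list_head *page_list)
    {
            gfp_t alloc_gfp;

            /* Apply the same global restriction on entry, exactly as
             * the single-page path does. */
            gfp &= gfp_allowed_mask;
            alloc_gfp = gfp;
            ...
    }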

Patch

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 00b67c47ad87..f0c1d74ead6f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4914,15 +4914,18 @@  __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	return page;
 }
 
-static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
+static inline bool prepare_alloc_pages(gfp_t gfp, unsigned int order,
 		int preferred_nid, nodemask_t *nodemask,
 		struct alloc_context *ac, gfp_t *alloc_gfp,
 		unsigned int *alloc_flags)
 {
-	ac->highest_zoneidx = gfp_zone(gfp_mask);
-	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
+	gfp &= gfp_allowed_mask;
+	*alloc_gfp = gfp;
+
+	ac->highest_zoneidx = gfp_zone(gfp);
+	ac->zonelist = node_zonelist(preferred_nid, gfp);
 	ac->nodemask = nodemask;
-	ac->migratetype = gfp_migratetype(gfp_mask);
+	ac->migratetype = gfp_migratetype(gfp);
 
 	if (cpusets_enabled()) {
 		*alloc_gfp |= __GFP_HARDWALL;
@@ -4936,18 +4939,18 @@  static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 			*alloc_flags |= ALLOC_CPUSET;
 	}
 
-	fs_reclaim_acquire(gfp_mask);
-	fs_reclaim_release(gfp_mask);
+	fs_reclaim_acquire(gfp);
+	fs_reclaim_release(gfp);
 
-	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
+	might_sleep_if(gfp & __GFP_DIRECT_RECLAIM);
 
-	if (should_fail_alloc_page(gfp_mask, order))
+	if (should_fail_alloc_page(gfp, order))
 		return false;
 
-	*alloc_flags = current_alloc_flags(gfp_mask, *alloc_flags);
+	*alloc_flags = current_alloc_flags(gfp, *alloc_flags);
 
 	/* Dirty zone balancing only done in the fast path */
-	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
+	ac->spread_dirty_pages = (gfp & __GFP_WRITE);
 
 	/*
 	 * The preferred zone is used for statistics but crucially it is
@@ -4980,8 +4983,6 @@  struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 		return NULL;
 	}
 
-	gfp &= gfp_allowed_mask;
-	alloc_gfp = gfp;
 	if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
 			&alloc_gfp, &alloc_flags))
 		return NULL;