
[2/5] mm/page_alloc: Add a bulk page allocator

Message ID 20210301161200.18852-3-mgorman@techsingularity.net (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series Introduce a bulk order-0 page allocator with two in-tree users

Checks

Context Check Description
netdev/cover_letter success Link
netdev/fixes_present success Link
netdev/patch_count success Link
netdev/tree_selection success Guessed tree name to be net-next
netdev/subject_prefix success Link
netdev/cc_maintainers success CCed 2 of 2 maintainers
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success Link
netdev/module_param success Was 0 now: 0
netdev/build_32bit fail Errors and warnings before: 23963 this patch: 23965
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success Link
netdev/checkpatch warning CHECK: Alignment should match open parenthesis CHECK: extern prototypes should be avoided in .h files WARNING: line length of 103 exceeds 80 columns WARNING: line length of 83 exceeds 80 columns WARNING: line length of 84 exceeds 80 columns WARNING: line length of 96 exceeds 80 columns
netdev/build_allmodconfig_warn fail Errors and warnings before: 23363 this patch: 23365
netdev/header_inline success Link
netdev/stable success Stable not CCed

Commit Message

Mel Gorman March 1, 2021, 4:11 p.m. UTC
This patch adds a new page allocator interface via alloc_pages_bulk and
__alloc_pages_bulk_nodemask. A caller requests a number of pages to be
allocated and added to a list. The pages can be freed in bulk using
free_pages_bulk().

The API is not guaranteed to return the requested number of pages and
may fail if the preferred allocation zone has limited free memory, if the
cpuset changes during the allocation, or if page debugging decides to fail
an allocation. It is up to the caller to issue a further batch request for
any shortfall.
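
For illustration, a caller might drive the pair roughly as follows. This is
a sketch only; the GFP flags, the nr_pages count and the per-page fallback
are the caller's choice and are not mandated by this patch.

	LIST_HEAD(page_list);
	unsigned long allocated;

	/* Ask for a batch of order-0 pages on the local node. */
	allocated = alloc_pages_bulk(GFP_KERNEL, nr_pages, &page_list);

	/* The call may return fewer pages than requested; top up one at a time. */
	while (allocated < nr_pages) {
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			break;
		list_add(&page->lru, &page_list);
		allocated++;
	}

	/* ... use the pages ... */

	/* Drop the reference on anything still on the list. */
	free_pages_bulk(&page_list);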

Note that this implementation is not very efficient and could be improved,
but that would require refactoring. The intent is to make it available early
to determine what semantics are required by different callers. Once the full
semantics are nailed down, it can be refactored.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
---
 include/linux/gfp.h |  13 +++++
 mm/page_alloc.c     | 113 +++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 124 insertions(+), 2 deletions(-)

Comments

Christoph Hellwig March 9, 2021, 5:12 p.m. UTC | #1
Would vmalloc be another good user of this API? 

> +	/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
> +	if (!prepare_alloc_pages(gfp_mask, 0, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))

This crazy long line is really hard to follow.

> +		return 0;
> +	gfp_mask = alloc_mask;
> +
> +	/* Find an allowed local zone that meets the high watermark. */
> +	for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {

Same here.

> +		unsigned long mark;
> +
> +		if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
> +		    !__cpuset_zone_allowed(zone, gfp_mask)) {
> +			continue;
> +		}

No need for the curly braces.

>  	}
>  
> -	gfp_mask &= gfp_allowed_mask;
> -	alloc_mask = gfp_mask;

Is this change intentional?
Mel Gorman March 9, 2021, 6:10 p.m. UTC | #2
On Tue, Mar 09, 2021 at 05:12:30PM +0000, Christoph Hellwig wrote:
> Would vmalloc be another good user of this API? 
> 
> > +	/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
> > +	if (!prepare_alloc_pages(gfp_mask, 0, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
> 
> This crazy long line is really hard to follow.
> 

It's no crazier than what is already in __alloc_pages_nodemask; the long
line is the price of sharing prepare_alloc_pages() between the two paths.

> > +		return 0;
> > +	gfp_mask = alloc_mask;
> > +
> > +	/* Find an allowed local zone that meets the high watermark. */
> > +	for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
> 
> Same here.
> 

Similar to what happens in get_page_from_freelist with the
for_next_zone_zonelist_nodemask iterator.

> > +		unsigned long mark;
> > +
> > +		if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
> > +		    !__cpuset_zone_allowed(zone, gfp_mask)) {
> > +			continue;
> > +		}
> 
> No need for the curly braces.
> 

Yes, but it's a coding-style choice. MM has no hard coding-style guideline
on this, but in sched it's generally preferred that if the "if" condition
spans multiple lines then the body uses {} for clarity, even when the block
is only one line long.

> >  	}
> >  
> > -	gfp_mask &= gfp_allowed_mask;
> > -	alloc_mask = gfp_mask;
> 
> Is this change intentional?

Yes, so that prepare_alloc_pages() works for both the single-page and bulk
allocators. Slightly less code duplication.
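
For reference, the two lines dropped from __alloc_pages_nodemask() by this
patch reappear at the top of prepare_alloc_pages() (see the mm/page_alloc.c
hunk further down). Roughly sketched, with the rest of the function elided:

	static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
			int preferred_nid, nodemask_t *nodemask,
			struct alloc_context *ac, gfp_t *alloc_mask,
			unsigned int *alloc_flags)
	{
		/* Moved here so the bulk and single-page entry points share it. */
		gfp_mask &= gfp_allowed_mask;
		*alloc_mask = gfp_mask;

		ac->highest_zoneidx = gfp_zone(gfp_mask);
		ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
		/* ... remainder unchanged ... */
	}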
Shay Agroskin March 10, 2021, 11:04 a.m. UTC | #3
Mel Gorman <mgorman@techsingularity.net> writes:

> 
> diff --git a/include/linux/gfp.h b/include/linux/gfp.h
> index 8572a1474e16..4903d1cc48dc 100644
> --- a/include/linux/gfp.h
> +++ b/include/linux/gfp.h
> @@ -515,6 +515,10 @@ static inline int arch_make_page_accessible(struct page *page)
>  }
>  #endif
>  
> +int __alloc_pages_bulk_nodemask(gfp_t gfp_mask, int preferred_nid,
> +				nodemask_t *nodemask, int nr_pages,
> +				struct list_head *list);
> +
>  struct page *
>  __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
>  							nodemask_t *nodemask);
> @@ -525,6 +529,14 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
>  	return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
>  }
>  
> +/* Bulk allocate order-0 pages */
> +static inline unsigned long
> +alloc_pages_bulk(gfp_t gfp_mask, unsigned long nr_pages, struct list_head *list)
> +{
> +	return __alloc_pages_bulk_nodemask(gfp_mask, numa_mem_id(), NULL,
> +							nr_pages, list);

Is the second line indentation intentional? Why not align it to the first
argument (gfp_mask)?

> +}
> +
>  /*
>   * Allocate pages, preferring the node given as nid. The node must be valid and
>   * online. For more general interface, see alloc_pages_node().
> @@ -594,6 +606,7 @@ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
>  
>  extern void __free_pages(struct page *page, unsigned int order);
>  extern void free_pages(unsigned long addr, unsigned int order);
> +extern void free_pages_bulk(struct list_head *list);
>  
>  struct page_frag_cache;
>  extern void __page_frag_cache_drain(struct page *page, unsigned int count);
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 3e4b29ee2b1e..ff1e55793786 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -4436,6 +4436,21 @@ static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
>  	}
>  }
> ...
>  
> +/*
> + * This is a batched version of the page allocator that attempts to
> + * allocate nr_pages quickly from the preferred zone and add them to list.
> + */
> +int __alloc_pages_bulk_nodemask(gfp_t gfp_mask, int preferred_nid,
> +			nodemask_t *nodemask, int nr_pages,
> +			struct list_head *alloc_list)
> +{
> +	struct page *page;
> +	unsigned long flags;
> +	struct zone *zone;
> +	struct zoneref *z;
> +	struct per_cpu_pages *pcp;
> +	struct list_head *pcp_list;
> +	struct alloc_context ac;
> +	gfp_t alloc_mask;
> +	unsigned int alloc_flags;
> +	int alloced = 0;

Does alloced count the number of allocated pages? Do you mind renaming it
to 'allocated'?

> +
> +	if (nr_pages == 1)
> +		goto failed;
> +
> +	/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
> +	if (!prepare_alloc_pages(gfp_mask, 0, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
> +		return 0;
> +	gfp_mask = alloc_mask;
> +
> +	/* Find an allowed local zone that meets the high watermark. */
> +	for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
> +		unsigned long mark;
> +
> +		if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
> +		    !__cpuset_zone_allowed(zone, gfp_mask)) {
> +			continue;
> +		}
> +
> +		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
> +		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
> +			goto failed;
> +		}
> +
> +		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
> +		if (zone_watermark_fast(zone, 0,  mark,
> +				zonelist_zone_idx(ac.preferred_zoneref),
> +				alloc_flags, gfp_mask)) {
> +			break;
> +		}
> +	}
> +	if (!zone)
> +		return 0;
> +
> +	/* Attempt the batch allocation */
> +	local_irq_save(flags);
> +	pcp = &this_cpu_ptr(zone->pageset)->pcp;
> +	pcp_list = &pcp->lists[ac.migratetype];
> +
> +	while (alloced < nr_pages) {
> +		page = __rmqueue_pcplist(zone, ac.migratetype, alloc_flags,
> +								pcp, pcp_list);

Same indentation comment as before

> +		if (!page)
> +			break;
> +
> +		prep_new_page(page, 0, gfp_mask, 0);
> +		list_add(&page->lru, alloc_list);
> +		alloced++;
> +	}
> +
> +	if (!alloced)
> +		goto failed_irq;
> +
> +	if (alloced) {
> +		__count_zid_vm_events(PGALLOC, zone_idx(zone), alloced);
> +		zone_statistics(zone, zone);
> +	}
> +
> +	local_irq_restore(flags);
> +
> +	return alloced;
> +
> +failed_irq:
> +	local_irq_restore(flags);
> +
> +failed:
> +	page = __alloc_pages_nodemask(gfp_mask, 0, preferred_nid, nodemask);
> +	if (page) {
> +		alloced++;
> +		list_add(&page->lru, alloc_list);
> +	}
> +
> +	return alloced;
> +}
> +EXPORT_SYMBOL_GPL(__alloc_pages_bulk_nodemask);
> +
>  /*
>   * This is the 'heart' of the zoned buddy allocator.
>   */
> @@ -4981,8 +5092,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
>  		return NULL;
>  	}
>  
> -	gfp_mask &= gfp_allowed_mask;
> -	alloc_mask = gfp_mask;
>  	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
>  		return NULL;
Mel Gorman March 10, 2021, 11:38 a.m. UTC | #4
On Wed, Mar 10, 2021 at 01:04:17PM +0200, Shay Agroskin wrote:
> 
> Mel Gorman <mgorman@techsingularity.net> writes:
> 
> > 
> > diff --git a/include/linux/gfp.h b/include/linux/gfp.h
> > index 8572a1474e16..4903d1cc48dc 100644
> > --- a/include/linux/gfp.h
> > +++ b/include/linux/gfp.h
> > @@ -515,6 +515,10 @@ static inline int arch_make_page_accessible(struct page *page)
> >  }
> >  #endif
> >  
> > +int __alloc_pages_bulk_nodemask(gfp_t gfp_mask, int preferred_nid,
> > +				nodemask_t *nodemask, int nr_pages,
> > +				struct list_head *list);
> > +
> >  struct page *
> >  __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
> >  							nodemask_t *nodemask);
> > @@ -525,6 +529,14 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
> >  	return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
> >  }
> >  
> > +/* Bulk allocate order-0 pages */
> > +static inline unsigned long
> > +alloc_pages_bulk(gfp_t gfp_mask, unsigned long nr_pages, struct list_head *list)
> > +{
> > +	return __alloc_pages_bulk_nodemask(gfp_mask, numa_mem_id(), NULL,
> > +							nr_pages, list);
> 
> Is the second line indentation intentional? Why not align it to the first
> argument (gfp_mask)?
> 

No particular reason. I usually pick this style as it makes it visually
clearer to me that the continuation belongs to the same statement,
particularly when the multi-line call sits inside an if block.

> > +}
> > +
> >  /*
> >   * Allocate pages, preferring the node given as nid. The node must be valid and
> >   * online. For more general interface, see alloc_pages_node().
> > @@ -594,6 +606,7 @@ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
> >  
> >  extern void __free_pages(struct page *page, unsigned int order);
> >  extern void free_pages(unsigned long addr, unsigned int order);
> > +extern void free_pages_bulk(struct list_head *list);
> >  
> >  struct page_frag_cache;
> >  extern void __page_frag_cache_drain(struct page *page, unsigned int count);
> > diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> > index 3e4b29ee2b1e..ff1e55793786 100644
> > --- a/mm/page_alloc.c
> > +++ b/mm/page_alloc.c
> > @@ -4436,6 +4436,21 @@ static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
> >  	}
> >  }
> > ...
> >  
> > +/*
> > + * This is a batched version of the page allocator that attempts to
> > + * allocate nr_pages quickly from the preferred zone and add them to list.
> > + */
> > +int __alloc_pages_bulk_nodemask(gfp_t gfp_mask, int preferred_nid,
> > +			nodemask_t *nodemask, int nr_pages,
> > +			struct list_head *alloc_list)
> > +{
> > +	struct page *page;
> > +	unsigned long flags;
> > +	struct zone *zone;
> > +	struct zoneref *z;
> > +	struct per_cpu_pages *pcp;
> > +	struct list_head *pcp_list;
> > +	struct alloc_context ac;
> > +	gfp_t alloc_mask;
> > +	unsigned int alloc_flags;
> > +	int alloced = 0;
> 
> Does alloced count the number of allocated pages?

Yes.

> Do you mind renaming it to 'allocated'?

I will if there is another version, as I do not feel particularly strongly
about alloced vs allocated. "alloced" was chosen to match the function name,
and I don't think the change makes it much clearer.


> > <SNIP>
> > +	/* Attempt the batch allocation */
> > +	local_irq_save(flags);
> > +	pcp = &this_cpu_ptr(zone->pageset)->pcp;
> > +	pcp_list = &pcp->lists[ac.migratetype];
> > +
> > +	while (alloced < nr_pages) {
> > +		page = __rmqueue_pcplist(zone, ac.migratetype, alloc_flags,
> > +								pcp, pcp_list);
> 
> Same indentation comment as before
> 

Again, simple personal preference to avoid any possibility of it being
mixed up with a later line. There has not been consistent enforcement of
which indentation style should be used for multi-line statements within
mm/page_alloc.c.
Jesper Dangaard Brouer March 12, 2021, 12:01 p.m. UTC | #5
On Wed, 10 Mar 2021 11:38:36 +0000
Mel Gorman <mgorman@techsingularity.net> wrote:

> On Wed, Mar 10, 2021 at 01:04:17PM +0200, Shay Agroskin wrote:
> > 
> > Mel Gorman <mgorman@techsingularity.net> writes:
> >   
> > > 
> > > diff --git a/include/linux/gfp.h b/include/linux/gfp.h
> > > index 8572a1474e16..4903d1cc48dc 100644
> > > --- a/include/linux/gfp.h
> > > +++ b/include/linux/gfp.h
> > > @@ -515,6 +515,10 @@ static inline int arch_make_page_accessible(struct page *page)
> > >  }
> > >  #endif
> > >  
> > > +int __alloc_pages_bulk_nodemask(gfp_t gfp_mask, int preferred_nid,
> > > +				nodemask_t *nodemask, int nr_pages,
> > > +				struct list_head *list);
> > > +
> > >  struct page *
> > >  __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
> > >  							nodemask_t *nodemask);
> > > @@ -525,6 +529,14 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
> > >  	return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
> > >  }
> > >  
> > > +/* Bulk allocate order-0 pages */
> > > +static inline unsigned long
> > > +alloc_pages_bulk(gfp_t gfp_mask, unsigned long nr_pages, struct list_head *list)
> > > +{
> > > +	return __alloc_pages_bulk_nodemask(gfp_mask, numa_mem_id(), NULL,
> > > +							nr_pages, list);
> > 
> > Is the second line indentation intentional? Why not align it to the first
> > argument (gfp_mask)?
> >   
> 
> No particular reason. I usually pick this style as it makes it visually
> clearer to me that the continuation belongs to the same statement,
> particularly when the multi-line call sits inside an if block.
> 
> > > +}
> > > +
[...]
> > 
> > Same indentation comment as before
> >   
> 
> Again, simple personal preference to avoid any possibility of it being
> mixed up with a later line. There has not been consistent enforcement of
> which indentation style should be used for multi-line statements within
> mm/page_alloc.c.

Hi Shay, it might be surprising that indentation style actually differs
slightly in different parts of the kernel.  I started in the networking
area of the kernel, and I was also surprised when I started working in the
MM area that the coding style differs.  I can tell you that the indentation
style Mel chose is consistent with the code styling in the MM area.  I
usually respect that even though I prefer the networking style, as I was
"raised" with that style.

Patch

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 8572a1474e16..4903d1cc48dc 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -515,6 +515,10 @@  static inline int arch_make_page_accessible(struct page *page)
 }
 #endif
 
+int __alloc_pages_bulk_nodemask(gfp_t gfp_mask, int preferred_nid,
+				nodemask_t *nodemask, int nr_pages,
+				struct list_head *list);
+
 struct page *
 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 							nodemask_t *nodemask);
@@ -525,6 +529,14 @@  __alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
 	return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
 }
 
+/* Bulk allocate order-0 pages */
+static inline unsigned long
+alloc_pages_bulk(gfp_t gfp_mask, unsigned long nr_pages, struct list_head *list)
+{
+	return __alloc_pages_bulk_nodemask(gfp_mask, numa_mem_id(), NULL,
+							nr_pages, list);
+}
+
 /*
  * Allocate pages, preferring the node given as nid. The node must be valid and
  * online. For more general interface, see alloc_pages_node().
@@ -594,6 +606,7 @@  void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
 
 extern void __free_pages(struct page *page, unsigned int order);
 extern void free_pages(unsigned long addr, unsigned int order);
+extern void free_pages_bulk(struct list_head *list);
 
 struct page_frag_cache;
 extern void __page_frag_cache_drain(struct page *page, unsigned int count);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3e4b29ee2b1e..ff1e55793786 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4436,6 +4436,21 @@  static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
 	}
 }
 
+/* Drop reference counts and free order-0 pages from a list. */
+void free_pages_bulk(struct list_head *list)
+{
+	struct page *page, *next;
+
+	list_for_each_entry_safe(page, next, list, lru) {
+		trace_mm_page_free_batched(page);
+		if (put_page_testzero(page)) {
+			list_del(&page->lru);
+			__free_pages_ok(page, 0, FPI_NONE);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(free_pages_bulk);
+
 static inline unsigned int
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
@@ -4919,6 +4934,9 @@  static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 		struct alloc_context *ac, gfp_t *alloc_mask,
 		unsigned int *alloc_flags)
 {
+	gfp_mask &= gfp_allowed_mask;
+	*alloc_mask = gfp_mask;
+
 	ac->highest_zoneidx = gfp_zone(gfp_mask);
 	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
 	ac->nodemask = nodemask;
@@ -4960,6 +4978,99 @@  static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 	return true;
 }
 
+/*
+ * This is a batched version of the page allocator that attempts to
+ * allocate nr_pages quickly from the preferred zone and add them to list.
+ */
+int __alloc_pages_bulk_nodemask(gfp_t gfp_mask, int preferred_nid,
+			nodemask_t *nodemask, int nr_pages,
+			struct list_head *alloc_list)
+{
+	struct page *page;
+	unsigned long flags;
+	struct zone *zone;
+	struct zoneref *z;
+	struct per_cpu_pages *pcp;
+	struct list_head *pcp_list;
+	struct alloc_context ac;
+	gfp_t alloc_mask;
+	unsigned int alloc_flags;
+	int alloced = 0;
+
+	if (nr_pages == 1)
+		goto failed;
+
+	/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
+	if (!prepare_alloc_pages(gfp_mask, 0, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
+		return 0;
+	gfp_mask = alloc_mask;
+
+	/* Find an allowed local zone that meets the high watermark. */
+	for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
+		unsigned long mark;
+
+		if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
+		    !__cpuset_zone_allowed(zone, gfp_mask)) {
+			continue;
+		}
+
+		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
+		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
+			goto failed;
+		}
+
+		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
+		if (zone_watermark_fast(zone, 0,  mark,
+				zonelist_zone_idx(ac.preferred_zoneref),
+				alloc_flags, gfp_mask)) {
+			break;
+		}
+	}
+	if (!zone)
+		return 0;
+
+	/* Attempt the batch allocation */
+	local_irq_save(flags);
+	pcp = &this_cpu_ptr(zone->pageset)->pcp;
+	pcp_list = &pcp->lists[ac.migratetype];
+
+	while (alloced < nr_pages) {
+		page = __rmqueue_pcplist(zone, ac.migratetype, alloc_flags,
+								pcp, pcp_list);
+		if (!page)
+			break;
+
+		prep_new_page(page, 0, gfp_mask, 0);
+		list_add(&page->lru, alloc_list);
+		alloced++;
+	}
+
+	if (!alloced)
+		goto failed_irq;
+
+	if (alloced) {
+		__count_zid_vm_events(PGALLOC, zone_idx(zone), alloced);
+		zone_statistics(zone, zone);
+	}
+
+	local_irq_restore(flags);
+
+	return alloced;
+
+failed_irq:
+	local_irq_restore(flags);
+
+failed:
+	page = __alloc_pages_nodemask(gfp_mask, 0, preferred_nid, nodemask);
+	if (page) {
+		alloced++;
+		list_add(&page->lru, alloc_list);
+	}
+
+	return alloced;
+}
+EXPORT_SYMBOL_GPL(__alloc_pages_bulk_nodemask);
+
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
@@ -4981,8 +5092,6 @@  __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 		return NULL;
 	}
 
-	gfp_mask &= gfp_allowed_mask;
-	alloc_mask = gfp_mask;
 	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
 		return NULL;