
[1/2] mm: alloc_pages_bulk_noprof: drop page_list argument

Message ID 4d3041315d1032e9acbe50971f952e716e8f4089.1734453061.git.luizcap@redhat.com (mailing list archive)
State New
Series mm: alloc_pages_bulk: small API refactor

Commit Message

Luiz Capitulino Dec. 17, 2024, 4:31 p.m. UTC
The commit 387ba26fb1cb added __alloc_pages_bulk() along with the page_list
argument. The next commit 0f87d9d30f21 added the array-based argument. As
it turns out, the page_list argument has no users in the current tree (if it
ever had any). Dropping it allows for a slight simplification and eliminates
some unnecessary checks, now that page_array is required.

Signed-off-by: Luiz Capitulino <luizcap@redhat.com>
---
 include/linux/gfp.h |  8 ++------
 mm/mempolicy.c      | 14 +++++++-------
 mm/page_alloc.c     | 39 ++++++++++++---------------------------
 3 files changed, 21 insertions(+), 40 deletions(-)
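
To illustrate the call-site change (a sketch based on the diff below; gfp, nid, nr_pages and page_array are assumed to be in scope):

	/* before: a NULL had to be passed for the unused page_list */
	nr = __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array);

	/* after: the list argument is gone and page_array is required */
	nr = __alloc_pages_bulk(gfp, nid, NULL, nr_pages, page_array);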

Comments

David Hildenbrand Dec. 19, 2024, 1:24 p.m. UTC | #1
On 17.12.24 17:31, Luiz Capitulino wrote:
> The commit 387ba26fb1cb added __alloc_pages_bulk() along with the page_list
> argument. The next commit 0f87d9d30f21 added the array-based argument. As

Nit: Use "commit 387ba26fb1cb ("mm/page_alloc: add a bulk page allocator")", same for the other commit.

(scripts/checkpatch.pl will likely complain about this)
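
For reference, one way to run it against this patch (the file name is hypothetical, assuming the mail was exported with git format-patch):

	# file name is hypothetical; as produced by git format-patch
	./scripts/checkpatch.pl 0001-mm-alloc_pages_bulk_noprof-drop-page_list-argument.patch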

> it turns out, the page_list argument has no users in the current tree (if it
> ever had any). Dropping it allows for a slight simplification and eliminates
> some unnecessary checks, now that page_array is required.
> 

It's probably a good idea to link to Mel's patch and the discussion from 
2023. Quoting what Willy said back then about the performance of lists 
vs. arrays might be valuable to have here as well.


Acked-by: David Hildenbrand <david@redhat.com>

> Signed-off-by: Luiz Capitulino <luizcap@redhat.com>
> ---
>   include/linux/gfp.h |  8 ++------
>   mm/mempolicy.c      | 14 +++++++-------
>   mm/page_alloc.c     | 39 ++++++++++++---------------------------
>   3 files changed, 21 insertions(+), 40 deletions(-)
> 
> diff --git a/include/linux/gfp.h b/include/linux/gfp.h
> index b0fe9f62d15b6..eebed36443b35 100644
> --- a/include/linux/gfp.h
> +++ b/include/linux/gfp.h
> @@ -212,7 +212,6 @@ struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_
>   
>   unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
>   				nodemask_t *nodemask, int nr_pages,
> -				struct list_head *page_list,
>   				struct page **page_array);
>   #define __alloc_pages_bulk(...)			alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))
>   
> @@ -223,11 +222,8 @@ unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
>   	alloc_hooks(alloc_pages_bulk_array_mempolicy_noprof(__VA_ARGS__))
>   
>   /* Bulk allocate order-0 pages */
> -#define alloc_pages_bulk_list(_gfp, _nr_pages, _list)			\
> -	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _list, NULL)
> -
>   #define alloc_pages_bulk_array(_gfp, _nr_pages, _page_array)		\
> -	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, NULL, _page_array)
> +	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array)
>   
>   static inline unsigned long
>   alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
> @@ -236,7 +232,7 @@ alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
>   	if (nid == NUMA_NO_NODE)
>   		nid = numa_mem_id();
>   
> -	return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, NULL, page_array);
> +	return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array);
>   }
>   
>   #define alloc_pages_bulk_array_node(...)				\
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index 04f35659717ae..42a7b07ccc15a 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -2375,13 +2375,13 @@ static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
>   		if (delta) {
>   			nr_allocated = alloc_pages_bulk_noprof(gfp,
>   					interleave_nodes(pol), NULL,
> -					nr_pages_per_node + 1, NULL,
> +					nr_pages_per_node + 1,
>   					page_array);
>   			delta--;
>   		} else {
>   			nr_allocated = alloc_pages_bulk_noprof(gfp,
>   					interleave_nodes(pol), NULL,
> -					nr_pages_per_node, NULL, page_array);
> +					nr_pages_per_node, page_array);
>   		}
>   
>   		page_array += nr_allocated;
> @@ -2430,7 +2430,7 @@ static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
>   	if (weight && node_isset(node, nodes)) {
>   		node_pages = min(rem_pages, weight);
>   		nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
> -						  NULL, page_array);
> +						  page_array);
>   		page_array += nr_allocated;
>   		total_allocated += nr_allocated;
>   		/* if that's all the pages, no need to interleave */
> @@ -2493,7 +2493,7 @@ static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
>   		if (!node_pages)
>   			break;
>   		nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
> -						  NULL, page_array);
> +						  page_array);
>   		page_array += nr_allocated;
>   		total_allocated += nr_allocated;
>   		if (total_allocated == nr_pages)
> @@ -2517,11 +2517,11 @@ static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
>   	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
>   
>   	nr_allocated  = alloc_pages_bulk_noprof(preferred_gfp, nid, &pol->nodes,
> -					   nr_pages, NULL, page_array);
> +					   nr_pages, page_array);
>   
>   	if (nr_allocated < nr_pages)
>   		nr_allocated += alloc_pages_bulk_noprof(gfp, numa_node_id(), NULL,
> -				nr_pages - nr_allocated, NULL,
> +				nr_pages - nr_allocated,
>   				page_array + nr_allocated);
>   	return nr_allocated;
>   }
> @@ -2557,7 +2557,7 @@ unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
>   	nid = numa_node_id();
>   	nodemask = policy_nodemask(gfp, pol, NO_INTERLEAVE_INDEX, &nid);
>   	return alloc_pages_bulk_noprof(gfp, nid, nodemask,
> -				       nr_pages, NULL, page_array);
> +				       nr_pages, page_array);
>   }
>   
>   int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 1cb4b8c8886d8..3ef6d902e2fea 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -4529,28 +4529,23 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
>   }
>   
>   /*
> - * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
> + * __alloc_pages_bulk - Allocate a number of order-0 pages to an array
>    * @gfp: GFP flags for the allocation
>    * @preferred_nid: The preferred NUMA node ID to allocate from
>    * @nodemask: Set of nodes to allocate from, may be NULL
> - * @nr_pages: The number of pages desired on the list or array
> - * @page_list: Optional list to store the allocated pages
> - * @page_array: Optional array to store the pages
> + * @nr_pages: The number of pages desired in the array
> + * @page_array: Array to store the pages
>    *
>    * This is a batched version of the page allocator that attempts to
> - * allocate nr_pages quickly. Pages are added to page_list if page_list
> - * is not NULL, otherwise it is assumed that the page_array is valid.
> + * allocate nr_pages quickly. Pages are added to the page_array.
>    *
> - * For lists, nr_pages is the number of pages that should be allocated.
> - *
> - * For arrays, only NULL elements are populated with pages and nr_pages
> + * Note that only NULL elements are populated with pages and nr_pages
>    * is the maximum number of pages that will be stored in the array.
>    *
> - * Returns the number of pages on the list or array.
> + * Returns the number of pages in the array.
>    */
>   unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
>   			nodemask_t *nodemask, int nr_pages,
> -			struct list_head *page_list,
>   			struct page **page_array)
>   {
>   	struct page *page;
> @@ -4568,7 +4563,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
>   	 * Skip populated array elements to determine if any pages need
>   	 * to be allocated before disabling IRQs.
>   	 */
> -	while (page_array && nr_populated < nr_pages && page_array[nr_populated])
> +	while (nr_populated < nr_pages && page_array[nr_populated])
>   		nr_populated++;
>   
>   	/* No pages requested? */
> @@ -4576,7 +4571,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
>   		goto out;
>   
>   	/* Already populated array? */
> -	if (unlikely(page_array && nr_pages - nr_populated == 0))
> +	if (unlikely(nr_pages - nr_populated == 0))
>   		goto out;
>   
>   	/* Bulk allocator does not support memcg accounting. */
> @@ -4658,7 +4653,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
>   	while (nr_populated < nr_pages) {
>   
>   		/* Skip existing pages */
> -		if (page_array && page_array[nr_populated]) {
> +		if (page_array[nr_populated]) {
>   			nr_populated++;
>   			continue;
>   		}
> @@ -4676,11 +4671,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
>   		nr_account++;
>   
>   		prep_new_page(page, 0, gfp, 0);
> -		if (page_list)
> -			list_add(&page->lru, page_list);
> -		else
> -			page_array[nr_populated] = page;
> -		nr_populated++;
> +		page_array[nr_populated++] = page;
>   	}
>   
>   	pcp_spin_unlock(pcp);
> @@ -4697,14 +4688,8 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
>   
>   failed:
>   	page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask);
> -	if (page) {
> -		if (page_list)
> -			list_add(&page->lru, page_list);
> -		else
> -			page_array[nr_populated] = page;
> -		nr_populated++;
> -	}
> -
> +	if (page)
> +		page_array[nr_populated++] = page;
>   	goto out;
>   }
>   EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof);
Luiz Capitulino Dec. 19, 2024, 3:50 p.m. UTC | #2
On 2024-12-19 08:24, David Hildenbrand wrote:
> On 17.12.24 17:31, Luiz Capitulino wrote:
>> The commit 387ba26fb1cb added __alloc_pages_bulk() along with the page_list
>> argument. The next commit 0f87d9d30f21 added the array-based argument. As
> 
> Nit: Use "commit 387ba26fb1cb ("mm/page_alloc: add a bulk page allocator")", same for the other commit.
> 
> (scripts/checkpatch.pl will likely complain about this)
> 
>> it turns out, the page_list argument has no users in the current tree (if it
>> ever had any). Dropping it allows for a slight simplification and eliminates
>> some unnecessary checks, now that page_array is required.
>>
> 
> It's probably a good idea to link to Mel's patch and the discussion from 2023. Quoting what Willy said back then about the performance of lists vs. arrays might be valuable to have here as well.

I'll add these, along with the suggestion on the other patch, in v2.

Thanks for the review, David.

Patch

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b0fe9f62d15b6..eebed36443b35 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -212,7 +212,6 @@  struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_
 
 unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 				nodemask_t *nodemask, int nr_pages,
-				struct list_head *page_list,
 				struct page **page_array);
 #define __alloc_pages_bulk(...)			alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))
 
@@ -223,11 +222,8 @@  unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
 	alloc_hooks(alloc_pages_bulk_array_mempolicy_noprof(__VA_ARGS__))
 
 /* Bulk allocate order-0 pages */
-#define alloc_pages_bulk_list(_gfp, _nr_pages, _list)			\
-	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _list, NULL)
-
 #define alloc_pages_bulk_array(_gfp, _nr_pages, _page_array)		\
-	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, NULL, _page_array)
+	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array)
 
 static inline unsigned long
 alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
@@ -236,7 +232,7 @@  alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
 
-	return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, NULL, page_array);
+	return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array);
 }
 
 #define alloc_pages_bulk_array_node(...)				\
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 04f35659717ae..42a7b07ccc15a 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2375,13 +2375,13 @@  static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
 		if (delta) {
 			nr_allocated = alloc_pages_bulk_noprof(gfp,
 					interleave_nodes(pol), NULL,
-					nr_pages_per_node + 1, NULL,
+					nr_pages_per_node + 1,
 					page_array);
 			delta--;
 		} else {
 			nr_allocated = alloc_pages_bulk_noprof(gfp,
 					interleave_nodes(pol), NULL,
-					nr_pages_per_node, NULL, page_array);
+					nr_pages_per_node, page_array);
 		}
 
 		page_array += nr_allocated;
@@ -2430,7 +2430,7 @@  static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
 	if (weight && node_isset(node, nodes)) {
 		node_pages = min(rem_pages, weight);
 		nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
-						  NULL, page_array);
+						  page_array);
 		page_array += nr_allocated;
 		total_allocated += nr_allocated;
 		/* if that's all the pages, no need to interleave */
@@ -2493,7 +2493,7 @@  static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
 		if (!node_pages)
 			break;
 		nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
-						  NULL, page_array);
+						  page_array);
 		page_array += nr_allocated;
 		total_allocated += nr_allocated;
 		if (total_allocated == nr_pages)
@@ -2517,11 +2517,11 @@  static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
 
 	nr_allocated  = alloc_pages_bulk_noprof(preferred_gfp, nid, &pol->nodes,
-					   nr_pages, NULL, page_array);
+					   nr_pages, page_array);
 
 	if (nr_allocated < nr_pages)
 		nr_allocated += alloc_pages_bulk_noprof(gfp, numa_node_id(), NULL,
-				nr_pages - nr_allocated, NULL,
+				nr_pages - nr_allocated,
 				page_array + nr_allocated);
 	return nr_allocated;
 }
@@ -2557,7 +2557,7 @@  unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
 	nid = numa_node_id();
 	nodemask = policy_nodemask(gfp, pol, NO_INTERLEAVE_INDEX, &nid);
 	return alloc_pages_bulk_noprof(gfp, nid, nodemask,
-				       nr_pages, NULL, page_array);
+				       nr_pages, page_array);
 }
 
 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1cb4b8c8886d8..3ef6d902e2fea 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4529,28 +4529,23 @@  static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 }
 
 /*
- * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
+ * __alloc_pages_bulk - Allocate a number of order-0 pages to an array
  * @gfp: GFP flags for the allocation
  * @preferred_nid: The preferred NUMA node ID to allocate from
  * @nodemask: Set of nodes to allocate from, may be NULL
- * @nr_pages: The number of pages desired on the list or array
- * @page_list: Optional list to store the allocated pages
- * @page_array: Optional array to store the pages
+ * @nr_pages: The number of pages desired in the array
+ * @page_array: Array to store the pages
  *
  * This is a batched version of the page allocator that attempts to
- * allocate nr_pages quickly. Pages are added to page_list if page_list
- * is not NULL, otherwise it is assumed that the page_array is valid.
+ * allocate nr_pages quickly. Pages are added to the page_array.
  *
- * For lists, nr_pages is the number of pages that should be allocated.
- *
- * For arrays, only NULL elements are populated with pages and nr_pages
+ * Note that only NULL elements are populated with pages and nr_pages
  * is the maximum number of pages that will be stored in the array.
  *
- * Returns the number of pages on the list or array.
+ * Returns the number of pages in the array.
  */
 unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 			nodemask_t *nodemask, int nr_pages,
-			struct list_head *page_list,
 			struct page **page_array)
 {
 	struct page *page;
@@ -4568,7 +4563,7 @@  unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 	 * Skip populated array elements to determine if any pages need
 	 * to be allocated before disabling IRQs.
 	 */
-	while (page_array && nr_populated < nr_pages && page_array[nr_populated])
+	while (nr_populated < nr_pages && page_array[nr_populated])
 		nr_populated++;
 
 	/* No pages requested? */
@@ -4576,7 +4571,7 @@  unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 		goto out;
 
 	/* Already populated array? */
-	if (unlikely(page_array && nr_pages - nr_populated == 0))
+	if (unlikely(nr_pages - nr_populated == 0))
 		goto out;
 
 	/* Bulk allocator does not support memcg accounting. */
@@ -4658,7 +4653,7 @@  unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 	while (nr_populated < nr_pages) {
 
 		/* Skip existing pages */
-		if (page_array && page_array[nr_populated]) {
+		if (page_array[nr_populated]) {
 			nr_populated++;
 			continue;
 		}
@@ -4676,11 +4671,7 @@  unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 		nr_account++;
 
 		prep_new_page(page, 0, gfp, 0);
-		if (page_list)
-			list_add(&page->lru, page_list);
-		else
-			page_array[nr_populated] = page;
-		nr_populated++;
+		page_array[nr_populated++] = page;
 	}
 
 	pcp_spin_unlock(pcp);
@@ -4697,14 +4688,8 @@  unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 
 failed:
 	page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask);
-	if (page) {
-		if (page_list)
-			list_add(&page->lru, page_list);
-		else
-			page_array[nr_populated] = page;
-		nr_populated++;
-	}
-
+	if (page)
+		page_array[nr_populated++] = page;
 	goto out;
 }
 EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof);
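
A usage sketch of the resulting array-only API (a hypothetical caller, not part of the patch), reflecting the kernel-doc above: only NULL elements are populated, and the return value is the total number of populated entries, so a partial batch can be completed by calling again with the same array:

	struct page *pages[16] = { NULL };	/* hypothetical fixed-size batch */
	unsigned long nr;

	nr = alloc_pages_bulk_array(GFP_KERNEL, ARRAY_SIZE(pages), pages);
	if (nr < ARRAY_SIZE(pages)) {
		/* only the remaining NULL slots are attempted again */
		nr = alloc_pages_bulk_array(GFP_KERNEL, ARRAY_SIZE(pages), pages);
	}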