--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -182,7 +182,9 @@ struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
nodemask_t *nodemask, int nr_pages,
struct list_head *page_list,
- struct page **page_array);
+ struct page **page_array,
+ void (*cb)(struct page *, void *),
+ void *data);
unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
unsigned long nr_pages,
@@ -192,13 +194,15 @@ unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
static inline unsigned long
alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list)
{
- return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list, NULL);
+ return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list, NULL,
+ NULL, NULL);
}
static inline unsigned long
alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_array)
{
- return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array);
+ return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array,
+ NULL, NULL);
}
static inline unsigned long
@@ -207,7 +211,16 @@ alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct p
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
- return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array);
+ return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array,
+ NULL, NULL);
+}
+
+static inline unsigned long
+alloc_pages_bulk_cb(gfp_t gfp, unsigned long nr_pages,
+ void (*cb)(struct page *page, void *data), void *data)
+{
+ return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, NULL,
+ cb, data);
}
static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2318,12 +2318,13 @@ static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
nr_allocated = __alloc_pages_bulk(gfp,
interleave_nodes(pol), NULL,
nr_pages_per_node + 1, NULL,
- page_array);
+ page_array, NULL, NULL);
delta--;
} else {
nr_allocated = __alloc_pages_bulk(gfp,
interleave_nodes(pol), NULL,
- nr_pages_per_node, NULL, page_array);
+ nr_pages_per_node, NULL, page_array,
+ NULL, NULL);
}
page_array += nr_allocated;
@@ -2344,12 +2345,13 @@ static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
- nr_pages, NULL, page_array);
+ nr_pages, NULL, page_array,
+ NULL, NULL);
if (nr_allocated < nr_pages)
nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
nr_pages - nr_allocated, NULL,
- page_array + nr_allocated);
+ page_array + nr_allocated, NULL, NULL);
return nr_allocated;
}
@@ -2377,7 +2379,7 @@ unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
policy_nodemask(gfp, pol), nr_pages, NULL,
- page_array);
+ page_array, NULL, NULL);
}
int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5402,22 +5402,27 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
* @nr_pages: The number of pages desired on the list or array
* @page_list: Optional list to store the allocated pages
* @page_array: Optional array to store the pages
+ * @cb: Optional callback to handle each allocated page
+ * @data: Opaque data to be passed to cb
*
* This is a batched version of the page allocator that attempts to
* allocate nr_pages quickly. Pages are added to page_list if page_list
- * is not NULL, otherwise it is assumed that the page_array is valid.
+ * is not NULL, stored in page_array if it is not NULL, or passed to
+ * cb if cb is not NULL.
*
- * For lists, nr_pages is the number of pages that should be allocated.
+ * For lists and cb, nr_pages is the number of pages that should be allocated.
*
* For arrays, only NULL elements are populated with pages and nr_pages
* is the maximum number of pages that will be stored in the array.
*
- * Returns the number of pages on the list or array.
+ * Returns the number of pages on the list or array, or passed to cb.
*/
unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
nodemask_t *nodemask, int nr_pages,
struct list_head *page_list,
- struct page **page_array)
+ struct page **page_array,
+ void (*cb)(struct page *, void *),
+ void *data)
{
struct page *page;
unsigned long __maybe_unused UP_flags;
@@ -5532,8 +5537,10 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
prep_new_page(page, 0, gfp, 0);
if (page_list)
list_add(&page->lru, page_list);
- else
+ else if (page_array)
page_array[nr_populated] = page;
+ else
+ cb(page, data);
nr_populated++;
}
@@ -5554,8 +5561,10 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
if (page) {
if (page_list)
list_add(&page->lru, page_list);
- else
+ else if (page_array)
page_array[nr_populated] = page;
+ else
+ cb(page, data);
nr_populated++;
}
Currently the bulk allocator supports passing pages via a list or an
array, but neither is suitable for some use cases, for example
dm-crypt, which doesn't need a list, but an array may be too big to
fit on the stack.  So add a new bulk allocator API that takes a
callback function to deal with the allocated pages.

The API defined in this patch will be used by the following patches.

Signed-off-by: Yang Shi <shy828301@gmail.com>
---
 include/linux/gfp.h | 21 +++++++++++++++++----
 mm/mempolicy.c      | 12 +++++++-----
 mm/page_alloc.c     | 21 +++++++++++++++------
 3 files changed, 39 insertions(+), 15 deletions(-)
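For illustration, a hypothetical caller of the new interface might
look like the sketch below (crypt_page_cb, struct crypt_ctx and
crypt_alloc_pages are made-up names for this example, not part of the
patch; it assumes linux/gfp.h is included):

	struct crypt_ctx {
		unsigned long nr_pages;	/* pages received so far */
	};

	/* Invoked once for each page the bulk allocator hands back. */
	static void crypt_page_cb(struct page *page, void *data)
	{
		struct crypt_ctx *ctx = data;

		ctx->nr_pages++;
		/* ... consume the page here, e.g. map it for I/O ... */
	}

	static unsigned long crypt_alloc_pages(unsigned long nr)
	{
		struct crypt_ctx ctx = { 0 };

		/*
		 * Each page is handed straight to crypt_page_cb(), so no
		 * page array has to be kept on the stack.
		 */
		return alloc_pages_bulk_cb(GFP_KERNEL, nr, crypt_page_cb, &ctx);
	}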