@@ -625,6 +625,11 @@ static inline bool pm_suspended_storage(void)
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
unsigned migratetype, gfp_t gfp_mask);
+extern int alloc_pages_bulk(unsigned long start, unsigned long end,
+ unsigned int migratetype, gfp_t gfp_mask,
+ unsigned int order, unsigned int nr_elem,
+ struct page **pages);
+
extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
int nid, nodemask_t *nodemask);
#endif
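
As orientation before the implementation below, here is a minimal caller sketch. This is hypothetical code, not part of this patch; the order-4/4800 figures mirror the use case described in the commit message at the end of this patch, and the cleanup loop follows the contract in the kerneldoc below (each element must be freed individually with __free_pages()):

	/* static so the 4800-pointer array stays off the stack */
	static struct page *pages[4800];
	int nr, i;

	/* Try to take up to 4800 order-4 pages out of [start_pfn, end_pfn). */
	nr = alloc_pages_bulk(start_pfn, end_pfn, MIGRATE_CMA, GFP_KERNEL,
			      4, ARRAY_SIZE(pages), pages);
	if (nr < 0)
		return nr;

	/* ... use the nr allocated pages ... */

	/* Each element is an independent order-4 page: free them one by one. */
	for (i = 0; i < nr; i++)
		__free_pages(pages[i], 4);
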
@@ -713,10 +713,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
*/
unsigned long
isolate_freepages_range(struct compact_control *cc,
- unsigned long start_pfn, unsigned long end_pfn)
+ unsigned long start_pfn, unsigned long end_pfn,
+ struct list_head *freepage_list)
{
unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
- LIST_HEAD(freelist);
pfn = start_pfn;
block_start_pfn = pageblock_start_pfn(pfn);
@@ -748,7 +748,7 @@ isolate_freepages_range(struct compact_control *cc,
break;
isolated = isolate_freepages_block(cc, &isolate_start_pfn,
- block_end_pfn, &freelist, 0, true);
+ block_end_pfn, freepage_list, 0, true);
/*
* In strict mode, isolate_freepages_block() returns 0 if
@@ -766,15 +766,14 @@ isolate_freepages_range(struct compact_control *cc,
}
/* __isolate_free_page() does not map the pages */
- split_map_pages(&freelist, cc->isolate_order);
+ split_map_pages(freepage_list, cc->isolate_order);
if (pfn < end_pfn) {
/* Loop terminated early, cleanup. */
- release_freepages(&freelist, cc->isolate_order);
+ release_freepages(freepage_list, cc->isolate_order);
return 0;
}
- /* We don't use freelists for anything. */
return pfn;
}
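
With this change the caller, not isolate_freepages_range(), owns the list of isolated pages. A minimal sketch of the new calling convention, mirroring what the __alloc_contig_range() hunk further below does (the compact_control cc and the surrounding variables are assumed to be set up as in that hunk):

	LIST_HEAD(freepage_list);	/* caller-owned; filled by the callee */
	unsigned long outer_end;

	cc.isolate_order = alloc_order;	/* order the free pages are split to */
	outer_end = isolate_freepages_range(&cc, outer_start, end,
					    &freepage_list);
	if (!outer_end)
		return -EBUSY;	/* isolation failed; the list was released */

On success, freepage_list holds the isolated pages, split and mapped at cc.isolate_order, in ascending pfn order.
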
@@ -258,7 +258,8 @@ struct capture_control {
unsigned long
isolate_freepages_range(struct compact_control *cc,
- unsigned long start_pfn, unsigned long end_pfn);
+ unsigned long start_pfn, unsigned long end_pfn,
+ struct list_head *freepage_list);
unsigned long
isolate_migratepages_range(struct compact_control *cc,
unsigned long low_pfn, unsigned long end_pfn);
@@ -8402,10 +8402,14 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
}
static int __alloc_contig_range(unsigned long start, unsigned long end,
- unsigned migratetype, gfp_t gfp_mask)
+ unsigned int migratetype, gfp_t gfp_mask,
+ unsigned int alloc_order,
+ struct list_head *freepage_list)
{
unsigned long outer_start, outer_end;
unsigned int order;
+ struct page *page, *page2;
+ unsigned long pfn;
int ret = 0;
struct compact_control cc = {
@@ -8417,6 +8421,7 @@ static int __alloc_contig_range(unsigned long start, unsigned long end,
.no_set_skip_hint = true,
.gfp_mask = current_gfp_context(gfp_mask),
.alloc_contig = true,
+ .isolate_order = alloc_order,
};
INIT_LIST_HEAD(&cc.migratepages);
@@ -8515,17 +8520,42 @@ static int __alloc_contig_range(unsigned long start, unsigned long end,
}
/* Grab isolated pages from freelists. */
- outer_end = isolate_freepages_range(&cc, outer_start, end);
+ outer_end = isolate_freepages_range(&cc, outer_start, end,
+ freepage_list);
if (!outer_end) {
ret = -EBUSY;
goto done;
}
/* Free head and tail (if any) */
- if (start != outer_start)
- free_contig_range(outer_start, start - outer_start);
- if (end != outer_end)
- free_contig_range(end, outer_end - end);
+ if (start != outer_start) {
+ if (alloc_order == 0)
+ free_contig_range(outer_start, start - outer_start);
+ else {
+ list_for_each_entry_safe(page, page2,
+ freepage_list, lru) {
+ pfn = page_to_pfn(page);
+ if (pfn >= start)
+ break;
+ list_del(&page->lru);
+ __free_pages(page, alloc_order);
+ }
+ }
+ }
+ if (end != outer_end) {
+ if (alloc_order == 0)
+ free_contig_range(end, outer_end - end);
+ else {
+ list_for_each_entry_safe_reverse(page, page2,
+ freepage_list, lru) {
+ pfn = page_to_pfn(page);
+ if ((pfn + (1 << alloc_order)) <= end)
+ break;
+ list_del(&page->lru);
+ __free_pages(page, alloc_order);
+ }
+ }
+ }
done:
undo_isolate_page_range(pfn_max_align_down(start),
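
To make the head/tail trimming above concrete, a worked example with illustrative numbers (assuming alloc_order = 4, i.e. 16-page elements, and an isolated span that came back element-aligned):

	request:  [start = 1000, end = 2008)
	isolated: elements at pfn 992, 1008, ..., 1984, 2000

	head loop (forward):  pfn  992 <  start           -> __free_pages(), drop
	                      pfn 1008 >= start           -> break
	tail loop (reverse):  pfn 2000: 2000 + 16 >  end  -> __free_pages(), drop
	                      pfn 1984: 1984 + 16 <= end  -> break

Only whole elements are kept (here, pfn 1008 through 1984); an element straddling start or end is returned to the allocator rather than handed to the caller.
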
@@ -8558,8 +8588,61 @@ EXPORT_SYMBOL(alloc_contig_range);
int alloc_contig_range(unsigned long start, unsigned long end,
unsigned migratetype, gfp_t gfp_mask)
{
- return __alloc_contig_range(start, end, migratetype, gfp_mask);
+ LIST_HEAD(freepage_list);
+
+ return __alloc_contig_range(start, end, migratetype,
+ gfp_mask, 0, &freepage_list);
+}
+
+/**
+ * alloc_pages_bulk() -- tries to allocate high-order pages
+ * in batch from the given range [start, end)
+ * @start: start PFN to allocate
+ * @end: one-past-the-last PFN to allocate
+ * @migratetype: migratetype of the underlying pageblocks (either
+ * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
+ * in range must have the same migratetype and it must
+ * be either of the two.
+ * @gfp_mask: GFP mask to use during compaction
+ * @order: page order requested
+ * @nr_elem: the number of high-order pages to allocate
+ * @pages: page array pointer to store allocated pages (must
+ * have space for at least nr_elem elements)
+ *
+ * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
+ * aligned. The PFN range must belong to a single zone.
+ *
+ * Return: the number of requested-order pages allocated on success, or a
+ * negative error code. The allocated pages should be freed with __free_pages().
+ */
+int alloc_pages_bulk(unsigned long start, unsigned long end,
+ unsigned int migratetype, gfp_t gfp_mask,
+ unsigned int order, unsigned int nr_elem,
+ struct page **pages)
+{
+ int ret;
+ struct page *page, *page2;
+ LIST_HEAD(freepage_list);
+
+ if (order >= MAX_ORDER)
+ return -EINVAL;
+
+ ret = __alloc_contig_range(start, end, migratetype,
+ gfp_mask, order, &freepage_list);
+ if (ret)
+ return ret;
+
+ /* keep pfn ordering: freepage_list is sorted by ascending pfn */
+ list_for_each_entry_safe(page, page2, &freepage_list, lru) {
+ if (ret < nr_elem)
+ pages[ret++] = page;
+ else
+ __free_pages(page, order);
+ }
+
+ return ret;
}
+EXPORT_SYMBOL(alloc_pages_bulk);
static int __alloc_contig_pages(unsigned long start_pfn,
unsigned long nr_pages, gfp_t gfp_mask)

There is a need for special HW that requires bulk allocation of high-order
pages: for example, 4800 order-4 pages.

To meet the requirement, one option is to use a CMA area, because the page
allocator with compaction easily fails to satisfy such a request under
memory pressure and is too slow when called 4800 times. However, CMA has a
drawback of its own:

 * 4800 order-4 cma_alloc() calls are too slow

To avoid that slowness, we could allocate 300M of contiguous memory at once
and then split it into order-4 chunks. The problem with this approach is
that the whole CMA allocation fails if even one page in the range cannot be
migrated out, which happens easily with fs writes under memory pressure.

To solve these issues, this patch introduces alloc_pages_bulk:

    int alloc_pages_bulk(unsigned long start, unsigned long end,
                         unsigned int migratetype, gfp_t gfp_mask,
                         unsigned int order, unsigned int nr_elem,
                         struct page **pages);

It scans [start, end) and migrates movable pages out of the range on a
best-effort basis (to be improved by upcoming patches) to create free pages
of the requested order. The allocated pages are returned via the pages
parameter. The return value is the number of requested-order pages
allocated; it may be less than the nr_elem the caller requested.

/**
 * alloc_pages_bulk() -- tries to allocate high-order pages
 * in batch from the given range [start, end)
 * @start: start PFN to allocate
 * @end: one-past-the-last PFN to allocate
 * @migratetype: migratetype of the underlying pageblocks (either
 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
 * in range must have the same migratetype and it must
 * be either of the two.
 * @gfp_mask: GFP mask to use during compaction
 * @order: page order requested
 * @nr_elem: the number of high-order pages to allocate
 * @pages: page array pointer to store allocated pages (must
 * have space for at least nr_elem elements)
 *
 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
 * aligned. The PFN range must belong to a single zone.
 *
 * Return: the number of requested-order pages allocated on success, or a
 * negative error code. The allocated pages should be freed with __free_pages().
 */

The test performs 4800 order-4 allocations (i.e., 300MB in total) under a
kernel build workload. System RAM size is 1.5GB and the CMA area is 500M.
Using CMA to allocate the 300M, all 10 trials failed, with large latency
(up to several seconds). With the alloc_pages_bulk API, 7 out of 10 trials
allocated all 4800 pages; the remaining 3 allocated 4799, 4789 and 4799
pages. All trials completed within 300ms.

Signed-off-by: Minchan Kim <minchan@kernel.org>
---
 include/linux/gfp.h |  5 +++
 mm/compaction.c     | 11 +++--
 mm/internal.h       |  3 +-
 mm/page_alloc.c     | 97 +++++++++++++++++++++++++++++++++++++++++----
 4 files changed, 102 insertions(+), 14 deletions(-)
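
A note on partial success: as the 4799/4789 runs above show, the call can return fewer elements than requested, so callers should trust the return value rather than nr_elem. A hedged sketch (the pr_warn policy is illustrative, not prescribed by the patch):

	nr = alloc_pages_bulk(start, end, MIGRATE_MOVABLE, GFP_KERNEL,
			      4, nr_elem, pages);
	if (nr < 0)
		return nr;	/* whole-range failure, e.g. -EBUSY */
	if (nr < nr_elem)
		pr_warn("alloc_pages_bulk: got %d of %u elements\n",
			nr, nr_elem);
	/* only pages[0..nr-1] are valid; free each with __free_pages(page, 4) */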
There is a need for special HW to require bulk allocation of high-order pages. For example, 4800 * order-4 pages. To meet the requirement, a option is using CMA area because page allocator with compaction under memory pressure is easily failed to meet the requirement and too slow for 4800 times. However, CMA has also the following drawbacks: * 4800 of order-4 * cma_alloc is too slow To avoid the slowness, we could try to allocate 300M contiguous memory once and then split them into order-4 chunks. The problem of this approach is CMA allocation fails one of the pages in those range couldn't migrate out, which happens easily with fs write under memory pressure. To solve issues, this patch introduces alloc_pages_bulk. int alloc_pages_bulk(unsigned long start, unsigned long end, unsigned int migratetype, gfp_t gfp_mask, unsigned int order, unsigned int nr_elem, struct page **pages); It will investigate the [start, end) and migrate movable pages out there by best effort(by upcoming patches) to make requested order's free pages. The allocated pages will be returned using pages parameter. Return value represents how many of requested order pages we got. It could be less than user requested by nr_elem. /** * alloc_pages_bulk() -- tries to allocate high order pages * by batch from given range [start, end) * @start: start PFN to allocate * @end: one-past-the-last PFN to allocate * @migratetype: migratetype of the underlaying pageblocks (either * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks * in range must have the same migratetype and it must * be either of the two. * @gfp_mask: GFP mask to use during compaction * @order: page order requested * @nr_elem: the number of high-order pages to allocate * @pages: page array pointer to store allocated pages (must * have space for at least nr_elem elements) * * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES * aligned. The PFN range must belong to a single zone. * * Return: the number of pages allocated on success or negative error code. * The allocated pages should be freed using __free_pages */ The test goes order-4 * 4800 allocation(i.e., total 300MB) under kernel build workload. System RAM size is 1.5GB and CMA is 500M. With using CMA to allocate to 300M, ran 10 times trial, 10 time failed with big latency(up to several seconds). With this alloc_pages_bulk API, ran 10 time trial, 7 times are successful to allocate 4800 times. Rest 3 times are allocated 4799, 4789 and 4799. They are all done with 300ms. Signed-off-by: Minchan Kim <minchan@kernel.org> --- include/linux/gfp.h | 5 +++ mm/compaction.c | 11 +++-- mm/internal.h | 3 +- mm/page_alloc.c | 97 +++++++++++++++++++++++++++++++++++++++++---- 4 files changed, 102 insertions(+), 14 deletions(-)