[RFC,3/7] mm: compaction: deal with upcoming high-order page splitting

Message ID: 20200814173131.2803002-4-minchan@kernel.org
State: New, archived
Series: Support high-order page bulk allocation

Commit Message

Minchan Kim Aug. 14, 2020, 5:31 p.m. UTC
When compaction isolates free pages, it needs to consider the order of
the freed pages and split them into sub-pages to support the upcoming
high-order page bulk allocation. Since we already have primitive
functions to deal with high-order page splitting, this patch introduces
cc->isolate_order to indicate what order of pages the API user wants to
allocate. Compaction isolates free pages whose order is greater than or
equal to cc->isolate_order and, after isolation, splits them into
sub-pages of order cc->isolate_order.

Signed-off-by: Minchan Kim <minchan@kernel.org>
---
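To make the new split arithmetic concrete, here is a minimal userspace
sketch (illustration only; not kernel code and not part of the diff
below). split_map_model() is a hypothetical helper that mirrors the
naming of split_map_pages(), with a bare pfn standing in for real
struct page handling. With cc->isolate_order == 4, an isolated order-9
buddy becomes 1 << (9 - 4) == 32 order-4 sub-pages:

#include <stdio.h>

/* Userspace sketch of the split arithmetic in split_map_pages(): an
 * isolated free page of 'order' is carved into order-'split_order'
 * sub-pages, each starting 1 << split_order base pages apart.
 */
static void split_map_model(unsigned long pfn, unsigned int order,
			    unsigned int split_order)
{
	unsigned int i, nr_pages = 1U << (order - split_order);

	for (i = 0; i < nr_pages; i++) {
		printf("sub-page %u: pfn %lu, order %u\n",
		       i, pfn, split_order);
		pfn += 1UL << split_order;	/* step over one sub-page */
	}
}

int main(void)
{
	/* e.g. an order-9 block split for cc->isolate_order == 4 */
	split_map_model(4096, 9, 4);
	return 0;
}
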
 mm/compaction.c | 42 ++++++++++++++++++++++++++++--------------
 mm/internal.h   |  1 +
 2 files changed, 29 insertions(+), 14 deletions(-)

Patch

diff --git a/mm/compaction.c b/mm/compaction.c
index f31799a841f2..76f380cb801d 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -68,7 +68,8 @@  static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500;
 #define COMPACTION_HPAGE_ORDER	(PMD_SHIFT - PAGE_SHIFT)
 #endif
 
-static unsigned long release_freepages(struct list_head *freelist)
+static unsigned long release_freepages(struct list_head *freelist,
+					unsigned int order)
 {
 	struct page *page, *next;
 	unsigned long high_pfn = 0;
@@ -76,7 +77,7 @@  static unsigned long release_freepages(struct list_head *freelist)
 	list_for_each_entry_safe(page, next, freelist, lru) {
 		unsigned long pfn = page_to_pfn(page);
 		list_del(&page->lru);
-		__free_page(page);
+		__free_pages(page, order);
 		if (pfn > high_pfn)
 			high_pfn = pfn;
 	}
@@ -84,7 +85,7 @@  static unsigned long release_freepages(struct list_head *freelist)
 	return high_pfn;
 }
 
-static void split_map_pages(struct list_head *list)
+static void split_map_pages(struct list_head *list, unsigned int split_order)
 {
 	unsigned int i, order, nr_pages;
 	struct page *page, *next;
@@ -94,15 +95,15 @@  static void split_map_pages(struct list_head *list)
 		list_del(&page->lru);
 
 		order = page_private(page);
-		nr_pages = 1 << order;
+		nr_pages = 1 << (order - split_order);
 
 		post_alloc_hook(page, order, __GFP_MOVABLE);
-		if (order)
-			split_page_by_order(page, order, 0);
+		if (order > split_order)
+			split_page_by_order(page, order, split_order);
 
 		for (i = 0; i < nr_pages; i++) {
 			list_add(&page->lru, &tmp_list);
-			page++;
+			page += 1 << split_order;
 		}
 	}
 
@@ -547,8 +548,10 @@  static bool compact_unlock_should_abort(spinlock_t *lock,
 }
 
 /*
- * Isolate free pages onto a private freelist. If @strict is true, will abort
- * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
+ * Isolate free pages onto a private freelist if the page's order is
+ * greater than or equal to cc->isolate_order. If @strict is true, will
+ * abort returning 0 on any invalid PFNs, pages with order lower than
+ * cc->isolate_order, or non-free pages inside of the pageblock
  * (even though it may still end up isolating some pages).
  */
 static unsigned long isolate_freepages_block(struct compact_control *cc,
@@ -625,8 +628,19 @@  static unsigned long isolate_freepages_block(struct compact_control *cc,
 				goto isolate_fail;
 		}
 
-		/* Found a free page, will break it into order-0 pages */
+		/*
+		 * Found a free page. Isolate and possibly split it into
+		 * isolate_order sub-pages if its order is greater than or
+		 * equal to cc->isolate_order. Otherwise, skip it and keep
+		 * scanning further pages to isolate, unless strict is
+		 * true.
+		 */
 		order = page_order(page);
+		if (order < cc->isolate_order) {
+			blockpfn += (1UL << order) - 1;
+			cursor += (1UL << order) - 1;
+			goto isolate_fail;
+		}
 		isolated = __isolate_free_page(page, order);
 		if (!isolated)
 			break;
@@ -752,11 +766,11 @@  isolate_freepages_range(struct compact_control *cc,
 	}
 
 	/* __isolate_free_page() does not map the pages */
-	split_map_pages(&freelist);
+	split_map_pages(&freelist, cc->isolate_order);
 
 	if (pfn < end_pfn) {
 		/* Loop terminated early, cleanup. */
-		release_freepages(&freelist);
+		release_freepages(&freelist, cc->isolate_order);
 		return 0;
 	}
 
@@ -1564,7 +1578,7 @@  static void isolate_freepages(struct compact_control *cc)
 
 splitmap:
 	/* __isolate_free_page() does not map the pages */
-	split_map_pages(freelist);
+	split_map_pages(freelist, 0);
 }
 
 /*
@@ -2376,7 +2390,7 @@  compact_zone(struct compact_control *cc, struct capture_control *capc)
 	 * so we don't leave any returned pages behind in the next attempt.
 	 */
 	if (cc->nr_freepages > 0) {
-		unsigned long free_pfn = release_freepages(&cc->freepages);
+		unsigned long free_pfn = release_freepages(&cc->freepages, 0);
 
 		cc->nr_freepages = 0;
 		VM_BUG_ON(free_pfn == 0);
diff --git a/mm/internal.h b/mm/internal.h
index 10c677655912..5f1e9d76a623 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -244,6 +244,7 @@  struct compact_control {
 	bool contended;			/* Signal lock or sched contention */
 	bool rescan;			/* Rescanning the same pageblock */
 	bool alloc_contig;		/* alloc_contig_range allocation */
+	int isolate_order;		/* minimum order isolated from buddy */
 };
 
 /*
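
As a closing illustration of the isolate_freepages_block() change
above: when the scanner hits a free buddy smaller than
cc->isolate_order, it advances blockpfn and cursor by
(1UL << order) - 1 and rejects the page, relying on the enclosing
for-loop's increment for the final step. Below is a simplified,
hypothetical userspace model of that scan (the free-block map is
contrived, and order-0 buddies are omitted so that 0 can mean "not a
buddy head"):

#include <stdio.h>

#define NPFNS 32

int main(void)
{
	/* Contrived map: order_of[pfn] > 0 marks the head of a free
	 * buddy of that order. One order-1 buddy at pfn 4, one order-3
	 * at pfn 8, one order-2 at pfn 20.
	 */
	int order_of[NPFNS] = { [4] = 1, [8] = 3, [20] = 2 };
	unsigned int isolate_order = 2;
	unsigned long pfn;

	for (pfn = 0; pfn < NPFNS; pfn++) {
		int order = order_of[pfn];

		if (!order)
			continue;	/* not the head of a free buddy */

		if ((unsigned int)order < isolate_order) {
			/* Mirror the patch: skip the whole buddy; the
			 * loop's pfn++ supplies the final +1.
			 */
			pfn += (1UL << order) - 1;
			continue;
		}
		printf("isolate pfn %lu, order %d\n", pfn, order);
		pfn += (1UL << order) - 1;	/* past the isolated buddy */
	}
	return 0;
}

The model deliberately repeats the patch's "minus one" convention in
both arms, since the surrounding loop increments pfn exactly as
isolate_freepages_block() increments blockpfn and cursor: the order-1
buddy at pfn 4 is stepped over, while the order-3 and order-2 buddies
are reported as isolated.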