
[RFC,6/7] mm: make alloc_pages_bulk best effort

Message ID: 20200814173131.2803002-7-minchan@kernel.org (mailing list archive)
State: New, archived
Series: Support high-order page bulk allocation

Commit Message

Minchan Kim Aug. 14, 2020, 5:31 p.m. UTC
alloc_pages_bulk takes a best-effort approach to producing high-order
pages, so it should keep scanning the rest of the range even when it
encounters non-movable pages. To achieve this, this patch introduces
an ALLOW_ISOLATE_FAILURE flag for start_isolate_page_range and an
alloc_bulk field in compact_control, so the work can proceed with the
rest of the range even if page isolation, migration, or free page
isolation fails partway through.

With the new flag, alloc_pages_bulk (see the caller sketch after the
diffstat):
 * skips a pageblock if its migratetype cannot be changed to
   MIGRATE_ISOLATE
 * skips a pageblock if a page in it could not be migrated for some
   reason
 * skips a pageblock if free pages in it could not be isolated for
   some reason

Signed-off-by: Minchan Kim <minchan@kernel.org>
---
 include/linux/page-isolation.h |  1 +
 mm/compaction.c                | 17 +++++++++++++----
 mm/internal.h                  |  1 +
 mm/page_alloc.c                | 32 +++++++++++++++++++++++---------
 mm/page_isolation.c            |  4 ++++
 5 files changed, 42 insertions(+), 13 deletions(-)
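
To make the best-effort contract concrete, here is a minimal caller
sketch. It is illustration only, not part of the patch: the exact
alloc_pages_bulk() signature is defined earlier in this series, and the
parameter list and bulk_alloc_demo() wrapper shown here are assumptions.

/*
 * Hypothetical caller (illustration only): harvest as many order-4
 * pages as possible from [start_pfn, end_pfn). With this patch,
 * pageblocks that cannot be isolated or migrated are skipped instead
 * of failing the whole request.
 */
static int bulk_alloc_demo(unsigned long start_pfn, unsigned long end_pfn)
{
	LIST_HEAD(pages);
	struct page *page, *tmp;
	int ret;

	/* Parameters after start/end are assumed for illustration. */
	ret = alloc_pages_bulk(start_pfn, end_pfn, MIGRATE_MOVABLE,
			       GFP_KERNEL, 4, &pages);
	if (ret)
		return ret;

	list_for_each_entry_safe(page, tmp, &pages, lru) {
		list_del(&page->lru);
		/* ... hand the order-4 page to its consumer ... */
	}
	return 0;
}

Because the allocation is best effort, the caller gets back however
many order-4 pages the range could supply, possibly none, rather than
an all-or-nothing result.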

Patch

diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 572458016331..b8b6789d2bd9 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -32,6 +32,7 @@  static inline bool is_migrate_isolate(int migratetype)
 
 #define MEMORY_OFFLINE	0x1
 #define REPORT_FAILURE	0x2
+#define ALLOW_ISOLATE_FAILURE	0x4
 
 struct page *has_unmovable_pages(struct zone *zone, struct page *page,
 				 int migratetype, int flags);
diff --git a/mm/compaction.c b/mm/compaction.c
index 1e4392f6fec3..94dee139ce0d 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -748,15 +748,24 @@  isolate_freepages_range(struct compact_control *cc,
 			break;
 
 		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
-					block_end_pfn, freepage_list, 0, true);
+					block_end_pfn, freepage_list,
+					cc->alloc_bulk ? 1 : 0,
+					cc->alloc_bulk ? false : true);
 
 		/*
 		 * In strict mode, isolate_freepages_block() returns 0 if
 		 * there are any holes in the block (ie. invalid PFNs or
-		 * non-free pages).
+		 * non-free pages), so just stop the isolation in that
+		 * case. However, in alloc_bulk mode, we can keep scanning
+		 * the remaining range for suitable high-order free pages,
+		 * so move on to the next pageblock.
 		 */
-		if (!isolated)
-			break;
+		if (!isolated) {
+			if (!cc->alloc_bulk)
+				break;
+			pfn = block_end_pfn;
+			continue;
+		}
 
 		/*
 		 * If we managed to isolate pages, it is always (1 << n) *
diff --git a/mm/internal.h b/mm/internal.h
index f9b86257fae2..71f00284326e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -244,6 +244,7 @@  struct compact_control {
 	bool contended;			/* Signal lock or sched contention */
 	bool rescan;			/* Rescanning the same pageblock */
 	bool alloc_contig;		/* alloc_contig_range allocation */
+	bool alloc_bulk;		/* alloc_pages_bulk allocation */
 	int isolate_order;		/* minimum order isolated from buddy */
 };
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cdf956feae80..66cea47ae2b6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8359,8 +8359,8 @@  static int __alloc_contig_migrate_range(struct compact_control *cc,
 	/* This function is based on compact_zone() from compaction.c. */
 	unsigned int nr_reclaimed;
 	unsigned long pfn = start;
-	unsigned int tries = 0;
-	int ret = 0;
+	unsigned int tries;
+	int ret;
 	struct migration_target_control mtc = {
 		.nid = zone_to_nid(cc->zone),
 		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
@@ -8368,6 +8368,8 @@  static int __alloc_contig_migrate_range(struct compact_control *cc,
 
 	migrate_prep();
 
+next:
+	tries = ret = 0;
 	while (pfn < end || !list_empty(&cc->migratepages)) {
 		if (fatal_signal_pending(current)) {
 			ret = -EINTR;
@@ -8396,15 +8398,25 @@  static int __alloc_contig_migrate_range(struct compact_control *cc,
 	}
 	if (ret < 0) {
 		putback_movable_pages(&cc->migratepages);
-		return ret;
+		if (cc->alloc_bulk && pfn < end) {
+			/*
+			 * -EINTR means the current process has a fatal
+			 * signal pending. -ENOMEM means there is no free
+			 * memory. In either case, stop trying to work on
+			 * further blocks.
+			 */
+			if (ret != -EINTR && ret != -ENOMEM)
+				goto next;
+		}
 	}
-	return 0;
+	return ret;
 }
 
 static int __alloc_contig_range(unsigned long start, unsigned long end,
 		       unsigned int migratetype, gfp_t gfp_mask,
 		       unsigned int alloc_order,
-		       struct list_head *freepage_list)
+		       struct list_head *freepage_list,
+		       bool alloc_bulk)
 {
 	unsigned long outer_start, outer_end;
 	unsigned int order;
@@ -8422,6 +8434,7 @@  static int __alloc_contig_range(unsigned long start, unsigned long end,
 		.gfp_mask = current_gfp_context(gfp_mask),
 		.alloc_contig = true,
 		.isolate_order = alloc_order,
+		.alloc_bulk = alloc_bulk,
 	};
 	INIT_LIST_HEAD(&cc.migratepages);
 
@@ -8450,7 +8463,8 @@  static int __alloc_contig_range(unsigned long start, unsigned long end,
 	 */
 
 	ret = start_isolate_page_range(pfn_max_align_down(start),
-				       pfn_max_align_up(end), migratetype, 0);
+				       pfn_max_align_up(end), migratetype,
+				       alloc_bulk ? ALLOW_ISOLATE_FAILURE : 0);
 	if (ret < 0)
 		return ret;
 
@@ -8512,7 +8526,7 @@  static int __alloc_contig_range(unsigned long start, unsigned long end,
 	}
 
 	/* Make sure the range is really isolated. */
-	if (test_pages_isolated(outer_start, end, 0)) {
+	if (!alloc_bulk && test_pages_isolated(outer_start, end, 0)) {
 		pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
 			__func__, outer_start, end);
 		ret = -EBUSY;
@@ -8591,7 +8605,7 @@  int alloc_contig_range(unsigned long start, unsigned long end,
 	LIST_HEAD(freepage_list);
 
 	return __alloc_contig_range(start, end, migratetype,
-			gfp_mask, 0, &freepage_list);
+			gfp_mask, 0, &freepage_list, false);
 }
 
 /**
@@ -8628,7 +8642,7 @@  int alloc_pages_bulk(unsigned long start, unsigned long end,
 		return -EINVAL;
 
 	ret = __alloc_contig_range(start, end, migratetype,
-				gfp_mask, order, &freepage_list);
+				gfp_mask, order, &freepage_list, true);
 	if (ret)
 		return ret;
 
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 242c03121d73..6208db89a31b 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -154,6 +154,8 @@  __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  *					 and PageOffline() pages.
  *			REPORT_FAILURE - report details about the failure to
  *			isolate the range
+ *			ALLOW_ISOLATE_FAILURE - skip any pageblock in the
+ *			range that cannot be set to MIGRATE_ISOLATE
  *
  * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
  * the range will never be allocated. Any free pages and pages freed in the
@@ -190,6 +192,8 @@  int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 		page = __first_valid_page(pfn, pageblock_nr_pages);
 		if (page) {
 			if (set_migratetype_isolate(page, migratetype, flags)) {
+				if (flags & ALLOW_ISOLATE_FAILURE)
+					continue;
 				undo_pfn = pfn;
 				goto undo;
 			}
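
A note on the isolation-layer contract this patch creates: with
ALLOW_ISOLATE_FAILURE set, start_isolate_page_range() skips pageblocks
it cannot switch to MIGRATE_ISOLATE instead of unwinding the whole
range, so a caller must not assume full coverage. A minimal sketch of
such a best-effort caller follows; isolate_best_effort() is
hypothetical, while the two page-isolation calls are real:

/*
 * Hypothetical caller (illustration only): isolate as much of
 * [start_pfn, end_pfn) as possible. Pageblocks that fail isolation
 * are silently skipped, so use test_pages_isolated() on any subrange
 * that needs a hard guarantee.
 */
static int isolate_best_effort(unsigned long start_pfn,
			       unsigned long end_pfn)
{
	int ret;

	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE,
				       ALLOW_ISOLATE_FAILURE);
	if (ret)
		return ret;

	/* ... migrate and claim pages from the isolated blocks ... */

	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	return 0;
}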