[RFC,16/26] mm: compaction: improve compaction_suitable() accuracy

Message ID: 20230418191313.268131-17-hannes@cmpxchg.org (mailing list archive)
State: New
Series: mm: reliable huge page allocator

Commit Message

Johannes Weiner April 18, 2023, 7:13 p.m. UTC
With the new per-migratetype free counts, compaction can check the
watermarks specifically against suitable migration targets. This
ensures reclaim keeps going when the free pages are in blocks that
aren't actually suitable migration targets: MIGRATE_FREE, UNMOVABLE,
RECLAIMABLE.
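
As a sketch of the idea (illustration only, not part of the patch):
the watermark inputs switch from the zone-wide NR_FREE_PAGES to the
sum of the counters for blocks that compaction can migrate into.
NR_FREE_MOVABLE is the per-migratetype counter introduced earlier in
this series; the helper name below is made up:

	/*
	 * Illustrative only: the free pages usable as compaction
	 * migration targets are the ones sitting in MOVABLE and
	 * CMA blocks.
	 */
	static unsigned long zone_suitable_free_pages(struct zone *zone)
	{
		return zone_page_state(zone, NR_FREE_MOVABLE) +
		       zone_page_state(zone, NR_FREE_CMA_PAGES);
	}

compaction_suitable() feeds this sum into the watermark check in
__compaction_suitable(); compaction_zonelist_suitable() and
compaction_ready() compute the same sum via zone_page_state_snapshot().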

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
 mm/compaction.c | 23 +++++++++++++++--------
 mm/vmscan.c     |  7 +++++--
 2 files changed, 20 insertions(+), 10 deletions(-)

Patch

diff --git a/mm/compaction.c b/mm/compaction.c
index b9eed0d43403..f637b4ed7f3c 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2226,11 +2226,17 @@  enum compact_result compaction_suitable(struct zone *zone, int order,
 					unsigned int alloc_flags,
 					int highest_zoneidx)
 {
+	unsigned long free_pages;
 	enum compact_result ret;
 	int fragindex;
 
-	ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx,
-				    zone_page_state(zone, NR_FREE_PAGES));
+	/* Suitable migration targets */
+	free_pages = zone_page_state(zone, NR_FREE_MOVABLE);
+	free_pages += zone_page_state(zone, NR_FREE_CMA_PAGES);
+
+	ret = __compaction_suitable(zone, order, alloc_flags,
+				    highest_zoneidx, free_pages);
+
 	/*
 	 * fragmentation index determines if allocation failures are due to
 	 * low memory or external fragmentation
@@ -2273,19 +2279,20 @@  bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
 				ac->highest_zoneidx, ac->nodemask) {
 		unsigned long available;
-		enum compact_result compact_result;
 
+		available = zone_page_state_snapshot(zone, NR_FREE_MOVABLE);
+		available += zone_page_state_snapshot(zone, NR_FREE_CMA_PAGES);
 		/*
 		 * Do not consider all the reclaimable memory because we do not
 		 * want to trash just for a single high order allocation which
 		 * is even not guaranteed to appear even if __compaction_suitable
 		 * is happy about the watermark check.
 		 */
-		available = zone_reclaimable_pages(zone) / order;
-		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
-		compact_result = __compaction_suitable(zone, order, alloc_flags,
-				ac->highest_zoneidx, available);
-		if (compact_result == COMPACT_CONTINUE)
+		available += zone_reclaimable_pages(zone) / order;
+
+		if (__compaction_suitable(zone, order, alloc_flags,
+					  ac->highest_zoneidx,
+					  available) == COMPACT_CONTINUE)
 			return true;
 	}
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5b7b8d4f5297..9ecf29f4dab8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -6270,6 +6270,7 @@  static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 {
 	unsigned long watermark;
+	unsigned long free_pages;
 	enum compact_result suitable;
 
 	suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
@@ -6290,8 +6291,10 @@  static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 	 * we are already above the high+gap watermark, don't reclaim at all.
 	 */
 	watermark = high_wmark_pages(zone) + compact_gap(sc->order);
-
-	return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
+	free_pages = zone_page_state_snapshot(zone, NR_FREE_MOVABLE);
+	free_pages += zone_page_state_snapshot(zone, NR_FREE_CMA_PAGES);
+	return __zone_watermark_ok(zone, 0, watermark, sc->reclaim_idx,
+				   ALLOC_CMA, free_pages);
 }
 
 static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc)