
[4/5] mm: compaction: remove unnecessary is_via_compact_memory() checks

Message ID 20230519123959.77335-5-hannes@cmpxchg.org (mailing list archive)
State New
Series mm: compaction: cleanups & simplifications

Commit Message

Johannes Weiner May 19, 2023, 12:39 p.m. UTC
Remove the is_via_compact_memory() checks from all paths that are not
reachable via /proc/sys/vm/compact_memory.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
 mm/compaction.c | 11 +----------
 mm/vmscan.c     |  8 +-------
 2 files changed, 2 insertions(+), 17 deletions(-)
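
For reference: is_via_compact_memory() is the small mm/compaction.c helper
that tests for the magic order value passed down from the sysctl handler
(the sc->order == -1 comments in the vmscan.c hunks below refer to the same
convention). It looks roughly like this:

    /*
     * order == -1 is expected when compacting via
     * /proc/sys/vm/compact_memory
     */
    static inline bool is_via_compact_memory(int order)
    {
            return order == -1;
    }

Since none of the paths touched here can be entered with order == -1, the
checks never fire and can be dropped.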

Comments

Vlastimil Babka May 29, 2023, 5:12 p.m. UTC | #1
On 5/19/23 14:39, Johannes Weiner wrote:
> Remove the is_via_compact_memory() checks from all paths that are not
> reachable via /proc/sys/vm/compact_memory.
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

Patch

diff --git a/mm/compaction.c b/mm/compaction.c
index 8f61cfa87c49..83ecbc829c62 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2293,9 +2293,6 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
 		unsigned long available;
 		unsigned long watermark;
 
-		if (is_via_compact_memory(order))
-			return true;
-
 		/* Allocation can already succeed, nothing to do */
 		watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 		if (zone_watermark_ok(zone, order, watermark,
@@ -2850,9 +2847,6 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
 		if (!populated_zone(zone))
 			continue;
 
-		if (is_via_compact_memory(pgdat->kcompactd_max_order))
-			return true;
-
 		/* Allocation can already succeed, check other zones */
 		if (zone_watermark_ok(zone, pgdat->kcompactd_max_order,
 				      min_wmark_pages(zone),
@@ -2897,9 +2891,6 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 		if (compaction_deferred(zone, cc.order))
 			continue;
 
-		if (is_via_compact_memory(cc.order))
-			goto compact;
-
 		/* Allocation can already succeed, nothing to do */
 		if (zone_watermark_ok(zone, cc.order,
 				      min_wmark_pages(zone), zoneid, 0))
@@ -2908,7 +2899,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 		if (compaction_suitable(zone, cc.order,
 					zoneid) != COMPACT_CONTINUE)
 			continue;
-compact:
+
 		if (kthread_should_stop())
 			return;
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c9c0f3e081f5..c0cfa9b86b48 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -6397,9 +6397,6 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
 		if (!managed_zone(zone))
 			continue;
 
-		if (sc->order == -1) /* is_via_compact_memory() */
-			return false;
-
 		/* Allocation can already succeed, nothing to do */
 		if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
 				      sc->reclaim_idx, 0))
@@ -6596,9 +6593,6 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 {
 	unsigned long watermark;
 
-	if (sc->order == -1) /* is_via_compact_memory() */
-		goto suitable;
-
 	/* Allocation can already succeed, nothing to do */
 	if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
 			      sc->reclaim_idx, 0))
@@ -6608,7 +6602,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 	if (compaction_suitable(zone, sc->order,
 				sc->reclaim_idx) == COMPACT_SKIPPED)
 		return false;
-suitable:
+
 	/*
 	 * Compaction is already possible, but it takes time to run and there
 	 * are potentially other callers using the pages just freed. So proceed