@@ -1295,6 +1295,8 @@ static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
#endif /* CONFIG_SHRINKER_DEBUG */
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+extern atomic_t migrc_pause_cnt;
+
/*
* Reset the indicator indicating there are no writable mappings at the
* beginning of every rmap traverse for unmap. Migrc can work only when
@@ -1323,6 +1325,21 @@ static inline bool can_migrc_test(void)
return current->can_migrc && current->tlb_ubc_ro.flush_required;
}
+static inline void migrc_pause(void)
+{
+ atomic_inc(&migrc_pause_cnt);
+}
+
+static inline void migrc_resume(void)
+{
+ atomic_dec(&migrc_pause_cnt);
+}
+
+static inline bool migrc_paused(void)
+{
+ return !!atomic_read(&migrc_pause_cnt);
+}
+
/*
* Return the number of folios pending TLB flush that have yet to get
* freed in the zone.
@@ -1340,6 +1357,9 @@ bool migrc_flush_free_folios(void);
static inline void can_migrc_init(void) {}
static inline void can_migrc_fail(void) {}
static inline bool can_migrc_test(void) { return false; }
+static inline void migrc_pause(void) {}
+static inline void migrc_resume(void) {}
+static inline bool migrc_paused(void) { return false; }
static inline int migrc_pending_nr_in_zone(struct zone *z) { return 0; }
static inline bool migrc_flush_free_folios(void) { return false; }
#endif
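
For illustration, here is a minimal userspace sketch of the counter
semantics the helpers above implement, using C11 atomics in place of the
kernel's atomic_t; the function names are hypothetical stand-ins, not
kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int pause_cnt;                 /* models migrc_pause_cnt */

static void pause_one(void)  { atomic_fetch_add(&pause_cnt, 1); }
static void resume_one(void) { atomic_fetch_sub(&pause_cnt, 1); }
static bool is_paused(void)  { return atomic_load(&pause_cnt) != 0; }

int main(void)
{
	pause_one();                    /* first allocator under pressure */
	pause_one();                    /* a second, concurrent allocator */
	resume_one();                   /* the first one exits */
	printf("%d\n", is_paused());    /* 1: one pauser still active */
	resume_one();                   /* the second one exits */
	printf("%d\n", is_paused());    /* 0: fully resumed */
	return 0;
}

A counter rather than a boolean flag lets concurrent direct reclaimers
pause and resume independently, so one task's resume cannot prematurely
re-enable migrc while another is still under pressure.
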
@@ -62,6 +62,12 @@ static struct tlbflush_unmap_batch migrc_ubc;
static LIST_HEAD(migrc_folios);
static DEFINE_SPINLOCK(migrc_lock);
+/*
+ * Incremented on entry to high memory pressure handling, e.g. direct
+ * reclaim, and decremented on exit. See __alloc_pages_slowpath().
+ */
+atomic_t migrc_pause_cnt = ATOMIC_INIT(0);
+
/*
* Need to synchronize between TLB flush and managing pending CPUs in
* migrc_ubc. Take a look at the following scenario:
@@ -1892,6 +1898,7 @@ static int migrate_pages_batch(struct list_head *from,
*/
init_tlb_ubc(&pending_ubc);
do_migrc = (reason == MR_DEMOTION || reason == MR_NUMA_MISPLACED);
+ do_migrc = do_migrc && !migrc_paused();
for (pass = 0; pass < nr_pass && retry; pass++) {
retry = 0;
@@ -1930,6 +1937,15 @@ static int migrate_pages_batch(struct list_head *from,
continue;
}
+ /*
+ * If the system has come under high memory
+ * pressure, give up on the migrc mechanism for
+ * this turn.
+ */
+ if (unlikely(do_migrc && migrc_paused())) {
+ fold_ubc(tlb_ubc, &pending_ubc);
+ do_migrc = false;
+ }
+
can_migrc_init();
rc = migrate_folio_unmap(get_new_folio, put_new_folio,
private, folio, &dst, mode, reason,
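
The mid-loop bail-out above can be sketched in isolation. Assuming
hypothetical stand-ins for the kernel's tlbflush_unmap_batch handling,
the idea is: once a pause is observed, fold the privately batched work
back into the caller's batch exactly once, then stop batching for the
rest of the pass:

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical stand-in for struct tlbflush_unmap_batch. */
struct ubc { unsigned long nr_pending; };

static atomic_int pause_cnt;                 /* models migrc_pause_cnt */

static void fold(struct ubc *dst, struct ubc *src)
{
	dst->nr_pending += src->nr_pending;  /* merge and drain the source */
	src->nr_pending = 0;
}

static void batch_pass(struct ubc *tlb_ubc, int nr_folios, bool do_batch)
{
	struct ubc pending = { 0 };

	for (int i = 0; i < nr_folios; i++) {
		/* Pause raised mid-pass: hand queued work back, once. */
		if (do_batch && atomic_load(&pause_cnt)) {
			fold(tlb_ubc, &pending);
			do_batch = false;
		}
		if (do_batch)
			pending.nr_pending++;    /* defer via migrc */
		else
			tlb_ubc->nr_pending++;   /* normal flush path */
	}
	if (do_batch)
		fold(tlb_ubc, &pending);  /* hand off the batch (simplified) */
}

Folding exactly once matters: the private batch may already hold flush
state for earlier folios in the pass, and dropping it instead of folding
it back would leave stale TLB entries unflushed.
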
@@ -4072,6 +4072,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
unsigned int cpuset_mems_cookie;
unsigned int zonelist_iter_cookie;
int reserve_flags;
+ bool migrc_paused = false;
restart:
compaction_retries = 0;
@@ -4203,6 +4204,16 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
if (page)
goto got_pg;
+ /*
+ * The system is under very high memory pressure. Temporarily
+ * pause migrc so its pending queue stops growing.
+ */
+ if (!migrc_paused) {
+ migrc_pause();
+ migrc_paused = true;
+ migrc_flush_free_folios();
+ }
+
/* Caller is not willing to reclaim, we can't balance anything */
if (!can_direct_reclaim)
goto nopage;
@@ -4330,6 +4341,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
warn_alloc(gfp_mask, ac->nodemask,
"page allocation failure: order:%u", order);
got_pg:
+ if (migrc_paused)
+ migrc_resume();
return page;
}
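
The bracketing in __alloc_pages_slowpath() reduces to the following
shape; try_alloc(), should_restart() and the stub migrc hooks are
hypothetical stand-ins for the real code paths:

#include <stdbool.h>
#include <stddef.h>

struct page;                                  /* opaque, as in the kernel */

/* Hypothetical stand-ins for the allocator and the migrc hooks. */
static struct page *try_alloc(void)           { return NULL; }
static bool should_restart(void)              { return false; }
static void migrc_pause(void)                 { }
static void migrc_resume(void)                { }
static bool migrc_flush_free_folios(void)     { return false; }

static struct page *slowpath_shape(void)
{
	struct page *page;
	bool migrc_paused = false;

restart:
	page = try_alloc();
	if (page)
		goto got_pg;

	if (!migrc_paused) {                  /* pause only once per call */
		migrc_pause();
		migrc_paused = true;
		migrc_flush_free_folios();    /* drain the pending queue */
	}

	if (should_restart())
		goto restart;                 /* no second pause on retry */
got_pg:
	if (migrc_paused)                     /* balance the earlier pause */
		migrc_resume();
	return page;
}

Guarding the pause with a local flag makes it idempotent across the
restart loop and guarantees exactly one migrc_resume() per
migrc_pause(), whichever exit path reaches got_pg.
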
A regression was observed when the system is under high memory pressure
with swap on: migrc might keep a number of folios in its pending queue,
which could make the pressure worse. So temporarily prevent migrc from
working in that condition.

Signed-off-by: Byungchul Park <byungchul@sk.com>
---
 mm/internal.h   | 20 ++++++++++++++++++++
 mm/migrate.c    | 16 ++++++++++++++++
 mm/page_alloc.c | 13 +++++++++++++
 3 files changed, 49 insertions(+)