[v5,7/9] mm/demotion: Demote pages according to allocation fallback order

Message ID 20220603134237.131362-8-aneesh.kumar@linux.ibm.com (mailing list archive)
State New
Series mm/demotion: Memory tiers and demotion

Commit Message

Aneesh Kumar K.V June 3, 2022, 1:42 p.m. UTC
From: Jagdish Gediya <jvgediya@linux.ibm.com>

Currently, a higher tier node can only be demoted to selected
nodes on the next lower tier, as defined by the demotion path,
not to any other node from any lower tier.  This strict, hard-coded
demotion order does not work in all use cases (e.g. some use cases
may want to allow cross-socket demotion to another node in the same
demotion tier as a fallback when the preferred demotion node is out
of space). This demotion order is also inconsistent with the page
allocation fallback order when all the nodes in a higher tier are
out of space: The page allocation can fall back to any node from any
lower tier, whereas the demotion order doesn't allow that currently.

This patch adds support for fetching the mask of all allowed
demotion targets for a node.  The demote_page_list() function is
also modified to use this allowed node mask by filling it in the
migration_target_control structure before passing that structure
to migrate_pages().
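
For reference, the resulting call pattern in demote_page_list() looks
roughly like this (a condensed sketch of the mm/vmscan.c hunk below;
the list-empty and NUMA_NO_NODE checks and the vmstat accounting are
omitted):

	int target_nid = next_demotion_node(pgdat->node_id);
	unsigned int nr_succeeded;
	nodemask_t allowed_mask;
	struct migration_target_control mtc = {
		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
			    __GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT,
		.nid = target_nid,	/* preferred demotion node */
		.nmask = &allowed_mask,	/* fallback: all lower-tier nodes */
	};

	node_get_allowed_targets(pgdat->node_id, &allowed_mask);
	migrate_pages(demote_pages, alloc_migration_target, NULL,
		      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
		      &nr_succeeded);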

Signed-off-by: Jagdish Gediya <jvgediya@linux.ibm.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 include/linux/memory-tiers.h |  5 ++++
 mm/memory-tiers.c            | 49 ++++++++++++++++++++++++++++++++++--
 mm/vmscan.c                  | 38 +++++++++++++---------------
 3 files changed, 70 insertions(+), 22 deletions(-)

Patch

diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
index 79bd8d26feb2..cd6e71f702ad 100644
--- a/include/linux/memory-tiers.h
+++ b/include/linux/memory-tiers.h
@@ -21,6 +21,7 @@  void node_remove_from_memory_tier(int node);
 int node_get_memory_tier_id(int node);
 int node_set_memory_tier(int node, int tier);
 int node_reset_memory_tier(int node, int tier);
+void node_get_allowed_targets(int node, nodemask_t *targets);
 #else
 #define numa_demotion_enabled	false
 static inline int next_demotion_node(int node)
@@ -28,6 +29,10 @@  static inline int next_demotion_node(int node)
 	return NUMA_NO_NODE;
 }
 
+static inline void node_get_allowed_targets(int node, nodemask_t *targets)
+{
+	*targets = NODE_MASK_NONE;
+}
 #endif	/* CONFIG_TIERED_MEMORY */
 
 #endif
diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
index b4e72b672d4d..592d939ec28d 100644
--- a/mm/memory-tiers.c
+++ b/mm/memory-tiers.c
@@ -18,6 +18,7 @@  struct memory_tier {
 
 struct demotion_nodes {
 	nodemask_t preferred;
+	nodemask_t allowed;
 };
 
 #define to_memory_tier(device) container_of(device, struct memory_tier, dev)
@@ -378,6 +379,25 @@  int node_set_memory_tier(int node, int tier)
 }
 EXPORT_SYMBOL_GPL(node_set_memory_tier);
 
+void node_get_allowed_targets(int node, nodemask_t *targets)
+{
+	/*
+	 * node_demotion[] is updated without excluding this
+	 * function from running.
+	 *
+	 * If any node is moving to a lower tier, the modifications
+	 * in node_demotion[] are still valid for this node.  If any
+	 * node is moving to a higher tier, the moving node may be
+	 * used once more for demotion, which should be fine, so RCU
+	 * read-side protection is enough here.
+	 */
+	rcu_read_lock();
+
+	*targets = node_demotion[node].allowed;
+
+	rcu_read_unlock();
+}
+
 /**
  * next_demotion_node() - Get the next node in the demotion path
  * @node: The starting node to lookup the next node
@@ -437,8 +457,10 @@  static void __disable_all_migrate_targets(void)
 {
 	int node;
 
-	for_each_node_mask(node, node_states[N_MEMORY])
+	for_each_node_mask(node, node_states[N_MEMORY]) {
 		node_demotion[node].preferred = NODE_MASK_NONE;
+		node_demotion[node].allowed = NODE_MASK_NONE;
+	}
 }
 
 static void disable_all_migrate_targets(void)
@@ -465,7 +487,7 @@  static void establish_migration_targets(void)
 	struct demotion_nodes *nd;
 	int target = NUMA_NO_NODE, node;
 	int distance, best_distance;
-	nodemask_t used;
+	nodemask_t used, allowed = NODE_MASK_NONE;
 
 	if (!node_demotion)
 		return;
@@ -511,6 +533,29 @@  static void establish_migration_targets(void)
 			}
 		} while (1);
 	}
+	/*
+	 * Now build the allowed mask for each node, collecting the node
+	 * mask from all memory tiers below it.  This allows demotion page
+	 * allocation to fall back to a set of nodes that is close to the
+	 * preferred node selected above.
+	 */
+	list_for_each_entry(memtier, &memory_tiers, list)
+		nodes_or(allowed, allowed, memtier->nodelist);
+	/*
+	 * Removes nodes not yet in N_MEMORY.
+	 */
+	nodes_and(allowed, node_states[N_MEMORY], allowed);
+
+	list_for_each_entry(memtier, &memory_tiers, list) {
+		/*
+		 * Keep removing the current tier from the allowed nodes.
+		 * This removes all nodes in the current and higher memory
+		 * tiers from the allowed mask.
+		 */
+		nodes_andnot(allowed, allowed, memtier->nodelist);
+		for_each_node_mask(node, memtier->nodelist)
+			node_demotion[node].allowed = allowed;
+	}
 }
 
 /*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3a8f78277f99..d424b7af2f26 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1460,23 +1460,6 @@  static void folio_check_dirty_writeback(struct folio *folio,
 		mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
 }
 
-static struct page *alloc_demote_page(struct page *page, unsigned long node)
-{
-	struct migration_target_control mtc = {
-		/*
-		 * Allocate from 'node', or fail quickly and quietly.
-		 * When this happens, 'page' will likely just be discarded
-		 * instead of migrated.
-		 */
-		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
-			    __GFP_THISNODE  | __GFP_NOWARN |
-			    __GFP_NOMEMALLOC | GFP_NOWAIT,
-		.nid = node
-	};
-
-	return alloc_migration_target(page, (unsigned long)&mtc);
-}
-
 /*
  * Take pages on @demote_list and attempt to demote them to
  * another node.  Pages which are not demoted are left on
@@ -1487,6 +1470,19 @@  static unsigned int demote_page_list(struct list_head *demote_pages,
 {
 	int target_nid = next_demotion_node(pgdat->node_id);
 	unsigned int nr_succeeded;
+	nodemask_t allowed_mask;
+
+	struct migration_target_control mtc = {
+		/*
+		 * Allocate from 'target_nid', falling back to any allowed
+		 * node, or fail quickly and quietly.  When that happens,
+		 * 'page' will likely just be discarded instead of migrated.
+		 */
+		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN |
+			__GFP_NOMEMALLOC | GFP_NOWAIT,
+		.nid = target_nid,
+		.nmask = &allowed_mask
+	};
 
 	if (list_empty(demote_pages))
 		return 0;
@@ -1494,10 +1490,12 @@  static unsigned int demote_page_list(struct list_head *demote_pages,
 	if (target_nid == NUMA_NO_NODE)
 		return 0;
 
+	node_get_allowed_targets(pgdat->node_id, &allowed_mask);
+
 	/* Demotion ignores all cpuset and mempolicy settings */
-	migrate_pages(demote_pages, alloc_demote_page, NULL,
-			    target_nid, MIGRATE_ASYNC, MR_DEMOTION,
-			    &nr_succeeded);
+	migrate_pages(demote_pages, alloc_migration_target, NULL,
+		      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
+		      &nr_succeeded);
 
 	if (current_is_kswapd())
 		__count_vm_events(PGDEMOTE_KSWAPD, nr_succeeded);
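
To illustrate the allowed-mask construction in
establish_migration_targets(), here is a standalone user-space sketch
of the same two-pass algorithm.  The three-tier layout is hypothetical,
and the unsigned-int bitmasks are simplified stand-ins for nodemask_t
and the nodes_or()/nodes_and()/nodes_andnot() helpers:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical tiers: bit i set => node i is in that tier. */
		unsigned int tiers[3] = { 0x3, 0x4, 0x8 }; /* {0,1}, {2}, {3} */
		unsigned int n_memory = 0xf;	/* all four nodes have memory */
		unsigned int allowed = 0;
		unsigned int node_allowed[4] = { 0 };
		int t, node;

		/* Collect nodes from every tier, then drop offline nodes. */
		for (t = 0; t < 3; t++)
			allowed |= tiers[t];
		allowed &= n_memory;

		/*
		 * Walk tiers from highest to lowest, removing the current
		 * tier before recording, so each node may demote only to
		 * nodes in strictly lower tiers.
		 */
		for (t = 0; t < 3; t++) {
			allowed &= ~tiers[t];
			for (node = 0; node < 4; node++)
				if (tiers[t] & (1u << node))
					node_allowed[node] = allowed;
		}

		for (node = 0; node < 4; node++)
			printf("node %d: allowed mask 0x%x\n",
			       node, node_allowed[node]);
		return 0;
	}

With this layout, nodes 0 and 1 may demote to nodes 2 or 3, node 2
only to node 3, and node 3 nowhere, matching the page allocation
fallback semantics described in the commit message.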