--- a/include/linux/memory-tiers.h
+++ b/include/linux/memory-tiers.h
@@ -39,6 +39,7 @@ int node_reset_memory_tier(int node, int tier);
struct memory_tier *node_get_memory_tier(int node);
void node_put_memory_tier(struct memory_tier *memtier);
void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
+bool node_is_toptier(int node);
#else
@@ -52,6 +53,11 @@ static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *target
{
	*targets = NODE_MASK_NONE;
}
+
+static inline bool node_is_toptier(int node)
+{
+	return true;
+}

#endif /* CONFIG_TIERED_MEMORY */
#endif
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -185,9 +185,4 @@ static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
#define to_node(device) container_of(device, struct node, dev)
-static inline bool node_is_toptier(int node)
-{
-	return node_state(node, N_CPU);
-}
-
#endif /* _LINUX_NODE_H_ */
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -35,6 +35,7 @@
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
+#include <linux/memory-tiers.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
--- a/mm/memory-tiers.c
+++ b/mm/memory-tiers.c
@@ -17,7 +17,7 @@ struct demotion_nodes {
static void establish_migration_targets(void);
static DEFINE_MUTEX(memory_tier_lock);
static LIST_HEAD(memory_tiers);
-
+static int top_tier_rank;
/*
 * node_demotion[] examples:
 *
@@ -126,7 +126,7 @@ static void memory_tier_device_release(struct device *dev)
	if (tier->dev.id >= MAX_STATIC_MEMORY_TIERS)
		ida_free(&memtier_dev_id, tier->dev.id);
-	kfree(tier);
+	kfree_rcu(tier);
}

/*
@@ -443,6 +443,31 @@ void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
	rcu_read_unlock();
}

+bool node_is_toptier(int node)
+{
+	bool toptier;
+	pg_data_t *pgdat;
+	struct memory_tier *memtier;
+
+	pgdat = NODE_DATA(node);
+	if (!pgdat)
+		return false;
+
+	rcu_read_lock();
+	memtier = rcu_dereference(pgdat->memtier);
+	if (!memtier) {
+		toptier = true;
+		goto out;
+	}
+	if (memtier->rank >= top_tier_rank)
+		toptier = true;
+	else
+		toptier = false;
+out:
+	rcu_read_unlock();
+	return toptier;
+}
+
/**
 * next_demotion_node() - Get the next node in the demotion path
 * @node: The starting node to lookup the next node
@@ -592,6 +617,21 @@ static void establish_migration_targets(void)
		} while (1);
	}
build_lower_tier_mask:
+	/*
+	 * Promotion is allowed from a memory tier to a higher
+	 * memory tier only if the source tier doesn't include
+	 * compute. We want to skip promotion from a memory tier
+	 * if any node that is part of that tier has CPUs.
+	 * Once we detect such a memory tier, we consider that
+	 * tier as the top tier, from which promotion is not
+	 * allowed.
+	 */
+	list_for_each_entry_reverse(memtier, &memory_tiers, list) {
+		nodes_and(used, node_states[N_CPU], memtier->nodelist);
+		if (!nodes_empty(used)) {
+			top_tier_rank = memtier->rank;
+			break;
+		}
+	}
	/*
	 * Now build the lower_tier mask for each node collecting node mask from
	 * all memory tier below it. This allows us to fallback demotion page
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -50,6 +50,7 @@
#include <linux/memory.h>
#include <linux/random.h>
#include <linux/sched/sysctl.h>
+#include <linux/memory-tiers.h>

#include <asm/tlbflush.h>
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -31,6 +31,7 @@
#include <linux/pgtable.h>
#include <linux/sched/sysctl.h>
#include <linux/userfaultfd_k.h>
+#include <linux/memory-tiers.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
With memory tiers support we can have memory-only NUMA nodes in the top
tier, and on such nodes we want to avoid the NUMA fault tracking that
drives promotion. Update node_is_toptier to work with memory tiers. All
NUMA nodes are top tier nodes by default. When lower memory tiers are
added, all memory tiers at or above the lowest tier containing CPU NUMA
nodes are considered top tier.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 include/linux/memory-tiers.h |  6 +++++
 include/linux/node.h         |  5 ----
 mm/huge_memory.c             |  1 +
 mm/memory-tiers.c            | 44 ++++++++++++++++++++++++++++++++++--
 mm/migrate.c                 |  1 +
 mm/mprotect.c                |  1 +
 6 files changed, 51 insertions(+), 7 deletions(-)
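As an illustration only (not part of this patch), here is a minimal
sketch of how a NUMA balancing path could consult node_is_toptier()
after this change; the helper name and call site are hypothetical:

#include <linux/memory-tiers.h>	/* node_is_toptier() */

/*
 * Hypothetical helper, for illustration: with this patch, "top tier"
 * means any memory tier whose rank is >= the rank of the lowest tier
 * that contains CPU nodes, so memory-only nodes placed in such tiers
 * are treated as top tier as well.
 */
static bool should_track_fault_for_promotion(int page_nid)
{
	/*
	 * A page already on a top-tier node cannot be promoted any
	 * further, so promotion-related NUMA hint fault tracking can
	 * be skipped for it.
	 */
	return !node_is_toptier(page_nid);
}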