
[RFC,2/2] mm/demotion: Expose memory tier details via sysfs

Message ID 20220825092325.381517-2-aneesh.kumar@linux.ibm.com (mailing list archive)
State New
Series [RFC,1/2] mm/demotion: Expose memory type details via sysfs

Commit Message

Aneesh Kumar K.V Aug. 25, 2022, 9:23 a.m. UTC
All allocated memory tiers will be listed as
/sys/devices/virtual/memtier/memtierN/

Each memtier directory contains symbolic links to the memory types
that are part of that memory tier. The directory hierarchy looks like:

:/sys/devices/virtual/memtier# tree memtier512/
memtier512/
├── memtype1 -> ../memtype1
├── memtype2 -> ../memtype2
├── subsystem -> ../../../../bus/memtier
└── uevent

The nodes which are part of a specific memory type can be listed via
/sys/devices/system/memtier/memtypeN/nodes.

The adistance value of a specific memory type can be listed via
/sys/devices/system/memtier/memtypeN/adistance.
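
For example, reading these files for a hypothetical memory type (the
node numbers and adistance value below are illustrative only):

:/sys/devices/virtual/memtier# cat memtype1/nodes
0-1
:/sys/devices/virtual/memtier# cat memtype1/adistance
512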

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 mm/memory-tiers.c | 62 +++++++++++++++++++++++++++++++++++------------
 1 file changed, 47 insertions(+), 15 deletions(-)

Comments

Huang, Ying Aug. 26, 2022, 4:31 a.m. UTC | #1
"Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> writes:

> All allocated memory tiers will be listed as
> /sys/devices/virtual/memtier/memtierN/
>
> Each memtier directory contains symbolic links to the memory types
> that are part of that memory tier. The directory hierarchy looks like:
>
> :/sys/devices/virtual/memtier# tree memtier512/
> memtier512/

So you suggest using abstract_distance_start as the memory tier ID?  That
will make the memory tier ID stable unless we change the abstract distance
chunk size or the abstract distance division points.  That is, we have at
least 2 choices here:

1. memory_tier0, memory_tier1, memory_tier2, ...

The IDs start from 0, which is easy for users to understand.  The main
drawback is that a memory tier's ID may change when a NUMA node is
onlined/offlined.  That is, the memory tier ID is relatively unstable.

2. memory_tier<abstract_distance_start1>, memory_tier<abstract_distance_start2>, ...

The IDs will be discontinuous, so they are not as intuitive as 0, 1, 2, ....
The main advantage is that a memory tier's ID will not change when a
NUMA node is onlined/offlined.  The ID changes only when we change the
abstract distance chunk size or the abstract distance division points,
which is expected to be relatively rare.

Personally, I prefer the 2nd choice too, but I want to collect opinions
from other people as well.
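
To make the 2nd choice concrete: the tier ID would just be the memory
type's adistance rounded down to the tier boundary (a sketch only,
assuming adistance values are grouped into MEMTIER_CHUNK_SIZE sized
tiers as described by the comment above struct memory_tier):

	/* illustrative only: name the tier after its adistance_start */
	static inline int memtier_choice2_id(struct memory_dev_type *memtype)
	{
		return round_down(memtype->adistance, MEMTIER_CHUNK_SIZE);
	}

An adistance falling in adistance_start .. adistance_start +
MEMTIER_CHUNK_SIZE would then map to e.g. the memtier512 directory shown
in the commit message.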

> ├── memtype1 -> ../memtype1
> ├── memtype2 -> ../memtype2

I think abstract_distance_start and abstract_distance_end are key
information about a memory tier too, so we should show them here as well.
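
Something like the following could be enough (a sketch only; the
attribute name is just an example, reusing the to_memory_tier() helper
this patch adds):

	static ssize_t adistance_start_show(struct device *dev,
					    struct device_attribute *attr, char *buf)
	{
		struct memory_tier *memtier = to_memory_tier(dev);

		return sysfs_emit(buf, "%d\n", memtier->adistance_start);
	}
	static DEVICE_ATTR_RO(adistance_start);

adistance_end is then implied as adistance_start + MEMTIER_CHUNK_SIZE,
per the comment above struct memory_tier.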

> ├── subsystem -> ../../../../bus/memtier
> └── uevent
>
> The nodes which are part of a specific memory type can be listed via
> /sys/devices/system/memtier/memtypeN/nodes.
>
> The adistance value of a specific memory type can be listed via
> /sys/devices/system/memtier/memtypeN/adistance.
>
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>

Best Regards,
Huang, Ying

[snip]

Patch

diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
index 9eef3bd8d134..4005c3124ff0 100644
--- a/mm/memory-tiers.c
+++ b/mm/memory-tiers.c
@@ -20,6 +20,7 @@  struct memory_tier {
 	 * adistance_start .. adistance_start + MEMTIER_CHUNK_SIZE
 	 */
 	int adistance_start;
+	struct device dev;
 	/* All the nodes that are part of all the lower memory tiers. */
 	nodemask_t lower_tier_mask;
 };
@@ -36,6 +37,7 @@  static struct memory_dev_type *default_dram_type;
 #define MAX_MEMORY_TYPE_ID	20
 static DEFINE_IDR(memory_type_idr);
 #define to_memory_type(device) container_of(device, struct memory_dev_type, dev)
+#define to_memory_tier(device) container_of(device, struct memory_tier, dev)
 static struct bus_type memory_tier_subsys = {
 	.name = "memtier",
 	.dev_name = "memtier",
@@ -103,8 +105,25 @@  static int top_tier_adistance;
 static struct demotion_nodes *node_demotion __read_mostly;
 #endif /* CONFIG_MIGRATION */
 
+static void memory_tier_device_release(struct device *dev)
+{
+	struct memory_tier *tier = to_memory_tier(dev);
+	/*
+	 * synchronize_rcu in clear_node_memory_tier makes sure
+	 * we don't have rcu access to this memory tier.
+	 */
+	kfree(tier);
+}
+
+static void destroy_memory_tier(struct memory_tier *memtier)
+{
+	list_del(&memtier->list);
+	device_unregister(&memtier->dev);
+}
+
 static struct memory_tier *find_create_memory_tier(struct memory_dev_type *memtype)
 {
+	int ret;
 	bool found_slot = false;
 	struct memory_tier *memtier, *new_memtier;
 	int adistance = memtype->adistance;
@@ -128,15 +147,14 @@  static struct memory_tier *find_create_memory_tier(struct memory_dev_type *memty
 
 	list_for_each_entry(memtier, &memory_tiers, list) {
 		if (adistance == memtier->adistance_start) {
-			list_add(&memtype->tier_sibiling, &memtier->memory_types);
-			return memtier;
+			goto link_memtype;
 		} else if (adistance < memtier->adistance_start) {
 			found_slot = true;
 			break;
 		}
 	}
 
-	new_memtier = kmalloc(sizeof(struct memory_tier), GFP_KERNEL);
+	new_memtier = kzalloc(sizeof(struct memory_tier), GFP_KERNEL);
 	if (!new_memtier)
 		return ERR_PTR(-ENOMEM);
 
@@ -147,8 +165,30 @@  static struct memory_tier *find_create_memory_tier(struct memory_dev_type *memty
 		list_add_tail(&new_memtier->list, &memtier->list);
 	else
 		list_add_tail(&new_memtier->list, &memory_tiers);
-	list_add(&memtype->tier_sibiling, &new_memtier->memory_types);
-	return new_memtier;
+
+	new_memtier->dev.id = adistance;
+	new_memtier->dev.bus = &memory_tier_subsys;
+	new_memtier->dev.release = memory_tier_device_release;
+
+	ret = device_register(&new_memtier->dev);
+	if (ret) {
+		list_del(&new_memtier->list);
+		put_device(&new_memtier->dev);
+		return ERR_PTR(ret);
+	}
+	memtier = new_memtier;
+
+link_memtype:
+	list_add(&memtype->tier_sibiling, &memtier->memory_types);
+	/*
+	 * Ignore errors below: the driver that created the memtype device
+	 * may have been unloaded, in which case creating the sysfs link can
+	 * fail.  We continue with just the in-memory representation.
+	 */
+	ret = sysfs_create_link(&memtier->dev.kobj,
+				&memtype->dev.kobj, kobject_name(&memtype->dev.kobj));
+
+	return memtier;
 }
 
 static struct memory_tier *__node_get_memory_tier(int node)
@@ -424,16 +464,6 @@  static struct memory_tier *set_node_memory_tier(int node)
 	return memtier;
 }
 
-static void destroy_memory_tier(struct memory_tier *memtier)
-{
-	list_del(&memtier->list);
-	/*
-	 * synchronize_rcu in clear_node_memory_tier makes sure
-	 * we don't have rcu access to this memory tier.
-	 */
-	kfree(memtier);
-}
-
 static bool clear_node_memory_tier(int node)
 {
 	bool cleared = false;
@@ -462,6 +492,8 @@  static bool clear_node_memory_tier(int node)
 		node_clear(node, memtype->nodes);
 		if (nodes_empty(memtype->nodes)) {
 			list_del_init(&memtype->tier_sibiling);
+			sysfs_delete_link(&memtier->dev.kobj,
+					  &memtype->dev.kobj, kobject_name(&memtype->dev.kobj));
 			if (list_empty(&memtier->memory_types))
 				destroy_memory_tier(memtier);
 		}