[RFC,1/4] Add get_cgroup_local_usage for estimating the top-tier memory usage

Message ID 20240920221202.1734227-2-kaiyang2@cs.cmu.edu (mailing list archive)
State New
Series memory tiering fairness by per-cgroup control of promotion and demotion

Commit Message

Kaiyang Zhao Sept. 20, 2024, 10:11 p.m. UTC
From: Kaiyang Zhao <kaiyang2@cs.cmu.edu>

Approximate a cgroup's top-tier memory usage as the sum of its anon,
file, shmem and slab sizes on the top tier. The top tier is assumed
to be node 0.

Signed-off-by: Kaiyang Zhao <kaiyang2@cs.cmu.edu>
---
 include/linux/memcontrol.h |  2 ++
 mm/memcontrol.c            | 24 ++++++++++++++++++++++++
 2 files changed, 26 insertions(+)
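
For context, a minimal sketch of how a consumer of this helper might gate
promotion on the estimate. Everything here is hypothetical: should_promote()
and memcg->top_tier_limit are invented names for illustration, not part of
this patch; the actual promotion/demotion control comes later in the series.

/*
 * Hypothetical caller sketch (not part of this patch): permit
 * promotion into the top tier only while the cgroup's estimated
 * top-tier footprint stays below an assumed per-cgroup limit.
 * memcg->top_tier_limit is an invented field, in pages to match
 * the helper's return unit.
 */
static bool should_promote(struct mem_cgroup *memcg)
{
	unsigned long usage;

	if (!memcg)
		return true;

	/* flush=true takes a ratelimited stats flush for freshness. */
	usage = get_cgroup_local_usage(memcg, true);

	return usage < READ_ONCE(memcg->top_tier_limit);
}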

Patch

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 34d2da05f2f1..94aba4498fca 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -648,6 +648,8 @@  static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
 		memcg == target;
 }
 
+unsigned long get_cgroup_local_usage(struct mem_cgroup *memcg, bool flush);
+
 static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
 					struct mem_cgroup *memcg)
 {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f19a58c252f0..20b715441332 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -855,6 +855,30 @@  unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
 	return READ_ONCE(memcg->vmstats->events_local[i]);
 }
 
+/* Usage is in pages. */
+unsigned long get_cgroup_local_usage(struct mem_cgroup *memcg, bool flush)
+{
+	struct lruvec *lruvec;
+	unsigned long anon, file, shmem, slab;
+	const int local_nid = 0;	/* assume node 0 is the top tier */
+
+	if (!memcg)
+		return 0;
+
+	if (flush)
+		mem_cgroup_flush_stats_ratelimited(memcg);
+
+	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(local_nid));
+	anon = lruvec_page_state(lruvec, NR_ANON_MAPPED);
+	file = lruvec_page_state(lruvec, NR_FILE_PAGES);
+	shmem = lruvec_page_state(lruvec, NR_SHMEM);
+	/* Slab sizes are in bytes, so convert to pages. */
+	slab = lruvec_page_state(lruvec, NR_SLAB_RECLAIMABLE_B) / PAGE_SIZE +
+	       lruvec_page_state(lruvec, NR_SLAB_UNRECLAIMABLE_B) / PAGE_SIZE;
+
+	return anon + file + shmem + slab;
+}
+
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 {
 	/*
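
As a usage illustration, the per-node counters summed here are also exported
to userspace (in bytes) through cgroup v2's memory.numa_stat, so the same
estimate can be reproduced from outside the kernel. The sketch below makes
assumptions: cgroup v2 is mounted at /sys/fs/cgroup, node 0 is the top tier
(matching local_nid above), and all five counter names are present on the
running kernel. Note the in-kernel helper returns pages, while
memory.numa_stat reports bytes.

/* Sketch: reproduce the node-0 usage estimate from memory.numa_stat. */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	const char *cg = argc > 1 ? argv[1] : "/sys/fs/cgroup";
	char path[4096], key[64], line[4096];
	unsigned long long total = 0, val;
	FILE *f;

	snprintf(path, sizeof(path), "%s/memory.numa_stat", cg);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		char *n0;

		if (sscanf(line, "%63s", key) != 1)
			continue;
		/* The same five counters the patch sums. */
		if (strcmp(key, "anon") && strcmp(key, "file") &&
		    strcmp(key, "shmem") && strcmp(key, "slab_reclaimable") &&
		    strcmp(key, "slab_unreclaimable"))
			continue;
		n0 = strstr(line, "N0=");
		if (n0 && sscanf(n0, "N0=%llu", &val) == 1)
			total += val;	/* bytes on node 0 */
	}
	fclose(f);

	printf("estimated top-tier usage: %llu bytes\n", total);
	return 0;
}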