[RFC,5/6] mm, thp: add some statistics for zero subpages reclaim

Message ID 1635422215-99394-6-git-send-email-ningzhang@linux.alibaba.com
State New
Series Reclaim zero subpages of thp to avoid memory bloat

Commit Message

Ning Zhang Oct. 28, 2021, 11:56 a.m. UTC
queue_length shows the number of huge pages in the reclaim queue.
split_hpage shows the number of huge pages split by thp reclaim.
split_failed shows the number of huge pages that thp reclaim failed
to split.
reclaim_subpage shows the number of zero subpages reclaimed by
thp reclaim.
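
As an illustration (assuming a cgroup v1 memory hierarchy, where the
new file should show up as memory.thp_reclaim_stat; the cgroup name
and all numbers below are made up), each counter is printed as one
row with one left-justified column per node:

  # cat /sys/fs/cgroup/memory/test/memory.thp_reclaim_stat
  queue_length	100                     48
  split_hpage	220                     87
  split_failed	3                       0
  reclaim_subpage	50112                   19820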

Signed-off-by: Ning Zhang <ningzhang@linux.alibaba.com>
---
 include/linux/huge_mm.h |  3 ++-
 include/linux/mmzone.h  |  3 +++
 mm/huge_memory.c        |  8 ++++++--
 mm/memcontrol.c         | 47 +++++++++++++++++++++++++++++++++++++++++++++++
 mm/vmscan.c             |  2 +-
 5 files changed, 59 insertions(+), 4 deletions(-)

Patch

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index f792433..5d4a038 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -189,7 +189,8 @@  unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 extern int global_thp_reclaim;
 int zsr_get_hpage(struct hpage_reclaim *hr_queue, struct page **reclaim_page,
 		  int threshold);
-unsigned long zsr_reclaim_hpage(struct lruvec *lruvec, struct page *page);
+unsigned long zsr_reclaim_hpage(struct hpage_reclaim *hr_queue,
+				struct lruvec *lruvec, struct page *page);
 void zsr_reclaim_memcg(struct mem_cgroup *memcg);
 static inline struct list_head *hpage_reclaim_list(struct page *page)
 {
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 222cd4f..6ce6890 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -792,6 +792,9 @@  struct hpage_reclaim {
 	spinlock_t reclaim_queue_lock;
 	struct list_head reclaim_queue;
 	unsigned long reclaim_queue_len;
+	atomic_long_t split_hpage;
+	atomic_long_t split_failed;
+	atomic_long_t reclaim_subpage;
 };
 #endif
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 633fd0f..5e737d0 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3506,7 +3506,8 @@  int zsr_get_hpage(struct hpage_reclaim *hr_queue, struct page **reclaim_page,
 
 }
 
-unsigned long zsr_reclaim_hpage(struct lruvec *lruvec, struct page *page)
+unsigned long zsr_reclaim_hpage(struct hpage_reclaim *hr_queue,
+				struct lruvec *lruvec, struct page *page)
 {
 	struct pglist_data *pgdat = page_pgdat(page);
 	unsigned long reclaimed;
@@ -3523,12 +3524,15 @@  unsigned long zsr_reclaim_hpage(struct lruvec *lruvec, struct page *page)
 		putback_lru_page(page);
 		mod_node_page_state(pgdat, NR_ISOLATED_ANON,
 				    -HPAGE_PMD_NR);
+		atomic_long_inc(&hr_queue->split_failed);
 		return 0;
 	}
 
 	unlock_page(page);
 	list_add_tail(&page->lru, &split_list);
 	reclaimed = reclaim_zero_subpages(&split_list, &keep_list);
+	atomic_long_inc(&hr_queue->split_hpage);
+	atomic_long_add(reclaimed, &hr_queue->reclaim_subpage);
 
 	spin_lock_irqsave(&lruvec->lru_lock, flags);
 	move_pages_to_lru(lruvec, &keep_list);
@@ -3564,7 +3568,7 @@  void zsr_reclaim_memcg(struct mem_cgroup *memcg)
 			if (!page)
 				continue;
 
-			zsr_reclaim_hpage(lruvec, page);
+			zsr_reclaim_hpage(hr_queue, lruvec, page);
 
 			cond_resched();
 		}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a8e3ca1..f8016ba 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4580,6 +4580,49 @@  static ssize_t memcg_thp_reclaim_ctrl_write(struct kernfs_open_file *of,
 
 	return nbytes;
 }
+
+static int memcg_thp_reclaim_stat_show(struct seq_file *m, void *v)
+{
+	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+	struct mem_cgroup_per_node *mz;
+	int nid;
+	unsigned long len;
+
+	seq_puts(m, "queue_length\t");
+	for_each_node(nid) {
+		mz = memcg->nodeinfo[nid];
+		len = READ_ONCE(mz->hpage_reclaim_queue.reclaim_queue_len);
+		seq_printf(m, "%-24lu", len);
+	}
+
+	seq_puts(m, "\n");
+	seq_puts(m, "split_hpage\t");
+	for_each_node(nid) {
+		mz = memcg->nodeinfo[nid];
+		len = atomic_long_read(&mz->hpage_reclaim_queue.split_hpage);
+		seq_printf(m, "%-24lu", len);
+	}
+
+	seq_puts(m, "\n");
+	seq_puts(m, "split_failed\t");
+	for_each_node(nid) {
+		mz = memcg->nodeinfo[nid];
+		len = atomic_long_read(&mz->hpage_reclaim_queue.split_failed);
+		seq_printf(m, "%-24lu", len);
+	}
+
+	seq_puts(m, "\n");
+	seq_puts(m, "reclaim_subpage\t");
+	for_each_node(nid) {
+		mz = memcg->nodeinfo[nid];
+		len = atomic_long_read(&mz->hpage_reclaim_queue.reclaim_subpage);
+		seq_printf(m, "%-24lu", len);
+	}
+
+	seq_puts(m, "\n");
+
+	return 0;
+}
 #endif
 
 #ifdef CONFIG_CGROUP_WRITEBACK
@@ -5155,6 +5198,10 @@  static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
 		.seq_show = memcg_thp_reclaim_ctrl_show,
 		.write = memcg_thp_reclaim_ctrl_write,
 	},
+	{
+		.name = "thp_reclaim_stat",
+		.seq_show = memcg_thp_reclaim_stat_show,
+	},
 #endif
 	{ },	/* terminate */
 };
diff --git a/mm/vmscan.c b/mm/vmscan.c
index fcc80a6..cb5f53d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2818,7 +2818,7 @@  static unsigned long reclaim_hpage_zero_subpages(struct lruvec *lruvec,
 		if (!page)
 			continue;
 
-		nr_reclaimed += zsr_reclaim_hpage(lruvec, page);
+		nr_reclaimed += zsr_reclaim_hpage(hr_queue, lruvec, page);
 
 		cond_resched();