[RFC,4/4] mm/zswap: count successful large folio zswap loads

Message ID: 20241018105026.2521366-5-usamaarif642@gmail.com (mailing list archive)
State: New
Series: [RFC,1/4] mm/zswap: skip swapcache for swapping in zswap pages

Commit Message

Usama Arif Oct. 18, 2024, 10:48 a.m. UTC
Add a new MTHP_STAT_ZSWPIN entry to the sysfs transparent_hugepage
stats so that successful large folio zswap loads can be accounted
under the per-order zswpin stats:

/sys/kernel/mm/transparent_hugepage/hugepages-*kB/stats/zswpin

Signed-off-by: Usama Arif <usamaarif642@gmail.com>
---
 Documentation/admin-guide/mm/transhuge.rst | 3 +++
 include/linux/huge_mm.h                    | 1 +
 mm/huge_memory.c                           | 3 +++
 mm/page_io.c                               | 1 +
 4 files changed, 8 insertions(+)
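
For context, the counter added here is readable from userspace like any
other sysfs file. Below is a minimal sketch (not part of this patch) that
reads the zswpin count for one example order; the hugepages-64kB directory
is only an example and depends on the orders available on the running
kernel:

#include <stdio.h>

int main(void)
{
	/* Path from the commit message; "64kB" is just one example order. */
	const char *path =
		"/sys/kernel/mm/transparent_hugepage/hugepages-64kB/stats/zswpin";
	unsigned long long zswpin;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%llu", &zswpin) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("64kB folio zswap loads: %llu\n", zswpin);
	return 0;
}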

Patch

diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index 2a171ed5206e..68a9790908b2 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -534,6 +534,9 @@  zswpout
 	is incremented every time a huge page is swapped out to zswap in one
 	piece without splitting.
 
+zswpin
+	is incremented every time a huge page is swapped in from zswap.
+
 swpout
 	is incremented every time a huge page is swapped out to a non-zswap
 	swap device in one piece without splitting.
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 3eca60f3d512..28a275d3107a 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -120,6 +120,7 @@  enum mthp_stat_item {
 	MTHP_STAT_ANON_FAULT_FALLBACK,
 	MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
 	MTHP_STAT_ZSWPOUT,
+	MTHP_STAT_ZSWPIN,
 	MTHP_STAT_SWPOUT,
 	MTHP_STAT_SWPOUT_FALLBACK,
 	MTHP_STAT_SHMEM_ALLOC,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a7b05f4c2a5e..587f7dd81500 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -612,6 +612,7 @@  DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
 DEFINE_MTHP_STAT_ATTR(zswpout, MTHP_STAT_ZSWPOUT);
+DEFINE_MTHP_STAT_ATTR(zswpin, MTHP_STAT_ZSWPIN);
 DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
 DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
 #ifdef CONFIG_SHMEM
@@ -631,6 +632,7 @@  static struct attribute *anon_stats_attrs[] = {
 	&anon_fault_fallback_charge_attr.attr,
 #ifndef CONFIG_SHMEM
 	&zswpout_attr.attr,
+	&zswpin_attr.attr,
 	&swpout_attr.attr,
 	&swpout_fallback_attr.attr,
 #endif
@@ -662,6 +664,7 @@  static struct attribute_group file_stats_attr_grp = {
 static struct attribute *any_stats_attrs[] = {
 #ifdef CONFIG_SHMEM
 	&zswpout_attr.attr,
+	&zswpin_attr.attr,
 	&swpout_attr.attr,
 	&swpout_fallback_attr.attr,
 #endif
diff --git a/mm/page_io.c b/mm/page_io.c
index 2a15b197968a..477f9d4fc009 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -620,6 +620,7 @@  void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
 		folio_unlock(folio);
 		goto finish;
 	} else if (zswap_load(folio)) {
+		count_mthp_stat(folio_order(folio), MTHP_STAT_ZSWPIN);
 		folio_unlock(folio);
 		goto finish;
 	}
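
For readers unfamiliar with the stats helper used above: count_mthp_stat()
bumps a per-CPU counter indexed by folio order and stat item, which is what
the per-size sysfs files report. A simplified sketch of the idea follows;
the real definition lives in include/linux/huge_mm.h, and the array bounds
and names here are illustrative rather than verbatim:

/* Per-CPU, per-order counters, one slot per mthp_stat_item. */
struct mthp_stat {
	unsigned long stats[PMD_ORDER + 1][__MTHP_STAT_COUNT];
};

DECLARE_PER_CPU(struct mthp_stat, mthp_stats);

static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
	/* Only large-folio (mTHP) orders are tracked; order-0 pages are not. */
	if (order <= 0 || order > PMD_ORDER)
		return;

	this_cpu_inc(mthp_stats.stats[order][item]);
}

With the hook added in swap_read_folio(), each successful zswap load is
attributed to the order of the folio being read, mirroring the existing
zswpout accounting on the store side.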