
[RFC,4/8] mm/vmscan: add page demotion counter

Message ID: 20200629234510.1BF23254@viggo.jf.intel.com
State: New, archived
Series: Migrate Pages in lieu of discard

Commit Message

Dave Hansen, June 29, 2020, 11:45 p.m. UTC
From: Yang Shi <yang.shi@linux.alibaba.com>

Account the number of demoted pages in scan_control's new nr.demoted field.

Add pgdemote_kswapd and pgdemote_direct VM counters, shown in
/proc/vmstat.

[ daveh:
   - tweaked the __count_vm_events() calls a bit, and made them look
     at the THP size directly rather than getting data from
     migrate_pages()
]

Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
---

 b/include/linux/vm_event_item.h |    2 ++
 b/mm/migrate.c                  |   13 ++++++++++++-
 b/mm/vmscan.c                   |    1 +
 b/mm/vmstat.c                   |    2 ++
 4 files changed, 17 insertions(+), 1 deletion(-)

Patch

diff -puN include/linux/vm_event_item.h~mm-vmscan-add-page-demotion-counter include/linux/vm_event_item.h
--- a/include/linux/vm_event_item.h~mm-vmscan-add-page-demotion-counter	2020-06-29 16:34:40.332312601 -0700
+++ b/include/linux/vm_event_item.h	2020-06-29 16:34:40.342312601 -0700
@@ -32,6 +32,8 @@  enum vm_event_item { PGPGIN, PGPGOUT, PS
 		PGREFILL,
 		PGSTEAL_KSWAPD,
 		PGSTEAL_DIRECT,
+		PGDEMOTE_KSWAPD,
+		PGDEMOTE_DIRECT,
 		PGSCAN_KSWAPD,
 		PGSCAN_DIRECT,
 		PGSCAN_DIRECT_THROTTLE,
diff -puN mm/migrate.c~mm-vmscan-add-page-demotion-counter mm/migrate.c
--- a/mm/migrate.c~mm-vmscan-add-page-demotion-counter	2020-06-29 16:34:40.334312601 -0700
+++ b/mm/migrate.c	2020-06-29 16:34:40.343312601 -0700
@@ -1187,6 +1187,7 @@  static struct page *alloc_demote_node_pa
 int migrate_demote_mapping(struct page *page)
 {
 	int next_nid = next_demotion_node(page_to_nid(page));
+	int ret;
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageHuge(page), page);
@@ -1198,8 +1199,18 @@  int migrate_demote_mapping(struct page *
 		return -ENOMEM;
 
 	/* MIGRATE_ASYNC is the most light weight and never blocks.*/
-	return __unmap_and_move(alloc_demote_node_page, NULL, next_nid,
+	ret = __unmap_and_move(alloc_demote_node_page, NULL, next_nid,
 				page, MIGRATE_ASYNC, MR_DEMOTION);
+
+	if (ret == MIGRATEPAGE_SUCCESS) {
+		int nr_demoted = hpage_nr_pages(page);
+		if (current_is_kswapd())
+			__count_vm_events(PGDEMOTE_KSWAPD, nr_demoted);
+		else
+			__count_vm_events(PGDEMOTE_DIRECT, nr_demoted);
+	}
+
+	return ret;
 }
 
 
diff -puN mm/vmscan.c~mm-vmscan-add-page-demotion-counter mm/vmscan.c
--- a/mm/vmscan.c~mm-vmscan-add-page-demotion-counter	2020-06-29 16:34:40.336312601 -0700
+++ b/mm/vmscan.c	2020-06-29 16:34:40.344312601 -0700
@@ -140,6 +140,7 @@  struct scan_control {
 		unsigned int immediate;
 		unsigned int file_taken;
 		unsigned int taken;
+		unsigned int demoted;
 	} nr;
 
 	/* for recording the reclaimed slab by now */
diff -puN mm/vmstat.c~mm-vmscan-add-page-demotion-counter mm/vmstat.c
--- a/mm/vmstat.c~mm-vmscan-add-page-demotion-counter	2020-06-29 16:34:40.339312601 -0700
+++ b/mm/vmstat.c	2020-06-29 16:34:40.345312601 -0700
@@ -1198,6 +1198,8 @@  const char * const vmstat_text[] = {
 	"pgrefill",
 	"pgsteal_kswapd",
 	"pgsteal_direct",
+	"pgdemote_kswapd",
+	"pgdemote_direct",
 	"pgscan_kswapd",
 	"pgscan_direct",
 	"pgscan_direct_throttle",