[RFC,6/7] pass the gfp mask of the allocation that woke kswapd to track the number of pages scanned on behalf of each alloc type

Message ID: 20240320024218.203491-7-kaiyang2@cs.cmu.edu (mailing list archive)
State: New
Series: mm: providing ample physical memory contiguity by confining unmovable allocations

Commit Message

kaiyang2@cs.cmu.edu March 20, 2024, 2:42 a.m. UTC
From: Kaiyang Zhao <kaiyang2@cs.cmu.edu>

In preparation for exporting the number of pages scanned for each alloc
type, pass the gfp mask of the allocation that woke kswapd down to
balance_pgdat(), and tag sc.gfp_mask with __GFP_MOVABLE when the waking
allocation is movable.

Signed-off-by: Kaiyang Zhao <zh_kaiyang@hotmail.com>
---
 include/linux/mmzone.h |  1 +
 mm/vmscan.c            | 13 +++++++++++--
 2 files changed, 12 insertions(+), 2 deletions(-)
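
As a rough sketch of where this is heading (illustration only, not part of
this patch; the node_stat_item counters below are made up for the example),
the gfp mask carried into reclaim could later be used to attribute scanned
pages to the alloc type that triggered kswapd:

static void count_scanned_for_alloc_type(struct pglist_data *pgdat,
					 struct scan_control *sc,
					 unsigned long nr_scanned)
{
	/*
	 * __GFP_MOVABLE in sc->gfp_mask marks reclaim done on behalf of a
	 * movable allocation (set in balance_pgdat() below); everything
	 * else is attributed to unmovable allocations.
	 * NR_MOVABLE_SCANNED / NR_UNMOVABLE_SCANNED are hypothetical
	 * node_stat_item entries used only for this sketch.
	 */
	if (sc->gfp_mask & __GFP_MOVABLE)
		mod_node_page_state(pgdat, NR_MOVABLE_SCANNED, nr_scanned);
	else
		mod_node_page_state(pgdat, NR_UNMOVABLE_SCANNED, nr_scanned);
}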

Patch

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a4889c9d4055..abc9f1623c82 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1288,6 +1288,7 @@  typedef struct pglist_data {
 	struct task_struct *kswapd;	/* Protected by kswapd_lock */
 	int kswapd_order;
 	enum zone_type kswapd_highest_zoneidx;
+	gfp_t kswapd_gfp;
 
 	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index aa21da983804..ed0f47e2e810 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -7330,7 +7330,7 @@  clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
  * or lower is eligible for reclaim until at least one usable zone is
  * balanced.
  */
-static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
+static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx, gfp_t gfp_mask)
 {
 	int i;
 	unsigned long nr_soft_reclaimed;
@@ -7345,6 +7345,8 @@  static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
 		.order = order,
 		.may_unmap = 1,
 	};
+	if (is_migrate_movable(gfp_migratetype(gfp_mask)))
+		sc.gfp_mask |= __GFP_MOVABLE;
 
 	set_task_reclaim_state(current, &sc.reclaim_state);
 	psi_memstall_enter(&pflags);
@@ -7659,6 +7661,7 @@  static int kswapd(void *p)
 	pg_data_t *pgdat = (pg_data_t *)p;
 	struct task_struct *tsk = current;
 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
+	gfp_t gfp_mask;
 
 	if (!cpumask_empty(cpumask))
 		set_cpus_allowed_ptr(tsk, cpumask);
@@ -7680,6 +7683,7 @@  static int kswapd(void *p)
 
 	WRITE_ONCE(pgdat->kswapd_order, 0);
 	WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
+	WRITE_ONCE(pgdat->kswapd_gfp, 0);
 	atomic_set(&pgdat->nr_writeback_throttled, 0);
 	for ( ; ; ) {
 		bool ret;
@@ -7687,6 +7691,7 @@  static int kswapd(void *p)
 		alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
 		highest_zoneidx = kswapd_highest_zoneidx(pgdat,
 							highest_zoneidx);
+		gfp_mask = READ_ONCE(pgdat->kswapd_gfp);
 
 kswapd_try_sleep:
 		kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
@@ -7696,8 +7701,10 @@  static int kswapd(void *p)
 		alloc_order = READ_ONCE(pgdat->kswapd_order);
 		highest_zoneidx = kswapd_highest_zoneidx(pgdat,
 							highest_zoneidx);
+		gfp_mask = READ_ONCE(pgdat->kswapd_gfp);
 		WRITE_ONCE(pgdat->kswapd_order, 0);
 		WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
+		WRITE_ONCE(pgdat->kswapd_gfp, 0);
 
 		ret = try_to_freeze();
 		if (kthread_should_stop())
@@ -7721,7 +7728,7 @@  static int kswapd(void *p)
 		trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx,
 						alloc_order);
 		reclaim_order = balance_pgdat(pgdat, alloc_order,
-						highest_zoneidx);
+						highest_zoneidx, gfp_mask);
 		if (reclaim_order < alloc_order)
 			goto kswapd_try_sleep;
 	}
@@ -7759,6 +7766,8 @@  void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
 	if (READ_ONCE(pgdat->kswapd_order) < order)
 		WRITE_ONCE(pgdat->kswapd_order, order);
 
+	WRITE_ONCE(pgdat->kswapd_gfp, gfp_flags);
+
 	if (!waitqueue_active(&pgdat->kswapd_wait))
 		return;
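
A note on the __GFP_MOVABLE tagging in balance_pgdat() above (illustration
only, relying on the current mainline behaviour of the helpers):
gfp_migratetype() maps the waking allocation's GFP flags to a migratetype,
and is_migrate_movable() accepts MIGRATE_MOVABLE (and CMA), so the added
check amounts to:

static bool waker_is_movable(gfp_t gfp_mask)
{
	/*
	 * Same check as in balance_pgdat(): e.g. GFP_HIGHUSER_MOVABLE
	 * (typical user / page cache allocations) returns true, while
	 * GFP_KERNEL (unmovable kernel allocations) returns false.
	 */
	return is_migrate_movable(gfp_migratetype(gfp_mask));
}

Also worth noting: unlike kswapd_order, which is only ever raised,
kswapd_gfp is overwritten on every wakeup, so back-to-back wakeups by
allocations of different types leave the most recent caller's mask in
place for the next balance_pgdat() run.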