
[-next,4/9] mm: convert xchg_page_access_time to xchg_folio_access_time()

Message ID 20230926005254.2861577-5-wangkefeng.wang@huawei.com
Series mm: convert page cpupid functions to folios

Commit Message

Kefeng Wang Sept. 26, 2023, 12:52 a.m. UTC
Make xchg_page_access_time() take a folio and rename it to
xchg_folio_access_time(), since all callers now have a folio.
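For illustration only, a minimal sketch of the caller-side change (the
folio variable here stands in for whatever folio a call site already
holds; the real call sites are in the hunks below):

	/* Before: callers had to reach into the folio for its head page. */
	last_time = xchg_page_access_time(&folio->page, jiffies_to_msecs(jiffies));

	/* After: the folio is passed directly. */
	last_time = xchg_folio_access_time(folio, jiffies_to_msecs(jiffies));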

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 include/linux/mm.h  | 7 ++++---
 kernel/sched/fair.c | 2 +-
 mm/huge_memory.c    | 4 ++--
 mm/mprotect.c       | 2 +-
 4 files changed, 8 insertions(+), 7 deletions(-)

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index a1d0c82ac9a7..49b9fa383e7d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1711,11 +1711,12 @@  static inline void page_cpupid_reset_last(struct page *page)
 }
 #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
 
-static inline int xchg_page_access_time(struct page *page, int time)
+static inline int xchg_folio_access_time(struct folio *folio, int time)
 {
 	int last_time;
 
-	last_time = page_cpupid_xchg_last(page, time >> PAGE_ACCESS_TIME_BUCKETS);
+	last_time = page_cpupid_xchg_last(&folio->page,
+					  time >> PAGE_ACCESS_TIME_BUCKETS);
 	return last_time << PAGE_ACCESS_TIME_BUCKETS;
 }
 
@@ -1734,7 +1735,7 @@  static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
 	return page_to_nid(page); /* XXX */
 }
 
-static inline int xchg_page_access_time(struct page *page, int time)
+static inline int xchg_folio_access_time(struct folio *folio, int time)
 {
 	return 0;
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b507ec29e1e1..afb9dc98a8ee 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1714,7 +1714,7 @@  static int numa_hint_fault_latency(struct folio *folio)
 	int last_time, time;
 
 	time = jiffies_to_msecs(jiffies);
-	last_time = xchg_page_access_time(&folio->page, time);
+	last_time = xchg_folio_access_time(folio, time);
 
 	return (time - last_time) & PAGE_ACCESS_TIME_MASK;
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c7efa214add8..c4f4951615fd 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1905,8 +1905,8 @@  int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
 		if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
 		    !toptier)
-			xchg_page_access_time(&folio->page,
-					      jiffies_to_msecs(jiffies));
+			xchg_folio_access_time(folio,
+					       jiffies_to_msecs(jiffies));
 	}
 	/*
 	 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 459daa987131..1c556651888a 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -159,7 +159,7 @@  static long change_pte_range(struct mmu_gather *tlb,
 					continue;
 				if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
 				    !toptier)
-					xchg_page_access_time(&folio->page,
+					xchg_folio_access_time(folio,
 						jiffies_to_msecs(jiffies));
 			}