diff mbox series

[RFC,2/2] mm: Update hint fault count for pages that are skipped during scanning

Message ID 20240327160237.2355-3-bharata@amd.com (mailing list archive)
State New
Headers show
Series Hot page promotion optimization for large address space | expand

Commit Message

Bharata B Rao March 27, 2024, 4:02 p.m. UTC
During scanning, PTE updates are skipped for those pages which
are already marked as PROT_NONE. This skip is still required, but
also update the scan-time fault count for such pages so that the
fault count used to calculate the latency stays up to date with
the most recent scanning iteration.

Signed-off-by: Bharata B Rao <bharata@amd.com>
---
 mm/huge_memory.c | 7 ++++---
 mm/mprotect.c    | 9 +++++----
 2 files changed, 9 insertions(+), 7 deletions(-)
diff mbox series

Patch

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7e62c3c2bbcb..24a4f976323e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2086,9 +2086,6 @@  int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		if (is_huge_zero_pmd(*pmd))
 			goto unlock;
 
-		if (pmd_protnone(*pmd))
-			goto unlock;
-
 		folio = page_folio(pmd_page(*pmd));
 		toptier = node_is_toptier(folio_nid(folio));
 		/*
@@ -2102,6 +2099,10 @@  int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
 		    !toptier)
 			folio_xchg_fault_count(folio, atomic_read(&mm->hint_faults));
+
+		if (pmd_protnone(*pmd))
+			goto unlock;
+
 	}
 	/*
 	 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 30118fd492f4..cfd3812302be 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -118,10 +118,6 @@  static long change_pte_range(struct mmu_gather *tlb,
 				int nid;
 				bool toptier;
 
-				/* Avoid TLB flush if possible */
-				if (pte_protnone(oldpte))
-					continue;
-
 				folio = vm_normal_folio(vma, addr, oldpte);
 				if (!folio || folio_is_zone_device(folio) ||
 				    folio_test_ksm(folio))
@@ -162,6 +158,11 @@  static long change_pte_range(struct mmu_gather *tlb,
 					folio_xchg_fault_count(folio,
 							atomic_read(&vma->vm_mm->hint_faults));
 
+				/* Avoid TLB flush if possible */
+				if (pte_protnone(oldpte))
+					continue;
+
+
 			}
 
 			oldpte = ptep_modify_prot_start(vma, addr, pte);