diff mbox series

[v2,4/4] mm/madvise: batch tlb flushes for MADV_DONTNEED[_LOCKED]

Message ID 20250404210700.2156-5-sj@kernel.org (mailing list archive)
State New
Headers show
Series mm/madvise: batch tlb flushes for MADV_DONTNEED and MADV_FREE | expand

Commit Message

SeongJae Park April 4, 2025, 9:07 p.m. UTC
Batch tlb flushes for MADV_DONTNEED[_LOCKED] for better efficiency, in a
way that is very similar to the tlb flush batching for MADV_FREE.

Signed-off-by: SeongJae Park <sj@kernel.org>
---
 mm/internal.h | 3 +++
 mm/madvise.c  | 9 ++++++---
 mm/memory.c   | 4 ++--
 3 files changed, 11 insertions(+), 5 deletions(-)
diff mbox series

Patch

diff --git a/mm/internal.h b/mm/internal.h
index e9695baa5922..be0c46837e22 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -435,6 +435,9 @@  void unmap_page_range(struct mmu_gather *tlb,
 			     struct vm_area_struct *vma,
 			     unsigned long addr, unsigned long end,
 			     struct zap_details *details);
+void notify_unmap_single_vma(struct mmu_gather *tlb,
+		struct vm_area_struct *vma, unsigned long addr,
+		unsigned long size, struct zap_details *details);
 int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
 			   gfp_t gfp);
 
diff --git a/mm/madvise.c b/mm/madvise.c
index 564095e381b2..c7ac32b4a371 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -851,7 +851,8 @@  static int madvise_free_single_vma(
  * An interface that causes the system to free clean pages and flush
  * dirty pages is already available as msync(MS_INVALIDATE).
  */
-static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
+static long madvise_dontneed_single_vma(struct madvise_behavior *behavior,
+					struct vm_area_struct *vma,
 					unsigned long start, unsigned long end)
 {
 	struct zap_details details = {
@@ -859,7 +860,7 @@  static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
 		.even_cows = true,
 	};
 
-	zap_page_range_single(vma, start, end - start, &details);
+	notify_unmap_single_vma(behavior->tlb, vma, start, end - start, &details);
 	return 0;
 }
 
@@ -949,7 +950,7 @@  static long madvise_dontneed_free(struct vm_area_struct *vma,
 	}
 
 	if (action == MADV_DONTNEED || action == MADV_DONTNEED_LOCKED)
-		return madvise_dontneed_single_vma(vma, start, end);
+		return madvise_dontneed_single_vma(behavior, vma, start, end);
 	else if (action == MADV_FREE)
 		return madvise_free_single_vma(behavior, vma, start, end);
 	else
@@ -1627,6 +1628,8 @@  static void madvise_unlock(struct mm_struct *mm, int behavior)
 static bool madvise_batch_tlb_flush(int behavior)
 {
 	switch (behavior) {
+	case MADV_DONTNEED:
+	case MADV_DONTNEED_LOCKED:
 	case MADV_FREE:
 		return true;
 	default:
diff --git a/mm/memory.c b/mm/memory.c
index 8c9bbb1a008c..6a01b73aa111 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1989,7 +1989,7 @@  void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
 	mmu_notifier_invalidate_range_end(&range);
 }
 
-/*
+/**
  * notify_unmap_single_vma - remove user pages in a given range
  * @tlb: pointer to the caller's struct mmu_gather
  * @vma: vm_area_struct holding the applicable pages
@@ -2000,7 +2000,7 @@  void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
  * @tlb shouldn't be NULL.  The range must fit into one VMA.  If @vma is for
  * hugetlb, @tlb is flushed and re-initialized by this function.
  */
-static void notify_unmap_single_vma(struct mmu_gather *tlb,
+void notify_unmap_single_vma(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *details)
 {