
[v2] mmu_gather: move tlb flush for VM_PFNMAP/VM_MIXEDMAP vmas into free_pgtables()

Message ID 20250122232716.1321171-1-roman.gushchin@linux.dev (mailing list archive)
State New

Commit Message

Roman Gushchin Jan. 22, 2025, 11:27 p.m. UTC
Commit b67fbebd4cf9 ("mmu_gather: Force tlb-flush VM_PFNMAP vmas")
added a forced TLB flush to tlb_end_vma(), which is required to avoid a
race between munmap() and unmap_mapping_range(). However, it also added
overhead to other paths where tlb_end_vma() is used but vmas are not
removed, e.g. madvise(MADV_DONTNEED).
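
For illustration only (nothing below is part of the patch): a minimal
userspace sketch of the zap-without-unmap path described above. Anonymous
memory stands in for a real VM_PFNMAP/VM_MIXEDMAP device mapping, so this
shows the syscall path rather than reproducing the overhead itself:

  #include <string.h>
  #include <sys/mman.h>

  int main(void)
  {
          size_t len = 16 * 4096;
          char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
          if (p == MAP_FAILED)
                  return 1;
          memset(p, 0xab, len);

          /*
           * Zap the range without removing the vma: this reaches
           * tlb_end_vma() but never free_pgtables(), i.e. the path
           * that previously paid the forced flush for such vmas.
           */
          madvise(p, len, MADV_DONTNEED);

          /* Removal path: the flush now happens in free_pgtables(). */
          munmap(p, len);
          return 0;
  }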

Fix this by moving the TLB flush out of tlb_end_vma() and into
free_pgtables(), similar to what the stable backport of the original
commit did; see stable commit 895428ee124a ("mm: Force TLB flush
for PFNMAP mappings before unlink_file_vma()").

Note that if tlb->fullmm is set, no flush is required, as the whole
mm is about to be destroyed.
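
A toy model, just to make the new flush condition explicit (this is not
kernel code; struct and field names are stand-ins mirroring struct
mmu_gather):

  #include <stdbool.h>
  #include <stdio.h>

  struct toy_gather {
          bool fullmm;  /* whole mm is being torn down (exit/execve) */
          bool vma_pfn; /* a zapped vma had VM_PFNMAP/VM_MIXEDMAP set */
  };

  /* Mirrors the check in tlb_flush_mmu_pfnmap(): flush only if a
   * PFNMAP/MIXEDMAP vma was touched and the mm is not going away. */
  static bool needs_pfnmap_flush(const struct toy_gather *tlb)
  {
          return !tlb->fullmm && tlb->vma_pfn;
  }

  int main(void)
  {
          struct toy_gather munmap_pfn = { .fullmm = false, .vma_pfn = true };
          struct toy_gather exit_pfn   = { .fullmm = true,  .vma_pfn = true };

          printf("munmap() of PFNMAP vma: flush=%d\n",
                 needs_pfnmap_flush(&munmap_pfn));
          printf("exit() with PFNMAP vma: flush=%d\n",
                 needs_pfnmap_flush(&exit_pfn));
          return 0;
  }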

v2:
  - moved vma_pfn flag handling into tlb.h (by Peter Z.)
  - added comments (by Peter Z.)
  - fixed the vma_pfn flag setting (by Hugh D.)

Suggested-by: Jann Horn <jannh@google.com>
Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Nick Piggin <npiggin@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-mm@kvack.org
---
 include/asm-generic/tlb.h | 41 ++++++++++++++++++++++++++-------------
 mm/memory.c               |  7 +++++++
 2 files changed, 35 insertions(+), 13 deletions(-)

Patch

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 709830274b75..fbe31f49a5af 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -449,7 +449,14 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
 	 */
 	tlb->vma_huge = is_vm_hugetlb_page(vma);
 	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
-	tlb->vma_pfn  = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
+
+	/*
+	 * vma_pfn is checked and cleared by tlb_flush_mmu_pfnmap()
+	 * for a set of vmas, so it should be set if at least one vma
+	 * has VM_PFNMAP or VM_MIXEDMAP flags set.
+	 */
+	if (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))
+		tlb->vma_pfn = 1;
 }
 
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
@@ -466,6 +473,22 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 	__tlb_reset_range(tlb);
 }
 
+static inline void tlb_flush_mmu_pfnmap(struct mmu_gather *tlb)
+{
+	/*
+	 * VM_PFNMAP and VM_MIXEDMAP maps are fragile because the core mm
+	 * doesn't track the page mapcount -- there might not be page-frames
+	 * for these PFNs after all. Force flush TLBs for such ranges to avoid
+	 * munmap() vs unmap_mapping_range() races.
+	 * Ensure we have no stale TLB entries by the time this mapping is
+	 * removed from the rmap.
+	 */
+	if (unlikely(!tlb->fullmm && tlb->vma_pfn)) {
+		tlb_flush_mmu_tlbonly(tlb);
+		tlb->vma_pfn = 0;
+	}
+}
+
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 					struct page *page, int page_size)
 {
@@ -549,22 +572,14 @@ static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *
 
 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
-	if (tlb->fullmm)
+	if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
 		return;
 
 	/*
-	 * VM_PFNMAP is more fragile because the core mm will not track the
-	 * page mapcount -- there might not be page-frames for these PFNs after
-	 * all. Force flush TLBs for such ranges to avoid munmap() vs
-	 * unmap_mapping_range() races.
+	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
+	 * the ranges growing with the unused space between consecutive VMAs.
 	 */
-	if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
-		/*
-		 * Do a TLB flush and reset the range at VMA boundaries; this avoids
-		 * the ranges growing with the unused space between consecutive VMAs.
-		 */
-		tlb_flush_mmu_tlbonly(tlb);
-	}
+	tlb_flush_mmu_tlbonly(tlb);
 }
 
 /*
diff --git a/mm/memory.c b/mm/memory.c
index 398c031be9ba..c2a9effb2e32 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -365,6 +365,13 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 {
 	struct unlink_vma_file_batch vb;
 
+	/*
+	 * VM_PFNMAP and VM_MIXEDMAP maps require special handling here:
+	 * force flush TLBs for such ranges to avoid munmap() vs
+	 * unmap_mapping_range() races.
+	 */
+	tlb_flush_mmu_pfnmap(tlb);
+
 	do {
 		unsigned long addr = vma->vm_start;
 		struct vm_area_struct *next;