[4/4] mmu_gather: Force tlb-flush VM_PFNMAP vmas

Message ID: 20220708071834.149930530@infradead.org
State: New
Series: munmap() vs unmap_mapping_range()

Commit Message

Peter Zijlstra July 8, 2022, 7:18 a.m. UTC
Jann reported a race between munmap() and unmap_mapping_range(), where
unmap_mapping_range() will no-op once unmap_vmas() has unlinked the
VMA; however munmap() will not yet have invalidated the TLBs.

Therefore unmap_mapping_range() will complete while there are still
(stale) TLB entries for the specified range.

Mitigate this by force flushing TLBs for VM_PFNMAP ranges.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 include/asm-generic/tlb.h |   33 +++++++++++++++++----------------
 1 file changed, 17 insertions(+), 16 deletions(-)
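
For orientation, the problematic interleaving sketched out; this is an
illustration with the call chain simplified from mm/mmap.c and mm/memory.c,
not an exact trace:

/*
 * CPU 0: munmap()                      CPU 1: unmap_mapping_range()
 *
 * unmap_region()
 *   unmap_vmas()          // PTEs cleared; TLB flush still pending
 *   free_pgtables()
 *     unlink_file_vma()   // VMA leaves the i_mmap rmap tree
 *                                      i_mmap_lock_read(mapping);
 *                                      // VMA not found: zap is a no-op,
 *                                      // returns as if range were clean
 *                                      i_mmap_unlock_read(mapping);
 *                                      // caller may now reuse the PFNs,
 *                                      // yet stale TLB entries remain
 *   tlb_finish_mmu()      // flush only happens here -- too late
 */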

Comments

Will Deacon July 8, 2022, 1:36 p.m. UTC | #1
On Fri, Jul 08, 2022 at 09:18:06AM +0200, Peter Zijlstra wrote:
> Jann reported a race between munmap() and unmap_mapping_range(), where
> unmap_mapping_range() will no-op once unmap_vmas() has unlinked the
> VMA; however munmap() will not yet have invalidated the TLBs.
> 
> Therefore unmap_mapping_range() will complete while there are still
> (stale) TLB entries for the specified range.
> 
> Mitigate this by force flushing TLBs for VM_PFNMAP ranges.
> 
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---
>  include/asm-generic/tlb.h |   33 +++++++++++++++++----------------
>  1 file changed, 17 insertions(+), 16 deletions(-)
> 
> --- a/include/asm-generic/tlb.h
> +++ b/include/asm-generic/tlb.h
> @@ -303,6 +303,7 @@ struct mmu_gather {
>  	 */
>  	unsigned int		vma_exec : 1;
>  	unsigned int		vma_huge : 1;
> +	unsigned int		vma_pfn  : 1;
>  
>  	unsigned int		batch_count;
>  
> @@ -373,7 +374,6 @@ tlb_update_vma_flags(struct mmu_gather *
>  #else /* CONFIG_MMU_GATHER_NO_RANGE */
>  
>  #ifndef tlb_flush
> -
>  /*
>   * When an architecture does not provide its own tlb_flush() implementation
>   * but does have a reasonably efficient flush_vma_range() implementation
> @@ -393,6 +393,9 @@ static inline void tlb_flush(struct mmu_
>  		flush_tlb_range(&vma, tlb->start, tlb->end);
>  	}
>  }
> +#endif
> +
> +#endif /* CONFIG_MMU_GATHER_NO_RANGE */
>  
>  static inline void
>  tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
> @@ -410,17 +413,9 @@ tlb_update_vma_flags(struct mmu_gather *
>  	 */
>  	tlb->vma_huge = is_vm_hugetlb_page(vma);
>  	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
> +	tlb->vma_pfn  = !!(vma->vm_flags & VM_PFNMAP);
>  }
>  
> -#else
> -
> -static inline void
> -tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
> -
> -#endif
> -
> -#endif /* CONFIG_MMU_GATHER_NO_RANGE */
> -
>  static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
>  {
>  	/*
> @@ -507,16 +502,22 @@ static inline void tlb_start_vma(struct
>  
>  static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
>  {
> -	if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
> +	if (tlb->fullmm)
>  		return;
>  
>  	/*
> -	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
> -	 * the ranges growing with the unused space between consecutive VMAs,
> -	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
> -	 * this.
> +	 * VM_PFNMAP is more fragile because the core mm will not track the
> +	 * page mapcount -- there might not be page-frames for these PFNs after
> +	 * all. Force flush TLBs for such ranges to avoid munmap() vs
> +	 * unmap_mapping_range() races.
>  	 */
> -	tlb_flush_mmu_tlbonly(tlb);
> +	if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
> +		/*
> +		 * Do a TLB flush and reset the range at VMA boundaries; this avoids
> +		 * the ranges growing with the unused space between consecutive VMAs.
> +		 */
> +		tlb_flush_mmu_tlbonly(tlb);
> +	}

We already have the vma here, so I'm not sure how much the new 'vma_pfn'
field really buys us over checking the 'vm_flags', but perhaps that's
cleanup for another day.

Acked-by: Will Deacon <will@kernel.org>

Will
Peter Zijlstra July 8, 2022, 2:03 p.m. UTC | #2
On Fri, Jul 08, 2022 at 02:36:06PM +0100, Will Deacon wrote:
> On Fri, Jul 08, 2022 at 09:18:06AM +0200, Peter Zijlstra wrote:

> > @@ -507,16 +502,22 @@ static inline void tlb_start_vma(struct
> >  
> >  static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
> >  {
> > -	if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
> > +	if (tlb->fullmm)
> >  		return;
> >  
> >  	/*
> > -	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
> > -	 * the ranges growing with the unused space between consecutive VMAs,
> > -	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
> > -	 * this.
> > +	 * VM_PFNMAP is more fragile because the core mm will not track the
> > +	 * page mapcount -- there might not be page-frames for these PFNs after
> > +	 * all. Force flush TLBs for such ranges to avoid munmap() vs
> > +	 * unmap_mapping_range() races.
> >  	 */
> > -	tlb_flush_mmu_tlbonly(tlb);
> > +	if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
> > +		/*
> > +		 * Do a TLB flush and reset the range at VMA boundaries; this avoids
> > +		 * the ranges growing with the unused space between consecutive VMAs.
> > +		 */
> > +		tlb_flush_mmu_tlbonly(tlb);
> > +	}
> 
> We already have the vma here, so I'm not sure how much the new 'vma_pfn'
> field really buys us over checking the 'vm_flags', but perhaps that's
> cleanup for another day.

Duh, that's just me being daft again. For some raisin I was convinced
(and failed to check) that we only had the vma at start.

I can easily respin this to not need the extra variable.

How's this then?

---
Subject: mmu_gather: Force tlb-flush VM_PFNMAP vmas
From: Peter Zijlstra <peterz@infradead.org>
Date: Thu Jul 7 11:51:16 CEST 2022

Jann reported a race between munmap() and unmap_mapping_range(), where
unmap_mapping_range() will no-op once unmap_vmas() has unlinked the
VMA; however munmap() will not yet have invalidated the TLBs.

Therefore unmap_mapping_range() will complete while there are still
(stale) TLB entries for the specified range.

Mitigate this by force flushing TLBs for VM_PFNMAP ranges.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 include/asm-generic/tlb.h |   18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -507,16 +507,22 @@ static inline void tlb_start_vma(struct
 
 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
-	if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
+	if (tlb->fullmm)
 		return;
 
 	/*
-	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
-	 * the ranges growing with the unused space between consecutive VMAs,
-	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
-	 * this.
+	 * VM_PFNMAP is more fragile because the core mm will not track the
+	 * page mapcount -- there might not be page-frames for these PFNs after
+	 * all. Force flush TLBs for such ranges to avoid munmap() vs
+	 * unmap_mapping_range() races.
 	 */
-	tlb_flush_mmu_tlbonly(tlb);
+	if ((vma->vm_flags & VM_PFNMAP) || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
+		/*
+		 * Do a TLB flush and reset the range at VMA boundaries; this avoids
+		 * the ranges growing with the unused space between consecutive VMAs.
+		 */
+		tlb_flush_mmu_tlbonly(tlb);
+	}
 }
 
 /*
Jann Horn July 8, 2022, 2:04 p.m. UTC | #3
On Fri, Jul 8, 2022 at 9:19 AM Peter Zijlstra <peterz@infradead.org> wrote:
> Jann reported a race between munmap() and unmap_mapping_range(), where
> unmap_mapping_range() will no-op once unmap_vmas() has unlinked the
> VMA; however munmap() will not yet have invalidated the TLBs.
>
> Therefore unmap_mapping_range() will complete while there are still
> (stale) TLB entries for the specified range.
>
> Mitigate this by force flushing TLBs for VM_PFNMAP ranges.
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---
>  include/asm-generic/tlb.h |   33 +++++++++++++++++----------------
>  1 file changed, 17 insertions(+), 16 deletions(-)
>
> --- a/include/asm-generic/tlb.h
> +++ b/include/asm-generic/tlb.h
> @@ -303,6 +303,7 @@ struct mmu_gather {
>          */
>         unsigned int            vma_exec : 1;
>         unsigned int            vma_huge : 1;
> +       unsigned int            vma_pfn  : 1;
>
>         unsigned int            batch_count;
>
> @@ -373,7 +374,6 @@ tlb_update_vma_flags(struct mmu_gather *
>  #else /* CONFIG_MMU_GATHER_NO_RANGE */
>
>  #ifndef tlb_flush
> -
>  /*
>   * When an architecture does not provide its own tlb_flush() implementation
>   * but does have a reasonably efficient flush_vma_range() implementation
> @@ -393,6 +393,9 @@ static inline void tlb_flush(struct mmu_
>                 flush_tlb_range(&vma, tlb->start, tlb->end);
>         }
>  }
> +#endif
> +
> +#endif /* CONFIG_MMU_GATHER_NO_RANGE */
>
>  static inline void
>  tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
> @@ -410,17 +413,9 @@ tlb_update_vma_flags(struct mmu_gather *
>          */
>         tlb->vma_huge = is_vm_hugetlb_page(vma);
>         tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
> +       tlb->vma_pfn  = !!(vma->vm_flags & VM_PFNMAP);

We should probably handle VM_MIXEDMAP the same way as VM_PFNMAP here,
I think? Conceptually I think the same issue can happen with
device-owned pages that aren't managed by the kernel's page allocator,
and for those, VM_MIXEDMAP is the same as VM_PFNMAP.

>  }
>
> -#else
> -
> -static inline void
> -tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
> -
> -#endif
> -
> -#endif /* CONFIG_MMU_GATHER_NO_RANGE */
> -
>  static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
>  {
>         /*
> @@ -507,16 +502,22 @@ static inline void tlb_start_vma(struct
>
>  static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
>  {
> -       if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
> +       if (tlb->fullmm)
>                 return;

Is this correct, or would there still be a race between MM teardown
(which sets ->fullmm, see exit_mmap()->tlb_gather_mmu_fullmm()) and
unmap_mapping_range()? My understanding is that ->fullmm only
guarantees a flush at tlb_finish_mmu(), but here we're trying to
ensure a flush before unlink_file_vma().

>         /*
> -        * Do a TLB flush and reset the range at VMA boundaries; this avoids
> -        * the ranges growing with the unused space between consecutive VMAs,
> -        * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
> -        * this.
> +        * VM_PFNMAP is more fragile because the core mm will not track the
> +        * page mapcount -- there might not be page-frames for these PFNs after
> +        * all. Force flush TLBs for such ranges to avoid munmap() vs
> +        * unmap_mapping_range() races.

Maybe add: "We do *not* guarantee that after munmap() has passed
through tlb_end_vma(), there are no more stale TLB entries for this
VMA; there could be a parallel PTE-zapping operation that has zapped
PTEs before we looked at them but hasn't done the corresponding TLB
flush yet. However, such a parallel zap can't be done through the
mm_struct (we've unlinked the VMA), so it would have to be done under
the ->i_mmap_sem in read mode, which we synchronize against in
unlink_file_vma()."

I'm not convinced it's particularly nice to do a flush in
tlb_end_vma() when we can't make guarantees about the TLB state wrt
parallel invalidations, and when we only really care about having a
flush between unmap_vmas() and free_pgtables(), but I guess it works?
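
To spell out that ordering, a heavily abridged unmap_region() (simplified
from mm/mmap.c; the real function also takes the predecessor VMA and passes
page-table floor/ceiling to free_pgtables()):

static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);
	unmap_vmas(&tlb, vma, start, end);	/* zap PTEs; tlb_end_vma() per VMA */
	/*
	 * With this patch, VM_PFNMAP ranges have been flushed by
	 * tlb_end_vma() above, i.e. before free_pgtables() ->
	 * unlink_file_vma() hides the VMA from unmap_mapping_range().
	 */
	free_pgtables(&tlb, vma, start, end);
	tlb_finish_mmu(&tlb);			/* final flush for everything else */
}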

>          */
> -       tlb_flush_mmu_tlbonly(tlb);
> +       if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
> +               /*
> +                * Do a TLB flush and reset the range at VMA boundaries; this avoids
> +                * the ranges growing with the unused space between consecutive VMAs.
> +                */
> +               tlb_flush_mmu_tlbonly(tlb);
> +       }
>  }
>
>  /*
>
>
Peter Zijlstra July 9, 2022, 8:38 a.m. UTC | #4
On Fri, Jul 08, 2022 at 04:04:38PM +0200, Jann Horn wrote:
> On Fri, Jul 8, 2022 at 9:19 AM Peter Zijlstra <peterz@infradead.org> wrote:
> > Jann reported a race between munmap() and unmap_mapping_range(), where
> > unmap_mapping_range() will no-op once unmap_vmas() has unlinked the
> > VMA; however munmap() will not yet have invalidated the TLBs.
> >
> > Therefore unmap_mapping_range() will complete while there are still
> > (stale) TLB entries for the specified range.
> >
> > Mitigate this by force flushing TLBs for VM_PFNMAP ranges.
> >
> > Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> > ---
> >  include/asm-generic/tlb.h |   33 +++++++++++++++++----------------
> >  1 file changed, 17 insertions(+), 16 deletions(-)
> >
> > --- a/include/asm-generic/tlb.h
> > +++ b/include/asm-generic/tlb.h
> > @@ -303,6 +303,7 @@ struct mmu_gather {
> >          */
> >         unsigned int            vma_exec : 1;
> >         unsigned int            vma_huge : 1;
> > +       unsigned int            vma_pfn  : 1;
> >
> >         unsigned int            batch_count;
> >
> > @@ -373,7 +374,6 @@ tlb_update_vma_flags(struct mmu_gather *
> >  #else /* CONFIG_MMU_GATHER_NO_RANGE */
> >
> >  #ifndef tlb_flush
> > -
> >  /*
> >   * When an architecture does not provide its own tlb_flush() implementation
> >   * but does have a reasonably efficient flush_vma_range() implementation
> > @@ -393,6 +393,9 @@ static inline void tlb_flush(struct mmu_
> >                 flush_tlb_range(&vma, tlb->start, tlb->end);
> >         }
> >  }
> > +#endif
> > +
> > +#endif /* CONFIG_MMU_GATHER_NO_RANGE */
> >
> >  static inline void
> >  tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
> > @@ -410,17 +413,9 @@ tlb_update_vma_flags(struct mmu_gather *
> >          */
> >         tlb->vma_huge = is_vm_hugetlb_page(vma);
> >         tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
> > +       tlb->vma_pfn  = !!(vma->vm_flags & VM_PFNMAP);
> 
> We should probably handle VM_MIXEDMAP the same way as VM_PFNMAP here,
> I think? Conceptually I think the same issue can happen with
> device-owned pages that aren't managed by the kernel's page allocator,
> and for those, VM_MIXEDMAP is the same as VM_PFNMAP.

Hmm, yeah, that seems to make sense.

> >  }
> >
> > -#else
> > -
> > -static inline void
> > -tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
> > -
> > -#endif
> > -
> > -#endif /* CONFIG_MMU_GATHER_NO_RANGE */
> > -
> >  static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
> >  {
> >         /*
> > @@ -507,16 +502,22 @@ static inline void tlb_start_vma(struct
> >
> >  static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
> >  {
> > -       if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
> > +       if (tlb->fullmm)
> >                 return;
> 
> Is this correct, or would there still be a race between MM teardown
> (which sets ->fullmm, see exit_mmap()->tlb_gather_mmu_fullmm()) and
> unmap_mapping_range()? My understanding is that ->fullmm only
> guarantees a flush at tlb_finish_mmu(), but here we're trying to
> ensure a flush before unlink_file_vma().

fullmm is when the last user of the mm goes away; there should not be
any races on the address space at that time. Also see the comment with
tlb_gather_mmu_fullmm() and its users.
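
For reference, exit_mmap() condensed (simplified from mm/mmap.c, omitting
the oom-reaper synchronization and accounting): with the last user gone,
nothing can race with the teardown, so one flush at the end suffices:

void exit_mmap(struct mm_struct *mm)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma = mm->mmap;

	tlb_gather_mmu_fullmm(&tlb, mm);	/* sets tlb->fullmm */
	unmap_vmas(&tlb, vma, 0, -1);		/* tlb_end_vma() early-returns */
	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb);			/* the single full TLB flush */
}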

> >         /*
> > -        * Do a TLB flush and reset the range at VMA boundaries; this avoids
> > -        * the ranges growing with the unused space between consecutive VMAs,
> > -        * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
> > -        * this.
> > +        * VM_PFNMAP is more fragile because the core mm will not track the
> > +        * page mapcount -- there might not be page-frames for these PFNs after
> > +        * all. Force flush TLBs for such ranges to avoid munmap() vs
> > +        * unmap_mapping_range() races.
> 
> Maybe add: "We do *not* guarantee that after munmap() has passed
                         ^ otherwise?
> through tlb_end_vma(), there are no more stale TLB entries for this
> VMA; there could be a parallel PTE-zapping operation that has zapped
> PTEs before we looked at them but hasn't done the corresponding TLB
> flush yet. However, such a parallel zap can't be done through the
> mm_struct (we've unlinked the VMA), so it would have to be done under
> the ->i_mmap_sem in read mode, which we synchronize against in
> unlink_file_vma()."

Done.

> I'm not convinced it's particularly nice to do a flush in
> tlb_end_vma() when we can't make guarantees about the TLB state wrt
> parallel invalidations, and when we only really care about having a
> flush between unmap_vmas() and free_pgtables(), but I guess it works?

Yeah, none of this is pretty. I despise this whole parallel invalidation
stuff with a passion; we've had ever so many bugs because of that :-(

We could add another mmu_gather callback and place it between
unmap_vmas() and free_pgtables(), but it's a much larger patch and I'm
not entirely sure it's worth the complexity.

OTOH having a callback between freeing pages and freeing page-tables
might not be the worst idea. Let me ponder that for a bit.

Meanwhile; updated patch below.

---
Subject: mmu_gather: Force TLB-flush VM_PFNMAP|VM_MIXEDMAP vmas
From: Peter Zijlstra <peterz@infradead.org>
Date: Thu Jul 7 11:51:16 CEST 2022

Jann reported a race between munmap() and unmap_mapping_range(), where
unmap_mapping_range() will no-op once unmap_vmas() has unlinked the
VMA; however munmap() will not yet have invalidated the TLBs.

Therefore unmap_mapping_range() will complete while there are still
(stale) TLB entries for the specified range.

Mitigate this by force flushing TLBs for VM_PFNMAP|VM_MIXEDMAP ranges.

Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 include/asm-generic/tlb.h |   28 ++++++++++++++++++++++------
 1 file changed, 22 insertions(+), 6 deletions(-)

--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -507,16 +507,32 @@ static inline void tlb_start_vma(struct
 
 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
-	if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
+	if (tlb->fullmm)
 		return;
 
 	/*
-	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
-	 * the ranges growing with the unused space between consecutive VMAs,
-	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
-	 * this.
+	 * VM_PFNMAP|VM_MIXEDMAP is more fragile because the core mm will not
+	 * track the page mapcount -- there might not be page-frames for these
+	 * PFNs after all. Force flush TLBs for such ranges to avoid munmap()
+	 * vs unmap_mapping_range() races.
+	 *
+	 * We do *NOT* guarantee that after munmap() has passed through
+	 * tlb_end_vma(), there are no more stale TLB entries for this VMA;
+	 * there could be a parallel PTE-zapping operation that has zapped PTEs
+	 * before we looked at them but hasn't done the corresponding TLB flush
+	 * yet. However, such a parallel zap can't be done through the
+	 * mm_struct (we've unlinked the VMA), so it would have to be done under
+	 * the ->i_mmap_sem in read mode, which we synchronize against in
+	 * unlink_file_vma().
 	 */
-	tlb_flush_mmu_tlbonly(tlb);
+	if ((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ||
+	    !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
+		/*
+		 * Do a TLB flush and reset the range at VMA boundaries; this avoids
+		 * the ranges growing with the unused space between consecutive VMAs.
+		 */
+		tlb_flush_mmu_tlbonly(tlb);
+	}
 }
 
 /*
Jann Horn July 11, 2022, 3:04 p.m. UTC | #5
On Sat, Jul 9, 2022 at 10:38 AM Peter Zijlstra <peterz@infradead.org> wrote:
> On Fri, Jul 08, 2022 at 04:04:38PM +0200, Jann Horn wrote:
> > On Fri, Jul 8, 2022 at 9:19 AM Peter Zijlstra <peterz@infradead.org> wrote:
> > > @@ -507,16 +502,22 @@ static inline void tlb_start_vma(struct
> > >
> > >  static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
> > >  {
> > > -       if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
> > > +       if (tlb->fullmm)
> > >                 return;
> >
> > Is this correct, or would there still be a race between MM teardown
> > (which sets ->fullmm, see exit_mmap()->tlb_gather_mmu_fullmm()) and
> > unmap_mapping_range()? My understanding is that ->fullmm only
> > guarantees a flush at tlb_finish_mmu(), but here we're trying to
> > ensure a flush before unlink_file_vma().
>
> fullmm is when the last user of the mm goes away, there should not be

(FWIW, there also seems to be an error path in write_ldt ->
free_ldt_pgtables -> tlb_gather_mmu_fullmm where ->fullmm can be set
for a TLB shootdown in a live process, but that's irrelevant for this
patch.)

> any races on the address space at that time. Also see the comment with
> tlb_gather_mmu_fullmm() and its users.

Ah, right, aside from the LDT weirdness, fullmm is only used in
exit_mmap, and at that point there can be no more parallel access to
the MM except for remote memory reaping (which is synchronized against
using mmap_write_lock()) and rmap walks...

> Subject: mmu_gather: Force TLB-flush VM_PFNMAP|VM_MIXEDMAP vmas
> From: Peter Zijlstra <peterz@infradead.org>
> Date: Thu Jul 7 11:51:16 CEST 2022
>
> Jann reported a race between munmap() and unmap_mapping_range(), where
> unmap_mapping_range() will no-op once unmap_vmas() has unlinked the
> VMA; however munmap() will not yet have invalidated the TLBs.
>
> Therefore unmap_mapping_range() will complete while there are still
> (stale) TLB entries for the specified range.
>
> Mitigate this by force flushing TLBs for VM_PFNMAP|VM_MIXEDMAP ranges.
>
> Reported-by: Jann Horn <jannh@google.com>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>

Looks good to me.
Patch

--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -303,6 +303,7 @@ struct mmu_gather {
 	 */
 	unsigned int		vma_exec : 1;
 	unsigned int		vma_huge : 1;
+	unsigned int		vma_pfn  : 1;
 
 	unsigned int		batch_count;
 
@@ -373,7 +374,6 @@ tlb_update_vma_flags(struct mmu_gather *
 #else /* CONFIG_MMU_GATHER_NO_RANGE */
 
 #ifndef tlb_flush
-
 /*
  * When an architecture does not provide its own tlb_flush() implementation
  * but does have a reasonably efficient flush_vma_range() implementation
@@ -393,6 +393,9 @@ static inline void tlb_flush(struct mmu_
 		flush_tlb_range(&vma, tlb->start, tlb->end);
 	}
 }
+#endif
+
+#endif /* CONFIG_MMU_GATHER_NO_RANGE */
 
 static inline void
 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
@@ -410,17 +413,9 @@ tlb_update_vma_flags(struct mmu_gather *
 	 */
 	tlb->vma_huge = is_vm_hugetlb_page(vma);
 	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
+	tlb->vma_pfn  = !!(vma->vm_flags & VM_PFNMAP);
 }
 
-#else
-
-static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
-#endif
-
-#endif /* CONFIG_MMU_GATHER_NO_RANGE */
-
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	/*
@@ -507,16 +502,22 @@ static inline void tlb_start_vma(struct
 
 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
-	if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
+	if (tlb->fullmm)
 		return;
 
 	/*
-	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
-	 * the ranges growing with the unused space between consecutive VMAs,
-	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
-	 * this.
+	 * VM_PFNMAP is more fragile because the core mm will not track the
+	 * page mapcount -- there might not be page-frames for these PFNs after
+	 * all. Force flush TLBs for such ranges to avoid munmap() vs
+	 * unmap_mapping_range() races.
 	 */
-	tlb_flush_mmu_tlbonly(tlb);
+	if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
+		/*
+		 * Do a TLB flush and reset the range at VMA boundaries; this avoids
+		 * the ranges growing with the unused space between consecutive VMAs.
+		 */
+		tlb_flush_mmu_tlbonly(tlb);
+	}
 }
 
 /*