
[v2,2/4] mm: Support tlbbatch flush for a range of PTEs

Message ID 20250113033901.68951-3-21cnbao@gmail.com (mailing list archive)
State Superseded
Series mm: batched unmap lazyfree large folios during reclamation

Checks

Context Check Description
conchuod/vmtest-for-next-PR success PR summary
conchuod/patch-2-test-1 success .github/scripts/patches/tests/build_rv32_defconfig.sh took 106.01s
conchuod/patch-2-test-2 success .github/scripts/patches/tests/build_rv64_clang_allmodconfig.sh took 2035.07s
conchuod/patch-2-test-3 success .github/scripts/patches/tests/build_rv64_gcc_allmodconfig.sh took 2397.68s
conchuod/patch-2-test-4 success .github/scripts/patches/tests/build_rv64_nommu_k210_defconfig.sh took 16.41s
conchuod/patch-2-test-5 success .github/scripts/patches/tests/build_rv64_nommu_virt_defconfig.sh took 18.16s
conchuod/patch-2-test-6 warning .github/scripts/patches/tests/checkpatch.sh took 1.33s
conchuod/patch-2-test-7 success .github/scripts/patches/tests/dtb_warn_rv64.sh took 37.41s
conchuod/patch-2-test-8 success .github/scripts/patches/tests/header_inline.sh took 0.01s
conchuod/patch-2-test-9 success .github/scripts/patches/tests/kdoc.sh took 0.57s
conchuod/patch-2-test-10 success .github/scripts/patches/tests/module_param.sh took 0.02s
conchuod/patch-2-test-11 success .github/scripts/patches/tests/verify_fixes.sh took 0.00s
conchuod/patch-2-test-12 success .github/scripts/patches/tests/verify_signedoff.sh took 0.02s

Commit Message

Barry Song Jan. 13, 2025, 3:38 a.m. UTC
From: Barry Song <v-songbaohua@oppo.com>

This is a preparatory patch to support batch PTE unmapping in
`try_to_unmap_one`. It first introduces range handling for
`tlbbatch` flush. Currently, the range is always set to PAGE_SIZE.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Shaoqin Huang <shahuang@redhat.com>
Cc: Gavin Shan <gshan@redhat.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Yicong Yang <yangyicong@hisilicon.com>
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
---
 arch/arm64/include/asm/tlbflush.h | 26 ++++++++++++++------------
 arch/arm64/mm/contpte.c           |  2 +-
 arch/riscv/include/asm/tlbflush.h |  3 ++-
 arch/riscv/mm/tlbflush.c          |  3 ++-
 arch/x86/include/asm/tlbflush.h   |  3 ++-
 mm/rmap.c                         | 12 +++++++-----
 6 files changed, 28 insertions(+), 21 deletions(-)
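
For context, a minimal sketch of how a caller could use the widened
interface to defer one flush that covers a whole large folio rather than a
single page. This is not part of this patch (later patches in the series
are expected to wire it up) and the helper below is hypothetical:

/*
 * Hypothetical caller: defer a single TLB flush entry spanning all
 * PTEs of a large folio.  mm/rmap.c itself still passes PAGE_SIZE here.
 */
static void defer_folio_tlb_flush(struct mm_struct *mm, pte_t pteval,
		unsigned long address, struct folio *folio)
{
	/* the deferred range is folio_size(folio) bytes from 'address' */
	set_tlb_ubc_flush_pending(mm, pteval, address, folio_size(folio));
}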

Comments

Will Deacon Jan. 13, 2025, 4:48 p.m. UTC | #1
On Mon, Jan 13, 2025 at 04:38:59PM +1300, Barry Song wrote:
> From: Barry Song <v-songbaohua@oppo.com>
> 
> This is a preparatory patch to support batch PTE unmapping in
> `try_to_unmap_one`. It first introduces range handling for
> `tlbbatch` flush. Currently, the range is always set to PAGE_SIZE.
> 
> [...]
> Signed-off-by: Barry Song <v-songbaohua@oppo.com>
> ---
>  arch/arm64/include/asm/tlbflush.h | 26 ++++++++++++++------------
>  arch/arm64/mm/contpte.c           |  2 +-
>  arch/riscv/include/asm/tlbflush.h |  3 ++-
>  arch/riscv/mm/tlbflush.c          |  3 ++-
>  arch/x86/include/asm/tlbflush.h   |  3 ++-
>  mm/rmap.c                         | 12 +++++++-----
>  6 files changed, 28 insertions(+), 21 deletions(-)

The arm64 bits look correct to me:

Acked-by: Will Deacon <will@kernel.org>

Will
David Hildenbrand Jan. 14, 2025, 9:52 a.m. UTC | #2
On 13.01.25 04:38, Barry Song wrote:
> From: Barry Song <v-songbaohua@oppo.com>
> 
> This is a preparatory patch to support batch PTE unmapping in
> `try_to_unmap_one`. It first introduces range handling for
> `tlbbatch` flush. Currently, the range is always set to PAGE_SIZE.

You could have spelled out why you perform the VMA -> MM change.

> 
> [...]
> Signed-off-by: Barry Song <v-songbaohua@oppo.com>
> ---
>   arch/arm64/include/asm/tlbflush.h | 26 ++++++++++++++------------
>   arch/arm64/mm/contpte.c           |  2 +-
>   arch/riscv/include/asm/tlbflush.h |  3 ++-
>   arch/riscv/mm/tlbflush.c          |  3 ++-
>   arch/x86/include/asm/tlbflush.h   |  3 ++-
>   mm/rmap.c                         | 12 +++++++-----
>   6 files changed, 28 insertions(+), 21 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
> index bc94e036a26b..f34e4fab5aa2 100644
> --- a/arch/arm64/include/asm/tlbflush.h
> +++ b/arch/arm64/include/asm/tlbflush.h
> @@ -322,13 +322,6 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
>   	return true;
>   }
>   
> -static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
> -					     struct mm_struct *mm,
> -					     unsigned long uaddr)
> -{
> -	__flush_tlb_page_nosync(mm, uaddr);
> -}
> -
>   /*
>    * If mprotect/munmap/etc occurs during TLB batched flushing, we need to
>    * synchronise all the TLBI issued with a DSB to avoid the race mentioned in
> @@ -448,7 +441,7 @@ static inline bool __flush_tlb_range_limit_excess(unsigned long start,
>   	return false;
>   }
>   
> -static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
> +static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
>   				     unsigned long start, unsigned long end,
>   				     unsigned long stride, bool last_level,
>   				     int tlb_level)
> @@ -460,12 +453,12 @@ static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
>   	pages = (end - start) >> PAGE_SHIFT;
>   
>   	if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
> -		flush_tlb_mm(vma->vm_mm);
> +		flush_tlb_mm(mm);
>   		return;
>   	}
>   
>   	dsb(ishst);
> -	asid = ASID(vma->vm_mm);
> +	asid = ASID(mm);
>   
>   	if (last_level)
>   		__flush_tlb_range_op(vale1is, start, pages, stride, asid,
> @@ -474,7 +467,7 @@ static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
>   		__flush_tlb_range_op(vae1is, start, pages, stride, asid,
>   				     tlb_level, true, lpa2_is_enabled());
>   
> -	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
> +	mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
>   }
>   
>   static inline void __flush_tlb_range(struct vm_area_struct *vma,
> @@ -482,7 +475,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
>   				     unsigned long stride, bool last_level,
>   				     int tlb_level)
>   {
> -	__flush_tlb_range_nosync(vma, start, end, stride,
> +	__flush_tlb_range_nosync(vma->vm_mm, start, end, stride,
>   				 last_level, tlb_level);
>   	dsb(ish);
>   }
> @@ -533,6 +526,15 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
>   	dsb(ish);
>   	isb();
>   }
> +
> +static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
> +					     struct mm_struct *mm,
> +					     unsigned long uaddr,
> +					     unsigned long size)
> +{
> +	__flush_tlb_range_nosync(mm, uaddr, uaddr + size,
> +				 PAGE_SIZE, true, 3);
> +}
>   #endif
>   
>   #endif
> diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
> index 55107d27d3f8..bcac4f55f9c1 100644
> --- a/arch/arm64/mm/contpte.c
> +++ b/arch/arm64/mm/contpte.c
> @@ -335,7 +335,7 @@ int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
>   		 * eliding the trailing DSB applies here.
>   		 */
>   		addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
> -		__flush_tlb_range_nosync(vma, addr, addr + CONT_PTE_SIZE,
> +		__flush_tlb_range_nosync(vma->vm_mm, addr, addr + CONT_PTE_SIZE,
>   					 PAGE_SIZE, true, 3);
>   	}
>   
> diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
> index 72e559934952..7f3ea687ce33 100644
> --- a/arch/riscv/include/asm/tlbflush.h
> +++ b/arch/riscv/include/asm/tlbflush.h
> @@ -61,7 +61,8 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
>   bool arch_tlbbatch_should_defer(struct mm_struct *mm);
>   void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
>   			       struct mm_struct *mm,
> -			       unsigned long uaddr);
> +			       unsigned long uaddr,
> +			       unsigned long size);

While we're at it, can we just convert this to the "two tabs of
indentation starting on the second parameter line" way of doing things?
Same for all other cases (at least in core code, if some arch wants its
own weird rules on how to handle these things).
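
For illustration, a sketch of the two continuation-indentation styles
being contrasted (the first matches the patch; the second is the suggested
form):

void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
			       struct mm_struct *mm,
			       unsigned long uaddr,
			       unsigned long size);

void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
		struct mm_struct *mm, unsigned long uaddr,
		unsigned long size);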

[...]

>   	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
>   	int batch;
> @@ -681,7 +682,7 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
>   	if (!pte_accessible(mm, pteval))
>   		return;
>   
> -	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
> +	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr, size);
>   	tlb_ubc->flush_required = true;
>   
>   	/*
> @@ -757,7 +758,8 @@ void flush_tlb_batched_pending(struct mm_struct *mm)
>   }
>   #else
>   static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
> -				      unsigned long uaddr)
> +				      unsigned long uaddr,
> +				      unsigned long size)

I'll note that most TLB functions seem to consume start+end instead of
start+size, like

flush_tlb_mm_range()
flush_tlb_kernel_range()

So I'm wondering if this should be start+end instead of uaddr+size.
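
A sketch of the two possible prototypes (the second mirrors range helpers
such as flush_tlb_kernel_range(start, end); both are illustrative, not
code from the patch):

/* as in this patch: address plus size in bytes */
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
		unsigned long uaddr, unsigned long size);

/* alternative being suggested: half-open [start, end) range */
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
		unsigned long start, unsigned long end);
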
Barry Song Jan. 14, 2025, 10:37 a.m. UTC | #3
On Tue, Jan 14, 2025 at 10:52 PM David Hildenbrand <david@redhat.com> wrote:
>
> On 13.01.25 04:38, Barry Song wrote:
> > From: Barry Song <v-songbaohua@oppo.com>
> >
> > This is a preparatory patch to support batch PTE unmapping in
> > `try_to_unmap_one`. It first introduces range handling for
> > `tlbbatch` flush. Currently, the range is always set to PAGE_SIZE.
>
> You could have spelled out why you perform the VMA -> MM change.

Sure, I’ll include some additional details in v3.

>
> >
> > [...]
> > Signed-off-by: Barry Song <v-songbaohua@oppo.com>
> > ---
> >   arch/arm64/include/asm/tlbflush.h | 26 ++++++++++++++------------
> >   arch/arm64/mm/contpte.c           |  2 +-
> >   arch/riscv/include/asm/tlbflush.h |  3 ++-
> >   arch/riscv/mm/tlbflush.c          |  3 ++-
> >   arch/x86/include/asm/tlbflush.h   |  3 ++-
> >   mm/rmap.c                         | 12 +++++++-----
> >   6 files changed, 28 insertions(+), 21 deletions(-)
> >
> > [...]
> >
> > diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
> > index 72e559934952..7f3ea687ce33 100644
> > --- a/arch/riscv/include/asm/tlbflush.h
> > +++ b/arch/riscv/include/asm/tlbflush.h
> > @@ -61,7 +61,8 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
> >   bool arch_tlbbatch_should_defer(struct mm_struct *mm);
> >   void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
> >                              struct mm_struct *mm,
> > -                            unsigned long uaddr);
> > +                            unsigned long uaddr,
> > +                            unsigned long size);
>
> While we're at it, can we just convert this to the "two tabs of
> indentation starting on the second parameter line" way of doing things?
> Same for all other cases (at least in core code, if some arch wants its
> own weird rules on how to handle these things).

Sure.

>
> [...]
>
> >       struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
> >       int batch;
> > @@ -681,7 +682,7 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
> >       if (!pte_accessible(mm, pteval))
> >               return;
> >
> > -     arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
> > +     arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr, size);
> >       tlb_ubc->flush_required = true;
> >
> >       /*
> > @@ -757,7 +758,8 @@ void flush_tlb_batched_pending(struct mm_struct *mm)
> >   }
> >   #else
> >   static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
> > -                                   unsigned long uaddr)
> > +                                   unsigned long uaddr,
> > +                                   unsigned long size)
>
> I'll note that most TLB functions seem to consume start+end instead of
> start+size, like
>
> flush_tlb_mm_range()
> flush_tlb_kernel_range()
>
> So I'm wondering if this should be start+end instead of uaddr+size.

For some reason, I can’t recall why I originally named it "uaddr" instead
of "address" at:
https://lore.kernel.org/lkml/20220707125242.425242-4-21cnbao@gmail.com/

The name seems a bit odd now.

It's probably because the arm64 functions listed below are using "uaddr":

static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
                                           unsigned long uaddr);
static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long uaddr);

At that time, we only supported a single page for TLB deferred shootdown.

Regarding set_tlb_ubc_flush_pending(), I’m fine with either approach:
1. start, size
2. start, end

For option 1, it is eventually converted to (start, start + size) when calling
__flush_tlb_range_nosync().

But if we convert to (start, end), it seems to align with

static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end);

So if there is no objection from Will, I will move to (start, end)
in v3.
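
A minimal sketch of where the conversion happens under option 1, based on
the arm64 hook in this patch:

/* option 1: mm/rmap.c passes (uaddr, size) to the arch hook ... */
arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr, size);

/* ... which on arm64 turns it into a [start, end) range flush */
__flush_tlb_range_nosync(mm, uaddr, uaddr + size, PAGE_SIZE, true, 3);
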

>
> --
> Cheers,
>
> David / dhildenb
>

Thanks
Barry

Patch

diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index bc94e036a26b..f34e4fab5aa2 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -322,13 +322,6 @@  static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
 	return true;
 }
 
-static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
-					     struct mm_struct *mm,
-					     unsigned long uaddr)
-{
-	__flush_tlb_page_nosync(mm, uaddr);
-}
-
 /*
  * If mprotect/munmap/etc occurs during TLB batched flushing, we need to
  * synchronise all the TLBI issued with a DSB to avoid the race mentioned in
@@ -448,7 +441,7 @@  static inline bool __flush_tlb_range_limit_excess(unsigned long start,
 	return false;
 }
 
-static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
+static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
 				     unsigned long start, unsigned long end,
 				     unsigned long stride, bool last_level,
 				     int tlb_level)
@@ -460,12 +453,12 @@  static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
 	pages = (end - start) >> PAGE_SHIFT;
 
 	if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
-		flush_tlb_mm(vma->vm_mm);
+		flush_tlb_mm(mm);
 		return;
 	}
 
 	dsb(ishst);
-	asid = ASID(vma->vm_mm);
+	asid = ASID(mm);
 
 	if (last_level)
 		__flush_tlb_range_op(vale1is, start, pages, stride, asid,
@@ -474,7 +467,7 @@  static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
 		__flush_tlb_range_op(vae1is, start, pages, stride, asid,
 				     tlb_level, true, lpa2_is_enabled());
 
-	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
+	mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
 }
 
 static inline void __flush_tlb_range(struct vm_area_struct *vma,
@@ -482,7 +475,7 @@  static inline void __flush_tlb_range(struct vm_area_struct *vma,
 				     unsigned long stride, bool last_level,
 				     int tlb_level)
 {
-	__flush_tlb_range_nosync(vma, start, end, stride,
+	__flush_tlb_range_nosync(vma->vm_mm, start, end, stride,
 				 last_level, tlb_level);
 	dsb(ish);
 }
@@ -533,6 +526,15 @@  static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
 	dsb(ish);
 	isb();
 }
+
+static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+					     struct mm_struct *mm,
+					     unsigned long uaddr,
+					     unsigned long size)
+{
+	__flush_tlb_range_nosync(mm, uaddr, uaddr + size,
+				 PAGE_SIZE, true, 3);
+}
 #endif
 
 #endif
diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
index 55107d27d3f8..bcac4f55f9c1 100644
--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c
@@ -335,7 +335,7 @@  int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
 		 * eliding the trailing DSB applies here.
 		 */
 		addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
-		__flush_tlb_range_nosync(vma, addr, addr + CONT_PTE_SIZE,
+		__flush_tlb_range_nosync(vma->vm_mm, addr, addr + CONT_PTE_SIZE,
 					 PAGE_SIZE, true, 3);
 	}
 
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 72e559934952..7f3ea687ce33 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -61,7 +61,8 @@  void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 bool arch_tlbbatch_should_defer(struct mm_struct *mm);
 void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
 			       struct mm_struct *mm,
-			       unsigned long uaddr);
+			       unsigned long uaddr,
+			       unsigned long size);
 void arch_flush_tlb_batched_pending(struct mm_struct *mm);
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 9b6e86ce3867..aeda64a36d50 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -187,7 +187,8 @@  bool arch_tlbbatch_should_defer(struct mm_struct *mm)
 
 void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
 			       struct mm_struct *mm,
-			       unsigned long uaddr)
+			       unsigned long uaddr,
+			       unsigned long size)
 {
 	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
 }
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 69e79fff41b8..4b62a6329b8f 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -279,7 +279,8 @@  static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 
 static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
 					     struct mm_struct *mm,
-					     unsigned long uaddr)
+					     unsigned long uaddr,
+					     unsigned long size)
 {
 	inc_mm_tlb_gen(mm);
 	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
diff --git a/mm/rmap.c b/mm/rmap.c
index de6b8c34e98c..365112af5291 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -672,7 +672,8 @@  void try_to_unmap_flush_dirty(void)
 	(TLB_FLUSH_BATCH_PENDING_MASK / 2)
 
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
-				      unsigned long uaddr)
+				      unsigned long uaddr,
+				      unsigned long size)
 {
 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 	int batch;
@@ -681,7 +682,7 @@  static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
 	if (!pte_accessible(mm, pteval))
 		return;
 
-	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
+	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr, size);
 	tlb_ubc->flush_required = true;
 
 	/*
@@ -757,7 +758,8 @@  void flush_tlb_batched_pending(struct mm_struct *mm)
 }
 #else
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
-				      unsigned long uaddr)
+				      unsigned long uaddr,
+				      unsigned long size)
 {
 }
 
@@ -1792,7 +1794,7 @@  static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 				 */
 				pteval = ptep_get_and_clear(mm, address, pvmw.pte);
 
-				set_tlb_ubc_flush_pending(mm, pteval, address);
+				set_tlb_ubc_flush_pending(mm, pteval, address, PAGE_SIZE);
 			} else {
 				pteval = ptep_clear_flush(vma, address, pvmw.pte);
 			}
@@ -2164,7 +2166,7 @@  static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 				 */
 				pteval = ptep_get_and_clear(mm, address, pvmw.pte);
 
-				set_tlb_ubc_flush_pending(mm, pteval, address);
+				set_tlb_ubc_flush_pending(mm, pteval, address, PAGE_SIZE);
 			} else {
 				pteval = ptep_clear_flush(vma, address, pvmw.pte);
 			}