
[3/4] mm: rmap: Extend tlbbatch APIs to fit new platforms

Message ID 20220707125242.425242-4-21cnbao@gmail.com (mailing list archive)
State New
Series mm: arm64: bring up BATCHED_UNMAP_TLB_FLUSH

Commit Message

Barry Song July 7, 2022, 12:52 p.m. UTC
From: Barry Song <v-songbaohua@oppo.com>

Add vma and uaddr to the tlbbatch APIs so that platforms such as ARM64
can make use of these two parameters for their specific hardware
features. For ARM64, that could mean sending a TLBI to the hardware
queues without waiting for its completion.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Nadav Amit <namit@vmware.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
---
 arch/x86/include/asm/tlbflush.h |  4 +++-
 mm/rmap.c                       | 12 ++++++++----
 2 files changed, 11 insertions(+), 5 deletions(-)
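
For context, the ARM64 consumer of the new arguments lives in patch 4/4
rather than here. Below is a minimal sketch of what such a backend might
look like, assuming arm64's existing __TLBI_VADDR/__tlbi/__tlbi_user
helpers and the ASID() macro from asm/tlbflush.h; it illustrates the idea
and is not the actual 4/4 code:

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long uaddr)
{
	/*
	 * Queue a by-VA TLBI for uaddr in mm's ASID without waiting for
	 * it to complete; the DSB only orders the preceding PTE update
	 * against the TLBI.
	 */
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm));

	dsb(ishst);
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
}

The batch and vma arguments are unused in this sketch; uaddr alone is
enough to target the invalidation at a single page.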

Comments

Dave Hansen July 7, 2022, 4:43 p.m. UTC | #1
On 7/7/22 05:52, Barry Song wrote:
>  static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
> -					struct mm_struct *mm)
> +					struct mm_struct *mm,
> +					struct vm_area_struct *vma,
> +					unsigned long uaddr)
>  {

It's not a huge deal, but could we pass 'vma' _instead_ of 'mm'?  The
implementations could then just use vma->vm_mm instead of the passed-in mm.
Barry Song July 7, 2022, 9:12 p.m. UTC | #2
On Fri, Jul 8, 2022 at 4:43 AM Dave Hansen <dave.hansen@intel.com> wrote:
>
> On 7/7/22 05:52, Barry Song wrote:
> >  static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
> > -                                     struct mm_struct *mm)
> > +                                     struct mm_struct *mm,
> > +                                     struct vm_area_struct *vma,
> > +                                     unsigned long uaddr)
> >  {
>
> It's not a huge deal, but could we pass 'vma' _instead_ of 'mm'?  The
> implementations could then just use vma->vm_mm instead of the passed-in mm.

Yes, Dave. Peter made the same suggestion in 4/4.
will get this fixed in v2.

Thanks
Barry
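
For reference, the shape being suggested for v2 would drop the mm argument
and let each implementation derive it from the VMA. A sketch of the x86
side under that interface, assuming vma->vm_mm carries the same mm that is
passed today:

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct vm_area_struct *vma,
					unsigned long uaddr)
{
	struct mm_struct *mm = vma->vm_mm;

	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

The caller in mm/rmap.c would then pass only (vma, address), mirroring the
set_tlb_ubc_flush_pending() change in the patch below.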

Patch

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 4af5579c7ef7..9fc48c103b31 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -251,7 +251,9 @@  static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 }
 
 static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
-					struct mm_struct *mm)
+					struct mm_struct *mm,
+					struct vm_area_struct *vma,
+					unsigned long uaddr)
 {
 	inc_mm_tlb_gen(mm);
 	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
diff --git a/mm/rmap.c b/mm/rmap.c
index d320c29a4ad8..2b5b740d0001 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -642,12 +642,14 @@  void try_to_unmap_flush_dirty(void)
 #define TLB_FLUSH_BATCH_PENDING_LARGE			\
 	(TLB_FLUSH_BATCH_PENDING_MASK / 2)
 
-static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable,
+				      struct vm_area_struct *vma,
+				      unsigned long uaddr)
 {
 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 	int batch, nbatch;
 
-	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
+	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm, vma, uaddr);
 	tlb_ubc->flush_required = true;
 
 	/*
@@ -737,7 +739,9 @@  void flush_tlb_batched_pending(struct mm_struct *mm)
 	}
 }
 #else
-static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable,
+				      struct vm_area_struct *vma,
+				      unsigned long uaddr)
 {
 }
 
@@ -1600,7 +1604,7 @@  static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 				 */
 				pteval = ptep_get_and_clear(mm, address, pvmw.pte);
 
-				set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
+				set_tlb_ubc_flush_pending(mm, pte_dirty(pteval), vma, address);
 			} else {
 				pteval = ptep_clear_flush(vma, address, pvmw.pte);
 			}
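
To connect this with the "without waiting for its completion" wording in
the commit message: the deferred TLBIs still have to be waited on once the
batch is flushed. A hedged guess at what the arm64 completion hook in 4/4
might reduce to, assuming all the per-page work was already queued in
arch_tlbbatch_add_mm():

static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	/* Wait for every TLBI queued for this batch to finish. */
	dsb(ish);
}

try_to_unmap_flush() in mm/rmap.c calls arch_tlbbatch_flush(), so a single
barrier here would complete all the invalidations queued above.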