
[v3,3/4] mm: rmap: Extend tlbbatch APIs to fit new platforms

Message ID 20220822082120.8347-4-yangyicong@huawei.com (mailing list archive)
State Superseded
Series mm: arm64: bring up BATCHED_UNMAP_TLB_FLUSH

Commit Message

Yicong Yang Aug. 22, 2022, 8:21 a.m. UTC
From: Barry Song <v-songbaohua@oppo.com>

Add a uaddr argument to the tlbbatch APIs so that platforms like ARM64 are
able to apply them to their specific hardware features. For
ARM64, this could mean queueing a tlbi in hardware for
the page at this particular uaddr.
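
As an illustrative sketch only (the real ARM64 hook is added in the
next patch of this series), the arch callback could consume uaddr
roughly as below, queueing a per-page invalidation at unmap time;
__TLBI_VADDR(), ASID() and the __tlbi*() helpers are the existing
arm64 tlbflush.h primitives:

/*
 * Sketch, not the final 4/4 patch: issue a last-level, inner-shareable
 * TLBI for just this page instead of deferring a whole-mm flush.
 */
static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm,
					unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm));

	dsb(ishst);		/* order the PTE clear before the TLBI */
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
}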

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Nadav Amit <namit@vmware.com>
Cc: Mel Gorman <mgorman@suse.de>
Tested-by: Xin Hao <xhao@linux.alibaba.com>
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
---
 arch/x86/include/asm/tlbflush.h |  3 ++-
 mm/rmap.c                       | 10 ++++++----
 2 files changed, 8 insertions(+), 5 deletions(-)

Comments

Kefeng Wang Aug. 24, 2022, 9:43 a.m. UTC | #1
On 2022/8/22 16:21, Yicong Yang wrote:
> From: Barry Song <v-songbaohua@oppo.com>
>
> Add a uaddr argument to the tlbbatch APIs so that platforms like ARM64 are
> able to apply them to their specific hardware features. For
> ARM64, this could mean queueing a tlbi in hardware for
> the page at this particular uaddr.
>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Cc: Ingo Molnar <mingo@redhat.com>
> Cc: Borislav Petkov <bp@alien8.de>
> Cc: Dave Hansen <dave.hansen@linux.intel.com>
> Cc: "H. Peter Anvin" <hpa@zytor.com>
> Cc: Nadav Amit <namit@vmware.com>
> Cc: Mel Gorman <mgorman@suse.de>
> Tested-by: Xin Hao <xhao@linux.alibaba.com>
> Signed-off-by: Barry Song <v-songbaohua@oppo.com>
> Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>   arch/x86/include/asm/tlbflush.h |  3 ++-
>   mm/rmap.c                       | 10 ++++++----
>   2 files changed, 8 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
> index 8a497d902c16..5bd78ae55cd4 100644
> --- a/arch/x86/include/asm/tlbflush.h
> +++ b/arch/x86/include/asm/tlbflush.h
> @@ -264,7 +264,8 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
>   }
>   
>   static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
> -					struct mm_struct *mm)
> +					struct mm_struct *mm,
> +					unsigned long uaddr)
>   {
>   	inc_mm_tlb_gen(mm);
>   	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
> diff --git a/mm/rmap.c b/mm/rmap.c
> index a17a004550c6..7187a72b63b1 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -642,12 +642,13 @@ void try_to_unmap_flush_dirty(void)
>   #define TLB_FLUSH_BATCH_PENDING_LARGE			\
>   	(TLB_FLUSH_BATCH_PENDING_MASK / 2)
>   
> -static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
> +static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable,
> +				      unsigned long uaddr)
>   {
>   	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
>   	int batch, nbatch;
>   
> -	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
> +	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm, uaddr);
>   	tlb_ubc->flush_required = true;
>   
>   	/*
> @@ -725,7 +726,8 @@ void flush_tlb_batched_pending(struct mm_struct *mm)
>   	}
>   }
>   #else
> -static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
> +static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable,
> +				      unsigned long uaddr)
>   {
>   }
>   
> @@ -1587,7 +1589,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>   				 */
>   				pteval = ptep_get_and_clear(mm, address, pvmw.pte);
>   
> -				set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
> +				set_tlb_ubc_flush_pending(mm, pte_dirty(pteval), address);
>   			} else {
>   				pteval = ptep_clear_flush(vma, address, pvmw.pte);
>   			}
Anshuman Khandual Sept. 9, 2022, 4:51 a.m. UTC | #2
On 8/22/22 13:51, Yicong Yang wrote:
> From: Barry Song <v-songbaohua@oppo.com>
> 
> Add a uaddr argument to the tlbbatch APIs so that platforms like ARM64 are

I guess 'uaddr' refers to a virtual address from the process address
space itself? Please be more specific.

> able to apply them to their specific hardware features. For
> ARM64, this could mean queueing a tlbi in hardware for
> the page at this particular uaddr.

The subject line and commit message are misleading. The patch
adds an address argument to the arch callback arch_tlbbatch_add_mm(),
as the arm64 platform could use that to perform TLB flush batching?

This patch can be folded into the next one, so that the requirement
for the additional 'uaddr' argument in the arch callback will be
self-evident. OR, if this is going to be a preparatory patch, it must
explain how the 'uaddr' argument is helpful on platforms like arm64
while performing TLB flush batching. But TBH, just folding it into
the next patch explains the context better.

> 
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Cc: Ingo Molnar <mingo@redhat.com>
> Cc: Borislav Petkov <bp@alien8.de>
> Cc: Dave Hansen <dave.hansen@linux.intel.com>
> Cc: "H. Peter Anvin" <hpa@zytor.com>
> Cc: Nadav Amit <namit@vmware.com>
> Cc: Mel Gorman <mgorman@suse.de>
> Tested-by: Xin Hao <xhao@linux.alibaba.com>
> Signed-off-by: Barry Song <v-songbaohua@oppo.com>
> Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
> ---
>  arch/x86/include/asm/tlbflush.h |  3 ++-
>  mm/rmap.c                       | 10 ++++++----
>  2 files changed, 8 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
> index 8a497d902c16..5bd78ae55cd4 100644
> --- a/arch/x86/include/asm/tlbflush.h
> +++ b/arch/x86/include/asm/tlbflush.h
> @@ -264,7 +264,8 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
>  }
>  
>  static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
> -					struct mm_struct *mm)
> +					struct mm_struct *mm,
> +					unsigned long uaddr)
>  {
>  	inc_mm_tlb_gen(mm);
>  	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
> diff --git a/mm/rmap.c b/mm/rmap.c
> index a17a004550c6..7187a72b63b1 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -642,12 +642,13 @@ void try_to_unmap_flush_dirty(void)
>  #define TLB_FLUSH_BATCH_PENDING_LARGE			\
>  	(TLB_FLUSH_BATCH_PENDING_MASK / 2)
>  
> -static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
> +static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable,
> +				      unsigned long uaddr)
>  {
>  	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
>  	int batch, nbatch;
>  
> -	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
> +	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm, uaddr);
>  	tlb_ubc->flush_required = true;
>  
>  	/*
> @@ -725,7 +726,8 @@ void flush_tlb_batched_pending(struct mm_struct *mm)
>  	}
>  }
>  #else
> -static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
> +static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable,
> +				      unsigned long uaddr)
>  {
>  }
>  
> @@ -1587,7 +1589,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>  				 */
>  				pteval = ptep_get_and_clear(mm, address, pvmw.pte);
>  
> -				set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
> +				set_tlb_ubc_flush_pending(mm, pte_dirty(pteval), address);
>  			} else {
>  				pteval = ptep_clear_flush(vma, address, pvmw.pte);
>  			}
Barry Song Sept. 9, 2022, 5:25 a.m. UTC | #3
On Fri, Sep 9, 2022 at 4:51 PM Anshuman Khandual
<anshuman.khandual@arm.com> wrote:
>
>
>
> On 8/22/22 13:51, Yicong Yang wrote:
> > From: Barry Song <v-songbaohua@oppo.com>
> >
> > Add a uaddr argument to the tlbbatch APIs so that platforms like ARM64 are
>
> I guess 'uaddr' refers to a virtual address from the process address
> space itself? Please be more specific.
>
> > able to apply them to their specific hardware features. For
> > ARM64, this could mean queueing a tlbi in hardware for
> > the page at this particular uaddr.
>
> The subject line and commit message are misleading. The patch
> adds an address argument to the arch callback arch_tlbbatch_add_mm(),
> as the arm64 platform could use that to perform TLB flush batching?
>
> This patch can be folded into the next one, so that the requirement
> for the additional 'uaddr' argument in the arch callback will be
> self-evident. OR, if this is going to be a preparatory patch, it must
> explain how the 'uaddr' argument is helpful on platforms like arm64
> while performing TLB flush batching. But TBH, just folding it into
> the next patch explains the context better.

The intention was to keep each change small, while still functionally
independent, so that each one was easier to review.

But yes, I agree that in this particular case, if we fold this one into
the last one, we can make the modification self-evident while keeping
the new patch small.

>
> >
> > Cc: Thomas Gleixner <tglx@linutronix.de>
> > Cc: Ingo Molnar <mingo@redhat.com>
> > Cc: Borislav Petkov <bp@alien8.de>
> > Cc: Dave Hansen <dave.hansen@linux.intel.com>
> > Cc: "H. Peter Anvin" <hpa@zytor.com>
> > Cc: Nadav Amit <namit@vmware.com>
> > Cc: Mel Gorman <mgorman@suse.de>
> > Tested-by: Xin Hao <xhao@linux.alibaba.com>
> > Signed-off-by: Barry Song <v-songbaohua@oppo.com>
> > Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
> > ---
> >  arch/x86/include/asm/tlbflush.h |  3 ++-
> >  mm/rmap.c                       | 10 ++++++----
> >  2 files changed, 8 insertions(+), 5 deletions(-)
> >
> > diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
> > index 8a497d902c16..5bd78ae55cd4 100644
> > --- a/arch/x86/include/asm/tlbflush.h
> > +++ b/arch/x86/include/asm/tlbflush.h
> > @@ -264,7 +264,8 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
> >  }
> >
> >  static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
> > -                                     struct mm_struct *mm)
> > +                                     struct mm_struct *mm,
> > +                                     unsigned long uaddr)
> >  {
> >       inc_mm_tlb_gen(mm);
> >       cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
> > diff --git a/mm/rmap.c b/mm/rmap.c
> > index a17a004550c6..7187a72b63b1 100644
> > --- a/mm/rmap.c
> > +++ b/mm/rmap.c
> > @@ -642,12 +642,13 @@ void try_to_unmap_flush_dirty(void)
> >  #define TLB_FLUSH_BATCH_PENDING_LARGE                        \
> >       (TLB_FLUSH_BATCH_PENDING_MASK / 2)
> >
> > -static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
> > +static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable,
> > +                                   unsigned long uaddr)
> >  {
> >       struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
> >       int batch, nbatch;
> >
> > -     arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
> > +     arch_tlbbatch_add_mm(&tlb_ubc->arch, mm, uaddr);
> >       tlb_ubc->flush_required = true;
> >
> >       /*
> > @@ -725,7 +726,8 @@ void flush_tlb_batched_pending(struct mm_struct *mm)
> >       }
> >  }
> >  #else
> > -static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
> > +static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable,
> > +                                   unsigned long uaddr)
> >  {
> >  }
> >
> > @@ -1587,7 +1589,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
> >                                */
> >                               pteval = ptep_get_and_clear(mm, address, pvmw.pte);
> >
> > -                             set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
> > +                             set_tlb_ubc_flush_pending(mm, pte_dirty(pteval), address);
> >                       } else {
> >                               pteval = ptep_clear_flush(vma, address, pvmw.pte);
> >                       }

Thanks
Barry

Patch

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 8a497d902c16..5bd78ae55cd4 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -264,7 +264,8 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 }
 
 static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
-					struct mm_struct *mm)
+					struct mm_struct *mm,
+					unsigned long uaddr)
 {
 	inc_mm_tlb_gen(mm);
 	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
diff --git a/mm/rmap.c b/mm/rmap.c
index a17a004550c6..7187a72b63b1 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -642,12 +642,13 @@ void try_to_unmap_flush_dirty(void)
 #define TLB_FLUSH_BATCH_PENDING_LARGE			\
 	(TLB_FLUSH_BATCH_PENDING_MASK / 2)
 
-static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable,
+				      unsigned long uaddr)
 {
 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 	int batch, nbatch;
 
-	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
+	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm, uaddr);
 	tlb_ubc->flush_required = true;
 
 	/*
@@ -725,7 +726,8 @@ void flush_tlb_batched_pending(struct mm_struct *mm)
 	}
 }
 #else
-static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable,
+				      unsigned long uaddr)
 {
 }
 
@@ -1587,7 +1589,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 				 */
 				pteval = ptep_get_and_clear(mm, address, pvmw.pte);
 
-				set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
+				set_tlb_ubc_flush_pending(mm, pte_dirty(pteval), address);
 			} else {
 				pteval = ptep_clear_flush(vma, address, pvmw.pte);
 			}
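
For context, a minimal sketch of the flush side that later consumes
this batch: try_to_unmap_flush() in mm/rmap.c ends in
arch_tlbbatch_flush(), and once each page has been queued per-uaddr as
above, the ARM64 side of this series can reduce that to a single
barrier (x86 instead flushes on the CPUs recorded in batch->cpumask):

/*
 * Sketch: the per-page TLBIs were already issued while queueing, so
 * the deferred flush only has to wait for all of them to complete.
 */
static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	dsb(ish);
}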