
[v3,1/2] arm64: tlbflush: add __flush_tlb_range_limit_excess()

Message ID 20240923131351.713304-2-wangkefeng.wang@huawei.com (mailing list archive)
State New, archived
Series: arm64: tlbflush: optimize flush tlb kernel range

Commit Message

Kefeng Wang Sept. 23, 2024, 1:13 p.m. UTC
The __flush_tlb_range_limit_excess() helper will soon be used when
flushing TLB entries for a kernel address range.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 arch/arm64/include/asm/tlbflush.h | 27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)
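
For context, the follow-up patch in this series wires the helper into the
kernel-range flush path. The sketch below is illustrative only and is not
quoted from patch [v3,2/2]: it assumes the kernel-range path rounds the
range to PAGE_SIZE granularity and falls back to flush_tlb_all() when
__flush_tlb_range_limit_excess() reports that the range is too large to
invalidate entry by entry.

/* Illustrative sketch, not the actual [v3,2/2] hunk. */
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	const unsigned long stride = PAGE_SIZE;
	unsigned long pages;

	start = round_down(start, stride);
	end = round_up(end, stride);
	pages = (end - start) >> PAGE_SHIFT;

	/* Too many entries to invalidate individually: flush everything. */
	if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
		flush_tlb_all();
		return;
	}

	dsb(ishst);
	/* Invalidate by VA, all ASIDs, last level, inner shareable. */
	__flush_tlb_range_op(vaale1is, start, pages, stride, 0,
			     TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
	dsb(ish);
	isb();
}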

Comments

Anshuman Khandual Sept. 24, 2024, 3:46 a.m. UTC | #1
On 9/23/24 18:43, Kefeng Wang wrote:
> The __flush_tlb_range_limit_excess() helper will soon be used when
> flushing TLB entries for a kernel address range.
> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>  arch/arm64/include/asm/tlbflush.h | 27 ++++++++++++++++++---------
>  1 file changed, 18 insertions(+), 9 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
> index 95fbc8c05607..5f5e7d1f2e7d 100644
> --- a/arch/arm64/include/asm/tlbflush.h
> +++ b/arch/arm64/include/asm/tlbflush.h
> @@ -431,6 +431,23 @@ do {									\
>  #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
>  	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
>  
> +static inline bool __flush_tlb_range_limit_excess(unsigned long start,
> +		unsigned long end, unsigned long pages, unsigned long stride)
> +{
> +	/*
> +	 * When the system does not support TLB range based flush
> +	 * operation, (MAX_DVM_OPS - 1) pages can be handled. But
> +	 * with TLB range based operation, MAX_TLBI_RANGE_PAGES
> +	 * pages can be handled.
> +	 */
> +	if ((!system_supports_tlb_range() &&
> +	     (end - start) >= (MAX_DVM_OPS * stride)) ||
> +	    pages > MAX_TLBI_RANGE_PAGES)
> +		return true;
> +
> +	return false;
> +}
> +
>  static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
>  				     unsigned long start, unsigned long end,
>  				     unsigned long stride, bool last_level,
> @@ -442,15 +459,7 @@ static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
>  	end = round_up(end, stride);
>  	pages = (end - start) >> PAGE_SHIFT;
>  
> -	/*
> -	 * When not uses TLB range ops, we can handle up to
> -	 * (MAX_DVM_OPS - 1) pages;
> -	 * When uses TLB range ops, we can handle up to
> -	 * MAX_TLBI_RANGE_PAGES pages.
> -	 */
> -	if ((!system_supports_tlb_range() &&
> -	     (end - start) >= (MAX_DVM_OPS * stride)) ||
> -	    pages > MAX_TLBI_RANGE_PAGES) {
> +	if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
>  		flush_tlb_mm(vma->vm_mm);
>  		return;
>  	}

Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>

Patch

diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 95fbc8c05607..5f5e7d1f2e7d 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -431,6 +431,23 @@ do {									\
 #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
 	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
 
+static inline bool __flush_tlb_range_limit_excess(unsigned long start,
+		unsigned long end, unsigned long pages, unsigned long stride)
+{
+	/*
+	 * When the system does not support TLB range based flush
+	 * operation, (MAX_DVM_OPS - 1) pages can be handled. But
+	 * with TLB range based operation, MAX_TLBI_RANGE_PAGES
+	 * pages can be handled.
+	 */
+	if ((!system_supports_tlb_range() &&
+	     (end - start) >= (MAX_DVM_OPS * stride)) ||
+	    pages > MAX_TLBI_RANGE_PAGES)
+		return true;
+
+	return false;
+}
+
 static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
 				     unsigned long start, unsigned long end,
 				     unsigned long stride, bool last_level,
@@ -442,15 +459,7 @@ static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
 	end = round_up(end, stride);
 	pages = (end - start) >> PAGE_SHIFT;
 
-	/*
-	 * When not uses TLB range ops, we can handle up to
-	 * (MAX_DVM_OPS - 1) pages;
-	 * When uses TLB range ops, we can handle up to
-	 * MAX_TLBI_RANGE_PAGES pages.
-	 */
-	if ((!system_supports_tlb_range() &&
-	     (end - start) >= (MAX_DVM_OPS * stride)) ||
-	    pages > MAX_TLBI_RANGE_PAGES) {
+	if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
 		flush_tlb_mm(vma->vm_mm);
 		return;
 	}
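
To make the two thresholds concrete, the standalone program below mirrors
the helper's decision logic in plain C. The constants are placeholders
chosen for illustration (4K pages; MAX_DVM_OPS assumed to equal
PTRS_PER_PTE == 512; MAX_TLBI_RANGE_PAGES taken as
__TLBI_RANGE_PAGES(31, 3) == 32 << 16), not the kernel's authoritative
definitions.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define MAX_DVM_OPS		512UL		/* assumed: PTRS_PER_PTE with 4K pages */
#define MAX_TLBI_RANGE_PAGES	(32UL << 16)	/* assumed: __TLBI_RANGE_PAGES(31, 3) */

/*
 * Same shape as __flush_tlb_range_limit_excess(), with the
 * system_supports_tlb_range() check passed in as a parameter.
 */
static bool limit_excess(unsigned long start, unsigned long end,
			 unsigned long pages, unsigned long stride,
			 bool have_range_ops)
{
	if ((!have_range_ops && (end - start) >= (MAX_DVM_OPS * stride)) ||
	    pages > MAX_TLBI_RANGE_PAGES)
		return true;

	return false;
}

int main(void)
{
	unsigned long start = 0, end = 4UL << 20;	/* a 4MB range */
	unsigned long pages = (end - start) >> PAGE_SHIFT;

	/* Without range ops: 4MB >= 512 * 4KB, so the caller falls back. */
	printf("no range ops: %d\n", limit_excess(start, end, pages, PAGE_SIZE, false));

	/* With range ops: 1024 pages <= MAX_TLBI_RANGE_PAGES, no fallback. */
	printf("range ops:    %d\n", limit_excess(start, end, pages, PAGE_SIZE, true));

	return 0;
}

With these placeholder numbers, a 4MB flush exceeds the (MAX_DVM_OPS - 1)
per-entry budget on systems without TLB range operations, but comfortably
fits within MAX_TLBI_RANGE_PAGES when range operations are available.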