
[RESEND,v5,3/6] arm64: Add tlbi_user_level TLB invalidation helper

Message ID 20200625080314.230-4-yezhenyu2@huawei.com (mailing list archive)
State New, archived
Series arm64: tlb: add support for TTL feature

Commit Message

Zhenyu Ye June 25, 2020, 8:03 a.m. UTC
Add a level-hinted variant of __tlbi_user, __tlbi_user_level, whose
level hint only gets used if ARMv8.4-TTL is detected.

ARMv8.4-TTL provides the TTL field in the TLBI instruction to indicate
the level of the translation table walk that holds the leaf entry for
the address being invalidated.

This patch sets the default level value used by flush_tlb_range() to 0,
which will be updated in future patches, and sets the ttl value of
flush_tlb_page_nosync() to 3 because it is only called to flush a
single pte page.

Signed-off-by: Zhenyu Ye <yezhenyu2@huawei.com>
---
 arch/arm64/include/asm/tlbflush.h | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)
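
For context, the sketch below shows roughly how such a level-hinted TLBI
helper folds the ARMv8.4-TTL hint into the operand: TTL occupies bits
[47:44] of the TLBI argument, with the translation granule in the upper
two bits and the level in the lower two. This is only an approximation
of the __tlbi_level() added earlier in the series; the granule constant
and the capability name are assumptions here, not taken from this patch.

/* Sketch only, not part of this patch. */
#include <linux/bitfield.h>	/* FIELD_PREP() */
#include <linux/bits.h>		/* GENMASK_ULL() */
#include <asm/cpufeature.h>	/* cpus_have_const_cap() */

#define TLBI_TTL_MASK		GENMASK_ULL(47, 44)
#define TLBI_TTL_TG_4K		1	/* granule field value for 4KB pages */

#define __tlbi_level(op, addr, level) do {				\
	u64 arg = addr;							\
									\
	if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) && (level)) {	\
		u64 ttl = (TLBI_TTL_TG_4K << 2) | ((level) & 3);	\
									\
		arg &= ~TLBI_TTL_MASK;					\
		arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);			\
	}								\
									\
	__tlbi(op, arg);						\
} while (0)

The __tlbi_user_level() added by this patch then simply forwards to the
level-hinted helper with the user ASID flag set, as the hunk in the
patch below shows.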

Comments

Catalin Marinas July 9, 2020, 4:48 p.m. UTC | #1
On Thu, Jun 25, 2020 at 04:03:11PM +0800, Zhenyu Ye wrote:
> @@ -189,8 +195,9 @@ static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
>  	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
>  
>  	dsb(ishst);
> -	__tlbi(vale1is, addr);
> -	__tlbi_user(vale1is, addr);
> +	/* This function is only called on a small page */
> +	__tlbi_level(vale1is, addr, 3);
> +	__tlbi_user_level(vale1is, addr, 3);
>  }

Actually, that's incorrect. It was ok in v2 of your patches, when I
suggested dropping level 0 and just leaving the function unchanged, but
I missed that you updated it to pass level 3.

pmdp_set_access_flags -> ptep_set_access_flags ->
flush_tlb_fix_spurious_fault -> flush_tlb_page -> flush_tlb_page_nosync.
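
To spell out why that chain matters (a sketch, not part of the thread):
pmdp_set_access_flags() updates a PMD entry, so flush_tlb_page_nosync()
can end up invalidating a level-2 block mapping; a TTL hint of 3 then
mismatches the leaf level and the hardware is allowed to leave the TLB
entry in place. When the caller cannot prove the leaf level, 0 ("no
level information") is the safe value. The function name below is
hypothetical; the helpers are the ones from this series.

static inline void flush_tlb_page_nolevel(struct vm_area_struct *vma,
					  unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));

	dsb(ishst);
	__tlbi_level(vale1is, addr, 0);		/* 0: no TTL hint */
	__tlbi_user_level(vale1is, addr, 0);
	dsb(ish);
}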
Zhenyu Ye July 10, 2020, 1:20 a.m. UTC | #2
Hi Catalin,

On 2020/7/10 0:48, Catalin Marinas wrote:
> On Thu, Jun 25, 2020 at 04:03:11PM +0800, Zhenyu Ye wrote:
>> @@ -189,8 +195,9 @@ static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
>>  	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
>>  
>>  	dsb(ishst);
>> -	__tlbi(vale1is, addr);
>> -	__tlbi_user(vale1is, addr);
>> +	/* This function is only called on a small page */
>> +	__tlbi_level(vale1is, addr, 3);
>> +	__tlbi_user_level(vale1is, addr, 3);
>>  }
> 
> Actually, that's incorrect. It was ok in v2 of your patches, when I
> suggested dropping level 0 and just leaving the function unchanged, but
> I missed that you updated it to pass level 3.
> 
> pmdp_set_access_flags -> ptep_set_access_flags ->
> flush_tlb_fix_spurious_fault -> flush_tlb_page -> flush_tlb_page_nosync.

How do you want to fix this error? I notice that this series has been applied
to arm64 (for-next/tlbi).  Should I send a new series based on arm64 (for-next/tlbi)?

Thanks,
Zhenyu
Catalin Marinas July 10, 2020, 8:53 a.m. UTC | #3
On Fri, Jul 10, 2020 at 09:20:59AM +0800, Zhenyu Ye wrote:
> On 2020/7/10 0:48, Catalin Marinas wrote:
> > On Thu, Jun 25, 2020 at 04:03:11PM +0800, Zhenyu Ye wrote:
> >> @@ -189,8 +195,9 @@ static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
> >>  	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
> >>  
> >>  	dsb(ishst);
> >> -	__tlbi(vale1is, addr);
> >> -	__tlbi_user(vale1is, addr);
> >> +	/* This function is only called on a small page */
> >> +	__tlbi_level(vale1is, addr, 3);
> >> +	__tlbi_user_level(vale1is, addr, 3);
> >>  }
> > 
> > Actually, that's incorrect. It was ok in v2 of your patches, when I
> > suggested dropping level 0 and just leaving the function unchanged, but
> > I missed that you updated it to pass level 3.
> > 
> > pmdp_set_access_flags -> ptep_set_access_flags ->
> > flush_tlb_fix_spurious_fault -> flush_tlb_page -> flush_tlb_page_nosync.
> 
> How do you want to fix this error? I notice that this series has been applied
> to arm64 (for-next/tlbi).  Should I send a new series based on arm64 (for-next/tlbi)?

Just a patch on top with a Fixes: tag.

Thanks.
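
Purely as an illustration of the shape of such a follow-up (a sketch,
not the actual fix-up patch; the Fixes: tag would reference whatever
commit id this patch has in for-next/tlbi):

--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ ... @@ static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
 	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
 
 	dsb(ishst);
-	/* This function is only called on a small page */
-	__tlbi_level(vale1is, addr, 3);
-	__tlbi_user_level(vale1is, addr, 3);
+	/*
+	 * The leaf level is not known here: this path is also reached for
+	 * PMD entries via pmdp_set_access_flags(), so pass no hint.
+	 */
+	__tlbi(vale1is, addr);
+	__tlbi_user(vale1is, addr);
 }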

Patch

diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 8adbd6fd8489..bfb58e62c127 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -88,6 +88,12 @@ 
 	__tlbi(op,  arg);					\
 } while (0)
 
+#define __tlbi_user_level(op, arg, level) do {				\
+	if (arm64_kernel_unmapped_at_el0())				\
+		__tlbi_level(op, (arg | USER_ASID_FLAG), level);	\
+} while (0)
+
+
 /*
  *	TLB Invalidation
  *	================
@@ -189,8 +195,9 @@  static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
 	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
 
 	dsb(ishst);
-	__tlbi(vale1is, addr);
-	__tlbi_user(vale1is, addr);
+	/* This function is only called on a small page */
+	__tlbi_level(vale1is, addr, 3);
+	__tlbi_user_level(vale1is, addr, 3);
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -230,11 +237,11 @@  static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	dsb(ishst);
 	for (addr = start; addr < end; addr += stride) {
 		if (last_level) {
-			__tlbi(vale1is, addr);
-			__tlbi_user(vale1is, addr);
+			__tlbi_level(vale1is, addr, 0);
+			__tlbi_user_level(vale1is, addr, 0);
 		} else {
-			__tlbi(vae1is, addr);
-			__tlbi_user(vae1is, addr);
+			__tlbi_level(vae1is, addr, 0);
+			__tlbi_user_level(vae1is, addr, 0);
 		}
 	}
 	dsb(ish);
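
The hard-coded 0 passed by __flush_tlb_range() above is only a
placeholder, as the commit message notes. As a rough sketch of where
later patches in the series take this (the extra level parameter and
these macro definitions are assumptions, not part of this patch), a
caller that knows its leaf level could then hint it:

/*
 * Sketch only: assuming __flush_tlb_range() later grows a trailing
 * tlb_level parameter, a PMD-only range flush knows its leaf entries
 * sit at level 2 and a PUD-only one at level 1 (with a 4KB granule).
 */
#define flush_pmd_tlb_range(vma, addr, end)				\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)				\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)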