Add a level-hinted parameter to __tlbi_user, which only gets used if
ARMv8.4-TTL gets detected. ARMv8.4-TTL provides the TTL field in the
TLBI instruction to indicate the level of the translation table walk
holding the leaf entry for the address being invalidated. This patch
sets the default level value to 0.

Signed-off-by: Zhenyu Ye <yezhenyu2@huawei.com>
---
 arch/arm64/include/asm/tlbflush.h | 42 ++++++++++++++++++++++++++-----
 1 file changed, 36 insertions(+), 6 deletions(-)

@@ -89,6 +89,36 @@
__tlbi(op, arg); \
} while(0)

+#define __tlbi_user_level(op, addr, level) \
+ do { \
+ u64 arg = addr; \
+ \
+ if (!arm64_kernel_unmapped_at_el0()) \
+ break; \
+ \
+ if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) && \
+ level) { \
+ u64 ttl = level; \
+ \
+ switch (PAGE_SIZE) { \
+ case SZ_4K: \
+ ttl |= 1 << 2; \
+ break; \
+ case SZ_16K: \
+ ttl |= 2 << 2; \
+ break; \
+ case SZ_64K: \
+ ttl |= 3 << 2; \
+ break; \
+ } \
+ \
+ arg &= ~TLBI_TTL_MASK; \
+ arg |= FIELD_PREP(TLBI_TTL_MASK, ttl); \
+ } \
+ \
+ __tlbi(op, (arg) | USER_ASID_FLAG); \
+ } while (0)
+
/*
* TLB Invalidation
* ================
@@ -190,8 +220,8 @@ static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));

dsb(ishst);
- __tlbi(vale1is, addr);
- __tlbi_user(vale1is, addr);
+ __tlbi_level(vale1is, addr, 0);
+ __tlbi_user_level(vale1is, addr, 0);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -231,11 +261,11 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
dsb(ishst);
for (addr = start; addr < end; addr += stride) {
if (last_level) {
- __tlbi(vale1is, addr);
- __tlbi_user(vale1is, addr);
+ __tlbi_level(vale1is, addr, 0);
+ __tlbi_user_level(vale1is, addr, 0);
} else {
- __tlbi(vae1is, addr);
- __tlbi_user(vae1is, addr);
+ __tlbi_level(vae1is, addr, 0);
+ __tlbi_user_level(vae1is, addr, 0);
}
}
dsb(ish);
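For reference, here is a minimal stand-alone sketch of the granule/level
packing the new macro performs, written as plain user-space C. It assumes the
4-bit TTL field occupies bits [47:44] of the TLBI argument (TLBI_TTL_MASK),
matching the kernel headers this patch builds on; the helpers ttl_field() and
encode_ttl() are hypothetical names used only for illustration.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout, per the headers this patch builds on: the 4-bit TTL
 * field lives in bits [47:44] of the TLBI argument, with TTL[3:2] the
 * translation granule and TTL[1:0] the table level of the leaf entry. */
#define TLBI_TTL_MASK	(0xfULL << 44)

static uint64_t ttl_field(uint64_t page_size, uint64_t level)
{
	uint64_t ttl = level;		/* TTL[1:0]: level of the leaf entry */

	switch (page_size) {
	case 0x1000:			/* SZ_4K  */
		ttl |= 1 << 2;
		break;
	case 0x4000:			/* SZ_16K */
		ttl |= 2 << 2;
		break;
	case 0x10000:			/* SZ_64K */
		ttl |= 3 << 2;
		break;
	}
	return ttl;
}

/* Fold the TTL hint into a pre-built TLBI VA argument, mirroring what the
 * macro does via FIELD_PREP(TLBI_TTL_MASK, ttl). */
static uint64_t encode_ttl(uint64_t arg, uint64_t page_size, uint64_t level)
{
	arg &= ~TLBI_TTL_MASK;
	arg |= (ttl_field(page_size, level) << 44) & TLBI_TTL_MASK;
	return arg;
}

int main(void)
{
	/* 4K granule, level-3 (PTE) leaf: TTL = 0b0111 */
	printf("0x%llx\n",
	       (unsigned long long)encode_ttl(0x123456, 0x1000, 3));
	return 0;
}

Note that the converted call sites above still pass level 0, which the macro
treats as "no hint" (the TTL field is left clear), so behaviour is unchanged
until callers start passing real levels.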