| Message ID | 1521666172-2494-1-git-send-email-pelcan@codeaurora.org (mailing list archive) |
|---|---|
| State | New, archived |
On 21/03/18 21:02, Philip Elcan wrote:
> Bits [47:44] of the TLBI register operand are RES0 for instructions that
> require a VA, per the ARM ARM spec, so TLBI operations should avoid writing
> non-zero values to these bits.

If we're going to start sanitising addresses to respect RES0 bits, then we
should probably do it properly to cope with the cases where bits 63:47 are
also RES0, and others - I guess we never actually try to do something like
VAE1IS with a kernel VA, but AFAICS that would still be busted even after
this patch.

Robin.

> Signed-off-by: Philip Elcan <pelcan@codeaurora.org>
> ---
>  arch/arm64/include/asm/tlbflush.h | 16 ++++++++++------
>  1 file changed, 10 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
> index 9e82dd7..dbd22a9 100644
> --- a/arch/arm64/include/asm/tlbflush.h
> +++ b/arch/arm64/include/asm/tlbflush.h
> @@ -60,6 +60,9 @@
>  		__tlbi(op, (arg) | USER_ASID_FLAG);		\
>  } while (0)
>
> +/* This macro masks out RES0 bits in the TLBI operand */
> +#define __TLBI_VADDR(addr)	(addr & ~GENMASK_ULL(47, 44))
> +
>  /*
>   *	TLB Management
>   *	==============
> @@ -128,7 +131,8 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
>  static inline void flush_tlb_page(struct vm_area_struct *vma,
>  				  unsigned long uaddr)
>  {
> -	unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);
> +	unsigned long addr = __TLBI_VADDR(uaddr >> 12) |
> +			     (ASID(vma->vm_mm) << 48);
>
>  	dsb(ishst);
>  	__tlbi(vale1is, addr);
> @@ -154,8 +158,8 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
>  		return;
>  	}
>
> -	start = asid | (start >> 12);
> -	end = asid | (end >> 12);
> +	start = asid | __TLBI_VADDR(start >> 12);
> +	end = asid | __TLBI_VADDR(end >> 12);
>
>  	dsb(ishst);
>  	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
> @@ -185,8 +189,8 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
>  		return;
>  	}
>
> -	start >>= 12;
> -	end >>= 12;
> +	start = __TLBI_VADDR(start >> 12);
> +	end = __TLBI_VADDR(end >> 12);
>
>  	dsb(ishst);
>  	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
> @@ -202,7 +206,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
>  static inline void __flush_tlb_pgtable(struct mm_struct *mm,
>  				       unsigned long uaddr)
>  {
> -	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
> +	unsigned long addr = __TLBI_VADDR(uaddr >> 12) | (ASID(mm) << 48);
>
>  	__tlbi(vae1is, addr);
>  	__tlbi_user(vae1is, addr);
>
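A standalone sketch of the problem Robin describes (my own illustration, not part of the patch: the `tlbi_operand_v1()` and `tlbi_operand_keep()` helpers and the example address are hypothetical, and `GENMASK_ULL` is redefined locally so the snippet builds in user space). Shifting a kernel VA right by 12 leaves non-zero bits well above bit 43, so clearing only bits [47:44] still writes non-zero values into other RES0/ASID bit positions, whereas keeping only bits [43:0], as suggested later in the thread, does not:

```c
#include <stdint.h>
#include <stdio.h>

/* Local copy of the kernel's GENMASK_ULL(), so this builds in user space. */
#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* v1 of the patch: clear only operand bits [47:44] */
static uint64_t tlbi_operand_v1(uint64_t vaddr)
{
	return (vaddr >> 12) & ~GENMASK_ULL(47, 44);
}

/* Alternative raised further down the thread: keep only the VA field, bits [43:0] */
static uint64_t tlbi_operand_keep(uint64_t vaddr)
{
	return (vaddr >> 12) & GENMASK_ULL(43, 0);
}

int main(void)
{
	uint64_t kernel_va = 0xffff000012345000ULL;	/* example TTBR1 address */

	/* Prints 0x000f0ff000012345: bits above 43 are still non-zero. */
	printf("clear [47:44]: %#018llx\n",
	       (unsigned long long)tlbi_operand_v1(kernel_va));

	/* Prints 0x00000ff000012345: only the defined VA field survives. */
	printf("keep  [43:0] : %#018llx\n",
	       (unsigned long long)tlbi_operand_keep(kernel_va));
	return 0;
}
```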
On Wed, Mar 21, 2018 at 05:02:52PM -0400, Philip Elcan wrote:
> Bits [47:44] of the TLBI register operand are RES0 for instructions that
> require a VA, per the ARM ARM spec, so TLBI operations should avoid writing
> non-zero values to these bits.
>
> Signed-off-by: Philip Elcan <pelcan@codeaurora.org>
> ---
>  arch/arm64/include/asm/tlbflush.h | 16 ++++++++++------
>  1 file changed, 10 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
> index 9e82dd7..dbd22a9 100644
> --- a/arch/arm64/include/asm/tlbflush.h
> +++ b/arch/arm64/include/asm/tlbflush.h
> @@ -60,6 +60,9 @@
>  		__tlbi(op, (arg) | USER_ASID_FLAG);		\
>  } while (0)
>
> +/* This macro masks out RES0 bits in the TLBI operand */
> +#define __TLBI_VADDR(addr)	(addr & ~GENMASK_ULL(47, 44))

If we're going to mask the address bits, it would be simpler to keep the
valid bits than to clear the invalid bits. i.e.

#define __TLBI_VADDR(addr)	(addr & GENMASK_ULL(43, 0))

Maybe we want a helper that does all of the addr / asid shifting and
masking, so we do that in one place rather than spreading it across all
helpers, e.g.

#define __tlbi_addr(addr, asid)				\
	({						\
		unsigned long __ta = (addr) >> 12;	\
		__ta &= GENMASK_ULL(43, 0);		\
		__ta |= (asid) << 48;			\
		__ta;					\
	})

Thanks,
Mark.
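A quick user-space sanity check of the combined helper Mark sketches (a sketch only, not kernel code: `GENMASK_ULL` is redefined locally, the types are adjusted to `uint64_t`, and the statement-expression form needs GCC/Clang). For a well-formed user VA it produces the same value as the open-coded `uaddr >> 12 | (asid << 48)` form, and stray high address bits can no longer spill into the ASID field:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local copy of the kernel's GENMASK_ULL(), so this builds in user space. */
#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* Mark's suggested helper, transcribed for a user-space test. */
#define __tlbi_addr(addr, asid)				\
	({						\
		uint64_t __ta = (uint64_t)(addr) >> 12;	\
		__ta &= GENMASK_ULL(43, 0);		\
		__ta |= (uint64_t)(asid) << 48;		\
		__ta;					\
	})

int main(void)
{
	uint64_t uaddr = 0x0000ffffabcd1000ULL;	/* example user VA */
	uint64_t asid  = 0x1234;

	/* For a well-formed user VA the result matches the open-coded form. */
	assert(__tlbi_addr(uaddr, asid) == ((uaddr >> 12) | (asid << 48)));

	/* High bits of a kernel VA are masked before the ASID is OR'd in. */
	printf("%#018llx\n",
	       (unsigned long long)__tlbi_addr(0xffff000012345000ULL, asid));
	return 0;
}
```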
On 3/26/2018 6:02 AM, Mark Rutland wrote:
> On Wed, Mar 21, 2018 at 05:02:52PM -0400, Philip Elcan wrote:
>> Bits [47:44] of the TLBI register operand are RES0 for instructions that
>> require a VA, per the ARM ARM spec, so TLBI operations should avoid writing
>> non-zero values to these bits.
>>
>> Signed-off-by: Philip Elcan <pelcan@codeaurora.org>
>> ---
>>  arch/arm64/include/asm/tlbflush.h | 16 ++++++++++------
>>  1 file changed, 10 insertions(+), 6 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
>> index 9e82dd7..dbd22a9 100644
>> --- a/arch/arm64/include/asm/tlbflush.h
>> +++ b/arch/arm64/include/asm/tlbflush.h
>> @@ -60,6 +60,9 @@
>>  		__tlbi(op, (arg) | USER_ASID_FLAG);		\
>>  } while (0)
>>
>> +/* This macro masks out RES0 bits in the TLBI operand */
>> +#define __TLBI_VADDR(addr)	(addr & ~GENMASK_ULL(47, 44))
>
> If we're going to mask the address bits, it would be simpler to keep the
> valid bits than to clear the invalid bits. i.e.
>
> #define __TLBI_VADDR(addr)	(addr & GENMASK_ULL(43, 0))
>
> Maybe we want a helper that does all of the addr / asid shifting and
> masking, so we do that in one place rather than spreading it across all
> helpers, e.g.
>
> #define __tlbi_addr(addr, asid)				\
> 	({						\
> 		unsigned long __ta = (addr) >> 12;	\
> 		__ta &= GENMASK_ULL(43, 0);		\
> 		__ta |= (asid) << 48;			\
> 		__ta;					\
> 	})
>
> Thanks,
> Mark.
>

That makes sense and addresses Robin's comment as well. I'll send out a v2
with your suggestion.

Thanks,
Philip
```diff
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 9e82dd7..dbd22a9 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -60,6 +60,9 @@
 		__tlbi(op, (arg) | USER_ASID_FLAG);		\
 } while (0)
 
+/* This macro masks out RES0 bits in the TLBI operand */
+#define __TLBI_VADDR(addr)	(addr & ~GENMASK_ULL(47, 44))
+
 /*
  *	TLB Management
  *	==============
@@ -128,7 +131,8 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 				  unsigned long uaddr)
 {
-	unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);
+	unsigned long addr = __TLBI_VADDR(uaddr >> 12) |
+			     (ASID(vma->vm_mm) << 48);
 
 	dsb(ishst);
 	__tlbi(vale1is, addr);
@@ -154,8 +158,8 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 		return;
 	}
 
-	start = asid | (start >> 12);
-	end = asid | (end >> 12);
+	start = asid | __TLBI_VADDR(start >> 12);
+	end = asid | __TLBI_VADDR(end >> 12);
 
 	dsb(ishst);
 	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
@@ -185,8 +189,8 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 		return;
 	}
 
-	start >>= 12;
-	end >>= 12;
+	start = __TLBI_VADDR(start >> 12);
+	end = __TLBI_VADDR(end >> 12);
 
 	dsb(ishst);
 	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
@@ -202,7 +206,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 static inline void __flush_tlb_pgtable(struct mm_struct *mm,
 				       unsigned long uaddr)
 {
-	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
+	unsigned long addr = __TLBI_VADDR(uaddr >> 12) | (ASID(mm) << 48);
 
 	__tlbi(vae1is, addr);
 	__tlbi_user(vae1is, addr);
```
Bits [47:44] of the TLBI register operand are RES0 for instructions that
require a VA, per the ARM ARM spec, so TLBI operations should avoid writing
non-zero values to these bits.

Signed-off-by: Philip Elcan <pelcan@codeaurora.org>
---
 arch/arm64/include/asm/tlbflush.h | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
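For reference, the two masks discussed in the thread work out as follows (my own worked numbers, not from the thread; the field layout is the TLBI VA-operand layout the patch description refers to):

```c
/*
 * ~GENMASK_ULL(47, 44) == 0xffff0fffffffffff   v1: clear only bits [47:44]
 *  GENMASK_ULL(43, 0)  == 0x00000fffffffffff   suggested: keep only the VA field
 *
 * In the TLBI operand, bits [43:0] carry VA[55:12] and bits [63:48] carry
 * the ASID, so masking to [43:0] before OR-ing in the ASID guarantees the
 * address can never spill into the ASID field.
 */
```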