Message ID | 20220222165212.2005066-6-kaleshsingh@google.com (mailing list archive)
---|---
State | New, archived
Series | KVM: arm64: Hypervisor stack enhancements
On Tue, Feb 22, 2022 at 08:51:06AM -0800, Kalesh Singh wrote:
> From: Quentin Perret <qperret@google.com>
>
> The asm entry code in the kernel uses a trick to check if VMAP'd stacks
> have overflowed by aligning them at THREAD_SHIFT * 2 granularity and
> checking the SP's THREAD_SHIFT bit.
>
> Protected KVM will soon make use of a similar trick to detect stack
> overflows, so factor out the asm code in a re-usable macro.
>
> Signed-off-by: Quentin Perret <qperret@google.com>
> [Kalesh - Resolve minor conflicts]
> Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
> ---
>  arch/arm64/include/asm/assembler.h | 11 +++++++++++
>  arch/arm64/kernel/entry.S          |  7 +------
>  2 files changed, 12 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
> index e8bd0af0141c..ad40eb0eee83 100644
> --- a/arch/arm64/include/asm/assembler.h
> +++ b/arch/arm64/include/asm/assembler.h
> @@ -850,4 +850,15 @@ alternative_endif
>
>  #endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */
>
> +/*
> + * Test whether the SP has overflowed, without corrupting a GPR.
> + */
> +.macro test_sp_overflow shift, label
> +	add	sp, sp, x0		// sp' = sp + x0
> +	sub	x0, sp, x0		// x0' = sp' - x0 = (sp + x0) - x0 = sp
> +	tbnz	x0, #\shift, \label
> +	sub	x0, sp, x0		// x0'' = sp' - x0' = (sp + x0) - sp = x0
> +	sub	sp, sp, x0		// sp'' = sp' - x0 = (sp + x0) - x0 = sp
> +.endm

I'm a little unhappy about factoring this out, since it's not really
self-contained and leaves sp and x0 partially-swapped when it branches
to the label. You can't really make that clear with comments on the
macro, and you need comments at each use-site, so I'd rather we just
open-coded a copy of this.

> +
>  #endif /* __ASM_ASSEMBLER_H */
> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
> index 772ec2ecf488..ce99ee30c77e 100644
> --- a/arch/arm64/kernel/entry.S
> +++ b/arch/arm64/kernel/entry.S
> @@ -53,15 +53,10 @@ alternative_else_nop_endif
>  	sub	sp, sp, #PT_REGS_SIZE
>  #ifdef CONFIG_VMAP_STACK
>  	/*
> -	 * Test whether the SP has overflowed, without corrupting a GPR.
>  	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
>  	 * should always be zero.
>  	 */
> -	add	sp, sp, x0		// sp' = sp + x0
> -	sub	x0, sp, x0		// x0' = sp' - x0 = (sp + x0) - x0 = sp
> -	tbnz	x0, #THREAD_SHIFT, 0f
> -	sub	x0, sp, x0		// x0'' = sp' - x0' = (sp + x0) - sp = x0
> -	sub	sp, sp, x0		// sp'' = sp' - x0 = (sp + x0) - x0 = sp
> +	test_sp_overflow THREAD_SHIFT, 0f
>  	b	el\el\ht\()_\regsize\()_\label
>
>  0:

Further to my comment above, immediately after this we have:

	/* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

... which is really surprising with the `test_sp_overflow` macro because
it's not clear that it modifies x0 and sp in this way.

Thanks,
Mark.

...

> --
> 2.35.1.473.g83b2b277ed-goog
>
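[For readers following the thread: the trick the quoted commit message refers to works because task and IRQ stacks are THREAD_SIZE bytes but aligned to 2 * THREAD_SIZE, so bit THREAD_SHIFT of any in-bounds SP is zero, and it flips to one as soon as SP drops below the stack base; that is the bit the tbnz instruction tests. A minimal C sketch of the invariant, assuming an illustrative THREAD_SHIFT of 14; the helper name is made up for this note and is not kernel code:]

	#include <stdbool.h>

	#define THREAD_SHIFT	14UL			/* illustrative value only */
	#define THREAD_SIZE	(1UL << THREAD_SHIFT)

	/*
	 * A valid SP sits in [base, base + THREAD_SIZE) with base aligned
	 * to 2 * THREAD_SIZE, so bit THREAD_SHIFT of SP is 0. An overflowed
	 * SP sits in [base - THREAD_SIZE, base), where that bit is 1. The
	 * asm's "tbnz x0, #THREAD_SHIFT, 0f" tests exactly this bit.
	 */
	static bool sp_overflowed(unsigned long sp)
	{
		return sp & (1UL << THREAD_SHIFT);
	}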
On Tue, Feb 22, 2022 at 10:32 AM Mark Rutland <mark.rutland@arm.com> wrote:
>
> On Tue, Feb 22, 2022 at 08:51:06AM -0800, Kalesh Singh wrote:
> > From: Quentin Perret <qperret@google.com>
> >
> > The asm entry code in the kernel uses a trick to check if VMAP'd stacks
> > have overflowed by aligning them at THREAD_SHIFT * 2 granularity and
> > checking the SP's THREAD_SHIFT bit.
> >
> > Protected KVM will soon make use of a similar trick to detect stack
> > overflows, so factor out the asm code in a re-usable macro.
> >
> > Signed-off-by: Quentin Perret <qperret@google.com>
> > [Kalesh - Resolve minor conflicts]
> > Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
> > ---
> >  arch/arm64/include/asm/assembler.h | 11 +++++++++++
> >  arch/arm64/kernel/entry.S          |  7 +------
> >  2 files changed, 12 insertions(+), 6 deletions(-)
> >
> > diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
> > index e8bd0af0141c..ad40eb0eee83 100644
> > --- a/arch/arm64/include/asm/assembler.h
> > +++ b/arch/arm64/include/asm/assembler.h
> > @@ -850,4 +850,15 @@ alternative_endif
> >
> >  #endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */
> >
> > +/*
> > + * Test whether the SP has overflowed, without corrupting a GPR.
> > + */
> > +.macro test_sp_overflow shift, label
> > +	add	sp, sp, x0		// sp' = sp + x0
> > +	sub	x0, sp, x0		// x0' = sp' - x0 = (sp + x0) - x0 = sp
> > +	tbnz	x0, #\shift, \label
> > +	sub	x0, sp, x0		// x0'' = sp' - x0' = (sp + x0) - sp = x0
> > +	sub	sp, sp, x0		// sp'' = sp' - x0 = (sp + x0) - x0 = sp
> > +.endm
>
> I'm a little unhappy about factoring this out, since it's not really
> self-contained and leaves sp and x0 partially-swapped when it branches
> to the label. You can't really make that clear with comments on the
> macro, and you need comments at each use-site, so I'd rather we just
> open-coded a copy of this.
>
> > +
> >  #endif /* __ASM_ASSEMBLER_H */
> > diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
> > index 772ec2ecf488..ce99ee30c77e 100644
> > --- a/arch/arm64/kernel/entry.S
> > +++ b/arch/arm64/kernel/entry.S
> > @@ -53,15 +53,10 @@ alternative_else_nop_endif
> >  	sub	sp, sp, #PT_REGS_SIZE
> >  #ifdef CONFIG_VMAP_STACK
> >  	/*
> > -	 * Test whether the SP has overflowed, without corrupting a GPR.
> >  	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
> >  	 * should always be zero.
> >  	 */
> > -	add	sp, sp, x0		// sp' = sp + x0
> > -	sub	x0, sp, x0		// x0' = sp' - x0 = (sp + x0) - x0 = sp
> > -	tbnz	x0, #THREAD_SHIFT, 0f
> > -	sub	x0, sp, x0		// x0'' = sp' - x0' = (sp + x0) - sp = x0
> > -	sub	sp, sp, x0		// sp'' = sp' - x0 = (sp + x0) - x0 = sp
> > +	test_sp_overflow THREAD_SHIFT, 0f
> >  	b	el\el\ht\()_\regsize\()_\label
> >
> >  0:
>
> Further to my comment above, immediately after this we have:
>
>	/* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
>	msr	tpidr_el0, x0
>
>	/* Recover the original x0 value and stash it in tpidrro_el0 */
>	sub	x0, sp, x0
>	msr	tpidrro_el0, x0
>
> ... which is really surprising with the `test_sp_overflow` macro because
> it's not clear that it modifies x0 and sp in this way.

Hi Mark,

I agree the macro hides the fact that sp and x0 are left in a
'corrupt' state if the branch happens. Not a problem in this case but
it could be misleading to new users. I'll remove this per your
suggestion in the next version.

Thanks,
Kalesh

>
> Thanks,
> Mark.
> ...

> > --
> > 2.35.1.473.g83b2b277ed-goog
> >
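[The "partially-swapped" state both reviewers describe is the classic add/subtract swap interrupted halfway through. A small self-contained C sketch of the register states, with made-up values standing in for sp and x0; this is illustration, not kernel code:]

	#include <stdio.h>

	int main(void)
	{
		/* Illustrative stand-ins for the sp and x0 registers. */
		unsigned long sp = 0x8000, x0 = 0x1234;

		sp = sp + x0;	/* sp' = sp + x0                 */
		x0 = sp - x0;	/* x0' = sp' - x0 = original sp  */

		/*
		 * On overflow, tbnz branches here and the last two steps
		 * never run: x0 holds the original sp, and sp holds
		 * sp + x0. The overflow handler must know this to recover
		 * the original x0 (the "sub x0, sp, x0" Mark quotes).
		 */
		printf("branch-taken state: sp=%#lx x0=%#lx\n", sp, x0);

		x0 = sp - x0;	/* x0'' = sp' - x0'  = original x0 */
		sp = sp - x0;	/* sp'' = sp' - x0'' = original sp */
		printf("fall-through state: sp=%#lx x0=%#lx\n", sp, x0);
		return 0;
	}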
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index e8bd0af0141c..ad40eb0eee83 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -850,4 +850,15 @@ alternative_endif
 
 #endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */
 
+/*
+ * Test whether the SP has overflowed, without corrupting a GPR.
+ */
+.macro test_sp_overflow shift, label
+	add	sp, sp, x0		// sp' = sp + x0
+	sub	x0, sp, x0		// x0' = sp' - x0 = (sp + x0) - x0 = sp
+	tbnz	x0, #\shift, \label
+	sub	x0, sp, x0		// x0'' = sp' - x0' = (sp + x0) - sp = x0
+	sub	sp, sp, x0		// sp'' = sp' - x0 = (sp + x0) - x0 = sp
+.endm
+
 #endif /* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 772ec2ecf488..ce99ee30c77e 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -53,15 +53,10 @@ alternative_else_nop_endif
 	sub	sp, sp, #PT_REGS_SIZE
 #ifdef CONFIG_VMAP_STACK
 	/*
-	 * Test whether the SP has overflowed, without corrupting a GPR.
 	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
 	 * should always be zero.
 	 */
-	add	sp, sp, x0		// sp' = sp + x0
-	sub	x0, sp, x0		// x0' = sp' - x0 = (sp + x0) - x0 = sp
-	tbnz	x0, #THREAD_SHIFT, 0f
-	sub	x0, sp, x0		// x0'' = sp' - x0' = (sp + x0) - sp = x0
-	sub	sp, sp, x0		// sp'' = sp' - x0 = (sp + x0) - x0 = sp
+	test_sp_overflow THREAD_SHIFT, 0f
 	b	el\el\ht\()_\regsize\()_\label
 
 0: