Message ID | 20200915104643.2543892-11-ascull@google.com
---|---
State | New, archived
Series | Introduce separate nVHE hyp context
Hi Andrew,

On Tue, 15 Sep 2020 11:46:34 +0100, Andrew Scull <ascull@google.com> wrote:
>
> If the guest context is loaded when a panic is triggered, restore the
> hyp context so e.g. the shadow call stack works when hyp_panic() is
> called and SP_EL0 is valid when the host's panic() is called.
>
> Use the hyp context's __hyp_running_vcpu field to track when hyp
> transitions to and from the guest vcpu so the exception handlers know
> whether the context needs to be restored.
>
> Signed-off-by: Andrew Scull <ascull@google.com>
> ---

[...]

> diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
> index da21fddcef75..9ab7814e6114 100644
> --- a/arch/arm64/kvm/hyp/nvhe/host.S
> +++ b/arch/arm64/kvm/hyp/nvhe/host.S
> @@ -75,6 +75,11 @@ SYM_FUNC_END(__hyp_do_panic)
>
>  .macro invalid_host_vect
>  	.align 7
> +	/* If a guest is loaded, panic out of it. */
> +	stp	x0, x1, [sp, #-16]!
> +	get_loaded_vcpu x0, x1
> +	cbnz	x0, __guest_exit_panic
> +	add	sp, sp, #16
>  	b	hyp_panic
>  .endm

Given that we have switched vectors when entering the guest, is this
only to deal with the lack of an ISB when performing the VBAR_EL2
update?

Thanks,

	M.

> --
> 2.28.0.618.gf4bc123cb7-goog
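The question above hinges on an architectural detail: a write to VBAR_EL2 only takes effect after a context synchronization event such as an ISB. A minimal C-level sketch of that window, using the kernel's write_sysreg()/isb() helpers; the function name and argument are illustrative, not from the patch:

```c
#include <asm/barrier.h>	/* isb() */
#include <asm/sysreg.h>		/* write_sysreg() */

/* Illustrative helper, not from the patch. */
static inline void switch_hyp_vectors(unsigned long new_vectors)
{
	write_sysreg(new_vectors, vbar_el2);
	/*
	 * Until a context synchronization event (e.g. this isb())
	 * completes, an exception can still be delivered to the old
	 * vectors. If those are the host vectors but a guest vcpu is
	 * already marked as loaded, the host vectors need a way to
	 * panic out of the guest context -- which is what the
	 * invalid_host_vect change quoted above provides.
	 */
	isb();
}
```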
If the guest context is loaded when a panic is triggered, restore the
hyp context so e.g. the shadow call stack works when hyp_panic() is
called and SP_EL0 is valid when the host's panic() is called.

Use the hyp context's __hyp_running_vcpu field to track when hyp
transitions to and from the guest vcpu so the exception handlers know
whether the context needs to be restored.

Signed-off-by: Andrew Scull <ascull@google.com>
---
 arch/arm64/include/asm/kvm_asm.h        | 10 ++++++++++
 arch/arm64/kvm/hyp/entry.S              | 24 ++++++++++++++++++++++++
 arch/arm64/kvm/hyp/hyp-entry.S          |  5 ++---
 arch/arm64/kvm/hyp/include/hyp/switch.h |  4 +++-
 arch/arm64/kvm/hyp/nvhe/host.S          |  5 +++++
 5 files changed, 44 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index fe51c06d480d..4df2bd8882bc 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -236,6 +236,16 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
 .endm

+.macro get_loaded_vcpu vcpu, ctxt
+	hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
+	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
+.endm
+
+.macro set_loaded_vcpu vcpu, ctxt, tmp
+	hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
+	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
+.endm
+
 /*
  * KVM extable for unexpected exceptions.
  * In the same format _asm_extable, but output to a different section so that
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 38cca690a6ff..4787fc82790c 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -86,6 +86,8 @@ alternative_else_nop_endif
 	ret

 1:
+	set_loaded_vcpu x0, x1, x2
+
 	add	x29, x0, #VCPU_CONTEXT

 	// Macro ptrauth_switch_to_guest format:
@@ -116,6 +118,26 @@ alternative_else_nop_endif
 	eret
 	sb

+SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
+	// x2-x29,lr: vcpu regs
+	// vcpu x0-x1 on the stack
+
+	// If the hyp context is loaded, go straight to hyp_panic
+	get_loaded_vcpu x0, x1
+	cbz	x0, hyp_panic
+
+	// The hyp context is saved so make sure it is restored to allow
+	// hyp_panic to run at hyp and, subsequently, panic to run in the host.
+	// This makes use of __guest_exit to avoid duplication but sets the
+	// return address to tail call into hyp_panic. As a side effect, the
+	// current state is saved to the guest context but it will only be
+	// accurate if the guest had been completely restored.
+	hyp_adr_this_cpu x0, kvm_hyp_ctxt, x1
+	adr	x1, hyp_panic
+	str	x1, [x0, #CPU_XREG_OFFSET(30)]
+
+	get_vcpu_ptr	x1, x0
+
 SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
 	// x0: return code
 	// x1: vcpu
@@ -163,6 +185,8 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
 	// Now restore the hyp regs
 	restore_callee_saved_regs x2

+	set_loaded_vcpu xzr, x1, x2
+
 alternative_if ARM64_HAS_RAS_EXTN
 	// If we have the RAS extensions we can consume a pending error
 	// without an unmask-SError and isb. The ESB-instruction consumed any
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index f92489250dfc..bc9f53df46f5 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -145,7 +145,7 @@ el2_error:
 	eret
 	sb

-.macro invalid_vector	label, target = hyp_panic
+.macro invalid_vector	label, target = __guest_exit_panic
 	.align	2
SYM_CODE_START(\label)
 	b \target
@@ -186,10 +186,9 @@ check_preamble_length 661b, 662b
 .macro invalid_vect target
 	.align 7
661:
-	b	\target
 	nop
+	stp	x0, x1, [sp, #-16]!
662:
-	ldp	x0, x1, [sp], #16
 	b	\target

 check_preamble_length 661b, 662b
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index afe714056b97..821721b78ad9 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -509,6 +509,7 @@ static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)

 static inline void __kvm_unexpected_el2_exception(void)
 {
+	extern char __guest_exit_panic[];
 	unsigned long addr, fixup;
 	struct exception_table_entry *entry, *end;
 	unsigned long elr_el2 = read_sysreg(elr_el2);
@@ -529,7 +530,8 @@ static inline void __kvm_unexpected_el2_exception(void)
 		return;
 	}

-	hyp_panic();
+	/* Trigger a panic after restoring the hyp context. */
+	write_sysreg(__guest_exit_panic, elr_el2);
 }

 #endif /* __ARM64_KVM_HYP_SWITCH_H__ */
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index da21fddcef75..9ab7814e6114 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -75,6 +75,11 @@ SYM_FUNC_END(__hyp_do_panic)

 .macro invalid_host_vect
 	.align 7
+	/* If a guest is loaded, panic out of it. */
+	stp	x0, x1, [sp, #-16]!
+	get_loaded_vcpu x0, x1
+	cbnz	x0, __guest_exit_panic
+	add	sp, sp, #16
 	b	hyp_panic
 .endm
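As an aid to reading the assembly, the tracking protocol that the get_loaded_vcpu/set_loaded_vcpu macros implement can be summarised in C. This is a simplified sketch, not kernel code: only the __hyp_running_vcpu field and the two panic entry points come from the patch; the helper names are invented for illustration, and __guest_exit_panic is really an assembly label, not a C function.

```c
struct kvm_vcpu;

/* Per-CPU hyp context; only the field used by this patch is shown. */
struct kvm_cpu_context {
	struct kvm_vcpu *__hyp_running_vcpu;
	/* ... saved registers ... */
};

void hyp_panic(void);
void __guest_exit_panic(void);

/* set_loaded_vcpu x0, x1, x2: about to restore the guest's registers */
static void mark_vcpu_loaded(struct kvm_cpu_context *hyp_ctxt,
			     struct kvm_vcpu *vcpu)
{
	hyp_ctxt->__hyp_running_vcpu = vcpu;
}

/* set_loaded_vcpu xzr, x1, x2: hyp's registers are live again */
static void mark_vcpu_unloaded(struct kvm_cpu_context *hyp_ctxt)
{
	hyp_ctxt->__hyp_running_vcpu = NULL;
}

/* get_loaded_vcpu + cbz at __guest_exit_panic */
static void panic_from_exception(struct kvm_cpu_context *hyp_ctxt)
{
	if (!hyp_ctxt->__hyp_running_vcpu)
		hyp_panic();		/* hyp context already loaded */
	else
		__guest_exit_panic();	/* restore hyp context first */
}
```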
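The switch.h hunk uses the exception return path itself to reach the panic code. A minimal sketch of the idea, simplified from the hunk above (the extable search in the original function is elided, and the wrapper name is invented for illustration):

```c
#include <asm/sysreg.h>		/* write_sysreg() */

extern char __guest_exit_panic[];

/*
 * Simplified from __kvm_unexpected_el2_exception() above. When no
 * fixup entry matches, the handler cannot simply call hyp_panic():
 * the guest's registers may still be live. Instead it rewrites the
 * exception return address so that the eret at the end of the EL2
 * exception handler "returns" into __guest_exit_panic, which saves
 * the guest state, restores the hyp context and tail-calls
 * hyp_panic.
 */
static inline void redirect_return_to_panic(void)
{
	write_sysreg(__guest_exit_panic, elr_el2);
}
```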