diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -12,6 +12,7 @@
#include <asm/alternative.h>
#include <asm/sysreg.h>
+DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DECLARE_PER_CPU(unsigned long, kvm_hyp_vector);
#define read_sysreg_elx(r,nvh,vh) \
@@ -89,7 +90,7 @@ void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(void);
#endif
-u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
+u64 __guest_enter(struct kvm_vcpu *vcpu);
void __noreturn hyp_panic(void);
#ifdef __KVM_NVHE_HYPERVISOR__
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -71,6 +71,7 @@ KVM_NVHE_ALIAS(kvm_update_va_mask);
/* Global kernel state accessed by nVHE hyp code. */
KVM_NVHE_ALIAS(arm64_ssbd_callback_required);
KVM_NVHE_ALIAS(kvm_host_data);
+KVM_NVHE_ALIAS(kvm_hyp_ctxt);
KVM_NVHE_ALIAS(kvm_hyp_vector);
KVM_NVHE_ALIAS(kvm_vgic_global_state);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -47,6 +47,7 @@ __asm__(".arch_extension	virt");
#endif
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
+DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
@@ -1542,6 +1543,7 @@ static int init_hyp_mode(void)
for_each_possible_cpu(cpu) {
struct kvm_host_data *cpu_data;
+ struct kvm_cpu_context *hyp_ctxt;
unsigned long *vector;
cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
@@ -1552,6 +1554,14 @@ static int init_hyp_mode(void)
goto out_err;
}
+ hyp_ctxt = per_cpu_ptr(&kvm_hyp_ctxt, cpu);
+ err = create_hyp_mappings(hyp_ctxt, hyp_ctxt + 1, PAGE_HYP);
+
+ if (err) {
+ kvm_err("Cannot map hyp context: %d\n", err);
+ goto out_err;
+ }
+
vector = per_cpu_ptr(&kvm_hyp_vector, cpu);
err = create_hyp_mappings(vector, vector + 1, PAGE_HYP);
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -57,15 +57,15 @@
.endm
/*
- * u64 __guest_enter(struct kvm_vcpu *vcpu,
- * struct kvm_cpu_context *host_ctxt);
+ * u64 __guest_enter(struct kvm_vcpu *vcpu);
*/
SYM_FUNC_START(__guest_enter)
// x0: vcpu
- // x1: host context
- // x2-x17: clobbered by macros
+ // x1-x17: clobbered by macros
// x29: guest context
+ hyp_adr_this_cpu x1, kvm_hyp_ctxt, x2
+
// Store the host regs
save_callee_saved_regs x1
@@ -148,7 +148,7 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
// Store the guest's sp_el0
save_sp_el0 x1, x2
- get_host_ctxt x2, x3
+ hyp_adr_this_cpu x2, kvm_hyp_ctxt, x3
// Macro ptrauth_switch_to_guest format:
// ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3)
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -381,7 +381,7 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
!esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
return false;
- ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+ ctxt = __hyp_this_cpu_ptr(kvm_hyp_ctxt);
__ptrauth_save_key(ctxt, APIA);
__ptrauth_save_key(ctxt, APIB);
__ptrauth_save_key(ctxt, APDA);
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -209,7 +209,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
do {
/* Jump in the fire! */
- exit_code = __guest_enter(vcpu, host_ctxt);
+ exit_code = __guest_enter(vcpu);
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -135,7 +135,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
do {
/* Jump in the fire! */
- exit_code = __guest_enter(vcpu, host_ctxt);
+ exit_code = __guest_enter(vcpu);
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));
During __guest_enter, save and restore from a new hyp context rather
than the host context. This is preparation for separation of the hyp
and host context in nVHE.

Signed-off-by: Andrew Scull <ascull@google.com>
---
 arch/arm64/include/asm/kvm_hyp.h        |  3 ++-
 arch/arm64/kernel/image-vars.h          |  1 +
 arch/arm64/kvm/arm.c                    | 10 ++++++++++
 arch/arm64/kvm/hyp/entry.S              | 10 +++++-----
 arch/arm64/kvm/hyp/include/hyp/switch.h |  2 +-
 arch/arm64/kvm/hyp/nvhe/switch.c        |  2 +-
 arch/arm64/kvm/hyp/vhe/switch.c         |  2 +-
 7 files changed, 21 insertions(+), 9 deletions(-)
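
As a rough sketch of the calling-convention change, the following is a
minimal, self-contained userspace C analogue. All names in it
(cpu_context, hyp_ctxt, this_cpu, guest_enter_old/new) are hypothetical
stand-ins, not kernel code; the point is only that the entry function
stops taking a context parameter and instead resolves its own per-CPU
context, as the new hyp_adr_this_cpu line in entry.S does:

/*
 * Hypothetical userspace analogue (not kernel code) of the change to
 * __guest_enter's calling convention.
 */
#include <stdio.h>

struct cpu_context {
	unsigned long regs[4];		/* stand-in for saved registers */
};

#define NR_CPUS 4

/* Analogue of DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt). */
static struct cpu_context hyp_ctxt[NR_CPUS];

/* Pretend we always run on CPU 0; a kernel would use the per-CPU base. */
static int this_cpu(void)
{
	return 0;
}

/* Old shape: the caller located the context and passed it in (x1). */
static unsigned long guest_enter_old(int vcpu_id, struct cpu_context *ctxt)
{
	ctxt->regs[0] = (unsigned long)vcpu_id;	/* "save host regs" */
	return 0;				/* "exit code" */
}

/* New shape: the entry code resolves its own per-CPU context, much as
 * "hyp_adr_this_cpu x1, kvm_hyp_ctxt, x2" does in entry.S. */
static unsigned long guest_enter_new(int vcpu_id)
{
	struct cpu_context *ctxt = &hyp_ctxt[this_cpu()];

	ctxt->regs[0] = (unsigned long)vcpu_id;	/* "save host regs" */
	return 0;				/* "exit code" */
}

int main(void)
{
	guest_enter_old(1, &hyp_ctxt[this_cpu()]);	/* before the patch */
	guest_enter_new(2);				/* after the patch */
	printf("saved vcpu id: %lu\n", hyp_ctxt[this_cpu()].regs[0]);
	return 0;
}

Once the entry path owns the lookup, the hyp context can later be given
its own backing storage in nVHE without touching the __kvm_vcpu_run call
sites, which is the separation the commit message refers to.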