diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -271,6 +271,19 @@ extern u32 __kvm_get_mdcr_el2(void);
.macro set_loaded_vcpu vcpu, ctxt, tmp
adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
str \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
+
+ add \tmp, \vcpu, #VCPU_CONTEXT
+ str \tmp, [\ctxt, #HOST_CONTEXT_CTXT]
+
+ add \tmp, \vcpu, #VCPU_HYPS
+ str \tmp, [\ctxt, #HOST_CONTEXT_HYPS]
+.endm
+
+.macro clear_loaded_vcpu ctxt, tmp
+ adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
+ str xzr, [\ctxt, #HOST_CONTEXT_VCPU]
+ str xzr, [\ctxt, #HOST_CONTEXT_CTXT]
+ str xzr, [\ctxt, #HOST_CONTEXT_HYPS]
.endm
.macro get_loaded_vcpu_ctxt vcpu, ctxt
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -228,14 +228,27 @@ struct kvm_cpu_context {
u64 sys_regs[NR_SYS_REGS];
struct kvm_vcpu *__hyp_running_vcpu;
+ struct kvm_cpu_context *__hyp_running_ctxt;
+ struct vcpu_hyp_state *__hyp_running_hyps;
};
#define get_hyp_running_vcpu(ctxt) (ctxt)->__hyp_running_vcpu
-#define set_hyp_running_vcpu(ctxt, vcpu) (ctxt)->__hyp_running_vcpu = (vcpu)
+#define set_hyp_running_vcpu(host_ctxt, vcpu) do { \
+	struct kvm_vcpu *v = (vcpu); \
+	(host_ctxt)->__hyp_running_vcpu = v; \
+	if (v) { \
+		(host_ctxt)->__hyp_running_ctxt = &v->arch.ctxt; \
+		(host_ctxt)->__hyp_running_hyps = &v->arch.hyp_state; \
+	} else { \
+		(host_ctxt)->__hyp_running_ctxt = NULL; \
+		(host_ctxt)->__hyp_running_hyps = NULL; \
+	} \
+} while (0)
+
#define is_hyp_running_vcpu(ctxt) (ctxt)->__hyp_running_vcpu
-#define get_hyp_running_ctxt(host_ctxt) (host_ctxt)->__hyp_running_vcpu ? &(host_ctxt)->__hyp_running_vcpu->arch.ctxt : NULL
-#define get_hyp_running_hyps(host_ctxt) (host_ctxt)->__hyp_running_vcpu ? &(host_ctxt)->__hyp_running_vcpu->arch.hyp_state : NULL
+#define get_hyp_running_ctxt(host_ctxt) (host_ctxt)->__hyp_running_ctxt
+#define get_hyp_running_hyps(host_ctxt) (host_ctxt)->__hyp_running_hyps
struct kvm_pmu_events {
u32 events_host;
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -117,6 +117,8 @@ int main(void)
DEFINE(CPU_APDBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1]));
DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));
DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
+ DEFINE(HOST_CONTEXT_CTXT, offsetof(struct kvm_cpu_context, __hyp_running_ctxt));
+ DEFINE(HOST_CONTEXT_HYPS, offsetof(struct kvm_cpu_context, __hyp_running_hyps));
DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt));
DEFINE(NVHE_INIT_MAIR_EL2, offsetof(struct kvm_nvhe_init_params, mair_el2));
DEFINE(NVHE_INIT_TCR_EL2, offsetof(struct kvm_nvhe_init_params, tcr_el2));
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -145,7 +145,7 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
// Now restore the hyp regs
restore_callee_saved_regs x2
- set_loaded_vcpu xzr, x2, x3
+ clear_loaded_vcpu x2, x3
alternative_if ARM64_HAS_RAS_EXTN
// If we have the RAS extensions we can consume a pending error
In order to prepare to remove __hyp_running_vcpu, add __hyp_running_ctxt and
__hyp_running_hyps to access the running kvm_cpu_context and the hyp_state, as
well as their associated assembly offsets. These new fields are updated but not
yet accessed; they are kept consistent with __hyp_running_vcpu.

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/include/asm/kvm_asm.h  | 13 +++++++++++++
 arch/arm64/include/asm/kvm_host.h | 19 ++++++++++++++++---
 arch/arm64/kernel/asm-offsets.c   |  2 ++
 arch/arm64/kvm/hyp/entry.S        |  2 +-
 4 files changed, 32 insertions(+), 4 deletions(-)
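
For illustration only, here is a small standalone C model (not kernel code; all
demo_* names are hypothetical) of the invariant the commit message describes:
the cached context and hyp-state pointers are updated in lockstep with the vcpu
pointer, so a reader of the per-CPU host context sees either all three set or
all three NULL.

/*
 * Standalone sketch of the set_hyp_running_vcpu() consistency rule.
 * Names and types below only mirror the shape of the patch.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct demo_ctxt { int regs[4]; };
struct demo_hyp_state { int flags; };

struct demo_vcpu {
	struct demo_ctxt ctxt;
	struct demo_hyp_state hyp_state;
};

struct demo_host_ctxt {
	struct demo_vcpu *running_vcpu;
	struct demo_ctxt *running_ctxt;
	struct demo_hyp_state *running_hyps;
};

/* Mirrors set_hyp_running_vcpu(): update all three fields together. */
#define set_running_vcpu(host, vcpu) do { \
	struct demo_vcpu *v = (vcpu); \
	(host)->running_vcpu = v; \
	if (v) { \
		(host)->running_ctxt = &v->ctxt; \
		(host)->running_hyps = &v->hyp_state; \
	} else { \
		(host)->running_ctxt = NULL; \
		(host)->running_hyps = NULL; \
	} \
} while (0)

int main(void)
{
	struct demo_vcpu vcpu = { { { 0 } }, { 0 } };
	struct demo_host_ctxt host = { NULL, NULL, NULL };

	/* Loading a vcpu populates the cached sub-pointers as well. */
	set_running_vcpu(&host, &vcpu);
	assert(host.running_ctxt == &vcpu.ctxt);
	assert(host.running_hyps == &vcpu.hyp_state);

	/* Clearing the vcpu clears the cached sub-pointers too. */
	set_running_vcpu(&host, NULL);
	assert(host.running_ctxt == NULL && host.running_hyps == NULL);

	printf("cached pointers stay consistent with the vcpu pointer\n");
	return 0;
}

Keeping the two sub-pointers cached alongside the vcpu pointer is what later
allows hyp code to reach the running context and hyp state without
dereferencing the vcpu at all, which is the stated goal of eventually removing
__hyp_running_vcpu.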