@@ -246,31 +246,18 @@ extern u32 __kvm_get_mdcr_el2(void);
add \reg, \reg, #HOST_DATA_CONTEXT
.endm

-.macro get_vcpu_ptr vcpu, ctxt
- get_host_ctxt \ctxt, \vcpu
- ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
-.endm
-
.macro get_vcpu_ctxt_ptr vcpu, ctxt
get_host_ctxt \ctxt, \vcpu
- ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
- add \vcpu, \vcpu, #VCPU_CONTEXT
+ ldr \vcpu, [\ctxt, #HOST_CONTEXT_CTXT]
.endm

.macro get_vcpu_hyps_ptr vcpu, ctxt
get_host_ctxt \ctxt, \vcpu
- ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
- add \vcpu, \vcpu, #VCPU_HYPS
-.endm
-
-.macro get_loaded_vcpu vcpu, ctxt
- adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
- ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
+ ldr \vcpu, [\ctxt, #HOST_CONTEXT_HYPS]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
- str \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
add \tmp, \vcpu, #VCPU_CONTEXT
str \tmp, [\ctxt, #HOST_CONTEXT_CTXT]
@@ -281,21 +268,18 @@ extern u32 __kvm_get_mdcr_el2(void);
.macro clear_loaded_vcpu ctxt, tmp
adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
- str xzr, [\ctxt, #HOST_CONTEXT_VCPU]
str xzr, [\ctxt, #HOST_CONTEXT_CTXT]
str xzr, [\ctxt, #HOST_CONTEXT_HYPS]
.endm

.macro get_loaded_vcpu_ctxt vcpu, ctxt
adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
- ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
- add \vcpu, \vcpu, #VCPU_CONTEXT
+ ldr \vcpu, [\ctxt, #HOST_CONTEXT_CTXT]
.endm

.macro get_loaded_vcpu_hyps vcpu, ctxt
adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
- ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
- add \vcpu, \vcpu, #VCPU_HYPS
+ ldr \vcpu, [\ctxt, #HOST_CONTEXT_HYPS]
.endm

/*
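For readers cross-checking the assembly, here is a rough C model of what the reworked `get_loaded_vcpu_ctxt`/`get_loaded_vcpu_hyps` macros now do: instead of loading the vcpu pointer and adding the `VCPU_CONTEXT`/`VCPU_HYPS` struct offsets, they load the cached pointers straight out of the per-CPU `kvm_hyp_ctxt`. This is an illustrative sketch derived from the diff above, not code from the series:

```c
/* Rough C analogue of the rewritten macros (illustrative only). */
static inline struct kvm_cpu_context *get_loaded_vcpu_ctxt_c(void)
{
	/* adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu */
	struct kvm_cpu_context *hyp_ctxt = this_cpu_ptr(&kvm_hyp_ctxt);

	/* ldr \vcpu, [\ctxt, #HOST_CONTEXT_CTXT] */
	return hyp_ctxt->__hyp_running_ctxt;
}

static inline struct vcpu_hyp_state *get_loaded_vcpu_hyps_c(void)
{
	struct kvm_cpu_context *hyp_ctxt = this_cpu_ptr(&kvm_hyp_ctxt);

	/* ldr \vcpu, [\ctxt, #HOST_CONTEXT_HYPS] */
	return hyp_ctxt->__hyp_running_hyps;
}
```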
@@ -227,15 +227,12 @@ struct kvm_cpu_context {
u64 sys_regs[NR_SYS_REGS];

- struct kvm_vcpu *__hyp_running_vcpu;
struct kvm_cpu_context *__hyp_running_ctxt;
struct vcpu_hyp_state *__hyp_running_hyps;
};

-#define get_hyp_running_vcpu(ctxt) (ctxt)->__hyp_running_vcpu
#define set_hyp_running_vcpu(host_ctxt, vcpu) do { \
struct kvm_vcpu *v = (vcpu); \
- (host_ctxt)->__hyp_running_vcpu = v; \
if (vcpu) { \
(host_ctxt)->__hyp_running_ctxt = &v->arch.ctxt; \
(host_ctxt)->__hyp_running_hyps = &v->arch.hyp_state; \
@@ -245,7 +242,7 @@ struct kvm_cpu_context {
}\
} while(0)

-#define is_hyp_running_vcpu(ctxt) (ctxt)->__hyp_running_vcpu
+#define is_hyp_running_vcpu(ctxt) (ctxt)->__hyp_running_ctxt
#define get_hyp_running_ctxt(host_ctxt) (host_ctxt)->__hyp_running_ctxt
#define get_hyp_running_hyps(host_ctxt) (host_ctxt)->__hyp_running_hyps
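The whole transition hinges on set_hyp_running_vcpu() keeping the two cached pointers in lock-step: a NULL test on either one is then equivalent to the old NULL test on __hyp_running_vcpu, which is what lets hyp_panic() below switch its guard to vcpu_hyps. A minimal stand-alone model of that invariant, with simplified stand-in types (the struct names here are hypothetical, not the kernel's):

```c
#include <assert.h>
#include <stddef.h>

struct ctxt { int regs; };
struct hyps { int flags; };
struct vcpu { struct ctxt ctxt; struct hyps hyp_state; };

struct host_ctxt {
	struct ctxt *running_ctxt;	/* models __hyp_running_ctxt */
	struct hyps *running_hyps;	/* models __hyp_running_hyps */
};

/* Mirrors set_hyp_running_vcpu(): both fields are updated together. */
static void set_running(struct host_ctxt *host, struct vcpu *v)
{
	host->running_ctxt = v ? &v->ctxt : NULL;
	host->running_hyps = v ? &v->hyp_state : NULL;
}

int main(void)
{
	struct vcpu v = { { 0 }, { 0 } };
	struct host_ctxt host = { NULL, NULL };

	set_running(&host, &v);
	assert(host.running_ctxt && host.running_hyps);

	set_running(&host, NULL);
	/* Either field is now a valid "is a vcpu running?" test. */
	assert(!host.running_ctxt && !host.running_hyps);
	return 0;
}
```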
@@ -116,7 +116,6 @@ int main(void)
DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1]));
DEFINE(CPU_APDBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1]));
DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));
- DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
DEFINE(HOST_CONTEXT_CTXT, offsetof(struct kvm_cpu_context, __hyp_running_ctxt));
DEFINE(HOST_CONTEXT_HYPS, offsetof(struct kvm_cpu_context, __hyp_running_hyps));
DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt));
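Dropping the HOST_CONTEXT_VCPU entry doubles as a build-time check: the kbuild offsets machinery turns each DEFINE() in asm-offsets.c into a `#define` in the generated asm-offsets.h, so any stale assembly reference to HOST_CONTEXT_VCPU now fails to assemble rather than silently reading a bogus offset. For context (this roughly mirrors include/linux/kbuild.h and is not part of the patch), the mechanism looks like:

```c
/* DEFINE() emits a magic ".ascii" marker into the compiler's asm
 * output; a sed script then rewrites it into a C macro in the
 * generated include/generated/asm-offsets.h, e.g.:
 *
 *   #define HOST_CONTEXT_CTXT <offset of __hyp_running_ctxt>
 */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
```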
@@ -293,7 +293,7 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
}

void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr,
- u64 par, uintptr_t vcpu,
+ u64 par, uintptr_t vcpu_ctxt,
u64 far, u64 hpfar) {
u64 elr_in_kimg = __phys_to_kimg(__hyp_pa(elr));
u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr;
@@ -333,6 +333,6 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr,
*/
kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);
- panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
- spsr, elr, esr, far, hpfar, par, vcpu);
+ panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU_CTXT:%016lx\n",
+ spsr, elr, esr, far, hpfar, par, vcpu_ctxt);
}
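Since the splat now prints the running context pointer rather than the vcpu pointer, the "Hyp Offset" line remains the way to symbolize the faulting PC. Inverting the arithmetic shown above (hyp_offset = elr_in_kimg - kaslr_offset() - elr) gives the kernel-image address of a hyp PC; the helper below is hypothetical, shown only to make the relationship concrete:

```c
/* Hypothetical helper: map a faulting hyp PC (the ELR from the panic
 * splat) back to its kernel-image virtual address, given the printed
 * "Hyp Offset" and the boot's KASLR offset. */
static u64 hyp_pc_to_kimg(u64 hyp_pc, u64 hyp_offset, u64 kaslr_off)
{
	/* From: hyp_offset = elr_in_kimg - kaslr_offset() - elr */
	return hyp_pc + kaslr_off + hyp_offset;
}
```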
@@ -87,7 +87,7 @@ SYM_FUNC_START(__hyp_do_panic)
/* Load the panic arguments into x0-7 */
mrs x0, esr_el2
- get_vcpu_ptr x4, x5
+ get_vcpu_ctxt_ptr x4, x5
mrs x5, far_el2
mrs x6, hpfar_el2
mov x7, xzr // Unused argument
@@ -272,14 +272,12 @@ void __noreturn hyp_panic(void)
u64 elr = read_sysreg_el2(SYS_ELR);
u64 par = read_sysreg_par();
struct kvm_cpu_context *host_ctxt;
- struct kvm_vcpu *vcpu;
struct vcpu_hyp_state *vcpu_hyps;

host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
- vcpu = get_hyp_running_vcpu(host_ctxt);
vcpu_hyps = get_hyp_running_hyps(host_ctxt);

- if (vcpu) {
+ if (vcpu_hyps) {
__timer_disable_traps();
__deactivate_traps(vcpu_hyps);
__load_host_stage2();
@@ -203,20 +203,20 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
static void __hyp_call_panic(u64 spsr, u64 elr, u64 par)
{
struct kvm_cpu_context *host_ctxt;
- struct kvm_vcpu *vcpu;
+ struct kvm_cpu_context *vcpu_ctxt;
struct vcpu_hyp_state *vcpu_hyps;

host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
- vcpu = get_hyp_running_vcpu(host_ctxt);
+ vcpu_ctxt = get_hyp_running_ctxt(host_ctxt);
vcpu_hyps = get_hyp_running_hyps(host_ctxt);

__deactivate_traps(vcpu_hyps);
sysreg_restore_host_state_vhe(host_ctxt);

- panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n",
+ panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU_CTXT:%p\n",
spsr, elr,
read_sysreg_el2(SYS_ESR), read_sysreg_el2(SYS_FAR),
- read_sysreg(hpfar_el2), par, vcpu);
+ read_sysreg(hpfar_el2), par, vcpu_ctxt);
}
NOKPROBE_SYMBOL(__hyp_call_panic);
Transition the code to use the new hyp_running pointers. Everything
stays consistent because all fields are kept in sync. Remove
__hyp_running_vcpu now that nothing uses it.

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/include/asm/kvm_asm.h  | 24 ++++--------------------
 arch/arm64/include/asm/kvm_host.h |  5 +----
 arch/arm64/kernel/asm-offsets.c   |  1 -
 arch/arm64/kvm/handle_exit.c      |  6 +++---
 arch/arm64/kvm/hyp/nvhe/host.S    |  2 +-
 arch/arm64/kvm/hyp/nvhe/switch.c  |  4 +---
 arch/arm64/kvm/hyp/vhe/switch.c   |  8 ++++----
 7 files changed, 14 insertions(+), 36 deletions(-)