@@ -33,8 +33,8 @@ enum exception_type {
except_type_serror = 0x180,
};
-bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
-void kvm_skip_instr32(struct kvm_vcpu *vcpu);
+bool kvm_condition_valid32(const struct kvm_cpu_context *vcpu_ctxt, const struct vcpu_hyp_state *vcpu_hyps);
+void kvm_skip_instr32(struct kvm_cpu_context *vcpu_ctxt, struct vcpu_hyp_state *vcpu_hyps);
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
@@ -162,14 +162,19 @@ static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
return ctxt_mode_is_32bit(&vcpu_ctxt(vcpu));
}
-static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+static __always_inline bool __kvm_condition_valid(const struct kvm_cpu_context *vcpu_ctxt, const struct vcpu_hyp_state *vcpu_hyps)
{
- if (vcpu_mode_is_32bit(vcpu))
- return kvm_condition_valid32(vcpu);
+ if (ctxt_mode_is_32bit(vcpu_ctxt))
+ return kvm_condition_valid32(vcpu_ctxt, vcpu_hyps);
return true;
}
+static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+{
+ return __kvm_condition_valid(&vcpu_ctxt(vcpu), &hyp_state(vcpu));
+}
+
static inline void ctxt_set_thumb(struct kvm_cpu_context *ctxt)
{
*ctxt_cpsr(ctxt) |= PSR_AA32_T_BIT;
@@ -44,20 +44,18 @@ static const unsigned short cc_map[16] = {
/*
* Check if a trapped instruction should have been executed or not.
*/
-bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
+bool kvm_condition_valid32(const struct kvm_cpu_context *vcpu_ctxt, const struct vcpu_hyp_state *vcpu_hyps)
{
- const struct vcpu_hyp_state *vcpu_hyps = &hyp_state(vcpu);
- const struct kvm_cpu_context *vcpu_ctxt = &vcpu_ctxt(vcpu);
unsigned long cpsr;
u32 cpsr_cond;
int cond;
/* Top two bits non-zero? Unconditional. */
- if (kvm_vcpu_get_esr(vcpu) >> 30)
+ if (kvm_hyp_state_get_esr(vcpu_hyps) >> 30)
return true;
/* Is condition field valid? */
- cond = kvm_vcpu_get_condition(vcpu);
+ cond = kvm_hyp_state_get_condition(vcpu_hyps);
if (cond == 0xE)
return true;
@@ -125,15 +123,14 @@ static void kvm_adjust_itstate(struct kvm_cpu_context *vcpu_ctxt)
/*
- * kvm_skip_instr - skip a trapped instruction and proceed to the next
- * @vcpu: The vcpu pointer
+ * kvm_skip_instr32 - skip a trapped instruction and proceed to the next
+ * @vcpu_ctxt: The vcpu context
+ * @vcpu_hyps: The vcpu hypervisor state
 */
-void kvm_skip_instr32(struct kvm_vcpu *vcpu)
+void kvm_skip_instr32(struct kvm_cpu_context *vcpu_ctxt, struct vcpu_hyp_state *vcpu_hyps)
{
- struct vcpu_hyp_state *vcpu_hyps = &hyp_state(vcpu);
- struct kvm_cpu_context *vcpu_ctxt = &vcpu_ctxt(vcpu);
u32 pc = *ctxt_pc(vcpu_ctxt);
bool is_thumb;
is_thumb = !!(*ctxt_cpsr(vcpu_ctxt) & PSR_AA32_T_BIT);
- if (is_thumb && !kvm_vcpu_trap_il_is32bit(vcpu))
+ if (is_thumb && !kvm_hyp_state_trap_il_is32bit(vcpu_hyps))
pc += 2;
else
pc += 4;
@@ -329,11 +329,9 @@ static void enter_exception32(struct kvm_cpu_context *vcpu_ctxt, u32 mode,
*ctxt_pc(vcpu_ctxt) = vect_offset;
}
-static void kvm_inject_exception(struct kvm_vcpu *vcpu)
+static void kvm_inject_exception(struct kvm_cpu_context *vcpu_ctxt, struct vcpu_hyp_state *vcpu_hyps)
{
- struct vcpu_hyp_state *vcpu_hyps = &hyp_state(vcpu);
- struct kvm_cpu_context *vcpu_ctxt = &vcpu_ctxt(vcpu);
- if (vcpu_el1_is_32bit(vcpu)) {
+ if (hyp_state_el1_is_32bit(vcpu_hyps)) {
switch (hyp_state_flags(vcpu_hyps) & KVM_ARM64_EXCEPT_MASK) {
case KVM_ARM64_EXCEPT_AA32_UND:
enter_exception32(vcpu_ctxt, PSR_AA32_MODE_UND, 4);
@@ -370,16 +368,19 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
* Adjust the guest PC (and potentially exception state) depending on
* flags provided by the emulation code.
*/
-void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
+void kvm_adjust_pc(struct kvm_cpu_context *vcpu_ctxt, struct vcpu_hyp_state *vcpu_hyps)
{
- struct vcpu_hyp_state *vcpu_hyps = &hyp_state(vcpu);
- struct kvm_cpu_context *vcpu_ctxt = &vcpu_ctxt(vcpu);
if (hyp_state_flags(vcpu_hyps) & KVM_ARM64_PENDING_EXCEPTION) {
- kvm_inject_exception(vcpu);
+ kvm_inject_exception(vcpu_ctxt, vcpu_hyps);
hyp_state_flags(vcpu_hyps) &= ~(KVM_ARM64_PENDING_EXCEPTION |
KVM_ARM64_EXCEPT_MASK);
} else if (hyp_state_flags(vcpu_hyps) & KVM_ARM64_INCREMENT_PC) {
- kvm_skip_instr(vcpu);
+ kvm_skip_instr(vcpu_ctxt, vcpu_hyps);
hyp_state_flags(vcpu_hyps) &= ~KVM_ARM64_INCREMENT_PC;
}
}
+
+void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
+{
+ kvm_adjust_pc(&vcpu_ctxt(vcpu), &hyp_state(vcpu));
+}
@@ -13,12 +13,10 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
-static inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
+static inline void kvm_skip_instr(struct kvm_cpu_context *vcpu_ctxt, struct vcpu_hyp_state *vcpu_hyps)
{
- struct vcpu_hyp_state *vcpu_hyps = &hyp_state(vcpu);
- struct kvm_cpu_context *vcpu_ctxt = &vcpu_ctxt(vcpu);
if (ctxt_mode_is_32bit(vcpu_ctxt)) {
- kvm_skip_instr32(vcpu);
+ kvm_skip_instr32(vcpu_ctxt, vcpu_hyps);
} else {
*ctxt_pc(vcpu_ctxt) += 4;
*ctxt_cpsr(vcpu_ctxt) &= ~PSR_BTYPE_MASK;
@@ -32,14 +30,12 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
* Skip an instruction which has been emulated at hyp while most guest sysregs
* are live.
*/
-static inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
+static inline void __kvm_skip_instr(struct kvm_cpu_context *vcpu_ctxt, struct vcpu_hyp_state *vcpu_hyps)
{
- struct vcpu_hyp_state *vcpu_hyps = &hyp_state(vcpu);
- struct kvm_cpu_context *vcpu_ctxt = &vcpu_ctxt(vcpu);
*ctxt_pc(vcpu_ctxt) = read_sysreg_el2(SYS_ELR);
ctxt_gp_regs(vcpu_ctxt)->pstate = read_sysreg_el2(SYS_SPSR);
- kvm_skip_instr(vcpu);
+ kvm_skip_instr(vcpu_ctxt, vcpu_hyps);
write_sysreg_el2(ctxt_gp_regs(vcpu_ctxt)->pstate, SYS_SPSR);
write_sysreg_el2(*ctxt_pc(vcpu_ctxt), SYS_ELR);
@@ -54,4 +50,6 @@ static inline void kvm_skip_host_instr(void)
write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
}
+void kvm_adjust_pc(struct kvm_cpu_context *vcpu_ctxt, struct vcpu_hyp_state *vcpu_hyps);
+
#endif
@@ -350,7 +350,7 @@ static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
return false;
}
- __kvm_skip_instr(vcpu);
+ __kvm_skip_instr(vcpu_ctxt, vcpu_hyps);
return true;
}
@@ -204,7 +204,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
*/
__debug_save_host_buffers_nvhe(vcpu);
- __kvm_adjust_pc(vcpu);
+ kvm_adjust_pc(vcpu_ctxt, vcpu_hyps);
/*
* We must restore the 32-bit state before the sysregs, thanks
@@ -55,13 +55,13 @@ int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
/* Reject anything but a 32bit access */
if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32)) {
- __kvm_skip_instr(vcpu);
+ __kvm_skip_instr(vcpu_ctxt, vcpu_hyps);
return -1;
}
/* Not aligned? Don't bother */
if (fault_ipa & 3) {
- __kvm_skip_instr(vcpu);
+ __kvm_skip_instr(vcpu_ctxt, vcpu_hyps);
return -1;
}
@@ -85,7 +85,7 @@ int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
ctxt_set_reg(vcpu_ctxt, rd, data);
}
- __kvm_skip_instr(vcpu);
+ __kvm_skip_instr(vcpu_ctxt, vcpu_hyps);
return 1;
}
@@ -1086,7 +1086,7 @@ int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
esr = kvm_vcpu_get_esr(vcpu);
if (ctxt_mode_is_32bit(vcpu_ctxt)) {
if (!kvm_condition_valid(vcpu)) {
- __kvm_skip_instr(vcpu);
+ __kvm_skip_instr(vcpu_ctxt, vcpu_hyps);
return 1;
}
@@ -1198,7 +1198,7 @@ int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
rt = kvm_vcpu_sys_get_rt(vcpu);
fn(vcpu, vmcr, rt);
- __kvm_skip_instr(vcpu);
+ __kvm_skip_instr(vcpu_ctxt, vcpu_hyps);
return 1;
}
@@ -135,7 +135,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
__load_guest_stage2(vcpu->arch.hw_mmu);
__activate_traps(vcpu);
- __kvm_adjust_pc(vcpu);
+ kvm_adjust_pc(vcpu_ctxt, vcpu_hyps);
sysreg_restore_guest_state_vhe(guest_ctxt);
__debug_switch_to_guest(vcpu);
__kvm_skip_instr, kvm_condition_valid, and __kvm_adjust_pc are passed the vcpu when all they need is the context as well as the hypervisor state. Refactor them to use these instead. These functions are called directly or indirectly in future patches from contexts that don't have access to the whole vcpu. Signed-off-by: Fuad Tabba <tabba@google.com> --- arch/arm64/include/asm/kvm_emulate.h | 15 ++++++++++----- arch/arm64/kvm/hyp/aarch32.c | 14 +++++--------- arch/arm64/kvm/hyp/exception.c | 19 ++++++++++--------- arch/arm64/kvm/hyp/include/hyp/adjust_pc.h | 14 ++++++-------- arch/arm64/kvm/hyp/include/hyp/switch.h | 2 +- arch/arm64/kvm/hyp/nvhe/switch.c | 2 +- arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c | 6 +++--- arch/arm64/kvm/hyp/vgic-v3-sr.c | 4 ++-- arch/arm64/kvm/hyp/vhe/switch.c | 2 +- 9 files changed, 39 insertions(+), 39 deletions(-)