Reduce the scope of fixup_guest_exit for protected VMs to only need
hyp_state and kvm_cpu_ctxt.

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/kvm/hyp/include/hyp/switch.h | 23 +++++++++++++++++++----
 arch/arm64/kvm/hyp/nvhe/switch.c        |  7 ++-----
 arch/arm64/kvm/hyp/vhe/switch.c         |  3 +--
 3 files changed, 22 insertions(+), 11 deletions(-)

diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -423,11 +423,8 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
* the guest, false when we should restore the host state and return to the
* main run loop.
*/
-static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, struct vgic_dist *vgic, u64 *exit_code)
+static inline bool _fixup_guest_exit(struct kvm_vcpu *vcpu, struct vgic_dist *vgic, struct kvm_cpu_context *vcpu_ctxt, struct vcpu_hyp_state *vcpu_hyps, u64 *exit_code)
{
- struct vcpu_hyp_state *vcpu_hyps = &hyp_state(vcpu);
- struct kvm_cpu_context *vcpu_ctxt = &vcpu_ctxt(vcpu);
-
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
hyp_state_fault(vcpu_hyps).esr_el2 = read_sysreg_el2(SYS_ESR);
@@ -518,6 +515,24 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, struct vgic_dist *vgi
return true;
}

+static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+ struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
+ struct vcpu_hyp_state *hyps = &vcpu->arch.hyp_state;
+ /* TODO: create helper for getting VA */
+ struct kvm *kvm = vcpu->kvm;
+
+ if (is_nvhe_hyp_code())
+ kvm = kern_hyp_va(kvm);
+
+ return _fixup_guest_exit(vcpu, &kvm->arch.vgic, ctxt, hyps, exit_code);
+}
+
+static inline bool fixup_pvm_guest_exit(struct kvm_vcpu *vcpu, struct vgic_dist *vgic, struct kvm_cpu_context *ctxt, struct vcpu_hyp_state *hyps, u64 *exit_code)
+{
+ return _fixup_guest_exit(vcpu, vgic, ctxt, hyps, exit_code);
+}
+
static inline void __kvm_unexpected_el2_exception(void)
{
extern char __guest_exit_panic[];

diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -182,8 +182,6 @@ static int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
struct vcpu_hyp_state *vcpu_hyps = &vcpu->arch.hyp_state;
struct kvm_cpu_context *vcpu_ctxt = &vcpu->arch.ctxt;
- struct kvm *kvm = kern_hyp_va(vcpu->kvm);
- struct vgic_dist *vgic = &kvm->arch.vgic;
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
bool pmu_switch_needed;
@@ -245,7 +243,7 @@ static int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
exit_code = __guest_enter(guest_ctxt);
/* And we're baaack! */
- } while (fixup_guest_exit(vcpu, vgic, &exit_code));
+ } while (fixup_guest_exit(vcpu, &exit_code));
__sysreg_save_state_nvhe(guest_ctxt);
__sysreg32_save_state(vcpu);
@@ -285,7 +283,6 @@ static int __kvm_vcpu_run_pvm(struct kvm_vcpu *vcpu)
struct vcpu_hyp_state *vcpu_hyps = &hyp_state(vcpu);
struct kvm_cpu_context *vcpu_ctxt = &vcpu_ctxt(vcpu);
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
- struct vgic_dist *vgic = &kvm->arch.vgic;
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
u64 exit_code;
@@ -325,7 +322,7 @@ static int __kvm_vcpu_run_pvm(struct kvm_vcpu *vcpu)
exit_code = __guest_enter(guest_ctxt);
/* And we're baaack! */
- } while (fixup_guest_exit(vcpu, vgic, &exit_code));
+ } while (fixup_pvm_guest_exit(vcpu, &kvm->arch.vgic, vcpu_ctxt, vcpu_hyps, &exit_code));
__sysreg_save_state_nvhe(guest_ctxt);
__timer_disable_traps();

diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -110,7 +110,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
struct vcpu_hyp_state *vcpu_hyps = &hyp_state(vcpu);
struct kvm_cpu_context *vcpu_ctxt = &vcpu_ctxt(vcpu);
- struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
u64 exit_code;
@@ -148,7 +147,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
exit_code = __guest_enter(guest_ctxt);
/* And we're baaack! */
- } while (fixup_guest_exit(vcpu, vgic, &exit_code));
+ } while (fixup_guest_exit(vcpu, &exit_code));
sysreg_save_guest_state_vhe(guest_ctxt);
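
For illustration only, here is a minimal standalone C sketch (not kernel
code) of the pattern this patch applies: the core helper takes exactly the
state it needs, and thin wrappers decide how those pointers are obtained --
translated with kern_hyp_va() on the non-protected nVHE path, passed through
untouched on the protected-VM path. All names below (core_fixup_exit,
mock_kern_hyp_va and the mock struct types) are hypothetical stand-ins for
this sketch, not kernel APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mock_vgic { int nr_spis; };
struct mock_ctxt { uint64_t regs[4]; };

/* Core handler: depends only on the state it is explicitly handed. */
static bool core_fixup_exit(struct mock_vgic *vgic, struct mock_ctxt *ctxt,
			    uint64_t *exit_code)
{
	(void)vgic;
	(void)ctxt;
	/* Pretend even exit codes are fixable, so the guest is resumed. */
	return (*exit_code & 1) == 0;
}

/* Stand-in for kern_hyp_va(): map a kernel VA to its hyp alias. */
static void *mock_kern_hyp_va(void *kern_va)
{
	return kern_va;	/* identity here; masked and offset in the kernel */
}

/* Non-protected wrapper: resolves the vgic pointer on the caller's behalf. */
static bool fixup_exit(struct mock_vgic *kern_vgic, struct mock_ctxt *ctxt,
		       uint64_t *exit_code)
{
	struct mock_vgic *vgic = mock_kern_hyp_va(kern_vgic);

	return core_fixup_exit(vgic, ctxt, exit_code);
}

/* Protected-VM wrapper: the caller already holds usable pointers. */
static bool fixup_exit_pvm(struct mock_vgic *vgic, struct mock_ctxt *ctxt,
			   uint64_t *exit_code)
{
	return core_fixup_exit(vgic, ctxt, exit_code);
}

int main(void)
{
	struct mock_vgic vgic = { .nr_spis = 64 };
	struct mock_ctxt ctxt = { { 0 } };
	uint64_t exit_code = 2;

	printf("fixup_exit: %d\n", fixup_exit(&vgic, &ctxt, &exit_code));
	printf("fixup_exit_pvm: %d\n", fixup_exit_pvm(&vgic, &ctxt, &exit_code));
	return 0;
}

Mirroring the patch, the pVM run loop hands the core helper pointers it
already holds, so only the non-protected wrapper ever performs the
kernel-to-hyp address translation.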