[RFC,v1,28/30] KVM: arm64: reduce scope of pVM fixup_guest_exit to hyp_state and kvm_cpu_ctxt

Message ID 20210924125359.2587041-29-tabba@google.com (mailing list archive)
State New, archived
Series Reduce scope of vcpu state at hyp by refactoring out state hyp needs

Commit Message

Fuad Tabba Sept. 24, 2021, 12:53 p.m. UTC
Reduce the scope of fixup_guest_exit() for protected VMs so that it
only needs the hyp_state and kvm_cpu_ctxt.
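
As a rough sketch of the resulting call shapes (lifted from the hunks
below; the helper names are the ones this patch introduces), the VHE
and non-protected nVHE run loops keep calling a wrapper that derives
everything from the vcpu, while the pVM run loop passes the hyp VA of
the vgic and the context/hyp-state pointers it already holds:

	/* VHE and non-protected nVHE: wrapper derives vgic/ctxt/hyps itself. */
	} while (fixup_guest_exit(vcpu, &exit_code));

	/* Protected VMs: caller supplies the hyp VA of the vgic plus the
	 * kvm_cpu_context and vcpu_hyp_state explicitly.
	 */
	} while (fixup_pvm_guest_exit(vcpu, &kvm->arch.vgic, vcpu_ctxt,
				      vcpu_hyps, &exit_code));

Both wrappers funnel into _fixup_guest_exit(), which now takes the
vgic, kvm_cpu_context and vcpu_hyp_state as explicit parameters rather
than deriving them from the vcpu internally.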

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/kvm/hyp/include/hyp/switch.h | 23 +++++++++++++++++++----
 arch/arm64/kvm/hyp/nvhe/switch.c        |  7 ++-----
 arch/arm64/kvm/hyp/vhe/switch.c         |  3 +--
 3 files changed, 22 insertions(+), 11 deletions(-)

Patch

diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 3ef429cfd9af..ea9571f712c6 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -423,11 +423,8 @@  static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
  * the guest, false when we should restore the host state and return to the
  * main run loop.
  */
-static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, struct vgic_dist *vgic, u64 *exit_code)
+static inline bool _fixup_guest_exit(struct kvm_vcpu *vcpu, struct vgic_dist *vgic, struct kvm_cpu_context *vcpu_ctxt, struct vcpu_hyp_state *vcpu_hyps, u64 *exit_code)
 {
-	struct vcpu_hyp_state *vcpu_hyps = &hyp_state(vcpu);
-	struct kvm_cpu_context *vcpu_ctxt = &vcpu_ctxt(vcpu);
-
 	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
 		hyp_state_fault(vcpu_hyps).esr_el2 = read_sysreg_el2(SYS_ESR);
 
@@ -518,6 +515,24 @@  static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, struct vgic_dist *vgi
 	return true;
 }
 
+static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
+	struct vcpu_hyp_state *hyps = &vcpu->arch.hyp_state;
+	// TODO: create helper for getting VA
+	struct kvm *kvm = vcpu->kvm;
+
+	if (is_nvhe_hyp_code())
+		kvm = kern_hyp_va(kvm);
+
+	return _fixup_guest_exit(vcpu, &kvm->arch.vgic, ctxt, hyps, exit_code);
+}
+
+static inline bool fixup_pvm_guest_exit(struct kvm_vcpu *vcpu, struct vgic_dist *vgic, struct kvm_cpu_context *ctxt, struct vcpu_hyp_state *hyps, u64 *exit_code)
+{
+	return _fixup_guest_exit(vcpu, vgic, ctxt, hyps, exit_code);
+}
+
 static inline void __kvm_unexpected_el2_exception(void)
 {
 	extern char __guest_exit_panic[];
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index aa0dc4f0433b..1920aebbe49a 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -182,8 +182,6 @@  static int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_hyp_state *vcpu_hyps = &vcpu->arch.hyp_state;
 	struct kvm_cpu_context *vcpu_ctxt = &vcpu->arch.ctxt;
-	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
-	struct vgic_dist *vgic = &kvm->arch.vgic;
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_cpu_context *guest_ctxt;
 	bool pmu_switch_needed;
@@ -245,7 +243,7 @@  static int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
 		exit_code = __guest_enter(guest_ctxt);
 
 		/* And we're baaack! */
-	} while (fixup_guest_exit(vcpu, vgic, &exit_code));
+	} while (fixup_guest_exit(vcpu, &exit_code));
 
 	__sysreg_save_state_nvhe(guest_ctxt);
 	__sysreg32_save_state(vcpu);
@@ -285,7 +283,6 @@  static int __kvm_vcpu_run_pvm(struct kvm_vcpu *vcpu)
 	struct vcpu_hyp_state *vcpu_hyps = &hyp_state(vcpu);
 	struct kvm_cpu_context *vcpu_ctxt = &vcpu_ctxt(vcpu);
 	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
-	struct vgic_dist *vgic = &kvm->arch.vgic;
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_cpu_context *guest_ctxt;
 	u64 exit_code;
@@ -325,7 +322,7 @@  static int __kvm_vcpu_run_pvm(struct kvm_vcpu *vcpu)
 		exit_code = __guest_enter(guest_ctxt);
 
 		/* And we're baaack! */
-	} while (fixup_guest_exit(vcpu, vgic, &exit_code));
+	} while (fixup_pvm_guest_exit(vcpu, &kvm->arch.vgic, vcpu_ctxt, vcpu_hyps, &exit_code));
 
 	__sysreg_save_state_nvhe(guest_ctxt);
 	__timer_disable_traps();
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 7f926016cebe..4a05aff37325 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -110,7 +110,6 @@  static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_hyp_state *vcpu_hyps = &hyp_state(vcpu);
 	struct kvm_cpu_context *vcpu_ctxt = &vcpu_ctxt(vcpu);
-	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_cpu_context *guest_ctxt;
 	u64 exit_code;
@@ -148,7 +147,7 @@  static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 		exit_code = __guest_enter(guest_ctxt);
 
 		/* And we're baaack! */
-	} while (fixup_guest_exit(vcpu, vgic, &exit_code));
+	} while (fixup_guest_exit(vcpu, &exit_code));
 
 	sysreg_save_guest_state_vhe(guest_ctxt);