@@ -232,6 +232,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
struct vcpu_hyp_state *vcpu_hyps = &hyp_state(vcpu);
struct kvm_cpu_context *vcpu_ctxt = &vcpu_ctxt(vcpu);
+ const bool is_protected = is_nvhe_hyp_code() && kvm_vm_is_protected(kern_hyp_va(vcpu->kvm));
bool sve_guest, sve_host;
u8 esr_ec;
u64 reg;
@@ -239,7 +240,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
if (!system_supports_fpsimd())
return false;
 
- if (system_supports_sve()) {
+ if (system_supports_sve() && !is_protected) {
sve_guest = hyp_state_has_sve(vcpu_hyps);
sve_host = hyp_state_flags(vcpu_hyps) & KVM_ARM64_HOST_SVE_IN_USE;
} else {
@@ -247,7 +248,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
sve_host = false;
}
 
- esr_ec = kvm_vcpu_trap_get_class(vcpu);
+ esr_ec = kvm_hyp_state_trap_get_class(vcpu_hyps);
if (esr_ec != ESR_ELx_EC_FP_ASIMD &&
esr_ec != ESR_ELx_EC_SVE)
return false;
@@ -288,7 +288,6 @@ static int __kvm_vcpu_run_pvm(struct kvm_vcpu *vcpu)
struct vgic_dist *vgic = &kvm->arch.vgic;
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
- bool pmu_switch_needed;
u64 exit_code;
 
/*
@@ -306,29 +305,10 @@ static int __kvm_vcpu_run_pvm(struct kvm_vcpu *vcpu)
set_hyp_running_vcpu(host_ctxt, vcpu);
guest_ctxt = &vcpu->arch.ctxt;
- pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);
-
__sysreg_save_state_nvhe(host_ctxt);
- /*
- * We must flush and disable the SPE buffer for nVHE, as
- * the translation regime(EL1&0) is going to be loaded with
- * that of the guest. And we must do this before we change the
- * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
- * before we load guest Stage1.
- */
- __debug_save_host_buffers_nvhe(vcpu);
kvm_adjust_pc(vcpu_ctxt, vcpu_hyps);
- /*
- * We must restore the 32-bit state before the sysregs, thanks
- * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
- *
- * Also, and in order to be able to deal with erratum #1319537 (A57)
- * and #1319367 (A72), we must ensure that all VM-related sysreg are
- * restored before we enable S2 translation.
- */
- __sysreg32_restore_state(vcpu);
__sysreg_restore_state_nvhe(guest_ctxt);
__load_guest_stage2(kern_hyp_va(vcpu->arch.hw_mmu));
@@ -337,8 +317,6 @@ static int __kvm_vcpu_run_pvm(struct kvm_vcpu *vcpu)
__hyp_vgic_restore_state(vcpu);
__timer_enable_traps();
 
- __debug_switch_to_guest(vcpu);
-
do {
struct kvm_cpu_context *hyp_ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
set_hyp_running_vcpu(hyp_ctxt, vcpu);
@@ -350,7 +328,6 @@ static int __kvm_vcpu_run_pvm(struct kvm_vcpu *vcpu)
} while (fixup_guest_exit(vcpu, vgic, &exit_code));
__sysreg_save_state_nvhe(guest_ctxt);
- __sysreg32_save_state(vcpu);
__timer_disable_traps();
__hyp_vgic_save_state(vcpu);
@@ -359,19 +336,6 @@ static int __kvm_vcpu_run_pvm(struct kvm_vcpu *vcpu)
__sysreg_restore_state_nvhe(host_ctxt);
- if (hyp_state_flags(vcpu_hyps) & KVM_ARM64_FP_ENABLED)
- __fpsimd_save_fpexc32(vcpu);
-
- __debug_switch_to_host(vcpu);
- /*
- * This must come after restoring the host sysregs, since a non-VHE
- * system may enable SPE here and make use of the TTBRs.
- */
- __debug_restore_host_buffers_nvhe(vcpu);
-
- if (pmu_switch_needed)
- __pmu_switch_to_host(host_ctxt);
-
/* Returning to host will clear PSR.I, remask PMR if needed */
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQOFF);
Remove code for features that are not supported for protected VMs from
__kvm_vcpu_run_pvm(). Do not run unsupported code (SVE) in
__hyp_handle_fpsimd(). Enforcement of this is in the fixed features
patch series [1].

The code removed or disabled is related to the following:
- PMU
- Debug
- Arm32
- SPE
- SVE

[1] https://lore.kernel.org/kvmarm/20210922124704.600087-1-tabba@google.com/T/#u

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/kvm/hyp/include/hyp/switch.h |  5 ++--
 arch/arm64/kvm/hyp/nvhe/switch.c        | 36 -------------------------
 2 files changed, 3 insertions(+), 38 deletions(-)
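
For reference, a minimal sketch of the gate this adds to
__hyp_handle_fpsimd(), with identifiers taken from the hunks above; it
illustrates the pattern and is not a drop-in replacement for the kernel
code:

	/*
	 * is_nvhe_hyp_code() folds to a compile-time constant per object,
	 * so the protected-VM check only exists in the nVHE hyp code.
	 * There, vcpu->kvm holds a kernel VA, which kern_hyp_va() must
	 * convert before it can be dereferenced at EL2.
	 */
	const bool is_protected = is_nvhe_hyp_code() &&
				  kvm_vm_is_protected(kern_hyp_va(vcpu->kvm));

	/* Protected guests never have SVE state for hyp to switch. */
	if (system_supports_sve() && !is_protected) {
		sve_guest = hyp_state_has_sve(vcpu_hyps);
		sve_host = hyp_state_flags(vcpu_hyps) &
			   KVM_ARM64_HOST_SVE_IN_USE;
	} else {
		sve_guest = false;
		sve_host = false;
	}

Since protected VMs take their own run path (__kvm_vcpu_run_pvm()), the
host-side feature handling (PMU and SPE switching, debug state) and the
32-bit state save/restore can be removed from that path wholesale rather
than gated at runtime.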