@@ -201,11 +201,60 @@ static bool kvm_hyp_handle_tlbi_el1(struct kvm_vcpu *vcpu, u64 *exit_code)
 	return true;
 }
 
+static bool kvm_hyp_handle_ecv(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	u64 esr, val;
+
+	/*
+	 * Having FEAT_ECV allows for a better quality of timer emulation.
+	 * However, this comes at a huge cost in terms of traps. Try and
+	 * satisfy the reads without returning to the kernel if we can.
+	 */
+	if (!cpus_have_final_cap(ARM64_HAS_ECV))
+		return false;
+
+	if (!vcpu_has_nv2(vcpu))
+		return false;
+
+	esr = kvm_vcpu_get_esr(vcpu);
+	if ((esr & ESR_ELx_SYS64_ISS_DIR_MASK) != ESR_ELx_SYS64_ISS_DIR_READ)
+		return false;
+
+	switch (esr_sys64_to_sysreg(esr)) {
+	case SYS_CNTP_CTL_EL02:
+	case SYS_CNTP_CTL_EL0:
+		val = __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
+		break;
+	case SYS_CNTP_CVAL_EL02:
+	case SYS_CNTP_CVAL_EL0:
+		val = __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
+		break;
+	case SYS_CNTV_CTL_EL02:
+	case SYS_CNTV_CTL_EL0:
+		val = __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
+		break;
+	case SYS_CNTV_CVAL_EL02:
+	case SYS_CNTV_CVAL_EL0:
+		val = __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
+		break;
+	default:
+		return false;
+	}
+
+	vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
+	__kvm_skip_instr(vcpu);
+
+	return true;
+}
+
 static bool kvm_hyp_handle_sysreg_vhe(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	if (kvm_hyp_handle_tlbi_el1(vcpu, exit_code))
 		return true;
 
+	if (kvm_hyp_handle_ecv(vcpu, exit_code))
+		return true;
+
 	return kvm_hyp_handle_sysreg(vcpu, exit_code);
 }
 
Although FEAT_ECV allows us to correctly enable the timers, it also
reduces performance pretty badly (an L2 guest doing a lot of virtio
emulated in L1 userspace results in a 30% degradation). Mitigate this
by emulating the CTL/CVAL register reads in the inner run loop,
without returning to the general kernel. This halves the overhead
described above.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kvm/hyp/vhe/switch.c | 49 +++++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+)
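For readers who don't live in the hyp exit path, here is a minimal
standalone sketch of the shape of this fast path. Everything in it
(toy_vcpu, toy_trap, the shadow-register enum) is a made-up stand-in
for illustration, not a kernel definition; in particular, the ESR
decode is collapsed into a pre-decoded direction flag plus register
index, where the real code checks ESR_ELx_SYS64_ISS_DIR_MASK and
switches on esr_sys64_to_sysreg():

/*
 * Standalone model of the fast-path sysreg read emulation above.
 * All names and encodings are simplified stand-ins: the point is
 * only to show the flow of decode, shadow-value lookup, write to
 * Rt, and skipping the trapped instruction.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum shadow_reg { CNTP_CTL, CNTP_CVAL, CNTV_CTL, CNTV_CVAL, NR_SHADOW };

struct toy_vcpu {
	uint64_t shadow[NR_SHADOW];	/* values cached in the vcpu context */
	uint64_t gpr[31];		/* general purpose registers */
	uint64_t pc;
};

/* A trap, reduced to the fields the fast path cares about. */
struct toy_trap {
	bool is_read;		/* stand-in for the ESR direction bit */
	enum shadow_reg reg;	/* stand-in for the decoded sysreg */
	int rt;			/* target general purpose register */
};

static bool toy_handle_timer_read(struct toy_vcpu *vcpu, struct toy_trap *t)
{
	if (!t->is_read)
		return false;	/* writes still go to the full handler */

	/* Satisfy the read from the shadow copy, no exit needed. */
	vcpu->gpr[t->rt] = vcpu->shadow[t->reg];
	vcpu->pc += 4;		/* skip the trapped MRS instruction */
	return true;
}

int main(void)
{
	struct toy_vcpu vcpu = { .shadow[CNTV_CVAL] = 0xdeadbeef };
	struct toy_trap trap = { .is_read = true, .reg = CNTV_CVAL, .rt = 3 };

	if (toy_handle_timer_read(&vcpu, &trap))
		printf("x3 = %#llx, pc advanced to %#llx\n",
		       (unsigned long long)vcpu.gpr[3],
		       (unsigned long long)vcpu.pc);
	return 0;
}

The read/write asymmetry is the point of the patch: a read of
CTL/CVAL can be served from the vcpu's shadow copy of the register,
while a write has side effects (reprogramming the emulated timer)
and so still falls through to the normal handlers.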