
[08/10] KVM: VMX: Add anti-retpoline accessors for RIP and RSP

Message ID: 20200502043234.12481-9-sean.j.christopherson@intel.com
State: New, archived
Series: KVM: x86: Misc anti-retpoline optimizations

Commit Message

Sean Christopherson May 2, 2020, 4:32 a.m. UTC
Add VMX-specific accessors for RIP and RSP that are used if and only if
CONFIG_RETPOLINE=y, to avoid bouncing through kvm_x86_ops.cache_reg() and
taking the associated retpoline hit.  This eliminates a retpoline on the
vast majority of exits, which need to read RIP in order to skip the
emulated instruction.  It also saves two retpolines on nested VM-Exits,
as both RIP and RSP need to be saved from vmcs02 to vmcs12.
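
For context, the generic accessors being bypassed live in
arch/x86/kvm/kvm_cache_regs.h and bounce through the kvm_x86_ops table
whenever the register isn't already marked available.  A simplified
sketch of that generic path (not the exact tree state):

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	/* Indirect call, i.e. a retpoline thunk when CONFIG_RETPOLINE=y. */
	if (!kvm_register_is_available(vcpu, reg))
		kvm_x86_ops.cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}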

Make the accessors dependent on CONFIG_RETPOLINE so that they can be
easily ripped out if/when the kernel gains support for static calls.
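
For reference, the static_call() infrastructure (which mainline gained
after this posting) patches an indirect call site into a direct call at
runtime, which would let the generic accessor stay retpoline-free without
vendor-specific wrappers.  An illustrative sketch only; the names and
wiring here are hypothetical, not part of this series:

/* Needs <linux/static_call.h>; the default target is set at definition. */
DEFINE_STATIC_CALL(kvm_x86_cache_reg, vmx_cache_reg);

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	if (!kvm_register_is_available(vcpu, reg))
		/* Patched to a direct call, no retpoline. */
		static_call(kvm_x86_cache_reg)(vcpu, reg);

	return vcpu->arch.regs[reg];
}

/* Vendor module init: static_call_update(kvm_x86_cache_reg, ops->cache_reg); */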

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx/nested.c |  6 +++---
 arch/x86/kvm/vmx/vmx.c    |  6 +++---
 arch/x86/kvm/vmx/vmx.h    | 28 ++++++++++++++++++++++++++++
 3 files changed, 34 insertions(+), 6 deletions(-)

Patch

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 3b4f1408b4e1..a7639818b814 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3943,8 +3943,8 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
 	vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
 
-	vmcs12->guest_rsp = kvm_rsp_read(vcpu);
-	vmcs12->guest_rip = kvm_rip_read(vcpu);
+	vmcs12->guest_rsp = vmx_rsp_read(vcpu);
+	vmcs12->guest_rip = vmx_rip_read(vcpu);
 	vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
 
 	vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
@@ -5854,7 +5854,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
 	exit_intr_info = vmx_get_intr_info(vcpu);
 	exit_qual = vmx_get_exit_qual(vcpu);
 
-	trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, exit_qual,
+	trace_kvm_nested_vmexit(vmx_rip_read(vcpu), exit_reason, exit_qual,
 				vmx->idt_vectoring_info, exit_intr_info,
 				vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
 				KVM_ISA_VMX);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 0cb0c347de04..d826ac541eed 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1569,7 +1569,7 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	 */
 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
 	    to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) {
-		rip = kvm_rip_read(vcpu);
+		rip = vmx_rip_read(vcpu);
 		rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
 		kvm_rip_write(vcpu, rip);
 	} else {
@@ -2185,7 +2185,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	return ret;
 }
 
-static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
+void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 {
 	unsigned long guest_owned_bits;
 
@@ -4750,7 +4750,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
 		vmx->vcpu.arch.event_exit_inst_len =
 			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
 		kvm_run->exit_reason = KVM_EXIT_DEBUG;
-		rip = kvm_rip_read(vcpu);
+		rip = vmx_rip_read(vcpu);
 		kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
 		kvm_run->debug.arch.exception = ex_no;
 		break;
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 5f3f141d7254..63baa0d5fe41 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -500,6 +500,34 @@ static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
 	return &(to_vmx(vcpu)->pi_desc);
 }
 
+#ifdef CONFIG_RETPOLINE
+void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg);
+static __always_inline unsigned long vmx_register_read(struct kvm_vcpu *vcpu,
+						       enum kvm_reg reg)
+{
+	BUILD_BUG_ON(!__builtin_constant_p(reg));
+	BUILD_BUG_ON(reg != VCPU_REGS_RIP && reg != VCPU_REGS_RSP);
+
+	if (!kvm_register_is_available(vcpu, reg))
+		vmx_cache_reg(vcpu, reg);
+
+	return vcpu->arch.regs[reg];
+}
+
+static inline unsigned long vmx_rip_read(struct kvm_vcpu *vcpu)
+{
+	return vmx_register_read(vcpu, VCPU_REGS_RIP);
+}
+
+static inline unsigned long vmx_rsp_read(struct kvm_vcpu *vcpu)
+{
+	return vmx_register_read(vcpu, VCPU_REGS_RSP);
+}
+#else
+#define vmx_rip_read kvm_rip_read
+#define vmx_rsp_read kvm_rsp_read
+#endif
+
 static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
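
Note on the vmx.h hunk: because vmx_register_read() is __always_inline,
the BUILD_BUG_ON(!__builtin_constant_p(reg)) check is evaluated after
inlining and constant propagation, so a bad call site fails the build
instead of silently taking a slow path.  Hypothetical callers, for
illustration:

	/* OK: constant register, one of the two supported. */
	unsigned long rip = vmx_rip_read(vcpu);

	/* Build error: 'reg' is not a compile-time constant. */
	enum kvm_reg reg = get_reg_somehow();	/* hypothetical helper */
	unsigned long val = vmx_register_read(vcpu, reg);

	/* Build error: only RIP and RSP are supported. */
	unsigned long rax = vmx_register_read(vcpu, VCPU_REGS_RAX);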