
[25/29] KVM: VMX: Fold __vmx_vcpu_run() back into vmx_vcpu_run()

Message ID 20190118212037.24412-26-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series KVM: VMX: Move vCPU-run to proper asm sub-routine

Commit Message

Sean Christopherson Jan. 18, 2019, 9:20 p.m. UTC
...now that the code is no longer tagged with STACK_FRAME_NON_STANDARD.
Arguably, providing __vmx_vcpu_run() to break up vmx_vcpu_run() is
valuable on its own, but the previous split was purposely made as small
as possible to limit the effects of STACK_FRAME_NON_STANDARD.  In other
words, the current split is now completely arbitrary and likely not the
most logical.
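
[Editor's note: for readers unfamiliar with the annotation, below is a
minimal sketch of how a function is typically exempted from objtool's
stack-frame validation.  It is not taken from this patch; the function
name and body are purely illustrative.  Keeping the annotated function
as small as possible limits how much code loses objtool coverage, which
is why the original split was made so tight.]

  #include <linux/frame.h>        /* provides STACK_FRAME_NON_STANDARD */

  /* Hypothetical example: a function whose asm confuses objtool's
   * stack-frame checks, so the whole function is exempted from them. */
  static void example_with_odd_asm(void)
  {
          asm volatile("nop");    /* stand-in for frame-modifying asm */
  }
  STACK_FRAME_NON_STANDARD(example_with_odd_asm);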

This also allows renaming ____vmx_vcpu_run() to __vmx_vcpu_run() in a
future patch.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx/vmx.c | 59 +++++++++++++++++++-----------------------
 1 file changed, 27 insertions(+), 32 deletions(-)
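
[Editor's note: in the asm being moved below, the inputs are pinned to
specific registers ("D"/"S" on 64-bit, "a"/"d" on 32-bit), and the
throwaway outputs such as "=D"((int){0}) tell the compiler those input
registers are clobbered by the callee.  A minimal user-space sketch of
that clobber-via-dummy-output idiom follows; it is illustrative only and
not kernel code.]

  #include <stdio.h>

  int main(void)
  {
          int in = 42;

          /* The input is pinned to EDI via the matching "0" constraint;
           * the output is a discarded compound literal, which tells the
           * compiler EDI no longer holds a useful value afterwards. */
          asm("addl $1, %0"
              : "=D"((int){0})
              : "0"(in));

          printf("%d\n", in);     /* the C variable itself is untouched */
          return 0;
  }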

Patch

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4f19e32f0922..c088d91da9fa 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6370,37 +6370,6 @@  void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
 	}
 }
 
-static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
-{
-	if (static_branch_unlikely(&vmx_l1d_should_flush))
-		vmx_l1d_flush(vcpu);
-
-	if (vcpu->arch.cr2 != read_cr2())
-		write_cr2(vcpu->arch.cr2);
-
-	asm(
-		"call ____vmx_vcpu_run \n\t"
-	      : ASM_CALL_CONSTRAINT, "=ebx"(vmx->fail),
-#ifdef CONFIG_X86_64
-		"=D"((int){0}), "=S"((int){0})
-	      : "D"(vmx), "S"(&vcpu->arch.regs),
-#else
-		"=a"((int){0}), "=d"((int){0})
-	      : "a"(vmx), "d"(&vcpu->arch.regs),
-#endif
-		"bl"(vmx->loaded_vmcs->launched)
-	      : "cc", "memory"
-#ifdef CONFIG_X86_64
-		, "rax", "rcx", "rdx"
-		, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
-#else
-		, "ecx", "edi", "esi"
-#endif
-	      );
-
-	vcpu->arch.cr2 = read_cr2();
-}
-
 static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6468,7 +6437,33 @@  static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 */
 	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
 
-	__vmx_vcpu_run(vcpu, vmx);
+	if (static_branch_unlikely(&vmx_l1d_should_flush))
+		vmx_l1d_flush(vcpu);
+
+	if (vcpu->arch.cr2 != read_cr2())
+		write_cr2(vcpu->arch.cr2);
+
+	asm(
+		"call ____vmx_vcpu_run \n\t"
+	      : ASM_CALL_CONSTRAINT, "=ebx"(vmx->fail),
+#ifdef CONFIG_X86_64
+		"=D"((int){0}), "=S"((int){0})
+	      : "D"(vmx), "S"(&vcpu->arch.regs),
+#else
+		"=a"((int){0}), "=d"((int){0})
+	      : "a"(vmx), "d"(&vcpu->arch.regs),
+#endif
+		"bl"(vmx->loaded_vmcs->launched)
+	      : "cc", "memory"
+#ifdef CONFIG_X86_64
+		, "rax", "rcx", "rdx"
+		, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+#else
+		, "ecx", "edi", "esi"
+#endif
+	      );
+
+	vcpu->arch.cr2 = read_cr2();
 
 	/*
 	 * We do not use IBRS in the kernel. If this vCPU has used the