
[v3,29/33] KVM: VMX: Pass @launched to the vCPU-run asm via standard ABI regs

Message ID 20190125154120.19385-30-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series KVM: VMX: Move vCPU-run to proper asm sub-routine

Commit Message

Sean Christopherson Jan. 25, 2019, 3:41 p.m. UTC
...to prepare for making the sub-routine callable from C code.
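
For context, a hedged sketch of the C-side declaration this enables (the
exact prototype is an assumption here; it lands later in the series):

	/*
	 * Sketch only: with @vmx, @regs and @launched all passed in
	 * standard ABI registers, the asm sub-routine can be declared
	 * and called like any other C function.
	 */
	bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
			    bool launched);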

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx/vmenter.S |  5 ++++-
 arch/x86/kvm/vmx/vmx.c     | 13 ++++++-------
 2 files changed, 10 insertions(+), 8 deletions(-)

Comments

Paolo Bonzini Jan. 30, 2019, 3:59 p.m. UTC | #1
On 25/01/19 16:41, Sean Christopherson wrote:
> +	/* Copy @launched to BL, _ASM_ARG3 is volatile. */

Another tiny tiny change,

        /*
         * Copy @launched to BL, _ASM_ARG3 is caller-save so
         * vmx_update_host_rsp will overwrite it.
         */


Paolo

Patch

diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 8b3d9e071095..3299fafbaa9b 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -87,7 +87,7 @@  ENDPROC(vmx_vmexit)
  * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
  * @vmx:	struct vcpu_vmx *
  * @regs:	unsigned long * (to guest registers)
- * %RBX:	VMCS launched status (non-zero indicates already launched)
+ * @launched:	%true if the VMCS has been launched
  *
  * Returns:
  *	%RBX is 0 on VM-Exit, 1 on VM-Fail
@@ -102,6 +102,9 @@  ENTRY(__vmx_vcpu_run)
 	 */
 	push %_ASM_ARG2
 
+	/* Copy @launched to BL, _ASM_ARG3 is volatile. */
+	mov %_ASM_ARG3B, %bl
+
 	/* Adjust RSP to account for the CALL to vmx_vmenter(). */
 	lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
 	call vmx_update_host_rsp
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 0ddd08f9337c..70e9b1820cc9 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6447,19 +6447,18 @@  static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		"call __vmx_vcpu_run \n\t"
 	      : ASM_CALL_CONSTRAINT, "=b"(vmx->fail),
 #ifdef CONFIG_X86_64
-		"=D"((int){0}), "=S"((int){0})
-	      : "D"(vmx), "S"(&vcpu->arch.regs),
+		"=D"((int){0}), "=S"((int){0}), "=d"((int){0})
+	      : "D"(vmx), "S"(&vcpu->arch.regs), "d"(vmx->loaded_vmcs->launched)
 #else
-		"=a"((int){0}), "=d"((int){0})
-	      : "a"(vmx), "d"(&vcpu->arch.regs),
+		"=a"((int){0}), "=d"((int){0}), "=c"((int){0})
+	      : "a"(vmx), "d"(&vcpu->arch.regs), "c"(vmx->loaded_vmcs->launched)
 #endif
-		"b"(vmx->loaded_vmcs->launched)
 	      : "cc", "memory"
 #ifdef CONFIG_X86_64
-		, "rax", "rcx", "rdx"
+		, "rax", "rcx"
 		, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
 #else
-		, "ecx", "edi", "esi"
+		, "edi", "esi"
 #endif
 	      );
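
For reference, a hedged sketch of the end state this patch prepares for
(hypothetical at this point in the series; the inline asm is only replaced
by a direct call in a later patch):

	/*
	 * With every input in a standard ABI register, the asm wrapper
	 * can collapse into an ordinary C call:
	 */
	vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
				   vmx->loaded_vmcs->launched);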