
[v3,30/33] KVM: VMX: Return VM-Fail from vCPU-run assembly via standard ABI reg

Message ID 20190125154120.19385-31-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series KVM: VMX: Move vCPU-run to proper asm sub-routine

Commit Message

Sean Christopherson Jan. 25, 2019, 3:41 p.m. UTC
...to prepare for making the assembly sub-routine callable from C code.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx/vmenter.S | 16 ++++++++--------
 arch/x86/kvm/vmx/vmx.c     |  8 ++++----
 2 files changed, 12 insertions(+), 12 deletions(-)
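
Returning the VM-Fail flag in RAX/EAX instead of RBX/EBX matches the
standard x86 calling conventions, so a follow-up patch can declare the
routine in C and let the compiler pick up the result directly. A minimal
sketch of what that declaration and call might look like (the exact
signature is an assumption inferred from the routine's documented
inputs, not taken from this patch):

  /* Hypothetical C declaration enabled by this change. */
  bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
                      bool launched);

  /* The inline asm wrapper in vmx_vcpu_run() could then collapse to: */
  vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
                             vmx->loaded_vmcs->launched);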

Patch

diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 3299fafbaa9b..246404b38b37 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -90,7 +90,7 @@  ENDPROC(vmx_vmexit)
  * @launched:	%true if the VMCS has been launched
  *
  * Returns:
- *	%RBX is 0 on VM-Exit, 1 on VM-Fail
+ *	0 on VM-Exit, 1 on VM-Fail
  */
 ENTRY(__vmx_vcpu_run)
 	push %_ASM_BP
@@ -166,17 +166,17 @@  ENTRY(__vmx_vcpu_run)
 	mov %r15, VCPU_R15(%_ASM_AX)
 #endif
 
-	/* Clear EBX to indicate VM-Exit (as opposed to VM-Fail). */
-	xor %ebx, %ebx
+	/* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
+	xor %eax, %eax
 
 	/*
-	 * Clear all general purpose registers except RSP and RBX to prevent
+	 * Clear all general purpose registers except RSP and RAX to prevent
 	 * speculative use of the guest's values, even those that are reloaded
 	 * via the stack.  In theory, an L1 cache miss when restoring registers
 	 * could lead to speculative execution with the guest's values.
 	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
-	 * free.  RSP and RBX are exempt as RSP is restored by hardware during
-	 * VM-Exit and RBX is explicitly loaded with 0 or 1 to "return" VM-Fail.
+	 * free.  RSP and RAX are exempt as RSP is restored by hardware during
+	 * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
 	 */
 1:
 #ifdef CONFIG_X86_64
@@ -189,7 +189,7 @@  ENTRY(__vmx_vcpu_run)
 	xor %r14d, %r14d
 	xor %r15d, %r15d
 #endif
-	xor %eax, %eax
+	xor %ebx, %ebx
 	xor %ecx, %ecx
 	xor %edx, %edx
 	xor %esi, %esi
@@ -202,6 +202,6 @@  ENTRY(__vmx_vcpu_run)
 	ret
 
 	/* VM-Fail.  Out-of-line to avoid a taken Jcc after VM-Exit. */
-2:	mov $1, %ebx
+2:	mov $1, %eax
 	jmp 1b
 ENDPROC(__vmx_vcpu_run)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 70e9b1820cc9..73a1e656b123 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6445,20 +6445,20 @@  static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
 	asm(
 		"call __vmx_vcpu_run \n\t"
-	      : ASM_CALL_CONSTRAINT, "=b"(vmx->fail),
+	      : ASM_CALL_CONSTRAINT, "=a"(vmx->fail),
 #ifdef CONFIG_X86_64
 		"=D"((int){0}), "=S"((int){0}), "=d"((int){0})
 	      : "D"(vmx), "S"(&vcpu->arch.regs), "d"(vmx->loaded_vmcs->launched)
 #else
-		"=a"((int){0}), "=d"((int){0}), "=c"((int){0})
+		"=d"((int){0}), "=c"((int){0})
 	      : "a"(vmx), "d"(&vcpu->arch.regs), "c"(vmx->loaded_vmcs->launched)
 #endif
 	      : "cc", "memory"
 #ifdef CONFIG_X86_64
-		, "rax", "rcx"
+		, "rbx", "rcx"
 		, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
 #else
-		, "edi", "esi"
+		, "ebx", "edi", "esi"
 #endif
 	      );
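
On the C side, the switch to "=a" tells the compiler that the asm writes
vmx->fail through EAX/RAX, which is why RAX/EAX drops out of the
clobber/dummy-output lists and RBX/EBX takes its place (the routine now
zeroes RBX along with the other GPRs). A stripped-down illustration of
the same constraint pattern, with a hypothetical stub name (my_stub is
not from the patch, and ASM_CALL_CONSTRAINT is omitted for brevity):

  int fail;

  /*
   * "=a" reads the result back from EAX after the call; "ebx" is
   * listed as clobbered because the callee zeroes it before returning.
   */
  asm("call my_stub \n\t"
      : "=a"(fail)
      :
      : "cc", "memory", "ebx");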