
[10/11] KVM: VMX: Add macros to handle HOST_RSP updates at VM-Enter

Message ID 20181220203051.23256-1-sean.j.christopherson@intel.com
State New, archived
Series KVM: VMX: Clean up VM-Enter/VM-Exit asm code

Commit Message

Sean Christopherson Dec. 20, 2018, 8:30 p.m. UTC
...now that nested_vmx_check_vmentry_hw() conditionally synchronizes
RSP with the {e,}VMCS, i.e. duplicates vmx_vcpu_run()'s esoteric RSP
assembly blob.

Note that VMX_UPDATE_VMCS_HOST_RSP_OUTPUTS "incorrectly" marks RDI as
being clobbered (by sending it to a dummy output param).  RDI needs to
be marked as clobbered in the vmx_vcpu_run() case, but trying to do so
by adding RDI to the clobber list would generate a compiler error due
to it being an input parameter.  Alternatively, vmx_vcpu_run() could
manually specify '"=D"((int){0}),' but creating a subtle dependency on
the macro's internals is more likely to cause problems than clobbering
RDI unnecessarily in nested_vmx_check_vmentry_hw().
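
For the curious, a minimal standalone sketch of the dummy output trick
(purely illustrative, not kernel code; the function name and asm body
are made up) on x86-64 with GNU C inline asm:

static inline unsigned long read_and_clobber(unsigned long *p)
{
	unsigned long val;

	/*
	 * The asm consumes its pointer input in RDI and then
	 * overwrites RDI.  Listing "rdi" in the clobber list would
	 * conflict with the "D" input constraint, so a dummy "=D"
	 * output tied to a throwaway compound literal tells the
	 * compiler RDI is modified instead.
	 */
	asm("mov (%%rdi), %0 \n\t"
	    "xor %%edi, %%edi \n\t"
	    : "=r"(val), "=D"((int){0})
	    : "D"(p)
	    : "cc", "memory");
	return val;
}

This is the same pattern VMX_UPDATE_VMCS_HOST_RSP_OUTPUTS uses for
both RDI and RSI.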

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx/nested.c | 24 ++++--------------------
 arch/x86/kvm/vmx/vmx.c    | 21 ++++-----------------
 arch/x86/kvm/vmx/vmx.h    | 28 ++++++++++++++++++++++++++++
 3 files changed, 36 insertions(+), 37 deletions(-)

Patch

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index d6d88dfad39b..99a972fac7e3 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2745,21 +2745,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 		(unsigned long)&current_evmcs->host_rsp : 0;
 
 	asm(
-		/* Set HOST_RSP */
-		"sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
-		"cmp %%" _ASM_SP ", (%% " _ASM_DI") \n\t"
-		"je 1f \n\t"
-		"mov %%" _ASM_SP ", (%% " _ASM_DI") \n\t"
-		/* Avoid VMWRITE when Enlightened VMCS is in use */
-		"test %%" _ASM_SI ", %%" _ASM_SI " \n\t"
-		"jz 2f \n\t"
-		"mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t"
-		"jmp 1f \n\t"
-		"2: \n\t"
-		"mov $%c[HOST_RSP], %%" _ASM_DI " \n\t"
-		__ex("vmwrite %%" _ASM_SP ", %%" _ASM_DI) "\n\t"
-		"1: \n\t"
-		"add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
+		VMX_UPDATE_VMCS_HOST_RSP
 
 		/* Check if vmlaunch or vmresume is needed */
 		"cmpl $0, %c[launched](%% " _ASM_CX")\n\t"
@@ -2768,12 +2754,10 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 
 		/* Set vmx->fail accordingly */
 		"setbe %c[fail](%% " _ASM_CX")\n\t"
-	      : ASM_CALL_CONSTRAINT, "=D"((int){0}), "=S"((int){0})
-	      : "c"(vmx), "D"(&vmx->loaded_vmcs->host_state.rsp), "S"(evmcs_rsp),
-		[HOST_RSP]"i"(HOST_RSP),
+	      : ASM_CALL_CONSTRAINT, VMX_UPDATE_VMCS_HOST_RSP_OUTPUTS
+	      : "c"(vmx), VMX_UPDATE_VMCS_HOST_RSP_INPUTS,
 		[launched]"i"(offsetof(struct vcpu_vmx, __launched)),
-		[fail]"i"(offsetof(struct vcpu_vmx, fail)),
-		[wordsize]"i"(sizeof(ulong))
+		[fail]"i"(offsetof(struct vcpu_vmx, fail))
 	      : "rax", "cc", "memory"
 	);
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 3ecb4c86a240..de709769f2ed 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6126,20 +6126,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		"push %%" _ASM_BP " \n\t"
 		"sub $%c[wordsize], %%" _ASM_SP "\n\t" /* placeholder for guest rcx */
 		"push %%" _ASM_CX " \n\t"
-		"sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
-		"cmp %%" _ASM_SP ", (%%" _ASM_DI ") \n\t"
-		"je 1f \n\t"
-		"mov %%" _ASM_SP ", (%%" _ASM_DI ") \n\t"
-		/* Avoid VMWRITE when Enlightened VMCS is in use */
-		"test %%" _ASM_SI ", %%" _ASM_SI " \n\t"
-		"jz 2f \n\t"
-		"mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t"
-		"jmp 1f \n\t"
-		"2: \n\t"
-		"mov $%c[HOST_RSP], %%" _ASM_DX " \n\t"
-		__ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
-		"1: \n\t"
-		"add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
+
+		VMX_UPDATE_VMCS_HOST_RSP
 
 		/* Reload cr2 if changed */
 		"mov %c[cr2](%%" _ASM_CX "), %%" _ASM_AX " \n\t"
@@ -6221,11 +6209,10 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		"xor %%esi, %%esi \n\t"
 		"xor %%edi, %%edi \n\t"
 		"pop  %%" _ASM_BP " \n\t"
-	      : ASM_CALL_CONSTRAINT, "=D"((int){0}), "=S"((int){0})
-	      : "c"(vmx), "D"(&vmx->loaded_vmcs->host_state.rsp), "S"(evmcs_rsp),
+	      : ASM_CALL_CONSTRAINT, VMX_UPDATE_VMCS_HOST_RSP_OUTPUTS
+	      : "c"(vmx), VMX_UPDATE_VMCS_HOST_RSP_INPUTS,
 		[launched]"i"(offsetof(struct vcpu_vmx, __launched)),
 		[fail]"i"(offsetof(struct vcpu_vmx, fail)),
-		[HOST_RSP]"i"(HOST_RSP),
 		[rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
 		[rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
 		[rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 2138ddffb1cf..4fa17a7180ed 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -265,6 +265,34 @@ struct kvm_vmx {
 	spinlock_t ept_pointer_lock;
 };
 
+#define VMX_UPDATE_VMCS_HOST_RSP				\
+	/* Temporarily adjust RSP for CALL */			\
+	"sub $%c[stacksize], %%" _ASM_SP "\n\t"			\
+	"cmp %%" _ASM_SP ", (%%" _ASM_DI ") \n\t"		\
+	"je 2f \n\t"						\
+	"mov %%" _ASM_SP ", (%%" _ASM_DI ") \n\t"		\
+	/* Avoid VMWRITE when Enlightened VMCS is in use */	\
+	"test %%" _ASM_SI ", %%" _ASM_SI " \n\t"		\
+	"jz 1f \n\t"						\
+	"mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t"		\
+	"jmp 2f \n\t"						\
+	"1: \n\t"						\
+	"mov $%c[HOST_RSP], %%" _ASM_SI " \n\t"			\
+	__ex("vmwrite %%" _ASM_SP ", %%" _ASM_SI) "\n\t"	\
+	"2: \n\t"						\
+	/* un-adjust RSP */					\
+	"add $%c[stacksize], %%" _ASM_SP "\n\t"
+
+#define VMX_UPDATE_VMCS_HOST_RSP_OUTPUTS			\
+	"=D"((int){0}),						\
+	"=S"((int){0})
+
+#define VMX_UPDATE_VMCS_HOST_RSP_INPUTS				\
+	"D"(&vmx->loaded_vmcs->host_state.rsp),			\
+	"S"(evmcs_rsp),						\
+	[HOST_RSP]"i"(HOST_RSP),				\
+	[stacksize]"i"(sizeof(ulong))
+
 bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
 void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 void vmx_vcpu_put(struct kvm_vcpu *vcpu);