@@ -6381,7 +6381,6 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
asm(
/* Store host registers */
"push %%" _ASM_BP " \n\t"
- "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* placeholder for guest RCX */
"push %%" _ASM_ARG1 " \n\t"
/* Adjust RSP to account for the CALL to vmx_vmenter(). */
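
For context: the SUB dropped above pre-reserved an anonymous word on the
stack, which the post-VM-Exit code then had to fill by hand. Below is a
stand-alone userspace sketch of that old pattern, not the kernel blob
itself; the function and values are hypothetical, and it assumes x86-64
GCC/Clang extended asm (a real build would also want -mno-red-zone so the
stack adjustments cannot clobber compiler spills):

#include <stdio.h>

/* Old pattern: SUB reserves a placeholder slot, a later MOV fills it. */
static unsigned long old_scheme(unsigned long host_ptr, unsigned long guest_rcx)
{
	unsigned long reloaded, parked;

	asm("sub  $8, %%rsp     \n\t"  /* placeholder for the guest value */
	    "push %2            \n\t"  /* host pointer saved below it */
	    /* ... the guest run would clobber registers here ... */
	    "mov  %3, 8(%%rsp)  \n\t"  /* park the guest value in the placeholder */
	    "pop  %0            \n\t"  /* reload the host pointer */
	    "pop  %1            \n\t"  /* drain the placeholder */
	    : "=r"(reloaded), "=r"(parked)
	    : "r"(host_ptr), "r"(guest_rcx)
	    : "cc");
	return reloaded + parked;
}

int main(void)
{
	printf("%lu\n", old_scheme(40, 2));  /* prints 42 */
	return 0;
}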
@@ -6417,11 +6416,11 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
/* Enter guest mode */
"call vmx_vmenter\n\t"

- /* Save guest's RCX to the stack placeholder (see above) */
- "mov %%" _ASM_CX ", %c[wordsize](%%" _ASM_SP ") \n\t"
+ /* Temporarily save guest's RCX. */
+ "push %%" _ASM_CX " \n\t"

- /* Load host's RCX, i.e. the vmx_vcpu pointer */
- "pop %%" _ASM_CX " \n\t"
+ /* Reload the vcpu_vmx pointer to RCX. */
+ "mov %c[wordsize](%%" _ASM_SP "), %%" _ASM_CX " \n\t"

/* Set vmx->fail based on EFLAGS.{CF,ZF} */
"setbe %c[fail](%%" _ASM_CX ")\n\t"
@@ -6461,6 +6460,9 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
"xor %%edx, %%edx \n\t"
"xor %%esi, %%esi \n\t"
"xor %%edi, %%edi \n\t"
+
+ /* "POP" the vcpu_vmx pointer. */
+ "add $%c[wordsize], %%" _ASM_SP " \n\t"
"pop %%" _ASM_BP " \n\t"
: ASM_CALL_CONSTRAINT, "=b"((int){0}),
#ifdef CONFIG_X86_64
Switching the ordering allows for an out-of-line path for VM-Fail that
elides saving guest state but still shares the register clearing with
the VM-Exit path.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx/vmx.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)
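
To make the payoff concrete: because the vcpu_vmx pointer now stays on
the stack until after the register clearing, a VM-Fail path can branch
straight to the shared tail without saving guest state first. Roughly,
in plain C with hypothetical helper names standing in for blocks of the
asm (the real code remains a single asm blob):

/* Hypothetical helpers standing in for blocks of the real asm blob. */
static void save_guest_registers(void)    { /* guest GPRs -> vcpu */ }
static void clear_scratch_registers(void) { /* the XOR block above */ }
static void discard_vcpu_pointer(void)    { /* the "add $wordsize, %rsp" */ }

static void vcpu_run_sketch(int vmfail)
{
	if (vmfail)
		goto out;                  /* out-of-line VM-Fail: skip the save */

	save_guest_registers();            /* normal VM-Exit path */
out:
	clear_scratch_registers();         /* shared by both paths */
	discard_vcpu_pointer();
}

int main(void)
{
	vcpu_run_sketch(0);                /* VM-Exit */
	vcpu_run_sketch(1);                /* VM-Fail */
	return 0;
}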