[01/11] KVM: VMX: Explicitly reference RCX as the vmx_vcpu pointer in asm blobs

Message ID: 20181220202518.21442-2-sean.j.christopherson@intel.com
State: New, archived
Series: KVM: VMX: Clean up VM-Enter/VM-Exit asm code

Commit Message

Sean Christopherson Dec. 20, 2018, 8:25 p.m. UTC
Use '%% " _ASM_CX"' instead of '%0' to dereference RCX, i.e. the
'struct vmx_vcpu' pointer, in the VM-Enter asm blobs of vmx_vcpu_run()
and nested_vmx_check_vmentry_hw().  Using the symbolic name means that
adding/removing output parameters no longer requires "rewriting" almost
all of the asm blob, which previously made it nearly impossible to
understand what was being changed in even the most minor patches.

Opportunistically improve the code comments.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx/nested.c |  6 +--
 arch/x86/kvm/vmx/vmx.c    | 86 +++++++++++++++++++++------------------
 2 files changed, 50 insertions(+), 42 deletions(-)
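To make the motivation concrete, here is a minimal standalone sketch.  It is
not from the patch: the demo struct, the helper names, and the simplified
_ASM_CX definition are all made up for illustration.  With a numbered
reference like "%1", inserting or removing an operand shifts the numbering of
everything after it; with an explicit "c" constraint and a symbolic register
name, the asm text itself never needs to change.

#include <stddef.h>

/* Simplified stand-in for the kernel's _ASM_CX macro. */
#ifdef __x86_64__
#define _ASM_CX "rcx"
#else
#define _ASM_CX "ecx"
#endif

/* Hypothetical stand-in for the vcpu_vmx layout. */
struct demo_vcpu {
	int launched;
};

/* Numbered form: the blob hard-codes "%1", so inserting a new output
 * operand renumbers it and forces edits throughout the blob. */
static int launched_numbered(struct demo_vcpu *v)
{
	int ret;

	asm("mov %c[launched](%1), %0"
	    : "=r" (ret)
	    : "r" (v), [launched] "i" (offsetof(struct demo_vcpu, launched)));
	return ret;
}

/* Symbolic form: the "c" constraint pins the pointer in RCX and the
 * blob names the register directly, immune to operand renumbering. */
static int launched_symbolic(struct demo_vcpu *v)
{
	int ret;

	asm("mov %c[launched](%%" _ASM_CX "), %0"
	    : "=r" (ret)
	    : "c" (v), [launched] "i" (offsetof(struct demo_vcpu, launched)));
	return ret;
}

In both helpers the %c operand modifier prints the offsetof() constant
without the '$' immediate prefix so it can be used directly in the addressing
mode; only the way the base register is named differs.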

Comments

Miroslav Benes Dec. 21, 2018, 10:39 a.m. UTC | #1
On Thu, 20 Dec 2018, Sean Christopherson wrote:

> Use '%% " _ASM_CX"' instead of '%0' to dereference RCX, i.e. the
> 'struct vmx_vcpu' pointer, in the VM-Enter asm blobs of vmx_vcpu_run()

It's a nit, but I think it's 'struct vcpu_vmx' pointer. The typo is also 
in the new comments below and in the subject.

> and nested_vmx_check_vmentry_hw().  Using the symbolic name means that
> adding/removing output parameters no longer requires "rewriting" almost
> all of the asm blob, which previously made it nearly impossible to
> understand what was being changed in even the most minor patches.
> 
> Opportunistically improve the code comments.
> 
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
>  arch/x86/kvm/vmx/nested.c |  6 +--
>  arch/x86/kvm/vmx/vmx.c    | 86 +++++++++++++++++++++------------------
>  2 files changed, 50 insertions(+), 42 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index 3f019aa63341..43b33cd23ac5 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -2759,17 +2759,17 @@ static int __noclone nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
>  	asm(
>  		/* Set HOST_RSP */
>  		__ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
> -		"mov %%" _ASM_SP ", %c[host_rsp](%0)\n\t"
> +		"mov %%" _ASM_SP ", %c[host_rsp](%% " _ASM_CX")\n\t"
>  
>  		/* Check if vmlaunch or vmresume is needed */
> -		"cmpl $0, %c[launched](%0)\n\t"
> +		"cmpl $0, %c[launched](%% " _ASM_CX")\n\t"
>  		"jne 1f\n\t"
>  		__ex("vmlaunch") "\n\t"
>  		"jmp 2f\n\t"
>  		"1: " __ex("vmresume") "\n\t"
>  		"2: "
>  		/* Set vmx->fail accordingly */
> -		"setbe %c[fail](%0)\n\t"
> +		"setbe %c[fail](%% " _ASM_CX")\n\t"
>  
>  		".pushsection .rodata\n\t"
>  		".global vmx_early_consistency_check_return\n\t"
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index b8fa74ce3af2..42bfcd28c27b 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -6124,9 +6124,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  		"push %%" _ASM_DX "; push %%" _ASM_BP ";"
>  		"push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
>  		"push %%" _ASM_CX " \n\t"
> -		"cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
> +		"cmp %%" _ASM_SP ", %c[host_rsp](%%" _ASM_CX ") \n\t"
>  		"je 1f \n\t"
> -		"mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
> +		"mov %%" _ASM_SP ", %c[host_rsp](%%" _ASM_CX ") \n\t"
>  		/* Avoid VMWRITE when Enlightened VMCS is in use */
>  		"test %%" _ASM_SI ", %%" _ASM_SI " \n\t"
>  		"jz 2f \n\t"
> @@ -6136,32 +6136,33 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  		__ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
>  		"1: \n\t"
>  		/* Reload cr2 if changed */
> -		"mov %c[cr2](%0), %%" _ASM_AX " \n\t"
> +		"mov %c[cr2](%%" _ASM_CX "), %%" _ASM_AX " \n\t"
>  		"mov %%cr2, %%" _ASM_DX " \n\t"
>  		"cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
>  		"je 3f \n\t"
>  		"mov %%" _ASM_AX", %%cr2 \n\t"
>  		"3: \n\t"
>  		/* Check if vmlaunch or vmresume is needed */
> -		"cmpl $0, %c[launched](%0) \n\t"
> +		"cmpl $0, %c[launched](%%" _ASM_CX ") \n\t"
>  		/* Load guest registers.  Don't clobber flags. */
> -		"mov %c[rax](%0), %%" _ASM_AX " \n\t"
> -		"mov %c[rbx](%0), %%" _ASM_BX " \n\t"
> -		"mov %c[rdx](%0), %%" _ASM_DX " \n\t"
> -		"mov %c[rsi](%0), %%" _ASM_SI " \n\t"
> -		"mov %c[rdi](%0), %%" _ASM_DI " \n\t"
> -		"mov %c[rbp](%0), %%" _ASM_BP " \n\t"
> +		"mov %c[rax](%%" _ASM_CX "), %%" _ASM_AX " \n\t"
> +		"mov %c[rbx](%%" _ASM_CX "), %%" _ASM_BX " \n\t"
> +		"mov %c[rdx](%%" _ASM_CX "), %%" _ASM_DX " \n\t"
> +		"mov %c[rsi](%%" _ASM_CX "), %%" _ASM_SI " \n\t"
> +		"mov %c[rdi](%%" _ASM_CX "), %%" _ASM_DI " \n\t"
> +		"mov %c[rbp](%%" _ASM_CX "), %%" _ASM_BP " \n\t"
>  #ifdef CONFIG_X86_64
> -		"mov %c[r8](%0),  %%r8  \n\t"
> -		"mov %c[r9](%0),  %%r9  \n\t"
> -		"mov %c[r10](%0), %%r10 \n\t"
> -		"mov %c[r11](%0), %%r11 \n\t"
> -		"mov %c[r12](%0), %%r12 \n\t"
> -		"mov %c[r13](%0), %%r13 \n\t"
> -		"mov %c[r14](%0), %%r14 \n\t"
> -		"mov %c[r15](%0), %%r15 \n\t"
> +		"mov %c[r8](%%" _ASM_CX "),  %%r8  \n\t"
> +		"mov %c[r9](%%" _ASM_CX "),  %%r9  \n\t"
> +		"mov %c[r10](%%" _ASM_CX "), %%r10 \n\t"
> +		"mov %c[r11](%%" _ASM_CX "), %%r11 \n\t"
> +		"mov %c[r12](%%" _ASM_CX "), %%r12 \n\t"
> +		"mov %c[r13](%%" _ASM_CX "), %%r13 \n\t"
> +		"mov %c[r14](%%" _ASM_CX "), %%r14 \n\t"
> +		"mov %c[r15](%%" _ASM_CX "), %%r15 \n\t"
>  #endif
> -		"mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */
> +		/* Load guest RCX.  This kills the vmx_vcpu pointer! */

Here

> +		"mov %c[rcx](%%" _ASM_CX "), %%" _ASM_CX " \n\t"
>  
>  		/* Enter guest mode */
>  		"jne 1f \n\t"
> @@ -6169,26 +6170,33 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  		"jmp 2f \n\t"
>  		"1: " __ex("vmresume") "\n\t"
>  		"2: "
> -		/* Save guest registers, load host registers, keep flags */
> -		"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
> -		"pop %0 \n\t"
> -		"setbe %c[fail](%0)\n\t"
> -		"mov %%" _ASM_AX ", %c[rax](%0) \n\t"
> -		"mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
> -		__ASM_SIZE(pop) " %c[rcx](%0) \n\t"
> -		"mov %%" _ASM_DX ", %c[rdx](%0) \n\t"
> -		"mov %%" _ASM_SI ", %c[rsi](%0) \n\t"
> -		"mov %%" _ASM_DI ", %c[rdi](%0) \n\t"
> -		"mov %%" _ASM_BP ", %c[rbp](%0) \n\t"
> +
> +		/* Save guest's RCX to the stack placeholder (see above) */
> +		"mov %%" _ASM_CX ", %c[wordsize](%%" _ASM_SP ") \n\t"
> +
> +		/* Load host's RCX, i.e. the vmx_vcpu pointer */

And here.

> +		"pop %%" _ASM_CX " \n\t"
> +
> +		/* Set vmx->fail based on EFLAGS.{CF,ZF} */
> +		"setbe %c[fail](%%" _ASM_CX ")\n\t"
> +
> +		/* Save all guest registers, including RCX from the stack */
> +		"mov %%" _ASM_AX ", %c[rax](%%" _ASM_CX ") \n\t"
> +		"mov %%" _ASM_BX ", %c[rbx](%%" _ASM_CX ") \n\t"
> +		__ASM_SIZE(pop) " %c[rcx](%%" _ASM_CX ") \n\t"
> +		"mov %%" _ASM_DX ", %c[rdx](%%" _ASM_CX ") \n\t"
> +		"mov %%" _ASM_SI ", %c[rsi](%%" _ASM_CX ") \n\t"
> +		"mov %%" _ASM_DI ", %c[rdi](%%" _ASM_CX ") \n\t"
> +		"mov %%" _ASM_BP ", %c[rbp](%%" _ASM_CX ") \n\t"
>  #ifdef CONFIG_X86_64
> -		"mov %%r8,  %c[r8](%0) \n\t"
> -		"mov %%r9,  %c[r9](%0) \n\t"
> -		"mov %%r10, %c[r10](%0) \n\t"
> -		"mov %%r11, %c[r11](%0) \n\t"
> -		"mov %%r12, %c[r12](%0) \n\t"
> -		"mov %%r13, %c[r13](%0) \n\t"
> -		"mov %%r14, %c[r14](%0) \n\t"
> -		"mov %%r15, %c[r15](%0) \n\t"
> +		"mov %%r8,  %c[r8](%%" _ASM_CX ") \n\t"
> +		"mov %%r9,  %c[r9](%%" _ASM_CX ") \n\t"
> +		"mov %%r10, %c[r10](%%" _ASM_CX ") \n\t"
> +		"mov %%r11, %c[r11](%%" _ASM_CX ") \n\t"
> +		"mov %%r12, %c[r12](%%" _ASM_CX ") \n\t"
> +		"mov %%r13, %c[r13](%%" _ASM_CX ") \n\t"
> +		"mov %%r14, %c[r14](%%" _ASM_CX ") \n\t"
> +		"mov %%r15, %c[r15](%%" _ASM_CX ") \n\t"
>  		/*
>  		* Clear host registers marked as clobbered to prevent
>  		* speculative use.
> @@ -6203,7 +6211,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  		"xor %%r15d, %%r15d \n\t"
>  #endif
>  		"mov %%cr2, %%" _ASM_AX "   \n\t"
> -		"mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
> +		"mov %%" _ASM_AX ", %c[cr2](%%" _ASM_CX ") \n\t"
>  
>  		"xor %%eax, %%eax \n\t"
>  		"xor %%ebx, %%ebx \n\t"

Otherwise, it looks good to me.

Miroslav
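
An aside on the "setbe %c[fail]" lines quoted above: VMX instructions report
failure through EFLAGS, with CF=1 signalling VMfailInvalid and ZF=1
signalling VMfailValid, and "setbe" (set if below or equal, i.e. if CF or ZF
is set) latches either case into a single byte.  Below is a standalone
user-space sketch of that flag behavior (editorial, not from the patch), with
a cmp standing in for vmlaunch/vmresume:

#include <stdio.h>

int main(void)
{
	unsigned char fail;
	unsigned long a = 1, b = 1;

	/* cmp sets ZF because the operands are equal; setbe then
	 * latches CF | ZF, exactly how the blob derives vmx->fail
	 * after vmlaunch/vmresume. */
	asm("cmp %2, %1\n\t"
	    "setbe %0"
	    : "=q" (fail)
	    : "r" (a), "r" (b)
	    : "cc");

	printf("fail = %u\n", (unsigned)fail);	/* prints 1: ZF was set */
	return 0;
}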
Sean Christopherson Dec. 21, 2018, 6:37 p.m. UTC | #2
On Fri, Dec 21, 2018 at 11:39:07AM +0100, Miroslav Benes wrote:
> On Thu, 20 Dec 2018, Sean Christopherson wrote:
> 
> > Use '%% " _ASM_CX"' instead of '%0' to dereference RCX, i.e. the
> > 'struct vmx_vcpu' pointer, in the VM-Enter asm blobs of vmx_vcpu_run()
> 
> It's a nit, but I think it's 'struct vcpu_vmx' pointer. The typo is also 
> in the new comments below and in the subject.

Gah, I always manage to switch those two.  Looks like Paolo caught the
changelog typo but not the comments.

Paolo, can you fixup the comments, or should I send a proper patch?

From 20cce9c6f6f8fd12bfc02926a3a736117a06adf4 Mon Sep 17 00:00:00 2001
From: Sean Christopherson <sean.j.christopherson@intel.com>
Date: Fri, 21 Dec 2018 10:35:43 -0800
Subject: [PATCH] KVM: VMX: Fix comment typo to reference vcpu_vmx

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx/vmx.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 41d6f7081ff7..f2626c25db2c 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6482,7 +6482,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		"mov %c[r14](%%" _ASM_CX "), %%r14 \n\t"
 		"mov %c[r15](%%" _ASM_CX "), %%r15 \n\t"
 #endif
-		/* Load guest RCX.  This kills the vmx_vcpu pointer! */
+		/* Load guest RCX.  This kills the vcpu_vmx pointer! */
 		"mov %c[rcx](%%" _ASM_CX "), %%" _ASM_CX " \n\t"
 
 		/* Enter guest mode */
@@ -6491,7 +6491,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		/* Save guest's RCX to the stack placeholder (see above) */
 		"mov %%" _ASM_CX ", %c[wordsize](%%" _ASM_SP ") \n\t"
 
-		/* Load host's RCX, i.e. the vmx_vcpu pointer */
+		/* Load host's RCX, i.e. the vcpu_vmx pointer */
 		"pop %%" _ASM_CX " \n\t"
 
 		/* Set vmx->fail based on EFLAGS.{CF,ZF} */