
[v2,1/3] kvm, vmx: move CR2 context switch out of assembly path

Message ID 74fa3809ea293cc05d37b1449b16e08480c4ddbd.1540822350.git.jsteckli@amazon.de
State New, archived

Commit Message

Julian Stecklina Oct. 29, 2018, 3:40 p.m. UTC
The VM entry/exit path is a giant inline assembly statement. Simplify it
by doing the CR2 context switch in plain C. Move the CR2 restore behind
IBRS clearing, so that we reduce the amount of code executed with IBRS on.

Signed-off-by: Julian Stecklina <jsteckli@amazon.de>
Reviewed-by: Jan H. Schönherr <jschoenh@amazon.de>
Reviewed-by: Konrad Jan Miller <kjm@amazon.de>
Reviewed-by: Jim Mattson <jmattson@google.com>
---
 arch/x86/kvm/vmx.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

Comments

Sean Christopherson Oct. 29, 2018, 5:14 p.m. UTC | #1
On Mon, Oct 29, 2018 at 04:40:42PM +0100, Julian Stecklina wrote:
> The VM entry/exit path is a giant inline assembly statement. Simplify it
> by doing the CR2 context switch in plain C. Move the CR2 restore behind
> IBRS clearing, so that we reduce the amount of code executed with IBRS on.

I think it's worth documenting two things in the changelog:

  - Using {read,write}_cr2() means KVM will use pv_mmu_ops instead of
    open coding native_{read,write}_cr2().

  - The CR2 code has been done in assembly since KVM's genesis[1],
    which predates the addition of the paravirt ops[2], i.e. KVM isn't
    deliberately avoiding the paravirt ops.

The above info makes it trivially easy to review this patch from a
correctness standpoint.  With that:

Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>


[1] Commit 6aa8b732ca01 ("[PATCH] kvm: userspace interface")
[2] Commit d3561b7fa0fb ("[PATCH] paravirt: header and stubs for paravirtualisation")
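For context on the pv_mmu_ops point above: a rough sketch of the
indirection involved, assuming the 4.19-era layout of
arch/x86/include/asm/special_insns.h and paravirt.h (simplified, not
verbatim kernel code):

  /* Native access is a raw mov from %cr2. */
  static inline unsigned long native_read_cr2(void)
  {
          unsigned long val;
          asm volatile("mov %%cr2,%0" : "=r" (val));
          return val;
  }

  #ifdef CONFIG_PARAVIRT
  /* read_cr2() dispatches through pv_mmu_ops, so a hypervisor such as
   * Xen can supply its own CR2 accessor; on bare metal the call site
   * is patched back to the native mov at boot. */
  static inline unsigned long read_cr2(void)
  {
          return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
  }
  #else
  static inline unsigned long read_cr2(void)
  {
          return native_read_cr2();
  }
  #endif

write_cr2() mirrors this, so switching vmx.c from the open-coded mov to
read_cr2()/write_cr2() picks up the paravirt hook; per [1] and [2] above,
the asm simply predates that hook rather than deliberately avoiding it.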
 

Patch

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ccc6a01..a6e5a5c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11212,6 +11212,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
 		(unsigned long)&current_evmcs->host_rsp : 0;
 
+	if (read_cr2() != vcpu->arch.cr2)
+		write_cr2(vcpu->arch.cr2);
+
 	if (static_branch_unlikely(&vmx_l1d_should_flush))
 		vmx_l1d_flush(vcpu);
 
@@ -11231,13 +11234,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		"2: \n\t"
 		__ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
 		"1: \n\t"
-		/* Reload cr2 if changed */
-		"mov %c[cr2](%0), %%" _ASM_AX " \n\t"
-		"mov %%cr2, %%" _ASM_DX " \n\t"
-		"cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
-		"je 3f \n\t"
-		"mov %%" _ASM_AX", %%cr2 \n\t"
-		"3: \n\t"
 		/* Check if vmlaunch or vmresume is needed */
 		"cmpl $0, %c[launched](%0) \n\t"
 		/* Load guest registers.  Don't clobber flags. */
@@ -11298,8 +11294,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		"xor %%r14d, %%r14d \n\t"
 		"xor %%r15d, %%r15d \n\t"
 #endif
-		"mov %%cr2, %%" _ASM_AX "   \n\t"
-		"mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
 
 		"xor %%eax, %%eax \n\t"
 		"xor %%ebx, %%ebx \n\t"
@@ -11331,7 +11325,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		[r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
 		[r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
 #endif
-		[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
 		[wordsize]"i"(sizeof(ulong))
 	      : "cc", "memory"
 #ifdef CONFIG_X86_64
@@ -11365,6 +11358,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
 
+	vcpu->arch.cr2 = read_cr2();
+
 	/* All fields are clean at this point */
 	if (static_branch_unlikely(&enable_evmcs))
 		current_evmcs->hv_clean_fields |=
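
The net effect is easier to see with the hunks condensed; a simplified
sketch of the resulting vmx_vcpu_run() flow (paraphrased from the patch,
surrounding 4.19-era code elided, not the literal kernel source):

  static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
  {
          /* ... */

          /* Writing CR2 is comparatively expensive, so only restore
           * the guest value when it actually differs. */
          if (read_cr2() != vcpu->arch.cr2)
                  write_cr2(vcpu->arch.cr2);

          if (static_branch_unlikely(&vmx_l1d_should_flush))
                  vmx_l1d_flush(vcpu);

          /* ... giant inline asm: VMLAUNCH/VMRESUME and the GPR
           * context switch, now with no CR2 handling inside ... */

          /* IBRS is cleared between the asm block and here, so the
           * CR2 save below no longer runs with IBRS on. */
          vmexit_fill_RSB();

          /* A guest #PF leaves its fault address in CR2 before the
           * VM exit; save the guest value via the (possibly paravirt)
           * read_cr2(). */
          vcpu->arch.cr2 = read_cr2();

          /* ... */
  }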