
[v3,16/33] KVM: VMX: Load/save guest CR2 via C code in __vmx_vcpu_run()

Message ID 20190125154120.19385-17-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series KVM: VMX: Move vCPU-run to proper asm sub-routine

Commit Message

Sean Christopherson Jan. 25, 2019, 3:41 p.m. UTC
...to eliminate its parameter and struct vcpu_vmx offset definition
from the assembly blob.  Accessing CR2 from C versus assembly doesn't
change the likelihood of taking a page fault (and modifying CR2) while
it's loaded with the guest's value, so long as we don't do anything
silly between accessing CR2 and VM-Enter/VM-Exit.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx/vmx.c | 16 +++++-----------
 1 file changed, 5 insertions(+), 11 deletions(-)
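
For reference, here is a condensed sketch of __vmx_vcpu_run() as it looks with this patch applied; the inline asm blob and unrelated setup are elided, keeping only the CR2 handling shown in the hunks below:

static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
{
	/* Load the guest's CR2, skipping the write if it already matches. */
	if (vcpu->arch.cr2 != read_cr2())
		write_cr2(vcpu->arch.cr2);

	/* VM-Enter/VM-Exit asm blob elided; it no longer touches CR2. */
	asm volatile("" ::: "memory");

	/*
	 * Save the guest's CR2 immediately after VM-Exit, before anything
	 * that could fault and clobber CR2 with a host value runs.
	 */
	vcpu->arch.cr2 = read_cr2();
}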

Comments

Jim Mattson Jan. 25, 2019, 9:57 p.m. UTC | #1
On Fri, Jan 25, 2019 at 7:42 AM Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
>
> ...to eliminate its parameter and struct vcpu_vmx offset definition
> from the assembly blob.  Accessing CR2 from C versus assembly doesn't
> change the likelihood of taking a page fault (and modifying CR2) while
> it's loaded with the guest's value, so long as we don't do anything
> silly between accessing CR2 and VM-Enter/VM-Exit.
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Jim Mattson <jmattson@google.com>

Patch

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index fff1e5b5febe..ef6b3fb2e98b 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6374,6 +6374,9 @@  static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
 	if (static_branch_unlikely(&vmx_l1d_should_flush))
 		vmx_l1d_flush(vcpu);
 
+	if (vcpu->arch.cr2 != read_cr2())
+		write_cr2(vcpu->arch.cr2);
+
 	asm(
 		/* Store host registers */
 		"push %%" _ASM_BP " \n\t"
@@ -6394,13 +6397,6 @@  static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
 		"1: \n\t"
 		"add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
 
-		/* Reload cr2 if changed */
-		"mov %c[cr2](%%" _ASM_CX "), %%" _ASM_AX " \n\t"
-		"mov %%cr2, %%" _ASM_DX " \n\t"
-		"cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
-		"je 3f \n\t"
-		"mov %%" _ASM_AX", %%cr2 \n\t"
-		"3: \n\t"
 		/* Check if vmlaunch or vmresume is needed */
 		"cmpb $0, %c[launched](%%" _ASM_CX ") \n\t"
 		/* Load guest registers.  Don't clobber flags. */
@@ -6470,9 +6466,6 @@  static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
 		"xor %%r14d, %%r14d \n\t"
 		"xor %%r15d, %%r15d \n\t"
 #endif
-		"mov %%cr2, %%" _ASM_AX "   \n\t"
-		"mov %%" _ASM_AX ", %c[cr2](%%" _ASM_CX ") \n\t"
-
 		"xor %%eax, %%eax \n\t"
 		"xor %%ebx, %%ebx \n\t"
 		"xor %%ecx, %%ecx \n\t"
@@ -6503,7 +6496,6 @@  static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
 		[r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
 		[r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
 #endif
-		[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
 		[wordsize]"i"(sizeof(ulong))
 	      : "cc", "memory"
 #ifdef CONFIG_X86_64
@@ -6513,6 +6505,8 @@  static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
 		, "eax", "ebx", "edx"
 #endif
 	      );
+
+	vcpu->arch.cr2 = read_cr2();
 }
 STACK_FRAME_NON_STANDARD(__vmx_vcpu_run);
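
As background on the commit message's claim that doing this in C doesn't widen the window in which guest CR2 is live: read_cr2() and write_cr2() each boil down to a single mov to/from %cr2 (on paravirt kernels they may be routed through pv_ops, but the native path is just the mov). Below is a minimal sketch of the native forms, using hypothetical *_sketch names rather than the actual accessors from arch/x86/include/asm/special_insns.h:

/* Sketch only; see arch/x86/include/asm/special_insns.h for the real ones. */
static inline unsigned long native_read_cr2_sketch(void)
{
	unsigned long val;

	asm volatile("mov %%cr2, %0" : "=r" (val) : : "memory");
	return val;
}

static inline void native_write_cr2_sketch(unsigned long val)
{
	asm volatile("mov %0, %%cr2" : : "r" (val) : "memory");
}

Since each access is a single instruction either way, moving the load/save into C only changes where the compare and mov are emitted, not what can run between the CR2 access and VM-Enter/VM-Exit, which matches the commit message's caveat about not doing anything "silly" in between.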