diff mbox series

[v2,11/18] KVM: nVMX: do early preparation of vmcs02 before check_vmentry_postreqs()

Message ID 20180828160459.14093-12-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Headers show
Series KVM: nVMX: add option to perform early consistency checks via H/W | expand

Commit Message

Sean Christopherson Aug. 28, 2018, 4:04 p.m. UTC
In anticipation of using vmcs02 to do early consistency checks, move
the early preparation of vmcs02 prior to checking the postreqs.  The
downside of this approach is that we'll unnecessarily load vmcs02 in
the case that check_vmentry_postreqs() fails, but that is essentially
our slow path anyway (not actually slow, but it's the path we don't
really care about optimizing).

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

Comments

Jim Mattson Sept. 20, 2018, 7:56 p.m. UTC | #1
On Tue, Aug 28, 2018 at 9:04 AM, Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
> In anticipation of using vmcs02 to do early consistency checks, move
> the early preparation of vmcs02 prior to checking the postreqs.  The
> downside of this approach is that we'll unnecessarily load vmcs02 in
> the case that check_vmentry_postreqs() fails, but that is essentially
> our slow path anyway (not actually slow, but it's the path we don't
> really care about optimizing).
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
diff mbox series

Patch

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 492dc154c31e..ed0f9de50ff7 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -12595,30 +12595,30 @@  static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 	u32 exit_reason = EXIT_REASON_INVALID_STATE;
 	u32 exit_qual;
 
-	if (from_vmentry) {
-		if (check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
-			goto consistency_check_vmexit;
-	}
-
-	enter_guest_mode(vcpu);
-
 	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
 		vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
 
 	vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
-	vmx_segment_cache_clear(vmx);
-
-	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
-		vcpu->arch.tsc_offset += vmcs12->tsc_offset;
 
 	prepare_vmcs02_early(vmx, vmcs12);
 
-	if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
-		goto fail;
-
 	if (from_vmentry) {
 		nested_get_vmcs12_pages(vcpu);
 
+		if (check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
+			goto consistency_check_vmexit;
+	}
+
+	vmx_segment_cache_clear(vmx);
+
+	enter_guest_mode(vcpu);
+	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+		vcpu->arch.tsc_offset += vmcs12->tsc_offset;
+
+	if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
+		goto fail;
+
+	if (from_vmentry) {
 		exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
 		exit_qual = nested_vmx_load_msr(vcpu,
 						vmcs12->vm_entry_msr_load_addr,
@@ -12648,7 +12648,6 @@  static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
 		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
 	leave_guest_mode(vcpu);
-	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
 
 	/*
 	 * A consistency check VMExit during L1's VMEnter to L2 is a subset
@@ -12657,6 +12656,7 @@  static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 	 * reason and exit-qualification parameters).
 	 */
 consistency_check_vmexit:
+	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
 	vm_entry_controls_reset_shadow(vmx);
 	vm_exit_controls_reset_shadow(vmx);
 	vmx_segment_cache_clear(vmx);