[12/16] KVM: nVMX: refactor inner section of nested_vmx_enter_non_root_mode()

Message ID: 20180731153215.31794-13-sean.j.christopherson@intel.com
State: New, archived
Series: KVM: nVMX: add option to perform early consistency checks via H/W

Commit Message

Sean Christopherson July 31, 2018, 3:32 p.m. UTC
Move nested_vmx_load_msr() into prepare_vmcs02() and remove the "fail"
label now that the inner section of nested_vmx_enter_non_root_mode()
has a single error path.  Move the call to vmx_segment_cache_clear()
above the call to enter_guest_mode() so that it is explicitly clear
that the state restored in the prepare_vmcs02() error path is exactly
the state set immediately before the call to prepare_vmcs02().

Eliminating the "fail" label resolves the oddity of having multiple
entry points into the consistency check VMExit handling in
nested_vmx_enter_non_root_mode() and reduces the probability of
breaking the related code (speaking from experience).
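
As a condensed sketch (paraphrasing the diff below, not a verbatim
excerpt), the old shape had two producers of exit_reason feeding one
shared label:

	if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
		goto fail;	/* exit_reason still holds its initial value */

	exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
	if (nested_vmx_load_msr(vcpu, vmcs12->vm_entry_msr_load_addr,
				vmcs12->vm_entry_msr_load_count))
		goto fail;	/* correct only because of the assignment above */

whereas the new shape has a single error path whose exit_reason is
always set by the callee:

	if (prepare_vmcs02(vcpu, vmcs12, &exit_reason, &exit_qual)) {
		/* unwind enter_guest_mode() and the TSC offset ... */
		goto consistency_check_vmexit;
	}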

Pass exit_reason to check_vmentry_postreqs() and prepare_vmcs02(),
and unconditionally set exit_reason and exit_qual in both functions.
This fixes a subtle bug where exit_qual would be used uninitialized
when vmx->nested.nested_run_pending is false and the MSR load fails,
and hopefully prevents such issues in the future; see e.g. this
similar bug from the past: https://www.spinics.net/lists/kvm/msg165988.html.
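
The shape of the uninitialized use, reduced to a standalone sketch
(hypothetical helper names and a stub ENTRY_FAIL_DEFAULT value, not the
KVM code): an out-parameter written only on some paths leaks garbage to
the caller on the rest, while initializing it unconditionally at the
top of the callee closes every such path at once.

	#include <stdint.h>
	#include <stdio.h>

	#define ENTRY_FAIL_DEFAULT 0	/* stand-in for the real constant */

	/* Buggy shape: *exit_qual is written only on one path. */
	static int enter_buggy(int run_pending, uint32_t *exit_qual)
	{
		if (run_pending)
			*exit_qual = ENTRY_FAIL_DEFAULT;	/* only writer */
		return 1;	/* failure, e.g. the MSR load fails */
	}

	/* Fixed shape: the callee initializes its out-param on entry. */
	static int enter_fixed(int run_pending, uint32_t *exit_qual)
	{
		*exit_qual = ENTRY_FAIL_DEFAULT;	/* unconditional */
		(void)run_pending;
		return 1;
	}

	int main(void)
	{
		uint32_t exit_qual;	/* uninitialized, as in the caller */

		if (enter_buggy(0, &exit_qual))
			puts("buggy: exit_qual may be consumed as garbage");
		if (enter_fixed(0, &exit_qual))
			printf("fixed: exit_qual = %u\n", exit_qual);
		return 0;
	}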

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx.c | 49 ++++++++++++++++++++++------------------------
 1 file changed, 23 insertions(+), 26 deletions(-)

Patch

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d5328518b1c6..7c3bdd9deb63 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11469,10 +11469,13 @@  static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
  * is assigned to entry_failure_code on failure.
  */
 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
-			  u32 *entry_failure_code)
+			  u32 *exit_reason, u32 *entry_failure_code)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+	*exit_reason = EXIT_REASON_INVALID_STATE;
+	*entry_failure_code = ENTRY_FAIL_DEFAULT;
+
 	if (vmx->nested.dirty_vmcs12) {
 		prepare_vmcs02_full(vmx, vmcs12);
 		vmx->nested.dirty_vmcs12 = false;
@@ -11572,10 +11575,8 @@  static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	 * which means L1 attempted VMEntry to L2 with invalid state.
 	 * Fail the VMEntry.
 	 */
-	if (vmx->emulation_required) {
-		*entry_failure_code = ENTRY_FAIL_DEFAULT;
+	if (vmx->emulation_required)
 		return 1;
-	}
 
 	/* Shadow page tables on either EPT or shadow page tables. */
 	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
@@ -11587,6 +11588,12 @@  static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 
 	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
 	kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
+
+	if (nested_vmx_load_msr(vcpu, vmcs12->vm_entry_msr_load_addr,
+				      vmcs12->vm_entry_msr_load_count)) {
+		*exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
+		return 1;
+	}
 	return 0;
 }
 
@@ -11753,10 +11760,11 @@  static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 }
 
 static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
-				  u32 *exit_qual)
+				  u32 *exit_reason, u32 *exit_qual)
 {
 	bool ia32e;
 
+	*exit_reason = EXIT_REASON_INVALID_STATE;
 	*exit_qual = ENTRY_FAIL_DEFAULT;
 
 	if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
@@ -11804,9 +11812,7 @@  static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	bool is_vmentry = vmx->nested.nested_run_pending;
-	u32 exit_reason = EXIT_REASON_INVALID_STATE;
-	u32 msr_entry_idx;
-	u32 exit_qual;
+	u32 exit_qual, exit_reason;
 
 	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
 		vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
@@ -11820,26 +11826,22 @@  static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu)
 	if (!is_vmentry)
 		goto enter_non_root_mode;
 
-	if (check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
+	if (check_vmentry_postreqs(vcpu, vmcs12, &exit_reason, &exit_qual))
 		goto consistency_check_vmexit;
 
 enter_non_root_mode:
+	vmx_segment_cache_clear(vmx);
+
 	enter_guest_mode(vcpu);
-
-	vmx_segment_cache_clear(vmx);
-
 	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
 		vcpu->arch.tsc_offset += vmcs12->tsc_offset;
 
-	if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
-		goto fail;
-
-	exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
-	msr_entry_idx = nested_vmx_load_msr(vcpu,
-					    vmcs12->vm_entry_msr_load_addr,
-					    vmcs12->vm_entry_msr_load_count);
-	if (msr_entry_idx)
-		goto fail;
+	if (prepare_vmcs02(vcpu, vmcs12, &exit_reason, &exit_qual)) {
+		if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+			vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
+		leave_guest_mode(vcpu);
+		goto consistency_check_vmexit;
+	}
 
 	/*
 	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
@@ -11849,11 +11851,6 @@  static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu)
 	 */
 	return 0;
 
-fail:
-	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
-		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
-	leave_guest_mode(vcpu);
-
 	/*
 	 * A consistency check VMExit during L1's VMEnter to L2 is a subset
 	 * of a normal VMexit, as explained in 23.7 "VM-entry failures during