
[v6,5/7] KVM: nVMX: Set VM-{Fail,Exit} failure info via params, not return val

Message ID: 20190411191809.8131-6-sean.j.christopherson@intel.com (mailing list archive)
State: New, archived
Series: KVM: nVMX Add IA32_PAT consistency checks

Commit Message

Sean Christopherson April 11, 2019, 7:18 p.m. UTC
Convert all top-level nested VM-Enter consistency check functions to
use explicit parameters to pass failure information to the caller.
Using an explicit parameter achieves several goals:

  - Provides consistent prototypes for all functions.
  - Self-documents the net effect of failure, e.g. without the explicit
    parameter it may not be obvious that nested_vmx_check_guest_state()
    leads to a VM-Exit.
  - Does not give the false impression that failure information is
    always consumed and/or relevant, e.g. vmx_set_nested_state() only
    cares whether or not the checks were successful.

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx/nested.c | 61 ++++++++++++++++++++++-----------------
 1 file changed, 35 insertions(+), 26 deletions(-)
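
At its core the conversion swaps "return the failure detail, 0 on success"
for "return 0/-EINVAL and report the detail through an out-parameter".  A
minimal standalone sketch of the two conventions (the names and checks here
are made up; only the calling convention mirrors the patch):

#include <errno.h>
#include <stdio.h>

/* Old style: the return value doubles as the failure detail (0 == success). */
static unsigned int check_old(unsigned int field)
{
	if (field > 42)
		return 7;	/* opaque failure code */
	return 0;
}

/* New style: 0/-EINVAL return, failure detail reported via an out-parameter. */
static int check_new(unsigned int field, unsigned int *error)
{
	*error = 7;		/* the only failure code this check can produce */
	if (field > 42)
		return -EINVAL;
	return 0;
}

int main(void)
{
	unsigned int error;

	if (check_old(100))
		printf("old style: failure detail folded into the return value\n");
	if (check_new(100, &error))
		printf("new style: failed, detail %u\n", error);
	return 0;
}

The out-parameter is what lets every check function share the same
int-returning prototype, per the first bullet in the commit message.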

Comments

Krish Sadhukhan April 11, 2019, 9:56 p.m. UTC | #1
On 04/11/2019 12:18 PM, Sean Christopherson wrote:
> Convert all top-level nested VM-Enter consistency check functions to
> use explicit parameters to pass failure information to the caller.
> Using an explicit parameter achieves several goals:
>
>    - Provides consistent prototypes for all functions.
>    - Self-documents the net effect of failure, e.g. without the explicit
>      parameter it may not be obvious that nested_vmx_check_guest_state()
>      leads to a VM-Exit.
>    - Does not give the false impression that failure information is
>      always consumed and/or relevant, e.g. vmx_set_nested_state() only
>      cares whether or not the checks were successful.
>
> Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
>   arch/x86/kvm/vmx/nested.c | 61 ++++++++++++++++++++++-----------------
>   1 file changed, 35 insertions(+), 26 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index b22605d5ee9e..16cff40456ee 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -817,7 +817,8 @@ static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
>    * Load guest's/host's msr at nested entry/exit.
>    * return 0 for success, entry index for failure.
>    */
> -static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
> +static int nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count,
> +			       u32 *exit_reason, u32 *exit_qual)
>   {
>   	u32 i;
>   	struct vmx_msr_entry e;
> @@ -849,7 +850,9 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
>   	}
>   	return 0;
>   fail:
> -	return i + 1;
> +	*exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
> +	*exit_qual = i + 1;
> +	return -EINVAL;
>   }
>   
>   static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
> @@ -2574,12 +2577,15 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
>   }
>   
>   static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
> -				     struct vmcs12 *vmcs12)
> +				     struct vmcs12 *vmcs12,
> +				     u32 *vm_instruction_error)
>   {
> +	*vm_instruction_error = VMXERR_ENTRY_INVALID_CONTROL_FIELD;

Although all of the callee functions generate the same error code, isn't
it cleaner to set the error in the callees themselves, where the actual
checking is done?
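
For reference, a rough sketch of that alternative, assuming the low-level
nested_check_* helpers grew the same out-parameter and wrote the error at
their own failure sites (illustrative only, not part of the posted patch):

static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
				     struct vmcs12 *vmcs12,
				     u32 *vm_instruction_error)
{
	/*
	 * Each callee stores VMXERR_ENTRY_INVALID_CONTROL_FIELD (or, some
	 * day, a more specific code) into *vm_instruction_error at the
	 * point where its check fails.
	 */
	if (nested_check_vm_execution_controls(vcpu, vmcs12, vm_instruction_error) ||
	    nested_check_vm_exit_controls(vcpu, vmcs12, vm_instruction_error) ||
	    nested_check_vm_entry_controls(vcpu, vmcs12, vm_instruction_error))
		return -EINVAL;

	return 0;
}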

> +
>   	if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
>   	    nested_check_vm_exit_controls(vcpu, vmcs12) ||
>   	    nested_check_vm_entry_controls(vcpu, vmcs12))
> -		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
> +		return -EINVAL;
>   
>   	return 0;
>   }
> @@ -2624,10 +2630,13 @@ static int nested_check_host_control_regs(struct kvm_vcpu *vcpu,
>   }
>   
>   static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
> -				       struct vmcs12 *vmcs12)
> +				       struct vmcs12 *vmcs12,
> +				       u32 *vm_instruction_error)
>   {
> +	*vm_instruction_error = VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
> +
>   	if (nested_check_host_control_regs(vcpu, vmcs12))
> -		return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
> +		return -EINVAL;
>   
>   	return 0;
>   }
> @@ -2673,10 +2682,12 @@ static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
>   
>   static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
>   					struct vmcs12 *vmcs12,
> +					u32 *exit_reason,
>   					u32 *exit_qual)
>   {
>   	bool ia32e;
>   
> +	*exit_reason = EXIT_REASON_INVALID_STATE;

Shouldn't we also reflect VMX_EXIT_REASONS_FAILED_VMENTRY in 'exit_reason'?
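
For context, a sketch of the consumer: the failed-entry bit is assumed to
be folded in by the vmentry_fail_vmexit path when it writes the synthesized
VM-Exit into vmcs12, rather than by the check itself, e.g.:

	/*
	 * Sketch of the vmentry_fail_vmexit path; assumes it still ORs in
	 * the failed-entry bit when reflecting the exit to L1.
	 */
	vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
	vmcs12->exit_qualification = exit_qual;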

>   	*exit_qual = ENTRY_FAIL_DEFAULT;
>   
>   	if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
> @@ -2965,7 +2976,7 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
>   	struct vcpu_vmx *vmx = to_vmx(vcpu);
>   	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
>   	bool evaluate_pending_interrupts;
> -	u32 exit_reason = EXIT_REASON_INVALID_STATE;
> +	u32 exit_reason;
>   	u32 exit_qual;
>   
>   	evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
> @@ -2991,7 +3002,8 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
>   			return -1;
>   		}
>   
> -		if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
> +		if (nested_vmx_check_guest_state(vcpu, vmcs12,
> +						 &exit_reason, &exit_qual))
>   			goto vmentry_fail_vmexit;
>   	}
>   
> @@ -3003,11 +3015,9 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
>   		goto vmentry_fail_vmexit_guest_mode;
>   
>   	if (from_vmentry) {
> -		exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
> -		exit_qual = nested_vmx_load_msr(vcpu,
> -						vmcs12->vm_entry_msr_load_addr,
> -						vmcs12->vm_entry_msr_load_count);
> -		if (exit_qual)
> +		if (nested_vmx_load_msr(vcpu, vmcs12->vm_entry_msr_load_addr,
> +					vmcs12->vm_entry_msr_load_count,
> +					&exit_reason, &exit_qual))
>   			goto vmentry_fail_vmexit_guest_mode;
>   	} else {
>   		/*
> @@ -3087,6 +3097,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
>   	struct vmcs12 *vmcs12;
>   	struct vcpu_vmx *vmx = to_vmx(vcpu);
>   	u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
> +	u32 vm_instruction_error;
>   	int ret;
>   
>   	if (!nested_vmx_check_permission(vcpu))
> @@ -3136,13 +3147,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
>   			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
>   			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
>   
> -	ret = nested_vmx_check_controls(vcpu, vmcs12);
> -	if (ret)
> -		return nested_vmx_failValid(vcpu, ret);
> +	if (nested_vmx_check_controls(vcpu, vmcs12, &vm_instruction_error))
> +		return nested_vmx_failValid(vcpu, vm_instruction_error);
>   
> -	ret = nested_vmx_check_host_state(vcpu, vmcs12);
> -	if (ret)
> -		return nested_vmx_failValid(vcpu, ret);
> +	if (nested_vmx_check_host_state(vcpu, vmcs12, &vm_instruction_error))
> +		return nested_vmx_failValid(vcpu, vm_instruction_error);
>   
>   	/*
>   	 * We're finally done with prerequisite checking, and can start with
> @@ -3594,7 +3603,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
>   				   struct vmcs12 *vmcs12)
>   {
>   	struct kvm_segment seg;
> -	u32 entry_failure_code;
> +	u32 ign;
>   
>   	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
>   		vcpu->arch.efer = vmcs12->host_ia32_efer;
> @@ -3629,7 +3638,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
>   	 * Only PDPTE load can fail as the value of cr3 was checked on entry and
>   	 * couldn't have changed.
>   	 */
> -	if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
> +	if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &ign))
>   		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
>   
>   	if (!enable_ept)
> @@ -3727,7 +3736,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
>   		vmx_update_msr_bitmap(vcpu);
>   
>   	if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
> -				vmcs12->vm_exit_msr_load_count))
> +				vmcs12->vm_exit_msr_load_count, &ign, &ign))
>   		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
>   }
>   
> @@ -5352,7 +5361,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
>   {
>   	struct vcpu_vmx *vmx = to_vmx(vcpu);
>   	struct vmcs12 *vmcs12;
> -	u32 exit_qual;
> +	u32 ign;
>   	int ret;
>   
>   	if (kvm_state->format != 0)
> @@ -5465,9 +5474,9 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
>   			return -EINVAL;
>   	}
>   
> -	if (nested_vmx_check_controls(vcpu, vmcs12) ||
> -	    nested_vmx_check_host_state(vcpu, vmcs12) ||
> -	    nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
> +	if (nested_vmx_check_controls(vcpu, vmcs12, &ign) ||
> +	    nested_vmx_check_host_state(vcpu, vmcs12, &ign) ||
> +	    nested_vmx_check_guest_state(vcpu, vmcs12, &ign, &ign))
>   		return -EINVAL;
>   
>   	vmx->nested.dirty_vmcs12 = true;
Paolo Bonzini April 12, 2019, 8:30 a.m. UTC | #2
On 11/04/19 21:18, Sean Christopherson wrote:
> Convert all top-level nested VM-Enter consistency check functions to
> use explicit parameters to pass failure information to the caller.
> Using an explicit parameter achieves several goals:
> 
>   - Provides consistent prototypes for all functions.
>   - Self-documents the net effect of failure, e.g. without the explicit
>     parameter it may not be obvious that nested_vmx_check_guest_state()
>     leads to a VM-Exit.
>   - Does not give the false impression that failure information is
>     always consumed and/or relevant, e.g. vmx_set_nested_state() only
>     cares whether or not the checks were successful.
> 
> Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---

I think we have to agree to disagree on this one. :)

I'd rather have something like this (untested) replacing patches 5 and 6:

-------------- 8< -----------------
From: Paolo Bonzini <pbonzini@redhat.com>
Subject: [PATCH] KVM: nVMX: Return -EINVAL when signaling failure in pre-VM-Entry helpers
    
Convert all top-level nested VM-Enter consistency check functions to
return 0/-EINVAL instead of failure codes, since now they can only
ever return one failure code.

This also does not give the false impression that failure information is
always consumed and/or relevant, e.g. vmx_set_nested_state() only
cares whether or not the checks were successful.

nested_check_host_control_regs() can also now be inlined into its caller,
nested_vmx_check_host_state(), since the two have effectively become the
same function.

Based on a patch by Sean Christopherson.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index f58de538e6d3..c75edccdd3bc 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2595,16 +2595,13 @@ static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
 	if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
 	    nested_check_vm_exit_controls(vcpu, vmcs12) ||
 	    nested_check_vm_entry_controls(vcpu, vmcs12))
-		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+		return -EINVAL;
 
 	return 0;
 }
 
-/*
- * Checks related to Host Control Registers and MSRs
- */
-static int nested_check_host_control_regs(struct kvm_vcpu *vcpu,
-                                          struct vmcs12 *vmcs12)
+static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
+				       struct vmcs12 *vmcs12)
 {
 	bool ia32e;
 
@@ -2639,15 +2636,6 @@ static int nested_check_host_control_regs(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
-				       struct vmcs12 *vmcs12)
-{
-	if (nested_check_host_control_regs(vcpu, vmcs12))
-		return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
-
-	return 0;
-}
-
 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
 					  struct vmcs12 *vmcs12)
 {
@@ -3152,13 +3140,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
 			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
 
-	ret = nested_vmx_check_controls(vcpu, vmcs12);
-	if (ret)
-		return nested_vmx_failValid(vcpu, ret);
+	if (nested_vmx_check_controls(vcpu, vmcs12))
+		return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 
-	ret = nested_vmx_check_host_state(vcpu, vmcs12);
-	if (ret)
-		return nested_vmx_failValid(vcpu, ret);
+	if (nested_vmx_check_host_state(vcpu, vmcs12))
+		return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
 
 	/*
 	 * We're finally done with prerequisite checking, and can start with
Sean Christopherson April 12, 2019, 7:12 p.m. UTC | #3
On Fri, Apr 12, 2019 at 10:30:51AM +0200, Paolo Bonzini wrote:
> On 11/04/19 21:18, Sean Christopherson wrote:
> > Convert all top-level nested VM-Enter consistency check functions to
> > use explicit parameters to pass failure information to the caller.
> > Using an explicit parameter achieves several goals:
> > 
> >   - Provides consistent prototypes for all functions.
> >   - Self-documents the net effect of failure, e.g. without the explicit
> >     parameter it may not be obvious that nested_vmx_check_guest_state()
> >     leads to a VM-Exit.
> >   - Does not give the false impression that failure information is
> >     always consumed and/or relevant, e.g. vmx_set_nested_state() only
> >     cares whether or not the checks were successful.
> > 
> > Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
> > Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> > ---
> 
> I think we have to agree to disagree on this one. :)

Aaargh!  The dreaded Maintainer Hammer of Doom!  LGTM ;-)

Patch

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index b22605d5ee9e..16cff40456ee 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -817,7 +817,8 @@  static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
  * Load guest's/host's msr at nested entry/exit.
  * return 0 for success, entry index for failure.
  */
-static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+static int nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count,
+			       u32 *exit_reason, u32 *exit_qual)
 {
 	u32 i;
 	struct vmx_msr_entry e;
@@ -849,7 +850,9 @@  static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 	}
 	return 0;
 fail:
-	return i + 1;
+	*exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
+	*exit_qual = i + 1;
+	return -EINVAL;
 }
 
 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
@@ -2574,12 +2577,15 @@  static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
 }
 
 static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
-				     struct vmcs12 *vmcs12)
+				     struct vmcs12 *vmcs12,
+				     u32 *vm_instruction_error)
 {
+	*vm_instruction_error = VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
 	if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
 	    nested_check_vm_exit_controls(vcpu, vmcs12) ||
 	    nested_check_vm_entry_controls(vcpu, vmcs12))
-		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+		return -EINVAL;
 
 	return 0;
 }
@@ -2624,10 +2630,13 @@  static int nested_check_host_control_regs(struct kvm_vcpu *vcpu,
 }
 
 static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
-				       struct vmcs12 *vmcs12)
+				       struct vmcs12 *vmcs12,
+				       u32 *vm_instruction_error)
 {
+	*vm_instruction_error = VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
+
 	if (nested_check_host_control_regs(vcpu, vmcs12))
-		return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
+		return -EINVAL;
 
 	return 0;
 }
@@ -2673,10 +2682,12 @@  static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
 
 static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
 					struct vmcs12 *vmcs12,
+					u32 *exit_reason,
 					u32 *exit_qual)
 {
 	bool ia32e;
 
+	*exit_reason = EXIT_REASON_INVALID_STATE;
 	*exit_qual = ENTRY_FAIL_DEFAULT;
 
 	if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
@@ -2965,7 +2976,7 @@  int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	bool evaluate_pending_interrupts;
-	u32 exit_reason = EXIT_REASON_INVALID_STATE;
+	u32 exit_reason;
 	u32 exit_qual;
 
 	evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
@@ -2991,7 +3002,8 @@  int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
 			return -1;
 		}
 
-		if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
+		if (nested_vmx_check_guest_state(vcpu, vmcs12,
+						 &exit_reason, &exit_qual))
 			goto vmentry_fail_vmexit;
 	}
 
@@ -3003,11 +3015,9 @@  int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
 		goto vmentry_fail_vmexit_guest_mode;
 
 	if (from_vmentry) {
-		exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
-		exit_qual = nested_vmx_load_msr(vcpu,
-						vmcs12->vm_entry_msr_load_addr,
-						vmcs12->vm_entry_msr_load_count);
-		if (exit_qual)
+		if (nested_vmx_load_msr(vcpu, vmcs12->vm_entry_msr_load_addr,
+					vmcs12->vm_entry_msr_load_count,
+					&exit_reason, &exit_qual))
 			goto vmentry_fail_vmexit_guest_mode;
 	} else {
 		/*
@@ -3087,6 +3097,7 @@  static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	struct vmcs12 *vmcs12;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
+	u32 vm_instruction_error;
 	int ret;
 
 	if (!nested_vmx_check_permission(vcpu))
@@ -3136,13 +3147,11 @@  static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
 			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
 
-	ret = nested_vmx_check_controls(vcpu, vmcs12);
-	if (ret)
-		return nested_vmx_failValid(vcpu, ret);
+	if (nested_vmx_check_controls(vcpu, vmcs12, &vm_instruction_error))
+		return nested_vmx_failValid(vcpu, vm_instruction_error);
 
-	ret = nested_vmx_check_host_state(vcpu, vmcs12);
-	if (ret)
-		return nested_vmx_failValid(vcpu, ret);
+	if (nested_vmx_check_host_state(vcpu, vmcs12, &vm_instruction_error))
+		return nested_vmx_failValid(vcpu, vm_instruction_error);
 
 	/*
 	 * We're finally done with prerequisite checking, and can start with
@@ -3594,7 +3603,7 @@  static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 				   struct vmcs12 *vmcs12)
 {
 	struct kvm_segment seg;
-	u32 entry_failure_code;
+	u32 ign;
 
 	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
 		vcpu->arch.efer = vmcs12->host_ia32_efer;
@@ -3629,7 +3638,7 @@  static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 	 * Only PDPTE load can fail as the value of cr3 was checked on entry and
 	 * couldn't have changed.
 	 */
-	if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
+	if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &ign))
 		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
 
 	if (!enable_ept)
@@ -3727,7 +3736,7 @@  static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 		vmx_update_msr_bitmap(vcpu);
 
 	if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
-				vmcs12->vm_exit_msr_load_count))
+				vmcs12->vm_exit_msr_load_count, &ign, &ign))
 		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
 }
 
@@ -5352,7 +5361,7 @@  static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmcs12 *vmcs12;
-	u32 exit_qual;
+	u32 ign;
 	int ret;
 
 	if (kvm_state->format != 0)
@@ -5465,9 +5474,9 @@  static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 			return -EINVAL;
 	}
 
-	if (nested_vmx_check_controls(vcpu, vmcs12) ||
-	    nested_vmx_check_host_state(vcpu, vmcs12) ||
-	    nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
+	if (nested_vmx_check_controls(vcpu, vmcs12, &ign) ||
+	    nested_vmx_check_host_state(vcpu, vmcs12, &ign) ||
+	    nested_vmx_check_guest_state(vcpu, vmcs12, &ign, &ign))
 		return -EINVAL;
 
 	vmx->nested.dirty_vmcs12 = true;