@@ -11689,7 +11689,7 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
return 0;
}
-static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
+static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
if (vmcs12->vm_entry_msr_load_count == 0)
@@ -12403,22 +12403,6 @@ static int nested_check_vm_exit_ctls(struct vmcs12 *vmcs12)
if (nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))
return -EINVAL;
- return 0;
-}
-
-static int nested_check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- if (nested_check_guest_state_area(vmcs12) ||
- nested_check_host_state_area(vmcs12) ||
- nested_check_vm_execution_ctls(vmcs12) ||
- nested_check_vm_exit_ctls(vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
/*
* From the Intel SDM, volume 3:
* Fields relevant to VM-entry event injection must be set properly.
@@ -12478,6 +12462,20 @@ static int nested_check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vm
return 0;
}
+static int nested_check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (nested_check_guest_state_area(vmcs12) ||
+ nested_check_host_state_area(vmcs12) ||
+ nested_check_vm_execution_ctls(vmcs12) ||
+ nested_check_vm_exit_ctls(vmcs12) ||
+ nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+ return 0;
+}
+
static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{