@@ -12338,39 +12338,19 @@ static int nested_check_host_state_area(struct vmcs12 *vmcs12)
return 0;
}
-static int nested_check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- if (nested_check_guest_state_area(vmcs12) ||
- nested_check_host_state_area(vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_apic_access_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_apicv_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_pml_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
+/*
+ * Checks related to VM-Execution Control Fields
+ */
+static int nested_check_vm_execution_ctls(struct kvm_vcpu *vcpu,
+					   struct vmcs12 *vmcs12)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
+ nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
+ nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
+ nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
+ nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
+ nested_vmx_check_pml_controls(vcpu, vmcs12) ||
+ nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
+ !vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
vmx->nested.msrs.procbased_ctls_low,
vmx->nested.msrs.procbased_ctls_high) ||
(nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
@@ -12385,25 +12365,38 @@ static int nested_check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vm
vmx->nested.msrs.exit_ctls_high) ||
!vmx_control_verify(vmcs12->vm_entry_controls,
vmx->nested.msrs.entry_ctls_low,
- vmx->nested.msrs.entry_ctls_high))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_nmi_controls(vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ vmx->nested.msrs.entry_ctls_high) ||
+ nested_vmx_check_nmi_controls(vmcs12))
+ return -EINVAL;
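+	/* Only VM-function control bits advertised to L1 may be set. */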
if (nested_cpu_has_vmfunc(vmcs12)) {
if (vmcs12->vm_function_control &
~vmx->nested.msrs.vmfunc_controls)
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ return -EINVAL;
if (nested_cpu_has_eptp_switching(vmcs12)) {
if (!nested_cpu_has_ept(vmcs12) ||
!page_address_valid(vcpu, vmcs12->eptp_list_address))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ return -EINVAL;
}
}
if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu))
+ return -EINVAL;
+
+ return 0;
+}
+
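+/*
+ * Checks related to the prerequisites for VM-Entry of the nested guest
+ */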
+static int nested_check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (nested_check_guest_state_area(vmcs12) ||
+ nested_check_host_state_area(vmcs12) ||
+	    nested_check_vm_execution_ctls(vcpu, vmcs12))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+ if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
/*