@@ -11692,18 +11692,26 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
+ if (vmcs12->vm_entry_msr_load_count == 0)
+ return 0; /* Fast path */
+ if (nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
+ VM_ENTRY_MSR_LOAD_ADDR))
+ return -EINVAL;
+ return 0;
+}
+
+static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
if (vmcs12->vm_exit_msr_load_count == 0 &&
- vmcs12->vm_exit_msr_store_count == 0 &&
- vmcs12->vm_entry_msr_load_count == 0)
+ vmcs12->vm_exit_msr_store_count == 0)
return 0; /* Fast path */
if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
VM_EXIT_MSR_LOAD_ADDR) ||
nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
- VM_EXIT_MSR_STORE_ADDR) ||
- nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
- VM_ENTRY_MSR_LOAD_ADDR))
+ VM_EXIT_MSR_STORE_ADDR))
return -EINVAL;
- return 0;
+ return 0;
}
static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
@@ -12387,13 +12395,26 @@ static int nested_check_vm_execution_ctls(struct vmcs12 *vmcs12)
return 0;
}
+/*
+ * Checks related to VM-Exit Control Fields
+ */
+static int nested_check_vm_exit_ctls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ if (nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))
+ return -EINVAL;
+
+ return 0;
+}
+
static int nested_check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (nested_check_guest_state_area(vmcs12) ||
nested_check_host_state_area(vmcs12) ||
- nested_check_vm_execution_ctls(vmcs12))
+ nested_check_vm_execution_ctls(vmcs12) ||
+ nested_check_vm_exit_ctls(vcpu, vmcs12))
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))