@@ -33,6 +33,8 @@ struct nested_vmx_msrs {
u32 procbased_ctls_high;
u32 secondary_ctls_low;
u32 secondary_ctls_high;
+ /* Tertiary controls: 64-bit, allow-1 semantics only */
+ u64 tertiary_ctls;
u32 pinbased_ctls_low;
u32 pinbased_ctls_high;
u32 exit_ctls_low;
@@ -1272,6 +1272,18 @@ static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
lowp = &vmx->nested.msrs.secondary_ctls_low;
highp = &vmx->nested.msrs.secondary_ctls_high;
break;
+ /*
+ * MSR_IA32_VMX_PROCBASED_CTLS3 is 64-bit and uses pure allow-1
+ * semantics: any bit not set in it is a must-be-0 control bit.
+ */
+ case MSR_IA32_VMX_PROCBASED_CTLS3:
+ /* Check must-be-0 bits are still 0. */
+ if (!is_bitwise_subset(vmx->nested.msrs.tertiary_ctls,
+ data, GENMASK_ULL(63, 0)))
+ return -EINVAL;
+
+ vmx->nested.msrs.tertiary_ctls = data;
+ return 0;
default:
BUG();
}
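For reference, the allow-1 check above relies on is_bitwise_subset(superset, subset, mask): the restore is accepted only if every bit of the new value is already present in the currently allowed tertiary controls. A minimal sketch of that helper, assuming the usual definition already present in nested.c (shown purely as an illustration, not a change in this patch):

static inline bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
	superset &= mask;
	subset &= mask;

	/* True iff @subset sets no bit (within @mask) that @superset lacks. */
	return (superset | subset) == superset;
}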
@@ -1408,6 +1420,7 @@ int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
case MSR_IA32_VMX_TRUE_EXIT_CTLS:
case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
case MSR_IA32_VMX_PROCBASED_CTLS2:
+ case MSR_IA32_VMX_PROCBASED_CTLS3:
return vmx_restore_control_msr(vmx, msr_index, data);
case MSR_IA32_VMX_MISC:
return vmx_restore_vmx_misc(vmx, data);
@@ -1503,6 +1516,9 @@ int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
msrs->secondary_ctls_low,
msrs->secondary_ctls_high);
break;
+ case MSR_IA32_VMX_PROCBASED_CTLS3:
+ *pdata = msrs->tertiary_ctls;
+ break;
case MSR_IA32_VMX_EPT_VPID_CAP:
*pdata = msrs->ept_caps |
((u64)msrs->vpid_caps << 32);
@@ -6429,7 +6445,8 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
- CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
+ CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS |
+ CPU_BASED_ACTIVATE_TERTIARY_CONTROLS;
/*
* We can allow some features even when not supported by the
* hardware. For example, L1 can specify an MSR bitmap - and we
@@ -6467,6 +6484,9 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
SECONDARY_EXEC_RDSEED_EXITING |
SECONDARY_EXEC_XSAVES;
+ if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_TERTIARY_CONTROLS)
+ rdmsrl(MSR_IA32_VMX_PROCBASED_CTLS3, msrs->tertiary_ctls);
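+ /* Don't advertise any tertiary control to L1 yet. */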
+ msrs->tertiary_ctls &= 0;
/*
* We can emulate "VMCS shadowing," even if the hardware
* doesn't support it.
@@ -1981,7 +1981,7 @@ static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
{
switch (msr->index) {
- case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
+ case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_PROCBASED_CTLS3:
if (!nested)
return 1;
return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
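The widened case ranges here and below assume MSR_IA32_VMX_PROCBASED_CTLS3 sits directly after the existing VMX capability MSRs, so the range stays contiguous. The relevant indices, per the SDM, listed only to illustrate that assumption:

#define MSR_IA32_VMX_BASIC		0x00000480	/* start of the VMX capability MSR range */
#define MSR_IA32_VMX_VMFUNC		0x00000491	/* previous end of the range */
#define MSR_IA32_VMX_PROCBASED_CTLS3	0x00000492	/* new end of the range */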
@@ -2069,7 +2069,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash
[msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0];
break;
- case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
+ case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_PROCBASED_CTLS3:
if (!nested_vmx_allowed(vcpu))
return 1;
if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
@@ -2399,7 +2399,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vmx->msr_ia32_sgxlepubkeyhash
[msr_index - MSR_IA32_SGXLEPUBKEYHASH0] = data;
break;
- case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
+ case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_PROCBASED_CTLS3:
if (!msr_info->host_initiated)
return 1; /* they are read-only */
if (!nested_vmx_allowed(vcpu))
@@ -1332,6 +1332,7 @@ int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
MSR_IA32_VMX_PROCBASED_CTLS2,
MSR_IA32_VMX_EPT_VPID_CAP,
MSR_IA32_VMX_VMFUNC,
+ MSR_IA32_VMX_PROCBASED_CTLS3,
MSR_K7_HWCR,
MSR_KVM_POLL_CONTROL,
@@ -1363,6 +1364,7 @@ int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
MSR_IA32_VMX_PROCBASED_CTLS2,
MSR_IA32_VMX_EPT_VPID_CAP,
MSR_IA32_VMX_VMFUNC,
+ MSR_IA32_VMX_PROCBASED_CTLS3,
MSR_F10H_DECFG,
MSR_IA32_UCODE_REV,
Add this new VMX capability MSR to struct nested_vmx_msrs, along with the
related functions for its nested support. Don't set its LOADIWKEY VM-Exit
bit for now; it will be enabled in the last patch, once everything is
prepared.

Signed-off-by: Robert Hoo <robert.hu@linux.intel.com>
---
 arch/x86/kvm/vmx/capabilities.h |  2 ++
 arch/x86/kvm/vmx/nested.c       | 22 +++++++++++++++++++++-
 arch/x86/kvm/vmx/vmx.c          |  6 +++---
 arch/x86/kvm/x86.c              |  2 ++
 4 files changed, 28 insertions(+), 4 deletions(-)
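For completeness, a rough sketch of how userspace could save and restore the new capability MSR host-initiated (e.g. across migration), exercising the vmx_get_vmx_msr()/vmx_set_vmx_msr() paths above. This is only an illustration: vcpu_fd, the local MSR define and the omitted error handling are assumptions; only the standard KVM_GET_MSRS/KVM_SET_MSRS vCPU ioctls are relied on.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#ifndef MSR_IA32_VMX_PROCBASED_CTLS3
#define MSR_IA32_VMX_PROCBASED_CTLS3	0x492
#endif

/* One-entry KVM_GET_MSRS/KVM_SET_MSRS buffer: header plus a single entry. */
struct one_msr {
	struct kvm_msrs hdr;
	struct kvm_msr_entry entries[1];
};

static uint64_t save_tertiary_ctls(int vcpu_fd)
{
	struct one_msr m = {
		.hdr.nmsrs = 1,
		.entries[0].index = MSR_IA32_VMX_PROCBASED_CTLS3,
	};

	ioctl(vcpu_fd, KVM_GET_MSRS, &m);	/* error handling omitted */
	return m.entries[0].data;
}

static void restore_tertiary_ctls(int vcpu_fd, uint64_t data)
{
	struct one_msr m = {
		.hdr.nmsrs = 1,
		.entries[0].index = MSR_IA32_VMX_PROCBASED_CTLS3,
		.entries[0].data = data,
	};

	/* KVM accepts this only if @data stays within the allow-1 bits. */
	ioctl(vcpu_fd, KVM_SET_MSRS, &m);
}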