[2/5] KVM: nVMX: Verify the VMX controls MSRs with the global capability when setting VMX MSRs

Message ID: 20200828085622.8365-3-chenyi.qiang@intel.com
State: New, archived
Series: Fix nested VMX controls MSRs

Commit Message

Chenyi Qiang Aug. 28, 2020, 8:56 a.m. UTC
When setting the nested VMX MSRs, verify them against the values in
vmcs_config.nested_vmx_msrs, which reflects the global capability of
the VMX controls MSRs.

Signed-off-by: Chenyi Qiang <chenyi.qiang@intel.com>
---
 arch/x86/kvm/vmx/nested.c | 71 ++++++++++++++++++++++++++++-----------
 1 file changed, 51 insertions(+), 20 deletions(-)
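
For context on the validation discussed below: each of these restore
paths funnels through nested.c's is_bitwise_subset() helper, which
accepts the incoming value only if, within the given mask, it sets no
bit that the supported value does not also set. A minimal standalone
sketch of that check (modeled on the in-tree helper; the test values
are illustrative only):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Accept 'subset' only if, within 'mask', every bit it sets is also
 * set in 'superset' (modeled on is_bitwise_subset() in nested.c).
 */
static bool is_bitwise_subset(uint64_t superset, uint64_t subset,
			      uint64_t mask)
{
	return ((superset | subset) & mask) == (superset & mask);
}

int main(void)
{
	uint64_t supported = 0x3;	/* bits 0 and 1 supported */

	assert(is_bitwise_subset(supported, 0x1, ~0ULL));	/* accepted */
	assert(!is_bitwise_subset(supported, 0x4, ~0ULL));	/* bit 2 rejected */
	return 0;
}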

Comments

Jim Mattson Aug. 28, 2020, 6:23 p.m. UTC | #1
On Fri, Aug 28, 2020 at 1:54 AM Chenyi Qiang <chenyi.qiang@intel.com> wrote:
>
> When setting the nested VMX MSRs, verify them against the values in
> vmcs_config.nested_vmx_msrs, which reflects the global capability of
> the VMX controls MSRs.
>
> Signed-off-by: Chenyi Qiang <chenyi.qiang@intel.com>

You seem to have entirely missed the point of this code, which is to
prevent userspace from adding features that have previously been
removed for this vCPU (e.g. as a side effect of KVM_SET_CPUID).
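
To make the objection concrete, here is a toy model of the sequence the
per-vCPU check guards against (the ioctl names are real; the specific
feature bit is an assumed example, and this is not kernel code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Same subset check as sketched above. */
static bool is_bitwise_subset(uint64_t superset, uint64_t subset,
			      uint64_t mask)
{
	return ((superset | subset) & mask) == (superset & mask);
}

int main(void)
{
	/* Assumed example bit: a secondary control such as XSAVES. */
	const uint64_t FEATURE  = 1ULL << 20;
	uint64_t global_allowed = FEATURE | (1ULL << 1);
	uint64_t vcpu_allowed   = global_allowed;

	/* 1. KVM_SET_CPUID drops the feature, after which KVM clears
	 *    the corresponding bit in this vCPU's vmx->nested.msrs. */
	vcpu_allowed &= ~FEATURE;

	/* 2. Userspace then restores the VMX MSR with the stale bit. */
	uint64_t data = FEATURE;

	/* Checking against the per-vCPU value (current code) rejects
	 * the stale bit... */
	assert(!is_bitwise_subset(vcpu_allowed, data, ~0ULL));
	/* ...while checking against the global capability (this patch)
	 * would accept it, re-enabling a feature the vCPU lost. */
	assert(is_bitwise_subset(global_allowed, data, ~0ULL));
	return 0;
}
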
Chenyi Qiang Aug. 31, 2020, 3:15 a.m. UTC | #2
On 8/29/2020 2:23 AM, Jim Mattson wrote:
> On Fri, Aug 28, 2020 at 1:54 AM Chenyi Qiang <chenyi.qiang@intel.com> wrote:
>> [...]
> 
> You seem to have entirely missed the point of this code, which is to
> prevent userspace from adding features that have previously been
> removed for this vCPU (e.g. as a side effect of KVM_SET_CPUID).
> 

So the feature set chosen by userspace can only ever be reduced, never 
expanded, right? If so, we don't need the change here.
Patch

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 6e0e71f4d45f..47bee53e235a 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -1234,7 +1234,7 @@ static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
 		BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
 		/* reserved */
 		BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
-	u64 vmx_basic = vmx->nested.msrs.basic;
+	u64 vmx_basic = vmcs_config.nested.basic;
 
 	if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
 		return -EINVAL;
@@ -1265,24 +1265,24 @@ vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
 
 	switch (msr_index) {
 	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
-		lowp = &vmx->nested.msrs.pinbased_ctls_low;
-		highp = &vmx->nested.msrs.pinbased_ctls_high;
+		lowp = &vmcs_config.nested.pinbased_ctls_low;
+		highp = &vmcs_config.nested.pinbased_ctls_high;
 		break;
 	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
-		lowp = &vmx->nested.msrs.procbased_ctls_low;
-		highp = &vmx->nested.msrs.procbased_ctls_high;
+		lowp = &vmcs_config.nested.procbased_ctls_low;
+		highp = &vmcs_config.nested.procbased_ctls_high;
 		break;
 	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
-		lowp = &vmx->nested.msrs.exit_ctls_low;
-		highp = &vmx->nested.msrs.exit_ctls_high;
+		lowp = &vmcs_config.nested.exit_ctls_low;
+		highp = &vmcs_config.nested.exit_ctls_high;
 		break;
 	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
-		lowp = &vmx->nested.msrs.entry_ctls_low;
-		highp = &vmx->nested.msrs.entry_ctls_high;
+		lowp = &vmcs_config.nested.entry_ctls_low;
+		highp = &vmcs_config.nested.entry_ctls_high;
 		break;
 	case MSR_IA32_VMX_PROCBASED_CTLS2:
-		lowp = &vmx->nested.msrs.secondary_ctls_low;
-		highp = &vmx->nested.msrs.secondary_ctls_high;
+		lowp = &vmcs_config.nested.secondary_ctls_low;
+		highp = &vmcs_config.nested.secondary_ctls_high;
 		break;
 	default:
 		BUG();
@@ -1298,8 +1298,30 @@ vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
 	if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
 		return -EINVAL;
 
-	*lowp = data;
-	*highp = data >> 32;
+	switch (msr_index) {
+	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
+		vmx->nested.msrs.pinbased_ctls_low = data;
+		vmx->nested.msrs.pinbased_ctls_high = data >> 32;
+		break;
+	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
+		vmx->nested.msrs.procbased_ctls_low = data;
+		vmx->nested.msrs.procbased_ctls_high = data >> 32;
+		break;
+	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+		vmx->nested.msrs.exit_ctls_low = data;
+		vmx->nested.msrs.exit_ctls_high = data >> 32;
+		break;
+	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+		vmx->nested.msrs.entry_ctls_low = data;
+		vmx->nested.msrs.entry_ctls_high = data >> 32;
+		break;
+	case MSR_IA32_VMX_PROCBASED_CTLS2:
+		vmx->nested.msrs.secondary_ctls_low = data;
+		vmx->nested.msrs.secondary_ctls_high = data >> 32;
+		break;
+	default:
+		BUG();
+	}
 	return 0;
 }
 
@@ -1313,8 +1335,8 @@ static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
 		GENMASK_ULL(13, 9) | BIT_ULL(31);
 	u64 vmx_misc;
 
-	vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
-				   vmx->nested.msrs.misc_high);
+	vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low,
+				   vmcs_config.nested.misc_high);
 
 	if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
 		return -EINVAL;
@@ -1344,8 +1366,8 @@ static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
 {
 	u64 vmx_ept_vpid_cap;
 
-	vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
-					   vmx->nested.msrs.vpid_caps);
+	vmx_ept_vpid_cap = vmx_control_msr(vmcs_config.nested.ept_caps,
+					   vmcs_config.nested.vpid_caps);
 
 	/* Every bit is either reserved or a feature bit. */
 	if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
@@ -1362,10 +1384,10 @@ static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
 
 	switch (msr_index) {
 	case MSR_IA32_VMX_CR0_FIXED0:
-		msr = &vmx->nested.msrs.cr0_fixed0;
+		msr = &vmcs_config.nested.cr0_fixed0;
 		break;
 	case MSR_IA32_VMX_CR4_FIXED0:
-		msr = &vmx->nested.msrs.cr4_fixed0;
+		msr = &vmcs_config.nested.cr4_fixed0;
 		break;
 	default:
 		BUG();
@@ -1378,7 +1400,16 @@ static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
 	if (!is_bitwise_subset(data, *msr, -1ULL))
 		return -EINVAL;
 
-	*msr = data;
+	switch (msr_index) {
+	case MSR_IA32_VMX_CR0_FIXED0:
+		vmx->nested.msrs.cr0_fixed0 = data;
+		break;
+	case MSR_IA32_VMX_CR4_FIXED0:
+		vmx->nested.msrs.cr4_fixed0 = data;
+		break;
+	default:
+		BUG();
+	}
 	return 0;
 }
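
A side note on the shape of the change: validating against
vmcs_config.nested while writing vmx->nested.msrs is what forces the
duplicated switch in vmx_restore_control_msr(). One switch can resolve
both. A hedged sketch of that alternative, reusing the in-tree
vmx_control_msr() and is_bitwise_subset() helpers (illustrative only,
not part of the posted patch):

/*
 * Illustrative restructuring: one switch resolves both the supported
 * (validation) value and the per-vCPU write destinations.
 */
static int vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index,
				   u64 data)
{
	u64 supported;
	u32 *lowp, *highp;

	switch (msr_index) {
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
		supported = vmx_control_msr(vmcs_config.nested.pinbased_ctls_low,
					    vmcs_config.nested.pinbased_ctls_high);
		lowp = &vmx->nested.msrs.pinbased_ctls_low;
		highp = &vmx->nested.msrs.pinbased_ctls_high;
		break;
	/* ... the other four MSRs follow the same pattern ... */
	default:
		BUG();
	}

	/* Check must-be-0 bits are still 0, i.e. no new allowed-1 bits. */
	if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
		return -EINVAL;

	/* Check must-be-1 bits are still 1. */
	if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
		return -EINVAL;

	*lowp = data;
	*highp = data >> 32;
	return 0;
}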