@@ -659,6 +659,11 @@ static bool is_valid_passthrough_msr(u32 msr)
 	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
 		/* PT MSRs. These are handled in pt_update_intercept_for_msr() */
 		return true;
+	case MSR_IA32_U_CET:
+	case MSR_IA32_S_CET:
+	case MSR_IA32_PL0_SSP ... MSR_IA32_INT_SSP_TAB:
+		/* CET MSRs. These are handled in vmx_update_intercept_for_cet_msr() */
+		return true;
 	}
 
 	r = possible_passthrough_msr_slot(msr) != -ENOENT;
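Like the PT MSRs, the CET MSRs are toggled dynamically (in vmx_update_intercept_for_cet_msr(), added below) rather than listed in vmx_possible_passthrough_msrs[], so they have to be whitelisted here to satisfy the sanity check in the intercept helpers. The new case labels work because the CET MSRs occupy a contiguous block of architectural indices; for reference, the values as defined in arch/x86/include/asm/msr-index.h:

	#define MSR_IA32_U_CET		0x000006a0 /* user mode cet */
	#define MSR_IA32_S_CET		0x000006a2 /* kernel mode cet */
	#define MSR_IA32_PL0_SSP	0x000006a4 /* ring-0 shadow stack pointer */
	#define MSR_IA32_PL1_SSP	0x000006a5 /* ring-1 shadow stack pointer */
	#define MSR_IA32_PL2_SSP	0x000006a6 /* ring-2 shadow stack pointer */
	#define MSR_IA32_PL3_SSP	0x000006a7 /* ring-3 shadow stack pointer */
	#define MSR_IA32_INT_SSP_TAB	0x000006a8 /* exception shadow stack table */

Indices 0x6a1 and 0x6a3 are reserved, which is why MSR_IA32_U_CET and MSR_IA32_S_CET need their own labels while the SSP MSRs and the interrupt SSP table can share one range case.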
@@ -7343,6 +7348,43 @@ static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
 		vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
 }
 
+/*
+ * A CET xsave component is usable by the guest only if it is exposed
+ * through guest XSS and at least one CET feature (SHSTK or IBT) is
+ * enumerated in guest CPUID.
+ */
+static bool is_cet_state_supported(struct kvm_vcpu *vcpu, u32 xss_state)
+{
+	return (vcpu->arch.guest_supported_xss & xss_state) &&
+	       (guest_cpuid_has(vcpu, X86_FEATURE_SHSTK) ||
+		guest_cpuid_has(vcpu, X86_FEATURE_IBT));
+}
+
+/*
+ * Pass a CET MSR through to the guest only when the matching xsave
+ * component is supported.  The SSP MSRs additionally require SHSTK:
+ * they hold shadow-stack pointers and are meaningless for an
+ * IBT-only guest.
+ */
+static void vmx_update_intercept_for_cet_msr(struct kvm_vcpu *vcpu)
+{
+	bool incpt = !is_cet_state_supported(vcpu, XFEATURE_MASK_CET_USER);
+
+	vmx_set_intercept_for_msr(vcpu, MSR_IA32_U_CET, MSR_TYPE_RW, incpt);
+
+	incpt |= !guest_cpuid_has(vcpu, X86_FEATURE_SHSTK);
+	vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL3_SSP, MSR_TYPE_RW, incpt);
+
+	incpt = !is_cet_state_supported(vcpu, XFEATURE_MASK_CET_KERNEL);
+	vmx_set_intercept_for_msr(vcpu, MSR_IA32_S_CET, MSR_TYPE_RW, incpt);
+
+	incpt |= !guest_cpuid_has(vcpu, X86_FEATURE_SHSTK);
+	vmx_set_intercept_for_msr(vcpu, MSR_IA32_INT_SSP_TAB, MSR_TYPE_RW, incpt);
+	vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL0_SSP, MSR_TYPE_RW, incpt);
+	vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL1_SSP, MSR_TYPE_RW, incpt);
+	vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL2_SSP, MSR_TYPE_RW, incpt);
+}
+
 static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
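The pass-through policy composes two conditions: an MSR stays intercepted unless the matching CET xsave component is exposed to the guest, and the shadow-stack pointer MSRs are additionally kept intercepted when SHSTK is not enumerated (the `incpt |=` lines carry the component check forward and tighten it). For context, vmx_set_intercept_for_msr() just flips the read/write intercept bits in the vCPU's MSR bitmap; a sketch of the helper as it exists in arch/x86/kvm/vmx/vmx.h around this series (signatures vary slightly across kernel versions):

	static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
						     int type, bool value)
	{
		if (value)
			vmx_enable_intercept_for_msr(vcpu, msr, type);
		else
			vmx_disable_intercept_for_msr(vcpu, msr, type);
	}

So, for example, a guest with IBT but not SHSTK and XSS.CET_USER exposed gets MSR_IA32_U_CET passed through, while MSR_IA32_PL3_SSP and all of the supervisor SSP MSRs remain intercepted.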
@@ -7386,6 +7428,9 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 
 	/* Refresh #PF interception to account for MAXPHYADDR changes. */
 	update_exception_bitmap(vcpu);
+
+	if (kvm_cet_supported())
+		vmx_update_intercept_for_cet_msr(vcpu);
 }
 
 static __init void vmx_set_cpu_caps(void)
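vmx_vcpu_after_set_cpuid() is invoked from common x86 code after the guest CPUID caches have been refreshed, so is_cet_state_supported() sees up-to-date values, and the intercepts are recomputed on every KVM_SET_CPUID2. kvm_cet_supported() is defined in the common x86 part of this series and is not visible in this hunk; a minimal, hypothetical sketch of the gating it is expected to provide, assuming it checks host/KVM XSS support for the CET states:

	/* Hypothetical sketch; the real helper is defined elsewhere in the series. */
	static inline bool kvm_cet_supported(void)
	{
		return supported_xss &
		       (XFEATURE_MASK_CET_USER | XFEATURE_MASK_CET_KERNEL);
	}

The guard keeps the per-MSR intercept walk off the CPUID update path entirely on hosts where KVM does not virtualize CET.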