@@ -1788,6 +1788,66 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
}
}
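+/*
+ * The XSAVES-managed CET MSRs (U_CET, PL0-3_SSP) live in the guest's
+ * FPU state.  If that state isn't resident in hardware (TIF_NEED_FPU_LOAD),
+ * reload it via switch_fpu_return() before the RDMSR/WRMSR, with IRQs
+ * disabled so the registers can't be swapped out in between.
+ */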
+static void vmx_get_xsave_msr(struct msr_data *msr_info)
+{
+	local_irq_disable();
+	if (test_thread_flag(TIF_NEED_FPU_LOAD))
+		switch_fpu_return();
+	rdmsrl(msr_info->index, msr_info->data);
+	local_irq_enable();
+}
+
+static void vmx_set_xsave_msr(struct msr_data *msr_info)
+{
+	local_irq_disable();
+	if (test_thread_flag(TIF_NEED_FPU_LOAD))
+		switch_fpu_return();
+	wrmsrl(msr_info->index, msr_info->data);
+	local_irq_enable();
+}
+
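+/*
+ * PL0-3_SSP and INT_SSP_TAB only exist when shadow stacks are enumerated,
+ * and the PL*_SSP MSRs are further gated on the matching CET xfeature
+ * (user for PL3, kernel for PL0-2) in the guest's permitted XSS.
+ */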
+static bool cet_is_ssp_msr_accessible(struct kvm_vcpu *vcpu,
+				      struct msr_data *msr)
+{
+	u64 mask;
+
+	if (!kvm_cet_supported())
+		return false;
+
+	if (msr->host_initiated)
+		return true;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SHSTK))
+		return false;
+
+	/*
+	 * INT_SSP_TAB is VMCS-resident, not XSAVES-managed, so it isn't
+	 * gated on guest_supported_xss; SHSTK alone makes it accessible.
+	 */
+	if (msr->index == MSR_IA32_INT_SSP_TAB)
+		return true;
+
+	mask = (msr->index == MSR_IA32_PL3_SSP) ? XFEATURE_MASK_CET_USER :
+						  XFEATURE_MASK_CET_KERNEL;
+	return !!(vcpu->arch.guest_supported_xss & mask);
+}
+
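+/*
+ * U_CET and S_CET carry control bits shared by shadow stacks and indirect
+ * branch tracking, so exposing either SHSTK or IBT makes them accessible.
+ */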
+static bool cet_is_control_msr_accessible(struct kvm_vcpu *vcpu,
+					  struct msr_data *msr)
+{
+	u64 mask;
+
+	if (!kvm_cet_supported())
+		return false;
+
+	if (msr->host_initiated)
+		return true;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SHSTK) &&
+	    !guest_cpuid_has(vcpu, X86_FEATURE_IBT))
+		return false;
+
+	mask = (msr->index == MSR_IA32_U_CET) ? XFEATURE_MASK_CET_USER :
+						XFEATURE_MASK_CET_KERNEL;
+	return !!(vcpu->arch.guest_supported_xss & mask);
+}
+
/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
@@ -1920,6 +1980,26 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
	else
		msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
	break;
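+	/*
+	 * S_CET and the interrupt SSP table address are read out of the
+	 * VMCS; U_CET and the PL0-3 SSP MSRs come from the guest's
+	 * XSAVES-managed FPU state.
+	 */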
+	case MSR_IA32_S_CET:
+		if (!cet_is_control_msr_accessible(vcpu, msr_info))
+			return 1;
+		msr_info->data = vmcs_readl(GUEST_S_CET);
+		break;
+	case MSR_IA32_U_CET:
+		if (!cet_is_control_msr_accessible(vcpu, msr_info))
+			return 1;
+		vmx_get_xsave_msr(msr_info);
+		break;
+	case MSR_IA32_INT_SSP_TAB:
+		if (!cet_is_ssp_msr_accessible(vcpu, msr_info))
+			return 1;
+		msr_info->data = vmcs_readl(GUEST_INTR_SSP_TABLE);
+		break;
+	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
+		if (!cet_is_ssp_msr_accessible(vcpu, msr_info))
+			return 1;
+		vmx_get_xsave_msr(msr_info);
+		break;
	case MSR_TSC_AUX:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
@@ -2189,6 +2269,31 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
	else
		vmx->pt_desc.guest.addr_a[index / 2] = data;
	break;
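+	/*
+	 * As on the read side, S_CET and the SSP table address go to the
+	 * VMCS, while U_CET and PL0-3_SSP go to the XSAVES-managed image;
+	 * all writes are validated first.
+	 */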
+	case MSR_IA32_S_CET:
+	case MSR_IA32_U_CET:
+		if (!cet_is_control_msr_accessible(vcpu, msr_info))
+			return 1;
+		/* Bits 9:6 of {U,S}_CET are reserved and must be zero. */
+		if (data & GENMASK(9, 6))
+			return 1;
+		if (msr_index == MSR_IA32_S_CET)
+			vmcs_writel(GUEST_S_CET, data);
+		else
+			vmx_set_xsave_msr(msr_info);
+		break;
+	case MSR_IA32_INT_SSP_TAB:
+		if (!cet_is_ssp_msr_accessible(vcpu, msr_info))
+			return 1;
+		if (is_noncanonical_address(data, vcpu))
+			return 1;
+		vmcs_writel(GUEST_INTR_SSP_TABLE, data);
+		break;
+	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
+		if (!cet_is_ssp_msr_accessible(vcpu, msr_info))
+			return 1;
+		/* SSP values must be canonical and have bits 2:0 clear. */
+		if ((data & GENMASK(2, 0)) || is_noncanonical_address(data, vcpu))
+			return 1;
+		vmx_set_xsave_msr(msr_info);
+		break;
	case MSR_TSC_AUX:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
@@ -286,6 +286,11 @@ static inline bool kvm_mpx_supported(void)
		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}
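+/*
+ * CET is reported as supported only if the host can context switch the
+ * user CET state via XSAVES, i.e. XFEATURE_MASK_CET_USER is in
+ * supported_xss.
+ */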
+static inline bool kvm_cet_supported(void)
+{
+	return supported_xss & XFEATURE_MASK_CET_USER;
+}
+
extern unsigned int min_timer_period_us;
extern bool enable_vmware_backdoor;