Message ID | 20240124024200.102792-23-weijiang.yang@intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | Enable CET Virtualization | expand |
On Tue, Jan 23, 2024 at 06:41:55PM -0800, Yang Weijiang wrote: >Enable/disable CET MSRs interception per associated feature configuration. >Shadow Stack feature requires all CET MSRs passed through to guest to make >it supported in user and supervisor mode while IBT feature only depends on >MSR_IA32_{U,S}_CET to enable user and supervisor IBT. > >Note, this MSR design introduced an architectural limitation of SHSTK and >IBT control for guest, i.e., when SHSTK is exposed, IBT is also available >to guest from an architectural perspective since IBT relies on a subset of SHSTK >relevant MSRs. > >Signed-off-by: Yang Weijiang <weijiang.yang@intel.com> >Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com> Reviewed-by: Chao Gao <chao.gao@intel.com> one nit below, >--- > arch/x86/kvm/vmx/vmx.c | 41 +++++++++++++++++++++++++++++++++++++++++ > 1 file changed, 41 insertions(+) > >diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c >index 064a5fe87948..34e91dbbffed 100644 >--- a/arch/x86/kvm/vmx/vmx.c >+++ b/arch/x86/kvm/vmx/vmx.c >@@ -692,6 +692,10 @@ static bool is_valid_passthrough_msr(u32 msr) > case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8: > /* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */ > return true; >+ case MSR_IA32_U_CET: >+ case MSR_IA32_S_CET: >+ case MSR_IA32_PL0_SSP ... MSR_IA32_INT_SSP_TAB: >+ return true; Please update the comment above vmx_possible_passthrough_msrs[] to indicate CET MSRs are also handled separately. 
> } > > r = possible_passthrough_msr_slot(msr) != -ENOENT; >@@ -7767,6 +7771,41 @@ static void update_intel_pt_cfg(struct kvm_vcpu *vcpu) > vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4)); > } > >+static void vmx_update_intercept_for_cet_msr(struct kvm_vcpu *vcpu) >+{ >+ bool incpt; >+ >+ if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) { >+ incpt = !guest_cpuid_has(vcpu, X86_FEATURE_SHSTK); >+ >+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_U_CET, >+ MSR_TYPE_RW, incpt); >+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_S_CET, >+ MSR_TYPE_RW, incpt); >+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL0_SSP, >+ MSR_TYPE_RW, incpt); >+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL1_SSP, >+ MSR_TYPE_RW, incpt); >+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL2_SSP, >+ MSR_TYPE_RW, incpt); >+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL3_SSP, >+ MSR_TYPE_RW, incpt); >+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_INT_SSP_TAB, >+ MSR_TYPE_RW, incpt); >+ if (!incpt) >+ return; >+ } >+ >+ if (kvm_cpu_cap_has(X86_FEATURE_IBT)) { >+ incpt = !guest_cpuid_has(vcpu, X86_FEATURE_IBT); >+ >+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_U_CET, >+ MSR_TYPE_RW, incpt); >+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_S_CET, >+ MSR_TYPE_RW, incpt); >+ } >+} >+ > static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) > { > struct vcpu_vmx *vmx = to_vmx(vcpu); >@@ -7845,6 +7884,8 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) > > /* Refresh #PF interception to account for MAXPHYADDR changes. */ > vmx_update_exception_bitmap(vcpu); >+ >+ vmx_update_intercept_for_cet_msr(vcpu); > } > > static u64 vmx_get_perf_capabilities(void) >-- >2.39.3 >
On 1/26/2024 11:54 AM, Chao Gao wrote: > On Tue, Jan 23, 2024 at 06:41:55PM -0800, Yang Weijiang wrote: >> Enable/disable CET MSRs interception per associated feature configuration. >> Shadow Stack feature requires all CET MSRs passed through to guest to make >> it supported in user and supervisor mode while IBT feature only depends on >> MSR_IA32_{U,S}_CET to enable user and supervisor IBT. >> >> Note, this MSR design introduced an architectural limitation of SHSTK and >> IBT control for guest, i.e., when SHSTK is exposed, IBT is also available >> to guest from an architectural perspective since IBT relies on a subset of SHSTK >> relevant MSRs. >> >> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com> >> Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com> > Reviewed-by: Chao Gao <chao.gao@intel.com> > > one nit below, > >> --- >> arch/x86/kvm/vmx/vmx.c | 41 +++++++++++++++++++++++++++++++++++++++++ >> 1 file changed, 41 insertions(+) >> >> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c >> index 064a5fe87948..34e91dbbffed 100644 >> --- a/arch/x86/kvm/vmx/vmx.c >> +++ b/arch/x86/kvm/vmx/vmx.c >> @@ -692,6 +692,10 @@ static bool is_valid_passthrough_msr(u32 msr) >> case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8: >> /* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */ >> return true; >> + case MSR_IA32_U_CET: >> + case MSR_IA32_S_CET: >> + case MSR_IA32_PL0_SSP ... MSR_IA32_INT_SSP_TAB: >> + return true; > Please update the comment above vmx_possible_passthrough_msrs[] to indicate CET > MSRs are also handled separately. OK, will do it, thanks!
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 064a5fe87948..34e91dbbffed 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -692,6 +692,10 @@ static bool is_valid_passthrough_msr(u32 msr) case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8: /* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */ return true; + case MSR_IA32_U_CET: + case MSR_IA32_S_CET: + case MSR_IA32_PL0_SSP ... MSR_IA32_INT_SSP_TAB: + return true; } r = possible_passthrough_msr_slot(msr) != -ENOENT; @@ -7767,6 +7771,41 @@ static void update_intel_pt_cfg(struct kvm_vcpu *vcpu) vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4)); } +static void vmx_update_intercept_for_cet_msr(struct kvm_vcpu *vcpu) +{ + bool incpt; + + if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) { + incpt = !guest_cpuid_has(vcpu, X86_FEATURE_SHSTK); + + vmx_set_intercept_for_msr(vcpu, MSR_IA32_U_CET, + MSR_TYPE_RW, incpt); + vmx_set_intercept_for_msr(vcpu, MSR_IA32_S_CET, + MSR_TYPE_RW, incpt); + vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL0_SSP, + MSR_TYPE_RW, incpt); + vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL1_SSP, + MSR_TYPE_RW, incpt); + vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL2_SSP, + MSR_TYPE_RW, incpt); + vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL3_SSP, + MSR_TYPE_RW, incpt); + vmx_set_intercept_for_msr(vcpu, MSR_IA32_INT_SSP_TAB, + MSR_TYPE_RW, incpt); + if (!incpt) + return; + } + + if (kvm_cpu_cap_has(X86_FEATURE_IBT)) { + incpt = !guest_cpuid_has(vcpu, X86_FEATURE_IBT); + + vmx_set_intercept_for_msr(vcpu, MSR_IA32_U_CET, + MSR_TYPE_RW, incpt); + vmx_set_intercept_for_msr(vcpu, MSR_IA32_S_CET, + MSR_TYPE_RW, incpt); + } +} + static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -7845,6 +7884,8 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) /* Refresh #PF interception to account for MAXPHYADDR changes. 
*/ vmx_update_exception_bitmap(vcpu); + + vmx_update_intercept_for_cet_msr(vcpu); } static u64 vmx_get_perf_capabilities(void)