[v7,22/26] KVM: VMX: Set up interception for CET MSRs

Message ID 20231124055330.138870-23-weijiang.yang@intel.com (mailing list archive)
State New, archived
Series Enable CET Virtualization

Commit Message

Yang, Weijiang Nov. 24, 2023, 5:53 a.m. UTC
Enable/disable CET MSR interception per the associated feature's configuration.
The Shadow Stack feature requires all CET MSRs to be passed through to the
guest to support it in both user and supervisor mode, while the IBT feature
only depends on MSR_IA32_{U,S}_CET to enable user and supervisor IBT.

Note, this MSR design introduces an architectural limitation on SHSTK and
IBT control for the guest, i.e., when SHSTK is exposed, IBT is also available
to the guest from an architectural perspective, since IBT relies on a subset
of the SHSTK-relevant MSRs.

Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
---
 arch/x86/kvm/vmx/vmx.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

Comments

Maxim Levitsky Nov. 30, 2023, 5:44 p.m. UTC | #1
On Fri, 2023-11-24 at 00:53 -0500, Yang Weijiang wrote:
> [... patch snipped; see the full diff below ...]

My review feedback from the previous patch still applies as well.

I still think that we should either try a best-effort approach to plug
this virtualization hole, or at least fail guest creation
if the virtualization hole is present, as I said:

"Another, much simpler option is to fail the guest creation if the shadow stack + indirect branch tracking
state differs between host and the guest, unless both are disabled in the guest.
(in essence don't let the guest be created if (2) or (3) happen)"
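
Something along these lines, say (a rough sketch only - kvm_check_cet_cpuid()
and where it gets called from are names I'm making up for illustration, not
existing KVM code):

static int kvm_check_cet_cpuid(struct kvm_vcpu *vcpu)
{
	bool guest_shstk = guest_cpuid_has(vcpu, X86_FEATURE_SHSTK);
	bool guest_ibt = guest_cpuid_has(vcpu, X86_FEATURE_IBT);

	/* Hiding both features from the guest is always safe. */
	if (!guest_shstk && !guest_ibt)
		return 0;

	/* Otherwise require an exact host/guest feature match. */
	if (guest_shstk != kvm_cpu_cap_has(X86_FEATURE_SHSTK) ||
	    guest_ibt != kvm_cpu_cap_has(X86_FEATURE_IBT))
		return -EINVAL;

	return 0;
}

An error returned from somewhere like kvm_check_cpuid() would then make
KVM_SET_CPUID2 fail instead of silently leaving the hole open.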

Please at least tell me what you think about this.

Best regards,
	Maxim Levitsky
Chao Gao Dec. 1, 2023, 6:33 a.m. UTC | #2
On Thu, Nov 30, 2023 at 07:44:45PM +0200, Maxim Levitsky wrote:
>On Fri, 2023-11-24 at 00:53 -0500, Yang Weijiang wrote:
>> [... patch snipped; see the full diff below ...]
>
>My review feedback from the previous patch still applies as well.
>
>I still think that we should either try a best-effort approach to plug
>this virtualization hole, or at least fail guest creation
>if the virtualization hole is present, as I said:
>

>"Another, much simpler option is to fail the guest creation if the shadow stack + indirect branch tracking
>state differs between host and the guest, unless both are disabled in the guest.
>(in essence don't let the guest be created if (2) or (3) happen)"

Enforcing a "none" or "all" policy is a temporary solution. In the future, if
some reserved bits in the S/U_CET MSRs are extended for new features, there
will be:

	platform A supports SS + IBT
	platform B supports SS + IBT + new feature

Guests running on B inevitably have the same virtualization hole, and if KVM
continues enforcing the policy on B, then VM migration from A to B would be
impossible.
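
For reference, these are the IA32_{U,S}_CET control bits currently defined in
arch/x86/include/asm/msr-index.h (the SHSTK/IBT annotations are mine); bits
9:6 are reserved today, which is exactly where such a new feature could land:

#define CET_SHSTK_EN		BIT_ULL(0)	/* SHSTK */
#define CET_WRSS_EN		BIT_ULL(1)	/* SHSTK */
#define CET_ENDBR_EN		BIT_ULL(2)	/* IBT */
#define CET_LEG_IW_EN		BIT_ULL(3)	/* IBT */
#define CET_NO_TRACK_EN		BIT_ULL(4)	/* IBT */
#define CET_SUPPRESS_DISABLE	BIT_ULL(5)	/* IBT */
#define CET_RESERVED		GENMASK_ULL(9, 6)
#define CET_SUPPRESS		BIT_ULL(10)	/* IBT */
#define CET_WAIT_ENDBR		BIT_ULL(11)	/* IBT */

This layout is also the architectural limitation the changelog mentions: once
U/S_CET is passed through for SHSTK, the guest can toggle the IBT bits above
regardless of what its CPUID says.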

To me, intercepting the S/U_CET MSRs and the CET_S/U xsave components is
intricate and yields marginal benefit. I also doubt any reasonable OS
implementation would depend on a #GP from WRMSR to the S/U_CET MSRs for
functionality. So I vote to leave the patch as-is.

Yang, Weijiang Dec. 1, 2023, 9:45 a.m. UTC | #3
On 12/1/2023 1:44 AM, Maxim Levitsky wrote:
> On Fri, 2023-11-24 at 00:53 -0500, Yang Weijiang wrote:
>> [... patch snipped; see the full diff below ...]
> My review feedback from the previous patch still applies as well.
>
> I still think that we should either try a best-effort approach to plug
> this virtualization hole, or at least fail guest creation
> if the virtualization hole is present, as I said:
>
> "Another, much simpler option is to fail the guest creation if the shadow stack + indirect branch tracking
> state differs between host and the guest, unless both are disabled in the guest.
> (in essence don't let the guest be created if (2) or (3) happen)"
>
> Please at least tell me what you think about this.

Oh, I thought I had replied to this patch in v6, but I failed to send the reply out!
Let me explain a bit: at an early stage of this series, I thought of checking the
relevant host feature enablement status before exposing the guest CET features,
but that proved unnecessary and user-unfriendly.

E.g., host CET features are frequently disabled on the host for whatever reason,
and then the features cannot be used/tested in the guest at all. Technically, the
guest should be allowed to run the features so long as the dependencies (i.e.,
xsave-related support) are enabled on the host and using the features in the
guest brings no risk - see the sketch below.
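
Something like this, just as an illustration (not the exact code in this
series): only advertise SHSTK/IBT when the host xsave support for the CET
state components is in place, whether or not the host kernel itself enables
CET:

	/* Illustrative sketch: clear the caps if the xsave deps are missing. */
	if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES) ||
	    (kvm_caps.supported_xss & (XFEATURE_MASK_CET_USER | XFEATURE_MASK_CET_KERNEL)) !=
	    (XFEATURE_MASK_CET_USER | XFEATURE_MASK_CET_KERNEL)) {
		kvm_cpu_cap_clear(X86_FEATURE_SHSTK);
		kvm_cpu_cap_clear(X86_FEATURE_IBT);
	}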

I think cloud computing will hit a similar pain point when deploying CET in
virtualization use cases.
Maxim Levitsky Dec. 5, 2023, 10:04 a.m. UTC | #4
On Fri, 2023-12-01 at 14:33 +0800, Chao Gao wrote:
> On Thu, Nov 30, 2023 at 07:44:45PM +0200, Maxim Levitsky wrote:
> > On Fri, 2023-11-24 at 00:53 -0500, Yang Weijiang wrote:
> > > [... patch snipped; see the full diff below ...]
> > 
> > My review feedback from the previous patch still applies as well.
> > 
> > I still think that we should either try a best-effort approach to plug
> > this virtualization hole, or at least fail guest creation
> > if the virtualization hole is present, as I said:
> > 
> > "Another, much simpler option is to fail the guest creation if the shadow stack + indirect branch tracking
> > state differs between host and the guest, unless both are disabled in the guest.
> > (in essence don't let the guest be created if (2) or (3) happen)"
> 
> Enforcing a "none" or "all" policy is a temporary solution. In the future, if
> some reserved bits in the S/U_CET MSRs are extended for new features, there
> will be:
> 
> 	platform A supports SS + IBT
> 	platform B supports SS + IBT + new feature
> 
> Guests running on B inevitably have the same virtualization hole, and if KVM
> continues enforcing the policy on B, then VM migration from A to B would be
> impossible.
> 
> To me, intercepting the S/U_CET MSRs and the CET_S/U xsave components is
> intricate and yields marginal benefit. I also doubt any reasonable OS
> implementation would depend on a #GP from WRMSR to the S/U_CET MSRs for
> functionality. So I vote to leave the patch as-is.

To some extent I do agree with you, but this can become a huge mess in the future.
I think we at least need to tell Intel/AMD about this to ensure that they don't
make it worse than it already is.

Also, the very least we can do, if we opt to keep things as is, is to document
this virtualization hole - we have Documentation/virt/kvm/x86/errata.rst for that.

Best regards,
	Maxim Levitsky

Maxim Levitsky Dec. 5, 2023, 10:07 a.m. UTC | #5
On Fri, 2023-12-01 at 17:45 +0800, Yang, Weijiang wrote:
> On 12/1/2023 1:44 AM, Maxim Levitsky wrote:
> > On Fri, 2023-11-24 at 00:53 -0500, Yang Weijiang wrote:
> > > [... patch snipped; see the full diff below ...]
> > My review feedback from the previous patch still applies as well.
> > 
> > I still think that we should either try a best-effort approach to plug
> > this virtualization hole, or at least fail guest creation
> > if the virtualization hole is present, as I said:
> > 
> > "Another, much simpler option is to fail the guest creation if the shadow stack + indirect branch tracking
> > state differs between host and the guest, unless both are disabled in the guest.
> > (in essence don't let the guest be created if (2) or (3) happen)"
> > 
> > Please at least tell me what you think about this.
> 
> Oh, I thought I had replied to this patch in v6, but I failed to send the reply out!
> Let me explain a bit: at an early stage of this series, I thought of checking the
> relevant host feature enablement status before exposing the guest CET features,
> but that proved unnecessary and user-unfriendly.
> 
> E.g., host CET features are frequently disabled on the host for whatever reason,
> and then the features cannot be used/tested in the guest at all. Technically, the
> guest should be allowed to run the features so long as the dependencies (i.e.,
> xsave-related support) are enabled on the host and using the features in the
> guest brings no risk.

To be honest, this is a dangerous POV with regard to guest migration: if the VMM
is lax about the features it exposes to the guest, guests will start to make
assumptions instead of checking CPUID, and will then mysteriously fail when
migrated to a machine that actually lacks the features, in addition to not
advertising them in CPUID.

In other words, leaving "undocumented" features in place opens a slippery slope
of later having to support this undocumented behavior.

I understand, though, that CET is problematic, and overall I won't object much
to leaving things as is, but a part of me thinks that we will regret it later.

Best regards,
	Maxim Levitsky

Patch

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 554f665e59c3..e484333eddb0 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -699,6 +699,10 @@  static bool is_valid_passthrough_msr(u32 msr)
 	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
 		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
 		return true;
+	case MSR_IA32_U_CET:
+	case MSR_IA32_S_CET:
+	case MSR_IA32_PL0_SSP ... MSR_IA32_INT_SSP_TAB:
+		return true;
 	}
 
 	r = possible_passthrough_msr_slot(msr) != -ENOENT;
@@ -7766,6 +7770,42 @@  static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
 		vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
 }
 
+static void vmx_update_intercept_for_cet_msr(struct kvm_vcpu *vcpu)
+{
+	bool incpt;
+
+	if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) {
+		incpt = !guest_cpuid_has(vcpu, X86_FEATURE_SHSTK);
+
+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_U_CET,
+					  MSR_TYPE_RW, incpt);
+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_S_CET,
+					  MSR_TYPE_RW, incpt);
+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL0_SSP,
+					  MSR_TYPE_RW, incpt);
+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL1_SSP,
+					  MSR_TYPE_RW, incpt);
+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL2_SSP,
+					  MSR_TYPE_RW, incpt);
+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL3_SSP,
+					  MSR_TYPE_RW, incpt);
+		if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
+			vmx_set_intercept_for_msr(vcpu, MSR_IA32_INT_SSP_TAB,
+						  MSR_TYPE_RW, incpt);
+		if (!incpt)
+			return;
+	}
+
+	if (kvm_cpu_cap_has(X86_FEATURE_IBT)) {
+		incpt = !guest_cpuid_has(vcpu, X86_FEATURE_IBT);
+
+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_U_CET,
+					  MSR_TYPE_RW, incpt);
+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_S_CET,
+					  MSR_TYPE_RW, incpt);
+	}
+}
+
 static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -7843,6 +7883,8 @@  static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 
 	/* Refresh #PF interception to account for MAXPHYADDR changes. */
 	vmx_update_exception_bitmap(vcpu);
+
+	vmx_update_intercept_for_cet_msr(vcpu);
 }
 
 static u64 vmx_get_perf_capabilities(void)