[v4,13/20] KVM:VMX: Emulate read and write to CET MSRs

Message ID 20230721030352.72414-14-weijiang.yang@intel.com (mailing list archive)
State New, archived
Series Enable CET Virtualization

Commit Message

Yang, Weijiang July 21, 2023, 3:03 a.m. UTC
Add VMX-specific emulation for CET MSR read and write.
The IBT feature is currently only available on Intel platforms, and
the virtualization interface to the control fields is vendor-specific,
so split this part out of the common code.

Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
---
 arch/x86/kvm/vmx/vmx.c | 40 ++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/x86.c     |  7 -------
 2 files changed, 40 insertions(+), 7 deletions(-)

Comments

Chao Gao July 26, 2023, 8:06 a.m. UTC | #1
On Thu, Jul 20, 2023 at 11:03:45PM -0400, Yang Weijiang wrote:
>Add VMX-specific emulation for CET MSR read and write.
>The IBT feature is currently only available on Intel platforms, and
>the virtualization interface to the control fields is vendor-specific,
>so split this part out of the common code.
>
>Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
>---
> arch/x86/kvm/vmx/vmx.c | 40 ++++++++++++++++++++++++++++++++++++++++
> arch/x86/kvm/x86.c     |  7 -------
> 2 files changed, 40 insertions(+), 7 deletions(-)
>
>diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
>index c8d9870cfecb..b29817ec6f2e 100644
>--- a/arch/x86/kvm/vmx/vmx.c
>+++ b/arch/x86/kvm/vmx/vmx.c
>@@ -2093,6 +2093,21 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
> 		else
> 			msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
> 		break;
>+	case MSR_IA32_U_CET:
>+	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
>+		return kvm_get_msr_common(vcpu, msr_info);

kvm_get_msr_common() is called for the "default" case, so this can be dropped.
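
(For reference, a minimal sketch of the tail of vmx_get_msr(), based on the
existing upstream structure rather than this patch, so treat the helper
names as context:)

	default:
	find_uret_msr:
		/*
		 * MSRs without an explicit case land here: they are checked
		 * against the user-return MSR list, then forwarded to common
		 * code, so the CET MSRs above would reach
		 * kvm_get_msr_common() even without the explicit forwarding.
		 */
		msr = vmx_find_uret_msr(vmx, msr_info->index);
		if (msr) {
			msr_info->data = msr->data;
			break;
		}
		return kvm_get_msr_common(vcpu, msr_info);
	}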

>+	case MSR_IA32_S_CET:
>+	case MSR_KVM_GUEST_SSP:
>+	case MSR_IA32_INT_SSP_TAB:
>+		if (kvm_get_msr_common(vcpu, msr_info))
>+			return 1;
>+		if (msr_info->index == MSR_KVM_GUEST_SSP)
>+			msr_info->data = vmcs_readl(GUEST_SSP);
>+		else if (msr_info->index == MSR_IA32_S_CET)
>+			msr_info->data = vmcs_readl(GUEST_S_CET);
>+		else if (msr_info->index == MSR_IA32_INT_SSP_TAB)
>+			msr_info->data = vmcs_readl(GUEST_INTR_SSP_TABLE);
>+		break;
> 	case MSR_IA32_DEBUGCTLMSR:
> 		msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
> 		break;
>@@ -2402,6 +2417,31 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
> 		else
> 			vmx->pt_desc.guest.addr_a[index / 2] = data;
> 		break;
>+#define VMX_CET_CONTROL_MASK		(~GENMASK_ULL(9, 6))

Bits 9:6 are reserved for both Intel and AMD. Shouldn't this check be
done in the common code?

>+#define CET_LEG_BITMAP_BASE(data)	((data) >> 12)
>+#define CET_EXCLUSIVE_BITS		(CET_SUPPRESS | CET_WAIT_ENDBR)

>+	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
>+		return kvm_set_msr_common(vcpu, msr_info);

this hunk can be dropped as well.

>+		break;
>+	case MSR_IA32_U_CET:
>+	case MSR_IA32_S_CET:
>+	case MSR_KVM_GUEST_SSP:
>+	case MSR_IA32_INT_SSP_TAB:
>+		if ((msr_index == MSR_IA32_U_CET ||
>+		     msr_index == MSR_IA32_S_CET) &&
>+		    ((data & ~VMX_CET_CONTROL_MASK) ||
>+		     !IS_ALIGNED(CET_LEG_BITMAP_BASE(data), 4) ||
>+		     (data & CET_EXCLUSIVE_BITS) == CET_EXCLUSIVE_BITS))
>+			return 1;

how about

	case MSR_IA32_U_CET:
	case MSR_IA32_S_CET:
		if ((data & ~VMX_CET_CONTROL_MASK) || ...
			...

	case MSR_KVM_GUEST_SSP:
	case MSR_IA32_INT_SSP_TAB:
Yang, Weijiang July 27, 2023, 3:19 a.m. UTC | #2
On 7/26/2023 4:06 PM, Chao Gao wrote:
> On Thu, Jul 20, 2023 at 11:03:45PM -0400, Yang Weijiang wrote:
>> Add VMX-specific emulation for CET MSR read and write.
>> The IBT feature is currently only available on Intel platforms, and
>> the virtualization interface to the control fields is vendor-specific,
>> so split this part out of the common code.
>>
>> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
>> ---
>> arch/x86/kvm/vmx/vmx.c | 40 ++++++++++++++++++++++++++++++++++++++++
>> arch/x86/kvm/x86.c     |  7 -------
>> 2 files changed, 40 insertions(+), 7 deletions(-)
>>
>> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
>> index c8d9870cfecb..b29817ec6f2e 100644
>> --- a/arch/x86/kvm/vmx/vmx.c
>> +++ b/arch/x86/kvm/vmx/vmx.c
>> @@ -2093,6 +2093,21 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>> 		else
>> 			msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
>> 		break;
>> +	case MSR_IA32_U_CET:
>> +	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
>> +		return kvm_get_msr_common(vcpu, msr_info);
> kvm_get_msr_common() is called for the "default" case, so this can be dropped.

Yes, I can drop these in vmx_get_msr(); I just wanted to make it clearer.
Thanks!

>
>> +	case MSR_IA32_S_CET:
>> +	case MSR_KVM_GUEST_SSP:
>> +	case MSR_IA32_INT_SSP_TAB:
>> +		if (kvm_get_msr_common(vcpu, msr_info))
>> +			return 1;
>> +		if (msr_info->index == MSR_KVM_GUEST_SSP)
>> +			msr_info->data = vmcs_readl(GUEST_SSP);
>> +		else if (msr_info->index == MSR_IA32_S_CET)
>> +			msr_info->data = vmcs_readl(GUEST_S_CET);
>> +		else if (msr_info->index == MSR_IA32_INT_SSP_TAB)
>> +			msr_info->data = vmcs_readl(GUEST_INTR_SSP_TABLE);
>> +		break;
>> 	case MSR_IA32_DEBUGCTLMSR:
>> 		msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
>> 		break;
>> @@ -2402,6 +2417,31 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>> 		else
>> 			vmx->pt_desc.guest.addr_a[index / 2] = data;
>> 		break;
>> +#define VMX_CET_CONTROL_MASK		(~GENMASK_ULL(9, 6))
> Bits 9:6 are reserved for both Intel and AMD. Shouldn't this check be
> done in the common code?

My thinking is: on an AMD platform, bits 63:2 are reserved anyway since it
doesn't support IBT, so the checks in common code are enough for AMD. When
the execution flow comes here, it must be VMX, which needs this additional
check.

>
>> +#define CET_LEG_BITMAP_BASE(data)	((data) >> 12)
>> +#define CET_EXCLUSIVE_BITS		(CET_SUPPRESS | CET_WAIT_ENDBR)
>> +	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
>> +		return kvm_set_msr_common(vcpu, msr_info);
> this hunk can be dropped as well.

In patch 16, these lines still need to be added back for PL{0,1,2}_SSP,
so I would like to keep them here.

>
>> +		break;
>> +	case MSR_IA32_U_CET:
>> +	case MSR_IA32_S_CET:
>> +	case MSR_KVM_GUEST_SSP:
>> +	case MSR_IA32_INT_SSP_TAB:
>> +		if ((msr_index == MSR_IA32_U_CET ||
>> +		     msr_index == MSR_IA32_S_CET) &&
>> +		    ((data & ~VMX_CET_CONTROL_MASK) ||
>> +		     !IS_ALIGNED(CET_LEG_BITMAP_BASE(data), 4) ||
>> +		     (data & CET_EXCLUSIVE_BITS) == CET_EXCLUSIVE_BITS))
>> +			return 1;
> how about
>
> 	case MSR_IA32_U_CET:
> 	case MSR_IA32_S_CET:
> 		if ((data & ~VMX_CET_CONTROL_MASK) || ...
> 			...
>
> 	case MSR_KVM_GUEST_SSP:
> 	case MSR_IA32_INT_SSP_TAB:

Do you mean to use "fallthrough"?

If not: for MSR_IA32_S_CET, we need vmcs_writel() to handle it, and that
is VMX-specific code.
Chao Gao July 27, 2023, 5:16 a.m. UTC | #3
>> > +	case MSR_IA32_S_CET:
>> > +	case MSR_KVM_GUEST_SSP:
>> > +	case MSR_IA32_INT_SSP_TAB:
>> > +		if (kvm_get_msr_common(vcpu, msr_info))
>> > +			return 1;
>> > +		if (msr_info->index == MSR_KVM_GUEST_SSP)
>> > +			msr_info->data = vmcs_readl(GUEST_SSP);
>> > +		else if (msr_info->index == MSR_IA32_S_CET)
>> > +			msr_info->data = vmcs_readl(GUEST_S_CET);
>> > +		else if (msr_info->index == MSR_IA32_INT_SSP_TAB)
>> > +			msr_info->data = vmcs_readl(GUEST_INTR_SSP_TABLE);
>> > +		break;
>> > 	case MSR_IA32_DEBUGCTLMSR:
>> > 		msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
>> > 		break;
>> > @@ -2402,6 +2417,31 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>> > 		else
>> > 			vmx->pt_desc.guest.addr_a[index / 2] = data;
>> > 		break;
>> > +#define VMX_CET_CONTROL_MASK		(~GENMASK_ULL(9, 6))
>> Bits 9:6 are reserved for both Intel and AMD. Shouldn't this check be
>> done in the common code?
>
>My thinking is: on an AMD platform, bits 63:2 are reserved anyway since it
>doesn't support IBT,

You can only say

	bits 5:2 and bits 63:10 are reserved since AMD doesn't support IBT.

Bits 9:6 are reserved regardless of IBT support.

>
>so the checks in common code are enough for AMD. When the execution flow
>comes here, it must be VMX, which needs this additional check.

The checks against reserved bits are common to AMD and Intel:

1. if SHSTK is supported, bits 1:0 are not reserved.
2. if IBT is supported, bits 5:2 and bits 63:10 are not reserved.
3. bits 9:6 are always reserved.

There is nothing specific to Intel.

>
>> 
>> > +#define CET_LEG_BITMAP_BASE(data)	((data) >> 12)
>> > +#define CET_EXCLUSIVE_BITS		(CET_SUPPRESS | CET_WAIT_ENDBR)
>> > +	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
>> > +		return kvm_set_msr_common(vcpu, msr_info);
>> this hunk can be dropped as well.
>
>In patch 16, these lines still need to be added back for PL{0,1,2}_SSP, so
>I would like to keep them here.

If that's the case, better to move it to patch 16, where the change
can be justified. And PL3_SSP should be removed anyway, and then the
"msr_index != MSR_IA32_PL3_SSP" check in the code snippet below (from
patch 16) can go away.

+		/*
+		 * Write to the base SSP MSRs should happen ahead of toggling
+		 * of IA32_S_CET.SH_STK_EN bit.
+		 */
+		if (msr_index != MSR_IA32_PL3_SSP && data) {
+			vmx_disable_write_intercept_sss_msr(vcpu);
+			wrmsrl(msr_index, data);
+		}


>
>> 
>> > +		break;
>> > +	case MSR_IA32_U_CET:
>> > +	case MSR_IA32_S_CET:
>> > +	case MSR_KVM_GUEST_SSP:
>> > +	case MSR_IA32_INT_SSP_TAB:
>> > +		if ((msr_index == MSR_IA32_U_CET ||
>> > +		     msr_index == MSR_IA32_S_CET) &&
>> > +		    ((data & ~VMX_CET_CONTROL_MASK) ||
>> > +		     !IS_ALIGNED(CET_LEG_BITMAP_BASE(data), 4) ||
>> > +		     (data & CET_EXCLUSIVE_BITS) == CET_EXCLUSIVE_BITS))
>> > +			return 1;
>> how about
>> 
>> 	case MSR_IA32_U_CET:
>> 	case MSR_IA32_S_CET:
>> 		if ((data & ~VMX_CET_CONTROL_MASK) || ...
>> 			...
>> 
>> 	case MSR_KVM_GUEST_SSP:
>> 	case MSR_IA32_INT_SSP_TAB:
>
>Do you mean to use "fallthrough"?

Yes.
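
(A sketch of what the fallthrough arrangement could look like, reusing the
checks and VMCS writes from this patch; the surrounding vmx_set_msr() switch
is assumed:)

	case MSR_IA32_U_CET:
	case MSR_IA32_S_CET:
		if ((data & ~VMX_CET_CONTROL_MASK) ||
		    !IS_ALIGNED(CET_LEG_BITMAP_BASE(data), 4) ||
		    (data & CET_EXCLUSIVE_BITS) == CET_EXCLUSIVE_BITS)
			return 1;
		fallthrough;
	case MSR_KVM_GUEST_SSP:
	case MSR_IA32_INT_SSP_TAB:
		if (kvm_set_msr_common(vcpu, msr_info))
			return 1;
		/*
		 * Only the VMCS-backed fields need an explicit write;
		 * MSR_IA32_U_CET is fully handled by common code.
		 */
		if (msr_index == MSR_KVM_GUEST_SSP)
			vmcs_writel(GUEST_SSP, data);
		else if (msr_index == MSR_IA32_S_CET)
			vmcs_writel(GUEST_S_CET, data);
		else if (msr_index == MSR_IA32_INT_SSP_TAB)
			vmcs_writel(GUEST_INTR_SSP_TABLE, data);
		break;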
Yang, Weijiang July 27, 2023, 7:10 a.m. UTC | #4
On 7/27/2023 1:16 PM, Chao Gao wrote:
>>>> +	case MSR_IA32_S_CET:
>>>> +	case MSR_KVM_GUEST_SSP:
>>>> +	case MSR_IA32_INT_SSP_TAB:
>>>> +		if (kvm_get_msr_common(vcpu, msr_info))
>>>> +			return 1;
>>>> +		if (msr_info->index == MSR_KVM_GUEST_SSP)
>>>> +			msr_info->data = vmcs_readl(GUEST_SSP);
>>>> +		else if (msr_info->index == MSR_IA32_S_CET)
>>>> +			msr_info->data = vmcs_readl(GUEST_S_CET);
>>>> +		else if (msr_info->index == MSR_IA32_INT_SSP_TAB)
>>>> +			msr_info->data = vmcs_readl(GUEST_INTR_SSP_TABLE);
>>>> +		break;
>>>> 	case MSR_IA32_DEBUGCTLMSR:
>>>> 		msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
>>>> 		break;
>>>> @@ -2402,6 +2417,31 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>>>> 		else
>>>> 			vmx->pt_desc.guest.addr_a[index / 2] = data;
>>>> 		break;
>>>> +#define VMX_CET_CONTROL_MASK		(~GENMASK_ULL(9, 6))
>>> Bits 9:6 are reserved for both Intel and AMD. Shouldn't this check be
>>> done in the common code?
>> My thinking is: on an AMD platform, bits 63:2 are reserved anyway since it
>> doesn't support IBT,
> You can only say
>
> 	bits 5:2 and bits 63:10 are reserved since AMD doesn't support IBT.
>
> Bits 9:6 are reserved regardless of IBT support.
>
>> so the checks in common code are enough for AMD. When the execution flow
>> comes here, it must be VMX, which needs this additional check.
> The checks against reserved bits are common to AMD and Intel:
>
> 1. if SHSTK is supported, bits 1:0 are not reserved.
> 2. if IBT is supported, bits 5:2 and bits 63:10 are not reserved.
> 3. bits 9:6 are always reserved.
>
> There is nothing specific to Intel.

So you want the code to be:

+#define CET_IBT_MASK_BITS		(GENMASK_ULL(5, 2) | GENMASK_ULL(63, 10))
+#define CET_CTRL_RESERVED_BITS		GENMASK(9, 6)
+#define CET_SHSTK_MASK_BITS		GENMASK(1, 0)

+		if ((!guest_can_use(vcpu, X86_FEATURE_SHSTK) &&
+		     (data & CET_SHSTK_MASK_BITS)) ||
+		    (!guest_can_use(vcpu, X86_FEATURE_IBT) &&
+		     (data & CET_IBT_MASK_BITS)) ||
+		    (data & CET_CTRL_RESERVED_BITS))
		    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+			return 1;

>
>>>> +#define CET_LEG_BITMAP_BASE(data)	((data) >> 12)
>>>> +#define CET_EXCLUSIVE_BITS		(CET_SUPPRESS | CET_WAIT_ENDBR)
>>>> +	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
>>>> +		return kvm_set_msr_common(vcpu, msr_info);
>>> this hunk can be dropped as well.
>> In patch 16, these lines still need to be added back for PL{0,1,2}_SSP, so
>> I would like to keep them here.
> If that's the case, better to move it to patch 16, where the change
> can be justified. And PL3_SSP should be removed anyway, and then the
> "msr_index != MSR_IA32_PL3_SSP" check in the code snippet below (from
> patch 16) can go away.

Sure, will do it.

>
> +		/*
> +		 * Write to the base SSP MSRs should happen ahead of toggling
> +		 * of IA32_S_CET.SH_STK_EN bit.
> +		 */
> +		if (msr_index != MSR_IA32_PL3_SSP && data) {
> +			vmx_disable_write_intercept_sss_msr(vcpu);
> +			wrmsrl(msr_index, data);
> +		}
>
>
>>
>>>> +		break;
>>>> +	case MSR_IA32_U_CET:
>>>> +	case MSR_IA32_S_CET:
>>>> +	case MSR_KVM_GUEST_SSP:
>>>> +	case MSR_IA32_INT_SSP_TAB:
>>>> +		if ((msr_index == MSR_IA32_U_CET ||
>>>> +		     msr_index == MSR_IA32_S_CET) &&
>>>> +		    ((data & ~VMX_CET_CONTROL_MASK) ||
>>>> +		     !IS_ALIGNED(CET_LEG_BITMAP_BASE(data), 4) ||
>>>> +		     (data & CET_EXCLUSIVE_BITS) == CET_EXCLUSIVE_BITS))
>>>> +			return 1;
>>> how about
>>>
>>> 	case MSR_IA32_U_CET:
>>> 	case MSR_IA32_S_CET:
>>> 		if ((data & ~VMX_CET_CONTROL_MASK) || ...
>>> 			...
>>>
>>> 	case MSR_KVM_GUEST_SSP:
>>> 	case MSR_IA32_INT_SSP_TAB:
>> Do you mean to use "fallthrough"?
> Yes.

OK, will change it, thanks!
Sean Christopherson July 27, 2023, 3:20 p.m. UTC | #5
On Thu, Jul 27, 2023, Weijiang Yang wrote:
> 
> On 7/27/2023 1:16 PM, Chao Gao wrote:
> > > > > @@ -2402,6 +2417,31 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
> > > > > 		else
> > > > > 			vmx->pt_desc.guest.addr_a[index / 2] = data;
> > > > > 		break;
> > > > > +#define VMX_CET_CONTROL_MASK		(~GENMASK_ULL(9, 6))
> > > > Bits 9:6 are reserved for both Intel and AMD. Shouldn't this check be
> > > > done in the common code?
> > > My thinking is: on an AMD platform, bits 63:2 are reserved anyway since it
> > > doesn't support IBT,
> > You can only say
> > 
> > 	bits 5:2 and bits 63:10 are reserved since AMD doesn't support IBT.
> > 
> > Bits 9:6 are reserved regardless of IBT support.
> > 
> > > so the checks in common code are enough for AMD. When the execution flow
> > > comes here, it must be VMX, which needs this additional check.
> > The checks against reserved bits are common to AMD and Intel:
> > 
> > 1. if SHSTK is supported, bits 1:0 are not reserved.
> > 2. if IBT is supported, bits 5:2 and bits 63:10 are not reserved.
> > 3. bits 9:6 are always reserved.
> > 
> > There is nothing specific to Intel.

+1

> So you want the code to be:
> 
> +#define CET_IBT_MASK_BITS		(GENMASK_ULL(5, 2) | GENMASK_ULL(63, 10))
> +#define CET_CTRL_RESERVED_BITS		GENMASK(9, 6)
> +#define CET_SHSTK_MASK_BITS		GENMASK(1, 0)
> 
> +		if ((!guest_can_use(vcpu, X86_FEATURE_SHSTK) &&
> +		     (data & CET_SHSTK_MASK_BITS)) ||
> +		    (!guest_can_use(vcpu, X86_FEATURE_IBT) &&
> +		     (data & CET_IBT_MASK_BITS)) ||
> +		    (data & CET_CTRL_RESERVED_BITS))
> 		    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> +			return 1;

Yes, though I vote to separate each check, e.g.

	if (data & CET_CTRL_RESERVED_BITS)
		return 1;

	if (!guest_can_use(vcpu, X86_FEATURE_SHSTK) && (data & CET_SHSTK_MASK_BITS))
		return 1;

	if (!guest_can_use(vcpu, X86_FEATURE_IBT) && (data & CET_IBT_MASK_BITS))
		return 1;

I would expect the code generation to be similar, if not outright identical, and
IMO it's easier to quickly understand the flow if each check is a separate if-statement.
Yang, Weijiang July 28, 2023, 12:43 a.m. UTC | #6
On 7/27/2023 11:20 PM, Sean Christopherson wrote:
> On Thu, Jul 27, 2023, Weijiang Yang wrote:
>> On 7/27/2023 1:16 PM, Chao Gao wrote:
>>>>>> @@ -2402,6 +2417,31 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>>>>>> 		else
>>>>>> 			vmx->pt_desc.guest.addr_a[index / 2] = data;
>>>>>> 		break;
>>>>>> +#define VMX_CET_CONTROL_MASK		(~GENMASK_ULL(9, 6))
>>>>> Bits 9:6 are reserved for both Intel and AMD. Shouldn't this check be
>>>>> done in the common code?
>>>> My thinking is: on an AMD platform, bits 63:2 are reserved anyway since it
>>>> doesn't support IBT,
>>> You can only say
>>>
>>> 	bits 5:2 and bits 63:10 are reserved since AMD doesn't support IBT.
>>>
>>> Bits 9:6 are reserved regardless of IBT support.
>>>
>>>> so the checks in common code are enough for AMD. When the execution flow
>>>> comes here, it must be VMX, which needs this additional check.
>>> The checks against reserved bits are common to AMD and Intel:
>>>
>>> 1. if SHSTK is supported, bits 1:0 are not reserved.
>>> 2. if IBT is supported, bits 5:2 and bits 63:10 are not reserved.
>>> 3. bits 9:6 are always reserved.
>>>
>>> There is nothing specific to Intel.
> +1
>
>> So you want the code to be:
>>
>> +#define CET_IBT_MASK_BITS		(GENMASK_ULL(5, 2) | GENMASK_ULL(63, 10))
>> +#define CET_CTRL_RESERVED_BITS		GENMASK(9, 6)
>> +#define CET_SHSTK_MASK_BITS		GENMASK(1, 0)
>>
>> +		if ((!guest_can_use(vcpu, X86_FEATURE_SHSTK) &&
>> +		     (data & CET_SHSTK_MASK_BITS)) ||
>> +		    (!guest_can_use(vcpu, X86_FEATURE_IBT) &&
>> +		     (data & CET_IBT_MASK_BITS)) ||
>> +		    (data & CET_CTRL_RESERVED_BITS))
>> 		    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>> +			return 1;
> Yes, though I vote to separate each check, e.g.
>
> 	if (data & CET_CTRL_RESERVED_BITS)
> 		return 1;
>
> 	if (!guest_can_use(vcpu, X86_FEATURE_SHSTK) && (data & CET_SHSTK_MASK_BITS))
> 		return 1;
>
> 	if (!guest_can_use(vcpu, X86_FEATURE_IBT) && (data & CET_IBT_MASK_BITS))
> 		return 1;
>
> I would expect the code generation to be similar, if not outright identical, and
> IMO it's easier to quickly understand the flow if each check is a separate if-statement.

It looks good to me! Thank you!

Patch

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index c8d9870cfecb..b29817ec6f2e 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2093,6 +2093,21 @@  static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		else
 			msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
 		break;
+	case MSR_IA32_U_CET:
+	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
+		return kvm_get_msr_common(vcpu, msr_info);
+	case MSR_IA32_S_CET:
+	case MSR_KVM_GUEST_SSP:
+	case MSR_IA32_INT_SSP_TAB:
+		if (kvm_get_msr_common(vcpu, msr_info))
+			return 1;
+		if (msr_info->index == MSR_KVM_GUEST_SSP)
+			msr_info->data = vmcs_readl(GUEST_SSP);
+		else if (msr_info->index == MSR_IA32_S_CET)
+			msr_info->data = vmcs_readl(GUEST_S_CET);
+		else if (msr_info->index == MSR_IA32_INT_SSP_TAB)
+			msr_info->data = vmcs_readl(GUEST_INTR_SSP_TABLE);
+		break;
 	case MSR_IA32_DEBUGCTLMSR:
 		msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
 		break;
@@ -2402,6 +2417,31 @@  static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		else
 			vmx->pt_desc.guest.addr_a[index / 2] = data;
 		break;
+#define VMX_CET_CONTROL_MASK		(~GENMASK_ULL(9, 6))
+#define CET_LEG_BITMAP_BASE(data)	((data) >> 12)
+#define CET_EXCLUSIVE_BITS		(CET_SUPPRESS | CET_WAIT_ENDBR)
+	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
+		return kvm_set_msr_common(vcpu, msr_info);
+		break;
+	case MSR_IA32_U_CET:
+	case MSR_IA32_S_CET:
+	case MSR_KVM_GUEST_SSP:
+	case MSR_IA32_INT_SSP_TAB:
+		if ((msr_index == MSR_IA32_U_CET ||
+		     msr_index == MSR_IA32_S_CET) &&
+		    ((data & ~VMX_CET_CONTROL_MASK) ||
+		     !IS_ALIGNED(CET_LEG_BITMAP_BASE(data), 4) ||
+		     (data & CET_EXCLUSIVE_BITS) == CET_EXCLUSIVE_BITS))
+			return 1;
+		if (kvm_set_msr_common(vcpu, msr_info))
+			return 1;
+		if (msr_index == MSR_KVM_GUEST_SSP)
+			vmcs_writel(GUEST_SSP, data);
+		else if (msr_index == MSR_IA32_S_CET)
+			vmcs_writel(GUEST_S_CET, data);
+		else if (msr_index == MSR_IA32_INT_SSP_TAB)
+			vmcs_writel(GUEST_INTR_SSP_TABLE, data);
+		break;
 	case MSR_IA32_PERF_CAPABILITIES:
 		if (data && !vcpu_to_pmu(vcpu)->version)
 			return 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 70d7c80889d6..e200f22cdaad 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3642,13 +3642,6 @@  static inline bool is_shadow_stack_msr(struct kvm_vcpu *vcpu,
 static bool kvm_cet_is_msr_accessible(struct kvm_vcpu *vcpu,
 				      struct msr_data *msr)
 {
-
-	/*
-	 * This function cannot work without later CET MSR read/write
-	 * emulation patch.
-	 */
-	WARN_ON_ONCE(1);
-
 	if (is_shadow_stack_msr(vcpu, msr)) {
 		if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK))
 			return false;