
[v9,10/27] KVM: x86: Refine xsave-managed guest register/MSR reset handling

Message ID 20240124024200.102792-11-weijiang.yang@intel.com (mailing list archive)
State New, archived
Series Enable CET Virtualization

Commit Message

Yang, Weijiang Jan. 24, 2024, 2:41 a.m. UTC
Tweak the code a bit to facilitate resetting more xstate components in
the future, e.g., adding CET's xstate-managed MSRs.

No functional change intended.

Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
---
 arch/x86/kvm/x86.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

Comments

Chao Gao Jan. 25, 2024, 10:17 a.m. UTC | #1
On Tue, Jan 23, 2024 at 06:41:43PM -0800, Yang Weijiang wrote:
>Tweak the code a bit to facilitate resetting more xstate components in
>the future, e.g., adding CET's xstate-managed MSRs.
>
>No functional change intended.
>
>Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
>Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
>---
> arch/x86/kvm/x86.c | 15 ++++++++++++---
> 1 file changed, 12 insertions(+), 3 deletions(-)
>
>diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>index 0e7dc3398293..3671f4868d1b 100644
>--- a/arch/x86/kvm/x86.c
>+++ b/arch/x86/kvm/x86.c
>@@ -12205,6 +12205,11 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
> 		static_branch_dec(&kvm_has_noapic_vcpu);
> }
> 
>+static inline bool is_xstate_reset_needed(void)
>+{
>+	return kvm_cpu_cap_has(X86_FEATURE_MPX);
>+}
>+
> void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
> {
> 	struct kvm_cpuid_entry2 *cpuid_0x1;
>@@ -12262,7 +12267,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
> 	kvm_async_pf_hash_reset(vcpu);
> 	vcpu->arch.apf.halted = false;
> 
>-	if (vcpu->arch.guest_fpu.fpstate && kvm_mpx_supported()) {
>+	if (vcpu->arch.guest_fpu.fpstate && is_xstate_reset_needed()) {
> 		struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate;
> 
> 		/*
>@@ -12272,8 +12277,12 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
> 		if (init_event)
> 			kvm_put_guest_fpu(vcpu);
> 
>-		fpstate_clear_xstate_component(fpstate, XFEATURE_BNDREGS);
>-		fpstate_clear_xstate_component(fpstate, XFEATURE_BNDCSR);
>+		if (kvm_cpu_cap_has(X86_FEATURE_MPX)) {
>+			fpstate_clear_xstate_component(fpstate,
>+						       XFEATURE_BNDREGS);
>+			fpstate_clear_xstate_component(fpstate,
>+						       XFEATURE_BNDCSR);
>+		}

Checking whether KVM supports MPX is indirect and adds complexity.

how about something like:

#define XSTATE_NEED_RESET_MASK		(XFEATURE_MASK_BNDREGS | \
					 XFEATURE_MASK_BNDCSR)

	u64 reset_mask;
	...

	reset_mask = (kvm_caps.supported_xcr0 | kvm_caps.supported_xss) &
			XSTATE_NEED_RESET_MASK;
	if (vcpu->arch.guest_fpu.fpstate && reset_mask) {
	...
		for_each_set_bit(i, &reset_mask, XFEATURE_MAX)
			fpstate_clear_xstate_component(fpstate, i);
	...
	}

then in patch 24, you can simply add CET_U/S into XSTATE_NEED_RESET_MASK.
Yang, Weijiang Jan. 26, 2024, 9:13 a.m. UTC | #2
On 1/25/2024 6:17 PM, Chao Gao wrote:
> On Tue, Jan 23, 2024 at 06:41:43PM -0800, Yang Weijiang wrote:
>> [...]
>> @@ -12272,8 +12277,12 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
>> 		if (init_event)
>> 			kvm_put_guest_fpu(vcpu);
>>
>> -		fpstate_clear_xstate_component(fpstate, XFEATURE_BNDREGS);
>> -		fpstate_clear_xstate_component(fpstate, XFEATURE_BNDCSR);
>> +		if (kvm_cpu_cap_has(X86_FEATURE_MPX)) {
>> +			fpstate_clear_xstate_component(fpstate,
>> +						       XFEATURE_BNDREGS);
>> +			fpstate_clear_xstate_component(fpstate,
>> +						       XFEATURE_BNDCSR);
>> +		}
> Checking whether KVM supports MPX is indirect and adds complexity.
>
> how about something like:
>
> #define XSTATE_NEED_RESET_MASK		(XFEATURE_MASK_BNDREGS | \
> 					 XFEATURE_MASK_BNDCSR)
>
> 	u64 reset_mask;
> 	...
>
> 	reset_mask = (kvm_caps.supported_xcr0 | kvm_caps.supported_xss) &
> 			XSTATE_NEED_RESET_MASK;
> 	if (vcpu->arch.guest_fpu.fpstate && reset_mask) {
> 	...
> 		for_each_set_bit(i, &reset_mask, XFEATURE_MAX)
> 			fpstate_clear_xstate_component(fpstate, i);
> 	...
> 	}
>
> then in patch 24, you can simply add CET_U/S into XSTATE_NEED_RESET_MASK.

Yes, the proposal looks good to me, I will apply it in the next version, thanks!
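
For readers following the thread, the excerpt below sketches one way Chao's mask-based suggestion could be folded into kvm_vcpu_reset(). It is illustrative only, not the actual v10 code; note that reset_mask is declared as unsigned long (rather than u64) here so its address can be passed to for_each_set_bit().

/* Illustrative sketch only -- not the code actually posted or merged. */
#define XSTATE_NEED_RESET_MASK		(XFEATURE_MASK_BNDREGS | \
					 XFEATURE_MASK_BNDCSR)

void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	/* unsigned long so &reset_mask can be handed to for_each_set_bit(). */
	unsigned long reset_mask;
	unsigned int i;
	...

	reset_mask = (kvm_caps.supported_xcr0 | kvm_caps.supported_xss) &
		     XSTATE_NEED_RESET_MASK;

	if (vcpu->arch.guest_fpu.fpstate && reset_mask) {
		struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate;

		/*
		 * On INIT the guest FPU may still be resident in hardware;
		 * save it so the in-memory image can be cleared, then
		 * reload it below.
		 */
		if (init_event)
			kvm_put_guest_fpu(vcpu);

		/* Clear every xstate component KVM needs to reset. */
		for_each_set_bit(i, &reset_mask, XFEATURE_MAX)
			fpstate_clear_xstate_component(fpstate, i);

		if (init_event)
			kvm_load_guest_fpu(vcpu);
	}
	...
}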

Patch

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0e7dc3398293..3671f4868d1b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12205,6 +12205,11 @@  void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 		static_branch_dec(&kvm_has_noapic_vcpu);
 }
 
+static inline bool is_xstate_reset_needed(void)
+{
+	return kvm_cpu_cap_has(X86_FEATURE_MPX);
+}
+
 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
 	struct kvm_cpuid_entry2 *cpuid_0x1;
@@ -12262,7 +12267,7 @@  void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	kvm_async_pf_hash_reset(vcpu);
 	vcpu->arch.apf.halted = false;
 
-	if (vcpu->arch.guest_fpu.fpstate && kvm_mpx_supported()) {
+	if (vcpu->arch.guest_fpu.fpstate && is_xstate_reset_needed()) {
 		struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate;
 
 		/*
@@ -12272,8 +12277,12 @@  void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 		if (init_event)
 			kvm_put_guest_fpu(vcpu);
 
-		fpstate_clear_xstate_component(fpstate, XFEATURE_BNDREGS);
-		fpstate_clear_xstate_component(fpstate, XFEATURE_BNDCSR);
+		if (kvm_cpu_cap_has(X86_FEATURE_MPX)) {
+			fpstate_clear_xstate_component(fpstate,
+						       XFEATURE_BNDREGS);
+			fpstate_clear_xstate_component(fpstate,
+						       XFEATURE_BNDCSR);
+		}
 
 		if (init_event)
 			kvm_load_guest_fpu(vcpu);
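
As Chao points out, the payoff of the mask-based approach comes later in the series: the CET enabling patch would only need to extend the mask. With the kernel's existing CET xfeature defines, that future change would look roughly like the following sketch (the exact form in patch 24 may differ):

/* Sketch: how the mask might grow once CET xstate is virtualized. */
#define XSTATE_NEED_RESET_MASK		(XFEATURE_MASK_BNDREGS   | \
					 XFEATURE_MASK_BNDCSR    | \
					 XFEATURE_MASK_CET_USER  | \
					 XFEATURE_MASK_CET_KERNEL)

No change to the reset loop itself would be needed, since for_each_set_bit() already walks whatever bits end up set in both the mask and kvm_caps.supported_xcr0/supported_xss.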