
[7/7] KVM: TDX: Add TSX_CTRL msr into uret_msrs list

Message ID 20241121201448.36170-8-adrian.hunter@intel.com (mailing list archive)
State: New
Series: KVM: TDX: TD vcpu enter/exit

Commit Message

Adrian Hunter Nov. 21, 2024, 8:14 p.m. UTC
From: Yang Weijiang <weijiang.yang@intel.com>

The TDX module resets the TSX_CTRL MSR to 0 at TD exit if TSX is enabled
for the TD, and preserves the TSX_CTRL MSR if TSX is disabled for the TD.
The VMM can rely on the uret_msrs mechanism to defer reloading the host
value until exiting to user space (see the sketch after the diffstat).

Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
---
TD vcpu enter/exit v1:
 - Update from rename in earlier patches (Binbin)

v19:
- fix the type of tdx_uret_tsx_ctrl_slot. unsigned int => int.
---
 arch/x86/kvm/vmx/tdx.c | 33 +++++++++++++++++++++++++++++++--
 arch/x86/kvm/vmx/tdx.h |  8 ++++++++
 2 files changed, 39 insertions(+), 2 deletions(-)
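
For context, a minimal sketch of the deferral idea the patch relies on
(illustrative only, not part of the patch; example_tsx_ctrl_after_td_exit()
is a hypothetical helper, while kvm_user_return_msr_update_cache() is the
interface this series uses):

/*
 * After a TD exit the TDX module has already left TSX_CTRL at 0 (TSX
 * enabled for the TD) or untouched (TSX disabled), so the VMM issues
 * no WRMSR here.  It only updates KVM's cached value, and the common
 * user-return notifier lazily restores the host value when the vCPU
 * thread returns to user space.
 */
static void example_tsx_ctrl_after_td_exit(struct kvm_vcpu *vcpu, int slot)
{
	if (slot == -1)
		return;		/* CPU has no TSX_CTRL; nothing was clobbered */

	if (to_kvm_tdx(vcpu->kvm)->tsx_supported)
		kvm_user_return_msr_update_cache(slot, 0);
	/* else: the hardware value is unchanged and the cache is accurate */
}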

Comments

Chao Gao Nov. 22, 2024, 3:27 a.m. UTC | #1
>+static bool tdparams_tsx_supported(struct kvm_cpuid2 *cpuid)
>+{
>+	const struct kvm_cpuid_entry2 *entry;
>+	u64 mask;
>+	u32 ebx;
>+
>+	entry = kvm_find_cpuid_entry2(cpuid->entries, cpuid->nent, 0x7, 0);
>+	if (entry)
>+		ebx = entry->ebx;
>+	else
>+		ebx = 0;
>+
>+	mask = __feature_bit(X86_FEATURE_HLE) | __feature_bit(X86_FEATURE_RTM);
>+	return ebx & mask;
>+}
>+
> static int setup_tdparams(struct kvm *kvm, struct td_params *td_params,
> 			struct kvm_tdx_init_vm *init_vm)
> {
>@@ -1299,6 +1322,7 @@ static int setup_tdparams(struct kvm *kvm, struct td_params *td_params,
> 	MEMCPY_SAME_SIZE(td_params->mrowner, init_vm->mrowner);
> 	MEMCPY_SAME_SIZE(td_params->mrownerconfig, init_vm->mrownerconfig);
> 
>+	to_kvm_tdx(kvm)->tsx_supported = tdparams_tsx_supported(cpuid);
> 	return 0;
> }
> 
>@@ -2272,6 +2296,11 @@ static int __init __tdx_bringup(void)
> 			return -EIO;
> 		}
> 	}
>+	tdx_uret_tsx_ctrl_slot = kvm_find_user_return_msr(MSR_IA32_TSX_CTRL);
>+	if (tdx_uret_tsx_ctrl_slot == -1 && boot_cpu_has(X86_FEATURE_MSR_TSX_CTRL)) {
>+		pr_err("MSR_IA32_TSX_CTRL isn't included by kvm_find_user_return_msr\n");
>+		return -EIO;
>+	}
> 
> 	/*
> 	 * Enabling TDX requires enabling hardware virtualization first,
>diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
>index 48cf0a1abfcc..815ff6bdbc7e 100644
>--- a/arch/x86/kvm/vmx/tdx.h
>+++ b/arch/x86/kvm/vmx/tdx.h
>@@ -29,6 +29,14 @@ struct kvm_tdx {
> 	u8 nr_tdcs_pages;
> 	u8 nr_vcpu_tdcx_pages;
> 
>+	/*
>+	 * Used on each TD-exit, see tdx_user_return_msr_update_cache().
>+	 * TSX_CTRL value on TD exit
>+	 * - set 0     if guest TSX enabled
>+	 * - preserved if guest TSX disabled
>+	 */
>+	bool tsx_supported;

Is it possible to drop this boolean and tdparams_tsx_supported()? I think we
can use the guest_can_use() framework instead.
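
For reference, a rough sketch of that alternative (hypothetical: it assumes
HLE and RTM were enrolled as governed features, which they are not today,
so guest_can_use() cannot currently answer this query):

	/* TSX_CTRL is reset to 0 by the TDX module iff the TD has TSX */
	if (tdx_uret_tsx_ctrl_slot != -1 &&
	    (guest_can_use(vcpu, X86_FEATURE_HLE) ||
	     guest_can_use(vcpu, X86_FEATURE_RTM)))
		kvm_user_return_msr_update_cache(tdx_uret_tsx_ctrl_slot, 0);

With this, the per-VM tsx_supported flag and tdparams_tsx_supported()
could be dropped, at the cost of wiring HLE/RTM through
kvm_governed_feature_check_and_set() during vCPU CPUID setup.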

>+
> 	u64 tsc_offset;
> 
> 	enum kvm_tdx_state state;
>-- 
>2.43.0
>

Patch

diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 4a33ca54c8ba..2c7b6308da73 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -722,14 +722,21 @@ static struct tdx_uret_msr tdx_uret_msrs[] = {
 	{.msr = MSR_LSTAR,},
 	{.msr = MSR_TSC_AUX,},
 };
+static int tdx_uret_tsx_ctrl_slot;
 
-static void tdx_user_return_msr_update_cache(void)
+static void tdx_user_return_msr_update_cache(struct kvm_vcpu *vcpu)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(tdx_uret_msrs); i++)
 		kvm_user_return_msr_update_cache(tdx_uret_msrs[i].slot,
 						 tdx_uret_msrs[i].defval);
+	/*
+	 * TSX_CTRL is reset to 0 if guest TSX is supported. Otherwise
+	 * preserved.
+	 */
+	if (to_kvm_tdx(vcpu->kvm)->tsx_supported && tdx_uret_tsx_ctrl_slot != -1)
+		kvm_user_return_msr_update_cache(tdx_uret_tsx_ctrl_slot, 0);
 }
 
 static void tdx_restore_host_xsave_state(struct kvm_vcpu *vcpu)
@@ -817,7 +824,7 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
 
 	tdx_vcpu_enter_exit(vcpu);
 
-	tdx_user_return_msr_update_cache();
+	tdx_user_return_msr_update_cache(vcpu);
 	tdx_restore_host_xsave_state(vcpu);
 	tdx->host_state_need_restore = true;
 
@@ -1258,6 +1265,22 @@ static int setup_tdparams_cpuids(struct kvm_cpuid2 *cpuid,
 	return 0;
 }
 
+static bool tdparams_tsx_supported(struct kvm_cpuid2 *cpuid)
+{
+	const struct kvm_cpuid_entry2 *entry;
+	u64 mask;
+	u32 ebx;
+
+	entry = kvm_find_cpuid_entry2(cpuid->entries, cpuid->nent, 0x7, 0);
+	if (entry)
+		ebx = entry->ebx;
+	else
+		ebx = 0;
+
+	mask = __feature_bit(X86_FEATURE_HLE) | __feature_bit(X86_FEATURE_RTM);
+	return ebx & mask;
+}
+
 static int setup_tdparams(struct kvm *kvm, struct td_params *td_params,
 			struct kvm_tdx_init_vm *init_vm)
 {
@@ -1299,6 +1322,7 @@ static int setup_tdparams(struct kvm *kvm, struct td_params *td_params,
 	MEMCPY_SAME_SIZE(td_params->mrowner, init_vm->mrowner);
 	MEMCPY_SAME_SIZE(td_params->mrownerconfig, init_vm->mrownerconfig);
 
+	to_kvm_tdx(kvm)->tsx_supported = tdparams_tsx_supported(cpuid);
 	return 0;
 }
 
@@ -2272,6 +2296,11 @@ static int __init __tdx_bringup(void)
 			return -EIO;
 		}
 	}
+	tdx_uret_tsx_ctrl_slot = kvm_find_user_return_msr(MSR_IA32_TSX_CTRL);
+	if (tdx_uret_tsx_ctrl_slot == -1 && boot_cpu_has(X86_FEATURE_MSR_TSX_CTRL)) {
+		pr_err("MSR_IA32_TSX_CTRL isn't included by kvm_find_user_return_msr\n");
+		return -EIO;
+	}
 
 	/*
 	 * Enabling TDX requires enabling hardware virtualization first,
diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
index 48cf0a1abfcc..815ff6bdbc7e 100644
--- a/arch/x86/kvm/vmx/tdx.h
+++ b/arch/x86/kvm/vmx/tdx.h
@@ -29,6 +29,14 @@ struct kvm_tdx {
 	u8 nr_tdcs_pages;
 	u8 nr_vcpu_tdcx_pages;
 
+	/*
+	 * Used on each TD-exit, see tdx_user_return_msr_update_cache().
+	 * TSX_CTRL value on TD exit
+	 * - set 0     if guest TSX enabled
+	 * - preserved if guest TSX disabled
+	 */
+	bool tsx_supported;
+
 	u64 tsc_offset;
 
 	enum kvm_tdx_state state;