Message ID | ca819af632d5c7ea2905c4a1d07303139eaef4ea.1705965635.git.isaku.yamahata@intel.com |
---|---|
State | New, archived |
Series | KVM TDX basic feature support |
On 1/23/2024 7:53 AM, isaku.yamahata@intel.com wrote:
> From: Yang Weijiang <weijiang.yang@intel.com>
>
> TDX module resets the TSX_CTRL MSR to 0 at TD exit if TSX is enabled for
> TD. Or it preserves the TSX_CTRL MSR if TSX is disabled for TD. VMM can
> rely on uret_msrs mechanism to defer the reload of host value until exiting
> to user space.
>
> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
> ---
>  arch/x86/kvm/vmx/tdx.c | 33 +++++++++++++++++++++++++++++++--
>  arch/x86/kvm/vmx/tdx.h |  8 ++++++++
>  2 files changed, 39 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index 4685ff6aa5f8..71c6fc10e8c4 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -597,14 +597,21 @@ static struct tdx_uret_msr tdx_uret_msrs[] = {
>  	{.msr = MSR_LSTAR,},
>  	{.msr = MSR_TSC_AUX,},
>  };
> +static unsigned int tdx_uret_tsx_ctrl_slot;

This should be "int" rather than "unsigned int": kvm_find_user_return_msr()
returns int, and comparing an unsigned int against -1 only works through
implicit conversion, which is poor style. A minimal sketch of the change
follows after the quoted patch.

>
> -static void tdx_user_return_update_cache(void)
> +static void tdx_user_return_update_cache(struct kvm_vcpu *vcpu)
>  {
>  	int i;
>
>  	for (i = 0; i < ARRAY_SIZE(tdx_uret_msrs); i++)
>  		kvm_user_return_update_cache(tdx_uret_msrs[i].slot,
>  					     tdx_uret_msrs[i].defval);
> +	/*
> +	 * TSX_CTRL is reset to 0 if guest TSX is supported. Otherwise
> +	 * preserved.
> +	 */
> +	if (to_kvm_tdx(vcpu->kvm)->tsx_supported && tdx_uret_tsx_ctrl_slot != -1)
> +		kvm_user_return_update_cache(tdx_uret_tsx_ctrl_slot, 0);
>  }
>
>  static void tdx_restore_host_xsave_state(struct kvm_vcpu *vcpu)
> @@ -699,7 +706,7 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu)
>
>  	tdx_vcpu_enter_exit(tdx);
>
> -	tdx_user_return_update_cache();
> +	tdx_user_return_update_cache(vcpu);
>  	tdx_restore_host_xsave_state(vcpu);
>  	tdx->host_state_need_restore = true;
>
> @@ -1212,6 +1219,22 @@ static int setup_tdparams_xfam(struct kvm_cpuid2 *cpuid, struct td_params *td_pa
>  	return 0;
>  }
>
> +static bool tdparams_tsx_supported(struct kvm_cpuid2 *cpuid)
> +{
> +	const struct kvm_cpuid_entry2 *entry;
> +	u64 mask;
> +	u32 ebx;
> +
> +	entry = kvm_find_cpuid_entry2(cpuid->entries, cpuid->nent, 0x7, 0);
> +	if (entry)
> +		ebx = entry->ebx;
> +	else
> +		ebx = 0;
> +
> +	mask = __feature_bit(X86_FEATURE_HLE) | __feature_bit(X86_FEATURE_RTM);
> +	return ebx & mask;
> +}
> +
>  static int setup_tdparams(struct kvm *kvm, struct td_params *td_params,
>  			  struct kvm_tdx_init_vm *init_vm)
>  {
> @@ -1253,6 +1276,7 @@ static int setup_tdparams(struct kvm *kvm, struct td_params *td_params,
>  	MEMCPY_SAME_SIZE(td_params->mrowner, init_vm->mrowner);
>  	MEMCPY_SAME_SIZE(td_params->mrownerconfig, init_vm->mrownerconfig);
>
> +	to_kvm_tdx(kvm)->tsx_supported = tdparams_tsx_supported(cpuid);
>  	return 0;
>  }
>
> @@ -1978,6 +2002,11 @@ int __init tdx_hardware_setup(struct kvm_x86_ops *x86_ops)
>  			return -EIO;
>  		}
>  	}
> +	tdx_uret_tsx_ctrl_slot = kvm_find_user_return_msr(MSR_IA32_TSX_CTRL);
> +	if (tdx_uret_tsx_ctrl_slot == -1 && boot_cpu_has(X86_FEATURE_MSR_TSX_CTRL)) {
> +		pr_err("MSR_IA32_TSX_CTRL isn't included by kvm_find_user_return_msr\n");
> +		return -EIO;
> +	}
>
>  	max_pkgs = topology_max_packages();
>  	tdx_mng_key_config_lock = kcalloc(max_pkgs, sizeof(*tdx_mng_key_config_lock),
> diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
> index 2d3119c60a14..883eb05d207f 100644
> --- a/arch/x86/kvm/vmx/tdx.h
> +++ b/arch/x86/kvm/vmx/tdx.h
> @@ -17,6 +17,14 @@ struct kvm_tdx {
>  	u64 xfam;
>  	int hkid;
>
> +	/*
> +	 * Used on each TD-exit, see tdx_user_return_update_cache().
> +	 * TSX_CTRL value on TD exit
> +	 * - set 0 if guest TSX enabled
> +	 * - preserved if guest TSX disabled
> +	 */
> +	bool tsx_supported;
> +
>  	hpa_t source_pa;
>
>  	bool finalized;
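To make the suggestion concrete, a minimal sketch of the type change
(untested, on top of this patch):

	-static unsigned int tdx_uret_tsx_ctrl_slot;
	+static int tdx_uret_tsx_ctrl_slot;

With that, the "tdx_uret_tsx_ctrl_slot != -1" check in
tdx_user_return_update_cache() and the "== -1" check in
tdx_hardware_setup() become plain int comparisons, matching the return
type of kvm_find_user_return_msr(), instead of relying on -1 being
implicitly converted to UINT_MAX.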
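As an aside, for anyone not familiar with the uret_msrs mechanism the
changelog relies on, my understanding is that it amounts to the
following (a simplified sketch with invented names, not the actual KVM
code): the TDX module has already clobbered TSX_CTRL by the time the
TD-exit reaches KVM, so KVM only records the value now in hardware and
defers the WRMSR restoring the host value until the CPU returns to user
space, and only if the values actually differ.

	struct uret_msr {
		u32 msr;
		u64 host;	/* value the host wants when back in user space */
		u64 curr;	/* value currently in the hardware MSR */
	};

	/* TD-exit path: no WRMSR, just note what hardware now holds. */
	static void uret_update_cache(struct uret_msr *m, u64 hw_value)
	{
		m->curr = hw_value;
		/* arm the return-to-user-space notifier here */
	}

	/* Return-to-user-space notifier: restore the host value lazily. */
	static void uret_on_user_return(struct uret_msr *m)
	{
		if (m->curr != m->host) {
			wrmsrl(m->msr, m->host);
			m->curr = m->host;
		}
	}

This is why tdx_user_return_update_cache() can pass 0 for TSX_CTRL
without doing an MSR write itself.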