Message ID | 20241121201448.36170-5-adrian.hunter@intel.com (mailing list archive)
---|---
State | New
Series | KVM: TDX: TD vcpu enter/exit
>+static void tdx_restore_host_xsave_state(struct kvm_vcpu *vcpu)
>+{
>+	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
>+
>+	if (static_cpu_has(X86_FEATURE_XSAVE) &&
>+	    kvm_host.xcr0 != (kvm_tdx->xfam & kvm_caps.supported_xcr0))
>+		xsetbv(XCR_XFEATURE_ENABLED_MASK, kvm_host.xcr0);
>+	if (static_cpu_has(X86_FEATURE_XSAVES) &&
>+	    /* PT can be exposed to TD guest regardless of KVM's XSS support */
>+	    kvm_host.xss != (kvm_tdx->xfam &
>+			     (kvm_caps.supported_xss | XFEATURE_MASK_PT |
>+			      XFEATURE_MASK_CET_USER | XFEATURE_MASK_CET_KERNEL)))

Should we drop CET/PT from this series? I think they are worth a new
patch/series.

>+		wrmsrl(MSR_IA32_XSS, kvm_host.xss);
>+	if (static_cpu_has(X86_FEATURE_PKU) &&

How about using cpu_feature_enabled()? It is used in
kvm_load_host_xsave_state(), and it handles the case where
CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS is not enabled.

>+	    (kvm_tdx->xfam & XFEATURE_MASK_PKRU))
>+		write_pkru(vcpu->arch.host_pkru);

If host_pkru happens to match the hardware value after TD-exit, the
write can be omitted, similar to what is done above for xss and xcr0.

>+}
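Putting the cpu_feature_enabled() and host_pkru comments together, the PKRU
leg might end up looking roughly like the sketch below. This is only an
untested illustration of the suggestion, not the author's patch; it assumes
an rdpkru() read in this path is acceptable and cheaper than an unconditional
WRPKRU.

	/*
	 * Sketch only: cpu_feature_enabled() lets the branch compile out
	 * when CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=n, and the rdpkru()
	 * comparison skips the write when hardware already holds the host
	 * value, mirroring the xcr0/xss checks above.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PKU) &&
	    (kvm_tdx->xfam & XFEATURE_MASK_PKRU) &&
	    vcpu->arch.host_pkru != rdpkru())
		write_pkru(vcpu->arch.host_pkru);

Whether the extra read is worth it presumably depends on how often the
hardware value actually matches host_pkru after a TD-exit.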
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 6e4ea2d420bc..00fdd2932205 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -2,6 +2,8 @@
 #include <linux/cleanup.h>
 #include <linux/cpu.h>
 #include <linux/mmu_context.h>
+
+#include <asm/fpu/xcr.h>
 #include <asm/tdx.h>
 #include "capabilities.h"
 #include "mmu.h"
@@ -709,6 +711,24 @@ void tdx_vcpu_free(struct kvm_vcpu *vcpu)
 }
 
+static void tdx_restore_host_xsave_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
+
+	if (static_cpu_has(X86_FEATURE_XSAVE) &&
+	    kvm_host.xcr0 != (kvm_tdx->xfam & kvm_caps.supported_xcr0))
+		xsetbv(XCR_XFEATURE_ENABLED_MASK, kvm_host.xcr0);
+	if (static_cpu_has(X86_FEATURE_XSAVES) &&
+	    /* PT can be exposed to TD guest regardless of KVM's XSS support */
+	    kvm_host.xss != (kvm_tdx->xfam &
+			     (kvm_caps.supported_xss | XFEATURE_MASK_PT |
+			      XFEATURE_MASK_CET_USER | XFEATURE_MASK_CET_KERNEL)))
+		wrmsrl(MSR_IA32_XSS, kvm_host.xss);
+	if (static_cpu_has(X86_FEATURE_PKU) &&
+	    (kvm_tdx->xfam & XFEATURE_MASK_PKRU))
+		write_pkru(vcpu->arch.host_pkru);
+}
+
 static void tdx_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_tdx *tdx = to_tdx(vcpu);
@@ -776,6 +796,7 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
 
 	tdx_vcpu_enter_exit(vcpu);
 
+	tdx_restore_host_xsave_state(vcpu);
 	tdx->host_state_need_restore = true;
 
 	vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;