diff mbox series

[4/7] KVM: TDX: restore host xsave state when exit from the guest TD

Message ID 20241121201448.36170-5-adrian.hunter@intel.com (mailing list archive)
State New
Headers show
Series KVM: TDX: TD vcpu enter/exit | expand

Commit Message

Adrian Hunter Nov. 21, 2024, 8:14 p.m. UTC
From: Isaku Yamahata <isaku.yamahata@intel.com>

On exiting from the guest TD, the host xsave state is clobbered.  Restore
the host xsave state on TD exit.

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
---
TD vcpu enter/exit v1:
- Remove noinstr on tdx_vcpu_enter_exit() (Sean)
- Switch to kvm_host struct for xcr0 and xss

v19:
- Add EXPORT_SYMBOL_GPL(host_xcr0)

v15 -> v16:
- Added CET flag mask
---
 arch/x86/kvm/vmx/tdx.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

Comments

Chao Gao Nov. 22, 2024, 5:49 a.m. UTC | #1
>+static void tdx_restore_host_xsave_state(struct kvm_vcpu *vcpu)
>+{
>+	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
>+
>+	if (static_cpu_has(X86_FEATURE_XSAVE) &&
>+	    kvm_host.xcr0 != (kvm_tdx->xfam & kvm_caps.supported_xcr0))
>+		xsetbv(XCR_XFEATURE_ENABLED_MASK, kvm_host.xcr0);
>+	if (static_cpu_has(X86_FEATURE_XSAVES) &&
>+	    /* PT can be exposed to TD guest regardless of KVM's XSS support */
>+	    kvm_host.xss != (kvm_tdx->xfam &
>+			 (kvm_caps.supported_xss | XFEATURE_MASK_PT |
>+			  XFEATURE_MASK_CET_USER | XFEATURE_MASK_CET_KERNEL)))

Should we drop CET/PT from this series? I think they are worth a new
patch/series.

>+		wrmsrl(MSR_IA32_XSS, kvm_host.xss);
>+	if (static_cpu_has(X86_FEATURE_PKU) &&

How about using cpu_feature_enabled()? It is used in kvm_load_host_xsave_state().
It handles the case where CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS is not
enabled.

>+	    (kvm_tdx->xfam & XFEATURE_MASK_PKRU))
>+		write_pkru(vcpu->arch.host_pkru);

If host_pkru happens to match the hardware value after a TD-exit, the write can
be omitted, similar to what is done above for xss and xcr0.

>+}
diff mbox series

Patch

diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 6e4ea2d420bc..00fdd2932205 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -2,6 +2,8 @@ 
 #include <linux/cleanup.h>
 #include <linux/cpu.h>
 #include <linux/mmu_context.h>
+
+#include <asm/fpu/xcr.h>
 #include <asm/tdx.h>
 #include "capabilities.h"
 #include "mmu.h"
@@ -709,6 +711,24 @@  void tdx_vcpu_free(struct kvm_vcpu *vcpu)
 }
 
 
+static void tdx_restore_host_xsave_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
+
+	if (static_cpu_has(X86_FEATURE_XSAVE) &&
+	    kvm_host.xcr0 != (kvm_tdx->xfam & kvm_caps.supported_xcr0))
+		xsetbv(XCR_XFEATURE_ENABLED_MASK, kvm_host.xcr0);
+	if (static_cpu_has(X86_FEATURE_XSAVES) &&
+	    /* PT can be exposed to TD guest regardless of KVM's XSS support */
+	    kvm_host.xss != (kvm_tdx->xfam &
+			 (kvm_caps.supported_xss | XFEATURE_MASK_PT |
+			  XFEATURE_MASK_CET_USER | XFEATURE_MASK_CET_KERNEL)))
+		wrmsrl(MSR_IA32_XSS, kvm_host.xss);
+	if (static_cpu_has(X86_FEATURE_PKU) &&
+	    (kvm_tdx->xfam & XFEATURE_MASK_PKRU))
+		write_pkru(vcpu->arch.host_pkru);
+}
+
 static void tdx_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_tdx *tdx = to_tdx(vcpu);
@@ -776,6 +796,7 @@  fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
 
 	tdx_vcpu_enter_exit(vcpu);
 
+	tdx_restore_host_xsave_state(vcpu);
 	tdx->host_state_need_restore = true;
 
 	vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;