[v18,069/121] KVM: TDX: restore host xsave state when exiting from the guest TD

Message ID bfa1994b79687709aae011ff455147cc7dd97ffb.1705965635.git.isaku.yamahata@intel.com
State New, archived
Series KVM TDX basic feature support

Commit Message

Isaku Yamahata Jan. 22, 2024, 11:53 p.m. UTC
From: Isaku Yamahata <isaku.yamahata@intel.com>

On exiting from the guest TD, the host xsave state is clobbered.  Restore
the host xsave state on TD exit.

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
---
v15 -> v16:
- Added CET flag mask
---
 arch/x86/kvm/vmx/tdx.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

Comments

Binbin Wu Feb. 20, 2024, 8:56 a.m. UTC | #1
On 1/23/2024 7:53 AM, isaku.yamahata@intel.com wrote:
> From: Isaku Yamahata <isaku.yamahata@intel.com>
>
> On exiting from the guest TD, the host xsave state is clobbered.  Restore
> the host xsave state on TD exit.
>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
> ---
> v15 -> v16:
> - Added CET flag mask
> ---
>   arch/x86/kvm/vmx/tdx.c | 19 +++++++++++++++++++
>   1 file changed, 19 insertions(+)
>
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index 903f4abb3543..fe818cfde9e7 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -2,6 +2,7 @@
>   #include <linux/cpu.h>
>   #include <linux/mmu_context.h>
>   
> +#include <asm/fpu/xcr.h>
>   #include <asm/tdx.h>
>   
>   #include "capabilities.h"
> @@ -584,6 +585,23 @@ void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
>   	 */
>   }
>   
> +static void tdx_restore_host_xsave_state(struct kvm_vcpu *vcpu)
> +{
> +	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
> +
> +	if (static_cpu_has(X86_FEATURE_XSAVE) &&
> +	    host_xcr0 != (kvm_tdx->xfam & kvm_caps.supported_xcr0))
> +		xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
> +	if (static_cpu_has(X86_FEATURE_XSAVES) &&
> +	    /* PT can be exposed to TD guest regardless of KVM's XSS support */
> +	    host_xss != (kvm_tdx->xfam &
> +			 (kvm_caps.supported_xss | XFEATURE_MASK_PT | TDX_TD_XFAM_CET)))
> +		wrmsrl(MSR_IA32_XSS, host_xss);
> +	if (static_cpu_has(X86_FEATURE_PKU) &&
> +	    (kvm_tdx->xfam & XFEATURE_MASK_PKRU))
> +		write_pkru(vcpu->arch.host_pkru);
> +}
> +

The export of host_xcr0 in patch 67 can be moved to this patch.

  u64 __read_mostly host_xcr0;
+EXPORT_SYMBOL_GPL(host_xcr0);
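
For context: host_xcr0 is defined in arch/x86/kvm/x86.c, which is linked
into kvm.ko, while vmx/tdx.c is built into kvm-intel.ko, so the
EXPORT_SYMBOL_GPL is what lets tdx_restore_host_xsave_state() reach the
symbol across the module boundary. Moving the export here keeps it next to
its first user (assuming, as the comment implies, that nothing in patch 67
itself needs it).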

>   static noinstr void tdx_vcpu_enter_exit(struct vcpu_tdx *tdx)
>   {
>   	struct tdx_module_args args;
> @@ -659,6 +677,7 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu)
>   
>   	tdx_vcpu_enter_exit(tdx);
>   
> +	tdx_restore_host_xsave_state(vcpu);
>   	tdx->host_state_need_restore = true;
>   
>   	vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;

Patch

diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 903f4abb3543..fe818cfde9e7 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -2,6 +2,7 @@ 
 #include <linux/cpu.h>
 #include <linux/mmu_context.h>
 
+#include <asm/fpu/xcr.h>
 #include <asm/tdx.h>
 
 #include "capabilities.h"
@@ -584,6 +585,23 @@ void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	 */
 }
 
+static void tdx_restore_host_xsave_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
+
+	if (static_cpu_has(X86_FEATURE_XSAVE) &&
+	    host_xcr0 != (kvm_tdx->xfam & kvm_caps.supported_xcr0))
+		xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
+	if (static_cpu_has(X86_FEATURE_XSAVES) &&
+	    /* PT can be exposed to TD guest regardless of KVM's XSS support */
+	    host_xss != (kvm_tdx->xfam &
+			 (kvm_caps.supported_xss | XFEATURE_MASK_PT | TDX_TD_XFAM_CET)))
+		wrmsrl(MSR_IA32_XSS, host_xss);
+	if (static_cpu_has(X86_FEATURE_PKU) &&
+	    (kvm_tdx->xfam & XFEATURE_MASK_PKRU))
+		write_pkru(vcpu->arch.host_pkru);
+}
+
 static noinstr void tdx_vcpu_enter_exit(struct vcpu_tdx *tdx)
 {
 	struct tdx_module_args args;
@@ -659,6 +677,7 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu)
 
 	tdx_vcpu_enter_exit(tdx);
 
+	tdx_restore_host_xsave_state(vcpu);
 	tdx->host_state_need_restore = true;
 
 	vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
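
A note on the IA32_XSS condition above: the sketch below is a minimal,
self-contained userspace illustration (hypothetical values; the bit
positions mirror the x86 XSAVE layout, but nothing here is the kernel's
actual code) of why a PT-enabled TD triggers the restore even when KVM
itself reports no XSS support:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions, per the x86 XSAVE feature layout. */
#define XFEATURE_MASK_PT  (1ULL << 8)                    /* Processor Trace */
#define TDX_TD_XFAM_CET   ((1ULL << 11) | (1ULL << 12))  /* CET user/supervisor */

int main(void)
{
	uint64_t host_xss      = 0;                /* host leaves PT/CET off in IA32_XSS */
	uint64_t supported_xss = 0;                /* KVM exposes no XSS features */
	uint64_t xfam          = XFEATURE_MASK_PT; /* TD created with PT in its XFAM */

	/*
	 * Mirrors the patch's condition: PT and CET are OR'ed into the mask
	 * even though KVM does not support them, because the TDX module can
	 * enable them for the guest based on XFAM alone.
	 */
	uint64_t guest_xss = xfam & (supported_xss | XFEATURE_MASK_PT | TDX_TD_XFAM_CET);

	if (host_xss != guest_xss)
		printf("IA32_XSS mismatch (%#llx vs %#llx): restore needed\n",
		       (unsigned long long)guest_xss, (unsigned long long)host_xss);
	return 0;
}

The XCR0 check, by contrast, masks xfam with kvm_caps.supported_xcr0 alone.
The placement of tdx_restore_host_xsave_state() right after
tdx_vcpu_enter_exit() matches where VMX restores host state via
kvm_load_host_xsave_state(); TDX needs its own variant because the decision
is driven by the TD's XFAM rather than by vcpu->arch.xcr0/ia32_xss (guest
xstate is managed by the TDX module, not tracked by KVM).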