[2/6] KVM: VMX: Use wrmsr for switching between guest and host IA32_XSS

Message ID 20191009004142.225377-2-aaronlewis@google.com (mailing list archive)
State New, archived
Series [1/6] KVM: VMX: Remove unneeded check for X86_FEATURE_XSAVE

Commit Message

Aaron Lewis Oct. 9, 2019, 12:41 a.m. UTC
Set IA32_XSS for the guest and host during VM Enter and VM Exit
transitions rather than by using the MSR-load areas.

Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
 arch/x86/kvm/svm.c     |  4 ++--
 arch/x86/kvm/vmx/vmx.c | 14 ++------------
 arch/x86/kvm/x86.c     | 25 +++++++++++++++++++++----
 arch/x86/kvm/x86.h     |  4 ++--
 4 files changed, 27 insertions(+), 20 deletions(-)
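
A rough sketch of the switching scheme the diff ends up with (simplified, with illustrative helper names and user-space stubs; not the kernel's actual code): host_xss is read once at hardware setup, and the MSR is written directly around vcpu_run only when guest and host values differ, instead of registering it in the VMX MSR-load areas.

/*
 * Illustrative sketch only -- helper names and stubs are hypothetical,
 * not the code added by this patch.
 */
#include <stdint.h>

#define MSR_IA32_XSS 0xda0

static uint64_t host_xss;	/* captured once at hardware setup via RDMSR */

static void wrmsrl(uint32_t msr, uint64_t val)
{
	/* stand-in for the real WRMSR wrapper */
	(void)msr; (void)val;
}

/* Called just before entering the guest. */
static void load_guest_xss(uint64_t guest_xss)
{
	if (guest_xss != host_xss)
		wrmsrl(MSR_IA32_XSS, guest_xss);
}

/* Called right after the guest exits, before host code depends on XSS. */
static void load_host_xss(uint64_t guest_xss)
{
	if (guest_xss != host_xss)
		wrmsrl(MSR_IA32_XSS, host_xss);
}

int main(void)
{
	host_xss = 0;
	load_guest_xss(0x100);	/* e.g. a guest enabling a supervisor state */
	load_host_xss(0x100);
	return 0;
}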

Comments

Yang, Weijiang Oct. 9, 2019, 4:49 a.m. UTC | #1
On Tue, Oct 08, 2019 at 05:41:38PM -0700, Aaron Lewis wrote:
> Set IA32_XSS for the guest and host during VM Enter and VM Exit
> transitions rather than by using the MSR-load areas.
> 
> Reviewed-by: Jim Mattson <jmattson@google.com>
> Signed-off-by: Aaron Lewis <aaronlewis@google.com>
> ---
>  arch/x86/kvm/svm.c     |  4 ++--
>  arch/x86/kvm/vmx/vmx.c | 14 ++------------
>  arch/x86/kvm/x86.c     | 25 +++++++++++++++++++++----
>  arch/x86/kvm/x86.h     |  4 ++--
>  4 files changed, 27 insertions(+), 20 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index f8ecb6df5106..e2d7a7738c76 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -5628,7 +5628,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
>  	svm->vmcb->save.cr2 = vcpu->arch.cr2;
>  
>  	clgi();
> -	kvm_load_guest_xcr0(vcpu);
> +	kvm_load_guest_xsave_controls(vcpu);
>  
>  	if (lapic_in_kernel(vcpu) &&
>  		vcpu->arch.apic->lapic_timer.timer_advance_ns)
> @@ -5778,7 +5778,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
>  	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
>  		kvm_before_interrupt(&svm->vcpu);
>  
> -	kvm_put_guest_xcr0(vcpu);
> +	kvm_load_host_xsave_controls(vcpu);
>  	stgi();
>  
>  	/* Any pending NMI will happen here */
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 409e9a7323f1..ff5ba28abecb 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -106,8 +106,6 @@ module_param(enable_apicv, bool, S_IRUGO);
>  static bool __read_mostly nested = 1;
>  module_param(nested, bool, S_IRUGO);
>  
> -static u64 __read_mostly host_xss;
> -
>  bool __read_mostly enable_pml = 1;
>  module_param_named(pml, enable_pml, bool, S_IRUGO);
>  
> @@ -2074,11 +2072,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  		if (data != 0)
>  			return 1;
>  		vcpu->arch.ia32_xss = data;
> -		if (vcpu->arch.ia32_xss != host_xss)
> -			add_atomic_switch_msr(vmx, MSR_IA32_XSS,
> -				vcpu->arch.ia32_xss, host_xss, false);
> -		else
> -			clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
>  		break;
>  	case MSR_IA32_RTIT_CTL:
>  		if ((pt_mode != PT_MODE_HOST_GUEST) ||
> @@ -6540,7 +6533,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
>  		vmx_set_interrupt_shadow(vcpu, 0);
>  
> -	kvm_load_guest_xcr0(vcpu);
> +	kvm_load_guest_xsave_controls(vcpu);
>  
>  	if (static_cpu_has(X86_FEATURE_PKU) &&
>  	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
> @@ -6647,7 +6640,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  			__write_pkru(vmx->host_pkru);
>  	}
>  
> -	kvm_put_guest_xcr0(vcpu);
> +	kvm_load_host_xsave_controls(vcpu);
>  
>  	vmx->nested.nested_run_pending = 0;
>  	vmx->idt_vectoring_info = 0;
> @@ -7599,9 +7592,6 @@ static __init int hardware_setup(void)
>  		WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost");
>  	}
>  
> -	if (boot_cpu_has(X86_FEATURE_XSAVES))
> -		rdmsrl(MSR_IA32_XSS, host_xss);
> -
>  	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
>  	    !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
>  		enable_vpid = 0;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 661e2bf38526..e90e658fd8a9 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -176,6 +176,8 @@ struct kvm_shared_msrs {
>  static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
>  static struct kvm_shared_msrs __percpu *shared_msrs;
>  
> +static u64 __read_mostly host_xss;
> +
>  struct kvm_stats_debugfs_item debugfs_entries[] = {
>  	{ "pf_fixed", VCPU_STAT(pf_fixed) },
>  	{ "pf_guest", VCPU_STAT(pf_guest) },
> @@ -812,27 +814,39 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
>  }
>  EXPORT_SYMBOL_GPL(kvm_lmsw);
>  
> -void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
> +void kvm_load_guest_xsave_controls(struct kvm_vcpu *vcpu)
>  {
>  	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
>  			!vcpu->guest_xcr0_loaded) {
>  		/* kvm_set_xcr() also depends on this */
>  		if (vcpu->arch.xcr0 != host_xcr0)
>  			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
> +
> +		if (kvm_x86_ops->xsaves_supported() &&
> +		    guest_cpuid_has(vcpu, X86_FEATURE_XSAVES) &&
> +		    vcpu->arch.ia32_xss != host_xss)
> +			wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
> +
>  		vcpu->guest_xcr0_loaded = 1;
>  	}
>  }
> -EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0);
> +EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_controls);
>  
> -void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
> +void kvm_load_host_xsave_controls(struct kvm_vcpu *vcpu)
>  {
>  	if (vcpu->guest_xcr0_loaded) {
>  		if (vcpu->arch.xcr0 != host_xcr0)
>  			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
> +
> +		if (kvm_x86_ops->xsaves_supported() &&
> +		    guest_cpuid_has(vcpu, X86_FEATURE_XSAVES) &&
Should it check guest/VMX XSAVES support before loading the *host*
XSS state?
> +		    vcpu->arch.ia32_xss != host_xss)
> +			wrmsrl(MSR_IA32_XSS, host_xss);
> +
>  		vcpu->guest_xcr0_loaded = 0;
>  	}
>  }
> -EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0);
> +EXPORT_SYMBOL_GPL(kvm_load_host_xsave_controls);
>  
>  static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
>  {
> @@ -9293,6 +9307,9 @@ int kvm_arch_hardware_setup(void)
>  		kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
>  	}
>  
> +	if (boot_cpu_has(X86_FEATURE_XSAVES))
> +		rdmsrl(MSR_IA32_XSS, host_xss);
> +
>  	kvm_init_msr_list();
>  	return 0;
>  }
> diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> index dbf7442a822b..0d04e865665b 100644
> --- a/arch/x86/kvm/x86.h
> +++ b/arch/x86/kvm/x86.h
> @@ -366,7 +366,7 @@ static inline bool kvm_pat_valid(u64 data)
>  	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
>  }
>  
> -void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
> -void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
> +void kvm_load_guest_xsave_controls(struct kvm_vcpu *vcpu);
> +void kvm_load_host_xsave_controls(struct kvm_vcpu *vcpu);
>  
>  #endif
> -- 
> 2.23.0.581.g78d2f28ef7-goog
Paolo Bonzini Oct. 9, 2019, 6:30 a.m. UTC | #2
On 09/10/19 02:41, Aaron Lewis wrote:
> Set IA32_XSS for the guest and host during VM Enter and VM Exit
> transitions rather than by using the MSR-load areas.
> 
> Reviewed-by: Jim Mattson <jmattson@google.com>
> Signed-off-by: Aaron Lewis <aaronlewis@google.com>

This commit message is missing an explanation of why this is a good thing.

Also, the series is missing a cover letter that explains a bit more of
the overall picture.  I have no problem with no cover letter for
two-patch series, but at six it is definitely a requirement.

So I'm replying to this patch as a proxy for the whole series, and
asking: why is it useful to enable XSAVES (on AMD or anywhere) if
IA32_XSS is limited to zero anyway?

On AMD, we do have the problem that XSAVES is essentially a WRMSR with
no exit, albeit confined to the MSRs included in the set bits of
IA32_XSS.  But while that would be a (good) argument for writing 0 to
IA32_XSS around AMD vmentry, it shouldn't require making XSAVES
available to the guests.
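
(Side note for readers less familiar with the instruction: architecturally, XSAVES/XRSTORS operate on RFBM = (XCR0 | IA32_XSS) & EDX:EAX, which is why clearing IA32_XSS around AMD vmentry confines a non-intercepted XSAVES to the user states already reachable through XCR0. A minimal sketch of that masking, assuming the SDM definition and not taken from this series:)

#include <assert.h>
#include <stdint.h>

/* Components XSAVES/XRSTORS may touch: instruction mask ANDed with XCR0 | IA32_XSS. */
static uint64_t xsaves_rfbm(uint64_t insn_mask, uint64_t xcr0, uint64_t ia32_xss)
{
	return insn_mask & (xcr0 | ia32_xss);
}

int main(void)
{
	uint64_t xcr0 = 0x7;	/* x87 | SSE | AVX, as an example */

	/* With IA32_XSS cleared, even an all-ones mask reaches only XCR0 states. */
	assert(xsaves_rfbm(~0ull, xcr0, 0) == xcr0);
	return 0;
}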

Thanks,

Paolo

Sebastian Andrzej Siewior Oct. 9, 2019, 6:31 a.m. UTC | #3
On 2019-10-08 17:41:38 [-0700], Aaron Lewis wrote:
> Set IA32_XSS for the guest and host during VM Enter and VM Exit
> transitions rather than by using the MSR-load areas.

Could you please explain the *why* as part of the message?

> Reviewed-by: Jim Mattson <jmattson@google.com>
> Signed-off-by: Aaron Lewis <aaronlewis@google.com>

Sebastian
Vitaly Kuznetsov Oct. 9, 2019, 12:34 p.m. UTC | #4
Paolo Bonzini <pbonzini@redhat.com> writes:

> On 09/10/19 02:41, Aaron Lewis wrote:
>> Set IA32_XSS for the guest and host during VM Enter and VM Exit
>> transitions rather than by using the MSR-load areas.
>> 
>> Reviewed-by: Jim Mattson <jmattson@google.com>
>> Signed-off-by: Aaron Lewis <aaronlewis@google.com>
>
> This commit message is missing an explanation of why this is a good thing.
>
> Also, the series is missing a cover letter that explains a bit more of
> the overall picture.  I have no problem with no cover letter for
> two-patch series, but at six it is definitely a requirement.
>
> So I'm replying to this patch as a proxy for the whole series, and
> asking: why is it useful to enable XSAVES (on AMD or anywhere) if anyway
> IA32_XSS is limited to zero?

I know at least one good reason to do so: Hyper-V 2016 Gen1 (but not
Gen2!) doesn't seem to be able to boot without XSAVES (don't ask me
why). I'm not particularly sure whether Aaron is aiming to fix this one,
though.

Patch

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f8ecb6df5106..e2d7a7738c76 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5628,7 +5628,7 @@  static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
 	clgi();
-	kvm_load_guest_xcr0(vcpu);
+	kvm_load_guest_xsave_controls(vcpu);
 
 	if (lapic_in_kernel(vcpu) &&
 		vcpu->arch.apic->lapic_timer.timer_advance_ns)
@@ -5778,7 +5778,7 @@  static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
 		kvm_before_interrupt(&svm->vcpu);
 
-	kvm_put_guest_xcr0(vcpu);
+	kvm_load_host_xsave_controls(vcpu);
 	stgi();
 
 	/* Any pending NMI will happen here */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 409e9a7323f1..ff5ba28abecb 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -106,8 +106,6 @@  module_param(enable_apicv, bool, S_IRUGO);
 static bool __read_mostly nested = 1;
 module_param(nested, bool, S_IRUGO);
 
-static u64 __read_mostly host_xss;
-
 bool __read_mostly enable_pml = 1;
 module_param_named(pml, enable_pml, bool, S_IRUGO);
 
@@ -2074,11 +2072,6 @@  static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (data != 0)
 			return 1;
 		vcpu->arch.ia32_xss = data;
-		if (vcpu->arch.ia32_xss != host_xss)
-			add_atomic_switch_msr(vmx, MSR_IA32_XSS,
-				vcpu->arch.ia32_xss, host_xss, false);
-		else
-			clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
 		break;
 	case MSR_IA32_RTIT_CTL:
 		if ((pt_mode != PT_MODE_HOST_GUEST) ||
@@ -6540,7 +6533,7 @@  static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
 		vmx_set_interrupt_shadow(vcpu, 0);
 
-	kvm_load_guest_xcr0(vcpu);
+	kvm_load_guest_xsave_controls(vcpu);
 
 	if (static_cpu_has(X86_FEATURE_PKU) &&
 	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
@@ -6647,7 +6640,7 @@  static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 			__write_pkru(vmx->host_pkru);
 	}
 
-	kvm_put_guest_xcr0(vcpu);
+	kvm_load_host_xsave_controls(vcpu);
 
 	vmx->nested.nested_run_pending = 0;
 	vmx->idt_vectoring_info = 0;
@@ -7599,9 +7592,6 @@  static __init int hardware_setup(void)
 		WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost");
 	}
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		rdmsrl(MSR_IA32_XSS, host_xss);
-
 	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
 	    !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
 		enable_vpid = 0;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 661e2bf38526..e90e658fd8a9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -176,6 +176,8 @@  struct kvm_shared_msrs {
 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
 static struct kvm_shared_msrs __percpu *shared_msrs;
 
+static u64 __read_mostly host_xss;
+
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "pf_fixed", VCPU_STAT(pf_fixed) },
 	{ "pf_guest", VCPU_STAT(pf_guest) },
@@ -812,27 +814,39 @@  void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
-void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_load_guest_xsave_controls(struct kvm_vcpu *vcpu)
 {
 	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
 			!vcpu->guest_xcr0_loaded) {
 		/* kvm_set_xcr() also depends on this */
 		if (vcpu->arch.xcr0 != host_xcr0)
 			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
+
+		if (kvm_x86_ops->xsaves_supported() &&
+		    guest_cpuid_has(vcpu, X86_FEATURE_XSAVES) &&
+		    vcpu->arch.ia32_xss != host_xss)
+			wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
+
 		vcpu->guest_xcr0_loaded = 1;
 	}
 }
-EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0);
+EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_controls);
 
-void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_load_host_xsave_controls(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->guest_xcr0_loaded) {
 		if (vcpu->arch.xcr0 != host_xcr0)
 			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
+
+		if (kvm_x86_ops->xsaves_supported() &&
+		    guest_cpuid_has(vcpu, X86_FEATURE_XSAVES) &&
+		    vcpu->arch.ia32_xss != host_xss)
+			wrmsrl(MSR_IA32_XSS, host_xss);
+
 		vcpu->guest_xcr0_loaded = 0;
 	}
 }
-EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0);
+EXPORT_SYMBOL_GPL(kvm_load_host_xsave_controls);
 
 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
@@ -9293,6 +9307,9 @@  int kvm_arch_hardware_setup(void)
 		kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
 	}
 
+	if (boot_cpu_has(X86_FEATURE_XSAVES))
+		rdmsrl(MSR_IA32_XSS, host_xss);
+
 	kvm_init_msr_list();
 	return 0;
 }
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index dbf7442a822b..0d04e865665b 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -366,7 +366,7 @@  static inline bool kvm_pat_valid(u64 data)
 	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
 }
 
-void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
-void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
+void kvm_load_guest_xsave_controls(struct kvm_vcpu *vcpu);
+void kvm_load_host_xsave_controls(struct kvm_vcpu *vcpu);
 
 #endif