diff mbox series

[14/31] x86/fpu: Replace KVMs homebrewn FPU copy from user

Message ID 20211011223611.129308001@linutronix.de (mailing list archive)
State New, archived
Headers show
Series x86/fpu: Preparatory cleanups for AMX support (part 1) | expand

Commit Message

Thomas Gleixner Oct. 12, 2021, midnight UTC
Copying a user space buffer to the memory buffer is already available in
the FPU core. The copy mechanism in KVM lacks sanity checks and needs to
use cpuid() to lookup the offset of each component, while the FPU core has
this information cached.

Make the FPU core variant accessible for KVM and replace the homebrewn
mechanism.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm@vger.kernel.org
Cc: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/fpu/api.h |    3 +
 arch/x86/kernel/fpu/core.c     |   38 ++++++++++++++++++++-
 arch/x86/kernel/fpu/xstate.c   |    3 -
 arch/x86/kvm/x86.c             |   74 +----------------------------------------
 4 files changed, 44 insertions(+), 74 deletions(-)

Comments

Borislav Petkov Oct. 12, 2021, 5 p.m. UTC | #1
On Tue, Oct 12, 2021 at 02:00:19AM +0200, Thomas Gleixner wrote:
> Copying a user space buffer to the memory buffer is already available in
> the FPU core. The copy mechanism in KVM lacks sanity checks and needs to
> use cpuid() to lookup the offset of each component, while the FPU core has
> this information cached.
> 
> Make the FPU core variant accessible for KVM and replace the homebrewn
> mechanism.

I think you mean "homebred" in that patch... or "home brewed", that
works too, I think.

> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> Cc: kvm@vger.kernel.org
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/include/asm/fpu/api.h |    3 +
>  arch/x86/kernel/fpu/core.c     |   38 ++++++++++++++++++++-
>  arch/x86/kernel/fpu/xstate.c   |    3 -
>  arch/x86/kvm/x86.c             |   74 +----------------------------------------
>  4 files changed, 44 insertions(+), 74 deletions(-)
> 
> --- a/arch/x86/include/asm/fpu/api.h
> +++ b/arch/x86/include/asm/fpu/api.h
> @@ -116,4 +116,7 @@ extern void fpu_init_fpstate_user(struct
>  /* KVM specific functions */
>  extern void fpu_swap_kvm_fpu(struct fpu *save, struct fpu *rstor, u64 restore_mask);
>  
> +struct kvm_vcpu;
> +extern int fpu_copy_kvm_uabi_to_vcpu(struct fpu *fpu, const void *buf, u64 xcr0, u32 *pkru);
> +
>  #endif /* _ASM_X86_FPU_API_H */
> --- a/arch/x86/kernel/fpu/core.c
> +++ b/arch/x86/kernel/fpu/core.c
> @@ -174,7 +174,43 @@ void fpu_swap_kvm_fpu(struct fpu *save,
>  	fpregs_unlock();
>  }
>  EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpu);
> -#endif
> +
> +int fpu_copy_kvm_uabi_to_vcpu(struct fpu *fpu, const void *buf, u64 xcr0,
> +			      u32 *vpkru)

Right, except that there's no @vcpu in the args of that function. I
guess you could call it

fpu_copy_kvm_uabi_to_buf()

and that @buf can be

vcpu->arch.guest_fpu

...

Just a nitpick anyway.
Paolo Bonzini Oct. 12, 2021, 5:30 p.m. UTC | #2
On 12/10/21 02:00, Thomas Gleixner wrote:
> Copying a user space buffer to the memory buffer is already available in
> the FPU core. The copy mechanism in KVM lacks sanity checks and needs to
> use cpuid() to lookup the offset of each component, while the FPU core has
> this information cached.
> 
> Make the FPU core variant accessible for KVM and replace the homebrewn
> mechanism.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> Cc: kvm@vger.kernel.org
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> ---
>   arch/x86/include/asm/fpu/api.h |    3 +
>   arch/x86/kernel/fpu/core.c     |   38 ++++++++++++++++++++-
>   arch/x86/kernel/fpu/xstate.c   |    3 -
>   arch/x86/kvm/x86.c             |   74 +----------------------------------------
>   4 files changed, 44 insertions(+), 74 deletions(-)
> 
> --- a/arch/x86/include/asm/fpu/api.h
> +++ b/arch/x86/include/asm/fpu/api.h
> @@ -116,4 +116,7 @@ extern void fpu_init_fpstate_user(struct
>   /* KVM specific functions */
>   extern void fpu_swap_kvm_fpu(struct fpu *save, struct fpu *rstor, u64 restore_mask);
>   
> +struct kvm_vcpu;
> +extern int fpu_copy_kvm_uabi_to_vcpu(struct fpu *fpu, const void *buf, u64 xcr0, u32 *pkru);
> +
>   #endif /* _ASM_X86_FPU_API_H */
> --- a/arch/x86/kernel/fpu/core.c
> +++ b/arch/x86/kernel/fpu/core.c
> @@ -174,7 +174,43 @@ void fpu_swap_kvm_fpu(struct fpu *save,
>   	fpregs_unlock();
>   }
>   EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpu);
> -#endif
> +
> +int fpu_copy_kvm_uabi_to_vcpu(struct fpu *fpu, const void *buf, u64 xcr0,
> +			      u32 *vpkru)
> +{
> +	union fpregs_state *kstate = &fpu->state;
> +	const union fpregs_state *ustate = buf;
> +	struct pkru_state *xpkru;
> +	int ret;
> +
> +	if (!cpu_feature_enabled(X86_FEATURE_XSAVE)) {
> +		if (ustate->xsave.header.xfeatures & ~XFEATURE_MASK_FPSSE)
> +			return -EINVAL;
> +		if (ustate->fxsave.mxcsr & ~mxcsr_feature_mask)
> +			return -EINVAL;
> +		memcpy(&kstate->fxsave, &ustate->fxsave, sizeof(ustate->fxsave));
> +		return 0;
> +	}
> +
> +	if (ustate->xsave.header.xfeatures & ~xcr0)
> +		return -EINVAL;
> +
> +	ret = copy_uabi_from_kernel_to_xstate(&kstate->xsave, ustate);
> +	if (ret)
> +		return ret;
> +
> +	/* Retrieve PKRU if not in init state */
> +	if (kstate->xsave.header.xfeatures & XFEATURE_MASK_PKRU) {
> +		xpkru = get_xsave_addr(&kstate->xsave, XFEATURE_PKRU);
> +		*vpkru = xpkru->pkru;
> +	}
> +
> +	/* Ensure that XCOMP_BV is set up for XSAVES */
> +	xstate_init_xcomp_bv(&kstate->xsave, xfeatures_mask_uabi());
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(fpu_copy_kvm_uabi_to_vcpu);
> +#endif /* CONFIG_KVM */
>   
>   void kernel_fpu_begin_mask(unsigned int kfpu_mask)
>   {
> --- a/arch/x86/kernel/fpu/xstate.c
> +++ b/arch/x86/kernel/fpu/xstate.c
> @@ -1134,8 +1134,7 @@ static int copy_uabi_to_xstate(struct xr
>   
>   /*
>    * Convert from a ptrace standard-format kernel buffer to kernel XSAVE[S]
> - * format and copy to the target thread. This is called from
> - * xstateregs_set().
> + * format and copy to the target thread. Used by ptrace and KVM.
>    */
>   int copy_uabi_from_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf)
>   {
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -4695,8 +4695,6 @@ static int kvm_vcpu_ioctl_x86_set_debugr
>   	return 0;
>   }
>   
> -#define XSTATE_COMPACTION_ENABLED (1ULL << 63)
> -
>   static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
>   {
>   	struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave;
> @@ -4740,50 +4738,6 @@ static void fill_xsave(u8 *dest, struct
>   	}
>   }
>   
> -static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
> -{
> -	struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave;
> -	u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
> -	u64 valid;
> -
> -	/*
> -	 * Copy legacy XSAVE area, to avoid complications with CPUID
> -	 * leaves 0 and 1 in the loop below.
> -	 */
> -	memcpy(xsave, src, XSAVE_HDR_OFFSET);
> -
> -	/* Set XSTATE_BV and possibly XCOMP_BV.  */
> -	xsave->header.xfeatures = xstate_bv;
> -	if (boot_cpu_has(X86_FEATURE_XSAVES))
> -		xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
> -
> -	/*
> -	 * Copy each region from the non-compacted offset to the
> -	 * possibly compacted offset.
> -	 */
> -	valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
> -	while (valid) {
> -		u32 size, offset, ecx, edx;
> -		u64 xfeature_mask = valid & -valid;
> -		int xfeature_nr = fls64(xfeature_mask) - 1;
> -
> -		cpuid_count(XSTATE_CPUID, xfeature_nr,
> -			    &size, &offset, &ecx, &edx);
> -
> -		if (xfeature_nr == XFEATURE_PKRU) {
> -			memcpy(&vcpu->arch.pkru, src + offset,
> -			       sizeof(vcpu->arch.pkru));
> -		} else {
> -			void *dest = get_xsave_addr(xsave, xfeature_nr);
> -
> -			if (dest)
> -				memcpy(dest, src + offset, size);
> -		}
> -
> -		valid -= xfeature_mask;
> -	}
> -}
> -
>   static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
>   					 struct kvm_xsave *guest_xsave)
>   {
> @@ -4802,37 +4756,15 @@ static void kvm_vcpu_ioctl_x86_get_xsave
>   	}
>   }
>   
> -#define XSAVE_MXCSR_OFFSET 24
> -
>   static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
>   					struct kvm_xsave *guest_xsave)
>   {
> -	u64 xstate_bv;
> -	u32 mxcsr;
> -
>   	if (!vcpu->arch.guest_fpu)
>   		return 0;
>   
> -	xstate_bv = *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
> -	mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
> -
> -	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
> -		/*
> -		 * Here we allow setting states that are not present in
> -		 * CPUID leaf 0xD, index 0, EDX:EAX.  This is for compatibility
> -		 * with old userspace.
> -		 */
> -		if (xstate_bv & ~supported_xcr0 || mxcsr & ~mxcsr_feature_mask)
> -			return -EINVAL;
> -		load_xsave(vcpu, (u8 *)guest_xsave->region);
> -	} else {
> -		if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
> -			mxcsr & ~mxcsr_feature_mask)
> -			return -EINVAL;
> -		memcpy(&vcpu->arch.guest_fpu->state.fxsave,
> -			guest_xsave->region, sizeof(struct fxregs_state));
> -	}
> -	return 0;
> +	return fpu_copy_kvm_uabi_to_vcpu(vcpu->arch.guest_fpu,
> +					 guest_xsave->region,
> +					 supported_xcr0, &vcpu->arch.pkru);
>   }
>   
>   static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
> 

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Sean Christopherson Oct. 13, 2021, 2:57 p.m. UTC | #3
On Tue, Oct 12, 2021, Borislav Petkov wrote:
> On Tue, Oct 12, 2021 at 02:00:19AM +0200, Thomas Gleixner wrote:
> > --- a/arch/x86/include/asm/fpu/api.h
> > +++ b/arch/x86/include/asm/fpu/api.h
> > @@ -116,4 +116,7 @@ extern void fpu_init_fpstate_user(struct
> >  /* KVM specific functions */
> >  extern void fpu_swap_kvm_fpu(struct fpu *save, struct fpu *rstor, u64 restore_mask);
> >  
> > +struct kvm_vcpu;
> > +extern int fpu_copy_kvm_uabi_to_vcpu(struct fpu *fpu, const void *buf, u64 xcr0, u32 *pkru);
> > +
> >  #endif /* _ASM_X86_FPU_API_H */
> > --- a/arch/x86/kernel/fpu/core.c
> > +++ b/arch/x86/kernel/fpu/core.c
> > @@ -174,7 +174,43 @@ void fpu_swap_kvm_fpu(struct fpu *save,
> >  	fpregs_unlock();
> >  }
> >  EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpu);
> > -#endif
> > +
> > +int fpu_copy_kvm_uabi_to_vcpu(struct fpu *fpu, const void *buf, u64 xcr0,
> > +			      u32 *vpkru)
> 
> Right, except that there's no @vcpu in the args of that function. I
> guess you could call it
> 
> fpu_copy_kvm_uabi_to_buf()
> 
> and that @buf can be
> 
> vcpu->arch.guest_fpu

But the existing @buf is the userspace pointer, which semantically makes sense
because the userspace pointer is the "buffer" and the destination @fpu (and @prku)
is vCPU state, not a buffer.

That said, I also struggled with the lack of @vcpu.  What about prepending vcpu_
to fpu and to pkru?  E.g.

  int fpu_copy_kvm_uabi_to_vcpu(struct fpu *vcpu_fpu, const void *buf, u64 xcr0,
  				u32 *vcpu_pkru)
Paolo Bonzini Oct. 13, 2021, 3:12 p.m. UTC | #4
On 13/10/21 16:57, Sean Christopherson wrote:
>>> +int fpu_copy_kvm_uabi_to_vcpu(struct fpu *fpu, const void *buf, u64 xcr0,
>>> +			      u32 *vpkru)
>> Right, except that there's no @vcpu in the args of that function. I
>> guess you could call it
>>
>> fpu_copy_kvm_uabi_to_buf()
>>
>> and that @buf can be
>>
>> vcpu->arch.guest_fpu
> But the existing @buf is the userspace pointer, which semantically makes sense
> because the userspace pointer is the "buffer" and the destination @fpu (and @pkru)
> is vCPU state, not a buffer.
> 
> That said, I also struggled with the lack of @vcpu.  What about prepending vcpu_
> to fpu and to pkru?  E.g.
> 
>    int fpu_copy_kvm_uabi_to_vcpu(struct fpu *vcpu_fpu, const void *buf, u64 xcr0,
>    				u32 *vcpu_pkru)
> 

It doesn't matter much that the source is somehow related to a vCPU, as 
long as the FPU is concerned.  If anything I would even drop the "v" 
from vpkru, but that's really nitpicking.

Paolo
Thomas Gleixner Oct. 13, 2021, 3:16 p.m. UTC | #5
On Wed, Oct 13 2021 at 14:57, Sean Christopherson wrote:
> On Tue, Oct 12, 2021, Borislav Petkov wrote:
>> On Tue, Oct 12, 2021 at 02:00:19AM +0200, Thomas Gleixner wrote:
>> > --- a/arch/x86/include/asm/fpu/api.h
>> > +++ b/arch/x86/include/asm/fpu/api.h
>> > @@ -116,4 +116,7 @@ extern void fpu_init_fpstate_user(struct
>> >  /* KVM specific functions */
>> >  extern void fpu_swap_kvm_fpu(struct fpu *save, struct fpu *rstor, u64 restore_mask);
>> >  
>> > +struct kvm_vcpu;
>> > +extern int fpu_copy_kvm_uabi_to_vcpu(struct fpu *fpu, const void *buf, u64 xcr0, u32 *pkru);
>> > +
>> >  #endif /* _ASM_X86_FPU_API_H */
>> > --- a/arch/x86/kernel/fpu/core.c
>> > +++ b/arch/x86/kernel/fpu/core.c
>> > @@ -174,7 +174,43 @@ void fpu_swap_kvm_fpu(struct fpu *save,
>> >  	fpregs_unlock();
>> >  }
>> >  EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpu);
>> > -#endif
>> > +
>> > +int fpu_copy_kvm_uabi_to_vcpu(struct fpu *fpu, const void *buf, u64 xcr0,
>> > +			      u32 *vpkru)
>> 
>> Right, except that there's no @vcpu in the args of that function. I
>> guess you could call it
>> 
>> fpu_copy_kvm_uabi_to_buf()
>> 
>> and that @buf can be
>> 
>> vcpu->arch.guest_fpu
>
> But the existing @buf is the userspace pointer, which semantically makes sense
> because the userspace pointer is the "buffer" and the destination @fpu (and @prku)
> is vCPU state, not a buffer.
>
> That said, I also struggled with the lack of @vcpu.  What about prepending vcpu_
> to fpu and to pkru?  E.g.
>
>   int fpu_copy_kvm_uabi_to_vcpu(struct fpu *vcpu_fpu, const void *buf, u64 xcr0,
>   				u32 *vcpu_pkru)

I've renamed them to:

     fpu_copy_kvm_uabi_to_fpstate()
     fpu_copy_fpstate_to_kvm_uabi()

See
https://git.kernel.org/pub/scm/linux/kernel/git/tglx/devel.git/log/?h=x86/fpu-1

Thanks,

        tglx
diff mbox series

Patch

--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -116,4 +116,7 @@  extern void fpu_init_fpstate_user(struct
 /* KVM specific functions */
 extern void fpu_swap_kvm_fpu(struct fpu *save, struct fpu *rstor, u64 restore_mask);
 
+struct kvm_vcpu;
+extern int fpu_copy_kvm_uabi_to_vcpu(struct fpu *fpu, const void *buf, u64 xcr0, u32 *pkru);
+
 #endif /* _ASM_X86_FPU_API_H */
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -174,7 +174,43 @@  void fpu_swap_kvm_fpu(struct fpu *save,
 	fpregs_unlock();
 }
 EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpu);
-#endif
+
+int fpu_copy_kvm_uabi_to_vcpu(struct fpu *fpu, const void *buf, u64 xcr0,
+			      u32 *vpkru)
+{
+	union fpregs_state *kstate = &fpu->state;
+	const union fpregs_state *ustate = buf;
+	struct pkru_state *xpkru;
+	int ret;
+
+	if (!cpu_feature_enabled(X86_FEATURE_XSAVE)) {
+		if (ustate->xsave.header.xfeatures & ~XFEATURE_MASK_FPSSE)
+			return -EINVAL;
+		if (ustate->fxsave.mxcsr & ~mxcsr_feature_mask)
+			return -EINVAL;
+		memcpy(&kstate->fxsave, &ustate->fxsave, sizeof(ustate->fxsave));
+		return 0;
+	}
+
+	if (ustate->xsave.header.xfeatures & ~xcr0)
+		return -EINVAL;
+
+	ret = copy_uabi_from_kernel_to_xstate(&kstate->xsave, ustate);
+	if (ret)
+		return ret;
+
+	/* Retrieve PKRU if not in init state */
+	if (kstate->xsave.header.xfeatures & XFEATURE_MASK_PKRU) {
+		xpkru = get_xsave_addr(&kstate->xsave, XFEATURE_PKRU);
+		*vpkru = xpkru->pkru;
+	}
+
+	/* Ensure that XCOMP_BV is set up for XSAVES */
+	xstate_init_xcomp_bv(&kstate->xsave, xfeatures_mask_uabi());
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fpu_copy_kvm_uabi_to_vcpu);
+#endif /* CONFIG_KVM */
 
 void kernel_fpu_begin_mask(unsigned int kfpu_mask)
 {
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -1134,8 +1134,7 @@  static int copy_uabi_to_xstate(struct xr
 
 /*
  * Convert from a ptrace standard-format kernel buffer to kernel XSAVE[S]
- * format and copy to the target thread. This is called from
- * xstateregs_set().
+ * format and copy to the target thread. Used by ptrace and KVM.
  */
 int copy_uabi_from_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf)
 {
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4695,8 +4695,6 @@  static int kvm_vcpu_ioctl_x86_set_debugr
 	return 0;
 }
 
-#define XSTATE_COMPACTION_ENABLED (1ULL << 63)
-
 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
 {
 	struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave;
@@ -4740,50 +4738,6 @@  static void fill_xsave(u8 *dest, struct
 	}
 }
 
-static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
-{
-	struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave;
-	u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
-	u64 valid;
-
-	/*
-	 * Copy legacy XSAVE area, to avoid complications with CPUID
-	 * leaves 0 and 1 in the loop below.
-	 */
-	memcpy(xsave, src, XSAVE_HDR_OFFSET);
-
-	/* Set XSTATE_BV and possibly XCOMP_BV.  */
-	xsave->header.xfeatures = xstate_bv;
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
-
-	/*
-	 * Copy each region from the non-compacted offset to the
-	 * possibly compacted offset.
-	 */
-	valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
-	while (valid) {
-		u32 size, offset, ecx, edx;
-		u64 xfeature_mask = valid & -valid;
-		int xfeature_nr = fls64(xfeature_mask) - 1;
-
-		cpuid_count(XSTATE_CPUID, xfeature_nr,
-			    &size, &offset, &ecx, &edx);
-
-		if (xfeature_nr == XFEATURE_PKRU) {
-			memcpy(&vcpu->arch.pkru, src + offset,
-			       sizeof(vcpu->arch.pkru));
-		} else {
-			void *dest = get_xsave_addr(xsave, xfeature_nr);
-
-			if (dest)
-				memcpy(dest, src + offset, size);
-		}
-
-		valid -= xfeature_mask;
-	}
-}
-
 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
 					 struct kvm_xsave *guest_xsave)
 {
@@ -4802,37 +4756,15 @@  static void kvm_vcpu_ioctl_x86_get_xsave
 	}
 }
 
-#define XSAVE_MXCSR_OFFSET 24
-
 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
 					struct kvm_xsave *guest_xsave)
 {
-	u64 xstate_bv;
-	u32 mxcsr;
-
 	if (!vcpu->arch.guest_fpu)
 		return 0;
 
-	xstate_bv = *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
-	mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
-
-	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
-		/*
-		 * Here we allow setting states that are not present in
-		 * CPUID leaf 0xD, index 0, EDX:EAX.  This is for compatibility
-		 * with old userspace.
-		 */
-		if (xstate_bv & ~supported_xcr0 || mxcsr & ~mxcsr_feature_mask)
-			return -EINVAL;
-		load_xsave(vcpu, (u8 *)guest_xsave->region);
-	} else {
-		if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
-			mxcsr & ~mxcsr_feature_mask)
-			return -EINVAL;
-		memcpy(&vcpu->arch.guest_fpu->state.fxsave,
-			guest_xsave->region, sizeof(struct fxregs_state));
-	}
-	return 0;
+	return fpu_copy_kvm_uabi_to_vcpu(vcpu->arch.guest_fpu,
+					 guest_xsave->region,
+					 supported_xcr0, &vcpu->arch.pkru);
 }
 
 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,