@@ -8,6 +8,7 @@
#include <xen/param.h>
#include <xen/percpu.h>
#include <xen/sched.h>
+#include <xen/xvmalloc.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/hvm/support.h>
@@ -522,7 +523,7 @@ int xstate_alloc_save_area(struct vcpu *
/* XSAVE/XRSTOR requires the save area be 64-byte-boundary aligned. */
BUILD_BUG_ON(__alignof(*save_area) < 64);
- save_area = _xzalloc(size, __alignof(*save_area));
+ save_area = _xvzalloc(size, __alignof(*save_area));
if ( save_area == NULL )
return -ENOMEM;
@@ -543,8 +544,7 @@ int xstate_alloc_save_area(struct vcpu *
void xstate_free_save_area(struct vcpu *v)
{
- xfree(v->arch.xsave_area);
- v->arch.xsave_area = NULL;
+ XVFREE(v->arch.xsave_area);
}
static unsigned int _xstate_ctxt_size(u64 xcr0)
This is in preparation for the area size exceeding a page's worth of space, as will happen with AMX as well as Architectural LBR.

Signed-off-by: Jan Beulich <jbeulich@suse.com>