This is in preparation for the xsave area size exceeding a page's worth of
space, as will happen with AMX as well as Architectural LBR.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
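
The switch from _xzalloc()/xfree() to _xvzalloc()/XVFREE() relies on the
xvmalloc layer choosing a suitable backing allocator for the requested size.
Below is a hypothetical sketch only, illustrating the assumed split (the
threshold, the helper name xvzalloc_sketch, and the exact includes are
assumptions for illustration, not Xen's actual xvmalloc implementation):

    /*
     * Sketch of the assumed behaviour: sizes that fit in a page come from
     * the physically contiguous xmalloc pool, larger sizes fall back to
     * vmalloc-style, merely virtually contiguous memory.
     */
    #include <xen/mm.h>       /* PAGE_SIZE (assumed include path) */
    #include <xen/xmalloc.h>  /* _xzalloc() */
    #include <xen/vmap.h>     /* vzalloc() */

    static void *xvzalloc_sketch(size_t size, size_t align)
    {
        if ( size <= PAGE_SIZE )
            return _xzalloc(size, align); /* small: physically contiguous */

        return vzalloc(size);             /* large: page mappings suffice */
    }

On the freeing side, XVFREE() is used in the same spirit as XFREE(): it
releases the allocation via the matching free routine and clears the pointer,
which is why the explicit NULL assignment in xstate_free_save_area() can be
dropped in the hunk below.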
#include <xen/param.h>
#include <xen/percpu.h>
#include <xen/sched.h>
+#include <xen/xvmalloc.h>
#include <asm/cpu-policy.h>
#include <asm/current.h>
/* XSAVE/XRSTOR requires the save area be 64-byte-boundary aligned. */
BUILD_BUG_ON(__alignof(*save_area) < 64);
- save_area = _xzalloc(size, __alignof(*save_area));
+ save_area = _xvzalloc(size, __alignof(*save_area));
if ( save_area == NULL )
return -ENOMEM;
void xstate_free_save_area(struct vcpu *v)
{
- xfree(v->arch.xsave_area);
- v->arch.xsave_area = NULL;
+ XVFREE(v->arch.xsave_area);
}
static bool valid_xcr0(uint64_t xcr0)