From: Keir Fraser
Date: Fri, 14 Jan 2011 08:34:53 +0000 (+0000)
Subject: x86: Avoid calling xsave_alloc_save_area before xsave_init
X-Git-Tag: 4.1.0-rc2~30^2~14
X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=5a96732bd4101281bc1c609a08688cc87de61b26;p=xen.git

x86: Avoid calling xsave_alloc_save_area before xsave_init

Currently, xsave_alloc_save_area() is called with xsave_cntxt_size=0
from the init_idle_domain->scheduler_init->alloc_vcpu->vcpu_initialise
call chain, which runs before xsave_init() is called in identify_cpu().
This can cause a buffer overflow in xmem_pool.

The idle domain does not use FPU, SSE, AVX, or any such extended
state, and does not need it saved. xsave_{alloc,free}_save_area()
should test-and-exit on is_idle_vcpu(), and our context switch code
should not be doing XSAVE when switching out an idle vcpu (a
stand-alone sketch of this guard follows the patch below).

Signed-off-by: Wei Gang
Signed-off-by: Keir Fraser
---

diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 477efec973..59dbecf9aa 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -16,6 +16,39 @@
 #include <asm/i387.h>
 #include <asm/asm_defns.h>
 
+void setup_fpu(struct vcpu *v)
+{
+    ASSERT(!is_idle_vcpu(v));
+
+    /* Avoid recursion. */
+    clts();
+
+    if ( !v->fpu_dirtied )
+    {
+        v->fpu_dirtied = 1;
+        if ( cpu_has_xsave )
+        {
+            if ( !v->fpu_initialised )
+                v->fpu_initialised = 1;
+
+            /* XCR0 normally represents what guest OS set. In case of Xen
+             * itself, we set all supported feature mask before doing
+             * save/restore.
+             */
+            set_xcr0(v->arch.xcr0_accum);
+            xrstor(v);
+            set_xcr0(v->arch.xcr0);
+        }
+        else
+        {
+            if ( v->fpu_initialised )
+                restore_fpu(v);
+            else
+                init_fpu();
+        }
+    }
+}
+
 void init_fpu(void)
 {
     asm volatile ( "fninit" );
@@ -29,6 +62,8 @@ void save_init_fpu(struct vcpu *v)
     unsigned long cr0 = read_cr0();
     char *fpu_ctxt = v->arch.guest_context.fpu_ctxt.x;
 
+    ASSERT(!is_idle_vcpu(v));
+
     /* This can happen, if a paravirtualised guest OS has set its CR0.TS. */
     if ( cr0 & X86_CR0_TS )
         clts();
@@ -138,6 +173,7 @@ void restore_fpu(struct vcpu *v)
 }
 
 #define XSTATE_CPUID 0xd
+#define XSAVE_AREA_MIN_SIZE (512 + 64) /* FP/SSE + XSAVE.HEADER */
 
 /*
  * Maximum size (in byte) of the XSAVE/XRSTOR save area required by all
@@ -177,7 +213,9 @@ void xsave_init(void)
     }
 
     /* FP/SSE, XSAVE.HEADER, YMM */
-    min_size = 512 + 64 + ((eax & XSTATE_YMM) ? XSTATE_YMM_SIZE : 0);
+    min_size = XSAVE_AREA_MIN_SIZE;
+    if ( eax & XSTATE_YMM )
+        min_size += XSTATE_YMM_SIZE;
     BUG_ON(ecx < min_size);
 
     /*
@@ -214,9 +252,11 @@ int xsave_alloc_save_area(struct vcpu *v)
 {
     void *save_area;
 
-    if ( !cpu_has_xsave )
+    if ( !cpu_has_xsave || is_idle_vcpu(v) )
         return 0;
 
+    BUG_ON(xsave_cntxt_size < XSAVE_AREA_MIN_SIZE);
+
     /* XSAVE/XRSTOR requires the save area be 64-byte-boundary aligned. */
     save_area = _xmalloc(xsave_cntxt_size, 64);
     if ( save_area == NULL )
diff --git a/xen/include/asm-x86/i387.h b/xen/include/asm-x86/i387.h
index 4cb67d482c..8b4e9ce1bd 100644
--- a/xen/include/asm-x86/i387.h
+++ b/xen/include/asm-x86/i387.h
@@ -110,6 +110,7 @@ static inline void xrstor(struct vcpu *v)
                    : "m" (*ptr), "a" (-1), "d" (-1), "D"(ptr));
 }
 
+extern void setup_fpu(struct vcpu *v);
 extern void init_fpu(void);
 extern void save_init_fpu(struct vcpu *v);
 extern void restore_fpu(struct vcpu *v);
@@ -124,35 +125,4 @@ extern void restore_fpu(struct vcpu *v);
     __asm__ __volatile__ ( "ldmxcsr %0" : : "m" (__mxcsr) ); \
 } while ( 0 )
 
-static inline void setup_fpu(struct vcpu *v)
-{
-    /* Avoid recursion. */
-    clts();
-
-    if ( !v->fpu_dirtied )
-    {
-        v->fpu_dirtied = 1;
-        if ( cpu_has_xsave )
-        {
-            if ( !v->fpu_initialised )
-                v->fpu_initialised = 1;
-
-            /* XCR0 normally represents what guest OS set. In case of Xen
-             * itself, we set all supported feature mask before doing
-             * save/restore.
-             */
-            set_xcr0(v->arch.xcr0_accum);
-            xrstor(v);
-            set_xcr0(v->arch.xcr0);
-        }
-        else
-        {
-            if ( v->fpu_initialised )
-                restore_fpu(v);
-            else
-                init_fpu();
-        }
-    }
-}
-
 #endif /* __ASM_I386_I387_H */
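
For readers outside the Xen tree, the sketch below is a minimal,
compilable illustration of the guard pattern the patch adds to
xsave_alloc_save_area(). It is not Xen source: struct vcpu is reduced
to a stub, _xmalloc() is replaced by malloc(), BUG_ON() becomes
assert(), and the main() driver and the 832-byte example size are
invented for demonstration. Only the control flow mirrors the patch.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define XSAVE_AREA_MIN_SIZE (512 + 64)    /* FP/SSE + XSAVE.HEADER */

struct vcpu {
    int is_idle;                 /* stand-in for Xen's idle-vcpu flag */
    void *xsave_area;
};

static int cpu_has_xsave = 1;             /* assume an XSAVE-capable CPU */
static unsigned int xsave_cntxt_size;     /* 0 until "xsave_init()" runs */

static int is_idle_vcpu(const struct vcpu *v)
{
    return v->is_idle;
}

/* Mirrors the patched xsave_alloc_save_area(): idle vcpus exit early,
 * and any other caller arriving before xsave_cntxt_size is set is a
 * logic error rather than a request for a zero-sized buffer. */
static int xsave_alloc_save_area(struct vcpu *v)
{
    if ( !cpu_has_xsave || is_idle_vcpu(v) )
        return 0;

    assert(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE); /* BUG_ON() stand-in */

    v->xsave_area = malloc(xsave_cntxt_size);        /* _xmalloc() stand-in */
    return v->xsave_area ? 0 : -1;
}

int main(void)
{
    struct vcpu idle = { .is_idle = 1, .xsave_area = NULL };
    struct vcpu guest = { .is_idle = 0, .xsave_area = NULL };

    /* Idle vcpu initialised before "xsave_init()": now a safe no-op. */
    assert(xsave_alloc_save_area(&idle) == 0 && idle.xsave_area == NULL);
    puts("idle vcpu: no save area allocated");

    xsave_cntxt_size = 832;  /* pretend xsave_init() ran (invented size) */
    if ( xsave_alloc_save_area(&guest) == 0 )
        printf("guest vcpu: %u-byte save area allocated\n", xsave_cntxt_size);

    free(guest.xsave_area);
    return 0;
}

Note the design choice this reflects: an early allocation for a
non-idle vcpu is treated as a bug (BUG_ON, assert here) rather than
being papered over with a minimum-size allocation, so an ordering
mistake like the original one fails loudly instead of silently
corrupting the xmem_pool.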