if ( p->basic.avx )
{
- xstates |= XSTATE_YMM;
+ xstates |= X86_XCR0_YMM;
xstate_size = max(xstate_size,
- xstate_offsets[_XSTATE_YMM] +
- xstate_sizes[_XSTATE_YMM]);
+ xstate_offsets[X86_XCR0_YMM_POS] +
+ xstate_sizes[X86_XCR0_YMM_POS]);
}
if ( p->feat.mpx )
{
- xstates |= XSTATE_BNDREGS | XSTATE_BNDCSR;
+ xstates |= X86_XCR0_BNDREGS | X86_XCR0_BNDCSR;
xstate_size = max(xstate_size,
- xstate_offsets[_XSTATE_BNDCSR] +
- xstate_sizes[_XSTATE_BNDCSR]);
+ xstate_offsets[X86_XCR0_BNDCSR_POS] +
+ xstate_sizes[X86_XCR0_BNDCSR_POS]);
}
if ( p->feat.avx512f )
{
- xstates |= XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM;
+ xstates |= X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM;
xstate_size = max(xstate_size,
- xstate_offsets[_XSTATE_HI_ZMM] +
- xstate_sizes[_XSTATE_HI_ZMM]);
+ xstate_offsets[X86_XCR0_HI_ZMM_POS] +
+ xstate_sizes[X86_XCR0_HI_ZMM_POS]);
}
if ( p->feat.pku )
{
- xstates |= XSTATE_PKRU;
+ xstates |= X86_XCR0_PKRU;
xstate_size = max(xstate_size,
- xstate_offsets[_XSTATE_PKRU] +
- xstate_sizes[_XSTATE_PKRU]);
+ xstate_offsets[X86_XCR0_PKRU_POS] +
+ xstate_sizes[X86_XCR0_PKRU_POS]);
}
if ( p->extd.lwp )
{
- xstates |= XSTATE_LWP;
+ xstates |= X86_XCR0_LWP;
xstate_size = max(xstate_size,
- xstate_offsets[_XSTATE_LWP] +
- xstate_sizes[_XSTATE_LWP]);
+ xstate_offsets[X86_XCR0_LWP_POS] +
+ xstate_sizes[X86_XCR0_LWP_POS]);
}
p->xstate.max_size = xstate_size;
break;
case 0x8000001c:
- if ( (v->arch.xcr0 & XSTATE_LWP) && cpu_has_svm )
+ if ( (v->arch.xcr0 & X86_XCR0_LWP) && cpu_has_svm )
/* Turn on available bit and other features specified in lwp_cfg. */
res->a = (res->d & v->arch.hvm_svm.guest_lwp_cfg) | 1;
break;
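
The recalculation above keeps, for every enabled component, the maximum of offset + size in the uncompacted XSAVE layout. A minimal standalone sketch of that logic (not part of the patch; the table contents are stand-ins for the CPUID-derived xstate_offsets[]/xstate_sizes[], with 576/256 being the architectural uncompacted YMM placement, and main()/avx made up for the example):

    /*
     * Sketch only: recompute the uncompacted XSAVE area size as the
     * maximum of offset + size over all enabled components, as the
     * hunk above does per feature flag.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define X86_XCR0_YMM_POS 2
    #define X86_XCR0_YMM     (1ULL << X86_XCR0_YMM_POS)

    /* Illustrative stand-ins for Xen's CPUID-derived tables. */
    static const unsigned int xstate_offsets[] = { [X86_XCR0_YMM_POS] = 576 };
    static const unsigned int xstate_sizes[]   = { [X86_XCR0_YMM_POS] = 256 };

    static unsigned int max(unsigned int a, unsigned int b)
    {
        return a > b ? a : b;
    }

    int main(void)
    {
        int avx = 1;                            /* stands in for p->basic.avx */
        uint64_t xstates = 3;                   /* FP | SSE, always present */
        unsigned int xstate_size = 512 + 64;    /* legacy region + XSAVE header */

        if ( avx )
        {
            xstates |= X86_XCR0_YMM;
            xstate_size = max(xstate_size,
                              xstate_offsets[X86_XCR0_YMM_POS] +
                              xstate_sizes[X86_XCR0_YMM_POS]);
        }

        printf("xstates=%#llx, xstate_size=%u\n",
               (unsigned long long)xstates, xstate_size);
        return 0;
    }
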
case X86EMUL_FPU_xmm:
break;
case X86EMUL_FPU_ymm:
- if ( !(curr->arch.xcr0 & XSTATE_SSE) ||
- !(curr->arch.xcr0 & XSTATE_YMM) )
+ if ( !(curr->arch.xcr0 & X86_XCR0_SSE) ||
+ !(curr->arch.xcr0 & X86_XCR0_YMM) )
return X86EMUL_UNHANDLEABLE;
break;
default:
* enabled in BNDCFGS.
*/
if ( (val & IA32_BNDCFGS_ENABLE) &&
- !(v->arch.xcr0_accum & (XSTATE_BNDREGS | XSTATE_BNDCSR)) )
+ !(v->arch.xcr0_accum & (X86_XCR0_BNDREGS | X86_XCR0_BNDCSR)) )
{
uint64_t xcr0 = get_xcr0();
int rc;
return false;
rc = handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
- xcr0 | XSTATE_BNDREGS | XSTATE_BNDCSR);
+ xcr0 | X86_XCR0_BNDREGS | X86_XCR0_BNDCSR);
if ( rc )
{
* guest may enable the feature in CR4 without enabling it in XCR0. We
* need to context switch / migrate PKRU nevertheless.
*/
- if ( (value & X86_CR4_PKE) && !(v->arch.xcr0_accum & XSTATE_PKRU) )
+ if ( (value & X86_CR4_PKE) && !(v->arch.xcr0_accum & X86_XCR0_PKRU) )
{
int rc = handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
- get_xcr0() | XSTATE_PKRU);
+ get_xcr0() | X86_XCR0_PKRU);
if ( rc )
{
}
if ( handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
- get_xcr0() & ~XSTATE_PKRU) )
+ get_xcr0() & ~X86_XCR0_PKRU) )
/* nothing, best effort only */;
}
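
The CR4.PKE path above leans on handle_xsetbv() accumulating every component ever enabled into xcr0_accum, so PKRU can be dropped from the live XCR0 again while still being context switched and migrated. A toy model of just that accumulation behaviour (not part of the patch; get_xcr0()/handle_xsetbv() here are simplified stand-ins without the XCR index argument, and the globals replace the vCPU fields):

    #include <stdint.h>
    #include <assert.h>

    #define X86_XCR0_FP    (1ULL << 0)
    #define X86_XCR0_SSE   (1ULL << 1)
    #define X86_XCR0_PKRU  (1ULL << 9)

    /* Hypothetical stand-ins for v->arch.xcr0 and v->arch.xcr0_accum. */
    static uint64_t xcr0       = X86_XCR0_FP | X86_XCR0_SSE;
    static uint64_t xcr0_accum = X86_XCR0_FP | X86_XCR0_SSE;

    static uint64_t get_xcr0(void) { return xcr0; }

    static int handle_xsetbv(uint64_t new_bv)
    {
        xcr0 = new_bv;
        xcr0_accum |= new_bv;   /* accumulate: never forget an enabled component */
        return 0;
    }

    int main(void)
    {
        int cr4_pke = 1;        /* guest sets CR4.PKE without touching XCR0 */

        if ( cr4_pke && !(xcr0_accum & X86_XCR0_PKRU) )
        {
            assert(handle_xsetbv(get_xcr0() | X86_XCR0_PKRU) == 0);
            /* Drop it from the live XCR0 again; xcr0_accum keeps the record. */
            assert(handle_xsetbv(get_xcr0() & ~X86_XCR0_PKRU) == 0);
        }

        assert(!(xcr0 & X86_XCR0_PKRU));
        assert(xcr0_accum & X86_XCR0_PKRU);
        return 0;
    }
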
fpu_ctxt->mxcsr = MXCSR_DEFAULT;
if ( v->arch.xsave_area )
{
- v->arch.xsave_area->xsave_hdr.xstate_bv = XSTATE_FP;
+ v->arch.xsave_area->xsave_hdr.xstate_bv = X86_XCR0_FP;
v->arch.xsave_area->xsave_hdr.xcomp_bv = 0;
}
* (in which case executing any suitable non-prefixed branch
* instruction would do), or use XRSTOR.
*/
- xstate_set_init(XSTATE_BNDREGS);
+ xstate_set_init(X86_XCR0_BNDREGS);
}
done:;
}
"=m" (*ptr), \
"a" (lmask), "d" (hmask), "D" (ptr))
- if ( fip_width == 8 || !(mask & XSTATE_FP) )
+ if ( fip_width == 8 || !(mask & X86_XCR0_FP) )
{
XSAVE("0x48,");
}
fip_width = 8;
}
#undef XSAVE
- if ( mask & XSTATE_FP )
+ if ( mask & X86_XCR0_FP )
ptr->fpu_sse.x[FPU_WORD_SIZE_OFFSET] = fip_width;
}
* sometimes new user value. Both should be ok. Use the FPU saved
* data block as a safe address because it should be in L1.
*/
- if ( (mask & ptr->xsave_hdr.xstate_bv & XSTATE_FP) &&
+ if ( (mask & ptr->xsave_hdr.xstate_bv & X86_XCR0_FP) &&
!(ptr->fpu_sse.fsw & ~ptr->fpu_sse.fcw & 0x003f) &&
boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
asm volatile ( "fnclex\n\t" /* clear exceptions */
* Also try to eliminate fault reasons, even if this shouldn't be
* needed here (other code should ensure the sanity of the data).
*/
- if ( ((mask & XSTATE_SSE) ||
- ((mask & XSTATE_YMM) &&
+ if ( ((mask & X86_XCR0_SSE) ||
+ ((mask & X86_XCR0_YMM) &&
!(ptr->xsave_hdr.xcomp_bv & XSTATE_COMPACTION_ENABLED))) )
ptr->fpu_sse.mxcsr &= mxcsr_mask;
if ( v->arch.xcr0_accum & XSTATE_XSAVES_ONLY )
cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
BUG_ON((eax & XSTATE_FP_SSE) != XSTATE_FP_SSE);
- BUG_ON((eax & XSTATE_YMM) && !(eax & XSTATE_SSE));
+ BUG_ON((eax & X86_XCR0_YMM) && !(eax & X86_XCR0_SSE));
feature_mask = (((u64)edx << 32) | eax) & XCNTXT_MASK;
/*
static bool valid_xcr0(u64 xcr0)
{
/* FP must be unconditionally set. */
- if ( !(xcr0 & XSTATE_FP) )
+ if ( !(xcr0 & X86_XCR0_FP) )
return false;
/* YMM depends on SSE. */
- if ( (xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE) )
+ if ( (xcr0 & X86_XCR0_YMM) && !(xcr0 & X86_XCR0_SSE) )
return false;
- if ( xcr0 & (XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM) )
+ if ( xcr0 & (X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM) )
{
/* OPMASK, ZMM, and HI_ZMM require YMM. */
- if ( !(xcr0 & XSTATE_YMM) )
+ if ( !(xcr0 & X86_XCR0_YMM) )
return false;
/* OPMASK, ZMM, and HI_ZMM must be the same. */
- if ( ~xcr0 & (XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM) )
+ if ( ~xcr0 & (X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM) )
return false;
}
/* BNDREGS and BNDCSR must be the same. */
- return !(xcr0 & XSTATE_BNDREGS) == !(xcr0 & XSTATE_BNDCSR);
+ return !(xcr0 & X86_XCR0_BNDREGS) == !(xcr0 & X86_XCR0_BNDCSR);
}
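
Since the dependency rules above are the core of XCR0 validation, here is a standalone harness (not part of the patch; the local macro copies and main() exist only for the example) that reproduces them verbatim and checks a few accept/reject cases:

    #include <stdbool.h>
    #include <stdint.h>
    #include <assert.h>

    #define X86_XCR0_FP       (1ULL << 0)
    #define X86_XCR0_SSE      (1ULL << 1)
    #define X86_XCR0_YMM      (1ULL << 2)
    #define X86_XCR0_BNDREGS  (1ULL << 3)
    #define X86_XCR0_BNDCSR   (1ULL << 4)
    #define X86_XCR0_OPMASK   (1ULL << 5)
    #define X86_XCR0_ZMM      (1ULL << 6)
    #define X86_XCR0_HI_ZMM   (1ULL << 7)

    static bool valid_xcr0(uint64_t xcr0)
    {
        if ( !(xcr0 & X86_XCR0_FP) )                          /* FP mandatory */
            return false;
        if ( (xcr0 & X86_XCR0_YMM) && !(xcr0 & X86_XCR0_SSE) )/* YMM needs SSE */
            return false;
        if ( xcr0 & (X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM) )
        {
            if ( !(xcr0 & X86_XCR0_YMM) )                     /* AVX-512 needs YMM */
                return false;
            if ( ~xcr0 & (X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM) )
                return false;                                 /* all three or none */
        }
        /* BNDREGS and BNDCSR must be enabled together. */
        return !(xcr0 & X86_XCR0_BNDREGS) == !(xcr0 & X86_XCR0_BNDCSR);
    }

    int main(void)
    {
        assert(valid_xcr0(X86_XCR0_FP | X86_XCR0_SSE | X86_XCR0_YMM));
        assert(!valid_xcr0(X86_XCR0_SSE));                    /* FP missing */
        assert(!valid_xcr0(X86_XCR0_FP | X86_XCR0_YMM));      /* SSE missing */
        assert(!valid_xcr0(X86_XCR0_FP | X86_XCR0_SSE |
                           X86_XCR0_YMM | X86_XCR0_ZMM));     /* split AVX-512 */
        assert(!valid_xcr0(X86_XCR0_FP | X86_XCR0_BNDREGS));  /* split MPX */
        return 0;
    }
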
int validate_xstate(u64 xcr0, u64 xcr0_accum, const struct xsave_hdr *hdr)
return -EINVAL;
/* XCR0.PKRU is disabled on PV mode. */
- if ( is_pv_vcpu(curr) && (new_bv & XSTATE_PKRU) )
+ if ( is_pv_vcpu(curr) && (new_bv & X86_XCR0_PKRU) )
return -EOPNOTSUPP;
if ( !set_xcr0(new_bv) )
curr->arch.xcr0_accum |= new_bv;
/* LWP sets nonlazy_xstate_used independently. */
- if ( new_bv & (XSTATE_NONLAZY & ~XSTATE_LWP) )
+ if ( new_bv & (XSTATE_NONLAZY & ~X86_XCR0_LWP) )
curr->arch.nonlazy_xstate_used = 1;
mask &= curr->fpu_dirtied ? ~XSTATE_FP_SSE : XSTATE_NONLAZY;
{
asm ( ".byte 0x0f,0xc7,0x27\n" /* xsavec */
: "=m" (*xstate)
- : "a" (XSTATE_BNDCSR), "d" (0), "D" (xstate) );
+ : "a" (X86_XCR0_BNDCSR), "d" (0), "D" (xstate) );
bndcsr = (void *)(xstate + 1);
}
{
asm ( ".byte 0x0f,0xae,0x27\n" /* xsave */
: "=m" (*xstate)
- : "a" (XSTATE_BNDCSR), "d" (0), "D" (xstate) );
+ : "a" (X86_XCR0_BNDCSR), "d" (0), "D" (xstate) );
- bndcsr = (void *)xstate + xstate_offsets[_XSTATE_BNDCSR];
+ bndcsr = (void *)xstate + xstate_offsets[X86_XCR0_BNDCSR_POS];
}
if ( cr0 & X86_CR0_TS )
write_cr0(cr0);
- return xstate->xsave_hdr.xstate_bv & XSTATE_BNDCSR ? bndcsr->bndcfgu : 0;
+ return xstate->xsave_hdr.xstate_bv & X86_XCR0_BNDCSR ? bndcsr->bndcfgu : 0;
}
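
For reference, the XSAVE-then-check-XSTATE_BV pattern used above can be reproduced from userspace: XSAVE only updates the XSTATE_BV bits named in the edx:eax request mask, and a component left in its init state reads back as 0 there, which is why the function returns 0 for bndcfgu in that case. A hedged, standalone sketch (not part of the patch) using the x87/SSE components so it builds with GCC/Clang and -mxsave on an XSAVE-capable machine; BNDCSR cannot be exercised this way from userspace:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <immintrin.h>

    #define X86_XCR0_FP   (1ULL << 0)
    #define X86_XCR0_SSE  (1ULL << 1)

    int main(void)
    {
        /* XSAVE needs a 64-byte aligned area; 576 bytes covers legacy + header. */
        static uint8_t area[1024] __attribute__((aligned(64)));
        uint64_t xstate_bv;

        memset(area, 0, sizeof(area));
        _xsave(area, X86_XCR0_FP | X86_XCR0_SSE);

        /* XSTATE_BV is the first 8 bytes of the XSAVE header at offset 512. */
        memcpy(&xstate_bv, area + 512, sizeof(xstate_bv));
        printf("XSTATE_BV = %#llx (FP %s, SSE %s init state)\n",
               (unsigned long long)xstate_bv,
               (xstate_bv & X86_XCR0_FP)  ? "not in" : "in",
               (xstate_bv & X86_XCR0_SSE) ? "not in" : "in");
        return 0;
    }
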
void xstate_set_init(uint64_t mask)
#define X86_CR4_SMAP 0x00200000 /* enable SMAP */
#define X86_CR4_PKE 0x00400000 /* enable PKE */
+/*
+ * XSTATE component flags in XCR0
+ */
+#define X86_XCR0_FP_POS 0
+#define X86_XCR0_FP (1ULL << X86_XCR0_FP_POS)
+#define X86_XCR0_SSE_POS 1
+#define X86_XCR0_SSE (1ULL << X86_XCR0_SSE_POS)
+#define X86_XCR0_YMM_POS 2
+#define X86_XCR0_YMM (1ULL << X86_XCR0_YMM_POS)
+#define X86_XCR0_BNDREGS_POS 3
+#define X86_XCR0_BNDREGS (1ULL << X86_XCR0_BNDREGS_POS)
+#define X86_XCR0_BNDCSR_POS 4
+#define X86_XCR0_BNDCSR (1ULL << X86_XCR0_BNDCSR_POS)
+#define X86_XCR0_OPMASK_POS 5
+#define X86_XCR0_OPMASK (1ULL << X86_XCR0_OPMASK_POS)
+#define X86_XCR0_ZMM_POS 6
+#define X86_XCR0_ZMM (1ULL << X86_XCR0_ZMM_POS)
+#define X86_XCR0_HI_ZMM_POS 7
+#define X86_XCR0_HI_ZMM (1ULL << X86_XCR0_HI_ZMM_POS)
+#define X86_XCR0_PKRU_POS 9
+#define X86_XCR0_PKRU (1ULL << X86_XCR0_PKRU_POS)
+#define X86_XCR0_LWP_POS 62
+#define X86_XCR0_LWP (1ULL << X86_XCR0_LWP_POS)
+
#endif /* __XEN_X86_DEFNS_H__ */
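
The new constants come in pairs because they are used in two ways throughout the patch: the *_POS values index per-component arrays such as xstate_offsets[], while the shifted masks are ORed into XCR0-style bitmaps. A small standalone illustration of that duality (not part of the patch; comp_name[] and main() are made up for the example):

    #include <stdint.h>
    #include <stdio.h>

    #define X86_XCR0_YMM_POS   2
    #define X86_XCR0_YMM       (1ULL << X86_XCR0_YMM_POS)
    #define X86_XCR0_PKRU_POS  9
    #define X86_XCR0_PKRU      (1ULL << X86_XCR0_PKRU_POS)

    /* Array indexed by position, as xstate_offsets[]/xstate_sizes[] are. */
    static const char *comp_name[10] = {
        [X86_XCR0_YMM_POS]  = "YMM",
        [X86_XCR0_PKRU_POS] = "PKRU",
    };

    int main(void)
    {
        uint64_t xcr0 = X86_XCR0_YMM | X86_XCR0_PKRU;  /* bitmap use of the masks */

        for ( unsigned int i = 0; i < 10; i++ )
            if ( (xcr0 >> i) & 1 )
                printf("bit %u: %s\n", i, comp_name[i]);
        return 0;
    }
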
#include <xen/sched.h>
#include <asm/cpufeature.h>
+#include <asm/x86-defns.h>
#define FCW_DEFAULT 0x037f
#define FCW_RESET 0x0040
#define XSAVE_HDR_OFFSET FXSAVE_SIZE
#define XSTATE_AREA_MIN_SIZE (FXSAVE_SIZE + XSAVE_HDR_SIZE)
-#define _XSTATE_FP 0
-#define XSTATE_FP (1ULL << _XSTATE_FP)
-#define _XSTATE_SSE 1
-#define XSTATE_SSE (1ULL << _XSTATE_SSE)
-#define _XSTATE_YMM 2
-#define XSTATE_YMM (1ULL << _XSTATE_YMM)
-#define _XSTATE_BNDREGS 3
-#define XSTATE_BNDREGS (1ULL << _XSTATE_BNDREGS)
-#define _XSTATE_BNDCSR 4
-#define XSTATE_BNDCSR (1ULL << _XSTATE_BNDCSR)
-#define _XSTATE_OPMASK 5
-#define XSTATE_OPMASK (1ULL << _XSTATE_OPMASK)
-#define _XSTATE_ZMM 6
-#define XSTATE_ZMM (1ULL << _XSTATE_ZMM)
-#define _XSTATE_HI_ZMM 7
-#define XSTATE_HI_ZMM (1ULL << _XSTATE_HI_ZMM)
-#define _XSTATE_PKRU 9
-#define XSTATE_PKRU (1ULL << _XSTATE_PKRU)
-#define _XSTATE_LWP 62
-#define XSTATE_LWP (1ULL << _XSTATE_LWP)
-
-#define XSTATE_FP_SSE (XSTATE_FP | XSTATE_SSE)
-#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM | XSTATE_OPMASK | \
- XSTATE_ZMM | XSTATE_HI_ZMM | XSTATE_NONLAZY)
+#define XSTATE_FP_SSE (X86_XCR0_FP | X86_XCR0_SSE)
+#define XCNTXT_MASK (X86_XCR0_FP | X86_XCR0_SSE | X86_XCR0_YMM | \
+ X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM | \
+ XSTATE_NONLAZY)
#define XSTATE_ALL (~(1ULL << 63))
-#define XSTATE_NONLAZY (XSTATE_LWP | XSTATE_BNDREGS | XSTATE_BNDCSR | \
- XSTATE_PKRU)
+#define XSTATE_NONLAZY (X86_XCR0_LWP | X86_XCR0_BNDREGS | X86_XCR0_BNDCSR | \
+ X86_XCR0_PKRU)
#define XSTATE_LAZY (XSTATE_ALL & ~XSTATE_NONLAZY)
#define XSTATE_XSAVES_ONLY 0
#define XSTATE_COMPACTION_ENABLED (1ULL << 63)
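
As a sanity check on the derived masks above (not part of the patch; the local macro copies exist only for the example): XSTATE_LAZY and XSTATE_NONLAZY partition XSTATE_ALL, and bit 63 stays excluded since it is the XCOMP_BV compaction flag (XSTATE_COMPACTION_ENABLED) rather than a state component.

    #include <stdint.h>
    #include <assert.h>

    #define X86_XCR0_BNDREGS  (1ULL << 3)
    #define X86_XCR0_BNDCSR   (1ULL << 4)
    #define X86_XCR0_PKRU     (1ULL << 9)
    #define X86_XCR0_LWP      (1ULL << 62)

    #define XSTATE_ALL        (~(1ULL << 63))
    #define XSTATE_NONLAZY    (X86_XCR0_LWP | X86_XCR0_BNDREGS | \
                               X86_XCR0_BNDCSR | X86_XCR0_PKRU)
    #define XSTATE_LAZY       (XSTATE_ALL & ~XSTATE_NONLAZY)
    #define XSTATE_COMPACTION_ENABLED (1ULL << 63)

    int main(void)
    {
        assert((XSTATE_LAZY | XSTATE_NONLAZY) == XSTATE_ALL);   /* partition */
        assert((XSTATE_LAZY & XSTATE_NONLAZY) == 0);            /* disjoint */
        assert((XSTATE_ALL & XSTATE_COMPACTION_ENABLED) == 0);  /* bit 63 excluded */
        return 0;
    }
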