return -EINVAL;
}
- if ( (ctxt.msr_efer & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE)) ||
+ if ( (ctxt.msr_efer & ~(EFER_FFXSE | EFER_LME | EFER_LMA |
+ EFER_NX | EFER_SCE)) ||
((sizeof(long) != 8) && (ctxt.msr_efer & EFER_LME)) ||
(!cpu_has_nx && (ctxt.msr_efer & EFER_NX)) ||
(!cpu_has_syscall && (ctxt.msr_efer & EFER_SCE)) ||
+ (!cpu_has_ffxsr && (ctxt.msr_efer & EFER_FFXSE)) ||
((ctxt.msr_efer & (EFER_LME|EFER_LMA)) == EFER_LMA) )
{
        gdprintk(XENLOG_ERR, "HVM restore: bad EFER 0x%"PRIx64"\n",
                 ctxt.msr_efer);
        return -EINVAL;
    }

    value &= ~EFER_LMA;
- if ( (value & ~(EFER_LME | EFER_NX | EFER_SCE)) ||
+ if ( (value & ~(EFER_FFXSE | EFER_LME | EFER_NX | EFER_SCE)) ||
((sizeof(long) != 8) && (value & EFER_LME)) ||
(!cpu_has_nx && (value & EFER_NX)) ||
- (!cpu_has_syscall && (value & EFER_SCE)) )
+ (!cpu_has_syscall && (value & EFER_SCE)) ||
+ (!cpu_has_ffxsr && (value & EFER_FFXSE)) )
{
gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
"EFER: %"PRIx64"\n", value);
/* So far, we do not support 3DNow for the guest. */
clear_bit(X86_FEATURE_3DNOW & 31, &edx);
clear_bit(X86_FEATURE_3DNOWEXT & 31, &edx);
- /* no FFXSR instructions feature. */
- clear_bit(X86_FEATURE_FFXSR & 31, &edx);
}
else if ( input == 0x80000007 || input == 0x8000000A )
{
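
Dropping the two clear_bit() calls is what actually exposes the feature: the CPUID intercept starts from the host-provided leaf 0x80000001 EDX and only masks bits off, so once FFXSR is no longer cleared it passes straight through to the guest. A small sketch of that masking idiom (hide_feature() is a hypothetical helper; the `& 31` reduces the word-encoded bit number to a bit position within the 32-bit register):

#include <stdint.h>

#define X86_FEATURE_3DNOW    (1*32+31) /* leaf 0x80000001, EDX bit 31 */
#define X86_FEATURE_3DNOWEXT (1*32+30) /* leaf 0x80000001, EDX bit 30 */

/* Same effect as the clear_bit(feature & 31, &edx) calls above. */
static inline void hide_feature(uint32_t *word, int feature)
{
    *word &= ~(1u << (feature & 31));
}

/* usage: hide_feature(&edx, X86_FEATURE_3DNOW); */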
#define X86_FEATURE_MP (1*32+19) /* MP Capable. */
#define X86_FEATURE_NX (1*32+20) /* Execute Disable */
#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
+#define X86_FEATURE_FFXSR (1*32+25) /* FFXSR instruction optimizations */
#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */
#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */

#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
#define X86_FEATURE_SVME (6*32+ 2) /* Secure Virtual Machine */
-#define X86_FEATURE_FFXSR (6*32+25) /* FFXSR instruction optimizations */

#define cpu_has(c, bit) test_bit(bit, (c)->x86_capability)
#define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability)
#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
#endif
+#define cpu_has_ffxsr ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) \
+ && boot_cpu_has(X86_FEATURE_FFXSR))
+
#endif /* __ASM_I386_CPUFEATURE_H */
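
The relocation of X86_FEATURE_FFXSR is a genuine bug fix, not churn: in this capability-word layout, word 1 mirrors CPUID leaf 0x80000001 EDX while word 6 mirrors leaf 0x80000001 ECX (compare LAHF_LM and SVME above), and AMD reports FFXSR in 0x80000001 EDX bit 25, so 1*32+25 is the correct encoding and the old 6*32+25 pointed at the wrong register entirely. A minimal user-space sketch of the same probe, assuming GCC/Clang's <cpuid.h>:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* __get_cpuid() returns 0 if the leaf is out of range. */
    if ( !__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx) )
        return 1;
    printf("FFXSR (fast FXSAVE/FXRSTOR): %s\n",
           (edx & (1u << 25)) ? "yes" : "no");
    return 0;
}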
#define _EFER_LME 8 /* Long mode enable */
#define _EFER_LMA 10 /* Long mode active (read-only) */
#define _EFER_NX 11 /* No execute enable */
-#define _EFER_SVME 12
+#define _EFER_SVME 12 /* AMD: SVM enable */
+#define _EFER_LMSLE 13 /* AMD: Long-mode segment limit enable */
+#define _EFER_FFXSE 14 /* AMD: Fast FXSAVE/FXRSTOR enable */
#define EFER_SCE (1<<_EFER_SCE)
#define EFER_LME (1<<_EFER_LME)
#define EFER_LMA (1<<_EFER_LMA)
#define EFER_NX (1<<_EFER_NX)
#define EFER_SVME (1<<_EFER_SVME)
+#define EFER_LMSLE (1<<_EFER_LMSLE)
+#define EFER_FFXSE (1<<_EFER_FFXSE)
/* Intel MSRs. Some also available on other CPUs */
#define MSR_IA32_PERFCTR0 0x000000c1
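
End to end, a guest that sees the CPUID bit can now enable the optimization by setting the newly whitelisted EFER.FFXSE. A ring-0 sketch of that final step (the rdmsr/wrmsr wrappers here are illustrative; MSR_EFER itself is the architectural 0xc0000080):

#include <stdint.h>

#define MSR_EFER   0xc0000080u
#define EFER_FFXSE (1u << 14)

static inline uint64_t rdmsr(uint32_t msr)
{
    uint32_t lo, hi;
    __asm__ __volatile__("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
    return ((uint64_t)hi << 32) | lo;
}

static inline void wrmsr(uint32_t msr, uint64_t val)
{
    __asm__ __volatile__("wrmsr" : : "c"(msr),
                         "a"((uint32_t)val), "d"((uint32_t)(val >> 32)));
}

/* Must run at CPL0. With FFXSE set, FXSAVE/FXRSTOR in 64-bit mode at
 * CPL0 skip the XMM register image, leaving the OS to manage XMM state
 * itself. */
static void enable_ffxse(void)
{
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_FFXSE);
}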