return sel;
}
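+
+/*
+ * Read an extended control register.  Raises #UD if CR4.OSXSAVE is clear.
+ */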
+static inline uint64_t xgetbv(uint32_t index)
+{
+    uint32_t feat_lo, feat_hi;
+
+    asm volatile ("xgetbv" : "=a" (feat_lo), "=d" (feat_hi)
+                  : "c" (index) );
+
+    return feat_lo | ((uint64_t)feat_hi << 32);
+}
+
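+/*
+ * Write an extended control register.  Raises #UD if CR4.OSXSAVE is clear,
+ * and #GP for an invalid value or when executed outside of ring 0.
+ */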
+static inline void xsetbv(uint32_t index, uint64_t value)
+{
+    asm volatile ("xsetbv" :: "a" ((uint32_t)value),
+                  "d" ((uint32_t)(value >> 32)),
+                  "c" (index) );
+}
+
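+/* Convenience accessors for XCR0. */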
+static inline uint64_t read_xcr0(void)
+{
+    return xgetbv(0);
+}
+
+static inline void write_xcr0(uint64_t xcr0)
+{
+    xsetbv(0, xcr0);
+}
+
#endif /* XTF_X86_LIB_H */
/*
#define X86_DR6_BS      (1u << 14) /* Single step */
#define X86_DR6_BT      (1u << 15) /* Task switch */
+/*
+ * CPU features in XCR0.
+ */
+#define _XSTATE_FP       0
+#define XSTATE_FP        (1ULL << _XSTATE_FP)
+#define _XSTATE_SSE      1
+#define XSTATE_SSE       (1ULL << _XSTATE_SSE)
+#define _XSTATE_YMM      2
+#define XSTATE_YMM       (1ULL << _XSTATE_YMM)
+#define _XSTATE_BNDREGS  3
+#define XSTATE_BNDREGS   (1ULL << _XSTATE_BNDREGS)
+#define _XSTATE_BNDCSR   4
+#define XSTATE_BNDCSR    (1ULL << _XSTATE_BNDCSR)
+#define _XSTATE_OPMASK   5
+#define XSTATE_OPMASK    (1ULL << _XSTATE_OPMASK)
+#define _XSTATE_ZMM      6
+#define XSTATE_ZMM       (1ULL << _XSTATE_ZMM)
+#define _XSTATE_HI_ZMM   7
+#define XSTATE_HI_ZMM    (1ULL << _XSTATE_HI_ZMM)
+#define _XSTATE_PKRU     9
+#define XSTATE_PKRU      (1ULL << _XSTATE_PKRU)
+#define _XSTATE_LWP      62
+#define XSTATE_LWP       (1ULL << _XSTATE_LWP)
+
/*
* Exception mnemonics.
*/
* - x87 `wait`
* - MMX
* - SSE
+ * - AVX
*
* checking that appropriate exceptions are raised (@#NM or @#UD), or that no
* exception is raised.
* @todo Extend to include unmasked pending exceptions. There is definitely
* work required in the instruction emulator to support this properly.
*
- * @todo Extend to include xsave-based FPU instruction sets.
- *
* @see tests/fpu-exception-emulation/main.c
*/
#include <xtf.h>
return fault;
}
+/**
+ * AVX instructions.  CR0.EM is meaningless (it does not yield @#UD), but
+ * @#NM should still be raised if the task has been switched (CR0.TS set).
+ */
+static const struct test_cfg avx[] =
+{
+    { CR0_SYM(          ), 0 },
+    { CR0_SYM(        TS), EXINFO_SYM(NM, 0) },
+    { CR0_SYM(    MP    ), 0 },
+    { CR0_SYM(    MP, TS), EXINFO_SYM(NM, 0) },
+    { CR0_SYM(EM        ), 0 },
+    { CR0_SYM(EM,     TS), EXINFO_SYM(NM, 0) },
+    { CR0_SYM(EM, MP    ), 0 },
+    { CR0_SYM(EM, MP, TS), EXINFO_SYM(NM, 0) },
+};
+
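+/*
+ * Execute an AVX instruction (vmovups), optionally forcing emulation via
+ * the Xen Force Emulation Prefix, and return any fault which occurs.
+ */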
+static exinfo_t probe_avx(bool force)
+{
+    exinfo_t fault = 0;
+
+    asm volatile ("test %[fep], %[fep];"
+                  "jz 1f;"
+                  _ASM_XEN_FEP
+                  "1: vmovups %%xmm0, %%xmm0; 2:"
+                  _ASM_EXTABLE_HANDLER(1b, 2b, ex_record_fault_eax)
+                  : "+a" (fault)
+                  : [fep] "q" (force));
+
+    return fault;
+}
+
void run_sequence(const struct test_cfg *seq, unsigned int nr,
unsigned int (*fn)(bool), bool force, exinfo_t override)
{
write_cr4(cr4);
}
+
+    if ( cpu_has_avx )
+    {
+        unsigned long cr4 = read_cr4();
+        uint64_t xcr0;
+
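+        /* Without CR4.OSXSAVE, all AVX instructions should raise #UD. */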
+ printk("Testing%s AVX\n", force ? " emulated" : "");
+ write_cr4(cr4 & ~X86_CR4_OSXSAVE);
+ run_sequence(avx, ARRAY_SIZE(avx), probe_avx, force,
+ EXINFO_SYM(UD, 0));
+
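+        /*
+         * With CR4.OSXSAVE set but XCR0.YMM clear, AVX instructions
+         * should still raise #UD.
+         */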
+ printk("Testing%s AVX (CR4.OSXSAVE)\n", force ? " emulated" : "");
+ write_cr4(cr4 | X86_CR4_OSXSAVE);
+ xcr0 = read_xcr0();
+ write_xcr0(xcr0 & ~XSTATE_YMM);
+ run_sequence(avx, ARRAY_SIZE(avx), probe_avx, force,
+ EXINFO_SYM(UD, 0));
+
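+        /*
+         * With CR4.OSXSAVE and XCR0.YMM (plus its XCR0.SSE prerequisite)
+         * set, AVX instructions behave as per the avx[] table above.
+         */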
+ printk("Testing%s AVX (CR4.OSXSAVE+XCR0.YMM)\n", force ? " emulated" : "");
+ write_xcr0(xcr0 | XSTATE_SSE | XSTATE_YMM);
+ run_sequence(avx, ARRAY_SIZE(avx), probe_avx, force, 0);
+
+        write_xcr0(xcr0);
+        write_cr4(cr4);
+    }
}
void test_main(void)