#include <xtf/asm_macros.h>
-#include <arch/x86/page.h>
-#include <arch/x86/processor.h>
-#include <arch/x86/msr-index.h>
-#include <arch/x86/segment.h>
+#include <arch/page.h>
+#include <arch/processor.h>
+#include <arch/msr-index.h>
+#include <arch/segment.h>
.code32 /* Always starts in 32bit flat mode. */
GLOBAL(_start) /* HVM common setup. */
#include <xtf/asm_macros.h>
-#include <arch/x86/page.h>
+#include <arch/page.h>
#include <xen/elfnote.h>
#include <xtf/lib.h>
#include <xtf/libc.h>
-#include <arch/x86/decode.h>
-#include <arch/x86/processor.h>
+#include <arch/decode.h>
+#include <arch/processor.h>
const char *x86_vendor_name(enum x86_vendor v)
{
-#include <arch/x86/desc.h>
-#include <arch/x86/segment.h>
-#include <arch/x86/symbolic-const.h>
+#include <arch/desc.h>
+#include <arch/segment.h>
+#include <arch/symbolic-const.h>
user_desc gdt[NR_GDT_ENTRIES] =
{
-#include <arch/x86/idt.h>
-#include <arch/x86/processor.h>
-#include <arch/x86/segment.h>
+#include <arch/idt.h>
+#include <arch/processor.h>
+#include <arch/segment.h>
#include <xtf/asm_macros.h>
/*
-#include <arch/x86/idt.h>
-#include <arch/x86/processor.h>
-#include <arch/x86/segment.h>
+#include <arch/idt.h>
+#include <arch/processor.h>
+#include <arch/segment.h>
#include <xtf/asm_macros.h>
/*
#include <xtf/lib.h>
#include <xtf/extable.h>
-#include <arch/x86/exinfo.h>
-#include <arch/x86/regs.h>
+#include <arch/exinfo.h>
+#include <arch/regs.h>
/**
* Record the current fault in @%eax
#include <xtf/asm_macros.h>
-#include <arch/x86/page.h>
+#include <arch/page.h>
#define PAGETABLE_START(sym) \
GLOBAL(sym)
#include <xtf/traps.h>
#include <xtf/lib.h>
-#include <arch/x86/idt.h>
-#include <arch/x86/lib.h>
-#include <arch/x86/processor.h>
-#include <arch/x86/desc.h>
+#include <arch/idt.h>
+#include <arch/lib.h>
+#include <arch/processor.h>
+#include <arch/desc.h>
/* Real entry points */
void entry_DE(void);
-#include <arch/x86/page.h>
+#include <arch/page.h>
#include <xtf/asm_macros.h>
#include <xen/xen.h>
--- /dev/null
+/**
+ * @file arch/x86/include/arch/asm_macros.h
+ *
+ * Macros for use in x86 assembly files.
+ */
+#ifndef XTF_X86_ASM_MACROS_H
+#define XTF_X86_ASM_MACROS_H
+
+#ifdef __ASSEMBLY__
+/* Declare data at the architecture's width. */
+# if defined(__x86_64__)
+# define _WORD .quad
+# elif defined(__i386__)
+# define _WORD .long
+# endif
+#else
+# if defined(__x86_64__)
+# define _WORD ".quad "
+# elif defined(__i386__)
+# define _WORD ".long "
+# endif
+#endif
+
+#ifdef __ASSEMBLY__
+
+.macro SAVE_ALL
+ cld
+#if defined(__x86_64__)
+ push %rdi
+ push %rsi
+ push %rdx
+ push %rcx
+ push %rax
+ push %r8
+ push %r9
+ push %r10
+ push %r11
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+#elif defined(__i386__)
+ push %edi
+ push %esi
+ push %edx
+ push %ecx
+ push %eax
+ push %ebx
+ push %ebp
+#else
+# error Bad architecture for SAVE_ALL
+#endif
+.endm
+
+.macro RESTORE_ALL
+#if defined(__x86_64__)
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %r11
+ pop %r10
+ pop %r9
+ pop %r8
+ pop %rax
+ pop %rcx
+ pop %rdx
+ pop %rsi
+ pop %rdi
+#elif defined(__i386__)
+ pop %ebp
+ pop %ebx
+ pop %eax
+ pop %ecx
+ pop %edx
+ pop %esi
+ pop %edi
+#else
+# error Bad architecture for RESTORE_ALL
+#endif
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* XTF_X86_ASM_MACROS_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+#ifndef XTF_X86_BARRIER_H
+#define XTF_X86_BARRIER_H
+
+/*
+ * Memory barriers for x86 systems
+ *
+ * See Linux: Documentation/memory-barriers.txt for a very detailed
+ * description of the problems and their implications.
+ *
+ * Under Xen, we rely on the fact that only x86_64 cpus are supported, which
+ * guarantees that the {m,l,s}fence instructions are supported (SSE2 being a
+ * requirement of 64bit).
+ *
+ * x86 memory ordering requirements make the smp_???() variants easy. From
+ * the point of view of program order, reads may not be reordered with respect
+ * to other reads, and writes may not be reordered with respect to other
+ * writes, causing smp_rmb() and smp_wmb() to degrade to simple compiler
+ * barriers. smp_mb() however does need to be an mfence instruction, as reads
+ * are permitted to be reordered ahead of non-aliasing writes.
+ */
+
+#include <xtf/compiler.h>
+
+#define mb() __asm__ __volatile__ ("mfence" ::: "memory")
+#define rmb() __asm__ __volatile__ ("lfence" ::: "memory")
+#define wmb() __asm__ __volatile__ ("sfence" ::: "memory")
+
+#define smp_mb() mb()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
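+
+/*
+ * Example (illustrative; 'data' and 'flag' stand for shared variables): the
+ * classic message-passing pattern.
+ *
+ *   Writer:
+ *     data = 42;
+ *     smp_wmb();
+ *     flag = 1;
+ *
+ *   Reader:
+ *     if ( flag )
+ *     {
+ *         smp_rmb();
+ *         ... data is guaranteed to read as 42 here ...
+ *     }
+ *
+ * On x86 both barriers degrade to compiler barriers, but they still prevent
+ * the compiler from reordering the accesses and document the required
+ * ordering.
+ */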
+
+#endif /* XTF_X86_BARRIER_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/**
+ * @file arch/x86/include/arch/config.h
+ * Logic to split an environment into finer-grain @#define's
+ *
+ * Converts `CONFIG_ENV_$foo` into:
+ * - `CONFIG_PV` or `CONFIG_HVM`
+ * - `CONFIG_PAGING_LEVELS = $num`
+ *
+ * The `CONFIG_ENV_$foo` is then undefined, to prevent its use in general code.
+ */
+#ifndef XTF_X86_CONFIG_H
+#define XTF_X86_CONFIG_H
+
+#include <xtf/macro_magic.h>
+
+#if defined(CONFIG_ENV_pv64)
+
+#define CONFIG_PV 1
+#define CONFIG_64BIT 1
+#define CONFIG_PAGING_LEVELS 4
+#define ENVIRONMENT_DESCRIPTION "PV 64bit (Long mode 4 levels)"
+
+#undef CONFIG_ENV_pv64
+
+#elif defined(CONFIG_ENV_pv32pae)
+
+#define CONFIG_PV 1
+#define CONFIG_32BIT 1
+#define CONFIG_PAGING_LEVELS 3
+#define ENVIRONMENT_DESCRIPTION "PV 32bit (PAE 3 levels)"
+
+#undef CONFIG_ENV_pv32pae
+
+#elif defined(CONFIG_ENV_hvm64)
+
+#define CONFIG_HVM 1
+#define CONFIG_64BIT 1
+#define CONFIG_PAGING_LEVELS 4
+#define ENVIRONMENT_DESCRIPTION "HVM 64bit (Long mode 4 levels)"
+
+#undef CONFIG_ENV_hvm64
+
+#elif defined(CONFIG_ENV_hvm32pae)
+
+#define CONFIG_HVM 1
+#define CONFIG_32BIT 1
+#define CONFIG_PAGING_LEVELS 3
+#define ENVIRONMENT_DESCRIPTION "HVM 32bit (PAE 3 levels)"
+
+#undef CONFIG_ENV_hvm32pae
+
+#elif defined(CONFIG_ENV_hvm32pse)
+
+#define CONFIG_HVM 1
+#define CONFIG_32BIT 1
+#define CONFIG_PAGING_LEVELS 2
+#define ENVIRONMENT_DESCRIPTION "HVM 32bit (PSE 2 levels)"
+
+#undef CONFIG_ENV_hvm32pse
+
+#elif defined(CONFIG_ENV_hvm32)
+
+#define CONFIG_HVM 1
+#define CONFIG_32BIT 1
+#define CONFIG_PAGING_LEVELS 0
+#define ENVIRONMENT_DESCRIPTION "HVM 32bit (No paging)"
+
+#undef CONFIG_ENV_hvm32
+
+#else
+# error Bad environment
+#endif
+
+#endif /* XTF_X86_CONFIG_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+#ifndef XTF_X86_CPUID_H
+#define XTF_X86_CPUID_H
+
+#include <xtf/types.h>
+#include <xtf/numbers.h>
+
+#include <xen/arch-x86/cpufeatureset.h>
+
+typedef void (*cpuid_fn_t)(uint32_t leaf,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx);
+typedef void (*cpuid_count_fn_t)(uint32_t leaf, uint32_t subleaf,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx);
+
+enum x86_vendor
+{
+ X86_VENDOR_UNKNOWN,
+ X86_VENDOR_INTEL,
+ X86_VENDOR_AMD,
+};
+
+extern enum x86_vendor x86_vendor;
+extern unsigned int x86_family, x86_model, x86_stepping;
+extern unsigned int maxphysaddr, maxvirtaddr;
+
+static inline bool vendor_is(enum x86_vendor v)
+{
+ return x86_vendor == v;
+}
+
+#define vendor_is_intel vendor_is(X86_VENDOR_INTEL)
+#define vendor_is_amd vendor_is(X86_VENDOR_AMD)
+
+
+#define cpufeat_word(idx) ((idx) / 32)
+#define cpufeat_bit(idx) ((idx) % 32)
+#define cpufeat_mask(idx) (_AC(1, U) << cpufeat_bit(idx))
+
+#define FEATURESET_1d cpufeat_word(X86_FEATURE_FPU)
+#define FEATURESET_1c cpufeat_word(X86_FEATURE_SSE3)
+#define FEATURESET_e1d cpufeat_word(X86_FEATURE_SYSCALL)
+#define FEATURESET_e1c cpufeat_word(X86_FEATURE_LAHF_LM)
+#define FEATURESET_Da1 cpufeat_word(X86_FEATURE_XSAVEOPT)
+#define FEATURESET_7b0 cpufeat_word(X86_FEATURE_FSGSBASE)
+#define FEATURESET_7c0 cpufeat_word(X86_FEATURE_PREFETCHWT1)
+#define FEATURESET_e7d cpufeat_word(X86_FEATURE_ITSC)
+#define FEATURESET_e8b cpufeat_word(X86_FEATURE_CLZERO)
+
+#define FSCAPINTS (FEATURESET_e8b + 1)
+
+extern uint32_t x86_features[FSCAPINTS];
+
+static inline bool cpu_has(unsigned int feature)
+{
+ return x86_features[cpufeat_word(feature)] & cpufeat_mask(feature);
+}
+
+#define cpu_has_fpu cpu_has(X86_FEATURE_FPU)
+#define cpu_has_vme cpu_has(X86_FEATURE_VME)
+#define cpu_has_de cpu_has(X86_FEATURE_DE)
+#define cpu_has_pse cpu_has(X86_FEATURE_PSE)
+#define cpu_has_tsc cpu_has(X86_FEATURE_TSC)
+#define cpu_has_pae cpu_has(X86_FEATURE_PAE)
+#define cpu_has_mce cpu_has(X86_FEATURE_MCE)
+#define cpu_has_pge cpu_has(X86_FEATURE_PGE)
+#define cpu_has_mca cpu_has(X86_FEATURE_MCA)
+#define cpu_has_pat cpu_has(X86_FEATURE_PAT)
+#define cpu_has_pse36 cpu_has(X86_FEATURE_PSE36)
+#define cpu_has_mmx cpu_has(X86_FEATURE_MMX)
+#define cpu_has_fxsr cpu_has(X86_FEATURE_FXSR)
+
+#define cpu_has_sse cpu_has(X86_FEATURE_SSE)
+#define cpu_has_sse2 cpu_has(X86_FEATURE_SSE2)
+#define cpu_has_vmx cpu_has(X86_FEATURE_VMX)
+#define cpu_has_smx cpu_has(X86_FEATURE_SMX)
+#define cpu_has_pcid cpu_has(X86_FEATURE_PCID)
+#define cpu_has_xsave cpu_has(X86_FEATURE_XSAVE)
+#define cpu_has_avx cpu_has(X86_FEATURE_AVX)
+
+#define cpu_has_syscall cpu_has(X86_FEATURE_SYSCALL)
+#define cpu_has_nx cpu_has(X86_FEATURE_NX)
+#define cpu_has_page1gb cpu_has(X86_FEATURE_PAGE1GB)
+#define cpu_has_lm cpu_has(X86_FEATURE_LM)
+
+#define cpu_has_fsgsbase cpu_has(X86_FEATURE_FSGSBASE)
+#define cpu_has_smep cpu_has(X86_FEATURE_SMEP)
+#define cpu_has_smap cpu_has(X86_FEATURE_SMAP)
+
+#define cpu_has_umip cpu_has(X86_FEATURE_UMIP)
+#define cpu_has_pku cpu_has(X86_FEATURE_PKU)
+
+#endif /* XTF_X86_CPUID_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/**
+ * @file arch/x86/include/arch/decode.h
+ *
+ * Helper routines for decoding x86 state.
+ */
+#ifndef XTF_X86_DECODE_H
+#define XTF_X86_DECODE_H
+
+#include <xtf/types.h>
+
+#include <arch/cpuid.h>
+#include <arch/exinfo.h>
+
+/**
+ * String of the identified vendor @p v.
+ *
+ * @param v Vendor.
+ * @return String.
+ */
+const char *x86_vendor_name(enum x86_vendor v);
+
+/**
+ * String abbreviation of @p ev.
+ *
+ * @param ev Entry Vector.
+ * @return String abbreviation.
+ */
+const char *x86_exc_short_name(unsigned int ev);
+
+/**
+ * Decodes an x86 error code into a readable form.
+ *
+ * @param buf Buffer to fill.
+ * @param bufsz Size of @p buf.
+ * @param ev Entry Vector.
+ * @param ec Error Code.
+ * @return snprintf(buf, bufsz, ...)
+ */
+int x86_exc_decode_ec(char *buf, size_t bufsz,
+ unsigned int ev, unsigned int ec);
+
+/**
+ * Decodes an exinfo_t into a readable form.
+ *
+ * @param buf Buffer to fill.
+ * @param bufsz Size of @p buf.
+ * @param info exinfo_t value.
+ * @return snprintf(buf, bufsz, ...)
+ */
+int x86_decode_exinfo(char *buf, size_t bufsz, exinfo_t info);
+
+#endif /* XTF_X86_DECODE_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/**
+ * @file arch/x86/include/arch/desc.h
+ *
+ * %x86 segment descriptor infrastructure.
+ */
+
+#ifndef XTF_X86_DESC_H
+#define XTF_X86_DESC_H
+
+#include <xtf/types.h>
+#include <xtf/compiler.h>
+
+#include <arch/segment.h>
+
+/** 8 byte user segment descriptor (GDT/LDT entries with .s = 1) */
+struct __packed seg_desc32 {
+ union {
+ /** Raw backing integers. */
+ struct {
+ uint32_t lo, hi;
+ };
+ /** Common named fields. */
+ struct {
+ uint16_t limit0;
+ uint16_t base0;
+ uint8_t base1;
+ unsigned type: 4;
+ unsigned s: 1, dpl: 2, p: 1;
+ unsigned limit: 4;
+ unsigned avl: 1, l: 1, d: 1, g: 1;
+ uint8_t base2;
+ };
+ /** Code segment specific field names. */
+ struct {
+ uint16_t limit0;
+ uint16_t base0;
+ uint8_t base1;
+ unsigned a: 1, r: 1, c: 1, x: 1;
+ unsigned s: 1, dpl: 2, p: 1;
+ unsigned limit: 4;
+ unsigned avl: 1, l: 1, d: 1, g: 1;
+ uint8_t base2;
+ } code;
+ /** Data segment specific field names. */
+ struct {
+ uint16_t limit0;
+ uint16_t base0;
+ uint8_t base1;
+ unsigned a: 1, w: 1, e: 1, x: 1;
+ unsigned s: 1, dpl: 2, p: 1;
+ unsigned limit: 4;
+ unsigned avl: 1, _r0: 1, b: 1, g: 1;
+ uint8_t base2;
+ } data;
+ };
+};
+
+/** 8-byte gate - Protected mode IDT entry, GDT task/call gate. */
+struct __packed seg_gate32 {
+ union {
+ struct {
+ uint32_t lo, hi;
+ };
+ struct {
+ uint16_t offset0;
+ uint16_t selector;
+ uint8_t _r0;
+ unsigned type: 4, s: 1, dpl: 2, p: 1;
+ uint16_t offset1;
+ };
+ };
+};
+
+/** 16-byte gate - Long mode IDT entry. */
+struct __packed seg_gate64 {
+ union {
+ struct {
+ uint64_t lo, hi;
+ };
+ struct {
+ uint16_t offset0;
+ uint16_t selector;
+ unsigned ist: 3, _r0: 5, type: 4, s: 1, dpl: 2, p: 1;
+ uint16_t offset1;
+ uint32_t offset2;
+ uint32_t _r1;
+ };
+ };
+};
+
+/* GDT/LDT attribute flags for user segments */
+
+/* Common */
+#define SEG_ATTR_G 0x8000 /**< Granularity of limit (0 = 1, 1 = 4K) */
+#define SEG_ATTR_AVL 0x1000 /**< Available for software use */
+#define SEG_ATTR_P 0x0080 /**< Present? */
+#define SEG_ATTR_S 0x0010 /**< !System desc (0 = system, 1 = user) */
+#define SEG_ATTR_A 0x0001 /**< Accessed? (set by hardware) */
+
+#define SEG_ATTR_COMMON 0x8091 /**< Commonly set bits (G P S A) */
+
+#define SEG_ATTR_DPL0 0x0000 /**< Descriptor privilege level 0 */
+#define SEG_ATTR_DPL1 0x0020 /**< Descriptor privilege level 1 */
+#define SEG_ATTR_DPL2 0x0040 /**< Descriptor privilege level 2 */
+#define SEG_ATTR_DPL3 0x0060 /**< Descriptor privilege level 3 */
+#define SEG_ATTR_CODE 0x0008 /**< Type (0 = data, 1 = code) */
+#define SEG_ATTR_DATA 0x0000 /**< Type (0 = data, 1 = code) */
+
+/* Code segments */
+#define SEG_ATTR_D 0x4000 /**< Default operand size (0 = 16bit, 1 = 32bit) */
+#define SEG_ATTR_L 0x2000 /**< Long segment? (1 = 64bit) */
+#define SEG_ATTR_C 0x0004 /**< Conforming? (0 = non, 1 = conforming) */
+#define SEG_ATTR_R 0x0002 /**< Readable? (0 = XO seg, 1 = RX seg) */
+
+/* Data segments */
+#define SEG_ATTR_B 0x4000 /**< 'Big' flag.
+ * - For %ss, default operand size.
+ * - For expand-down segment, sets upper bound. */
+#define SEG_ATTR_E 0x0004 /**< Expand-down? (0 = normal, 1 = expand-down) */
+#define SEG_ATTR_W 0x0002 /**< Writable? (0 = RO seg, 1 = RW seg) */
+
+/**
+ * Initialise an LDT/GDT entry using a raw attribute number.
+ *
+ * @param base Segment base.
+ * @param limit Segment limit.
+ * @param attr Segment attributes.
+ */
+#define INIT_GDTE(base, limit, attr) { { { \
+ .lo = (((base) & 0xffff) << 16) | ((limit) & 0xffff), \
+ .hi = ((base) & 0xff000000) | ((limit) & 0xf0000) | \
+ (((attr) & 0xf0ff) << 8) | (((base) & 0xff0000) >> 16) \
+ } } }
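+
+/*
+ * Example (illustrative): a flat 32bit DPL0 code segment expressed with a
+ * raw attribute value.  0xc09b is G|D|P|DPL0|S|CODE|R|A, i.e. what
+ * INIT_GDTE_SYM(0, 0xfffff, COMMON, CODE, D, R) from
+ * <arch/symbolic-const.h> builds from mnemonics.
+ *
+ *   user_desc flat_cs = INIT_GDTE(0, 0xfffff, 0xc09b);
+ */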
+
+/** Long mode lgdt/lidt table pointer. */
+struct __packed desc_ptr64 {
+ uint16_t limit;
+ uint64_t base;
+};
+
+/** Protected mode lgdt/lidt table pointer. */
+struct __packed desc_ptr32 {
+ uint16_t limit;
+ uint32_t base;
+};
+
+struct __packed hw_tss32 {
+ uint16_t link; uint16_t _r0;
+
+ uint32_t esp0;
+ uint16_t ss0; uint16_t _r1;
+
+ uint32_t esp1;
+ uint16_t ss1; uint16_t _r2;
+
+ uint32_t esp2;
+ uint16_t ss2; uint16_t _r3;
+
+ uint32_t cr3;
+ uint32_t eip;
+ uint32_t eflags;
+ uint32_t eax;
+ uint32_t ecx;
+ uint32_t edx;
+ uint32_t ebx;
+ uint32_t esp;
+ uint32_t ebp;
+ uint32_t esi;
+ uint32_t edi;
+
+ uint16_t es; uint16_t _r4;
+ uint16_t cs; uint16_t _r5;
+ uint16_t ss; uint16_t _r6;
+ uint16_t ds; uint16_t _r7;
+ uint16_t fs; uint16_t _r8;
+ uint16_t gs; uint16_t _r9;
+ uint16_t ldtr; uint16_t _r10;
+ uint16_t t; uint16_t iopb;
+};
+
+struct __packed hw_tss64 {
+ uint16_t link; uint16_t _r0;
+
+ uint64_t rsp0;
+ uint64_t rsp1;
+ uint64_t rsp2;
+
+ uint64_t _r1;
+
+ uint64_t ist[7]; /* 1-based structure */
+
+ uint64_t _r2;
+
+ uint16_t t;
+ uint16_t iopb;
+};
+
+#define X86_TSS_INVALID_IO_BITMAP 0x8000
+
+#if defined(__x86_64__)
+
+typedef struct desc_ptr64 desc_ptr;
+typedef struct seg_desc32 user_desc;
+typedef struct seg_gate64 gate_desc;
+typedef struct hw_tss64 hw_tss;
+
+#elif defined(__i386__)
+
+typedef struct desc_ptr32 desc_ptr;
+typedef struct seg_desc32 user_desc;
+typedef struct seg_gate32 gate_desc;
+typedef struct hw_tss32 hw_tss;
+
+#else
+# error Bad architecture for descriptor infrastructure
+#endif
+
+extern user_desc gdt[NR_GDT_ENTRIES];
+extern desc_ptr gdt_ptr;
+
+#if defined(CONFIG_HVM)
+extern gate_desc idt[256];
+extern desc_ptr idt_ptr;
+
+extern hw_tss tss;
+#endif
+
+#endif /* XTF_X86_DESC_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+#ifndef XTF_X86_DIV_H
+#define XTF_X86_DIV_H
+
+#include <xtf/types.h>
+
+/*
+ * Divide a 64bit number by 32bit divisor without software support.
+ *
+ * The dividend is modified in place, and the modulus is returned.
+ */
+static inline uint32_t divmod64(uint64_t *dividend, uint32_t divisor)
+{
+ uint32_t mod;
+
+#ifdef __x86_64__
+
+ /*
+ * On 64bit, issue a straight 'div' instruction.
+ */
+
+ mod = *dividend % divisor;
+ *dividend /= divisor;
+#else
+ {
+ /*
+ * On 32bit, this is harder.
+ *
+ * In x86, 'divl' can take a 64bit dividend, but the resulting
+ * quotient must fit in %eax or a #DE will occur.
+ *
+ * To avoid this, we split the division in two. The remainder from
+ * the higher divide can safely be used in the upper 32bits of the
+ * lower divide, as it will not cause an overflow.
+ */
+ uint32_t high = *dividend >> 32, low = *dividend, umod = 0;
+
+ if ( high )
+ {
+ umod = high % divisor;
+ high /= divisor;
+ }
+
+ asm ("divl %2"
+ : "=a" (low), "=d" (mod)
+ : "rm" (divisor), "0" (low), "1" (umod));
+
+ *dividend = (((uint64_t)high) << 32) | low;
+ }
+#endif
+
+ return mod;
+}
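+
+/*
+ * Example (illustrative):
+ *
+ *   uint64_t val = 1000000123;
+ *   uint32_t rem = divmod64(&val, 1000000);
+ *
+ * afterwards val == 1000 and rem == 123, on both 32bit and 64bit builds.
+ */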
+
+#endif /* XTF_X86_DIV_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/**
+ * @file arch/x86/include/arch/exinfo.h
+ *
+ * An encapsulation of an x86 exception with error code
+ */
+#ifndef XTF_X86_EXINFO_H
+#define XTF_X86_EXINFO_H
+
+#include <arch/processor.h>
+
+/**
+ * Packed exception and error code information
+ *
+ * - Bottom 16 bits are error code
+ * - Next 8 bits are the entry vector
+ * - Top bit is set to disambiguate @#DE from no exception
+ */
+typedef unsigned int exinfo_t;
+
+#define EXINFO_EXPECTED (1u << 31)
+
+#define EXINFO(vec, ec) (EXINFO_EXPECTED | (((vec) & 0xff) << 16) | ((ec) & 0xffff))
+
+#define EXINFO_SYM(exc, ec) EXINFO(X86_EXC_ ## exc, ec)
+
+static inline unsigned int exinfo_vec(exinfo_t info)
+{
+ return (info >> 16) & 0xff;
+}
+
+static inline unsigned int exinfo_ec(exinfo_t info)
+{
+ return info & 0xffff;
+}
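+
+/*
+ * Example (illustrative): EXINFO_SYM(GP, 0) packs a #GP fault with error
+ * code 0.  exinfo_vec() on the result yields X86_EXC_GP (13), exinfo_ec()
+ * yields 0, and the value is nonzero because EXINFO_EXPECTED is set, which
+ * distinguishes it from "no exception".
+ */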
+
+#endif /* XTF_X86_EXINFO_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/**
+ * @file arch/x86/include/arch/idt.h
+ *
+ * %x86 IDT vector infrastructure.
+ */
+
+#ifndef XTF_X86_IDT_H
+#define XTF_X86_IDT_H
+
+/**
+ * Return to kernel mode.
+ *
+ * To enable easy transition between user and kernel mode for tests.
+ */
+#define X86_VEC_RET2KERN 0x20
+
+/**
+ * Available for test use.
+ */
+#define X86_VEC_AVAIL 0x21
+
+
+#ifndef __ASSEMBLY__
+
+/** A guest agnostic representation of IDT information. */
+struct xtf_idte
+{
+ unsigned long addr;
+ unsigned int cs, dpl;
+};
+
+/**
+ * Set up an IDT Entry, in a guest agnostic way.
+ *
+ * Construct an IDT Entry at the specified @p vector, using configuration
+ * provided in @p idte.
+ *
+ * @param vector Vector to set up.
+ * @param idte Details to set up.
+ * @returns 0 for HVM guests, hypercall result for PV guests.
+ */
+int xtf_set_idte(unsigned int vector,
+ struct xtf_idte *idte);
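+
+/*
+ * Example (illustrative; 'handler' is a placeholder entry point, and
+ * __KERN_CS comes from <arch/segment.h>):
+ *
+ *   struct xtf_idte idte = {
+ *       .addr = (unsigned long)handler,
+ *       .cs   = __KERN_CS,
+ *       .dpl  = 3,
+ *   };
+ *
+ *   xtf_set_idte(X86_VEC_AVAIL, &idte);
+ */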
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* XTF_X86_IDT_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+#ifndef XTF_X86_LIB_H
+#define XTF_X86_LIB_H
+
+#include <xtf/types.h>
+#include <xen/arch-x86/xen.h>
+#include <arch/desc.h>
+
+static inline uint64_t rdmsr(uint32_t idx)
+{
+ uint32_t lo, hi;
+
+ asm volatile("rdmsr": "=a" (lo), "=d" (hi): "c" (idx));
+
+ return (((uint64_t)hi) << 32) | lo;
+}
+
+static inline bool rdmsr_safe(uint32_t idx, uint64_t *val)
+{
+ uint32_t lo, hi, new_idx;
+
+ asm volatile("1: rdmsr; 2:"
+ _ASM_EXTABLE_HANDLER(1b, 2b, ex_rdmsr_safe)
+ : "=a" (lo), "=d" (hi), "=c" (new_idx)
+ : "c" (idx));
+
+ bool fault = idx != new_idx;
+
+ if ( !fault )
+ *val = (((uint64_t)hi) << 32) | lo;
+
+ return fault;
+}
+
+static inline void wrmsr(uint32_t idx, uint64_t val)
+{
+ asm volatile ("wrmsr":
+ : "c" (idx), "a" ((uint32_t)val),
+ "d" ((uint32_t)(val >> 32)));
+}
+
+static inline bool wrmsr_safe(uint32_t idx, uint64_t val)
+{
+ uint32_t new_idx;
+
+ asm volatile ("1: wrmsr; 2:"
+ _ASM_EXTABLE_HANDLER(1b, 2b, ex_wrmsr_safe)
+ : "=c" (new_idx)
+ : "c" (idx), "a" ((uint32_t)val),
+ "d" ((uint32_t)(val >> 32)));
+
+ return idx != new_idx;
+}
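+
+/*
+ * Example (illustrative; MSR_EFER comes from <arch/msr-index.h>):
+ *
+ *   uint64_t efer;
+ *   bool fault = rdmsr_safe(MSR_EFER, &efer);
+ *
+ * On a fault, 'efer' is left unmodified and 'fault' is true; otherwise
+ * 'efer' holds the MSR value.
+ */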
+
+static inline void cpuid(uint32_t leaf,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ asm volatile ("cpuid"
+ : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
+ : "0" (leaf));
+}
+
+static inline uint32_t cpuid_eax(uint32_t leaf)
+{
+ uint32_t eax, tmp;
+
+ cpuid(leaf, &eax, &tmp, &tmp, &tmp);
+
+ return eax;
+}
+
+static inline uint32_t cpuid_ebx(uint32_t leaf)
+{
+ uint32_t ebx, tmp;
+
+ cpuid(leaf, &tmp, &ebx, &tmp, &tmp);
+
+ return ebx;
+}
+
+static inline uint32_t cpuid_ecx(uint32_t leaf)
+{
+ uint32_t ecx, tmp;
+
+ cpuid(leaf, &tmp, &tmp, &ecx, &tmp);
+
+ return ecx;
+}
+
+static inline uint32_t cpuid_edx(uint32_t leaf)
+{
+ uint32_t edx, tmp;
+
+ cpuid(leaf, &tmp, &tmp, &tmp, &edx);
+
+ return edx;
+}
+
+static inline void pv_cpuid(uint32_t leaf,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ asm volatile (_ASM_XEN_FEP "cpuid"
+ : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
+ : "0" (leaf));
+}
+
+static inline void cpuid_count(uint32_t leaf, uint32_t subleaf,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ asm volatile ("cpuid"
+ : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
+ : "0" (leaf), "2" (subleaf));
+}
+
+static inline void pv_cpuid_count(uint32_t leaf, uint32_t subleaf,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ asm volatile (_ASM_XEN_FEP "cpuid"
+ : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
+ : "0" (leaf), "2" (subleaf));
+}
+
+static inline uint8_t inb(uint16_t port)
+{
+ uint8_t val;
+
+ asm volatile("inb %w1, %b0": "=a" (val): "Nd" (port));
+
+ return val;
+}
+
+static inline uint16_t inw(uint16_t port)
+{
+ uint16_t val;
+
+ asm volatile("inw %w1, %w0": "=a" (val): "Nd" (port));
+
+ return val;
+}
+
+static inline uint32_t inl(uint16_t port)
+{
+ uint32_t val;
+
+ asm volatile("inl %w1, %k0": "=a" (val): "Nd" (port));
+
+ return val;
+}
+
+static inline void outb(uint8_t val, uint16_t port)
+{
+ asm volatile("outb %b0, %w1": : "a" (val), "Nd" (port));
+}
+
+static inline void outw(uint16_t val, uint16_t port)
+{
+ asm volatile("outw %w0, %w1": : "a" (val), "Nd" (port));
+}
+
+static inline void outl(uint32_t val, uint16_t port)
+{
+ asm volatile("outl %k0, %w1": : "a" (val), "Nd" (port));
+}
+
+static inline unsigned int read_cs(void)
+{
+ unsigned int cs;
+
+ asm volatile ("mov %%cs, %0" : "=r" (cs));
+
+ return cs;
+}
+
+static inline unsigned int read_ds(void)
+{
+ unsigned int ds;
+
+ asm volatile ("mov %%ds, %0" : "=r" (ds));
+
+ return ds;
+}
+
+static inline unsigned int read_es(void)
+{
+ unsigned int es;
+
+ asm volatile ("mov %%es, %0" : "=r" (es));
+
+ return es;
+}
+
+static inline unsigned int read_fs(void)
+{
+ unsigned int fs;
+
+ asm volatile ("mov %%fs, %0" : "=r" (fs));
+
+ return fs;
+}
+
+static inline unsigned int read_gs(void)
+{
+ unsigned int gs;
+
+ asm volatile ("mov %%gs, %0" : "=r" (gs));
+
+ return gs;
+}
+
+static inline unsigned int read_ss(void)
+{
+ unsigned int ss;
+
+ asm volatile ("mov %%ss, %0" : "=r" (ss));
+
+ return ss;
+}
+
+static inline void write_cs(unsigned int cs)
+{
+ asm volatile ("push %0;"
+ "push $1f;"
+#if __x86_64__
+ "rex64 "
+#endif
+ "lret; 1:"
+ :: "qI" (cs));
+}
+
+static inline void write_ds(unsigned int ds)
+{
+ asm volatile ("mov %0, %%ds" :: "r" (ds));
+}
+
+static inline void write_es(unsigned int es)
+{
+ asm volatile ("mov %0, %%es" :: "r" (es));
+}
+
+static inline void write_fs(unsigned int fs)
+{
+ asm volatile ("mov %0, %%fs" :: "r" (fs));
+}
+
+static inline void write_gs(unsigned int gs)
+{
+ asm volatile ("mov %0, %%gs" :: "r" (gs));
+}
+
+static inline void write_ss(unsigned int ss)
+{
+ asm volatile ("mov %0, %%ss" :: "r" (ss));
+}
+
+static inline unsigned long read_dr6(void)
+{
+ unsigned long val;
+
+ asm volatile ("mov %%dr6, %0" : "=r" (val));
+
+ return val;
+}
+
+static inline unsigned long read_dr7(void)
+{
+ unsigned long val;
+
+ asm volatile ("mov %%dr7, %0" : "=r" (val));
+
+ return val;
+}
+
+static inline unsigned long read_cr0(void)
+{
+ unsigned long cr0;
+
+ asm volatile ("mov %%cr0, %0" : "=r" (cr0));
+
+ return cr0;
+}
+
+static inline unsigned long read_cr2(void)
+{
+ unsigned long cr2;
+
+ asm volatile ("mov %%cr2, %0" : "=r" (cr2));
+
+ return cr2;
+}
+
+static inline unsigned long read_cr3(void)
+{
+ unsigned long cr3;
+
+ asm volatile ("mov %%cr3, %0" : "=r" (cr3));
+
+ return cr3;
+}
+
+static inline unsigned long read_cr4(void)
+{
+ unsigned long cr4;
+
+ asm volatile ("mov %%cr4, %0" : "=r" (cr4));
+
+ return cr4;
+}
+
+static inline unsigned long read_cr8(void)
+{
+ unsigned long cr8;
+
+ asm volatile ("mov %%cr8, %0" : "=r" (cr8));
+
+ return cr8;
+}
+
+static inline void write_cr0(unsigned long cr0)
+{
+ asm volatile ("mov %0, %%cr0" :: "r" (cr0));
+}
+
+static inline void write_cr2(unsigned long cr2)
+{
+ asm volatile ("mov %0, %%cr2" :: "r" (cr2));
+}
+
+static inline void write_cr3(unsigned long cr3)
+{
+ asm volatile ("mov %0, %%cr3" :: "r" (cr3));
+}
+
+static inline void write_cr4(unsigned long cr4)
+{
+ asm volatile ("mov %0, %%cr4" :: "r" (cr4));
+}
+
+static inline void write_cr8(unsigned long cr8)
+{
+ asm volatile ("mov %0, %%cr8" :: "r" (cr8));
+}
+
+static inline void invlpg(const void *va)
+{
+ asm volatile ("invlpg (%0)" :: "r" (va));
+}
+
+static inline void lgdt(const desc_ptr *gdtr)
+{
+ asm volatile ("lgdt %0" :: "m" (*gdtr));
+}
+
+static inline void lidt(const desc_ptr *idtr)
+{
+ asm volatile ("lidt %0" :: "m" (*idtr));
+}
+
+static inline void lldt(unsigned int sel)
+{
+ asm volatile ("lldt %w0" :: "rm" (sel));
+}
+
+static inline void ltr(unsigned int sel)
+{
+ asm volatile ("ltr %w0" :: "rm" (sel));
+}
+
+static inline void sgdt(desc_ptr *gdtr)
+{
+ asm volatile ("sgdt %0" : "=m" (*gdtr));
+}
+
+static inline void sidt(desc_ptr *idtr)
+{
+ asm volatile ("sidt %0" : "=m" (*idtr));
+}
+
+static inline unsigned int sldt(void)
+{
+ unsigned int sel;
+
+ asm volatile ("sldt %0" : "=r" (sel));
+
+ return sel;
+}
+
+static inline unsigned int str(void)
+{
+ unsigned int sel;
+
+ asm volatile ("str %0" : "=r" (sel));
+
+ return sel;
+}
+
+static inline uint64_t xgetbv(uint32_t index)
+{
+ uint32_t feat_lo;
+ uint64_t feat_hi;
+
+ asm volatile ("xgetbv" : "=a" (feat_lo), "=d" (feat_hi)
+ : "c" (index) );
+
+ return feat_lo | (feat_hi << 32);
+}
+
+static inline void xsetbv(uint32_t index, uint64_t value)
+{
+ asm volatile ("xsetbv" :: "a" ((uint32_t)value), "d" (value >> 32),
+ "c" (index) );
+}
+
+static inline uint64_t read_xcr0(void)
+{
+ return xgetbv(0);
+}
+
+static inline void write_xcr0(uint64_t xcr0)
+{
+ xsetbv(0, xcr0);
+}
+
+#endif /* XTF_X86_LIB_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+#ifndef XTF_X86_MM_H
+#define XTF_X86_MM_H
+
+#include <xtf/types.h>
+
+#include <arch/page.h>
+
+#include <xen/xen.h>
+
+/*
+ * Terminology (inherited from Xen):
+ *
+ * GFN - Guest Frame Number
+ * What a guest writes into its pagetables.
+ * MFN - Machine Frame Number
+ * What Xen writes into its pagetables.
+ * PFN - Pseudophysical Frame Number
+ * A linear idea of a guest's physical address space.
+ *
+ * For HVM, PFN == GFN, and MFN is strictly irrelevant.
+ * For PV, MFN == GFN != PFN.
+ *
+ * XTF memory layout.
+ *
+ * Wherever possible, identity layout for simplicity.
+ *
+ * PV guests: VIRT_OFFSET is 0 which causes all linked virtual addresses to be
+ * contiguous in the pagetables created by the domain builder. Therefore,
+ * virt == pfn << PAGE_SHIFT for any pfn constructed by the domain builder.
+ *
+ * HVM guests: All memory from 0 to 4GB is identity mapped.
+ */
+
+static inline void *pfn_to_virt(unsigned long pfn)
+{
+ return (void *)(pfn << PAGE_SHIFT);
+}
+
+static inline unsigned long virt_to_pfn(const void *va)
+{
+ return ((unsigned long)va) >> PAGE_SHIFT;
+}
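+
+/*
+ * Example (illustrative): with the identity layout described above, pfn
+ * 0x1000 corresponds to the virtual address at 16MB:
+ *
+ *   pfn_to_virt(0x1000)        == _p(0x1000000)
+ *   virt_to_pfn(_p(0x1000000)) == 0x1000
+ */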
+
+#if defined(CONFIG_PV)
+
+#define m2p ((unsigned long *)MACH2PHYS_VIRT_START)
+extern struct start_info *start_info;
+
+static inline void *mfn_to_virt(unsigned long mfn)
+{
+ return pfn_to_virt(m2p[mfn]);
+}
+
+static inline void *maddr_to_virt(uint64_t maddr)
+{
+ return mfn_to_virt(maddr >> PAGE_SHIFT) + (maddr & ~PAGE_MASK);
+}
+
+static inline unsigned long pfn_to_mfn(unsigned long pfn)
+{
+ unsigned long *p2m = _p(start_info->mfn_list);
+
+ return p2m[pfn];
+}
+
+static inline unsigned long virt_to_mfn(const void *va)
+{
+ return pfn_to_mfn(virt_to_pfn(va));
+}
+
+#undef m2p
+
+#endif /* CONFIG_PV */
+
+static inline void *gfn_to_virt(unsigned long gfn)
+{
+#if defined(CONFIG_PV)
+ return mfn_to_virt(gfn);
+#else
+ return pfn_to_virt(gfn);
+#endif
+}
+
+static inline unsigned long virt_to_gfn(const void *va)
+{
+#if defined(CONFIG_PV)
+ return virt_to_mfn(va);
+#else
+ return virt_to_pfn(va);
+#endif
+}
+
+#endif /* XTF_X86_MM_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+#ifndef XTF_X86_MSR_INDEX_H
+#define XTF_X86_MSR_INDEX_H
+
+#include <xtf/numbers.h>
+
+#define MSR_INTEL_PLATFORM_INFO 0x000000ce
+#define _MSR_PLATFORM_INFO_CPUID_FAULTING 31
+#define MSR_PLATFORM_INFO_CPUID_FAULTING (1ULL << _MSR_PLATFORM_INFO_CPUID_FAULTING)
+
+#define MSR_INTEL_MISC_FEATURES_ENABLES 0x00000140
+#define _MSR_MISC_FEATURES_CPUID_FAULTING 0
+#define MSR_MISC_FEATURES_CPUID_FAULTING (1ULL << _MSR_MISC_FEATURES_CPUID_FAULTING)
+
+#define MSR_EFER 0xc0000080 /* Extended Feature register. */
+#define _EFER_SCE 0 /* SYSCALL Enable. */
+#define EFER_SCE (_AC(1, L) << _EFER_SCE)
+#define _EFER_LME 8 /* Long mode enable. */
+#define EFER_LME (_AC(1, L) << _EFER_LME)
+#define _EFER_LMA 10 /* Long mode Active. */
+#define EFER_LMA (_AC(1, L) << _EFER_LMA)
+#define _EFER_NXE 11 /* No-Execute Enable. */
+#define EFER_NXE (_AC(1, L) << _EFER_NXE)
+#define _EFER_SVME 12 /* Secure Virtual Machine Enable. */
+#define EFER_SVME (_AC(1, L) << _EFER_SVME)
+#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable. */
+#define EFER_LMSLE (_AC(1, L) << _EFER_LMSLE)
+#define _EFER_FFXSR 14 /* Fast FXSAVE/FXRSTOR. */
+#define EFER_FFXSR (_AC(1, L) << _EFER_FFXSR)
+#define _EFER_TCE 15 /* Translation Cache Extension. */
+#define EFER_TCE (_AC(1, L) << _EFER_TCE)
+
+#define MSR_FS_BASE 0xc0000100
+#define MSR_GS_BASE 0xc0000101
+#define MSR_SHADOW_GS_BASE 0xc0000102
+
+#endif /* XTF_X86_MSR_INDEX_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
+
--- /dev/null
+/**
+ * @file arch/x86/include/arch/page-pae.h
+ *
+ * Definitions and helpers for PAE pagetable handling.
+ */
+#ifndef XTF_X86_PAGE_PAE_H
+#define XTF_X86_PAGE_PAE_H
+
+/** PAE pagetable entries are 64 bits wide. */
+#define PAE_PTE_SIZE 8
+/** PAE pagetable entries are 64 bits wide. */
+#define PAE_PTE_ORDER 3
+
+/** PAE pagetables encode 9 bits of index. */
+#define PAE_PT_ORDER 9
+
+/** @{ */
+/** All PAE pagetables contain 512 entries. */
+#define PAE_L1_PT_ENTRIES (PAGE_SIZE / PAE_PTE_SIZE)
+#define PAE_L2_PT_ENTRIES (PAGE_SIZE / PAE_PTE_SIZE)
+#define PAE_L3_PT_ENTRIES (PAGE_SIZE / PAE_PTE_SIZE)
+#define PAE_L4_PT_ENTRIES (PAGE_SIZE / PAE_PTE_SIZE)
+
+/* Other than PAE32_L3, which has 4 entries. */
+#define PAE32_L3_ENTRIES 4
+/** @} */
+
+#define PAE_L1_PT_SHIFT 12
+#define PAE_L2_PT_SHIFT 21
+#define PAE_L3_PT_SHIFT 30
+#define PAE_L4_PT_SHIFT 39
+
+#ifndef __ASSEMBLY__
+
+/** Integer representation of a PTE. */
+typedef uint64_t pae_intpte_t;
+#define PAE_PRIpte "016"PRIx64
+
+static inline unsigned int pae_l1_table_offset(unsigned long va)
+{
+ return (va >> PAE_L1_PT_SHIFT) & (PAE_L1_PT_ENTRIES - 1);
+}
+static inline unsigned int pae_l2_table_offset(unsigned long va)
+{
+ return (va >> PAE_L2_PT_SHIFT) & (PAE_L2_PT_ENTRIES - 1);
+}
+static inline unsigned int pae_l3_table_offset(unsigned long va)
+{
+ return (va >> PAE_L3_PT_SHIFT) & (PAE_L3_PT_ENTRIES - 1);
+}
+#ifdef __x86_64__
+static inline unsigned int pae_l4_table_offset(unsigned long va)
+{
+ return (va >> PAE_L4_PT_SHIFT) & (PAE_L4_PT_ENTRIES - 1);
+}
+#endif /* __x86_64__ */
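+
+/*
+ * Example (illustrative): decomposing the virtual address 0xc0201000 under
+ * PAE paging:
+ *
+ *   pae_l3_table_offset(0xc0201000) == 3
+ *   pae_l2_table_offset(0xc0201000) == 1
+ *   pae_l1_table_offset(0xc0201000) == 1
+ */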
+
+#endif /* __ASSEMBLY__ */
+#endif /* XTF_X86_PAGE_PAE_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/**
+ * @file arch/x86/include/arch/page-pse.h
+ *
+ * Definitions and helpers for PSE pagetable handling.
+ */
+#ifndef XTF_X86_PAGE_PSE_H
+#define XTF_X86_PAGE_PSE_H
+
+/** PSE pagetable entries are 32 bits wide. */
+#define PSE_PTE_SIZE 4
+/** PSE pagetable entries are 32 bits wide. */
+#define PSE_PTE_ORDER 2
+
+/** PSE pagetables encode 10 bits of index. */
+#define PSE_PT_ORDER 10
+
+/** @{ */
+/** All PSE pagetables contain 1024 entries. */
+#define PSE_L1_PT_ENTRIES (PAGE_SIZE / PSE_PTE_SIZE)
+#define PSE_L2_PT_ENTRIES (PAGE_SIZE / PSE_PTE_SIZE)
+/** @} */
+
+#define PSE_L1_PT_SHIFT 12
+#define PSE_L2_PT_SHIFT 22
+
+#ifndef __ASSEMBLY__
+
+/** Integer representation of a PTE. */
+typedef uint32_t pse_intpte_t;
+#define PSE_PRIpte "08"PRIx32
+
+static inline unsigned int pse_l1_table_offset(unsigned long va)
+{
+ return (va >> PSE_L1_PT_SHIFT) & (PSE_L1_PT_ENTRIES - 1);
+}
+static inline unsigned int pse_l2_table_offset(unsigned long va)
+{
+ return (va >> PSE_L2_PT_SHIFT) & (PSE_L2_PT_ENTRIES - 1);
+}
+
+static inline uint32_t fold_pse36(uint64_t val)
+{
+ return (val & ~(0x1ffULL << 13)) | ((val & (0x1ffULL << 32)) >> (32 - 13));
+}
+
+static inline uint64_t unfold_pse36(uint32_t val)
+{
+ return (val & ~(0x1ffULL << 13)) | ((val & (0x1ffULL << 13)) << (32 - 13));
+}
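+
+/*
+ * Example (illustrative): physical address bits 32-40 of a PSE36 superpage
+ * mapping are folded into bits 13-21 of the 32bit PTE, and unfolded again
+ * on the way out:
+ *
+ *   fold_pse36(0x100000000ULL) == 0x2000
+ *   unfold_pse36(0x2000)       == 0x100000000ULL
+ */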
+
+#endif /* __ASSEMBLY__ */
+#endif /* XTF_X86_PAGE_PSE_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+#ifndef XTF_X86_PAGE_H
+#define XTF_X86_PAGE_H
+
+#include <xtf/numbers.h>
+
+/*
+ * Nomenclature inherited from Xen.
+ */
+
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1, L) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+#define PADDR_BITS 52
+#define PADDR_MASK ((_AC(1, ULL) << PADDR_BITS) - 1)
+
+#include "page-pae.h"
+#include "page-pse.h"
+
+#define PAGE_ORDER_4K 0
+#define PAGE_ORDER_2M 9
+#define PAGE_ORDER_4M 10
+#define PAGE_ORDER_1G 18
+
+#define _PAGE_PRESENT 0x0001
+#define _PAGE_RW 0x0002
+#define _PAGE_USER 0x0004
+#define _PAGE_PWT 0x0008
+#define _PAGE_PCD 0x0010
+#define _PAGE_ACCESSED 0x0020
+#define _PAGE_DIRTY 0x0040
+#define _PAGE_AD (_PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_PSE 0x0080
+#define _PAGE_PAT 0x0080
+#define _PAGE_GLOBAL 0x0100
+#define _PAGE_AVAIL 0x0e00
+#define _PAGE_PSE_PAT 0x1000
+#define _PAGE_NX (_AC(1, ULL) << 63)
+
+/* Shortened flags for use with PF_SYM(). */
+#define _PAGE_P _PAGE_PRESENT
+#define _PAGE_U _PAGE_USER
+#define _PAGE_A _PAGE_ACCESSED
+#define _PAGE_D _PAGE_DIRTY
+
+#if CONFIG_PAGING_LEVELS == 2 /* PSE Paging */
+
+#define PTE_SIZE PSE_PTE_SIZE
+#define PTE_ORDER PSE_PTE_ORDER
+
+#define PT_ORDER PSE_PT_ORDER
+
+#define L1_PT_SHIFT PSE_L1_PT_SHIFT
+#define L2_PT_SHIFT PSE_L2_PT_SHIFT
+
+#define L1_PT_ENTRIES PSE_L1_PT_ENTRIES
+#define L2_PT_ENTRIES PSE_L2_PT_ENTRIES
+
+#else /* CONFIG_PAGING_LEVELS == 2 */ /* PAE Paging */
+
+#define PTE_SIZE PAE_PTE_SIZE
+#define PTE_ORDER PAE_PTE_ORDER
+
+#define PT_ORDER PAE_PT_ORDER
+
+#define L1_PT_SHIFT PAE_L1_PT_SHIFT
+#define L2_PT_SHIFT PAE_L2_PT_SHIFT
+
+#define L1_PT_ENTRIES PAE_L1_PT_ENTRIES
+#define L2_PT_ENTRIES PAE_L2_PT_ENTRIES
+
+#endif /* !CONFIG_PAGING_LEVELS == 2 */
+
+#if CONFIG_PAGING_LEVELS >= 3 /* PAE Paging */
+
+#define L3_PT_SHIFT PAE_L3_PT_SHIFT
+
+#define L3_PT_ENTRIES PAE_L3_PT_ENTRIES
+
+#endif /* CONFIG_PAGING_LEVELS >= 3 */
+
+#if CONFIG_PAGING_LEVELS >= 4 /* PAE Paging */
+
+#define L4_PT_SHIFT PAE_L4_PT_SHIFT
+
+#define L4_PT_ENTRIES PAE_L4_PT_ENTRIES
+
+#endif /* CONFIG_PAGING_LEVELS >= 4 */
+
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Always consider "physical" addresses to be 64bits wide, even in 32bit mode.
+ */
+typedef uint64_t paddr_t;
+#define PRIpaddr "016"PRIx64
+
+#if CONFIG_PAGING_LEVELS > 0 /* Some form of pagetables. */
+
+#if CONFIG_PAGING_LEVELS == 2 /* PSE Paging */
+
+typedef pse_intpte_t intpte_t;
+#define PRIpte PSE_PRIpte
+
+static inline unsigned int l1_table_offset(unsigned long va)
+{
+ return pse_l1_table_offset(va);
+}
+static inline unsigned int l2_table_offset(unsigned long va)
+{
+ return pse_l2_table_offset(va);
+}
+
+#else /* CONFIG_PAGING_LEVELS == 2 */ /* PAE Paging */
+
+typedef pae_intpte_t intpte_t;
+#define PRIpte PAE_PRIpte
+
+static inline unsigned int l1_table_offset(unsigned long va)
+{
+ return pae_l1_table_offset(va);
+}
+static inline unsigned int l2_table_offset(unsigned long va)
+{
+ return pae_l2_table_offset(va);
+}
+
+#endif /* !CONFIG_PAGING_LEVELS == 2 */
+
+#if CONFIG_PAGING_LEVELS >= 3 /* PAE Paging */
+
+static inline unsigned int l3_table_offset(unsigned long va)
+{
+ return pae_l3_table_offset(va);
+}
+
+#endif /* CONFIG_PAGING_LEVELS >= 3 */
+
+#if CONFIG_PAGING_LEVELS >= 4 /* PAE Paging */
+
+static inline unsigned int l4_table_offset(unsigned long va)
+{
+ return pae_l4_table_offset(va);
+}
+
+#endif /* CONFIG_PAGING_LEVELS >= 4 */
+
+#else /* CONFIG_PAGING_LEVELS > 0 */
+
+/* Enough compatibility to compile in unpaged environments. */
+typedef unsigned long intpte_t;
+#define PRIpte "08lx"
+
+#endif
+
+#ifdef CONFIG_HVM
+
+extern pae_intpte_t pae_l1_identmap[PAE_L1_PT_ENTRIES];
+extern pae_intpte_t pae_l2_identmap[4 * PAE_L2_PT_ENTRIES];
+extern pae_intpte_t pae_l3_identmap[PAE_L3_PT_ENTRIES];
+extern pae_intpte_t pae_l4_identmap[PAE_L4_PT_ENTRIES];
+extern pae_intpte_t pae32_l3_identmap[PAE32_L3_ENTRIES];
+
+extern pse_intpte_t pse_l1_identmap[PSE_L1_PT_ENTRIES];
+extern pse_intpte_t pse_l2_identmap[PSE_L2_PT_ENTRIES];
+
+/* Aliases of the live tables (PAE or PSE as appropriate). */
+extern intpte_t l1_identmap[L1_PT_ENTRIES];
+#if CONFIG_PAGING_LEVELS >= 3
+extern intpte_t l2_identmap[4 * L2_PT_ENTRIES];
+#else
+extern intpte_t l2_identmap[L2_PT_ENTRIES];
+#endif
+
+/* Alias of the pagetable %cr3 points at. */
+extern intpte_t cr3_target[];
+
+#endif /* CONFIG_HVM */
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* XTF_X86_PAGE_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+#ifndef XTF_X86_PAGETABLE_H
+#define XTF_X86_PAGETABLE_H
+
+#include <arch/mm.h>
+
+#if CONFIG_PAGING_LEVELS > 0
+
+static inline paddr_t pte_to_paddr(intpte_t pte)
+{
+ return pte & PADDR_MASK & PAGE_MASK;
+}
+
+static inline intpte_t pte_from_paddr(paddr_t paddr, uint64_t flags)
+{
+ return ((paddr & (PADDR_MASK & PAGE_MASK)) |
+ (flags & ~(PADDR_MASK & PAGE_MASK)));
+}
+
+static inline intpte_t pte_from_gfn(unsigned long gfn, uint64_t flags)
+{
+ return pte_from_paddr((paddr_t)gfn << PAGE_SHIFT, flags);
+}
+
+static inline intpte_t pte_from_virt(const void *va, uint64_t flags)
+{
+ return pte_from_paddr((paddr_t)virt_to_gfn(va) << PAGE_SHIFT, flags);
+}
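+
+/*
+ * Example (illustrative): a present, writeable mapping of the frame at
+ * physical address 0x1234000:
+ *
+ *   pte_from_paddr(0x1234000, _PAGE_PRESENT | _PAGE_RW) == 0x1234003
+ *   pte_to_paddr(0x1234003)                             == 0x1234000
+ */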
+
+#else /* CONFIG_PAGING_LEVELS > 0 */
+
+/* Enough compatibility to compile in unpaged environments. */
+extern paddr_t pte_to_paddr(intpte_t pte);
+extern intpte_t pte_from_paddr(paddr_t paddr, uint64_t flags);
+extern intpte_t pte_from_gfn(unsigned long gfn, uint64_t flags);
+extern intpte_t pte_from_virt(const void *va, uint64_t flags);
+
+#endif
+
+#endif /* XTF_X86_PAGETABLE_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+#ifndef XTF_X86_PROCESSOR_H
+#define XTF_X86_PROCESSOR_H
+
+/*
+ * EFLAGS bits.
+ */
+#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
+#define X86_EFLAGS_MBS 0x00000002 /* Resvd bit */
+#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
+#define X86_EFLAGS_AF 0x00000010 /* Auxiliary Carry Flag */
+#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
+#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
+#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
+#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
+#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
+#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
+#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
+#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
+#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
+#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
+#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
+#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
+#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
+#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
+
+/*
+ * CPU flags in CR0.
+ */
+#define X86_CR0_PE 0x00000001 /* Enable Protected Mode (RW) */
+#define X86_CR0_MP 0x00000002 /* Monitor Coprocessor (RW) */
+#define X86_CR0_EM 0x00000004 /* Require FPU Emulation (RO) */
+#define X86_CR0_TS 0x00000008 /* Task Switched (RW) */
+#define X86_CR0_ET 0x00000010 /* Extension type (RO) */
+#define X86_CR0_NE 0x00000020 /* Numeric Error Reporting (RW) */
+#define X86_CR0_WP 0x00010000 /* Supervisor Write Protect (RW) */
+#define X86_CR0_AM 0x00040000 /* Alignment Checking (RW) */
+#define X86_CR0_NW 0x20000000 /* Not Write-Through (RW) */
+#define X86_CR0_CD 0x40000000 /* Cache Disable (RW) */
+#define X86_CR0_PG 0x80000000 /* Paging (RW) */
+
+/*
+ * CPU features in CR4.
+ */
+#define X86_CR4_VME 0x00000001 /* VM86 extensions */
+#define X86_CR4_PVI 0x00000002 /* Virtual interrupts flag */
+#define X86_CR4_TSD 0x00000004 /* Disable time stamp at ipl 3 */
+#define X86_CR4_DE 0x00000008 /* Debugging extensions */
+#define X86_CR4_PSE 0x00000010 /* Page size extensions */
+#define X86_CR4_PAE 0x00000020 /* Physical address extensions */
+#define X86_CR4_MCE 0x00000040 /* Machine check */
+#define X86_CR4_PGE 0x00000080 /* Global pages */
+#define X86_CR4_PCE 0x00000100 /* Performance counters at ipl 3 */
+#define X86_CR4_OSFXSR 0x00000200 /* Fast FPU save and restore */
+#define X86_CR4_OSXMMEXCPT 0x00000400 /* Unmasked SSE exceptions */
+#define X86_CR4_UMIP 0x00000800 /* UMIP */
+#define X86_CR4_VMXE 0x00002000 /* VMX */
+#define X86_CR4_SMXE 0x00004000 /* SMX */
+#define X86_CR4_FSGSBASE 0x00010000 /* {rd,wr}{fs,gs}base */
+#define X86_CR4_PCIDE 0x00020000 /* PCID */
+#define X86_CR4_OSXSAVE 0x00040000 /* XSAVE/XRSTOR */
+#define X86_CR4_SMEP 0x00100000 /* SMEP */
+#define X86_CR4_SMAP 0x00200000 /* SMAP */
+
+/*
+ * DR6 status bits.
+ */
+#define X86_DR6_B0 (1u << 0) /* Breakpoint 0 triggered */
+#define X86_DR6_B1 (1u << 1) /* Breakpoint 1 triggered */
+#define X86_DR6_B2 (1u << 2) /* Breakpoint 2 triggered */
+#define X86_DR6_B3 (1u << 3) /* Breakpoint 3 triggered */
+#define X86_DR6_BD (1u << 13) /* Debug register accessed */
+#define X86_DR6_BS (1u << 14) /* Single step */
+#define X86_DR6_BT (1u << 15) /* Task switch */
+
+/*
+ * CPU features in XCR0.
+ */
+#define _XSTATE_FP 0
+#define XSTATE_FP (1ULL << _XSTATE_FP)
+#define _XSTATE_SSE 1
+#define XSTATE_SSE (1ULL << _XSTATE_SSE)
+#define _XSTATE_YMM 2
+#define XSTATE_YMM (1ULL << _XSTATE_YMM)
+#define _XSTATE_BNDREGS 3
+#define XSTATE_BNDREGS (1ULL << _XSTATE_BNDREGS)
+#define _XSTATE_BNDCSR 4
+#define XSTATE_BNDCSR (1ULL << _XSTATE_BNDCSR)
+#define _XSTATE_OPMASK 5
+#define XSTATE_OPMASK (1ULL << _XSTATE_OPMASK)
+#define _XSTATE_ZMM 6
+#define XSTATE_ZMM (1ULL << _XSTATE_ZMM)
+#define _XSTATE_HI_ZMM 7
+#define XSTATE_HI_ZMM (1ULL << _XSTATE_HI_ZMM)
+#define _XSTATE_PKRU 9
+#define XSTATE_PKRU (1ULL << _XSTATE_PKRU)
+#define _XSTATE_LWP 62
+#define XSTATE_LWP (1ULL << _XSTATE_LWP)
+
+/*
+ * Exception mnemonics.
+ */
+#define X86_EXC_DE 0 /* Divide Error. */
+#define X86_EXC_DB 1 /* Debug Exception. */
+#define X86_EXC_NMI 2 /* NMI. */
+#define X86_EXC_BP 3 /* Breakpoint. */
+#define X86_EXC_OF 4 /* Overflow. */
+#define X86_EXC_BR 5 /* BOUND Range. */
+#define X86_EXC_UD 6 /* Invalid Opcode. */
+#define X86_EXC_NM 7 /* Device Not Available. */
+#define X86_EXC_DF 8 /* Double Fault. */
+#define X86_EXC_CSO 9 /* Coprocessor Segment Overrun. */
+#define X86_EXC_TS 10 /* Invalid TSS. */
+#define X86_EXC_NP 11 /* Segment Not Present. */
+#define X86_EXC_SS 12 /* Stack-Segment Fault. */
+#define X86_EXC_GP 13 /* General Protection Fault. */
+#define X86_EXC_PF 14 /* Page Fault. */
+#define X86_EXC_SPV 15 /* PIC Spurious Interrupt Vector. */
+#define X86_EXC_MF 16 /* Maths fault (x87 FPU). */
+#define X86_EXC_AC 17 /* Alignment Check. */
+#define X86_EXC_MC 18 /* Machine Check. */
+#define X86_EXC_XM 19 /* SIMD Exception. */
+#define X86_EXC_VE 20 /* Virtualisation Exception. */
+
+/* Bitmap of exceptions which have error codes. */
+#define X86_EXC_HAVE_EC ((1 << X86_EXC_DF) | (1 << X86_EXC_TS) | \
+ (1 << X86_EXC_NP) | (1 << X86_EXC_SS) | \
+ (1 << X86_EXC_GP) | (1 << X86_EXC_PF) | \
+ (1 << X86_EXC_AC))
+
+/* Bitmap of exceptions which are classified as faults. */
+#define X86_EXC_FAULTS ((1 << X86_EXC_DE) | (1 << X86_EXC_BR) | \
+ (1 << X86_EXC_UD) | (1 << X86_EXC_NM) | \
+ (1 << X86_EXC_CSO) | (1 << X86_EXC_TS) | \
+ (1 << X86_EXC_NP) | (1 << X86_EXC_SS) | \
+ (1 << X86_EXC_GP) | (1 << X86_EXC_PF) | \
+ (1 << X86_EXC_MF) | (1 << X86_EXC_AC) | \
+ (1 << X86_EXC_XM) | (1 << X86_EXC_VE))
+
+/* Bitmap of exceptions which are classified as interrupts. */
+#define X86_EXC_INTERRUPTS (1 << X86_EXC_NMI)
+
+/* Bitmap of exceptions which are classified as traps. */
+#define X86_EXC_TRAPS ((1 << X86_EXC_BP) | (1 << X86_EXC_OF))
+
+/* Bitmap of exceptions which are classified as aborts. */
+#define X86_EXC_ABORTS ((1 << X86_EXC_DF) | (1 << X86_EXC_MC))
+
+/* Number of reserved vectors for exceptions. */
+#define X86_NR_RESERVED_VECTORS 32
+
+/*
+ * Error Code mnemonics.
+ */
+/* Segment-based Error Code - architecturally defined. */
+#define X86_EC_EXT (1U << 0) /* External event. */
+#define X86_EC_IDT (1U << 1) /* Descriptor Location. IDT, or LDT/GDT */
+#define X86_EC_TI (1U << 2) /* Only if !IDT. LDT or GDT. */
+
+/* Segment-based Error Code - supplemental constants. */
+#define X86_EC_TABLE_MASK (3 << 1)
+#define X86_EC_SEL_SHIFT 3
+#define X86_EC_SEL_MASK (~0U << X86_EC_SEL_SHIFT)
+#define X86_EC_GDT 0
+#define X86_EC_LDT X86_EC_TI
+
+/* Pagefault Error Code - architecturally defined. */
+#define X86_PFEC_PRESENT (1U << 0)
+#define X86_PFEC_WRITE (1U << 1)
+#define X86_PFEC_USER (1U << 2)
+#define X86_PFEC_RSVD (1U << 3)
+#define X86_PFEC_INSN (1U << 4)
+#define X86_PFEC_PK (1U << 5)
+
+/*
+ * Selector mnemonics.
+ */
+/* Architecturally defined. */
+#define X86_SEL_TI (1U << 2) /* Table Indicator. */
+
+/* Supplemental constants. */
+#define X86_SEL_RPL_MASK 3 /* RPL is the bottom two bits. */
+#define X86_SEL_GDT 0
+#define X86_SEL_LDT X86_SEL_TI
+
+#endif /* XTF_X86_PROCESSOR_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+#ifndef XTF_X86_REGS_H
+#define XTF_X86_REGS_H
+
+#include <xtf/types.h>
+
+#if defined(__i386__)
+
+#define DECL_REG(n) \
+ union { uint32_t e ## n; unsigned long n; }
+#define _DECL_REG(n) \
+ union { uint32_t _e ## n; unsigned long _ ## n; }
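+
+/*
+ * e.g. DECL_REG(ax) expands to
+ *
+ *   union { uint32_t eax; unsigned long ax; }
+ *
+ * so regs->eax and regs->ax name the same storage.
+ */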
+
+struct cpu_regs {
+ DECL_REG(bp);
+ DECL_REG(bx);
+ DECL_REG(ax);
+ DECL_REG(cx);
+ DECL_REG(dx);
+ DECL_REG(si);
+ DECL_REG(di);
+
+ uint32_t entry_vector;
+ uint32_t error_code;
+
+/* Hardware exception frame. */
+ DECL_REG(ip);
+ uint16_t cs, _pad1[1];
+ DECL_REG(flags);
+ _DECL_REG(sp); /* Won't be valid if stack */
+ uint16_t _ss, _pad0[1]; /* switch didn't occur. */
+/* Top of stack. */
+};
+
+#elif defined(__x86_64__)
+
+#define DECL_REG(n) \
+ union { uint64_t r ## n; uint32_t e ## n; unsigned long n; }
+#define _DECL_REG(n) \
+ union { uint64_t _r ## n; uint32_t _e ## n; unsigned long _ ## n; }
+
+struct cpu_regs {
+ uint64_t r15;
+ uint64_t r14;
+ uint64_t r13;
+ uint64_t r12;
+ DECL_REG(bp);
+ DECL_REG(bx);
+ uint64_t r11;
+ uint64_t r10;
+ uint64_t r9;
+ uint64_t r8;
+ DECL_REG(ax);
+ DECL_REG(cx);
+ DECL_REG(dx);
+ DECL_REG(si);
+ DECL_REG(di);
+
+ uint32_t error_code;
+ uint32_t entry_vector;
+
+/* Hardware exception frame. */
+ DECL_REG(ip);
+ uint16_t cs, _pad1[3];
+ DECL_REG(flags);
+ _DECL_REG(sp);
+ uint16_t _ss, _pad0[3];
+/* Top of stack. */
+};
+
+#endif /* __i386__ / __x86_64__ */
+
+#endif /* XTF_X86_REGS_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+#ifndef XTF_X86_SEGMENT_H
+#define XTF_X86_SEGMENT_H
+
+#include <xen/arch-x86/xen.h>
+
+/*
+ * GDT layout:
+ *
+ * For simplicity, the GDT is shared as much as possible between different
+ * environments.
+ *
+ * 0 - null
+ * 1 - 64bit supervisor code
+ * 2 - 32bit supervisor code
+ * 3 - 32bit supervisor data
+ * 4 - 64bit userspace code
+ * 5 - 32bit userspace code
+ * 6 - 32bit userspace data
+ * 7/8 - TSS (two slots in long mode)
+ * 8 - DF TSS (32bit only)
+ *
+ * 9-12 - Available for test use
+ */
+
+#define GDTE_CS64_DPL0 1
+#define GDTE_CS32_DPL0 2
+#define GDTE_DS32_DPL0 3
+#define GDTE_CS64_DPL3 4
+#define GDTE_CS32_DPL3 5
+#define GDTE_DS32_DPL3 6
+
+#define GDTE_TSS 7
+#define GDTE_TSS_DF 8
+
+#define GDTE_AVAIL0 9
+#define GDTE_AVAIL1 10
+#define GDTE_AVAIL2 11
+#define GDTE_AVAIL3 12
+
+#define NR_GDT_ENTRIES 13
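+
+/*
+ * Example (illustrative): a selector for one of the test-owned entries is
+ * its GDT index scaled by 8, optionally with an RPL in the low two bits:
+ *
+ *   GDTE_AVAIL0 * 8     == 0x48   (RPL0)
+ *   GDTE_AVAIL0 * 8 + 3 == 0x4b   (RPL3)
+ */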
+
+/*
+ * HVM guests use the GDT directly.
+ */
+#if defined(CONFIG_HVM)
+
+#ifdef __x86_64__
+
+#define __KERN_CS (GDTE_CS64_DPL0 * 8)
+#define __KERN_DS (0)
+#define __USER_CS (GDTE_CS64_DPL3 * 8 + 3)
+#define __USER_DS (GDTE_DS32_DPL3 * 8 + 3)
+
+#else /* __x86_64__ */
+
+#define __KERN_CS (GDTE_CS32_DPL0 * 8)
+#define __KERN_DS (GDTE_DS32_DPL0 * 8)
+#define __USER_CS (GDTE_CS32_DPL3 * 8 + 3)
+#define __USER_DS (GDTE_DS32_DPL3 * 8 + 3)
+
+#endif /* __x86_64__ */
+
+#endif /* CONFIG_HVM */
+
+/*
+ * PV guests by default use the Xen ABI-provided selectors.
+ */
+#if defined(CONFIG_PV)
+
+#ifdef __x86_64__
+/*
+ * 64bit PV guest kernels run in cpl3, but exception frames generated by Xen
+ * report cpl0 when interrupting kernel mode. Trim the kernel selectors down
+ * to rpl0 so they match the exception frames; Xen will take care of bumping
+ * rpl back to 3 when required.
+ *
+ * In Long mode, it is permitted to have NULL selectors for the plain data
+ * segment selectors (this is expressed in the Xen ABI), but not for %ss. As
+ * __{KERN,USER}_DS are used for all data selectors including %ss, use
+ * FLAT_RING3_SS64 rather than FLAT_RING3_DS64.
+ */
+#define __KERN_CS (FLAT_RING3_CS64 & ~3)
+#define __KERN_DS (FLAT_RING3_SS64 & ~3)
+#define __USER_CS FLAT_RING3_CS64
+#define __USER_DS FLAT_RING3_SS64
+
+#else /* __x86_64__ */
+
+#define __KERN_CS FLAT_RING1_CS
+#define __KERN_DS FLAT_RING1_DS
+#define __USER_CS FLAT_RING3_CS
+#define __USER_DS FLAT_RING3_DS
+
+#endif /* __x86_64__ */
+
+#endif /* CONFIG_PV */
+
+#endif /* XTF_X86_SEGMENT_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/**
+ * @file arch/x86/include/arch/symbolic-const.h
+ *
+ * Macros for creating constants using mnemonics.
+ */
+#ifndef XTF_X86_SYMBOLIC_CONST_H
+#define XTF_X86_SYMBOLIC_CONST_H
+
+#include <xtf/macro_magic.h>
+
+#include <arch/desc.h>
+#include <arch/processor.h>
+
+/**
+ * Tokenise and OR together.
+ *
+ * For each variadic parameter, tokenise with 't' and OR together.
+ *
+ * @param t Common stem partial token.
+ * @param ... Partial tokens.
+ *
+ * Example:
+ * <pre>
+ * TOK_OR(t, x, y) => (t ## x | t ## y)
+ * TOK_OR(t, x, y, z) => (t ## x | t ## y | t ## z)
+ * </pre>
+ */
+/** @cond */
+#define TOK_OR0(t) (0)
+#define TOK_OR1(t, x) (t ## x)
+#define TOK_OR2(t, x, ...) (t ## x | TOK_OR1(t, ##__VA_ARGS__))
+#define TOK_OR3(t, x, ...) (t ## x | TOK_OR2(t, ##__VA_ARGS__))
+#define TOK_OR4(t, x, ...) (t ## x | TOK_OR3(t, ##__VA_ARGS__))
+#define TOK_OR5(t, x, ...) (t ## x | TOK_OR4(t, ##__VA_ARGS__))
+#define TOK_OR6(t, x, ...) (t ## x | TOK_OR5(t, ##__VA_ARGS__))
+#define TOK_OR7(t, x, ...) (t ## x | TOK_OR6(t, ##__VA_ARGS__))
+#define TOK_OR8(t, x, ...) (t ## x | TOK_OR7(t, ##__VA_ARGS__))
+#define TOK_OR9(t, x, ...) (t ## x | TOK_OR8(t, ##__VA_ARGS__))
+#define TOK_OR10(t, x, ...) (t ## x | TOK_OR9(t, ##__VA_ARGS__))
+#define TOK_OR11(t, x, ...) (t ## x | TOK_OR10(t, ##__VA_ARGS__))
+/** @endcond */
+#define TOK_OR(t, ...) VAR_MACRO_C1(TOK_OR, t, ##__VA_ARGS__)
+
+/**
+ * Initialise an LDT/GDT entry using SEG_ATTR_ mnemonics.
+ *
+ * @param base Segment base.
+ * @param limit Segment limit.
+ * @param ... Partial SEG_ATTR_ tokens for attributes.
+ *
+ * Example usage:
+ * - INIT_GDTE_SYM(0, 0xfffff, P)
+ * - uses @ref SEG_ATTR_P
+ *
+ * - INIT_GDTE_SYM(0, 0xfffff, CODE, L)
+ * - uses @ref SEG_ATTR_CODE and @ref SEG_ATTR_L
+ */
+#define INIT_GDTE_SYM(base, limit, ...) \
+ INIT_GDTE(base, limit, TOK_OR(SEG_ATTR_, ##__VA_ARGS__))
+
+/**
+ * Create a selector based error code using X86_EC_ mnemonics.
+ *
+ * @param sel Selector value.
+ * @param ... Partial X86_EC_ tokens.
+ *
+ * Example usage:
+ * - SEL_EC_SYM(0, GDT)
+ * - Uses @ref X86_EC_GDT.
+ *
+ * - SEL_EC_SYM(0, IDT, EXT)
+ * - Uses @ref X86_EC_IDT and @ref X86_EC_EXT.
+ */
+#define SEL_EC_SYM(sel, ...) (sel | TOK_OR(X86_EC_, ##__VA_ARGS__))
+
+/**
+ * Create an exception selector based error code using mnemonics, with
+ * implicit @ref X86_EC_IDT.
+ *
+ * @param exc Partial X86_EXC_ token for selector.
+ * @param ... Partial X86_EC_ tokens.
+ *
+ * Example usage:
+ * - EXC_EC_SYM(DE)
+ * - Uses @ref X86_EXC_DE and @ref X86_EC_IDT.
+ *
+ * - EXC_EC_SYM(DB, EXT)
+ * - Uses @ref X86_EXC_DB, @ref X86_EC_IDT and @ref X86_EC_EXT.
+ */
+#define EXC_EC_SYM(exc, ...) \
+ SEL_EC_SYM(((X86_EXC_ ## exc) << 3), IDT, ##__VA_ARGS__)
+
+/**
+ * Create pagetable entry flags based on mnemonics.
+ *
+ * @param ... Partial _PAGE_ tokens.
+ *
+ * Example usage:
+ * - PF_SYM(AD, U, RW, P)
+ * - Accessed, Dirty, User, Writeable, Present.
+ */
+#define PF_SYM(...) TOK_OR(_PAGE_, ##__VA_ARGS__)
+
+#endif /* XTF_X86_SYMBOLIC_CONST_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+#ifndef XTF_X86_TRAPS_H
+#define XTF_X86_TRAPS_H
+
+#include <xtf/compiler.h>
+#include <arch/regs.h>
+#include <arch/page.h>
+
+/*
+ * Arch-specific function to initialise the exception entry points, etc.
+ */
+void arch_init_traps(void);
+
+/*
+ * Arch-specific function to quiesce the domain, in the event that a
+ * shutdown(crash) hypercall has not succeeded.
+ */
+void __noreturn arch_crash_hard(void);
+
+/*
+ * Return the correct %ss/%esp from an exception. In 32bit, if no stack switch
+ * occurs, the exception frame doesn't contain this information.
+ */
+unsigned long cpu_regs_sp(const struct cpu_regs *regs);
+unsigned int cpu_regs_ss(const struct cpu_regs *regs);
+
+extern uint8_t boot_stack[3 * PAGE_SIZE];
+
+#if defined(CONFIG_PV)
+#include <xen/xen.h>
+
+extern struct start_info *start_info;
+#endif
+
+#endif /* XTF_X86_TRAPS_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+#ifndef XTF_X86_32_HYPERCALL_H
+#define XTF_X86_32_HYPERCALL_H
+
+/*
+ * Hypercall primitives for 32bit
+ *
+ * Inputs: %ebx, %ecx, %edx, %esi, %edi, %ebp (arguments 1-6)
+ */
+
+#define _hypercall32_1(type, hcall, a1) \
+ ({ \
+ long __res, __ign1; \
+ asm volatile ( \
+ "call hypercall_page + %c[offset]" \
+ : "=a" (__res), "=b" (__ign1) \
+ : [offset] "i" (hcall * 32), \
+ "1" ((long)(a1)) \
+ : "memory" ); \
+ (type)__res; \
+ })
+
+#define _hypercall32_2(type, hcall, a1, a2) \
+ ({ \
+ long __res, __ign1, __ign2; \
+ asm volatile ( \
+ "call hypercall_page + %c[offset]" \
+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \
+ : [offset] "i" (hcall * 32), \
+ "1" ((long)(a1)), "2" ((long)(a2)) \
+ : "memory" ); \
+ (type)__res; \
+ })
+
+#define _hypercall32_3(type, hcall, a1, a2, a3) \
+ ({ \
+ long __res, __ign1, __ign2, __ign3; \
+ asm volatile ( \
+ "call hypercall_page + %c[offset]" \
+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), "=d" (__ign3) \
+ : [offset] "i" (hcall * 32), \
+ "1" ((long)(a1)), "2" ((long)(a2)), "3" ((long)(a3)) \
+ : "memory" ); \
+ (type)__res; \
+ })
+
+#define _hypercall32_4(type, hcall, a1, a2, a3, a4) \
+ ({ \
+ long __res, __ign1, __ign2, __ign3, __ign4; \
+ asm volatile ( \
+ "call hypercall_page + %c[offset]" \
+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), "=d" (__ign3),\
+ "=S" (__ign4) \
+ : [offset] "i" (hcall * 32), \
+ "1" ((long)(a1)), "2" ((long)(a2)), "3" ((long)(a3)), \
+ "4" ((long)(a4)) \
+ : "memory" ); \
+ (type)__res; \
+ })
+
+#endif /* XTF_X86_32_HYPERCALL_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
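These primitives are the raw building blocks; typed wrappers for individual hypercalls sit on top of them. An illustrative wrapper (not part of this patch) for Xen's console_io hypercall, assuming __HYPERVISOR_console_io and CONSOLEIO_write from Xen's public xen.h:

#include <xen/xen.h>

/* Write len bytes from buf to the Xen console: the command lands in %ebx,
 * the count in %ecx and the buffer pointer in %edx. */
static inline long example_console_write(const char *buf, int len)
{
    return _hypercall32_3(long, __HYPERVISOR_console_io,
                          CONSOLEIO_write, len, buf);
}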
--- /dev/null
+#ifndef XTF_X86_64_HYPERCALL_H
+#define XTF_X86_64_HYPERCALL_H
+
+/*
+ * Hypercall primitives for 64bit
+ *
+ * Inputs: %rdi, %rsi, %rdx, %r10, %r8, %r9 (arguments 1-6)
+ */
+
+#define _hypercall64_1(type, hcall, a1) \
+ ({ \
+ long __res, __ign1; \
+ asm volatile ( \
+ "call hypercall_page + %c[offset]" \
+ : "=a" (__res), "=D" (__ign1) \
+ : [offset] "i" (hcall * 32), \
+ "1" ((long)(a1)) \
+ : "memory" ); \
+ (type)__res; \
+ })
+
+#define _hypercall64_2(type, hcall, a1, a2) \
+ ({ \
+ long __res, __ign1, __ign2; \
+ asm volatile ( \
+ "call hypercall_page + %c[offset]" \
+ : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \
+ : [offset] "i" (hcall * 32), \
+ "1" ((long)(a1)), "2" ((long)(a2)) \
+ : "memory" ); \
+ (type)__res; \
+ })
+
+#define _hypercall64_3(type, hcall, a1, a2, a3) \
+ ({ \
+ long __res, __ign1, __ign2, __ign3; \
+ asm volatile ( \
+ "call hypercall_page + %c[offset]" \
+ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), "=d" (__ign3) \
+ : [offset] "i" (hcall * 32), \
+ "1" ((long)(a1)), "2" ((long)(a2)), "3" ((long)(a3)) \
+ : "memory" ); \
+ (type)__res; \
+ })
+
+#define _hypercall64_4(type, hcall, a1, a2, a3, a4) \
+ ({ \
+ long __res, __ign1, __ign2, __ign3, __ign4; \
+ register long _a4 asm ("r10") = ((long)(a4)); \
+ asm volatile ( \
+ "call hypercall_page + %c[offset]" \
+ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), "=d" (__ign3),\
+ "=&r" (__ign4) \
+ : [offset] "i" (hcall * 32), \
+ "1" ((long)(a1)), "2" ((long)(a2)), "3" ((long)(a3)), \
+ "4" (_a4) \
+ : "memory" ); \
+ (type)__res; \
+ })
+
+#endif /* XTF_X86_64_HYPERCALL_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
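The 64bit variants follow the 64bit hypercall ABI noted above; the 4-argument form pins a4 into %r10 via a register variable because GCC has no constraint letter for that register. An illustrative wrapper (not part of this patch), assuming __HYPERVISOR_sched_op and SCHEDOP_yield from Xen's public headers are available:

#include <xen/xen.h>
#include <xen/sched.h>

/* Voluntarily yield the CPU: sched_op(SCHEDOP_yield, NULL). */
static inline long example_yield(void)
{
    return _hypercall64_2(long, __HYPERVISOR_sched_op, SCHEDOP_yield, 0);
}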
--- /dev/null
+#ifndef XTF_X86_XTF_H
+#define XTF_X86_XTF_H
+
+#include <arch/cpuid.h>
+#include <arch/lib.h>
+
+extern char _end[];
+
+#endif /* XTF_X86_XTF_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
*/
#include <xtf/numbers.h>
-#include <arch/x86/page.h>
+#include <arch/page.h>
/* Don't clobber the ld directive */
#undef i386
#include <xtf/hypercall.h>
#include <xtf/test.h>
-#include <arch/x86/idt.h>
-#include <arch/x86/lib.h>
-#include <arch/x86/processor.h>
-#include <arch/x86/segment.h>
-#include <arch/x86/pagetable.h>
-#include <arch/x86/symbolic-const.h>
+#include <arch/idt.h>
+#include <arch/lib.h>
+#include <arch/processor.h>
+#include <arch/segment.h>
+#include <arch/pagetable.h>
+#include <arch/symbolic-const.h>
/* Real entry points */
void entry_DE(void);
#include <xtf/extable.h>
#include <xtf/report.h>
-#include <arch/x86/cpuid.h>
-#include <arch/x86/desc.h>
-#include <arch/x86/lib.h>
-#include <arch/x86/mm.h>
-#include <arch/x86/traps.h>
+#include <arch/cpuid.h>
+#include <arch/desc.h>
+#include <arch/lib.h>
+#include <arch/mm.h>
+#include <arch/traps.h>
/*
* XTF Stack layout:
#include <xtf/traps.h>
#include <xtf/exlog.h>
-#include <arch/x86/decode.h>
-#include <arch/x86/lib.h>
-#include <arch/x86/processor.h>
+#include <arch/decode.h>
+#include <arch/lib.h>
+#include <arch/processor.h>
bool (*xtf_unhandled_exception_hook)(struct cpu_regs *regs);
hvm32pse_arch := x86_32
hvm32_arch := x86_32
-COMMON_FLAGS := -pipe -I$(ROOT)/include -MMD -MP
+COMMON_FLAGS := -pipe -I$(ROOT)/include -I$(ROOT)/arch/x86/include -MMD -MP
COMMON_AFLAGS := $(COMMON_FLAGS) -D__ASSEMBLY__
COMMON_CFLAGS := $(COMMON_FLAGS) -Wall -Wextra -Werror -std=gnu99 -Wstrict-prototypes -O3 -g
AFLAGS_$($(1)_arch) := $$(COMMON_AFLAGS) $$(COMMON_AFLAGS-$($(1)_arch))
CFLAGS_$($(1)_arch) := $$(COMMON_CFLAGS) $$(COMMON_CFLAGS-$($(1)_arch))
-AFLAGS_$(1) := $$(AFLAGS_$($(1)_arch)) $$(COMMON_AFLAGS-$(1)) -DCONFIG_ENV_$(1) -include arch/x86/config.h
-CFLAGS_$(1) := $$(CFLAGS_$($(1)_arch)) $$(COMMON_CFLAGS-$(1)) -DCONFIG_ENV_$(1) -include arch/x86/config.h
+AFLAGS_$(1) := $$(AFLAGS_$($(1)_arch)) $$(COMMON_AFLAGS-$(1)) -DCONFIG_ENV_$(1) -include arch/config.h
+CFLAGS_$(1) := $$(CFLAGS_$($(1)_arch)) $$(COMMON_CFLAGS-$(1)) -DCONFIG_ENV_$(1) -include arch/config.h
link-$(1) := $(ROOT)/arch/x86/link-$(1).lds
-#include <arch/x86/div.h>
+#include <arch/div.h>
#include <xtf/libc.h>
#include <xtf/compiler.h>
+++ /dev/null
-/**
- * @file include/arch/x86/asm_macros.h
- *
- * Macros for use in x86 assembly files.
- */
-#ifndef XTF_X86_ASM_MACROS_H
-#define XTF_X86_ASM_MACROS_H
-
-#ifdef __ASSEMBLY__
-/* Declare data at the architectures width. */
-# if defined(__x86_64__)
-# define _WORD .quad
-# elif defined(__i386__)
-# define _WORD .long
-# endif
-#else
-# if defined(__x86_64__)
-# define _WORD ".quad "
-# elif defined(__i386__)
-# define _WORD ".long "
-# endif
-#endif
-
-#ifdef __ASSEMBLY__
-
-.macro SAVE_ALL
- cld
-#if defined(__x86_64__)
- push %rdi
- push %rsi
- push %rdx
- push %rcx
- push %rax
- push %r8
- push %r9
- push %r10
- push %r11
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
-#elif defined(__i386__)
- push %edi
- push %esi
- push %edx
- push %ecx
- push %eax
- push %ebx
- push %ebp
-#else
-# error Bad architecture for SAVE_ALL
-#endif
-.endm
-
-.macro RESTORE_ALL
-#if defined(__x86_64__)
- pop %r15
- pop %r14
- pop %r13
- pop %r12
- pop %rbp
- pop %rbx
- pop %r11
- pop %r10
- pop %r9
- pop %r8
- pop %rax
- pop %rcx
- pop %rdx
- pop %rsi
- pop %rdi
-#elif defined(__i386__)
- pop %ebp
- pop %ebx
- pop %eax
- pop %ecx
- pop %edx
- pop %esi
- pop %edi
-#else
-# error Bad architecture for RESTORE_ALL
-#endif
-.endm
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* XTF_X86_ASM_MACROS_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
+++ /dev/null
-#ifndef XTF_X86_BARRIER_H
-#define XTF_X86_BARRIER_H
-
-/*
- * Memory barriers for x86 systems
- *
- * See Linux: Documentation/memory-barriers.txt for a very detailed
- * description of the problems and their implications.
- *
- * Under Xen, we rely on the fact that only x86_64 cpus are supported, which
- * guarantees that the {m,l,s}fence instructions are supported (SSE2 being a
- * requirement of 64bit).
- *
- * x86 memory ordering requirements make the smp_???() variants easy. From
- * the point of view of program order, reads may not be reordered with respect
- * to other reads, and writes may not be reordered with respect to other
- * writes, causing smp_rmb() and smp_wmb() to degrade to simple compiler
- * barriers. smp_mb() however does need to be an mfence instruction, as reads
- * are permitted to be reordered ahead of non-aliasing writes.
- */
-
-#include <xtf/compiler.h>
-
-#define mb() __asm__ __volatile__ ("mfence" ::: "memory")
-#define rmb() __asm__ __volatile__ ("lfence" ::: "memory")
-#define wmb() __asm__ __volatile__ ("sfence" ::: "memory")
-
-#define smp_mb() mb()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-
-#endif /* XTF_X86_BARRIER_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
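A typical use of these barriers is publishing data from one vCPU to another: write the payload, issue smp_wmb(), then set the flag the consumer polls. A minimal sketch of that pattern (the payload/ready variables are hypothetical):

static volatile int payload, ready;

/* Producer: order the payload write before the flag write. */
static void example_publish(int value)
{
    payload = value;
    smp_wmb();
    ready = 1;
}

/* Consumer: wait for the flag, then read the payload after smp_rmb(). */
static int example_consume(void)
{
    while ( !ready )
        ;
    smp_rmb();
    return payload;
}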
+++ /dev/null
-/**
- * @file include/arch/x86/config.h
- * Logic to split an environment into finer-grain @#define's
- *
- * Converts `CONFIG_ENV_$foo` into:
- * - `CONFIG_PV` or `CONFIG_HVM`
- * - `CONFIG_PAGING_LEVELS = $num`
- *
- * The `CONFIG_ENV_$foo` is then undefined, to prevent its use in general code.
- */
-#ifndef XTF_X86_CONFIG_H
-#define XTF_X86_CONFIG_H
-
-#include <xtf/macro_magic.h>
-
-#if defined(CONFIG_ENV_pv64)
-
-#define CONFIG_PV 1
-#define CONFIG_64BIT 1
-#define CONFIG_PAGING_LEVELS 4
-#define ENVIRONMENT_DESCRIPTION "PV 64bit (Long mode 4 levels)"
-
-#undef CONFIG_ENV_pv64
-
-#elif defined(CONFIG_ENV_pv32pae)
-
-#define CONFIG_PV 1
-#define CONFIG_32BIT 1
-#define CONFIG_PAGING_LEVELS 3
-#define ENVIRONMENT_DESCRIPTION "PV 32bit (PAE 3 levels)"
-
-#undef CONFIG_ENV_pv32pae
-
-#elif defined(CONFIG_ENV_hvm64)
-
-#define CONFIG_HVM 1
-#define CONFIG_64BIT 1
-#define CONFIG_PAGING_LEVELS 4
-#define ENVIRONMENT_DESCRIPTION "HVM 64bit (Long mode 4 levels)"
-
-#undef CONFIG_ENV_hvm64
-
-#elif defined(CONFIG_ENV_hvm32pae)
-
-#define CONFIG_HVM 1
-#define CONFIG_32BIT 1
-#define CONFIG_PAGING_LEVELS 3
-#define ENVIRONMENT_DESCRIPTION "HVM 32bit (PAE 3 levels)"
-
-#undef CONFIG_ENV_hvm32pae
-
-#elif defined(CONFIG_ENV_hvm32pse)
-
-#define CONFIG_HVM 1
-#define CONFIG_32BIT 1
-#define CONFIG_PAGING_LEVELS 2
-#define ENVIRONMENT_DESCRIPTION "HVM 32bit (PSE 2 levels)"
-
-#undef CONFIG_ENV_hvm32pse
-
-#elif defined(CONFIG_ENV_hvm32)
-
-#define CONFIG_HVM 1
-#define CONFIG_32BIT 1
-#define CONFIG_PAGING_LEVELS 0
-#define ENVIRONMENT_DESCRIPTION "HVM 32bit (No paging)"
-
-#undef CONFIG_ENV_hvm32
-
-#else
-# error Bad environment
-#endif
-
-#endif /* XTF_X86_CONFIG_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
+++ /dev/null
-#ifndef XTF_X86_CPUID_H
-#define XTF_X86_CPUID_H
-
-#include <xtf/types.h>
-#include <xtf/numbers.h>
-
-#include <xen/arch-x86/cpufeatureset.h>
-
-typedef void (*cpuid_fn_t)(uint32_t leaf,
- uint32_t *eax, uint32_t *ebx,
- uint32_t *ecx, uint32_t *edx);
-typedef void (*cpuid_count_fn_t)(uint32_t leaf, uint32_t subleaf,
- uint32_t *eax, uint32_t *ebx,
- uint32_t *ecx, uint32_t *edx);
-
-enum x86_vendor
-{
- X86_VENDOR_UNKNOWN,
- X86_VENDOR_INTEL,
- X86_VENDOR_AMD,
-};
-
-extern enum x86_vendor x86_vendor;
-extern unsigned int x86_family, x86_model, x86_stepping;
-extern unsigned int maxphysaddr, maxvirtaddr;
-
-static inline bool vendor_is(enum x86_vendor v)
-{
- return x86_vendor == v;
-}
-
-#define vendor_is_intel vendor_is(X86_VENDOR_INTEL)
-#define vendor_is_amd vendor_is(X86_VENDOR_AMD)
-
-
-#define cpufeat_word(idx) ((idx) / 32)
-#define cpufeat_bit(idx) ((idx) % 32)
-#define cpufeat_mask(idx) (_AC(1, U) << cpufeat_bit(idx))
-
-#define FEATURESET_1d cpufeat_word(X86_FEATURE_FPU)
-#define FEATURESET_1c cpufeat_word(X86_FEATURE_SSE3)
-#define FEATURESET_e1d cpufeat_word(X86_FEATURE_SYSCALL)
-#define FEATURESET_e1c cpufeat_word(X86_FEATURE_LAHF_LM)
-#define FEATURESET_Da1 cpufeat_word(X86_FEATURE_XSAVEOPT)
-#define FEATURESET_7b0 cpufeat_word(X86_FEATURE_FSGSBASE)
-#define FEATURESET_7c0 cpufeat_word(X86_FEATURE_PREFETCHWT1)
-#define FEATURESET_e7d cpufeat_word(X86_FEATURE_ITSC)
-#define FEATURESET_e8b cpufeat_word(X86_FEATURE_CLZERO)
-
-#define FSCAPINTS (FEATURESET_e8b + 1)
-
-extern uint32_t x86_features[FSCAPINTS];
-
-static inline bool cpu_has(unsigned int feature)
-{
- return x86_features[cpufeat_word(feature)] & cpufeat_mask(feature);
-}
-
-#define cpu_has_fpu cpu_has(X86_FEATURE_FPU)
-#define cpu_has_vme cpu_has(X86_FEATURE_VME)
-#define cpu_has_de cpu_has(X86_FEATURE_DE)
-#define cpu_has_pse cpu_has(X86_FEATURE_PSE)
-#define cpu_has_tsc cpu_has(X86_FEATURE_TSC)
-#define cpu_has_pae cpu_has(X86_FEATURE_PAE)
-#define cpu_has_mce cpu_has(X86_FEATURE_MCE)
-#define cpu_has_pge cpu_has(X86_FEATURE_PGE)
-#define cpu_has_mca cpu_has(X86_FEATURE_MCA)
-#define cpu_has_pat cpu_has(X86_FEATURE_PAT)
-#define cpu_has_pse36 cpu_has(X86_FEATURE_PSE36)
-#define cpu_has_mmx cpu_has(X86_FEATURE_MMX)
-#define cpu_has_fxsr cpu_has(X86_FEATURE_FXSR)
-
-#define cpu_has_sse cpu_has(X86_FEATURE_SSE)
-#define cpu_has_sse2 cpu_has(X86_FEATURE_SSE2)
-#define cpu_has_vmx cpu_has(X86_FEATURE_VMX)
-#define cpu_has_smx cpu_has(X86_FEATURE_SMX)
-#define cpu_has_pcid cpu_has(X86_FEATURE_PCID)
-#define cpu_has_xsave cpu_has(X86_FEATURE_XSAVE)
-#define cpu_has_avx cpu_has(X86_FEATURE_AVX)
-
-#define cpu_has_syscall cpu_has(X86_FEATURE_SYSCALL)
-#define cpu_has_nx cpu_has(X86_FEATURE_NX)
-#define cpu_has_page1gb cpu_has(X86_FEATURE_PAGE1GB)
-#define cpu_has_lm cpu_has(X86_FEATURE_LM)
-
-#define cpu_has_fsgsbase cpu_has(X86_FEATURE_FSGSBASE)
-#define cpu_has_smep cpu_has(X86_FEATURE_SMEP)
-#define cpu_has_smap cpu_has(X86_FEATURE_SMAP)
-
-#define cpu_has_umip cpu_has(X86_FEATURE_UMIP)
-#define cpu_has_pku cpu_has(X86_FEATURE_PKU)
-
-#endif /* XTF_X86_CPUID_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
+++ /dev/null
-/**
- * @file include/arch/x86/decode.h
- *
- * Helper routines for decoding x86 state.
- */
-#ifndef XTF_X86_DECODE_H
-#define XTF_X86_DECODE_H
-
-#include <xtf/types.h>
-
-#include <arch/x86/cpuid.h>
-#include <arch/x86/exinfo.h>
-
-/**
- * String of the indentified vendor @p v.
- *
- * @param v Vendor.
- * @return String.
- */
-const char *x86_vendor_name(enum x86_vendor v);
-
-/**
- * String abbreviation of @p ev.
- *
- * @param ev Entry Vector.
- * @return String abbreviation.
- */
-const char *x86_exc_short_name(unsigned int ev);
-
-/**
- * Decodes an x86 error code into a readable form.
- *
- * @param buf Buffer to fill.
- * @param bufsz Size of @p buf.
- * @param ev Entry Vector.
- * @param ec Error Code.
- * @return snprintf(buf, bufsz, ...)
- */
-int x86_exc_decode_ec(char *buf, size_t bufsz,
- unsigned int ev, unsigned int ec);
-
-/**
- * Decodes an exinfo_t into a readable form.
- *
- * @param buf Buffer to fill.
- * @param bufsz Size of @p buf.
- * @param info exinfo_t value.
- * @return snprintf(buf, bufsz, ...)
- */
-int x86_decode_exinfo(char *buf, size_t bufsz, exinfo_t info);
-
-#endif /* XTF_X86_DECODE_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
+++ /dev/null
-/**
- * @file include/arch/x86/desc.h
- *
- * %x86 segment descriptor infrastructure.
- */
-
-#ifndef XTF_X86_DESC_H
-#define XTF_X86_DESC_H
-
-#include <xtf/types.h>
-#include <xtf/compiler.h>
-
-#include <arch/x86/segment.h>
-
-/** 8 byte user segment descriptor (GDT/LDT entries with .s = 1) */
-struct __packed seg_desc32 {
- union {
- /** Raw backing integers. */
- struct {
- uint32_t lo, hi;
- };
- /** Common named fields. */
- struct {
- uint16_t limit0;
- uint16_t base0;
- uint8_t base1;
- unsigned type: 4;
- unsigned s: 1, dpl: 2, p: 1;
- unsigned limit: 4;
- unsigned avl: 1, l: 1, d: 1, g: 1;
- uint8_t base2;
- };
- /** Code segment specific field names. */
- struct {
- uint16_t limit0;
- uint16_t base0;
- uint8_t base1;
- unsigned a: 1, r: 1, c: 1, x: 1;
- unsigned s: 1, dpl: 2, p: 1;
- unsigned limit: 4;
- unsigned avl: 1, l: 1, d: 1, g: 1;
- uint8_t base2;
- } code;
- /** Data segment specific field names. */
- struct {
- uint16_t limit0;
- uint16_t base0;
- uint8_t base1;
- unsigned a: 1, w: 1, e: 1, x: 1;
- unsigned s: 1, dpl: 2, p: 1;
- unsigned limit: 4;
- unsigned avl: 1, _r0: 1, b: 1, g: 1;
- uint8_t base2;
- } data;
- };
-};
-
-/** 8-byte gate - Protected mode IDT entry, GDT task/call gate. */
-struct __packed seg_gate32 {
- union {
- struct {
- uint32_t lo, hi;
- };
- struct {
- uint16_t offset0;
- uint16_t selector;
- uint8_t _r0;
- unsigned type: 4, s: 1, dpl: 2, p: 1;
- uint16_t offset1;
- };
- };
-};
-
-/** 16-byte gate - Long mode IDT entry. */
-struct __packed seg_gate64 {
- union {
- struct {
- uint64_t lo, hi;
- };
- struct {
- uint16_t offset0;
- uint16_t selector;
- unsigned ist: 3, _r0: 5, type: 4, s: 1, dpl: 2, p: 1;
- uint16_t offset1;
- uint32_t offset2;
- uint32_t _r1;
- };
- };
-};
-
-/* GDT/LDT attribute flags for user segments */
-
-/* Common */
-#define SEG_ATTR_G 0x8000 /**< Granularity of limit (0 = 1, 1 = 4K) */
-#define SEG_ATTR_AVL 0x1000 /**< Available for software use */
-#define SEG_ATTR_P 0x0080 /**< Present? */
-#define SEG_ATTR_S 0x0010 /**< !System desc (0 = system, 1 = user) */
-#define SEG_ATTR_A 0x0001 /**< Accessed? (set by hardware) */
-
-#define SEG_ATTR_COMMON 0x8091 /**< Commonly set bits (G P S A) */
-
-#define SEG_ATTR_DPL0 0x0000 /**< Descriptor privilege level 0 */
-#define SEG_ATTR_DPL1 0x0020 /**< Descriptor privilege level 1 */
-#define SEG_ATTR_DPL2 0x0040 /**< Descriptor privilege level 2 */
-#define SEG_ATTR_DPL3 0x0060 /**< Descriptor privilege level 3 */
-#define SEG_ATTR_CODE 0x0008 /**< Type (0 = data, 1 = code) */
-#define SEG_ATTR_DATA 0x0000 /**< Type (0 = data, 1 = code) */
-
-/* Code segments */
-#define SEG_ATTR_D 0x4000 /**< Default operand size (0 = 16bit, 1 = 32bit) */
-#define SEG_ATTR_L 0x2000 /**< Long segment? (1 = 64bit) */
-#define SEG_ATTR_C 0x0004 /**< Conforming? (0 = non, 1 = conforming) */
-#define SEG_ATTR_R 0x0002 /**< Readable? (0 = XO seg, 1 = RX seg) */
-
-/* Data segments */
-#define SEG_ATTR_B 0x4000 /**< 'Big' flag.
- * - For %ss, default operand size.
- * - For expand-down segment, sets upper bound. */
-#define SEG_ATTR_E 0x0004 /**< Expand-down? (0 = normal, 1 = expand-down) */
-#define SEG_ATTR_W 0x0002 /**< Writable? (0 = RO seg, 1 = RW seg) */
-
-/**
- * Initialise an LDT/GDT entry using a raw attribute number.
- *
- * @param base Segment base.
- * @param limit Segment limit.
- * @param attr Segment attributes.
- */
-#define INIT_GDTE(base, limit, attr) { { { \
- .lo = (((base) & 0xffff) << 16) | ((limit) & 0xffff), \
- .hi = ((base) & 0xff000000) | ((limit) & 0xf0000) | \
- (((attr) & 0xf0ff) << 8) | (((base) & 0xff0000) >> 16) \
- } } }
-
-/** Long mode lgdt/lidt table pointer. */
-struct __packed desc_ptr64 {
- uint16_t limit;
- uint64_t base;
-};
-
-/** Protected mode lgdt/lidt table pointer. */
-struct __packed desc_ptr32 {
- uint16_t limit;
- uint32_t base;
-};
-
-struct __packed hw_tss32 {
- uint16_t link; uint16_t _r0;
-
- uint32_t esp0;
- uint16_t ss0; uint16_t _r1;
-
- uint32_t esp1;
- uint16_t ss1; uint16_t _r2;
-
- uint32_t esp2;
- uint16_t ss2; uint16_t _r3;
-
- uint32_t cr3;
- uint32_t eip;
- uint32_t eflags;
- uint32_t eax;
- uint32_t ecx;
- uint32_t edx;
- uint32_t ebx;
- uint32_t esp;
- uint32_t ebp;
- uint32_t esi;
- uint32_t edi;
-
- uint16_t es; uint16_t _r4;
- uint16_t cs; uint16_t _r5;
- uint16_t ss; uint16_t _r6;
- uint16_t ds; uint16_t _r7;
- uint16_t fs; uint16_t _r8;
- uint16_t gs; uint16_t _r9;
- uint16_t ldtr; uint16_t _r10;
- uint16_t t; uint16_t iopb;
-};
-
-struct __packed hw_tss64 {
- uint16_t link; uint16_t _r0;
-
- uint64_t rsp0;
- uint64_t rsp1;
- uint64_t rsp2;
-
- uint64_t _r1;
-
- uint64_t ist[7]; /* 1-based structure */
-
- uint64_t _r2;
-
- uint16_t t;
- uint16_t iopb;
-};
-
-#define X86_TSS_INVALID_IO_BITMAP 0x8000
-
-#if defined(__x86_64__)
-
-typedef struct desc_ptr64 desc_ptr;
-typedef struct seg_desc32 user_desc;
-typedef struct seg_gate64 gate_desc;
-typedef struct hw_tss64 hw_tss;
-
-#elif defined(__i386__)
-
-typedef struct desc_ptr32 desc_ptr;
-typedef struct seg_desc32 user_desc;
-typedef struct seg_gate32 gate_desc;
-typedef struct hw_tss32 hw_tss;
-
-#else
-# error Bad architecture for descriptor infrastructure
-#endif
-
-extern user_desc gdt[NR_GDT_ENTRIES];
-extern desc_ptr gdt_ptr;
-
-#if defined(CONFIG_HVM)
-extern gate_desc idt[256];
-extern desc_ptr idt_ptr;
-
-extern hw_tss tss;
-#endif
-
-#endif /* XTF_X86_DESC_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
+++ /dev/null
-#ifndef XTF_X86_DIV_H
-#define XTF_X86_DIV_H
-
-#include <xtf/types.h>
-
-/*
- * Divide a 64bit number by 32bit divisor without software support.
- *
- * The dividend is modified in place, and the modulus is returned.
- */
-static inline uint32_t divmod64(uint64_t *dividend, uint32_t divisor)
-{
- uint32_t mod;
-
-#ifdef __x86_64__
-
- /*
- * On 64bit, issue a straight 'div' instruction.
- */
-
- mod = *dividend % divisor;
- *dividend /= divisor;
-#else
- {
- /*
- * On 32bit, this is harder.
- *
- * In x86, 'divl' can take a 64bit dividend, but the resulting
- * quotient must fit in %eax or a #DE will occur.
- *
- * To avoid this, we split the division in two. The remainder from
- * the higher divide can safely be used in the upper 32bits of the
- * lower divide, as it will not cause an overflow.
- */
- uint32_t high = *dividend >> 32, low = *dividend, umod = 0;
-
- if ( high )
- {
- umod = high % divisor;
- high /= divisor;
- }
-
- asm ("divl %2"
- : "=a" (low), "=d" (mod)
- : "rm" (divisor), "0" (low), "1" (umod));
-
- *dividend = (((uint64_t)high) << 32) | low;
- }
-#endif
-
- return mod;
-}
-
-#endif /* XTF_X86_DIV_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
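A typical caller splits a 64bit quantity without dragging in libgcc's 64bit division helpers, e.g. turning a nanosecond count into whole seconds plus a remainder (the example_ name is hypothetical):

static void example_split_ns(uint64_t ns)
{
    uint64_t secs = ns;
    uint32_t rem  = divmod64(&secs, 1000000000u);

    /* secs now holds ns / 1000000000, rem holds ns % 1000000000. */
    (void)secs; (void)rem;
}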
+++ /dev/null
-/**
- * @file include/arch/x86/exinfo.h
- *
- * An encapsulation of an x86 exception with error code
- */
-#ifndef XTF_X86_EXINFO_H
-#define XTF_X86_EXINFO_H
-
-#include <arch/x86/processor.h>
-
-/**
- * Packed exception and error code information
- *
- * - Bottom 16 bits are error code
- * - Next 8 bits are the entry vector
- * - Top bit it set to disambiguate @#DE from no exception
- */
-typedef unsigned int exinfo_t;
-
-#define EXINFO_EXPECTED (1u << 31)
-
-#define EXINFO(vec, ec) (EXINFO_EXPECTED | ((vec & 0xff) << 16) | (ec & 0xffff))
-
-#define EXINFO_SYM(exc, ec) EXINFO(X86_EXC_ ## exc, ec)
-
-static inline unsigned int exinfo_vec(exinfo_t info)
-{
- return (info >> 16) & 0xff;
-}
-
-static inline unsigned int exinfo_ec(exinfo_t info)
-{
- return info & 0xffff;
-}
-
-#endif /* XTF_X86_EXINFO_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
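In practice a test records the fault it expects as an exinfo_t and later compares it with what the exception handler captured. A small sketch of the encode/decode round trip (illustrative only):

static void example_exinfo(void)
{
    exinfo_t expected = EXINFO_SYM(GP, 0);   /* #GP with error code 0 */

    unsigned int vec = exinfo_vec(expected); /* == X86_EXC_GP (13) */
    unsigned int ec  = exinfo_ec(expected);  /* == 0 */

    (void)vec; (void)ec;
}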
+++ /dev/null
-/**
- * @file include/arch/x86/idt.h
- *
- * %x86 IDT vector infrastructure.
- */
-
-#ifndef XTF_X86_IDT_H
-#define XTF_X86_IDT_H
-
-/**
- * Return to kernel mode.
- *
- * To enable easy transition between user and kernel mode for tests.
- */
-#define X86_VEC_RET2KERN 0x20
-
-/**
- * Available for test use.
- */
-#define X86_VEC_AVAIL 0x21
-
-
-#ifndef __ASSEMBLY__
-
-/** A guest agnostic represention of IDT information. */
-struct xtf_idte
-{
- unsigned long addr;
- unsigned int cs, dpl;
-};
-
-/**
- * Set up an IDT Entry, in a guest agnostic way.
- *
- * Construct an IDT Entry at the specified @p vector, using configuration
- * provided in @p idte.
- *
- * @param vector Vector to set up.
- * @param idte Details to set up.
- * @returns 0 for HVM guests, hypercall result for PV guests.
- */
-int xtf_set_idte(unsigned int vector,
- struct xtf_idte *idte);
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* XTF_X86_IDT_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
+++ /dev/null
-#ifndef XTF_X86_LIB_H
-#define XTF_X86_LIB_H
-
-#include <xtf/types.h>
-#include <xen/arch-x86/xen.h>
-#include <arch/x86/desc.h>
-
-static inline uint64_t rdmsr(uint32_t idx)
-{
- uint32_t lo, hi;
-
- asm volatile("rdmsr": "=a" (lo), "=d" (hi): "c" (idx));
-
- return (((uint64_t)hi) << 32) | lo;
-}
-
-static inline bool rdmsr_safe(uint32_t idx, uint64_t *val)
-{
- uint32_t lo, hi, new_idx;
-
- asm volatile("1: rdmsr; 2:"
- _ASM_EXTABLE_HANDLER(1b, 2b, ex_rdmsr_safe)
- : "=a" (lo), "=d" (hi), "=c" (new_idx)
- : "c" (idx));
-
- bool fault = idx != new_idx;
-
- if ( !fault )
- *val = (((uint64_t)hi) << 32) | lo;
-
- return fault;
-}
-
-static inline void wrmsr(uint32_t idx, uint64_t val)
-{
- asm volatile ("wrmsr":
- : "c" (idx), "a" ((uint32_t)val),
- "d" ((uint32_t)(val >> 32)));
-}
-
-static inline bool wrmsr_safe(uint32_t idx, uint64_t val)
-{
- uint32_t new_idx;
-
- asm volatile ("1: wrmsr; 2:"
- _ASM_EXTABLE_HANDLER(1b, 2b, ex_wrmsr_safe)
- : "=c" (new_idx)
- : "c" (idx), "a" ((uint32_t)val),
- "d" ((uint32_t)(val >> 32)));
-
- return idx != new_idx;
-}
-
-static inline void cpuid(uint32_t leaf,
- uint32_t *eax, uint32_t *ebx,
- uint32_t *ecx, uint32_t *edx)
-{
- asm volatile ("cpuid"
- : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
- : "0" (leaf));
-}
-
-static inline uint32_t cpuid_eax(uint32_t leaf)
-{
- uint32_t eax, tmp;
-
- cpuid(leaf, &eax, &tmp, &tmp, &tmp);
-
- return eax;
-}
-
-static inline uint32_t cpuid_ebx(uint32_t leaf)
-{
- uint32_t ebx, tmp;
-
- cpuid(leaf, &tmp, &ebx, &tmp, &tmp);
-
- return ebx;
-}
-
-static inline uint32_t cpuid_ecx(uint32_t leaf)
-{
- uint32_t ecx, tmp;
-
- cpuid(leaf, &tmp, &tmp, &ecx, &tmp);
-
- return ecx;
-}
-
-static inline uint32_t cpuid_edx(uint32_t leaf)
-{
- uint32_t edx, tmp;
-
- cpuid(leaf, &tmp, &tmp, &tmp, &edx);
-
- return edx;
-}
-
-static inline void pv_cpuid(uint32_t leaf,
- uint32_t *eax, uint32_t *ebx,
- uint32_t *ecx, uint32_t *edx)
-{
- asm volatile (_ASM_XEN_FEP "cpuid"
- : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
- : "0" (leaf));
-}
-
-static inline void cpuid_count(uint32_t leaf, uint32_t subleaf,
- uint32_t *eax, uint32_t *ebx,
- uint32_t *ecx, uint32_t *edx)
-{
- asm volatile ("cpuid"
- : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
- : "0" (leaf), "2" (subleaf));
-}
-
-static inline void pv_cpuid_count(uint32_t leaf, uint32_t subleaf,
- uint32_t *eax, uint32_t *ebx,
- uint32_t *ecx, uint32_t *edx)
-{
- asm volatile (_ASM_XEN_FEP "cpuid"
- : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
- : "0" (leaf), "2" (subleaf));
-}
-
-static inline uint8_t inb(uint16_t port)
-{
- uint8_t val;
-
- asm volatile("inb %w1, %b0": "=a" (val): "Nd" (port));
-
- return val;
-}
-
-static inline uint16_t inw(uint16_t port)
-{
- uint16_t val;
-
- asm volatile("inw %w1, %w0": "=a" (val): "Nd" (port));
-
- return val;
-}
-
-static inline uint32_t inl(uint16_t port)
-{
- uint32_t val;
-
- asm volatile("inl %w1, %k0": "=a" (val): "Nd" (port));
-
- return val;
-}
-
-static inline void outb(uint8_t val, uint16_t port)
-{
- asm volatile("outb %b0, %w1": : "a" (val), "Nd" (port));
-}
-
-static inline void outw(uint16_t val, uint16_t port)
-{
- asm volatile("outw %w0, %w1": : "a" (val), "Nd" (port));
-}
-
-static inline void outl(uint32_t val, uint16_t port)
-{
- asm volatile("outl %k0, %w1": : "a" (val), "Nd" (port));
-}
-
-static inline unsigned int read_cs(void)
-{
- unsigned int cs;
-
- asm volatile ("mov %%cs, %0" : "=r" (cs));
-
- return cs;
-}
-
-static inline unsigned int read_ds(void)
-{
- unsigned int ds;
-
- asm volatile ("mov %%ds, %0" : "=r" (ds));
-
- return ds;
-}
-
-static inline unsigned int read_es(void)
-{
- unsigned int es;
-
- asm volatile ("mov %%es, %0" : "=r" (es));
-
- return es;
-}
-
-static inline unsigned int read_fs(void)
-{
- unsigned int fs;
-
- asm volatile ("mov %%fs, %0" : "=r" (fs));
-
- return fs;
-}
-
-static inline unsigned int read_gs(void)
-{
- unsigned int gs;
-
- asm volatile ("mov %%gs, %0" : "=r" (gs));
-
- return gs;
-}
-
-static inline unsigned int read_ss(void)
-{
- unsigned int ss;
-
- asm volatile ("mov %%ss, %0" : "=r" (ss));
-
- return ss;
-}
-
-static inline void write_cs(unsigned int cs)
-{
- asm volatile ("push %0;"
- "push $1f;"
-#if __x86_64__
- "rex64 "
-#endif
- "lret; 1:"
- :: "qI" (cs));
-}
-
-static inline void write_ds(unsigned int ds)
-{
- asm volatile ("mov %0, %%ds" :: "r" (ds));
-}
-
-static inline void write_es(unsigned int es)
-{
- asm volatile ("mov %0, %%es" :: "r" (es));
-}
-
-static inline void write_fs(unsigned int fs)
-{
- asm volatile ("mov %0, %%fs" :: "r" (fs));
-}
-
-static inline void write_gs(unsigned int gs)
-{
- asm volatile ("mov %0, %%gs" :: "r" (gs));
-}
-
-static inline void write_ss(unsigned int ss)
-{
- asm volatile ("mov %0, %%ss" :: "r" (ss));
-}
-
-static inline unsigned long read_dr6(void)
-{
- unsigned long val;
-
- asm volatile ("mov %%dr6, %0" : "=r" (val));
-
- return val;
-}
-
-static inline unsigned long read_dr7(void)
-{
- unsigned long val;
-
- asm volatile ("mov %%dr7, %0" : "=r" (val));
-
- return val;
-}
-
-static inline unsigned long read_cr0(void)
-{
- unsigned long cr0;
-
- asm volatile ("mov %%cr0, %0" : "=r" (cr0));
-
- return cr0;
-}
-
-static inline unsigned long read_cr2(void)
-{
- unsigned long cr2;
-
- asm volatile ("mov %%cr2, %0" : "=r" (cr2));
-
- return cr2;
-}
-
-static inline unsigned long read_cr3(void)
-{
- unsigned long cr3;
-
- asm volatile ("mov %%cr3, %0" : "=r" (cr3));
-
- return cr3;
-}
-
-static inline unsigned long read_cr4(void)
-{
- unsigned long cr4;
-
- asm volatile ("mov %%cr4, %0" : "=r" (cr4));
-
- return cr4;
-}
-
-static inline unsigned long read_cr8(void)
-{
- unsigned long cr8;
-
- asm volatile ("mov %%cr8, %0" : "=r" (cr8));
-
- return cr8;
-}
-
-static inline void write_cr0(unsigned long cr0)
-{
- asm volatile ("mov %0, %%cr0" :: "r" (cr0));
-}
-
-static inline void write_cr2(unsigned long cr2)
-{
- asm volatile ("mov %0, %%cr2" :: "r" (cr2));
-}
-
-static inline void write_cr3(unsigned long cr3)
-{
- asm volatile ("mov %0, %%cr3" :: "r" (cr3));
-}
-
-static inline void write_cr4(unsigned long cr4)
-{
- asm volatile ("mov %0, %%cr4" :: "r" (cr4));
-}
-
-static inline void write_cr8(unsigned long cr8)
-{
- asm volatile ("mov %0, %%cr8" :: "r" (cr8));
-}
-
-static inline void invlpg(const void *va)
-{
- asm volatile ("invlpg (%0)" :: "r" (va));
-}
-
-static inline void lgdt(const desc_ptr *gdtr)
-{
- asm volatile ("lgdt %0" :: "m" (*gdtr));
-}
-
-static inline void lidt(const desc_ptr *idtr)
-{
- asm volatile ("lidt %0" :: "m" (*idtr));
-}
-
-static inline void lldt(unsigned int sel)
-{
- asm volatile ("lldt %w0" :: "rm" (sel));
-}
-
-static inline void ltr(unsigned int sel)
-{
- asm volatile ("ltr %w0" :: "rm" (sel));
-}
-
-static inline void sgdt(desc_ptr *gdtr)
-{
- asm volatile ("sgdt %0" : "=m" (*gdtr));
-}
-
-static inline void sidt(desc_ptr *idtr)
-{
- asm volatile ("sidt %0" : "=m" (*idtr));
-}
-
-static inline unsigned int sldt(void)
-{
- unsigned int sel;
-
- asm volatile ("sldt %0" : "=r" (sel));
-
- return sel;
-}
-
-static inline unsigned int str(void)
-{
- unsigned int sel;
-
- asm volatile ("str %0" : "=r" (sel));
-
- return sel;
-}
-
-static inline uint64_t xgetbv(uint32_t index)
-{
- uint32_t feat_lo;
- uint64_t feat_hi;
-
- asm volatile ("xgetbv" : "=a" (feat_lo), "=d" (feat_hi)
- : "c" (index) );
-
- return feat_lo | (feat_hi << 32);
-}
-
-static inline void xsetbv(uint32_t index, uint64_t value)
-{
- asm volatile ("xsetbv" :: "a" ((uint32_t)value), "d" (value >> 32),
- "c" (index) );
-}
-
-static inline uint64_t read_xcr0(void)
-{
- return xgetbv(0);
-}
-
-static inline void write_xcr0(uint64_t xcr0)
-{
- xsetbv(0, xcr0);
-}
-
-#endif /* XTF_X86_LIB_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
+++ /dev/null
-#ifndef XTF_X86_MM_H
-#define XTF_X86_MM_H
-
-#include <xtf/types.h>
-
-#include <arch/x86/page.h>
-
-#include <xen/xen.h>
-
-/*
- * Terminology (inherited from Xen):
- *
- * GFN - Guest Frame Number
- * What a guest writes into its pagetables.
- * MFN - Machine Frame Number
- * What Xen writes into its pagetables.
- * PFN - Pseudophysical Frame Number
- * A linear idea of a guests physical address space.
- *
- * For HVM, PFN == GFN, and MFN is strictly irrelevent.
- * For PV, MFN == GFN != PFN.
- *
- * XTF memory layout.
- *
- * Wherever possible, identity layout for simplicity.
- *
- * PV guests: VIRT_OFFSET is 0 which causes all linked virtual addresses to be
- * contiguous in the pagetables created by the domain builder. Therefore,
- * virt == pfn << PAGE_SHIFT for any pfn constructed by the domain builder.
- *
- * HVM guests: All memory from 0 to 4GB is identity mapped.
- */
-
-static inline void *pfn_to_virt(unsigned long pfn)
-{
- return (void *)(pfn << PAGE_SHIFT);
-}
-
-static inline unsigned long virt_to_pfn(const void *va)
-{
- return ((unsigned long)va) >> PAGE_SHIFT;
-}
-
-#if defined(CONFIG_PV)
-
-#define m2p ((unsigned long *)MACH2PHYS_VIRT_START)
-extern struct start_info *start_info;
-
-static inline void *mfn_to_virt(unsigned long mfn)
-{
- return pfn_to_virt(m2p[mfn]);
-}
-
-static inline void *maddr_to_virt(uint64_t maddr)
-{
- return mfn_to_virt(maddr >> PAGE_SHIFT) + (maddr & ~PAGE_MASK);
-}
-
-static inline unsigned long pfn_to_mfn(unsigned long pfn)
-{
- unsigned long *p2m = _p(start_info->mfn_list);
-
- return p2m[pfn];
-}
-
-static inline unsigned long virt_to_mfn(const void *va)
-{
- return pfn_to_mfn(virt_to_pfn(va));
-}
-
-#undef m2p
-
-#endif /* CONFIG_PV */
-
-static inline void *gfn_to_virt(unsigned long gfn)
-{
-#if defined(CONFIG_PV)
- return mfn_to_virt(gfn);
-#else
- return pfn_to_virt(gfn);
-#endif
-}
-
-static inline unsigned long virt_to_gfn(const void *va)
-{
-#if defined(CONFIG_PV)
- return virt_to_mfn(va);
-#else
- return virt_to_pfn(va);
-#endif
-}
-
-#endif /* XTF_X86_MM_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
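Because of the identity layout described in the comment above, the pfn/virt conversions are pure shifts. A trivial illustration (hypothetical values):

static void example_identity_layout(void)
{
    void *va = pfn_to_virt(0x1000);       /* == (void *)0x1000000 */
    unsigned long pfn = virt_to_pfn(va);  /* back to 0x1000 */

    (void)pfn;
}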
+++ /dev/null
-#ifndef XFT_X86_MSR_INDEX_H
-#define XFT_X86_MSR_INDEX_H
-
-#include <xtf/numbers.h>
-
-#define MSR_INTEL_PLATFORM_INFO 0x000000ce
-#define _MSR_PLATFORM_INFO_CPUID_FAULTING 31
-#define MSR_PLATFORM_INFO_CPUID_FAULTING (1ULL << _MSR_PLATFORM_INFO_CPUID_FAULTING)
-
-#define MSR_INTEL_MISC_FEATURES_ENABLES 0x00000140
-#define _MSR_MISC_FEATURES_CPUID_FAULTING 0
-#define MSR_MISC_FEATURES_CPUID_FAULTING (1ULL << _MSR_MISC_FEATURES_CPUID_FAULTING)
-
-#define MSR_EFER 0xc0000080 /* Extended Feature register. */
-#define _EFER_SCE 0 /* SYSCALL Enable. */
-#define EFER_SCE (_AC(1, L) << _EFER_SCE)
-#define _EFER_LME 8 /* Long mode enable. */
-#define EFER_LME (_AC(1, L) << _EFER_LME)
-#define _EFER_LMA 10 /* Long mode Active. */
-#define EFER_LMA (_AC(1, L) << _EFER_LMA)
-#define _EFER_NXE 11 /* No-Execute Enable. */
-#define EFER_NXE (_AC(1, L) << _EFER_NXE)
-#define _EFER_SVME 12 /* Secure Virtual Machine Enable. */
-#define EFER_SVME (_AC(1, L) << _EFER_SVME)
-#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable. */
-#define EFER_LMSLE (_AC(1, L) << _EFER_LMSLE)
-#define _EFER_FFXSR 14 /* Fast FXSAVE/FXRSTOR. */
-#define EFER_FFXSR (_AC(1, L) << _EFER_FFXSR)
-#define _EFER_TCE 15 /* Translation Cache Extension. */
-#define EFER_TCE (_AC(1, L) << _EFER_TCE)
-
-#define MSR_FS_BASE 0xc0000100
-#define MSR_GS_BASE 0xc0000101
-#define MSR_SHADOW_GS_BASE 0xc0000102
-
-#endif /* XFT_X86_MSR_INDEX_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
-
+++ /dev/null
-/**
- * @file include/arch/x86/page-pae.h
- *
- * Definitions and helpers for PAE pagetable handling.
- */
-#ifndef XTF_X86_PAGE_PAE_H
-#define XTF_X86_PAGE_PAE_H
-
-/** PAE pagetable entries are 64 bits wide. */
-#define PAE_PTE_SIZE 8
-/** PAE pagetable entries are 64 bits wide. */
-#define PAE_PTE_ORDER 3
-
-/** PAE pagetables encode 9 bits of index. */
-#define PAE_PT_ORDER 9
-
-/** @{ */
-/** All PAE pagetables contain 512 entries. */
-#define PAE_L1_PT_ENTRIES (PAGE_SIZE / PAE_PTE_SIZE)
-#define PAE_L2_PT_ENTRIES (PAGE_SIZE / PAE_PTE_SIZE)
-#define PAE_L3_PT_ENTRIES (PAGE_SIZE / PAE_PTE_SIZE)
-#define PAE_L4_PT_ENTRIES (PAGE_SIZE / PAE_PTE_SIZE)
-
-/* Other than PAE32_L3, which has 4 entries. */
-#define PAE32_L3_ENTRIES 4
-/** @} */
-
-#define PAE_L1_PT_SHIFT 12
-#define PAE_L2_PT_SHIFT 21
-#define PAE_L3_PT_SHIFT 30
-#define PAE_L4_PT_SHIFT 39
-
-#ifndef __ASSEMBLY__
-
-/** Integer representation of a PTE. */
-typedef uint64_t pae_intpte_t;
-#define PAE_PRIpte "016"PRIx64
-
-static inline unsigned int pae_l1_table_offset(unsigned long va)
-{
- return (va >> PAE_L1_PT_SHIFT) & (PAE_L1_PT_ENTRIES - 1);
-}
-static inline unsigned int pae_l2_table_offset(unsigned long va)
-{
- return (va >> PAE_L2_PT_SHIFT) & (PAE_L2_PT_ENTRIES - 1);
-}
-static inline unsigned int pae_l3_table_offset(unsigned long va)
-{
- return (va >> PAE_L3_PT_SHIFT) & (PAE_L3_PT_ENTRIES - 1);
-}
-#ifdef __x86_64__
-static inline unsigned int pae_l4_table_offset(unsigned long va)
-{
- return (va >> PAE_L4_PT_SHIFT) & (PAE_L4_PT_ENTRIES - 1);
-}
-#endif /* __x86_64__ */
-
-#endif /* __ASSEMBLY__ */
-#endif /* XTF_X86_PAGE_PAE_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
+++ /dev/null
-/**
- * @file include/arch/x86/page-pse.h
- *
- * Definitions and helpers for PSE pagetable handling.
- */
-#ifndef XTF_X86_PAGE_PSE_H
-#define XTF_X86_PAGE_PSE_H
-
-/** PSE pagetable entries are 32 bits wide. */
-#define PSE_PTE_SIZE 4
-/** PSE pagetable entries are 32 bits wide. */
-#define PSE_PTE_ORDER 2
-
-/** PAE pagetables encode 10 bits of index. */
-#define PSE_PT_ORDER 10
-
-/** @{ */
-/** All PSE pagetables contain 1024 entries. */
-#define PSE_L1_PT_ENTRIES (PAGE_SIZE / PSE_PTE_SIZE)
-#define PSE_L2_PT_ENTRIES (PAGE_SIZE / PSE_PTE_SIZE)
-/** @} */
-
-#define PSE_L1_PT_SHIFT 12
-#define PSE_L2_PT_SHIFT 22
-
-#ifndef __ASSEMBLY__
-
-/** Integer representation of a PTE. */
-typedef uint32_t pse_intpte_t;
-#define PSE_PRIpte "08"PRIx32
-
-static inline unsigned int pse_l1_table_offset(unsigned long va)
-{
- return (va >> PSE_L1_PT_SHIFT) & (PSE_L1_PT_ENTRIES - 1);
-}
-static inline unsigned int pse_l2_table_offset(unsigned long va)
-{
- return (va >> PSE_L2_PT_SHIFT) & (PSE_L2_PT_ENTRIES - 1);
-}
-
-static inline uint32_t fold_pse36(uint64_t val)
-{
- return (val & ~(0x1ffULL << 13)) | ((val & (0x1ffULL << 32)) >> (32 - 13));
-}
-
-static inline uint64_t unfold_pse36(uint32_t val)
-{
- return (val & ~(0x1ffULL << 13)) | ((val & (0x1ffULL << 13)) << (32 - 13));
-}
-
-#endif /* __ASSEMBLY__ */
-#endif /* XTF_X86_PAGE_PSE_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
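fold_pse36()/unfold_pse36() move physical-address bits 32 and up into PTE bits 13 and up, as the PSE36/PSE40 scheme requires. A worked example with a superpage frame at 5GiB (illustrative only):

static void example_pse36(void)
{
    uint32_t pte  = fold_pse36(0x140000000ULL);  /* == 0x40002000 */
    uint64_t back = unfold_pse36(pte);           /* == 0x140000000 */

    (void)pte; (void)back;
}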
+++ /dev/null
-#ifndef XTF_X86_PAGE_H
-#define XTF_X86_PAGE_H
-
-#include <xtf/numbers.h>
-
-/*
- * Nomenclature inherited from Xen.
- */
-
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (_AC(1, L) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE - 1))
-
-#define PADDR_BITS 52
-#define PADDR_MASK ((_AC(1, ULL) << PADDR_BITS) - 1)
-
-#include "page-pae.h"
-#include "page-pse.h"
-
-#define PAGE_ORDER_4K 0
-#define PAGE_ORDER_2M 9
-#define PAGE_ORDER_4M 10
-#define PAGE_ORDER_1G 18
-
-#define _PAGE_PRESENT 0x0001
-#define _PAGE_RW 0x0002
-#define _PAGE_USER 0x0004
-#define _PAGE_PWT 0x0008
-#define _PAGE_PCD 0x0010
-#define _PAGE_ACCESSED 0x0020
-#define _PAGE_DIRTY 0x0040
-#define _PAGE_AD (_PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_PSE 0x0080
-#define _PAGE_PAT 0x0080
-#define _PAGE_GLOBAL 0x0100
-#define _PAGE_AVAIL 0x0e00
-#define _PAGE_PSE_PAT 0x1000
-#define _PAGE_NX (_AC(1, ULL) << 63)
-
-/* Shortened flags for use with PF_SYM(). */
-#define _PAGE_P _PAGE_PRESENT
-#define _PAGE_U _PAGE_USER
-#define _PAGE_A _PAGE_ACCESSED
-#define _PAGE_D _PAGE_DIRTY
-
-#if CONFIG_PAGING_LEVELS == 2 /* PSE Paging */
-
-#define PTE_SIZE PSE_PTE_SIZE
-#define PTE_ORDER PSE_PTE_ORDER
-
-#define PT_ORDER PSE_PT_ORDER
-
-#define L1_PT_SHIFT PSE_L1_PT_SHIFT
-#define L2_PT_SHIFT PSE_L2_PT_SHIFT
-
-#define L1_PT_ENTRIES PSE_L1_PT_ENTRIES
-#define L2_PT_ENTRIES PSE_L2_PT_ENTRIES
-
-#else /* CONFIG_PAGING_LEVELS == 2 */ /* PAE Paging */
-
-#define PTE_SIZE PAE_PTE_SIZE
-#define PTE_ORDER PAE_PTE_ORDER
-
-#define PT_ORDER PAE_PT_ORDER
-
-#define L1_PT_SHIFT PAE_L1_PT_SHIFT
-#define L2_PT_SHIFT PAE_L2_PT_SHIFT
-
-#define L1_PT_ENTRIES PAE_L1_PT_ENTRIES
-#define L2_PT_ENTRIES PAE_L2_PT_ENTRIES
-
-#endif /* !CONFIG_PAGING_LEVELS == 2 */
-
-#if CONFIG_PAGING_LEVELS >= 3 /* PAE Paging */
-
-#define L3_PT_SHIFT PAE_L3_PT_SHIFT
-
-#define L3_PT_ENTRIES PAE_L3_PT_ENTRIES
-
-#endif /* CONFIG_PAGING_LEVELS >= 3 */
-
-#if CONFIG_PAGING_LEVELS >= 4 /* PAE Paging */
-
-#define L4_PT_SHIFT PAE_L4_PT_SHIFT
-
-#define L4_PT_ENTRIES PAE_L4_PT_ENTRIES
-
-#endif /* CONFIG_PAGING_LEVELS >= 4 */
-
-
-#ifndef __ASSEMBLY__
-
-/*
- * Always consider "physical" addresses to be 64bits wide, even in 32bit mode.
- */
-typedef uint64_t paddr_t;
-#define PRIpaddr "016"PRIx64
-
-#if CONFIG_PAGING_LEVELS > 0 /* Some form of pagetables. */
-
-#if CONFIG_PAGING_LEVELS == 2 /* PSE Paging */
-
-typedef pse_intpte_t intpte_t;
-#define PRIpte PSE_PRIpte
-
-static inline unsigned int l1_table_offset(unsigned long va)
-{
- return pse_l1_table_offset(va);
-}
-static inline unsigned int l2_table_offset(unsigned long va)
-{
- return pse_l2_table_offset(va);
-}
-
-#else /* CONFIG_PAGING_LEVELS == 2 */ /* PAE Paging */
-
-typedef pae_intpte_t intpte_t;
-#define PRIpte PAE_PRIpte
-
-static inline unsigned int l1_table_offset(unsigned long va)
-{
- return pae_l1_table_offset(va);
-}
-static inline unsigned int l2_table_offset(unsigned long va)
-{
- return pae_l2_table_offset(va);
-}
-
-#endif /* !CONFIG_PAGING_LEVELS == 2 */
-
-#if CONFIG_PAGING_LEVELS >= 3 /* PAE Paging */
-
-static inline unsigned int l3_table_offset(unsigned long va)
-{
- return pae_l3_table_offset(va);
-}
-
-#endif /* CONFIG_PAGING_LEVELS >= 3 */
-
-#if CONFIG_PAGING_LEVELS >= 4 /* PAE Paging */
-
-static inline unsigned int l4_table_offset(unsigned long va)
-{
- return pae_l4_table_offset(va);
-}
-
-#endif /* CONFIG_PAGING_LEVELS >= 4 */
-
-#else /* CONFIG_PAGING_LEVELS > 0 */
-
-/* Enough compatibility to compile in unpaged environments. */
-typedef unsigned long intpte_t;
-#define PRIpte "08lx"
-
-#endif
-
-#ifdef CONFIG_HVM
-
-extern pae_intpte_t pae_l1_identmap[PAE_L1_PT_ENTRIES];
-extern pae_intpte_t pae_l2_identmap[4 * PAE_L2_PT_ENTRIES];
-extern pae_intpte_t pae_l3_identmap[PAE_L3_PT_ENTRIES];
-extern pae_intpte_t pae_l4_identmap[PAE_L4_PT_ENTRIES];
-extern pae_intpte_t pae32_l3_identmap[PAE32_L3_ENTRIES];
-
-extern pse_intpte_t pse_l1_identmap[PSE_L1_PT_ENTRIES];
-extern pse_intpte_t pse_l2_identmap[PSE_L2_PT_ENTRIES];
-
-/* Aliases of the live tables (PAE or PSE as appropriate). */
-extern intpte_t l1_identmap[L1_PT_ENTRIES];
-#if CONFIG_PAGING_LEVELS >= 3
-extern intpte_t l2_identmap[4 *L2_PT_ENTRIES];
-#else
-extern intpte_t l2_identmap[L2_PT_ENTRIES];
-#endif
-
-/* Alias of the pagetable %cr3 points at. */
-extern intpte_t cr3_target[];
-
-#endif /* CONFIG_HVM */
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* XTF_X86_PAGE_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
+++ /dev/null
-#ifndef XTF_X86_PAGETABLE_H
-#define XTF_X86_PAGETABLE_H
-
-#include <arch/x86/mm.h>
-
-#if CONFIG_PAGING_LEVELS > 0
-
-static inline paddr_t pte_to_paddr(intpte_t pte)
-{
- return pte & PADDR_MASK & PAGE_MASK;
-}
-
-static inline intpte_t pte_from_paddr(paddr_t paddr, uint64_t flags)
-{
- return ((paddr & (PADDR_MASK & PAGE_MASK)) |
- (flags & ~(PADDR_MASK & PAGE_MASK)));
-}
-
-static inline intpte_t pte_from_gfn(unsigned long gfn, uint64_t flags)
-{
- return pte_from_paddr((paddr_t)gfn << PAGE_SHIFT, flags);
-}
-
-static inline intpte_t pte_from_virt(const void *va, uint64_t flags)
-{
- return pte_from_paddr((paddr_t)virt_to_gfn(va) << PAGE_SHIFT, flags);
-}
-
-#else /* CONFIG_PAGING_LEVELS > 0 */
-
-/* Enough compatibility to compile in unpaged environments. */
-extern paddr_t pte_to_paddr(intpte_t pte);
-extern intpte_t pte_from_paddr(paddr_t paddr, uint64_t flags);
-extern intpte_t pte_from_gfn(unsigned long gfn, uint64_t flags);
-extern intpte_t pte_from_virt(const void *va, uint64_t flags);
-
-#endif
-
-#endif /* XTF_X86_PAGETABLE_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
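Combined with the PF_SYM() mnemonics, these helpers give a compact way to build a leaf entry for an existing object. A sketch, assuming arch/symbolic-const.h is also included for PF_SYM(); the buffer symbol is hypothetical:

#include <arch/pagetable.h>
#include <arch/symbolic-const.h>

static char buffer[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Leaf PTE pointing at buffer: Accessed/Dirty, User, Writeable, Present. */
static intpte_t example_map_buffer(void)
{
    return pte_from_virt(buffer, PF_SYM(AD, U, RW, P));
}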
+++ /dev/null
-#ifndef XTF_X86_PROCESSOR_H
-#define XTF_X86_PROCESSOR_H
-
-/*
- * EFLAGS bits.
- */
-#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
-#define X86_EFLAGS_MBS 0x00000002 /* Resvd bit */
-#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
-#define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */
-#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
-#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
-#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
-#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
-#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
-#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
-#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
-#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
-#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
-#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
-#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
-#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
-#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
-#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
-
-/*
- * CPU flags in CR0.
- */
-#define X86_CR0_PE 0x00000001 /* Enable Protected Mode (RW) */
-#define X86_CR0_MP 0x00000002 /* Monitor Coprocessor (RW) */
-#define X86_CR0_EM 0x00000004 /* Require FPU Emulation (RO) */
-#define X86_CR0_TS 0x00000008 /* Task Switched (RW) */
-#define X86_CR0_ET 0x00000010 /* Extension type (RO) */
-#define X86_CR0_NE 0x00000020 /* Numeric Error Reporting (RW) */
-#define X86_CR0_WP 0x00010000 /* Supervisor Write Protect (RW) */
-#define X86_CR0_AM 0x00040000 /* Alignment Checking (RW) */
-#define X86_CR0_NW 0x20000000 /* Not Write-Through (RW) */
-#define X86_CR0_CD 0x40000000 /* Cache Disable (RW) */
-#define X86_CR0_PG 0x80000000 /* Paging (RW) */
-
-/*
- * CPU features in CR4.
- */
-#define X86_CR4_VME 0x00000001 /* VM86 extensions */
-#define X86_CR4_PVI 0x00000002 /* Virtual interrupts flag */
-#define X86_CR4_TSD 0x00000004 /* Disable time stamp at ipl 3 */
-#define X86_CR4_DE 0x00000008 /* Debugging extensions */
-#define X86_CR4_PSE 0x00000010 /* Page size extensions */
-#define X86_CR4_PAE 0x00000020 /* Physical address extensions */
-#define X86_CR4_MCE 0x00000040 /* Machine check */
-#define X86_CR4_PGE 0x00000080 /* Global pages */
-#define X86_CR4_PCE 0x00000100 /* Performance counters at ipl 3 */
-#define X86_CR4_OSFXSR 0x00000200 /* Fast FPU save and restore */
-#define X86_CR4_OSXMMEXCPT 0x00000400 /* Unmasked SSE exceptions */
-#define X86_CR4_UMIP 0x00000800 /* UMIP */
-#define X86_CR4_VMXE 0x00002000 /* VMX */
-#define X86_CR4_SMXE 0x00004000 /* SMX */
-#define X86_CR4_FSGSBASE 0x00010000 /* {rd,wr}{fs,gs}base */
-#define X86_CR4_PCIDE 0x00020000 /* PCID */
-#define X86_CR4_OSXSAVE 0x00040000 /* XSAVE/XRSTOR */
-#define X86_CR4_SMEP 0x00100000 /* SMEP */
-#define X86_CR4_SMAP 0x00200000 /* SMAP */
-
-/*
- * DR6 status bits.
- */
-#define X86_DR6_B0 (1u << 0) /* Breakpoint 0 triggered */
-#define X86_DR6_B1 (1u << 1) /* Breakpoint 1 triggered */
-#define X86_DR6_B2 (1u << 2) /* Breakpoint 2 triggered */
-#define X86_DR6_B3 (1u << 3) /* Breakpoint 3 triggered */
-#define X86_DR6_BD (1u << 13) /* Debug register accessed */
-#define X86_DR6_BS (1u << 14) /* Single step */
-#define X86_DR6_BT (1u << 15) /* Task switch */
-
-/*
- * CPU features in XCR0.
- */
-#define _XSTATE_FP 0
-#define XSTATE_FP (1ULL << _XSTATE_FP)
-#define _XSTATE_SSE 1
-#define XSTATE_SSE (1ULL << _XSTATE_SSE)
-#define _XSTATE_YMM 2
-#define XSTATE_YMM (1ULL << _XSTATE_YMM)
-#define _XSTATE_BNDREGS 3
-#define XSTATE_BNDREGS (1ULL << _XSTATE_BNDREGS)
-#define _XSTATE_BNDCSR 4
-#define XSTATE_BNDCSR (1ULL << _XSTATE_BNDCSR)
-#define _XSTATE_OPMASK 5
-#define XSTATE_OPMASK (1ULL << _XSTATE_OPMASK)
-#define _XSTATE_ZMM 6
-#define XSTATE_ZMM (1ULL << _XSTATE_ZMM)
-#define _XSTATE_HI_ZMM 7
-#define XSTATE_HI_ZMM (1ULL << _XSTATE_HI_ZMM)
-#define _XSTATE_PKRU 9
-#define XSTATE_PKRU (1ULL << _XSTATE_PKRU)
-#define _XSTATE_LWP 62
-#define XSTATE_LWP (1ULL << _XSTATE_LWP)
-
-/*
- * Exception mnemonics.
- */
-#define X86_EXC_DE 0 /* Divide Error. */
-#define X86_EXC_DB 1 /* Debug Exception. */
-#define X86_EXC_NMI 2 /* NMI. */
-#define X86_EXC_BP 3 /* Breakpoint. */
-#define X86_EXC_OF 4 /* Overflow. */
-#define X86_EXC_BR 5 /* BOUND Range. */
-#define X86_EXC_UD 6 /* Invalid Opcode. */
-#define X86_EXC_NM 7 /* Device Not Available. */
-#define X86_EXC_DF 8 /* Double Fault. */
-#define X86_EXC_CSO 9 /* Coprocessor Segment Overrun. */
-#define X86_EXC_TS 10 /* Invalid TSS. */
-#define X86_EXC_NP 11 /* Segment Not Present. */
-#define X86_EXC_SS 12 /* Stack-Segment Fault. */
-#define X86_EXC_GP 13 /* General Porection Fault. */
-#define X86_EXC_PF 14 /* Page Fault. */
-#define X86_EXC_SPV 15 /* PIC Spurious Interrupt Vector. */
-#define X86_EXC_MF 16 /* Maths fault (x87 FPU). */
-#define X86_EXC_AC 17 /* Alignment Check. */
-#define X86_EXC_MC 18 /* Machine Check. */
-#define X86_EXC_XM 19 /* SIMD Exception. */
-#define X86_EXC_VE 20 /* Virtualisation Exception. */
-
-/* Bitmap of exceptions which have error codes. */
-#define X86_EXC_HAVE_EC ((1 << X86_EXC_DF) | (1 << X86_EXC_TS) | \
- (1 << X86_EXC_NP) | (1 << X86_EXC_SS) | \
- (1 << X86_EXC_GP) | (1 << X86_EXC_PF) | \
- (1 << X86_EXC_AC))
-
-/* Bitmap of exceptions which are classified as faults. */
-#define X86_EXC_FAULTS ((1 << X86_EXC_DE) | (1 << X86_EXC_BR) | \
- (1 << X86_EXC_UD) | (1 << X86_EXC_NM) | \
- (1 << X86_EXC_CSO) | (1 << X86_EXC_TS) | \
- (1 << X86_EXC_NP) | (1 << X86_EXC_SS) | \
- (1 << X86_EXC_GP) | (1 << X86_EXC_PF) | \
- (1 << X86_EXC_MF) | (1 << X86_EXC_AC) | \
- (1 << X86_EXC_XM) | (1 << X86_EXC_VE))
-
-/* Bitmap of exceptions which are classified as interrupts. */
-#define X86_EXC_INTERRUPTS (1 << X86_EXC_NMI)
-
-/* Bitmap of exceptions which are classified as traps. */
-#define X86_EXC_TRAPS ((1 << X86_EXC_BP) | (1 << X86_EXC_OF))
-
-/* Bitmap of exceptions which are classified as aborts. */
-#define X86_EXC_ABORTS ((1 << X86_EXC_DF) | (1 << X86_EXC_MC))
-
-/* Number of reserved vectors for exceptions. */
-#define X86_NR_RESERVED_VECTORS 32
-
-/*
- * Error Code mnemonics.
- */
-/* Segment-based Error Code - architecturally defined. */
-#define X86_EC_EXT (1U << 0) /* External event. */
-#define X86_EC_IDT (1U << 1) /* Descriptor Location. IDT, or LDT/GDT */
-#define X86_EC_TI (1U << 2) /* Only if !IDT. LDT or GDT. */
-
-/* Segment-based Error Code - supplemental constants. */
-#define X86_EC_TABLE_MASK (3 << 1)
-#define X86_EC_SEL_SHIFT 3
-#define X86_EC_SEL_MASK (~0U << X86_EC_SEL_SHIFT)
-#define X86_EC_GDT 0
-#define X86_EC_LDT X86_EC_TI
-
-/* Pagefault Error Code - architecturally defined. */
-#define X86_PFEC_PRESENT (1U << 0)
-#define X86_PFEC_WRITE (1U << 1)
-#define X86_PFEC_USER (1U << 2)
-#define X86_PFEC_RSVD (1U << 3)
-#define X86_PFEC_INSN (1U << 4)
-#define X86_PFEC_PK (1U << 5)
-
-/*
- * Selector mnemonics.
- */
-/* Architecturally defined. */
-#define X86_SEL_TI (1U << 2) /* Table Indicator. */
-
-/* Supplemental constants. */
-#define X86_SEL_RPL_MASK 3 /* RPL is the bottom two bits. */
-#define X86_SEL_GDT 0
-#define X86_SEL_LDT X86_SEL_TI
-
-#endif /* XTF_X86_PROCESSOR_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
+++ /dev/null
-#ifndef XTF_X86_REGS_H
-#define XTF_X86_REGS_H
-
-#include <xtf/types.h>
-
-#if defined(__i386__)
-
-#define DECL_REG(n) \
- union { uint32_t e ## n; unsigned long n; }
-#define _DECL_REG(n) \
- union { uint32_t _e ## n; unsigned long _ ## n; }
-
-struct cpu_regs {
- DECL_REG(bp);
- DECL_REG(bx);
- DECL_REG(ax);
- DECL_REG(cx);
- DECL_REG(dx);
- DECL_REG(si);
- DECL_REG(di);
-
- uint32_t entry_vector;
- uint32_t error_code;
-
-/* Hardware exception frame. */
- DECL_REG(ip);
- uint16_t cs, _pad1[1];
- DECL_REG(flags);
- _DECL_REG(sp); /* Won't be valid if stack */
- uint16_t _ss, _pad0[1]; /* switch didn't occur. */
-/* Top of stack. */
-};
-
-#elif defined(__x86_64__)
-
-#define DECL_REG(n) \
- union { uint64_t r ## n; uint32_t e ## n; unsigned long n; }
-#define _DECL_REG(n) \
- union { uint64_t _r ## n; uint32_t _e ## n; unsigned long _ ## n; }
-
-struct cpu_regs {
- uint64_t r15;
- uint64_t r14;
- uint64_t r13;
- uint64_t r12;
- DECL_REG(bp);
- DECL_REG(bx);
- uint64_t r11;
- uint64_t r10;
- uint64_t r9;
- uint64_t r8;
- DECL_REG(ax);
- DECL_REG(cx);
- DECL_REG(dx);
- DECL_REG(si);
- DECL_REG(di);
-
- uint32_t error_code;
- uint32_t entry_vector;
-
-/* Hardware exception frame. */
- DECL_REG(ip);
- uint16_t cs, _pad1[3];
- DECL_REG(flags);
- _DECL_REG(sp);
- uint16_t _ss, _pad0[3];
-/* Top of stack. */
-};
-
-#endif /* __i386__ / __x86_64__ */
-
-#endif /* XTF_X86_REGS_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
+++ /dev/null
-#ifndef XTF_X86_SEGMENT_H
-#define XTF_X86_SEGMENT_H
-
-#include <xen/arch-x86/xen.h>
-
-/*
- * GDT layout:
- *
- * For simplicitly, the gdt is shared as much as possible between different
- * environments.
- *
- * 0 - null
- * 1 - 64bit supervisor code
- * 2 - 32bit supervisor code
- * 3 - 32bit supervisor data
- * 4 - 64bit userspace code
- * 5 - 32bit userspace code
- * 6 - 32bit userspace data
- * 7/8 - TSS (two slots in long mode)
- * 8 - DF TSS (32bit only)
- *
- * 9-12 - Available for test use
- */
-
-#define GDTE_CS64_DPL0 1
-#define GDTE_CS32_DPL0 2
-#define GDTE_DS32_DPL0 3
-#define GDTE_CS64_DPL3 4
-#define GDTE_CS32_DPL3 5
-#define GDTE_DS32_DPL3 6
-
-#define GDTE_TSS 7
-#define GDTE_TSS_DF 8
-
-#define GDTE_AVAIL0 9
-#define GDTE_AVAIL1 10
-#define GDTE_AVAIL2 11
-#define GDTE_AVAIL3 12
-
-#define NR_GDT_ENTRIES 13
-
-/*
- * HVM guests use the GDT directly.
- */
-#if defined(CONFIG_HVM)
-
-#ifdef __x86_64__
-
-#define __KERN_CS (GDTE_CS64_DPL0 * 8)
-#define __KERN_DS (0)
-#define __USER_CS (GDTE_CS64_DPL3 * 8 + 3)
-#define __USER_DS (GDTE_DS32_DPL3 * 8 + 3)
-
-#else /* __x86_64__ */
-
-#define __KERN_CS (GDTE_CS32_DPL0 * 8)
-#define __KERN_DS (GDTE_DS32_DPL0 * 8)
-#define __USER_CS (GDTE_CS32_DPL3 * 8 + 3)
-#define __USER_DS (GDTE_DS32_DPL3 * 8 + 3)
-
-#endif /* __x86_64__ */
-
-#endif /* CONFIG_HVM */
-
-/*
- * PV guests by default use the Xen ABI-provided selectors.
- */
-#if defined(CONFIG_PV)
-
-#ifdef __x86_64__
-/*
- * 64bit PV guest kernels run in cpl3, but exception frames generated by Xen
- * report cpl0 when interrupting kernel mode. Trim the kernel selectors down
- * to rpl0 so they match the exception frames; Xen will take care of bumping
- * rpl back to 3 when required.
- *
- * In Long mode, it is permitted to have NULL selectors for the plain data
- * segment selectors (this is expressed in the Xen ABI), but not for %ss. As
- * __{KERN,USER}_DS are used for all data selectors including %ss, use the
- * FLAT_RING3_SS64 rather than FLAT_RING3_DS64.
- */
-#define __KERN_CS (FLAT_RING3_CS64 & ~3)
-#define __KERN_DS (FLAT_RING3_SS64 & ~3)
-#define __USER_CS FLAT_RING3_CS64
-#define __USER_DS FLAT_RING3_SS64
-
-#else /* __x86_64__ */
-
-#define __KERN_CS FLAT_RING1_CS
-#define __KERN_DS FLAT_RING1_DS
-#define __USER_CS FLAT_RING3_CS
-#define __USER_DS FLAT_RING3_DS
-
-#endif /* __x86_64__ */
-
-#endif /* CONFIG_PV */
-
-#endif /* XTF_X86_SEGMENT_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
+++ /dev/null
-/**
- * @file include/arch/x86/symbolic-const.h
- *
- * Macros for creating constants using mnemonics.
- */
-#ifndef XTF_X86_SYMBOLIC_CONST_H
-#define XTF_X86_SYMBOLIC_CONST_H
-
-#include <xtf/macro_magic.h>
-
-#include <arch/x86/desc.h>
-#include <arch/x86/processor.h>
-
-/**
- * Tokenise and OR together.
- *
- * For each varadic parameter, tokenise with 't' and OR together.
- *
- * @param t Common stem partial token.
- * @param ... Partial tokens.
- *
- * Example:
- * <pre>
- * TOK_OR(t, x, y) => (t ## x | t ## y)
- * TOK_OR(t, x, y, z) => (t ## x | t ## y | t ## z)
- * </pre>
- */
-/** @cond */
-#define TOK_OR0(t) (0)
-#define TOK_OR1(t, x) (t ## x)
-#define TOK_OR2(t, x, ...) (t ## x | TOK_OR1(t, ##__VA_ARGS__))
-#define TOK_OR3(t, x, ...) (t ## x | TOK_OR2(t, ##__VA_ARGS__))
-#define TOK_OR4(t, x, ...) (t ## x | TOK_OR3(t, ##__VA_ARGS__))
-#define TOK_OR5(t, x, ...) (t ## x | TOK_OR4(t, ##__VA_ARGS__))
-#define TOK_OR6(t, x, ...) (t ## x | TOK_OR5(t, ##__VA_ARGS__))
-#define TOK_OR7(t, x, ...) (t ## x | TOK_OR6(t, ##__VA_ARGS__))
-#define TOK_OR8(t, x, ...) (t ## x | TOK_OR7(t, ##__VA_ARGS__))
-#define TOK_OR9(t, x, ...) (t ## x | TOK_OR8(t, ##__VA_ARGS__))
-#define TOK_OR10(t, x, ...) (t ## x | TOK_OR9(t, ##__VA_ARGS__))
-#define TOK_OR11(t, x, ...) (t ## x | TOK_OR10(t, ##__VA_ARGS__))
-/** @endcond */
-#define TOK_OR(t, ...) VAR_MACRO_C1(TOK_OR, t, ##__VA_ARGS__)
-
-/**
- * Initialise an LDT/GDT entry using SEG_ATTR_ mnemonics.
- *
- * @param base Segment base.
- * @param limit Segment limit.
- * @param ... Partial SEG_ATTR_ tokens for attributes.
- *
- * Example usage:
- * - INIT_GDTE_SYM(0, 0xfffff, P)
- * - uses @ref SEG_ATTR_P
- *
- * - INIT_GDTE_SYM(0, 0xfffff, CODE, L)
- * - uses @ref SEG_ATTR_CODE and @ref SEG_ATTR_L
- */
-#define INIT_GDTE_SYM(base, limit, ...) \
- INIT_GDTE(base, limit, TOK_OR(SEG_ATTR_, ##__VA_ARGS__))
-
-/**
- * Create a selector based error code using X86_EC_ mnemonics.
- *
- * @param sel Selector value.
- * @param ... Partial X86_EC_ tokens.
- *
- * Example usage:
- * - SEL_EC_SYM(0, GDT)
- * - Uses @ref X86_EC_GDT.
- *
- * - SEL_EC_SYM(0, IDT, EXT)
- * - Uses @ref X86_EC_IDT and @ref X86_EC_EXT.
- */
-#define SEL_EC_SYM(sel, ...) (sel | TOK_OR(X86_EC_, ##__VA_ARGS__))
-
-/**
- * Create an exception selector based error code using mnemonics, with
- * implicit @ref X86_EC_IDT.
- *
- * @param exc Partial X86_EXC_ token for selector.
- * @param ... Partial X86_EC_ tokens.
- *
- * Example usage:
- * - EXC_EC_SYM(DE)
- * - Uses @ref X86_EXC_DE and @ref X86_EC_IDT.
- *
- * - EXC_EC_SYM(DB, EXT)
- * - Uses @ref X86_EXC_DB, @ref X86_EC_IDT and @ref X86_EC_EXT.
- */
-#define EXC_EC_SYM(exc, ...) \
- SEL_EC_SYM(((X86_EXC_ ## exc) << 3), IDT, ##__VA_ARGS__)
-
-/**
- * Create pagetable entry flags based on mnemonics.
- *
- * @param ... Partial _PAGE_ tokens.
- *
- * Example usage:
- * - PF_SYM(AD, U, RW, P)
- * - Accessed, Dirty, User, Writeable, Present.
- */
-#define PF_SYM(...) TOK_OR(_PAGE_, ##__VA_ARGS__)
-
-#endif /* XTF_X86_SYMBOLIC_CONST_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
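The *_SYM helpers above are thin wrappers around TOK_OR, so a few worked expansions make the machinery concrete. These are sketches of what the preprocessor produces (modulo inner parentheses); the SEG_ATTR_, X86_EXC_, X86_EC_ and _PAGE_ values come from the headers this file includes:

/*
 *   PF_SYM(AD, U, RW, P)
 *       => (_PAGE_AD | _PAGE_U | _PAGE_RW | _PAGE_P)
 *
 *   INIT_GDTE_SYM(0, 0xfffff, CODE, L)
 *       => INIT_GDTE(0, 0xfffff, (SEG_ATTR_CODE | SEG_ATTR_L))
 *
 *   EXC_EC_SYM(DB, EXT)
 *       => (((X86_EXC_DB) << 3) | (X86_EC_IDT | X86_EC_EXT))
 */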
+++ /dev/null
-#ifndef XTF_X86_TRAPS_H
-#define XTF_X86_TRAPS_H
-
-#include <xtf/compiler.h>
-#include <arch/x86/regs.h>
-#include <arch/x86/page.h>
-
-/*
- * Arch-specific function to initialise the exception entry points, etc.
- */
-void arch_init_traps(void);
-
-/*
- * Arch-specific function to quiesce the domain, in the event that a
- * shutdown(crash) hypercall has not succeeded.
- */
-void __noreturn arch_crash_hard(void);
-
-/*
- * Return the correct %ss/%esp from an exception.  In 32bit, if no stack
- * switch occurs, the exception frame doesn't contain this information.
- */
-unsigned long cpu_regs_sp(const struct cpu_regs *regs);
-unsigned int cpu_regs_ss(const struct cpu_regs *regs);
-
-extern uint8_t boot_stack[3 * PAGE_SIZE];
-
-#if defined(CONFIG_PV)
-#include <xen/xen.h>
-
-extern struct start_info *start_info;
-#endif
-
-#endif /* XTF_X86_TRAPS_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
+++ /dev/null
-#ifndef XTF_X86_32_HYPERCALL_H
-#define XTF_X86_32_HYPERCALL_H
-
-/*
- * Hypercall primitives for 32bit
- *
- * Inputs: %ebx, %ecx, %edx, %esi, %edi, %ebp (arguments 1-6)
- */
-
-#define _hypercall32_1(type, hcall, a1) \
- ({ \
- long __res, __ign1; \
- asm volatile ( \
- "call hypercall_page + %c[offset]" \
- : "=a" (__res), "=b" (__ign1) \
- : [offset] "i" (hcall * 32), \
- "1" ((long)(a1)) \
- : "memory" ); \
- (type)__res; \
- })
-
-#define _hypercall32_2(type, hcall, a1, a2) \
- ({ \
- long __res, __ign1, __ign2; \
- asm volatile ( \
- "call hypercall_page + %c[offset]" \
- : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \
- : [offset] "i" (hcall * 32), \
- "1" ((long)(a1)), "2" ((long)(a2)) \
- : "memory" ); \
- (type)__res; \
- })
-
-#define _hypercall32_3(type, hcall, a1, a2, a3) \
- ({ \
- long __res, __ign1, __ign2, __ign3; \
- asm volatile ( \
- "call hypercall_page + %c[offset]" \
- : "=a" (__res), "=b" (__ign1), "=c" (__ign2), "=d" (__ign3) \
- : [offset] "i" (hcall * 32), \
- "1" ((long)(a1)), "2" ((long)(a2)), "3" ((long)(a3)) \
- : "memory" ); \
- (type)__res; \
- })
-
-#define _hypercall32_4(type, hcall, a1, a2, a3, a4) \
- ({ \
- long __res, __ign1, __ign2, __ign3, __ign4; \
- asm volatile ( \
- "call hypercall_page + %c[offset]" \
- : "=a" (__res), "=b" (__ign1), "=c" (__ign2), "=d" (__ign3),\
- "=S" (__ign4) \
- : [offset] "i" (hcall * 32), \
- "1" ((long)(a1)), "2" ((long)(a2)), "3" ((long)(a3)), \
- "4" ((long)(a4)) \
- : "memory" ); \
- (type)__res; \
- })
-
-#endif /* XTF_X86_32_HYPERCALL_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
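For context on how these primitives get used: the common hypercall header (further down in this patch) aliases them as HYPERCALL1..HYPERCALL3, and wrappers pass a compile-time hypercall number so the "i" constraint can resolve the offset into hypercall_page. A hedged sketch only; the wrapper name is made up for illustration and __HYPERVISOR_xen_version comes from the public xen/xen.h header:

#include <xen/xen.h>                    /* __HYPERVISOR_xen_version */

/* Illustrative wrapper only; XTF's real wrappers use the HYPERCALLn aliases. */
static inline int illustrative_xen_version(unsigned int cmd, void *arg)
{
    return _hypercall32_2(int, __HYPERVISOR_xen_version, cmd, arg);
}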
+++ /dev/null
-#ifndef XTF_X86_64_HYPERCALL_H
-#define XTF_X86_64_HYPERCALL_H
-
-/*
- * Hypercall primitives for 64bit
- *
- * Inputs: %rdi, %rsi, %rdx, %r10, %r8, %r9 (arguments 1-6)
- */
-
-#define _hypercall64_1(type, hcall, a1) \
- ({ \
- long __res, __ign1; \
- asm volatile ( \
- "call hypercall_page + %c[offset]" \
- : "=a" (__res), "=D" (__ign1) \
- : [offset] "i" (hcall * 32), \
- "1" ((long)(a1)) \
- : "memory" ); \
- (type)__res; \
- })
-
-#define _hypercall64_2(type, hcall, a1, a2) \
- ({ \
- long __res, __ign1, __ign2; \
- asm volatile ( \
- "call hypercall_page + %c[offset]" \
- : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \
- : [offset] "i" (hcall * 32), \
- "1" ((long)(a1)), "2" ((long)(a2)) \
- : "memory" ); \
- (type)__res; \
- })
-
-#define _hypercall64_3(type, hcall, a1, a2, a3) \
- ({ \
- long __res, __ign1, __ign2, __ign3; \
- asm volatile ( \
- "call hypercall_page + %c[offset]" \
- : "=a" (__res), "=D" (__ign1), "=S" (__ign2), "=d" (__ign3) \
- : [offset] "i" (hcall * 32), \
- "1" ((long)(a1)), "2" ((long)(a2)), "3" ((long)(a3)) \
- : "memory" ); \
- (type)__res; \
- })
-
-#define _hypercall64_4(type, hcall, a1, a2, a3, a4) \
- ({ \
- long __res, __ign1, __ign2, __ign3, __ign4; \
- register long _a4 asm ("r10") = ((long)(a4)); \
- asm volatile ( \
- "call hypercall_page + %c[offset]" \
- : "=a" (__res), "=D" (__ign1), "=S" (__ign2), "=d" (__ign3),\
- "=&r" (__ign4) \
- : [offset] "i" (hcall * 32), \
- "1" ((long)(a1)), "2" ((long)(a2)), "3" ((long)(a3)), \
- "4" (_a4) \
- : "memory" ); \
- (type)__res; \
- })
-
-#endif /* XTF_X86_64_HYPERCALL_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
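The 64bit variants pass arguments in the SysV registers except that %r10 replaces %rcx for argument 4 (the hypercall ABI avoids %rcx because SYSCALL clobbers it), which is why _hypercall64_4 pins a4 with register long _a4 asm ("r10"). A hedged sketch of a four-argument call; the wrapper name is illustrative and __HYPERVISOR_mmu_update comes from the public xen/xen.h header:

#include <xen/xen.h>                    /* __HYPERVISOR_mmu_update */

/* Illustrative wrapper only: four arguments, the last landing in %r10. */
static inline long illustrative_mmu_update(void *reqs, unsigned int count,
                                           unsigned int *done,
                                           unsigned int foreigndom)
{
    return _hypercall64_4(long, __HYPERVISOR_mmu_update,
                          reqs, count, done, foreigndom);
}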
+++ /dev/null
-#ifndef XTF_X86_XTF_H
-#define XTF_X86_XTF_H
-
-#include <arch/x86/cpuid.h>
-#include <arch/x86/lib.h>
-
-extern char _end[];
-
-#endif /* XTF_X86_XTF_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
#include <xtf/traps.h>
/* Arch specific headers. */
-#include <arch/x86/xtf.h>
+#include <arch/xtf.h>
#endif /* XTF_H */
#include <xtf/numbers.h>
-#include <arch/x86/asm_macros.h>
+#include <arch/asm_macros.h>
#ifdef __ASSEMBLY__
#define XTF_BARRIER_H
#if defined(__x86_64__) || defined (__i386__)
-# include <arch/x86/barrier.h>
+# include <arch/barrier.h>
#else
# error Bad architecture
#endif
#define XTF_EXLOG_H
#include <xtf/types.h>
-#include <arch/x86/regs.h>
+#include <arch/regs.h>
void xtf_exlog_start(void);
void xtf_exlog_reset(void);
#define XTF_HYPERCALL_H
#include <xtf/types.h>
-#include <arch/x86/page.h>
+#include <arch/page.h>
#if defined(__x86_64__)
-# include <arch/x86/x86_64/hypercall-x86_64.h>
+# include <arch/x86_64/hypercall-x86_64.h>
# define HYPERCALL1 _hypercall64_1
# define HYPERCALL2 _hypercall64_2
# define HYPERCALL3 _hypercall64_3
#elif defined(__i386__)
-# include <arch/x86/x86_32/hypercall-x86_32.h>
+# include <arch/x86_32/hypercall-x86_32.h>
# define HYPERCALL1 _hypercall32_1
# define HYPERCALL2 _hypercall32_2
# define HYPERCALL3 _hypercall32_3
#include <xtf/extable.h>
-#include <arch/x86/traps.h>
+#include <arch/traps.h>
/**
* Function pointer to allow tests to install an unhandled exception hook.
*/
#include <xtf.h>
-#include <arch/x86/exinfo.h>
-#include <arch/x86/msr-index.h>
-#include <arch/x86/processor.h>
+#include <arch/exinfo.h>
+#include <arch/msr-index.h>
+#include <arch/processor.h>
const char test_title[] = "Guest CPUID Faulting support";
*/
#include <xtf.h>
-#include <arch/x86/decode.h>
-#include <arch/x86/exinfo.h>
-#include <arch/x86/processor.h>
-#include <arch/x86/symbolic-const.h>
+#include <arch/decode.h>
+#include <arch/exinfo.h>
+#include <arch/processor.h>
+#include <arch/symbolic-const.h>
const char test_title[] = "FPU Exception Emulation";
*/
#include <xtf.h>
-#include <arch/x86/decode.h>
-#include <arch/x86/desc.h>
-#include <arch/x86/msr-index.h>
-#include <arch/x86/symbolic-const.h>
+#include <arch/decode.h>
+#include <arch/desc.h>
+#include <arch/msr-index.h>
+#include <arch/symbolic-const.h>
const char test_title[] = "Invlpg tests";
-#include <arch/x86/processor.h>
-#include <arch/x86/segment.h>
+#include <arch/processor.h>
+#include <arch/segment.h>
#include <xtf/asm_macros.h>
ENTRY(exec_user_with_iopl) /* void (*fn)(void), unsigned int iopl */
*/
#include <xtf.h>
-#include <arch/x86/processor.h>
+#include <arch/processor.h>
const char test_title[] = "PV IOPL emulation";
*/
#include <xtf.h>
-#include <arch/x86/idt.h>
-#include <arch/x86/processor.h>
-#include <arch/x86/segment.h>
+#include <arch/idt.h>
+#include <arch/processor.h>
+#include <arch/segment.h>
const char test_title[] = "XTF Selftests";
#include <xtf/extable.h>
#include <xtf/asm_macros.h>
#include <xen/arch-x86/xen.h>
-#include <arch/x86/processor.h>
+#include <arch/processor.h>
/* Macro to generate a single test function. */
/* See lowlevel.h for a description of nomenclature. */
*/
#include <xtf.h>
-#include <arch/x86/processor.h>
-#include <arch/x86/symbolic-const.h>
+#include <arch/processor.h>
+#include <arch/symbolic-const.h>
#include "lowlevel.h"
#include <xtf.h>
-#include <arch/x86/pagetable.h>
-#include <arch/x86/symbolic-const.h>
+#include <arch/pagetable.h>
+#include <arch/symbolic-const.h>
const char test_title[] = "XSA-170 PoC";
*/
#include <xtf.h>
-#include <arch/x86/pagetable.h>
-#include <arch/x86/processor.h>
-#include <arch/x86/symbolic-const.h>
+#include <arch/pagetable.h>
+#include <arch/processor.h>
+#include <arch/symbolic-const.h>
const char test_title[] = "XSA-173 PoC";
*/
#include <xtf.h>
-#include <arch/x86/pagetable.h>
-#include <arch/x86/symbolic-const.h>
+#include <arch/pagetable.h>
+#include <arch/symbolic-const.h>
const char test_title[] = "XSA-182 PoC";
*/
#include <xtf.h>
-#include <arch/x86/pagetable.h>
-#include <arch/x86/symbolic-const.h>
+#include <arch/pagetable.h>
+#include <arch/symbolic-const.h>
const char test_title[] = "XSA-183 PoC";
*/
#include <xtf.h>
-#include <arch/x86/pagetable.h>
-#include <arch/x86/symbolic-const.h>
+#include <arch/pagetable.h>
+#include <arch/symbolic-const.h>
const char test_title[] = "XSA-185 PoC";
*/
#include <xtf.h>
-#include <arch/x86/idt.h>
-#include <arch/x86/pagetable.h>
-#include <arch/x86/symbolic-const.h>
+#include <arch/idt.h>
+#include <arch/pagetable.h>
+#include <arch/symbolic-const.h>
const char test_title[] = "XSA-186 PoC";
* @see tests/xsa-188/main.c
*/
#include <xtf.h>
-#include <arch/x86/mm.h>
+#include <arch/mm.h>
const char test_title[] = "XSA-188 PoC";
*/
#include <xtf.h>
-#include <arch/x86/decode.h>
-#include <arch/x86/desc.h>
-#include <arch/x86/exinfo.h>
-#include <arch/x86/symbolic-const.h>
+#include <arch/decode.h>
+#include <arch/desc.h>
+#include <arch/exinfo.h>
+#include <arch/symbolic-const.h>
const char test_title[] = "XSA-191 PoC";
*/
#include <xtf.h>
-#include <arch/x86/desc.h>
-#include <arch/x86/exinfo.h>
-#include <arch/x86/idt.h>
+#include <arch/desc.h>
+#include <arch/exinfo.h>
+#include <arch/idt.h>
const char test_title[] = "XSA-192 PoC";
*/
#include <xtf.h>
-#include <arch/x86/msr-index.h>
+#include <arch/msr-index.h>
const char test_title[] = "XSA-193 PoC";
#include <xtf.h>
#include <xen/memory.h>
-#include <arch/x86/pagetable.h>
-#include <arch/x86/symbolic-const.h>
+#include <arch/pagetable.h>
+#include <arch/symbolic-const.h>
const char test_title[] = "XSA-195 PoC";
*/
#include <xtf.h>
-#include <arch/x86/exinfo.h>
-#include <arch/x86/idt.h>
-#include <arch/x86/processor.h>
-#include <arch/x86/symbolic-const.h>
+#include <arch/exinfo.h>
+#include <arch/idt.h>
+#include <arch/processor.h>
+#include <arch/symbolic-const.h>
const char test_title[] = "XSA-196 PoC";
*/
#include <xtf.h>
-#include <arch/x86/exinfo.h>
+#include <arch/exinfo.h>
const char test_title[] = "XSA-200 PoC";