+#include <xtf/numbers.h>
+
#include <arch/desc.h>
#include <arch/segment.h>
#include <arch/symbolic-const.h>
desc_ptr gdt_ptr =
{
.limit = sizeof(gdt) - 1,
- .base = (unsigned long)&gdt,
+ .base = _u(&gdt),
};
#if defined(CONFIG_HVM)
desc_ptr idt_ptr =
{
.limit = sizeof(idt) - 1,
- .base = (unsigned long)&idt,
+ .base = _u(&idt),
};
#endif
{
#if defined(__i386__)
- .esp0 = (unsigned long)&boot_stack[2 * PAGE_SIZE],
+ .esp0 = _u(&boot_stack[2 * PAGE_SIZE]),
.ss0 = __KERN_DS,
#elif defined(__x86_64__)
- .rsp0 = (unsigned long)&boot_stack[2 * PAGE_SIZE],
- .ist[0] = (unsigned long)&boot_stack[3 * PAGE_SIZE],
+ .rsp0 = _u(&boot_stack[2 * PAGE_SIZE]),
+ .ist[0] = _u(&boot_stack[3 * PAGE_SIZE]),
#endif
#if defined(__i386__)
static hw_tss tss_DF __aligned(16) =
{
- .esp = (unsigned long)&boot_stack[3 * PAGE_SIZE],
+ .esp = _u(&boot_stack[3 * PAGE_SIZE]),
.ss = __KERN_DS,
.ds = __KERN_DS,
.es = __KERN_DS,
.fs = __KERN_DS,
.gs = __KERN_DS,
- .eip = (unsigned long)&entry_DF,
+ .eip = _u(entry_DF),
.cs = __KERN_CS,
- .cr3 = (unsigned long)&cr3_target,
+ .cr3 = _u(cr3_target),
.iopb = X86_TSS_INVALID_IO_BITMAP,
};
static void setup_gate(unsigned int entry, void *addr, unsigned int dpl)
{
#if defined(__i386__)
- pack_gate32(&idt[entry], 14, (unsigned long)addr, dpl, __KERN_CS);
+ pack_gate32(&idt[entry], 14, _u(addr), dpl, __KERN_CS);
#elif defined(__x86_64__)
- pack_gate64(&idt[entry], 14, (unsigned long)addr, dpl, 0, __KERN_CS);
+ pack_gate64(&idt[entry], 14, _u(addr), dpl, 0, __KERN_CS);
#endif
}
static void setup_doublefault(void)
{
#if defined(__i386__)
- gdt[GDTE_TSS_DF] =
- (typeof(*gdt))INIT_GDTE((unsigned long)&tss_DF, 0x67, 0x89);
+ gdt[GDTE_TSS_DF] = (typeof(*gdt))INIT_GDTE(_u(&tss_DF), 0x67, 0x89);
pack_gate32(&idt[X86_EXC_DF], 5, 0, 0, GDTE_TSS_DF * 8);
#elif defined(__x86_64__)
- pack_gate64(&idt[X86_EXC_DF], 14, (unsigned long)entry_DF, 0, 1, __KERN_CS);
+ pack_gate64(&idt[X86_EXC_DF], 14, _u(entry_DF), 0, 1, __KERN_CS);
#endif
}
lidt(&idt_ptr);
- gdt[GDTE_TSS] = (typeof(*gdt))INIT_GDTE((unsigned long)&tss, 0x67, 0x89);
+ gdt[GDTE_TSS] = (typeof(*gdt))INIT_GDTE(_u(&tss), 0x67, 0x89);
ltr(GDTE_TSS * 8);
/*
for ( gfn = virt_to_gfn(__start_user_text); gfn < end; ++gfn )
l1_identmap[gfn] |= _PAGE_USER;
- write_cr3((unsigned long)&cr3_target);
+ write_cr3(_u(cr3_target));
}
}
static inline unsigned long virt_to_pfn(const void *va)
{
- return ((unsigned long)va) >> PAGE_SHIFT;
+ return _u(va) >> PAGE_SHIFT;
}
#if defined(CONFIG_PV)
struct xen_trap_info pv_default_trap_info[] =
{
- { X86_EXC_DE, 0|4, __KERN_CS, (unsigned long)&entry_DE },
- { X86_EXC_DB, 0|4, __KERN_CS, (unsigned long)&entry_DB },
- { X86_EXC_NMI, 0|4, __KERN_CS, (unsigned long)&entry_NMI },
- { X86_EXC_BP, 3|4, __KERN_CS, (unsigned long)&entry_BP },
- { X86_EXC_OF, 3|4, __KERN_CS, (unsigned long)&entry_OF },
- { X86_EXC_BR, 0|4, __KERN_CS, (unsigned long)&entry_BR },
- { X86_EXC_UD, 0|4, __KERN_CS, (unsigned long)&entry_UD },
- { X86_EXC_NM, 0|4, __KERN_CS, (unsigned long)&entry_NM },
- { X86_EXC_DF, 0|4, __KERN_CS, (unsigned long)&entry_DF },
- { X86_EXC_TS, 0|4, __KERN_CS, (unsigned long)&entry_TS },
- { X86_EXC_NP, 0|4, __KERN_CS, (unsigned long)&entry_NP },
- { X86_EXC_SS, 0|4, __KERN_CS, (unsigned long)&entry_SS },
- { X86_EXC_GP, 0|4, __KERN_CS, (unsigned long)&entry_GP },
- { X86_EXC_PF, 0|4, __KERN_CS, (unsigned long)&entry_PF },
- { X86_EXC_MF, 0|4, __KERN_CS, (unsigned long)&entry_MF },
- { X86_EXC_AC, 0|4, __KERN_CS, (unsigned long)&entry_AC },
- { X86_EXC_MC, 0|4, __KERN_CS, (unsigned long)&entry_MC },
- { X86_EXC_XM, 0|4, __KERN_CS, (unsigned long)&entry_XM },
- { X86_EXC_VE, 0|4, __KERN_CS, (unsigned long)&entry_VE },
-
- { X86_VEC_RET2KERN, 3|4, __KERN_CS, (unsigned long)&entry_ret_to_kernel },
+ { X86_EXC_DE, 0|4, __KERN_CS, _u(entry_DE) },
+ { X86_EXC_DB, 0|4, __KERN_CS, _u(entry_DB) },
+ { X86_EXC_NMI, 0|4, __KERN_CS, _u(entry_NMI) },
+ { X86_EXC_BP, 3|4, __KERN_CS, _u(entry_BP) },
+ { X86_EXC_OF, 3|4, __KERN_CS, _u(entry_OF) },
+ { X86_EXC_BR, 0|4, __KERN_CS, _u(entry_BR) },
+ { X86_EXC_UD, 0|4, __KERN_CS, _u(entry_UD) },
+ { X86_EXC_NM, 0|4, __KERN_CS, _u(entry_NM) },
+ { X86_EXC_DF, 0|4, __KERN_CS, _u(entry_DF) },
+ { X86_EXC_TS, 0|4, __KERN_CS, _u(entry_TS) },
+ { X86_EXC_NP, 0|4, __KERN_CS, _u(entry_NP) },
+ { X86_EXC_SS, 0|4, __KERN_CS, _u(entry_SS) },
+ { X86_EXC_GP, 0|4, __KERN_CS, _u(entry_GP) },
+ { X86_EXC_PF, 0|4, __KERN_CS, _u(entry_PF) },
+ { X86_EXC_MF, 0|4, __KERN_CS, _u(entry_MF) },
+ { X86_EXC_AC, 0|4, __KERN_CS, _u(entry_AC) },
+ { X86_EXC_MC, 0|4, __KERN_CS, _u(entry_MC) },
+ { X86_EXC_XM, 0|4, __KERN_CS, _u(entry_XM) },
+ { X86_EXC_VE, 0|4, __KERN_CS, _u(entry_VE) },
+
+ { X86_VEC_RET2KERN, 3|4, __KERN_CS, _u(entry_ret_to_kernel) },
{ 0, 0, 0, 0 }, /* Sentinel. */
};
panic("Unable to remap user_stack with _PAGE_USER\n");
extern const char __start_user_text[], __end_user_text[];
- unsigned long va = (unsigned long)__start_user_text;
- while ( va < (unsigned long)__end_user_text )
+ unsigned long va = _u(__start_user_text);
+
+ while ( va < _u(__end_user_text) )
{
nl1e = pte_from_virt(_p(va), PF_SYM(AD, U, RW, P));
panic("Unable to locate Xen CPUID leaves\n");
cpuid(base + 2, &eax, &ebx, &ecx, &edx);
- wrmsr(ebx, (unsigned long)&hypercall_page);
+ wrmsr(ebx, _u(hypercall_page));
barrier();
}
if ( (regs->cs & 3) > (cs & 3) )
return regs->_sp;
- return (unsigned long)regs + offsetof(struct cpu_regs, _sp);
+ return _u(regs) + offsetof(struct cpu_regs, _sp);
#endif
}
*/
#define _p(v) ((void*)(unsigned long)(v))
+/**
+ * Express an arbitrary value @p v as an unsigned long.
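+ *
+ * Companion to _p() above; typically applied to pointers and to function or
+ * array symbols, e.g. _u(&gdt) or _u(entry_DF), in place of an open-coded
+ * (unsigned long) cast.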
+ */
+#define _u(v) ((unsigned long)(v))
+
#endif /* !__ASSEMBLY__ */
#endif /* XTF_NUMBERS_H */
printk("Testing 'invlpg' in normally-faulting conditions\n");
printk(" Test: Mapped address\n");
- invlpg_checked((unsigned long)&test_main);
+ invlpg_checked(_u(test_main));
printk(" Test: Unmapped address\n");
invlpg_checked(0);
return xtf_skip("Skip: MSR_DEBUGCTL.LBR not available\n");
/* Construct a function pointer to int3_stub() via its upper alias. */
- void (*int3_stub_alias)(void) =
- _p((unsigned long)&int3_stub | 0xffff800000000000);
+ void (*int3_stub_alias)(void) = _p(_u(int3_stub) | 0xffff800000000000);
int3_stub_alias();
/* Check that one entry has now been logged. */
if ( !check_nr_entries(1) ||
- !check_exlog_entry(0, __KERN_CS,
- (unsigned long)&label_test_exlog_int3,
- X86_EXC_BP, 0) )
+ !check_exlog_entry(0, __KERN_CS, _u(label_test_exlog_int3), X86_EXC_BP, 0) )
goto out;
asm volatile ("label_test_exlog_ud2a: ud2a; 1:"
/* Check that two entries have now been logged. */
if ( !check_nr_entries(2) ||
- !check_exlog_entry(1, __KERN_CS,
- (unsigned long)&label_test_exlog_ud2a,
- X86_EXC_UD, 0) )
+ !check_exlog_entry(1, __KERN_CS, _u(label_test_exlog_ud2a), X86_EXC_UD, 0) )
goto out;
xtf_exlog_reset();
: "=q" (tmp) :: "memory");
if ( check_nr_entries(1) )
- check_exlog_entry(0, __KERN_CS,
- (unsigned long)&label_test_NULL_unmapped,
- X86_EXC_PF, 0);
+ check_exlog_entry(0, __KERN_CS, _u(label_test_NULL_unmapped), X86_EXC_PF, 0);
xtf_exlog_stop();
}
return false;
}
- regs->ip = (unsigned long)hook_fixup;
+ regs->ip = _u(hook_fixup);
return true;
}
{
struct xtf_idte idte =
{
- .addr = (unsigned long)test_idte_handler,
+ .addr = _u(test_idte_handler),
/* PV guests need DPL1, HVM need DPL0. */
.dpl = IS_DEFINED(CONFIG_PV) ? 1 : 0,
.cs = __KERN_CS,
{
struct xtf_idte idte =
{
- .addr = (unsigned long)test_int_handler,
+ .addr = _u(test_int_handler),
.cs = __KERN_CS,
};
user_desc ldt[1] = { gdt[__KERN_DS >> 3] };
- gdt[GDTE_AVAIL0] =
- (typeof(*gdt))INIT_GDTE((unsigned long)ldt, sizeof(ldt) - 1, 0x82);
+ gdt[GDTE_AVAIL0] = (typeof(*gdt))INIT_GDTE(_u(ldt), sizeof(ldt) - 1, 0x82);
barrier();
lldt(GDTE_AVAIL0 << 3);
.cs = 0,
.ss = 0,
- .esp0 = (unsigned long)&boot_stack[2 * PAGE_SIZE],
+ .esp0 = _u(&boot_stack[2 * PAGE_SIZE]),
.ss0 = __KERN_DS,
.ldtr = 0,
{
struct xtf_idte idte =
{
- .addr = (unsigned long)ret_from_vm86,
+ .addr = _u(ret_from_vm86),
.cs = __KERN_CS,
.dpl = 3,
};
xtf_set_idte(X86_VEC_AVAIL, &idte);
/* Create the vm86 TSS descriptor. */
- gdt[GDTE_AVAIL0] =
- (user_desc)INIT_GDTE((unsigned long)&vm86_tss, 0x67, 0x89);
+ gdt[GDTE_AVAIL0] = (user_desc)INIT_GDTE(_u(&vm86_tss), 0x67, 0x89);
/* Copy a stub to somewhere vm86 can actually reach. */
uint8_t insn_buf[] = { 0xcd, X86_VEC_AVAIL }; /* `int $X86_VEC_AVAIL` */
void test_main(void)
{
bool leak_detected = false;
- uint32_t *size = _p(ROUNDUP((unsigned long)&_end[0],
- sizeof(unsigned long)));
+ uint32_t *size = _p(ROUNDUP(_u(_end), sizeof(unsigned long)));
Elf32_Ehdr *ehdr = _p(size) + 4;
if ( !(ehdr->e_ident[EI_MAG0] == ELFMAG0 &&
* A vulnerable Xen mis-calculates the memory adjustment, meaning that it
* will attempt to read from some other address.
*/
- unsigned long va = (unsigned long)mem - mem_adjust;
+ unsigned long va = _u(mem) - mem_adjust;
/*
* Make all of the virtual address space readable, so Xen's data fetch
struct xtf_idte idte =
{
- .addr = (unsigned long)custom_doublefault_handler,
+ .addr = _u(custom_doublefault_handler),
.cs = __KERN_CS,
.dpl = 0,
};
/* Set up the MSRs. */
wrmsr(MSR_STAR, ((((uint64_t)GDTE_AVAIL0 * 8 + 0) << 32) |
(((uint64_t)GDTE_AVAIL2 * 8 + 3) << 48)));
- wrmsr(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
+ wrmsr(MSR_LSTAR, _u(entry_SYSCALL_64));
wrmsr(MSR_FMASK, X86_EFLAGS_TF);
xtf_exlog_start();