if ( IS_DEFINED(CONFIG_32BIT) )
{
    gdt[GDTE_TSS_DF] = GDTE(_u(&tss_DF), 0x67, 0x89);
+    barrier();
    pack_task_gate(&idt[X86_EXC_DF], GDTE_TSS_DF * 8);
}
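+    /* Keep the compiler from reordering the write past its use below. */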
lidt(&idt_ptr);
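+ /* 0x67 = sizeof(tss) - 1; 0x89 = present, DPL0, available TSS. */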
gdt[GDTE_TSS] = GDTE(_u(&tss), 0x67, 0x89);
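+ /* Keep the compiler from reordering the descriptor write past ltr(). */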
+ barrier();
ltr(GDTE_TSS * 8);
/*
extern char _end[];
+/*** Misc helpers which are library code, but really want to be inline. ***/
+
+/**
+ * Helper to update a live LDT/GDT entry.
+ *
+ * HVM guests can write their descriptor tables directly.  PV guests must
+ * use the update_descriptor hypercall, as Xen maps their descriptor table
+ * frames read-only.
+ */
+static inline void update_desc(user_desc *ptr, const user_desc new)
+{
+    if ( IS_DEFINED(CONFIG_HVM) )
+    {
+        *ptr = new;
+
+        /*
+         * Prevent the compiler from reordering later operations which refer
+         * to the updated descriptor.
+         */
+        barrier();
+    }
+    else
+    {
+        int rc = hypercall_update_descriptor(virt_to_maddr(ptr), new);
+
+        if ( rc )
+            panic("Update descriptor failed: %d\n", rc);
+    }
+}
+
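+/*
+ * Illustrative usage, as a sketch: install a descriptor into a spare GDT
+ * slot, then load a selector referencing it.  GDTE_AVAIL0, GDTE_SYM() and
+ * write_fs() are as used by the tests below.
+ *
+ *     update_desc(&gdt[GDTE_AVAIL0],
+ *                 GDTE_SYM(0, 0xfffff, COMMON, DATA, DPL0, B, W));
+ *     write_fs(GDTE_AVAIL0 << 3);
+ */
+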
#endif /* XTF_X86_XTF_H */
/*
    const struct tlb_refill_fs_test *t = &tlb_refill_fs_tests[i];
    printk(" Test: %%fs %s\n", t->desc);
-    gdt[GDTE_AVAIL0] = t->seg;
+    update_desc(&gdt[GDTE_AVAIL0], t->seg);
    write_fs(GDTE_AVAIL0 << 3);
    run_tlb_refill_test(invlpg_fs_refill, t->mapping);
}
invlpg_fs_checked(0);
printk(" Test: Past segment limit\n");
- gdt[GDTE_AVAIL0] = GDTE_SYM(0, 1, COMMON, DATA, DPL0, B, W);
+ update_desc(&gdt[GDTE_AVAIL0], GDTE_SYM(0, 1, COMMON, DATA, DPL0, B, W));
write_fs(GDTE_AVAIL0 << 3);
invlpg_fs_checked(0x2000);
printk(" Test: Before expand-down segment limit\n");
- gdt[GDTE_AVAIL0] = GDTE_SYM(0, 1, COMMON, DATA, DPL0, B, W, E);
+ update_desc(&gdt[GDTE_AVAIL0], GDTE_SYM(0, 1, COMMON, DATA, DPL0, B, W, E));
write_fs(GDTE_AVAIL0 << 3);
invlpg_fs_checked(0);
/* For 32bit, use segments with a limit of 2GB. */
if ( IS_DEFINED(CONFIG_32BIT) )
{
-    user_desc code = GDTE_SYM(0, 0x7ffff, COMMON, CODE, DPL3, R, D);
-    user_desc data = GDTE_SYM(0, 0x7ffff, COMMON, DATA, DPL3, B, W);
-
-    if ( IS_DEFINED(CONFIG_HVM) )
-    {
-        gdt[GDTE_AVAIL0] = code;
-        gdt[GDTE_AVAIL1] = data;
-    }
-    else
-    {
-        int rc = hypercall_update_descriptor(virt_to_maddr(
-                                                 &gdt[GDTE_AVAIL0]), code);
-
-        if ( !rc )
-            rc = hypercall_update_descriptor(virt_to_maddr(
-                                                 &gdt[GDTE_AVAIL1]), data);
-
-        if ( rc )
-            return xtf_error("Error: Update descriptor failed: %d\n", rc);
-    }
-
+    /* Code selector in AVAIL0 */
+    update_desc(&gdt[GDTE_AVAIL0],
+                GDTE_SYM(0, 0x7ffff, COMMON, CODE, DPL3, R, D));
    exec_user_cs = GDTE_AVAIL0 << 3 | 3;
+
+    /* Data selector in AVAIL1 */
+    update_desc(&gdt[GDTE_AVAIL1],
+                GDTE_SYM(0, 0x7ffff, COMMON, DATA, DPL3, B, W));
    exec_user_ss = GDTE_AVAIL1 << 3 | 3;
}
* Set up NMI handling to be a task gate.
*/
xtf_unhandled_exception_hook = unhandled_exception;
- gdt[GDTE_AVAIL0] = GDTE(_u(&nmi_tss), 0x67, 0x89);
+ update_desc(&gdt[GDTE_AVAIL0], GDTE(_u(&nmi_tss), 0x67, 0x89));
pack_task_gate(&idt[X86_EXC_NMI], GDTE_AVAIL0 * 8);
- barrier();
/*
* Send an NMI from supervisor mode, checking that we task switch back to
* to execute the code with. The stub still runs with 32bit data
* segments, which is perfectly valid.
*/
- gdt[GDTE_AVAIL0] = GDTE_SYM(0, 0xfffff, COMMON, CODE, DPL0, R);
+ update_desc(&gdt[GDTE_AVAIL0],
+             GDTE_SYM(0, 0xfffff, COMMON, CODE, DPL0, R));
asm volatile ("push $%c[cs16];"
"push $1f;"
user_desc ldt[1] = { gdt[__KERN_DS >> 3] };
- gdt[GDTE_AVAIL0] = GDTE(_u(ldt), sizeof(ldt) - 1, 0x82);
- barrier();
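+ /* Attributes 0x82: present, DPL0, system type 2 (LDT). */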
+ update_desc(&gdt[GDTE_AVAIL0], GDTE(_u(ldt), sizeof(ldt) - 1, 0x82));
lldt(GDTE_AVAIL0 << 3);
lldt(0);
xtf_set_idte(X86_VEC_AVAIL, &idte);
/* Create the vm86 TSS descriptor. */
- gdt[GDTE_AVAIL0] = GDTE(_u(&vm86_tss), 0x67, 0x89);
+ update_desc(&gdt[GDTE_AVAIL0], GDTE(_u(&vm86_tss), 0x67, 0x89));
/* Copy a stub to somewhere vm86 can actually reach. */
uint8_t insn_buf[] = { 0xcd, X86_VEC_AVAIL }; /* `int $X86_VEC_AVAIL` */
*/
if ( vendor_is_amd )
{
-    gdt[GDTE_AVAIL1] = GDTE(0, 0, 0x82);
+    update_desc(&gdt[GDTE_AVAIL1], GDTE(0, 0, 0x82));
    lldt(GDTE_AVAIL1 << 3);
}
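+    /* An empty LDT: zero base and limit, present, type 2 (LDT). */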
lldt(0);