/* Handle #DF with a task gate in 32bit, and IST 1 in 64bit. */
if ( IS_DEFINED(CONFIG_32BIT) )
{
- gdt[GDTE_TSS_DF] = GDTE(_u(&tss_DF), 0x67, 0x89);
+ pack_tss_desc(&gdt[GDTE_TSS_DF], &tss_DF);
pack_task_gate(&idt[X86_EXC_DF], GDTE_TSS_DF * 8);
}
else
    idt[X86_EXC_DF].ist = 1;

lidt(&idt_ptr);
- gdt[GDTE_TSS] = GDTE(_u(&tss), 0x67, 0x89);
- barrier();
+ pack_tss_desc(&gdt[GDTE_TSS], &tss);
ltr(GDTE_TSS * 8);
/*
return limit;
}
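+/*
+ * Write a TSS descriptor for @t into the GDT slot(s) at @d.  64bit system
+ * descriptors are 16 bytes, so the GDT slot following @d must be free too.
+ */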
+static inline void pack_tss_desc(user_desc *d, const env_tss *t)
+{
+ unsigned long base = (unsigned long)t;
+
+ d[0] = GDTE(base, sizeof(*t) - 1, 0x89);
+#ifdef __x86_64__
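+ /* Second half of the 16-byte descriptor: base address bits 63:32. */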
+ d[1] = (user_desc){{{ .lo = base >> 32, .hi = 0 }}};
+#endif
+ barrier(); /* Force desc update before ltr. */
+}
+
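+/*
+ * Write an LDT descriptor for @ldt (byte-granular @limit) into the GDT
+ * slot(s) at @d.  As with pack_tss_desc(), 64bit uses two consecutive slots.
+ */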
+static inline void pack_ldt_desc(user_desc *d, const user_desc *ldt,
+ unsigned int limit)
+{
+ unsigned long base = (unsigned long)ldt;
+
+ d[0] = GDTE(base, limit, 0x82);
+#ifdef __x86_64__
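+ /* As above: base address bits 63:32 live in the second half. */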
+ d[1] = (user_desc){{{ .lo = base >> 32, .hi = 0 }}};
+#endif
+ barrier(); /* Force desc update before lldt. */
+}
+
#endif /* XTF_X86_DESC_H */
/*
* Set up NMI handling to be a task gate.
*/
xtf_unhandled_exception_hook = unhandled_exception;
- update_desc(&gdt[GDTE_AVAIL0], GDTE(_u(&nmi_tss), 0x67, 0x89));
+ pack_tss_desc(&gdt[GDTE_AVAIL0], &nmi_tss);
pack_task_gate(&idt[X86_EXC_NMI], GDTE_AVAIL0 * 8);
/*
user_desc ldt[1] = { gdt[__KERN_DS >> 3] };
- update_desc(&gdt[GDTE_AVAIL0], GDTE(_u(ldt), sizeof(ldt) - 1, 0x82));
+ pack_ldt_desc(&gdt[GDTE_AVAIL0], ldt, sizeof(ldt) - 1);
lldt(GDTE_AVAIL0 << 3);
lldt(0);
xtf_set_idte(X86_VEC_AVAIL, &idte);
/* Create the vm86 TSS descriptor. */
- update_desc(&gdt[GDTE_AVAIL0], GDTE(_u(&vm86_tss), 0x67, 0x89));
+ pack_tss_desc(&gdt[GDTE_AVAIL0], &vm86_tss);
/* Copy a stub to somewhere vm86 can actually reach. */
uint8_t insn_buf[] = { 0xcd, X86_VEC_AVAIL }; /* `int $X86_VEC_AVAIL` */
*/
if ( vendor_is_amd )
{
- update_desc(&gdt[GDTE_AVAIL1], GDTE(0, 0, 0x82));
+ pack_ldt_desc(&gdt[GDTE_AVAIL1], NULL, 0);
lldt(GDTE_AVAIL1 << 3);
}
lldt(0);