stack_top = stack_bottom & ~(STACK_SIZE - 1);
struct tss_struct *tss = &this_cpu(init_tss);
- struct desc_struct *gdt =
+ seg_desc_t *gdt =
this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY;
- struct desc_struct *compat_gdt =
+ seg_desc_t *compat_gdt =
this_cpu(compat_gdt_table) - FIRST_RESERVED_GDT_ENTRY;
const struct desc_ptr gdtr = {
struct vcpu *p = per_cpu(curr_vcpu, cpu);
struct vcpu *n = current;
struct domain *pd = p->domain, *nd = n->domain;
- struct desc_struct *gdt;
+ seg_desc_t *gdt;
struct desc_ptr gdt_desc;
ASSERT(p != n);
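/*
 * __context_switch() reloads the GDT when control moves between vcpus
 * of different domains, so its local descriptor-table pointer (gdt,
 * above) is renamed along with the per-CPU tables themselves.
 */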
enum x86_segment seg, uint16_t sel, unsigned int cpl, unsigned int eflags)
{
struct segment_register desctab, segr;
- struct desc_struct *pdesc = NULL, desc;
+ seg_desc_t *pdesc = NULL, desc;
u8 dpl, rpl;
bool_t writable;
int fault_type = TRAP_invalid_tss;
struct vcpu *v = current;
struct cpu_user_regs *regs = guest_cpu_user_regs();
struct segment_register gdt, tr, prev_tr, segr;
- struct desc_struct *optss_desc = NULL, *nptss_desc = NULL, tss_desc;
+ seg_desc_t *optss_desc = NULL, *nptss_desc = NULL, tss_desc;
bool_t otd_writable, ntd_writable;
unsigned int eflags, new_cpl;
pagefault_info_t pfinfo;
else
{
/* Keep GDT in sync. */
- struct desc_struct *desc = this_cpu(gdt_table) + LDT_ENTRY -
- FIRST_RESERVED_GDT_ENTRY;
+ seg_desc_t *desc =
+ this_cpu(gdt_table) + LDT_ENTRY - FIRST_RESERVED_GDT_ENTRY;
_set_tssldt_desc(desc, ldt_base, ldt_ents * 8 - 1, SYS_DESC_ldt);
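/*
 * Each LDT entry is an 8-byte descriptor, and a descriptor limit is the
 * offset of the last valid byte, hence the ldt_ents * 8 - 1 above.
 */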
static int alloc_segdesc_page(struct page_info *page)
{
const struct domain *owner = page_get_owner(page);
- struct desc_struct *descs = __map_domain_page(page);
+ seg_desc_t *descs = __map_domain_page(page);
unsigned i;
for ( i = 0; i < 512; i++ )
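/*
 * PAGE_SIZE / sizeof(seg_desc_t) == 4096 / 8 == 512, so the loop above
 * walks exactly one page's worth of descriptors.
 */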
unsigned long gmfn = pa >> PAGE_SHIFT;
unsigned long mfn;
unsigned int offset;
- struct desc_struct *gdt_pent, d;
+ seg_desc_t *gdt_pent, d;
struct page_info *page;
long ret = -EINVAL;
- offset = ((unsigned int)pa & ~PAGE_MASK) / sizeof(struct desc_struct);
+ offset = ((unsigned int)pa & ~PAGE_MASK) / sizeof(seg_desc_t);
*(uint64_t *)&d = desc;
page = get_page_from_gfn(currd, gmfn, NULL, P2M_ALLOC);
- if ( (((unsigned int)pa % sizeof(struct desc_struct)) != 0) ||
+ if ( (((unsigned int)pa % sizeof(seg_desc_t)) != 0) ||
!page ||
!check_descriptor(currd, &d) )
{
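/*
 * Three checks gate the update: pa must be aligned to an 8-byte
 * descriptor, the guest frame must resolve to a page, and the new
 * contents must satisfy check_descriptor().  offset is the descriptor's
 * index within its page.
 */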
unsigned long *off,
unsigned int *ar)
{
- struct desc_struct desc;
- const struct desc_struct *pdesc = gdt_ldt_desc_ptr(gate_sel);
+ seg_desc_t desc;
+ const seg_desc_t *pdesc = gdt_ldt_desc_ptr(gate_sel);
if ( (gate_sel < 4) ||
((gate_sel >= FIRST_RESERVED_GDT_BYTE) && !(gate_sel & 4)) ||
unsigned long *base, unsigned long *limit,
unsigned int *ar, bool insn_fetch)
{
- struct desc_struct desc;
+ seg_desc_t desc;
if ( sel < 4 )
desc.b = desc.a = 0;
}
/* Return a pointer to the GDT/LDT descriptor referenced by sel. */
-static inline const struct desc_struct *gdt_ldt_desc_ptr(unsigned int sel)
+static inline const seg_desc_t *gdt_ldt_desc_ptr(unsigned int sel)
{
const struct vcpu *curr = current;
- const struct desc_struct *tbl = (void *)
+ const seg_desc_t *tbl = (void *)
((sel & X86_XEC_TI) ? LDT_VIRT_START(curr) : GDT_VIRT_START(curr));
return &tbl[sel >> 3];
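/*
 * Selector anatomy, as a reference sketch (the sel_* helpers below are
 * illustrative, not Xen APIs): bits 1:0 are the RPL, bit 2 (X86_XEC_TI)
 * selects the LDT over the GDT, and bits 15:3 index the table -- hence
 * tbl[sel >> 3] above.
 */
static inline unsigned int sel_rpl(unsigned int sel)   { return sel & 3; }
static inline bool sel_uses_ldt(unsigned int sel) { return sel & X86_XEC_TI; }
static inline unsigned int sel_index(unsigned int sel) { return sel >> 3; }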
{
unsigned int i, order, memflags = 0;
nodeid_t node = cpu_to_node(cpu);
- struct desc_struct *gdt;
+ seg_desc_t *gdt;
unsigned long stub_page;
int rc = -ENOMEM;
DEFINE_PER_CPU(uint64_t, efer);
static DEFINE_PER_CPU(unsigned long, last_extable_addr);
-DEFINE_PER_CPU_READ_MOSTLY(struct desc_struct *, gdt_table);
-DEFINE_PER_CPU_READ_MOSTLY(struct desc_struct *, compat_gdt_table);
+DEFINE_PER_CPU_READ_MOSTLY(seg_desc_t *, gdt_table);
+DEFINE_PER_CPU_READ_MOSTLY(seg_desc_t *, compat_gdt_table);
/* Master table, used by CPU0. */
idt_entry_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
/* Returns TRUE if given descriptor is valid for GDT or LDT. */
-int check_descriptor(const struct domain *dom, struct desc_struct *d)
+int check_descriptor(const struct domain *dom, seg_desc_t *d)
{
u32 a = d->a, b = d->b;
u16 cs;
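/*
 * In the legacy 8-byte descriptor, 'a' is the low word (limit 15:0 and
 * base 15:0) and 'b' is the high word carrying the attribute bits; the
 * DPL, for instance, is (b >> 13) & 3.  check_descriptor() vets those
 * attributes before a guest-supplied entry may land in a GDT or LDT.
 */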
#define SYS_DESC_irq_gate 14
#define SYS_DESC_trap_gate 15
-struct desc_struct {
- u32 a, b;
-};
+typedef struct {
+ uint32_t a, b;
+} seg_desc_t;
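/*
 * A worked example (illustrative only, not added by this patch): the
 * classic flat 4GiB ring-0 code segment expressed as a seg_desc_t.
 */
static const seg_desc_t example_flat_code = {
    .a = 0x0000ffff,  /* base[15:0] = 0, limit[15:0] = 0xffff */
    .b = 0x00cf9a00,  /* base[31:16] = 0, limit[19:16] = 0xf,
                         G=1, D=1; P=1, DPL=0, S=1, type=code/read */
};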
typedef union {
struct {
unsigned long base;
};
-extern struct desc_struct boot_cpu_gdt_table[];
-DECLARE_PER_CPU(struct desc_struct *, gdt_table);
-extern struct desc_struct boot_cpu_compat_gdt_table[];
-DECLARE_PER_CPU(struct desc_struct *, compat_gdt_table);
+extern seg_desc_t boot_cpu_gdt_table[];
+DECLARE_PER_CPU(seg_desc_t *, gdt_table);
+extern seg_desc_t boot_cpu_compat_gdt_table[];
+DECLARE_PER_CPU(seg_desc_t *, compat_gdt_table);
extern void load_TR(void);
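/*
 * boot_cpu_gdt_table is the statically allocated GDT used during boot
 * and copied as the template for each CPU's gdt_table;
 * compat_gdt_table is the variant laid out for 32-bit PV guests.
 */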
static inline void load_LDT(struct vcpu *v)
{
- struct desc_struct *desc;
+ seg_desc_t *desc;
unsigned long ents;
if ( (ents = v->arch.pv.ldt_ents) == 0 )
ASSERT(((_p)->count_info & PGC_count_mask) != 0); \
ASSERT(page_get_owner(_p) == (_d))
-int check_descriptor(const struct domain *, struct desc_struct *d);
+int check_descriptor(const struct domain *d, seg_desc_t *desc);
extern paddr_t mem_hotplug;