static unsigned int cleared_caps[NCAPINTS];
static unsigned int forced_caps[NCAPINTS];
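+/*
+ * Track whether this CPU currently has a vCPU's full GDT loaded,
+ * rather than the default Xen GDT.
+ */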
+DEFINE_PER_CPU(bool, full_gdt_loaded);
+
void __init setup_clear_cpu_cap(unsigned int cap)
{
const uint32_t *dfs;
offsetof(struct tss_struct, __cacheline_filler) - 1,
SYS_DESC_tss_avail);
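+ /* The lgdt() below installs Xen's default GDT, so no full GDT is loaded. */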
+ per_cpu(full_gdt_loaded, cpu) = false;
lgdt(&gdtr);
lidt(&idtr);
ltr(TSS_ENTRY << 3);
: per_cpu(compat_gdt_table_l1e, cpu));
}
-static void load_full_gdt(const struct vcpu *v)
+static void load_full_gdt(const struct vcpu *v, unsigned int cpu)
{
struct desc_ptr gdt_desc = {
.limit = LAST_RESERVED_GDT_BYTE,
.base = GDT_VIRT_START(v),
};
lgdt(&gdt_desc);
+
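+ /* Note that this CPU now has the vCPU's full GDT in use. */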
+ per_cpu(full_gdt_loaded, cpu) = true;
}
static void load_default_gdt(unsigned int cpu)
{
    struct desc_ptr gdt_desc = {
        .limit = LAST_RESERVED_GDT_BYTE,
        .base = (unsigned long)(per_cpu(gdt_table, cpu) -
                                FIRST_RESERVED_GDT_ENTRY),
    };
lgdt(&gdt_desc);
+
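+ /* The default GDT is active again. */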
+ per_cpu(full_gdt_loaded, cpu) = false;
}
static void __context_switch(void)
if ( need_full_gdt(nd) )
update_xen_slot_in_full_gdt(n, cpu);
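+ /*
+  * Switch back to the default GDT only if a full GDT is actually loaded
+  * and it cannot be kept for the next vCPU.
+  */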
- if ( need_full_gdt(pd) &&
+ if ( per_cpu(full_gdt_loaded, cpu) &&
((p->vcpu_id != n->vcpu_id) || !need_full_gdt(nd)) )
load_default_gdt(cpu);
svm_load_segs(0, 0, 0, 0, 0, 0, 0);
#endif
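+ /*
+  * Avoid reloading the full GDT when one is already loaded: in that case
+  * it belongs to the same vCPU id, so its base address is unchanged and
+  * its Xen slot has been updated above.
+  */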
- if ( need_full_gdt(nd) &&
- ((p->vcpu_id != n->vcpu_id) || !need_full_gdt(pd)) )
- load_full_gdt(n);
+ if ( need_full_gdt(nd) && !per_cpu(full_gdt_loaded, cpu) )
+ load_full_gdt(n, cpu);
if ( pd != nd )
cpumask_clear_cpu(cpu, pd->dirty_cpumask);
extern seg_desc_t boot_cpu_compat_gdt_table[];
DECLARE_PER_CPU(seg_desc_t *, compat_gdt_table);
DECLARE_PER_CPU(l1_pgentry_t, compat_gdt_table_l1e);
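+/* Set if this CPU has a vCPU's full GDT loaded instead of the default one. */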
+DECLARE_PER_CPU(bool, full_gdt_loaded);
extern void load_TR(void);