per_cpu(full_gdt_loaded, cpu) = false;
lgdt(&gdtr);
lidt(&idtr);
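+ /* Point TR at this CPU's TSS and load a null LDT. */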
- ltr(TSS_ENTRY << 3);
+ ltr(TSS_SELECTOR);
lldt(0);
enable_each_ist(idt_tables[cpu]);
_set_tssldt_desc(desc, ldt_base, ldt_ents * 8 - 1, SYS_DESC_ldt);
- vmcb->ldtr.sel = LDT_ENTRY << 3;
+ vmcb->ldtr.sel = LDT_SELECTOR;
vmcb->ldtr.attr = SYS_DESC_ldt | (_SEGMENT_P >> 8);
vmcb->ldtr.limit = ldt_ents * 8 - 1;
vmcb->ldtr.base = ldt_base;
__vmwrite(HOST_GS_SELECTOR, 0);
__vmwrite(HOST_FS_BASE, 0);
__vmwrite(HOST_GS_BASE, 0);
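+ /* Selector the CPU loads into TR on every VM exit. */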
- __vmwrite(HOST_TR_SELECTOR, TSS_ENTRY << 3);
+ __vmwrite(HOST_TR_SELECTOR, TSS_SELECTOR);
/* Host control registers. */
v->arch.hvm.vmx.host_cr0 = read_cr0() & ~X86_CR0_TS;
/* Switch to non-compat GDT (which has B bit clear) to execute LTR. */
asm volatile (
"sgdt %0; lgdt %2; ltr %w1; lgdt %0"
- : "=m" (old_gdt) : "rm" (TSS_ENTRY << 3), "m" (tss_gdt) : "memory" );
+ : "=m" (old_gdt) : "rm" (TSS_SELECTOR), "m" (tss_gdt) : "memory" );
}
static unsigned int calc_ler_msr(void)
console_force_unlock();
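+ /* The per-CPU GDT slot's limit holds this CPU's ID; LSL recovers it without relying on per-CPU data. */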
- asm ( "lsll %1, %0" : "=r" (cpu) : "rm" (PER_CPU_GDT_ENTRY << 3) );
+ asm ( "lsll %1, %0" : "=r" (cpu) : "rm" (PER_CPU_SELECTOR) );
/* Find information saved during fault and dump it to the console. */
printk("*** DOUBLE FAULT ***\n");
#define LDT_ENTRY (TSS_ENTRY + 2)
#define PER_CPU_GDT_ENTRY (LDT_ENTRY + 2)
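+
+/* Selectors for the reserved GDT entries above: index << 3, i.e. TI = 0 and RPL = 0. */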
+#define TSS_SELECTOR (TSS_ENTRY << 3)
+#define LDT_SELECTOR (LDT_ENTRY << 3)
+#define PER_CPU_SELECTOR (PER_CPU_GDT_ENTRY << 3)
+
#ifndef __ASSEMBLY__
#define GUEST_KERNEL_RPL(d) (is_pv_32bit_domain(d) ? 1 : 3)
desc = (!is_pv_32bit_vcpu(v) ? this_cpu(gdt) : this_cpu(compat_gdt))
       + LDT_ENTRY - FIRST_RESERVED_GDT_ENTRY;
_set_tssldt_desc(desc, LDT_VIRT_START(v), ents*8-1, SYS_DESC_ldt);
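+ /* Activate the LDT descriptor just written into this CPU's GDT. */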
- lldt(LDT_ENTRY << 3);
+ lldt(LDT_SELECTOR);
}
}