{
#ifdef CONFIG_PV32
/* The 32-on-64 hypercall vector is only accessible from ring 1. */
- _set_gate(idt_table + HYPERCALL_VECTOR,
+ _set_gate(bsp_idt + HYPERCALL_VECTOR,
SYS_DESC_irq_gate, 1, entry_int82);
#endif
/* Fast trap for int80 (faster than taking the #GP-fixup path). */
- _set_gate(idt_table + LEGACY_SYSCALL_VECTOR, SYS_DESC_irq_gate, 3,
+ _set_gate(bsp_idt + LEGACY_SYSCALL_VECTOR, SYS_DESC_irq_gate, 3,
&entry_int80);
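For orientation: _set_gate() writes one 16-byte IDT entry, and the DPL picks the lowest-privilege ring allowed to reach the vector via a software int — ring 1 for entry_int82 (32-bit PV kernels run in ring 1), ring 3 for entry_int80 (guest userspace issues int $0x80 directly, skipping the slower #GP-forwarding path). A minimal sketch of the gate layout, using SDM-style field names rather than Xen's exact idt_entry_t:

    #include <stdint.h>

    /* Sketch only: the bitfield spelling is illustrative, not Xen's struct. */
    typedef union {
        struct {
            uint16_t offset_lo;    /* handler address bits 0-15 */
            uint16_t selector;     /* hypervisor code segment */
            uint16_t ist:3, zero:5;
            uint16_t type:4;       /* 0xE = interrupt gate */
            uint16_t s:1;          /* 0 = system descriptor */
            uint16_t dpl:2;        /* lowest ring allowed to "int $n" here */
            uint16_t p:1;          /* present */
            uint16_t offset_mid;   /* handler address bits 16-31 */
            uint32_t offset_hi;    /* handler address bits 32-63 */
            uint32_t reserved;
        };
        struct {
            uint64_t a, b;         /* raw halves; see the .b checks below */
        };
    } gate_sketch_t;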
open_softirq(NMI_SOFTIRQ, nmi_softirq);
idt_tables[cpu] = alloc_xenheap_pages(0, memflags);
if ( idt_tables[cpu] == NULL )
goto out;
- memcpy(idt_tables[cpu], idt_table, IDT_ENTRIES * sizeof(idt_entry_t));
+ memcpy(idt_tables[cpu], bsp_idt, sizeof(bsp_idt));
disable_each_ist(idt_tables[cpu]);
for ( stub_page = 0, i = cpu & ~(STUBS_PER_PAGE - 1);
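Two things happen to each fresh AP table: the copy size becomes sizeof(bsp_idt), which tracks the array definition instead of repeating IDT_ENTRIES * sizeof(idt_entry_t), and disable_each_ist() then clears the IST fields in the copy, presumably so the CPU takes no IST-routed events before its own TSS and stacks are live. A standalone sketch with stand-in types, showing the two size spellings agree:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define IDT_ENTRIES 256
    typedef struct { uint64_t a, b; } idt_entry_t;   /* stand-in, not Xen's */

    static idt_entry_t bsp_idt[IDT_ENTRIES];

    static_assert(sizeof(bsp_idt) == IDT_ENTRIES * sizeof(idt_entry_t),
                  "array sizeof covers the whole table");

    int main(void)
    {
        static idt_entry_t percpu_copy[IDT_ENTRIES];

        memcpy(percpu_copy, bsp_idt, sizeof(bsp_idt));   /* the new spelling */
        return 0;
    }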
DEFINE_PER_CPU_READ_MOSTLY(l1_pgentry_t, compat_gdt_l1e);
#endif
-/* Master table, used by CPU0. */
-idt_entry_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
- idt_table[IDT_ENTRIES];
-
/* Pointer to the IDT of every CPU. */
idt_entry_t *idt_tables[NR_CPUS] __read_mostly;
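No + counterpart for the removed definition appears in this excerpt, so the bsp_idt definition presumably lands elsewhere in the series. For the sizeof(bsp_idt) above to compile, something along these lines must be in scope (a sketch assuming the section and alignment attributes carry over unchanged):

    /* Assumed shape of the renamed master table; not shown in this hunk. */
    idt_entry_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
        bsp_idt[IDT_ENTRIES];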
static void __init noinline __set_intr_gate(unsigned int n,
uint32_t dpl, void *addr)
{
- _set_gate(&idt_table[n], SYS_DESC_irq_gate, dpl, addr);
+ _set_gate(&bsp_idt[n], SYS_DESC_irq_gate, dpl, addr);
}
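__set_intr_gate() is the DPL-carrying core; the wrappers built on it (set_swint_gate's signature appears below) plausibly look like this, with DPL 0 for hypervisor-only vectors and DPL 3 for software interrupts a guest may raise — a sketch, not the file's verbatim text:

    static void __init set_intr_gate(unsigned int n, void *addr)
    {
        __set_intr_gate(n, 0, addr);   /* not reachable via "int $n" from guests */
    }

    static void __init set_swint_gate(unsigned int n, void *addr)
    {
        __set_intr_gate(n, 3, addr);   /* guest-raisable software interrupt */
    }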
static void __init set_swint_gate(unsigned int n, void *addr)
set_intr_gate (X86_EXC_CP, entry_CP);
/* Specify dedicated interrupt stacks for NMI, #DF, and #MC. */
- enable_each_ist(idt_table);
+ enable_each_ist(bsp_idt);
-/* CPU0 uses the master IDT. */
-idt_tables[0] = idt_table;
+/* CPU0 uses the BSP's IDT directly. */
+idt_tables[0] = bsp_idt;
this_cpu(gdt) = boot_gdt;
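enable_each_ist() points the fragile vectors at dedicated Interrupt Stack Table slots, so NMI, #DF and #MC always enter on a known-good stack even if the interrupted context's stack is unusable. A sketch of the idea; the slot names and the set_ist() shape are assumptions, not taken from this patch:

    /* Illustrative only: slot assignments and helper shape are assumed. */
    static void enable_each_ist_sketch(idt_entry_t *idt)
    {
        set_ist(&idt[X86_EXC_NMI], IST_NMI);
        set_ist(&idt[X86_EXC_DF],  IST_DF);
        set_ist(&idt[X86_EXC_MC],  IST_MCE);
    }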
if ( IS_ENABLED(CONFIG_PV32) )
if ( autogen_entrypoints[vector] )
{
/* Found autogen entry: check we won't clobber an existing trap. */
- ASSERT(idt_table[vector].b == 0);
+ ASSERT(bsp_idt[vector].b == 0);
set_intr_gate(vector, autogen_entrypoints[vector]);
}
else
{
/* No entry point: confirm we have an existing trap in place. */
- ASSERT(idt_table[vector].b != 0);
+ ASSERT(bsp_idt[vector].b != 0);
}
}
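The .b checks lean on the gate layout sketched earlier: the high quadword of a gate holds bits 32-63 of the handler address, and Xen's text sits in the upper canonical half, so any installed handler leaves .b nonzero, while an entry still in its zero-initialised .bss state reads back 0. A toy check of that reasoning, with an illustrative (not real) handler address:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t handler = 0xffff82d040000000ULL;   /* illustrative upper-half address */
        uint64_t gate_b  = handler >> 32;           /* the bits landing in .b */

        assert(gate_b != 0);   /* installed handler => .b != 0 */
        return 0;
    }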