* support using ARRAY_SIZE against per-cpu variables.
*/
struct tss_page *tss_page = &this_cpu(tss_page);
+ idt_entry_t *idt = this_cpu(idt);
/* The TSS may be live. Dissuade any clever optimisations. */
volatile struct tss64 *tss = &tss_page->tss;
.limit = LAST_RESERVED_GDT_BYTE,
};
const struct desc_ptr idtr = {
- .base = (unsigned long)idt_tables[cpu],
+ .base = (unsigned long)idt,
.limit = sizeof(bsp_idt) - 1,
};
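The truncated comment opening this hunk is why the limit is still derived from bsp_idt: ARRAY_SIZE cannot be applied to a per-cpu variable, and every per-CPU table is a same-sized copy of the BSP's (see the memcpy from bsp_idt later in this patch). A minimal sketch of how such a descriptor pointer is then consumed, assuming the lidt() helper and struct desc_ptr from asm/desc.h (neither shown in this hunk):

/*
 * Sketch only, not part of this patch: how a descriptor pointer such as
 * the idtr above is typically consumed.
 */
static void load_percpu_idt_example(void)
{
    const struct desc_ptr idtr = {
        .base  = (unsigned long)this_cpu(idt),
        .limit = sizeof(bsp_idt) - 1,   /* every per-CPU IDT is a copy of bsp_idt */
    };

    lidt(&idtr);
}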
ltr(TSS_SELECTOR);
lldt(0);
- enable_each_ist(idt_tables[cpu]);
+ enable_each_ist(idt);
/*
* Bottom-of-stack must be 16-byte aligned!
* This update is safe from a security point of view, as this
* pcpu is never going to try to sysret back to a PV vcpu.
*/
- set_ist(&idt_tables[cpu][X86_EXC_MC], IST_NONE);
+ set_ist(&per_cpu(idt, cpu)[X86_EXC_MC], IST_NONE);
kexec_crash_save_cpu();
__stop_this_cpu();
{
unsigned long msecs;
unsigned int cpu = smp_processor_id();
+ idt_entry_t *idt = this_cpu(idt);
disable_lapic_nmi_watchdog();
local_irq_disable();
* Disable IST for MCEs to avoid stack corruption race conditions, and
* change the NMI handler to a nop to avoid deviation from this codepath.
*/
- _set_gate_lower(&idt_tables[cpu][X86_EXC_NMI],
- SYS_DESC_irq_gate, 0, &trap_nop);
- set_ist(&idt_tables[cpu][X86_EXC_MC], IST_NONE);
+ _set_gate_lower(&idt[X86_EXC_NMI], SYS_DESC_irq_gate, 0, &trap_nop);
+ set_ist(&idt[X86_EXC_MC], IST_NONE);
set_nmi_callback(do_nmi_crash);
smp_send_nmi_allbutself();
local_irq_disable();
/* Change the NMI handler to a nop (see comment below). */
- _set_gate_lower(&idt_tables[cpu][X86_EXC_NMI], SYS_DESC_irq_gate, 0,
+ _set_gate_lower(&this_cpu(idt)[X86_EXC_NMI], SYS_DESC_irq_gate, 0,
&trap_nop);
/*
svm_vmload_pa(per_cpu(host_vmcb, cpu));
/* Resume use of ISTs now that the host TR is reinstated. */
- enable_each_ist(idt_tables[cpu]);
+ enable_each_ist(per_cpu(idt, cpu));
/*
* Possibly clear previous guest selection of SSBD if set. Note that
* Cannot use ISTs for NMI/#MC/#DF while we are running with the guest TR.
* But this doesn't matter: the IST is only req'd to handle SYSCALL/SYSRET.
*/
- disable_each_ist(idt_tables[cpu]);
+ disable_each_ist(per_cpu(idt, cpu));
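The comment above is the reason these helpers exist: IST use has to be suspended while the guest's TR is live and restored once the host TR is back. Their bodies are not part of this diff; a plausible sketch, where IST_DF/IST_NMI/IST_MCE and X86_EXC_DF are assumed names beyond the constants visible in this patch:

/*
 * Sketch only -- the real enable_each_ist()/disable_each_ist() are
 * untouched by this patch and not shown here.
 */
static void disable_each_ist_sketch(idt_entry_t *idt)
{
    set_ist(&idt[X86_EXC_DF],  IST_NONE);
    set_ist(&idt[X86_EXC_NMI], IST_NONE);
    set_ist(&idt[X86_EXC_MC],  IST_NONE);
}

static void enable_each_ist_sketch(idt_entry_t *idt)
{
    set_ist(&idt[X86_EXC_DF],  IST_DF);
    set_ist(&idt[X86_EXC_NMI], IST_NMI);
    set_ist(&idt[X86_EXC_MC],  IST_MCE);
}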
svm_restore_dr(v);
__vmwrite(HOST_GDTR_BASE,
(unsigned long)(this_cpu(gdt) - FIRST_RESERVED_GDT_ENTRY));
- __vmwrite(HOST_IDTR_BASE, (unsigned long)idt_tables[cpu]);
+ __vmwrite(HOST_IDTR_BASE, (unsigned long)per_cpu(idt, cpu));
__vmwrite(HOST_TR_BASE, (unsigned long)&per_cpu(tss_page, cpu).tss);
#define X86_ASM_IDT_H
#include <xen/bug.h>
+#include <xen/percpu.h>
#include <asm/x86-defns.h>
} idt_entry_t;
extern idt_entry_t bsp_idt[X86_IDT_VECTORS];
-extern idt_entry_t *idt_tables[];
+DECLARE_PER_CPU(idt_entry_t *, idt);
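This declaration is the pivot of the patch: instead of indexing a global idt_tables[] array, the executing CPU reaches its own table via this_cpu(idt) and another CPU's via per_cpu(idt, cpu), as the hunks elsewhere in this diff do. A minimal usage sketch, illustrative only, using helpers already present in this patch:

/* Sketch only: the accessor pattern the rest of this patch switches to. */
static void idt_accessor_example(unsigned int cpu)
{
    idt_entry_t *local  = this_cpu(idt);      /* this CPU's IDT */
    idt_entry_t *remote = per_cpu(idt, cpu);  /* CPU 'cpu's IDT; NULL until
                                               * its page has been allocated */

    set_ist(&local[X86_EXC_MC], IST_NONE);

    if ( remote )
        set_ist(&remote[X86_EXC_MC], IST_NONE);
}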
/*
* Set the Interrupt Stack Table used by a particular IDT entry. Typically
*/
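The helper this comment documents is elided from the hunk; a sketch of set_ist() consistent with its use throughout the patch, assuming idt_entry_t exposes a small 'ist' bitfield (an assumption about its layout):

/*
 * Sketch only: set_ist() itself is unchanged by this patch.  The 'ist'
 * bitfield is an assumed detail of idt_entry_t; the real helper presumably
 * also guards against clever optimisations, since the IDT it pokes may be
 * live.
 */
static inline void set_ist_sketch(idt_entry_t *idt, unsigned int ist)
{
    /* 0 (IST_NONE) means "use the regular stack" for this vector. */
    idt->ist = ist;
}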
for ( i = 0; i < nr_cpu_ids; i++ )
{
- if ( idt_tables[i] == NULL )
+ idt_entry_t *idt = per_cpu(idt, i);
+
+ if ( !idt )
continue;
- _update_gate_addr_lower(&idt_tables[i][X86_EXC_MC], &trap_nop);
+
+ _update_gate_addr_lower(&idt[X86_EXC_MC], &trap_nop);
}
/* Reset CPUID masking and faulting to the host's default. */
rc = clone_mapping(__va(__pa(stack_base[cpu])) + off, rpt);
if ( !rc )
- rc = clone_mapping(idt_tables[cpu], rpt);
+ rc = clone_mapping(per_cpu(idt, cpu), rpt);
if ( !rc )
{
struct tss_page *ptr = &per_cpu(tss_page, cpu);
if ( remove )
{
FREE_XENHEAP_PAGE(per_cpu(gdt, cpu));
- FREE_XENHEAP_PAGE(idt_tables[cpu]);
+ FREE_XENHEAP_PAGE(per_cpu(idt, cpu));
if ( stack_base[cpu] )
{
gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
#endif
- if ( idt_tables[cpu] == NULL )
- idt_tables[cpu] = alloc_xenheap_pages(0, memflags);
- if ( idt_tables[cpu] == NULL )
+ if ( per_cpu(idt, cpu) == NULL )
+ per_cpu(idt, cpu) = alloc_xenheap_pages(0, memflags);
+ if ( per_cpu(idt, cpu) == NULL )
goto out;
- memcpy(idt_tables[cpu], bsp_idt, sizeof(bsp_idt));
- disable_each_ist(idt_tables[cpu]);
+ memcpy(per_cpu(idt, cpu), bsp_idt, sizeof(bsp_idt));
+ disable_each_ist(per_cpu(idt, cpu));
for ( stub_page = 0, i = cpu & ~(STUBS_PER_PAGE - 1);
i < nr_cpu_ids && i <= (cpu | (STUBS_PER_PAGE - 1)); ++i )
idt_entry_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
bsp_idt[X86_IDT_VECTORS];
+
+DEFINE_PER_CPU_READ_MOSTLY(idt_entry_t *, idt);
DEFINE_PER_CPU_READ_MOSTLY(l1_pgentry_t, compat_gdt_l1e);
#endif
-/* Pointer to the IDT of every CPU. */
-idt_entry_t *idt_tables[NR_CPUS] __read_mostly;
-
/*
* The TSS is smaller than a page, but we give it a full page to avoid
* adjacent per-cpu data leaking via Meltdown when XPTI is in use.
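The definition this comment precedes sits outside the hunk; a sketch of the shape it plausibly takes, assuming a page-aligned per-cpu helper in the style of the DEFINE_PER_CPU_READ_MOSTLY seen above:

/*
 * Sketch only (assumed helper name): a page-aligned per-CPU definition
 * matching the rationale above, so no unrelated data shares the TSS page.
 */
DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_page, tss_page);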
enable_each_ist(bsp_idt);
/* CPU0 uses the master IDT. */
- idt_tables[0] = bsp_idt;
+ this_cpu(idt) = bsp_idt;
this_cpu(gdt) = boot_gdt;
if ( IS_ENABLED(CONFIG_PV32) )