*/
int apic_verbosity;
+int x2apic_enabled __read_mostly = 0;
+
static void apic_pm_activate(void);
*/
reg0 = apic_read(APIC_LVR);
apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
- apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
+
+ /* We don't try writing LVR in x2APIC mode since that incurs #GP. */
+ if ( !x2apic_enabled )
+ apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
reg1 = apic_read(APIC_LVR);
apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);
apic_pm_state.apic_id = apic_read(APIC_ID);
apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
apic_pm_state.apic_ldr = apic_read(APIC_LDR);
- apic_pm_state.apic_dfr = apic_read(APIC_DFR);
+ if ( !x2apic_enabled )
+ apic_pm_state.apic_dfr = apic_read(APIC_DFR);
apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
* FIXME! This will be wrong if we ever support suspend on
* SMP! We'll need to do this as part of the CPU restore!
*/
- rdmsr(MSR_IA32_APICBASE, l, h);
- l &= ~MSR_IA32_APICBASE_BASE;
- l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
- wrmsr(MSR_IA32_APICBASE, l, h);
+ if ( !x2apic_enabled )
+ {
+ rdmsr(MSR_IA32_APICBASE, l, h);
+ l &= ~MSR_IA32_APICBASE_BASE;
+ l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
+ wrmsr(MSR_IA32_APICBASE, l, h);
+ }
+ else
+ enable_x2apic();
apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
apic_write(APIC_ID, apic_pm_state.apic_id);
- apic_write(APIC_DFR, apic_pm_state.apic_dfr);
+ if ( !x2apic_enabled )
+ apic_write(APIC_DFR, apic_pm_state.apic_dfr);
apic_write(APIC_LDR, apic_pm_state.apic_ldr);
apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
return -1;
}
+void enable_x2apic(void)
+{
+ u32 lo, hi;
+
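+    /* IA32_APIC_BASE bit 10 (EXTD) selects x2APIC mode; firmware may have set it already. */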
+ rdmsr(MSR_IA32_APICBASE, lo, hi);
+ if ( !(lo & MSR_IA32_APICBASE_EXTD) )
+ {
+ lo |= MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD;
+        wrmsr(MSR_IA32_APICBASE, lo, hi);
+ printk("x2APIC mode enabled.\n");
+ }
+ else
+ printk("x2APIC mode enabled by BIOS.\n");
+
+ x2apic_enabled = 1;
+}
+
void __init init_apic_mappings(void)
{
unsigned long apic_phys;
+ if ( x2apic_enabled )
+        goto skip_mapping;
/*
* If no local APIC can be found then set up a fake all
* zeroes page to simulate the local APIC and another
apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n", APIC_BASE,
apic_phys);
+skip_mapping:
/*
* Fetch the APIC ID of the BSP in case we have a
* default configuration (or the MP table is broken).
*/
if (boot_cpu_physical_apicid == -1U)
- boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+ boot_cpu_physical_apicid = get_apic_id();
#ifdef CONFIG_X86_IO_APIC
{
* might be zero if read from MP tables. Get it from LAPIC.
*/
#ifdef CONFIG_CRASH_DUMP
- boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+ boot_cpu_physical_apicid = get_apic_id();
#endif
phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
obj-y += bigsmp.o
+obj-y += x2apic.o
obj-y += default.o
obj-y += delivery.o
obj-y += probe.o
apic_write_around(APIC_DFR, APIC_DFR_FLAT);
val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
- val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
+ val |= SET_xAPIC_LOGICAL_ID(1UL << smp_processor_id());
apic_write_around(APIC_LDR, val);
}
#include <asm/apicdef.h>
#include <asm/genapic.h>
+extern struct genapic apic_x2apic;
extern struct genapic apic_summit;
extern struct genapic apic_bigsmp;
extern struct genapic apic_default;
struct genapic *genapic;
struct genapic *apic_probe[] __initdata = {
+ &apic_x2apic,
&apic_summit,
&apic_bigsmp,
&apic_default, /* must be last */
--- /dev/null
+/*
+ * x2APIC driver.
+ *
+ * Copyright (c) 2008, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <xen/cpumask.h>
+#include <asm/apicdef.h>
+#include <asm/genapic.h>
+#include <xen/smp.h>
+#include <asm/mach-default/mach_mpparse.h>
+
+__init int probe_x2apic(void)
+{
+ return x2apic_is_available();
+}
+
+struct genapic apic_x2apic = {
+ APIC_INIT("x2apic", probe_x2apic),
+ GENAPIC_X2APIC
+};
+
+void init_apic_ldr_x2apic(void)
+{
+ /* We only use physical delivery mode. */
+ return;
+}
+
+void clustered_apic_check_x2apic(void)
+{
+ /* We only use physical delivery mode. */
+ return;
+}
+
+cpumask_t target_cpus_x2apic(void)
+{
+ /* Deliver interrupts only to CPU0 for now */
+ return cpumask_of_cpu(0);
+}
+
+unsigned int cpu_mask_to_apicid_x2apic(cpumask_t cpumask)
+{
+ return cpu_physical_id(first_cpu(cpumask));
+}
+
+void send_IPI_mask_x2apic(cpumask_t cpumask, int vector)
+{
+ unsigned int query_cpu;
+ u32 cfg, dest;
+ unsigned long flags;
+
+ ASSERT(cpus_subset(cpumask, cpu_online_map));
+ ASSERT(!cpus_empty(cpumask));
+
+ local_irq_save(flags);
+
+ cfg = APIC_DM_FIXED | 0 /* no shorthand */ | APIC_DEST_PHYSICAL | vector;
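+    /* One MSR write per destination; the x2APIC ICR has no Delivery Status (busy) bit to wait on. */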
+ for_each_cpu_mask(query_cpu, cpumask)
+ {
+ dest = cpu_physical_id(query_cpu);
+ apic_icr_write(cfg, dest);
+ }
+
+ local_irq_restore(flags);
+}
+
int result = 0;
uint8_t logical_id;
- logical_id = GET_APIC_LOGICAL_ID(vlapic_get_reg(vlapic, APIC_LDR));
+ logical_id = GET_xAPIC_LOGICAL_ID(vlapic_get_reg(vlapic, APIC_LDR));
switch ( vlapic_get_reg(vlapic, APIC_DFR) )
{
static int vlapic_ipi(
struct vlapic *vlapic, uint32_t icr_low, uint32_t icr_high)
{
- unsigned int dest = GET_APIC_DEST_FIELD(icr_high);
+ unsigned int dest = GET_xAPIC_DEST_FIELD(icr_high);
unsigned int short_hand = icr_low & APIC_SHORT_MASK;
unsigned int trig_mode = icr_low & APIC_INT_LEVELTRIG;
unsigned int level = icr_low & APIC_INT_ASSERT;
entry.delivery_mode = dest_ExtINT; /* ExtInt */
entry.vector = 0;
entry.dest.physical.physical_dest =
- GET_APIC_ID(apic_read(APIC_ID));
+ get_apic_id();
/*
* Add it to the IO-APIC irq-routing table:
void __init mp_register_lapic_address (
u64 address)
{
-	mp_lapic_addr = (unsigned long) address;
-	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
+	if ( !x2apic_enabled )
+	{
+		mp_lapic_addr = (unsigned long) address;
+		set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
+	}
if (boot_cpu_physical_apicid == -1U)
- boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+ boot_cpu_physical_apicid = get_apic_id();
Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
}
*/
static void do_nmi_trigger(unsigned char key)
{
- u32 id = GET_APIC_ID(apic_read(APIC_ID));
+ u32 id = get_apic_id();
printk("Triggering NMI on APIC ID %x\n", id);
local_irq_disable();
apic_wait_icr_idle();
- apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(id));
- apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_PHYSICAL);
+ apic_icr_write(APIC_DM_NMI | APIC_DEST_PHYSICAL, id);
local_irq_enable();
}
generic_apic_probe();
+ if ( x2apic_is_available() )
+ enable_x2apic();
+
acpi_boot_init();
init_cpu_to_node();
local_irq_enable();
/* Ensure we are the boot CPU. */
- if ( GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid )
+ if ( get_apic_id() != boot_cpu_physical_apicid )
{
/* Send IPI to the boot CPU (logical cpu 0). */
on_selected_cpus(cpumask_of_cpu(0), (void *)machine_restart,
static inline int __prepare_ICR2 (unsigned int mask)
{
- return SET_APIC_DEST_FIELD(mask);
+ return SET_xAPIC_DEST_FIELD(mask);
}
void apic_wait_icr_idle(void)
{
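+    /* The x2APIC ICR has no Delivery Status (busy) bit, so there is nothing to wait for. */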
+ if ( x2apic_enabled )
+ return;
+
while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY )
cpu_relax();
}
*/
wait_for_init_deassert(&init_deasserted);
+ if ( x2apic_is_available() )
+ enable_x2apic();
+
/*
* (This works even if the APIC is not enabled.)
*/
- phys_id = GET_APIC_ID(apic_read(APIC_ID));
+ phys_id = get_apic_id();
cpuid = smp_processor_id();
if (cpu_isset(cpuid, cpu_callin_map)) {
printk("huh, phys CPU#%d, CPU#%d already present??\n",
static void map_cpu_to_logical_apicid(void)
{
int cpu = smp_processor_id();
- int apicid = hard_smp_processor_id();
+ int apicid = logical_smp_processor_id();
cpu_2_logical_apicid[cpu] = apicid;
}
*/
apic_wait_icr_idle();
- apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
- apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);
+ apic_icr_write(APIC_DM_REMRD | regs[i], apicid);
timeout = 0;
do {
#endif
#ifdef WAKE_SECONDARY_VIA_NMI
+
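+/* Reverse-map a logical APIC ID to its CPU number via cpu_2_logical_apicid[]. */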
+static int logical_apicid_to_cpu(int logical_apicid)
+{
+    int i;
+
+    for ( i = 0; i < ARRAY_SIZE(cpu_2_logical_apicid); i++ )
+        if ( cpu_2_logical_apicid[i] == logical_apicid )
+            break;
+
+    if ( i == ARRAY_SIZE(cpu_2_logical_apicid) )
+        i = -1; /* not found */
+
+    return i;
+}
+
/*
* Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
* INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
{
unsigned long send_status = 0, accept_status = 0;
int timeout, maxlvt;
+ int dest_cpu;
+ u32 dest;
- /* Target chip */
- apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));
+ dest_cpu = logical_apicid_to_cpu(logical_apicid);
+ BUG_ON(dest_cpu == -1);
+
+ dest = cpu_physical_id(dest_cpu);
/* Boot on the stack */
- /* Kick the second */
- apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);
+    apic_icr_write(APIC_DM_NMI | APIC_DEST_PHYSICAL, dest);
Dprintk("Waiting for send to finish...\n");
timeout = 0;
do {
Dprintk("+");
udelay(100);
- send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ if ( !x2apic_enabled )
+ send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ else
+            send_status = 0; /* x2APIC ICR has no Delivery Status bit to poll. */
} while (send_status && (timeout++ < 1000));
/*
Dprintk("Asserting INIT.\n");
/*
- * Turn INIT on target chip
+ * Turn INIT on target chip via IPI
*/
- apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
- /*
- * Send IPI
- */
- apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
- | APIC_DM_INIT);
+ apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
+ phys_apicid);
Dprintk("Waiting for send to finish...\n");
timeout = 0;
do {
Dprintk("+");
udelay(100);
- send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ if ( !x2apic_enabled )
+ send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ else
+            send_status = 0; /* x2APIC ICR has no Delivery Status bit to poll. */
} while (send_status && (timeout++ < 1000));
mdelay(10);
Dprintk("Deasserting INIT.\n");
- /* Target chip */
- apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
- /* Send IPI */
- apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
+ apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
Dprintk("Waiting for send to finish...\n");
timeout = 0;
do {
Dprintk("+");
udelay(100);
- send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ if ( !x2apic_enabled )
+ send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ else
+            send_status = 0; /* x2APIC ICR has no Delivery Status bit to poll. */
} while (send_status && (timeout++ < 1000));
atomic_set(&init_deasserted, 1);
/*
* STARTUP IPI
+ * Boot on the stack
*/
-
- /* Target chip */
- apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
- /* Boot on the stack */
- /* Kick the second */
- apic_write_around(APIC_ICR, APIC_DM_STARTUP
- | (start_eip >> 12));
+ apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12), phys_apicid);
/*
* Give the other CPU some time to accept the IPI.
do {
Dprintk("+");
udelay(100);
- send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ if ( !x2apic_enabled )
+ send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ else
+            send_status = 0; /* x2APIC ICR has no Delivery Status bit to poll. */
} while (send_status && (timeout++ < 1000));
/*
printk("CPU%d: ", 0);
print_cpu_info(&cpu_data[0]);
- boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+ boot_cpu_physical_apicid = get_apic_id();
x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
stack_base[0] = stack_start.esp;
#define APIC_DEBUG 2
extern int apic_verbosity;
+extern int x2apic_enabled;
+
+extern void enable_x2apic(void);
+
+static __inline int x2apic_is_available(void)
+{
+ unsigned int op = 1, eax, ecx;
+
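+    /* CPUID leaf 1: ECX bit 21 reports x2APIC support. */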
+ asm ( "cpuid"
+ : "=a" (eax), "=c" (ecx)
+ : "0" (op)
+ : "bx", "dx" );
+
+ return (ecx & (1U << 21));
+}
/*
* Define the default level of output to be very little
* Basic functions accessing APICs.
*/
-static __inline void apic_write(unsigned long reg, u32 v)
+static __inline void apic_mem_write(unsigned long reg, u32 v)
{
*((volatile u32 *)(APIC_BASE+reg)) = v;
}
-static __inline void apic_write_atomic(unsigned long reg, u32 v)
+static __inline void apic_mem_write_atomic(unsigned long reg, u32 v)
{
(void)xchg((volatile u32 *)(APIC_BASE+reg), v);
}
-static __inline u32 apic_read(unsigned long reg)
+static __inline u32 apic_mem_read(unsigned long reg)
{
return *((volatile u32 *)(APIC_BASE+reg));
}
+/* NOTE: in x2APIC mode, we should use apic_icr_write()/apic_icr_read() to
+ * access the 64-bit ICR register.
+ */
+
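+/* In x2APIC mode, the register at MMIO offset 'reg' is MSR (APIC_MSR_BASE + (reg >> 4)). */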
+static __inline void apic_wrmsr(unsigned long reg, u32 low, u32 high)
+{
+ __asm__ __volatile__("wrmsr"
+ : /* no outputs */
+ : "c" (APIC_MSR_BASE + (reg >> 4)), "a" (low), "d" (high));
+}
+
+static __inline void apic_rdmsr(unsigned long reg, u32 *low, u32 *high)
+{
+ __asm__ __volatile__("rdmsr"
+ : "=a" (*low), "=d" (*high)
+ : "c" (APIC_MSR_BASE + (reg >> 4)));
+}
+
+static __inline void apic_write(unsigned long reg, u32 v)
+{
+ if ( x2apic_enabled )
+ apic_wrmsr(reg, v, 0);
+ else
+ apic_mem_write(reg, v);
+}
+
+static __inline void apic_write_atomic(unsigned long reg, u32 v)
+{
+ if ( x2apic_enabled )
+ apic_wrmsr(reg, v, 0);
+ else
+ apic_mem_write_atomic(reg, v);
+}
+
+static __inline u32 apic_read(unsigned long reg)
+{
+ u32 lo, hi;
+
+ if ( x2apic_enabled )
+ apic_rdmsr(reg, &lo, &hi);
+ else
+ lo = apic_mem_read(reg);
+ return lo;
+}
+
+static __inline u64 apic_icr_read(void)
+{
+ u32 lo, hi;
+
+ if ( x2apic_enabled )
+ apic_rdmsr(APIC_ICR, &lo, &hi);
+ else
+ {
+ lo = apic_mem_read(APIC_ICR);
+ hi = apic_mem_read(APIC_ICR2);
+ }
+
+ return ((u64)lo) | (((u64)hi) << 32);
+}
+
+static __inline void apic_icr_write(u32 low, u32 dest)
+{
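+    /*
+     * x2APIC: the destination is the full upper 32 bits of the 64-bit ICR MSR.
+     * xAPIC:  the destination sits in bits 24-31 of ICR2.
+     */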
+ if ( x2apic_enabled )
+ apic_wrmsr(APIC_ICR, low, dest);
+ else
+ {
+ apic_mem_write(APIC_ICR2, dest << 24);
+ apic_mem_write(APIC_ICR, low);
+ }
+}
+
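+/* In x2APIC mode the ID registers hold full 32-bit values; xAPIC packs them into bits 24-31. */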
+static __inline u32 get_apic_id(void) /* Get the physical APIC id */
+{
+ u32 id = apic_read(APIC_ID);
+ return x2apic_enabled ? id : GET_xAPIC_ID(id);
+}
+
+static __inline u32 get_logical_apic_id(void)
+{
+ u32 logical_id = apic_read(APIC_LDR);
+ return x2apic_enabled ? logical_id : GET_xAPIC_LOGICAL_ID(logical_id);
+}
+
void apic_wait_icr_idle(void);
int get_physical_broadcast(void);
#define APIC_ID 0x20
#define APIC_ID_MASK (0xFFu<<24)
-#define GET_APIC_ID(x) (((x)>>24)&0xFFu)
-#define SET_APIC_ID(x) (((x)<<24))
+#define GET_xAPIC_ID(x) (((x)>>24)&0xFFu)
+#define SET_xAPIC_ID(x) (((x)<<24))
#define APIC_LVR 0x30
#define APIC_LVR_MASK 0xFF00FF
#define GET_APIC_VERSION(x) ((x)&0xFF)
#define APIC_RRR 0xC0
#define APIC_LDR 0xD0
#define APIC_LDR_MASK (0xFF<<24)
-#define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFF)
-#define SET_APIC_LOGICAL_ID(x) (((x)<<24))
+#define GET_xAPIC_LOGICAL_ID(x) (((x)>>24)&0xFF)
+#define SET_xAPIC_LOGICAL_ID(x) (((x)<<24))
#define APIC_ALL_CPUS 0xFF
#define APIC_DFR 0xE0
#define APIC_DFR_CLUSTER 0x0FFFFFFFul
#define APIC_DM_EXTINT 0x00700
#define APIC_VECTOR_MASK 0x000FF
#define APIC_ICR2 0x310
-#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF)
-#define SET_APIC_DEST_FIELD(x) ((x)<<24)
+#define GET_xAPIC_DEST_FIELD(x) (((x)>>24)&0xFF)
+#define SET_xAPIC_DEST_FIELD(x) ((x)<<24)
#define APIC_LVTT 0x320
#define APIC_LVTTHMR 0x330
#define APIC_LVTPC 0x340
#define APIC_TMICT 0x380
#define APIC_TMCCT 0x390
#define APIC_TDCR 0x3E0
+
+/* Only available in x2APIC mode */
+#define APIC_SELF_IPI 0x400
+
#define APIC_TDR_DIV_TMBASE (1<<2)
#define APIC_TDR_DIV_1 0xB
#define APIC_TDR_DIV_2 0x0
#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
+/* Only used when the local APIC operates in x2APIC mode. */
+#define APIC_MSR_BASE 0x800
+
#ifdef __i386__
#define MAX_IO_APICS 64
#else
#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */
#define X86_FEATURE_SSE4_1 (4*32+19) /* Streaming SIMD Extensions 4.1 */
#define X86_FEATURE_SSE4_2 (4*32+20) /* Streaming SIMD Extensions 4.2 */
+#define X86_FEATURE_X2APIC (4*32+21) /* Extended xAPIC */
#define X86_FEATURE_POPCNT (4*32+23) /* POPCNT instruction */
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define cpu_has_ffxsr ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) \
&& boot_cpu_has(X86_FEATURE_FFXSR))
+#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
#endif /* __ASM_I386_CPUFEATURE_H */
/*
.cpu_mask_to_apicid = cpu_mask_to_apicid_flat, \
.send_IPI_mask = send_IPI_mask_flat
+void init_apic_ldr_x2apic(void);
+void clustered_apic_check_x2apic(void);
+cpumask_t target_cpus_x2apic(void);
+unsigned int cpu_mask_to_apicid_x2apic(cpumask_t cpumask);
+void send_IPI_mask_x2apic(cpumask_t mask, int vector);
+#define GENAPIC_X2APIC \
+ .int_delivery_mode = dest_Fixed, \
+ .int_dest_mode = 0 /* physical delivery */, \
+ .init_apic_ldr = init_apic_ldr_x2apic, \
+ .clustered_apic_check = clustered_apic_check_x2apic, \
+ .target_cpus = target_cpus_x2apic, \
+ .cpu_mask_to_apicid = cpu_mask_to_apicid_x2apic, \
+ .send_IPI_mask = send_IPI_mask_x2apic
+
void init_apic_ldr_phys(void);
void clustered_apic_check_phys(void);
cpumask_t target_cpus_phys(void);
#define vlapic_domain(vpic) (vlapic_vcpu(vlapic)->domain)
#define VLAPIC_ID(vlapic) \
- (GET_APIC_ID(vlapic_get_reg((vlapic), APIC_ID)))
+ (GET_xAPIC_ID(vlapic_get_reg((vlapic), APIC_ID)))
/*
* APIC can be disabled in two ways:
*/
static inline int apic_id_registered(void)
{
- return physid_isset(GET_APIC_ID(apic_read(APIC_ID)),
+ return physid_isset(get_apic_id(),
phys_cpu_present_map);
}
#define MSR_IA32_APICBASE 0x0000001b
#define MSR_IA32_APICBASE_BSP (1<<8)
+#define MSR_IA32_APICBASE_EXTD (1<<10)
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
static inline int hard_smp_processor_id(void)
{
/* we don't want to mark this access volatile - bad code generation */
- return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
+ return get_apic_id();
}
static __inline int logical_smp_processor_id(void)
{
/* we don't want to mark this access volatile - bad code generation */
- return GET_APIC_LOGICAL_ID(*(unsigned int *)(APIC_BASE+APIC_LDR));
+ return get_logical_apic_id();
}
#endif