 #include <mach_apic.h>
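+/* Common target_cpus handler: every delivery mode targets all online CPUs. */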
+const cpumask_t *target_cpus_all(void)
+{
+    return &cpu_online_map;
+}
+
 /*
  * LOGICAL FLAT DELIVERY MODE (multicast via bitmask to <= 8 logical APIC IDs).
  */
     apic_write_around(APIC_LDR, val);
 }
-void clustered_apic_check_flat(void)
+void __init clustered_apic_check_flat(void)
 {
     printk("Enabling APIC mode: Flat. Using %d I/O APICs\n", nr_ioapics);
 }
-const cpumask_t *target_cpus_flat(void)
-{
-    return &cpu_online_map;
-}
-
 const cpumask_t *vector_allocation_cpumask_flat(int cpu)
 {
     return &cpu_online_map;
     apic_write_around(APIC_LDR, val);
 }
-void clustered_apic_check_phys(void)
+void __init clustered_apic_check_phys(void)
 {
     printk("Enabling APIC mode: Phys. Using %d I/O APICs\n", nr_ioapics);
 }
-const cpumask_t *target_cpus_phys(void)
-{
-    return &cpu_online_map;
-}
-
 const cpumask_t *vector_allocation_cpumask_phys(int cpu)
 {
     return cpumask_of(cpu);
     cpu_2_logical_apicid[cpu] = apic_read(APIC_LDR);
 }
-static void clustered_apic_check_x2apic(void)
+static void __init clustered_apic_check_x2apic(void)
 {
 }
-static const cpumask_t *target_cpus_x2apic(void)
-{
-    return &cpu_online_map;
-}
-
-static const cpumask_t *vector_allocation_cpumask_x2apic(int cpu)
-{
-    return cpumask_of(cpu);
-}
-
-static unsigned int cpu_mask_to_apicid_x2apic_phys(const cpumask_t *cpumask)
-{
-    return cpu_physical_id(cpumask_first(cpumask));
-}
-
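+/* Map to the logical APIC ID that was cached from APIC_LDR at LDR setup. */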
 static unsigned int cpu_mask_to_apicid_x2apic_cluster(const cpumask_t *cpumask)
 {
     return cpu_2_logical_apicid[cpumask_first(cpumask)];
     .int_dest_mode = 0 /* physical delivery */,
     .init_apic_ldr = init_apic_ldr_x2apic_phys,
     .clustered_apic_check = clustered_apic_check_x2apic,
-    .target_cpus = target_cpus_x2apic,
-    .vector_allocation_cpumask = vector_allocation_cpumask_x2apic,
-    .cpu_mask_to_apicid = cpu_mask_to_apicid_x2apic_phys,
+    .target_cpus = target_cpus_all,
+    .vector_allocation_cpumask = vector_allocation_cpumask_phys,
+    .cpu_mask_to_apicid = cpu_mask_to_apicid_phys,
     .send_IPI_mask = send_IPI_mask_x2apic_phys,
     .send_IPI_self = send_IPI_self_x2apic
 };
     .int_dest_mode = 1 /* logical delivery */,
     .init_apic_ldr = init_apic_ldr_x2apic_cluster,
     .clustered_apic_check = clustered_apic_check_x2apic,
-    .target_cpus = target_cpus_x2apic,
-    .vector_allocation_cpumask = vector_allocation_cpumask_x2apic,
+    .target_cpus = target_cpus_all,
+    .vector_allocation_cpumask = vector_allocation_cpumask_phys,
     .cpu_mask_to_apicid = cpu_mask_to_apicid_x2apic_cluster,
     .send_IPI_mask = send_IPI_mask_x2apic_cluster,
     .send_IPI_self = send_IPI_self_x2apic
 extern const struct genapic *genapic;
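+/* Shared by the flat, physical and x2APIC genapic implementations. */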
+const cpumask_t *target_cpus_all(void);
+
 void init_apic_ldr_flat(void);
 void clustered_apic_check_flat(void);
-const cpumask_t *target_cpus_flat(void);
 unsigned int cpu_mask_to_apicid_flat(const cpumask_t *cpumask);
 void send_IPI_mask_flat(const cpumask_t *mask, int vector);
 void send_IPI_self_flat(int vector);
     .int_dest_mode = 1 /* logical delivery */, \
     .init_apic_ldr = init_apic_ldr_flat, \
     .clustered_apic_check = clustered_apic_check_flat, \
-    .target_cpus = target_cpus_flat, \
+    .target_cpus = target_cpus_all, \
     .vector_allocation_cpumask = vector_allocation_cpumask_flat, \
     .cpu_mask_to_apicid = cpu_mask_to_apicid_flat, \
     .send_IPI_mask = send_IPI_mask_flat, \
 void init_apic_ldr_phys(void);
 void clustered_apic_check_phys(void);
-const cpumask_t *target_cpus_phys(void);
 unsigned int cpu_mask_to_apicid_phys(const cpumask_t *cpumask);
 void send_IPI_mask_phys(const cpumask_t *mask, int vector);
 void send_IPI_self_phys(int vector);
     .int_dest_mode = 0 /* physical delivery */, \
     .init_apic_ldr = init_apic_ldr_phys, \
     .clustered_apic_check = clustered_apic_check_phys, \
-    .target_cpus = target_cpus_phys, \
+    .target_cpus = target_cpus_all, \
     .vector_allocation_cpumask = vector_allocation_cpumask_phys, \
     .cpu_mask_to_apicid = cpu_mask_to_apicid_phys, \
     .send_IPI_mask = send_IPI_mask_phys, \