error = acpi_parse_madt_ioapic_entries();
if (!error) {
acpi_ioapic = true;
-
smp_found_config = true;
- clustered_apic_check();
}
}
    if (error == -EINVAL) {
        disable_acpi();
    }

void __init connect_bsp_APIC(void)
{
    if (pic_mode) {
        outb(0x70, 0x22);
        outb(0x01, 0x23);
    }
+
+    printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
+           !INT_DEST_MODE ? "Physical"
+           : init_apic_ldr == init_apic_ldr_flat ? "Flat"
+           : "Clustered",
+           nr_ioapics);
enable_apic_mode();
}
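
Taken together, the hunks above replace three per-mode log lines with one message derived from genapic state. As a sanity check on that ternary chain, here is a minimal standalone model (plain C with invented stand-in names, not Xen code) showing how the mode string falls out of int_dest_mode plus the init_apic_ldr hook:

#include <stdio.h>

/* Stand-ins for the two xAPIC LDR-setup hooks (names mirror the patch). */
static void init_apic_ldr_flat(void) { }
static void init_apic_ldr_phys(void) { }

struct genapic {
    int int_dest_mode;            /* 0 = physical, 1 = logical delivery */
    void (*init_apic_ldr)(void);
};

static const char *mode_name(const struct genapic *g)
{
    /* Same decision chain as the printk added above. */
    return !g->int_dest_mode                        ? "Physical"
           : g->init_apic_ldr == init_apic_ldr_flat ? "Flat"
                                                    : "Clustered";
}

int main(void)
{
    const struct genapic flat = { 1, init_apic_ldr_flat };
    const struct genapic phys = { 0, init_apic_ldr_phys };

    printf("Enabling APIC mode: %s.\n", mode_name(&flat));  /* Flat */
    printf("Enabling APIC mode: %s.\n", mode_name(&phys));  /* Physical */
    return 0;
}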

void init_apic_ldr_flat(void)
{
    /* ... */
    apic_write(APIC_LDR, val);
}
-void __init clustered_apic_check_flat(void)
-{
- printk("Enabling APIC mode: Flat. Using %d I/O APICs\n", nr_ioapics);
-}
-
const cpumask_t *vector_allocation_cpumask_flat(int cpu)
{
    return &cpu_online_map;
}

void init_apic_ldr_phys(void)
{
    /* We only deliver in phys mode - no setup needed. */
}
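
The two hooks kept above encode the actual policy difference: with flat (logical, lowest-priority) delivery a vector has to be free on every online CPU, while physical delivery ties a vector to its one target CPU only. A toy bitmask model of that contract (not Xen's cpumask API; the CPU count and helper names are invented):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t cpumask_t;                   /* bit n set == CPU n in mask */

static const cpumask_t cpu_online_map = 0x0f; /* pretend CPUs 0-3 are online */

static cpumask_t alloc_mask_flat(int cpu) { (void)cpu; return cpu_online_map; }
static cpumask_t alloc_mask_phys(int cpu) { return (cpumask_t)1 << cpu; }

int main(void)
{
    /* A vector picked for an IRQ bound to CPU 2 must be free on... */
    printf("flat: %#x (every online CPU)\n", alloc_mask_flat(2)); /* 0xf */
    printf("phys: %#x (CPU 2 alone)\n", alloc_mask_phys(2));      /* 0x4 */
    return 0;
}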
-void __init clustered_apic_check_phys(void)
-{
- printk("Enabling APIC mode: Phys. Using %d I/O APICs\n", nr_ioapics);
-}
-
const cpumask_t *vector_allocation_cpumask_phys(int cpu)
{
    return cpumask_of(cpu);
}

static void init_apic_ldr_x2apic_cluster(void)
{
    /* ... */
    cpumask_set_cpu(this_cpu, per_cpu(cluster_cpus, this_cpu));
}
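
For context on the cluster bookkeeping in init_apic_ldr_x2apic_cluster: in x2APIC mode the logical ID is architecturally fixed as a 16-bit cluster number in bits 31:16 and a one-hot position in bits 15:0, so CPUs whose APIC IDs share the upper bits land in the same cluster_cpus mask. A standalone illustration of just that bit layout (the helper name is invented):

#include <stdint.h>
#include <stdio.h>

/* x2APIC logical ID: [31:16] cluster = APIC ID >> 4, [15:0] 1 << (ID & 0xf). */
static uint32_t x2apic_ldr(uint32_t apic_id)
{
    return ((apic_id >> 4) << 16) | (1u << (apic_id & 0xf));
}

int main(void)
{
    for (uint32_t id = 0; id < 4; ++id) {
        uint32_t ldr = x2apic_ldr(id);
        printf("APIC ID %u: LDR %#010x (cluster %u, position bit %u)\n",
               id, ldr, ldr >> 16, id & 0xf);
    }
    return 0;
}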
-static void __init clustered_apic_check_x2apic(void)
-{
-}
-
static const cpumask_t *vector_allocation_cpumask_x2apic_cluster(int cpu)
{
    return per_cpu(cluster_cpus, cpu);
}

static const struct genapic apic_x2apic_phys = {
    /* ... */
    .int_delivery_mode = dest_Fixed,
    .int_dest_mode = 0 /* physical delivery */,
    .init_apic_ldr = init_apic_ldr_phys,
-    .clustered_apic_check = clustered_apic_check_x2apic,
    .vector_allocation_cpumask = vector_allocation_cpumask_phys,
    .cpu_mask_to_apicid = cpu_mask_to_apicid_phys,
    .send_IPI_mask = send_IPI_mask_x2apic_phys,
    /* ... */
};

static const struct genapic apic_x2apic_cluster = {
    /* ... */
    .int_delivery_mode = dest_LowestPrio,
    .int_dest_mode = 1 /* logical delivery */,
    .init_apic_ldr = init_apic_ldr_x2apic_cluster,
-    .clustered_apic_check = clustered_apic_check_x2apic,
    .vector_allocation_cpumask = vector_allocation_cpumask_x2apic_cluster,
    .cpu_mask_to_apicid = cpu_mask_to_apicid_x2apic_cluster,
    .send_IPI_mask = send_IPI_mask_x2apic_cluster,
    /* ... */
};

}
}
}
- clustered_apic_check();
if (!num_processors)
printk(KERN_ERR "SMP mptable: no processors registered!\n");
return num_processors;

struct genapic {
    /* ... */
    int int_delivery_mode;
    int int_dest_mode;
    void (*init_apic_ldr)(void);
-    void (*clustered_apic_check)(void);
    const cpumask_t *(*vector_allocation_cpumask)(int cpu);
    unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
    void (*send_IPI_mask)(const cpumask_t *mask, int vector);
    /* ... */
};

void send_IPI_self_legacy(uint8_t vector);
void init_apic_ldr_flat(void);
-void clustered_apic_check_flat(void);
unsigned int cpu_mask_to_apicid_flat(const cpumask_t *cpumask);
void send_IPI_mask_flat(const cpumask_t *mask, int vector);
const cpumask_t *vector_allocation_cpumask_flat(int cpu);

#define GENAPIC_FLAT \
    .int_delivery_mode = dest_LowestPrio, \
    .int_dest_mode = 1 /* logical delivery */, \
    .init_apic_ldr = init_apic_ldr_flat, \
-    .clustered_apic_check = clustered_apic_check_flat, \
    .vector_allocation_cpumask = vector_allocation_cpumask_flat, \
    .cpu_mask_to_apicid = cpu_mask_to_apicid_flat, \
    .send_IPI_mask = send_IPI_mask_flat, \
    .send_IPI_self = send_IPI_self_legacy
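
The GENAPIC_FLAT/GENAPIC_PHYS macros are designated-initializer field lists, so each concrete driver table just drops one into its struct genapic initializer. A self-contained model of the pattern (invented names; not the Xen definitions themselves):

#include <stdio.h>

struct ops {
    const char *name;
    int dest_mode;                /* 0 = physical, 1 = logical */
    void (*announce)(void);
};

static void announce_flat(void) { puts("flat/logical delivery"); }

/* Field-list macro, same shape as GENAPIC_FLAT above. */
#define OPS_FLAT \
    .dest_mode = 1, \
    .announce = announce_flat

static const struct ops default_ops = {
    .name = "default",
    OPS_FLAT
};

int main(void)
{
    printf("%s: dest_mode=%d\n", default_ops.name, default_ops.dest_mode);
    default_ops.announce();
    return 0;
}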
void init_apic_ldr_phys(void);
-void clustered_apic_check_phys(void);
unsigned int cpu_mask_to_apicid_phys(const cpumask_t *cpumask);
void send_IPI_mask_phys(const cpumask_t *mask, int vector);
const cpumask_t *vector_allocation_cpumask_phys(int cpu);

#define GENAPIC_PHYS \
    .int_delivery_mode = dest_Fixed, \
    .int_dest_mode = 0 /* physical delivery */, \
    .init_apic_ldr = init_apic_ldr_phys, \
-    .clustered_apic_check = clustered_apic_check_phys, \
    .vector_allocation_cpumask = vector_allocation_cpumask_phys, \
    .cpu_mask_to_apicid = cpu_mask_to_apicid_phys, \
    .send_IPI_mask = send_IPI_mask_phys, \
    .send_IPI_self = send_IPI_self_legacy

#define INT_DEST_MODE (genapic.int_dest_mode)
#define TARGET_CPUS ((const typeof(cpu_online_map) *)&cpu_online_map)
#define init_apic_ldr (genapic.init_apic_ldr)
-#define clustered_apic_check (genapic.clustered_apic_check)
#define cpu_mask_to_apicid(mask) ({ \
/* \
* There are a number of places where the address of a local variable \