xenbits.xensource.com Git - people/vhanquez/xen-unstable.git/commitdiff
x86: a little bit of genapic cleanup
author    Keir Fraser <keir@xen.org>
          Fri, 24 Dec 2010 10:14:36 +0000 (10:14 +0000)
committer Keir Fraser <keir@xen.org>
          Fri, 24 Dec 2010 10:14:36 +0000 (10:14 +0000)
Eliminate redundancy among the individual handler functions, and mark
init-only functions as such.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
xen/arch/x86/genapic/delivery.c
xen/arch/x86/genapic/x2apic.c
xen/include/asm-x86/genapic.h
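
A minimal, self-contained sketch of the pattern the cleanup converges on -- not the actual Xen sources. The cpumask_t layout and the struct name genapic_sketch are simplified stand-ins; only the target_cpus field name mirrors struct genapic as declared in genapic.h below. Every delivery mode that wants "all online CPUs" now shares one handler instead of carrying its own identical copy:

    #include <stdio.h>

    typedef struct { unsigned long bits[4]; } cpumask_t;

    static cpumask_t cpu_online_map;

    /* One shared handler: flat, phys and both x2APIC variants point here
     * instead of each keeping an identical per-mode copy. */
    static const cpumask_t *target_cpus_all(void)
    {
        return &cpu_online_map;
    }

    struct genapic_sketch {
        const char *name;
        const cpumask_t *(*target_cpus)(void);
    };

    static const struct genapic_sketch apic_flat = { "flat", target_cpus_all };
    static const struct genapic_sketch apic_phys = { "phys", target_cpus_all };

    int main(void)
    {
        /* Both tables dispatch to the single shared implementation. */
        printf("%s -> %p\n", apic_flat.name, (void *)apic_flat.target_cpus());
        printf("%s -> %p\n", apic_phys.name, (void *)apic_phys.target_cpus());
        return 0;
    }
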

diff --git a/xen/arch/x86/genapic/delivery.c b/xen/arch/x86/genapic/delivery.c
index 54ab6d0c222c7804d4ed18090c1bae10323b66be..25fbf694c7317f4a379174153fe6583e0d1d628b 100644
--- a/xen/arch/x86/genapic/delivery.c
+++ b/xen/arch/x86/genapic/delivery.c
@@ -7,6 +7,11 @@
 #include <mach_apic.h>
 
 
+const cpumask_t *target_cpus_all(void)
+{
+       return &cpu_online_map;
+}
+
 /*
  * LOGICAL FLAT DELIVERY MODE (multicast via bitmask to <= 8 logical APIC IDs).
  */
@@ -21,16 +26,11 @@ void init_apic_ldr_flat(void)
        apic_write_around(APIC_LDR, val);
 }
 
-void clustered_apic_check_flat(void)
+void __init clustered_apic_check_flat(void)
 {
        printk("Enabling APIC mode:  Flat.  Using %d I/O APICs\n", nr_ioapics);
 }
 
-const cpumask_t *target_cpus_flat(void)
-{
-       return &cpu_online_map;
-}
-
 const cpumask_t *vector_allocation_cpumask_flat(int cpu)
 {
        return &cpu_online_map;
@@ -54,16 +54,11 @@ void init_apic_ldr_phys(void)
        apic_write_around(APIC_LDR, val);
 }
 
-void clustered_apic_check_phys(void)
+void __init clustered_apic_check_phys(void)
 {
        printk("Enabling APIC mode:  Phys.  Using %d I/O APICs\n", nr_ioapics);
 }
 
-const cpumask_t *target_cpus_phys(void)
-{
-       return &cpu_online_map;
-}
-
 const cpumask_t *vector_allocation_cpumask_phys(int cpu)
 {
        return cpumask_of(cpu);
diff --git a/xen/arch/x86/genapic/x2apic.c b/xen/arch/x86/genapic/x2apic.c
index 86d06ed97d9ca072d0897bc987ec03823c57fb3b..98afc66b1b8b7ea8255cfca210a0fd9b976fc7cc 100644
--- a/xen/arch/x86/genapic/x2apic.c
+++ b/xen/arch/x86/genapic/x2apic.c
@@ -40,25 +40,10 @@ static void init_apic_ldr_x2apic_cluster(void)
     cpu_2_logical_apicid[cpu] = apic_read(APIC_LDR);
 }
 
-static void clustered_apic_check_x2apic(void)
+static void __init clustered_apic_check_x2apic(void)
 {
 }
 
-static const cpumask_t *target_cpus_x2apic(void)
-{
-    return &cpu_online_map;
-}
-
-static const cpumask_t *vector_allocation_cpumask_x2apic(int cpu)
-{
-    return cpumask_of(cpu);
-}
-
-static unsigned int cpu_mask_to_apicid_x2apic_phys(const cpumask_t *cpumask)
-{
-    return cpu_physical_id(cpumask_first(cpumask));
-}
-
 static unsigned int cpu_mask_to_apicid_x2apic_cluster(const cpumask_t *cpumask)
 {
     return cpu_2_logical_apicid[cpumask_first(cpumask)];
@@ -114,9 +99,9 @@ static const struct genapic apic_x2apic_phys = {
     .int_dest_mode = 0 /* physical delivery */,
     .init_apic_ldr = init_apic_ldr_x2apic_phys,
     .clustered_apic_check = clustered_apic_check_x2apic,
-    .target_cpus = target_cpus_x2apic,
-    .vector_allocation_cpumask = vector_allocation_cpumask_x2apic,
-    .cpu_mask_to_apicid = cpu_mask_to_apicid_x2apic_phys,
+    .target_cpus = target_cpus_all,
+    .vector_allocation_cpumask = vector_allocation_cpumask_phys,
+    .cpu_mask_to_apicid = cpu_mask_to_apicid_phys,
     .send_IPI_mask = send_IPI_mask_x2apic_phys,
     .send_IPI_self = send_IPI_self_x2apic
 };
@@ -127,8 +112,8 @@ static const struct genapic apic_x2apic_cluster = {
     .int_dest_mode = 1 /* logical delivery */,
     .init_apic_ldr = init_apic_ldr_x2apic_cluster,
     .clustered_apic_check = clustered_apic_check_x2apic,
-    .target_cpus = target_cpus_x2apic,
-    .vector_allocation_cpumask = vector_allocation_cpumask_x2apic,
+    .target_cpus = target_cpus_all,
+    .vector_allocation_cpumask = vector_allocation_cpumask_phys,
     .cpu_mask_to_apicid = cpu_mask_to_apicid_x2apic_cluster,
     .send_IPI_mask = send_IPI_mask_x2apic_cluster,
     .send_IPI_self = send_IPI_self_x2apic
diff --git a/xen/include/asm-x86/genapic.h b/xen/include/asm-x86/genapic.h
index 81c86329b217b2b3d842ae7cf3cc7244c11e42ad..6a0fd7033ec669633a6f17d3bf5e6d5598803e56 100644
--- a/xen/include/asm-x86/genapic.h
+++ b/xen/include/asm-x86/genapic.h
@@ -50,9 +50,10 @@ struct genapic {
 
 extern const struct genapic *genapic;
 
+const cpumask_t *target_cpus_all(void);
+
 void init_apic_ldr_flat(void);
 void clustered_apic_check_flat(void);
-const cpumask_t *target_cpus_flat(void);
 unsigned int cpu_mask_to_apicid_flat(const cpumask_t *cpumask);
 void send_IPI_mask_flat(const cpumask_t *mask, int vector);
 void send_IPI_self_flat(int vector);
@@ -62,7 +63,7 @@ const cpumask_t *vector_allocation_cpumask_flat(int cpu);
        .int_dest_mode = 1 /* logical delivery */, \
        .init_apic_ldr = init_apic_ldr_flat, \
        .clustered_apic_check = clustered_apic_check_flat, \
-       .target_cpus = target_cpus_flat, \
+       .target_cpus = target_cpus_all, \
        .vector_allocation_cpumask = vector_allocation_cpumask_flat, \
        .cpu_mask_to_apicid = cpu_mask_to_apicid_flat, \
        .send_IPI_mask = send_IPI_mask_flat, \
@@ -70,7 +71,6 @@ const cpumask_t *vector_allocation_cpumask_flat(int cpu);
 
 void init_apic_ldr_phys(void);
 void clustered_apic_check_phys(void);
-const cpumask_t *target_cpus_phys(void);
 unsigned int cpu_mask_to_apicid_phys(const cpumask_t *cpumask);
 void send_IPI_mask_phys(const cpumask_t *mask, int vector);
 void send_IPI_self_phys(int vector);
@@ -80,7 +80,7 @@ const cpumask_t *vector_allocation_cpumask_phys(int cpu);
        .int_dest_mode = 0 /* physical delivery */, \
        .init_apic_ldr = init_apic_ldr_phys, \
        .clustered_apic_check = clustered_apic_check_phys, \
-       .target_cpus = target_cpus_phys, \
+       .target_cpus = target_cpus_all, \
        .vector_allocation_cpumask = vector_allocation_cpumask_phys, \
        .cpu_mask_to_apicid = cpu_mask_to_apicid_phys, \
        .send_IPI_mask = send_IPI_mask_phys, \
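
Call sites are unaffected by this consolidation: they reach the handlers only through the per-mode function-pointer table declared above (extern const struct genapic *genapic), so whether .target_cpus points at a per-mode copy or at the shared target_cpus_all() is invisible to them. A hypothetical call site, sketched under that assumption:

    /* Hypothetical caller; dispatch goes through the genapic table. */
    const cpumask_t *online = genapic->target_cpus();
    unsigned int cpu = cpumask_first(online);
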