xenbits.xensource.com Git - people/ssmith/netchannel2-pvops.git/commitdiff
sched: avoid flexible array member inside struct (gcc extension)
author: Rusty Russell <rusty@rustcorp.com.au>
Wed, 13 May 2009 06:49:12 +0000 (16:19 +0930)
committer: Ingo Molnar <mingo@elte.hu>
Wed, 13 May 2009 13:17:05 +0000 (15:17 +0200)
struct sched_group and struct sched_domain end in 'unsigned long
cpumask[]' which Jeff Garzik notes is not legal C to place inside
another struct.  It upsets sparse and clang (LLVM's C front end).

Al Viro pointed out that a union is the Right Way to do this.

[ Impact: use more standard C code ]

Reported-by: Jeff Garzik <jeff@garzik.org>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Jeff Garzik <jeff@garzik.org>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
LKML-Reference: <200905131619.12880.rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched.c

index 26efa475bdc143f6e4459067c18ce57e71608764..d1ef62cd3f5d2bcd9d9b853cb1d24850b6bc6590 100644 (file)
@@ -7756,22 +7756,24 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
  * FIXME: use cpumask_var_t or dynamic percpu alloc to avoid wasting space
  * for nr_cpu_ids < CONFIG_NR_CPUS.
  */
-struct static_sched_group {
+union static_sched_group {
        struct sched_group sg;
-       DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
+       char _sg_and_cpus[sizeof(struct sched_group) +
+                         BITS_TO_LONGS(CONFIG_NR_CPUS) * sizeof(long)];
 };
 
-struct static_sched_domain {
+union static_sched_domain {
        struct sched_domain sd;
-       DECLARE_BITMAP(span, CONFIG_NR_CPUS);
+       char _sd_and_cpus[sizeof(struct sched_domain) +
+                         BITS_TO_LONGS(CONFIG_NR_CPUS) * sizeof(long)];
 };
 
 /*
  * SMT sched-domains:
  */
 #ifdef CONFIG_SCHED_SMT
-static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
+static DEFINE_PER_CPU(union static_sched_domain, cpu_domains);
+static DEFINE_PER_CPU(union static_sched_group, sched_group_cpus);
 
 static int
 cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
@@ -7787,8 +7789,8 @@ cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
  * multi-core sched-domains:
  */
 #ifdef CONFIG_SCHED_MC
-static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
+static DEFINE_PER_CPU(union static_sched_domain, core_domains);
+static DEFINE_PER_CPU(union static_sched_group, sched_group_core);
 #endif /* CONFIG_SCHED_MC */
 
 #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
@@ -7815,8 +7817,8 @@ cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 }
 #endif
 
-static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
+static DEFINE_PER_CPU(union static_sched_domain, phys_domains);
+static DEFINE_PER_CPU(union static_sched_group, sched_group_phys);
 
 static int
 cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
@@ -7843,11 +7845,11 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
  * groups, so roll our own. Now each node has its own list of groups which
  * gets dynamically allocated.
  */
-static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
+static DEFINE_PER_CPU(union static_sched_domain, node_domains);
 static struct sched_group ***sched_group_nodes_bycpu;
 
-static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
+static DEFINE_PER_CPU(union static_sched_domain, allnodes_domains);
+static DEFINE_PER_CPU(union static_sched_group, sched_group_allnodes);
 
 static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
                                 struct sched_group **sg,