ia64/xen-unstable

changeset 18526:c0db74e41662

Fix misc issues related to allowing support of more CPUs

This mainly means removing on-stack variables whose size depends (or should
depend) on NR_CPUS (other than cpumask_t ones), and adjusting certain array
sizes.
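
To illustrate the pattern (a minimal sketch only: count_vcpus_per_cpu() is a
hypothetical name, while xmalloc_array(), xfree(), memset() and
last_cpu(cpu_possible_map) are the interfaces the patch below actually uses),
an on-stack cnt[NR_CPUS] array becomes a heap allocation sized by the highest
possible CPU:

    /* Sketch only: replaces "unsigned int cnt[NR_CPUS] = { 0 };" on the stack. */
    static int count_vcpus_per_cpu(void)
    {
        unsigned int  nr_cpus = last_cpu(cpu_possible_map) + 1;
        unsigned int *cnt = xmalloc_array(unsigned int, nr_cpus);

        if ( cnt == NULL )
            return -ENOMEM;   /* or degrade gracefully, as domctl.c does below */
        memset(cnt, 0, nr_cpus * sizeof(*cnt));

        /* ... per-CPU accounting, indexing cnt[] by v->processor ... */

        xfree(cnt);
        return 0;
    }

Keeping such arrays off the (limited) hypervisor stack is the point of the
change, since NR_CPUS can now be configured much larger.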

There's at least one open tools issue: the 'xm vcpu-pin' path assumes a
maximum of 64 CPUs in many places.
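
As a purely hypothetical illustration of that kind of assumption (this is not
the actual xm/libxc code), a CPU map packed into a single 64-bit word can only
describe CPUs 0-63:

    /* Hypothetical example only -- not taken from the xm tools. */
    #include <stdint.h>

    typedef uint64_t cpumap64_t;

    static void cpumap64_set(cpumap64_t *map, unsigned int cpu)
    {
        if ( cpu < 64 )
            *map |= (uint64_t)1 << cpu;   /* CPUs 64 and up are silently dropped */
    }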

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Sep 22 14:37:31 2008 +0100 (2008-09-22)
parents ae24b533dc9d
children 40233384ffe3
files xen/arch/x86/nmi.c xen/arch/x86/smpboot.c xen/arch/x86/x86_32/domain_page.c xen/common/domctl.c xen/common/sched_credit.c xen/common/sched_sedf.c
line diff
--- a/xen/arch/x86/nmi.c	Mon Sep 22 14:04:27 2008 +0100
+++ b/xen/arch/x86/nmi.c	Mon Sep 22 14:37:31 2008 +0100
@@ -96,7 +96,7 @@ int nmi_active;
 
 int __init check_nmi_watchdog (void)
 {
-    unsigned int prev_nmi_count[NR_CPUS];
+    static unsigned int __initdata prev_nmi_count[NR_CPUS];
     int cpu;
 
     if ( !nmi_watchdog )
--- a/xen/arch/x86/smpboot.c	Mon Sep 22 14:04:27 2008 +0100
+++ b/xen/arch/x86/smpboot.c	Mon Sep 22 14:37:31 2008 +0100
@@ -1121,7 +1121,7 @@ static void __init smp_boot_cpus(unsigne
 	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));
 
 	kicked = 1;
-	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
+	for (bit = 0; kicked < NR_CPUS && bit < NR_CPUS; bit++) {
 		apicid = cpu_present_to_apicid(bit);
 		/*
 		 * Don't even attempt to start the boot CPU!
--- a/xen/arch/x86/x86_32/domain_page.c	Mon Sep 22 14:04:27 2008 +0100
+++ b/xen/arch/x86/x86_32/domain_page.c	Mon Sep 22 14:37:31 2008 +0100
@@ -201,6 +201,9 @@ void *map_domain_page_global(unsigned lo
 
     ASSERT(!in_irq() && local_irq_is_enabled());
 
+    /* At least half the ioremap space should be available to us. */
+    BUILD_BUG_ON(IOREMAP_VIRT_START + (IOREMAP_MBYTES << 19) >= FIXADDR_START);
+
     spin_lock(&globalmap_lock);
 
     idx = find_next_zero_bit(inuse, GLOBALMAP_BITS, inuse_cursor);
--- a/xen/common/domctl.c	Mon Sep 22 14:04:27 2008 +0100
+++ b/xen/common/domctl.c	Mon Sep 22 14:37:31 2008 +0100
@@ -145,16 +145,23 @@ static unsigned int default_vcpu0_locati
 {
     struct domain *d;
     struct vcpu   *v;
-    unsigned int   i, cpu, cnt[NR_CPUS] = { 0 };
+    unsigned int   i, cpu, nr_cpus, *cnt;
     cpumask_t      cpu_exclude_map;
 
     /* Do an initial CPU placement. Pick the least-populated CPU. */
-    rcu_read_lock(&domlist_read_lock);
-    for_each_domain ( d )
-        for_each_vcpu ( d, v )
-        if ( !test_bit(_VPF_down, &v->pause_flags) )
-            cnt[v->processor]++;
-    rcu_read_unlock(&domlist_read_lock);
+    nr_cpus = last_cpu(cpu_possible_map) + 1;
+    cnt = xmalloc_array(unsigned int, nr_cpus);
+    if ( cnt )
+    {
+        memset(cnt, 0, nr_cpus * sizeof(*cnt));
+
+        rcu_read_lock(&domlist_read_lock);
+        for_each_domain ( d )
+            for_each_vcpu ( d, v )
+                if ( !test_bit(_VPF_down, &v->pause_flags) )
+                    cnt[v->processor]++;
+        rcu_read_unlock(&domlist_read_lock);
+    }
 
     /*
      * If we're on a HT system, we only auto-allocate to a non-primary HT. We 
@@ -172,10 +179,12 @@ static unsigned int default_vcpu0_locati
             (cpus_weight(cpu_sibling_map[i]) > 1) )
            continue;
        cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
-        if ( cnt[i] <= cnt[cpu] )
+        if ( !cnt || cnt[i] <= cnt[cpu] )
            cpu = i;
    }
 
+    xfree(cnt);
+
    return cpu;
 }
 
--- a/xen/common/sched_credit.c	Mon Sep 22 14:04:27 2008 +0100
+++ b/xen/common/sched_credit.c	Mon Sep 22 14:37:31 2008 +0100
@@ -1258,14 +1258,15 @@ csched_dump_pcpu(int cpu)
     struct csched_pcpu *spc;
     struct csched_vcpu *svc;
     int loop;
+    char cpustr[100];
 
     spc = CSCHED_PCPU(cpu);
     runq = &spc->runq;
 
-    printk(" sort=%d, sibling=0x%lx, core=0x%lx\n",
-            spc->runq_sort_last,
-            cpu_sibling_map[cpu].bits[0],
-            cpu_core_map[cpu].bits[0]);
+    cpumask_scnprintf(cpustr, sizeof(cpustr), cpu_sibling_map[cpu]);
+    printk(" sort=%d, sibling=%s, ", spc->runq_sort_last, cpustr);
+    cpumask_scnprintf(cpustr, sizeof(cpustr), cpu_core_map[cpu]);
+    printk("core=%s\n", cpustr);
 
     /* current VCPU */
     svc = CSCHED_VCPU(per_cpu(schedule_data, cpu).curr);
@@ -1292,6 +1293,7 @@ csched_dump(void)
 {
     struct list_head *iter_sdom, *iter_svc;
     int loop;
+    char idlers_buf[100];
 
     printk("info:\n"
            "\tncpus              = %u\n"
@@ -1317,7 +1319,8 @@ csched_dump(void)
            CSCHED_TICKS_PER_TSLICE,
            CSCHED_TICKS_PER_ACCT);
 
-    printk("idlers: 0x%lx\n", csched_priv.idlers.bits[0]);
+    cpumask_scnprintf(idlers_buf, sizeof(idlers_buf), csched_priv.idlers);
+    printk("idlers: %s\n", idlers_buf);
 
     CSCHED_STATS_PRINTK();
 
--- a/xen/common/sched_sedf.c	Mon Sep 22 14:04:27 2008 +0100
+++ b/xen/common/sched_sedf.c	Mon Sep 22 14:37:31 2008 +0100
@@ -1298,8 +1298,18 @@ static int sedf_adjust_weights(struct xe
 {
     struct vcpu *p;
     struct domain      *d;
-    int                 sumw[NR_CPUS] = { 0 };
-    s_time_t            sumt[NR_CPUS] = { 0 };
+    unsigned int        nr_cpus = last_cpu(cpu_possible_map) + 1;
+    int                *sumw = xmalloc_array(int, nr_cpus);
+    s_time_t           *sumt = xmalloc_array(s_time_t, nr_cpus);
+
+    if ( !sumw || !sumt )
+    {
+        xfree(sumt);
+        xfree(sumw);
+        return -ENOMEM;
+    }
+    memset(sumw, 0, nr_cpus * sizeof(*sumw));
+    memset(sumt, 0, nr_cpus * sizeof(*sumt));
 
     /* Sum across all weights. */
     rcu_read_lock(&domlist_read_lock);
@@ -1348,6 +1358,9 @@ static int sedf_adjust_weights(struct xe
     }
     rcu_read_unlock(&domlist_read_lock);
 
+    xfree(sumt);
+    xfree(sumw);
+
     return 0;
 }
 
@@ -1356,6 +1369,7 @@ static int sedf_adjust_weights(struct xe
 static int sedf_adjust(struct domain *p, struct xen_domctl_scheduler_op *op)
 {
     struct vcpu *v;
+    int rc;
 
     PRINT(2,"sedf_adjust was called, domain-id %i new period %"PRIu64" "
           "new slice %"PRIu64"\nlatency %"PRIu64" extra:%s\n",
@@ -1411,8 +1425,9 @@ static int sedf_adjust(struct domain *p,
             }
         }
 
-        if ( sedf_adjust_weights(op) )
-            return -EINVAL;
+        rc = sedf_adjust_weights(op);
+        if ( rc )
+            return rc;
 
         for_each_vcpu ( p, v )
         {