ia64/xen-unstable

changeset 12266:32e4952c0638

[XEN] Optimize credit scheduler load balancing logic
When looking for remote work, only look at or grab a remote CPU's
lock when that CPU is not idling.
Signed-off-by: Emmanuel Ackaouy <ack@xensource.com>
author Emmanuel Ackaouy <ack@xensource.com>
date Mon Nov 06 16:55:56 2006 +0000 (2006-11-06)
parents 8eb8c0085604
children bb6cd7ba259b
files xen/common/sched_credit.c
--- a/xen/common/sched_credit.c	Mon Nov 06 16:36:51 2006 +0000
+++ b/xen/common/sched_credit.c	Mon Nov 06 16:55:56 2006 +0000
@@ -955,8 +955,10 @@ csched_runq_steal(struct csched_pcpu *sp
 static struct csched_vcpu *
 csched_load_balance(int cpu, struct csched_vcpu *snext)
 {
+    struct csched_vcpu *speer;
     struct csched_pcpu *spc;
-    struct csched_vcpu *speer;
+    struct vcpu *peer_vcpu;
+    cpumask_t workers;
     int peer_cpu;
 
     if ( snext->pri == CSCHED_PRI_IDLE )
@@ -966,15 +968,23 @@ csched_load_balance(int cpu, struct csch
     else
         CSCHED_STAT_CRANK(load_balance_other);
 
+    /*
+     * Peek at non-idling CPUs in the system
+     */
+    cpus_andnot(workers, cpu_online_map, csched_priv.idlers);
+    cpu_clear(cpu, workers);
+
     peer_cpu = cpu;
     BUG_ON( peer_cpu != snext->vcpu->processor );
 
-    while ( 1 )
+    while ( !cpus_empty(workers) )
     {
-        /* For each PCPU in the system starting with our neighbour... */
-        peer_cpu = (peer_cpu + 1) % csched_priv.ncpus;
-        if ( peer_cpu == cpu )
-            break;
+        /* For each CPU of interest, starting with our neighbour... */
+        peer_cpu = next_cpu(peer_cpu, workers);
+        if ( peer_cpu == NR_CPUS )
+            peer_cpu = first_cpu(workers);
+
+        cpu_clear(peer_cpu, workers);
 
         /*
          * Get ahold of the scheduler lock for this peer CPU.
@@ -990,13 +1000,19 @@ csched_load_balance(int cpu, struct csch
         }
 
         spc = CSCHED_PCPU(peer_cpu);
+        peer_vcpu = per_cpu(schedule_data, peer_cpu).curr;
+
        if ( unlikely(spc == NULL) )
        {
            CSCHED_STAT_CRANK(steal_peer_down);
            speer = NULL;
        }
-        else if ( is_idle_vcpu(per_cpu(schedule_data, peer_cpu).curr) )
+        else if ( unlikely(is_idle_vcpu(peer_vcpu)) )
        {
+            /*
+             * Don't steal from an idle CPU's runq because it's about to
+             * pick up work from it itself.
+             */
            CSCHED_STAT_CRANK(steal_peer_idle);
            speer = NULL;
        }
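
For readers less familiar with Xen's cpumask helpers, the following stand-alone
sketch models the new scan: build a mask of non-idling peers, then walk it
round-robin starting at our neighbour, consuming each probed CPU. It is a
simplified user-space illustration, not Xen code: mask_first()/mask_next() are
hypothetical stand-ins for Xen's first_cpu()/next_cpu(), cpumask_t is modelled
as a plain bitmask, and the online/idler values are arbitrary.

/*
 * Hypothetical user-space model of the new peer-selection loop.
 * mask_first()/mask_next() stand in for Xen's first_cpu()/next_cpu();
 * cpumask_t is modelled as a plain unsigned bitmask.
 */
#include <stdio.h>

#define NR_CPUS 8

/* Lowest set bit strictly above 'cpu', or NR_CPUS if none (cf. next_cpu). */
static int mask_next(unsigned mask, int cpu)
{
    for (int i = cpu + 1; i < NR_CPUS; i++)
        if (mask & (1u << i))
            return i;
    return NR_CPUS;
}

/* Lowest set bit, or NR_CPUS if the mask is empty (cf. first_cpu). */
static int mask_first(unsigned mask)
{
    return mask_next(mask, -1);
}

int main(void)
{
    unsigned online = 0xff;   /* CPUs 0-7 online (arbitrary example)   */
    unsigned idlers = 0x29;   /* CPUs 0, 3 and 5 idle (arbitrary)      */
    int cpu = 2;              /* the CPU running csched_load_balance() */

    /* Peek at non-idling CPUs only: workers = online & ~idlers, minus us. */
    unsigned workers = online & ~idlers;
    workers &= ~(1u << cpu);

    /* Walk the mask round-robin starting with our neighbour, consuming
     * each probed CPU, instead of the old (peer_cpu + 1) % ncpus walk
     * over every CPU in the system. */
    int peer_cpu = cpu;
    while (workers != 0) {
        peer_cpu = mask_next(workers, peer_cpu);
        if (peer_cpu == NR_CPUS)
            peer_cpu = mask_first(workers);
        workers &= ~(1u << peer_cpu);

        /* The real code takes this peer's schedule lock here and tries
         * csched_runq_steal(); idle peers are never even probed. */
        printf("probe CPU %d\n", peer_cpu);  /* prints 4, 6, 7, 1 */
    }
    return 0;
}

The point of the workers mask is that an idle peer is about to run the load
balancer itself and will pull work on its own, so probing it, and contending
on its schedule lock, is wasted effort; with this change such CPUs' locks are
never touched at all.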