ia64/xen-unstable

changeset 11480:65b33e64b642

[XEN] Do not steal work from idle CPUs. This can happen
if an idle CPU is in the process of waking up.
This fix was suggested by Anthony Xu <anthony.xu@intel.com> as
it can give a significant boost to HVM performance.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Sep 14 16:01:46 2006 +0100 (2006-09-14)
parents 11645dda144c
children e87e5d216c0b
files xen/common/sched_credit.c
line diff
     1.1 --- a/xen/common/sched_credit.c	Thu Sep 14 08:19:41 2006 +0100
     1.2 +++ b/xen/common/sched_credit.c	Thu Sep 14 16:01:46 2006 +0100
     1.3 @@ -987,36 +987,38 @@ csched_load_balance(int cpu, struct csch
     1.4           * cause a deadlock if the peer CPU is also load balancing and trying
     1.5           * to lock this CPU.
     1.6           */
     1.7 -        if ( spin_trylock(&per_cpu(schedule_data, peer_cpu).schedule_lock) )
     1.8 +        if ( !spin_trylock(&per_cpu(schedule_data, peer_cpu).schedule_lock) )
     1.9          {
    1.10 +            CSCHED_STAT_CRANK(steal_trylock_failed);
    1.11 +            continue;
    1.12 +        }
    1.13  
    1.14 -            spc = CSCHED_PCPU(peer_cpu);
    1.15 -            if ( unlikely(spc == NULL) )
    1.16 -            {
    1.17 -                CSCHED_STAT_CRANK(steal_peer_down);
    1.18 -                speer = NULL;
    1.19 -            }
    1.20 -            else
    1.21 -            {
    1.22 -                speer = csched_runq_steal(spc, cpu, snext->pri);
    1.23 -            }
    1.24 -
    1.25 -            spin_unlock(&per_cpu(schedule_data, peer_cpu).schedule_lock);
    1.26 -
    1.27 -            /* Got one! */
    1.28 -            if ( speer )
    1.29 -            {
    1.30 -                CSCHED_STAT_CRANK(vcpu_migrate);
    1.31 -                return speer;
    1.32 -            }
    1.33 +        spc = CSCHED_PCPU(peer_cpu);
    1.34 +        if ( unlikely(spc == NULL) )
    1.35 +        {
    1.36 +            CSCHED_STAT_CRANK(steal_peer_down);
    1.37 +            speer = NULL;
    1.38 +        }
    1.39 +        else if ( is_idle_vcpu(per_cpu(schedule_data, peer_cpu).curr) )
    1.40 +        {
    1.41 +            speer = NULL;
    1.42          }
    1.43          else
    1.44          {
    1.45 -            CSCHED_STAT_CRANK(steal_trylock_failed);
    1.46 +            /* Try to steal work from an online non-idle CPU. */
    1.47 +            speer = csched_runq_steal(spc, cpu, snext->pri);
    1.48 +        }
    1.49 +
    1.50 +        spin_unlock(&per_cpu(schedule_data, peer_cpu).schedule_lock);
    1.51 +
    1.52 +        /* Got one? */
    1.53 +        if ( speer )
    1.54 +        {
    1.55 +            CSCHED_STAT_CRANK(vcpu_migrate);
    1.56 +            return speer;
    1.57          }
    1.58      }
    1.59  
    1.60 -
    1.61      /* Failed to find more important work */
    1.62      __runq_remove(snext);
    1.63      return snext;