ia64/xen-unstable

changeset 12024:1940ee13f9d6

Improve I/O performance when competing with CPU intensive workloads.
Allow non CPU consuming wake-to-run latency sensitive VCPUs to
preempt CPU consuming ones.

Signed-off-by: Emmanuel Ackaouy <ack@xensource.com>
author Emmanuel Ackaouy <ack@xensource.com>
date Fri Oct 27 15:44:27 2006 +0100 (2006-10-27)
parents 4a320d26fc24
children 66fe61db9e69
files xen/common/sched_credit.c
line diff
     1.1 --- a/xen/common/sched_credit.c	Thu Oct 26 16:56:16 2006 +0100
     1.2 +++ b/xen/common/sched_credit.c	Fri Oct 27 15:44:27 2006 +0100
     1.3 @@ -46,6 +46,7 @@
     1.4  /*
     1.5   * Priorities
     1.6   */
     1.7 +#define CSCHED_PRI_TS_BOOST      0      /* time-share waking up */
     1.8  #define CSCHED_PRI_TS_UNDER     -1      /* time-share w/ credits */
     1.9  #define CSCHED_PRI_TS_OVER      -2      /* time-share w/o credits */
    1.10  #define CSCHED_PRI_IDLE         -64     /* idle */
    1.11 @@ -410,6 +411,14 @@ csched_vcpu_acct(struct csched_vcpu *svc
    1.12  
    1.13          spin_unlock_irqrestore(&csched_priv.lock, flags);
    1.14      }
    1.15 +
    1.16 +    /*
    1.17 +     * If this VCPU's priority was boosted when it last awoke, reset it.
     1.18 +     * If the VCPU is found here, then it's consuming a non-negligible
    1.19 +     * amount of CPU resources and should no longer be boosted.
    1.20 +     */
    1.21 +    if ( svc->pri == CSCHED_PRI_TS_BOOST )
    1.22 +        svc->pri = CSCHED_PRI_TS_UNDER;
    1.23  }
    1.24  
    1.25  static inline void
    1.26 @@ -566,6 +575,25 @@ csched_vcpu_wake(struct vcpu *vc)
    1.27      else
    1.28          CSCHED_STAT_CRANK(vcpu_wake_not_runnable);
    1.29  
    1.30 +    /*
     1.31 +     * We temporarily boost the priority of waking VCPUs!
    1.32 +     *
     1.33 +     * If this VCPU consumes a non-negligible amount of CPU, it
    1.34 +     * will eventually find itself in the credit accounting code
    1.35 +     * path where its priority will be reset to normal.
    1.36 +     *
    1.37 +     * If on the other hand the VCPU consumes little CPU and is
    1.38 +     * blocking and awoken a lot (doing I/O for example), its
     1.39 +     * priority will remain boosted, optimizing its wake-to-run
    1.40 +     * latencies.
    1.41 +     *
    1.42 +     * This allows wake-to-run latency sensitive VCPUs to preempt
    1.43 +     * more CPU resource intensive VCPUs without impacting overall 
    1.44 +     * system fairness.
    1.45 +     */
    1.46 +    if ( svc->pri == CSCHED_PRI_TS_UNDER )
    1.47 +        svc->pri = CSCHED_PRI_TS_BOOST;
    1.48 +
    1.49      /* Put the VCPU on the runq and tickle CPUs */
    1.50      __runq_insert(cpu, svc);
    1.51      __runq_tickle(cpu, svc);
    1.52 @@ -659,7 +687,7 @@ csched_runq_sort(unsigned int cpu)
    1.53          next = elem->next;
    1.54          svc_elem = __runq_elem(elem);
    1.55  
    1.56 -        if ( svc_elem->pri == CSCHED_PRI_TS_UNDER )
    1.57 +        if ( svc_elem->pri >= CSCHED_PRI_TS_UNDER )
    1.58          {
    1.59              /* does elem need to move up the runq? */
    1.60              if ( elem->prev != last_under )