ia64/xen-unstable

changeset 1879:8a98ffe9c5ef

bitkeeper revision 1.1108.2.21 (41005dedyY6udmpNmEKoBr3e9UiXAQ)

Merge scramble.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into scramble.cl.cam.ac.uk:/local/scratch/kaf24/xeno
author kaf24@scramble.cl.cam.ac.uk
date Fri Jul 23 00:38:05 2004 +0000 (2004-07-23)
parents a2fa0a647240 00d044d1226c
children 020f58885ed4
files xen/common/sched_bvt.c xen/common/sched_fair_bvt.c xen/common/sched_rrobin.c xen/common/schedule.c xen/include/xen/sched-if.h xen/include/xen/sched.h
line diff
     1.1 --- a/xen/common/sched_bvt.c	Thu Jul 22 18:18:54 2004 +0000
     1.2 +++ b/xen/common/sched_bvt.c	Fri Jul 23 00:38:05 2004 +0000
     1.3 @@ -25,6 +25,7 @@
     1.4  #include <xen/perfc.h>
     1.5  #include <xen/sched-if.h>
     1.6  #include <xen/slab.h>
     1.7 +#include <xen/softirq.h>
     1.8  
     1.9  /* all per-domain BVT-specific scheduling info is stored here */
    1.10  struct bvt_dom_info
    1.11 @@ -145,24 +146,6 @@ void bvt_free_task(struct domain *p)
    1.12  }
    1.13  
    1.14  
    1.15 -void bvt_wake_up(struct domain *p)
    1.16 -{
    1.17 -    struct bvt_dom_info *inf = BVT_INFO(p);
    1.18 -
    1.19 -    ASSERT(inf != NULL);
    1.20 -    
    1.21 -
    1.22 -    /* set the BVT parameters */
    1.23 -    if (inf->avt < CPU_SVT(p->processor))
    1.24 -        inf->avt = CPU_SVT(p->processor);
    1.25 -
    1.26 -    /* deal with warping here */
    1.27 -    inf->warpback  = 1;
    1.28 -    inf->warped    = NOW();
    1.29 -    __calc_evt(inf);
    1.30 -    __add_to_runqueue_head(p);
    1.31 -}
    1.32 -
    1.33  /* 
    1.34   * Block the currently-executing domain until a pertinent event occurs.
    1.35   */
    1.36 @@ -433,10 +416,47 @@ int bvt_init_scheduler()
    1.37      return 0;
    1.38  }
    1.39  
    1.40 -static void bvt_pause(struct domain *p)
    1.41 +static void bvt_sleep(struct domain *d)
    1.42 +{
    1.43 +    if ( test_bit(DF_RUNNING, &d->flags) )
    1.44 +        cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
    1.45 +    else if ( __task_on_runqueue(d) )
    1.46 +        __del_from_runqueue(d);
    1.47 +}
    1.48 +
    1.49 +void bvt_wake(struct domain *d)
    1.50  {
    1.51 -    if( __task_on_runqueue(p) )
    1.52 -        __del_from_runqueue(p);
    1.53 +    struct bvt_dom_info *inf = BVT_INFO(d);
    1.54 +    struct domain       *curr;
    1.55 +    s_time_t             now, min_time;
    1.56 +    int                  cpu = d->processor;
    1.57 +
    1.58 +    /* If on the runqueue already then someone has done the wakeup work. */
    1.59 +    if ( unlikely(__task_on_runqueue(d)) )
    1.60 +        return;
    1.61 +
    1.62 +    __add_to_runqueue_head(d);
    1.63 +
    1.64 +    now = NOW();
    1.65 +
    1.66 +    /* Set the BVT parameters. */
    1.67 +    if ( inf->avt < CPU_SVT(cpu) )
    1.68 +        inf->avt = CPU_SVT(cpu);
    1.69 +
    1.70 +    /* Deal with warping here. */
    1.71 +    inf->warpback  = 1;
    1.72 +    inf->warped    = now;
    1.73 +    __calc_evt(inf);
    1.74 +
    1.75 +    curr = schedule_data[cpu].curr;
    1.76 +
    1.77 +    /* Currently-running domain should run at least for ctx_allow. */
    1.78 +    min_time = curr->lastschd + curr->min_slice;
    1.79 +    
    1.80 +    if ( is_idle_task(curr) || (min_time <= now) )
    1.81 +        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
    1.82 +    else if ( schedule_data[cpu].s_timer.expires > (min_time + TIME_SLOP) )
    1.83 +        mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
    1.84  }
    1.85  
    1.86  struct scheduler sched_bvt_def = {
    1.87 @@ -448,7 +468,6 @@ struct scheduler sched_bvt_def = {
    1.88      .alloc_task     = bvt_alloc_task,
    1.89      .add_task       = bvt_add_task,
    1.90      .free_task      = bvt_free_task,
    1.91 -    .wake_up        = bvt_wake_up,
    1.92      .do_block       = bvt_do_block,
    1.93      .do_schedule    = bvt_do_schedule,
    1.94      .control        = bvt_ctl,
    1.95 @@ -456,6 +475,7 @@ struct scheduler sched_bvt_def = {
    1.96      .dump_settings  = bvt_dump_settings,
    1.97      .dump_cpu_state = bvt_dump_cpu_state,
    1.98      .dump_runq_el   = bvt_dump_runq_el,
    1.99 -    .pause          = bvt_pause,
   1.100 +    .sleep          = bvt_sleep,
   1.101 +    .wake           = bvt_wake,
   1.102  };
   1.103  
     2.1 --- a/xen/common/sched_fair_bvt.c	Thu Jul 22 18:18:54 2004 +0000
     2.2 +++ b/xen/common/sched_fair_bvt.c	Fri Jul 23 00:38:05 2004 +0000
     2.3 @@ -26,20 +26,20 @@
     2.4  #include <xen/perfc.h>
     2.5  #include <xen/sched-if.h>
     2.6  #include <xen/slab.h>
     2.7 +#include <xen/softirq.h>
     2.8  #include <xen/trace.h>
     2.9  
    2.10  /* For tracing - TODO - put all the defines in some common hearder file */
    2.11  #define TRC_SCHED_FBVT_DO_SCHED             0x00020000
    2.12  #define TRC_SCHED_FBVT_DO_SCHED_UPDATE      0x00020001
    2.13  
    2.14 -
    2.15  /* all per-domain BVT-specific scheduling info is stored here */
    2.16  struct fbvt_dom_info
    2.17  {
    2.18      unsigned long mcu_advance;      /* inverse of weight */
    2.19      u32           avt;              /* actual virtual time */
    2.20      u32           evt;              /* effective virtual time */
    2.21 -    u32		      time_slept;	    /* records amount of time slept, used for scheduling */
    2.22 +    u32           time_slept;       /* amount of time slept */
    2.23      int           warpback;         /* warp?  */
    2.24      long          warp;             /* virtual time warp */
    2.25      long          warpl;            /* warp limit */
    2.26 @@ -50,13 +50,13 @@ struct fbvt_dom_info
    2.27  
    2.28  struct fbvt_cpu_info
    2.29  {
    2.30 -    unsigned long svt; /* XXX check this is unsigned long! */
    2.31 -    u32		      vtb;	    	    /* virtual time bonus */
    2.32 -    u32           r_time;           /* last time to run */  
    2.33 +    unsigned long svt;       /* XXX check this is unsigned long! */
    2.34 +    u32           vtb;       /* virtual time bonus */
    2.35 +    u32           r_time;    /* last time to run */  
    2.36  };
    2.37  
    2.38  
    2.39 -#define FBVT_INFO(p)   ((struct fbvt_dom_info *)(p)->sched_priv)
    2.40 +#define FBVT_INFO(p)  ((struct fbvt_dom_info *)(p)->sched_priv)
    2.41  #define CPU_INFO(cpu) ((struct fbvt_cpu_info *)(schedule_data[cpu]).sched_priv)
    2.42  #define CPU_SVT(cpu)  (CPU_INFO(cpu)->svt)
    2.43  #define LAST_VTB(cpu) (CPU_INFO(cpu)->vtb)
    2.44 @@ -137,7 +137,7 @@ void fbvt_add_task(struct domain *p)
    2.45          inf->avt         = CPU_SVT(p->processor);
    2.46          inf->evt         = CPU_SVT(p->processor);
    2.47          /* Set some default values here. */
    2.48 -		inf->time_slept  = 0;
    2.49 +        inf->time_slept  = 0;
    2.50          inf->warpback    = 0;
    2.51          inf->warp        = 0;
    2.52          inf->warpl       = 0;
    2.53 @@ -157,43 +157,6 @@ void fbvt_free_task(struct domain *p)
    2.54      kmem_cache_free( dom_info_cache, p->sched_priv );
    2.55  }
    2.56  
    2.57 -
    2.58 -void fbvt_wake_up(struct domain *p)
    2.59 -{
    2.60 -    struct fbvt_dom_info *inf = FBVT_INFO(p);
    2.61 -    s32 io_warp;
    2.62 -
    2.63 -    ASSERT(inf != NULL);
    2.64 -    
    2.65 -
    2.66 -    /* set the BVT parameters */
    2.67 -    if (inf->avt < CPU_SVT(p->processor))
    2.68 -    {
    2.69 -		/*
    2.70 -	  	 *We want IO bound processes to gain
    2.71 -		 *dispatch precedence. It is especially for
    2.72 -		 *device driver domains. Therefore AVT should not be updated
    2.73 -		 *to SVT but to a value marginally smaller.
    2.74 -		 *Since frequently sleeping domains have high time_slept
    2.75 -		 *values, the virtual time can be determined as:
    2.76 -		 *SVT - const * TIME_SLEPT
    2.77 -	 	 */
    2.78 -	
    2.79 -		io_warp = (int)(0.5 * inf->time_slept);
    2.80 -		if(io_warp > 1000) io_warp = 1000;
    2.81 -
    2.82 -		ASSERT(inf->time_slept + CPU_SVT(p->processor) > inf->avt + io_warp);
    2.83 -		inf->time_slept += CPU_SVT(p->processor) - inf->avt - io_warp;
    2.84 -        inf->avt = CPU_SVT(p->processor) - io_warp;
    2.85 -    }
    2.86 -
    2.87 -    /* deal with warping here */
    2.88 -    inf->warpback  = 1;
    2.89 -    inf->warped    = NOW();
    2.90 -    __calc_evt(inf);
    2.91 -    __add_to_runqueue_head(p);
    2.92 -}
    2.93 -
    2.94  /* 
    2.95   * Block the currently-executing domain until a pertinent event occurs.
    2.96   */
    2.97 @@ -223,7 +186,7 @@ int fbvt_ctl(struct sched_ctl_cmd *cmd)
    2.98  
    2.99  /* Adjust scheduling parameter for a given domain. */
   2.100  int fbvt_adjdom(struct domain *p,
   2.101 -               struct sched_adjdom_cmd *cmd)
   2.102 +                struct sched_adjdom_cmd *cmd)
   2.103  {
   2.104      struct fbvt_adjdom *params = &cmd->u.fbvt;
   2.105      unsigned long flags;
   2.106 @@ -292,10 +255,10 @@ static task_slice_t fbvt_do_schedule(s_t
   2.107      s32                 mcus;
   2.108      u32                 next_evt, next_prime_evt, min_avt;
   2.109      u32                 sl_decrement;
   2.110 -    struct fbvt_dom_info *prev_inf       = FBVT_INFO(prev),
   2.111 -                        *p_inf          = NULL,
   2.112 -                        *next_inf       = NULL,
   2.113 -                        *next_prime_inf = NULL;
   2.114 +    struct fbvt_dom_info *prev_inf       = FBVT_INFO(prev);
   2.115 +    struct fbvt_dom_info *p_inf          = NULL;
   2.116 +    struct fbvt_dom_info *next_inf       = NULL;
   2.117 +    struct fbvt_dom_info *next_prime_inf = NULL;
   2.118      task_slice_t        ret;
   2.119  
   2.120      ASSERT(prev->sched_priv != NULL);
   2.121 @@ -307,24 +270,25 @@ static task_slice_t fbvt_do_schedule(s_t
   2.122          /* Calculate mcu and update avt. */
   2.123          mcus = (ranfor + MCU - 1) / MCU;
   2.124          
   2.125 -        TRACE_3D(TRC_SCHED_FBVT_DO_SCHED_UPDATE, prev->domain, mcus, LAST_VTB(cpu));
   2.126 +        TRACE_3D(TRC_SCHED_FBVT_DO_SCHED_UPDATE, prev->domain, 
   2.127 +                 mcus, LAST_VTB(cpu));
   2.128      
   2.129          sl_decrement = mcus * LAST_VTB(cpu) / R_TIME(cpu);
   2.130          prev_inf->time_slept -=  sl_decrement;
   2.131          prev_inf->avt += mcus * prev_inf->mcu_advance - sl_decrement;
   2.132    
   2.133          /*if(mcus * prev_inf->mcu_advance < LAST_VTB(cpu))
   2.134 -	    {
   2.135 -	        ASSERT(prev_inf->time_slept >= mcus * prev_inf->mcu_advance);
   2.136 -    	    prev_inf->time_slept -= mcus * prev_inf->mcu_advance;
   2.137 -	    }
   2.138 -	    else
   2.139 -	    {
   2.140 -	        prev_inf->avt += mcus * prev_inf->mcu_advance - LAST_VTB(cpu);
   2.141 -		
   2.142 -	        ASSERT(prev_inf->time_slept >= LAST_VTB(cpu));
   2.143 -	        prev_inf->time_slept -= LAST_VTB(cpu);
   2.144 - 	    }*/
   2.145 +          {
   2.146 +          ASSERT(prev_inf->time_slept >= mcus * prev_inf->mcu_advance);
   2.147 +          prev_inf->time_slept -= mcus * prev_inf->mcu_advance;
   2.148 +          }
   2.149 +          else
   2.150 +          {
   2.151 +          prev_inf->avt += mcus * prev_inf->mcu_advance - LAST_VTB(cpu);
   2.152 +  
   2.153 +          ASSERT(prev_inf->time_slept >= LAST_VTB(cpu));
   2.154 +          prev_inf->time_slept -= LAST_VTB(cpu);
   2.155 +          }*/
   2.156          
   2.157          __calc_evt(prev_inf);
   2.158          
   2.159 @@ -413,7 +377,7 @@ static task_slice_t fbvt_do_schedule(s_t
   2.160      }
   2.161  
   2.162  
   2.163 -   /*
   2.164 +    /*
   2.165       * In here we decide on Virtual Time Bonus. The idea is, for the
   2.166       * domains that have large time_slept values to be allowed to run
   2.167       * for longer. Thus regaining the share of CPU originally allocated.
   2.168 @@ -514,30 +478,85 @@ int fbvt_init_scheduler()
   2.169      return 0;
   2.170  }
   2.171  
   2.172 -static void fbvt_pause(struct domain *p)
   2.173 +static void fbvt_sleep(struct domain *d)
   2.174  {
   2.175 -    if( __task_on_runqueue(p) )
   2.176 -    {
   2.177 -        __del_from_runqueue(p);
   2.178 -    }
   2.179 +    if ( test_bit(DF_RUNNING, &d->flags) )
   2.180 +        cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
   2.181 +    else if ( __task_on_runqueue(d) )
   2.182 +        __del_from_runqueue(d);
   2.183  }
   2.184  
   2.185 -static void fbvt_unpause(struct domain *p)
   2.186 +static void fbvt_wake(struct domain *d)
   2.187  {
   2.188 -	struct fbvt_dom_info *inf = FBVT_INFO(p);
   2.189 +    struct fbvt_dom_info *inf = FBVT_INFO(d);
   2.190 +    struct domain        *curr;
   2.191 +    s_time_t              now, min_time;
   2.192 +    int                   cpu = d->processor;
   2.193 +    s32                   io_warp;
   2.194 +
   2.195 +    /* If on the runqueue already then someone has done the wakeup work. */
   2.196 +    if ( unlikely(__task_on_runqueue(d)) )
   2.197 +        return;
   2.198  
   2.199 -	if ( p->domain == IDLE_DOMAIN_ID )
   2.200 +    __add_to_runqueue_head(d);
   2.201 +
   2.202 +    now = NOW();
   2.203 +
   2.204 +#if 0
   2.205 +    /*
   2.206 +     * XXX KAF: This was fbvt_unpause(). Not sure if it's the right thing
   2.207 +     * to do, in light of the stuff that fbvt_wake_up() does.
   2.208 +     * e.g., setting 'inf->avt = CPU_SVT(cpu);' would make the later test
   2.209 +     * 'inf->avt < CPU_SVT(cpu)' redundant!
   2.210 +     */
   2.211 +    if ( d->domain == IDLE_DOMAIN_ID )
   2.212      {
   2.213          inf->avt = inf->evt = ~0U;
   2.214      } 
   2.215      else 
   2.216      {
   2.217          /* Set avt to system virtual time. */
   2.218 -        inf->avt         = CPU_SVT(p->processor);
   2.219 +        inf->avt = CPU_SVT(cpu);
   2.220          /* Set some default values here. */
   2.221 -		LAST_VTB(p->processor) = 0;
   2.222 -		__calc_evt(inf);
   2.223 +        LAST_VTB(cpu) = 0;
   2.224 +        __calc_evt(inf);
   2.225      }
   2.226 +#endif
   2.227 +
   2.228 +    /* Set the BVT parameters. */
   2.229 +    if ( inf->avt < CPU_SVT(cpu) )
   2.230 +    {
   2.231 +        /*
    2.232 +         * We want IO bound processes to gain dispatch precedence. This is 
    2.233 +         * especially important for device driver domains. Therefore AVT should 
    2.234 +         * not be updated to SVT but to a value marginally smaller.
   2.235 +         * Since frequently sleeping domains have high time_slept
   2.236 +         * values, the virtual time can be determined as:
   2.237 +         * SVT - const * TIME_SLEPT
   2.238 +         */
   2.239 +        io_warp = (int)(0.5 * inf->time_slept);
   2.240 +        if ( io_warp > 1000 )
   2.241 +            io_warp = 1000;
   2.242 +
   2.243 +        ASSERT(inf->time_slept + CPU_SVT(cpu) > inf->avt + io_warp);
   2.244 +        inf->time_slept += CPU_SVT(cpu) - inf->avt - io_warp;
   2.245 +        inf->avt = CPU_SVT(cpu) - io_warp;
   2.246 +    }
   2.247 +
   2.248 +    /* Deal with warping here. */
   2.249 +    inf->warpback  = 1;
   2.250 +    inf->warped    = now;
   2.251 +    __calc_evt(inf);
   2.252 +
   2.253 +    curr = schedule_data[cpu].curr;
   2.254 +
   2.255 +    /* Currently-running domain should run at least for ctx_allow. */
   2.256 +    min_time = curr->lastschd + curr->min_slice;
   2.257 +    
   2.258 +    if ( is_idle_task(curr) || (min_time <= now) )
   2.259 +        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
   2.260 +    else if ( schedule_data[cpu].s_timer.expires > (min_time + TIME_SLOP) )
   2.261 +        mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
   2.262  }
   2.263  
   2.264  struct scheduler sched_fbvt_def = {
   2.265 @@ -549,7 +568,6 @@ struct scheduler sched_fbvt_def = {
   2.266      .alloc_task     = fbvt_alloc_task,
   2.267      .add_task       = fbvt_add_task,
   2.268      .free_task      = fbvt_free_task,
   2.269 -    .wake_up        = fbvt_wake_up,
   2.270      .do_block       = fbvt_do_block,
   2.271      .do_schedule    = fbvt_do_schedule,
   2.272      .control        = fbvt_ctl,
   2.273 @@ -557,7 +575,7 @@ struct scheduler sched_fbvt_def = {
   2.274      .dump_settings  = fbvt_dump_settings,
   2.275      .dump_cpu_state = fbvt_dump_cpu_state,
   2.276      .dump_runq_el   = fbvt_dump_runq_el,
   2.277 -    .pause          = fbvt_pause,
   2.278 -    .unpause	    = fbvt_unpause,
   2.279 +    .sleep          = fbvt_sleep,
   2.280 +    .wake           = fbvt_wake,
   2.281  };
   2.282  
     3.1 --- a/xen/common/sched_rrobin.c	Thu Jul 22 18:18:54 2004 +0000
     3.2 +++ b/xen/common/sched_rrobin.c	Fri Jul 23 00:38:05 2004 +0000
     3.3 @@ -8,8 +8,11 @@
     3.4  #include <xen/sched-if.h>
     3.5  #include <hypervisor-ifs/sched_ctl.h>
     3.6  #include <xen/ac_timer.h>
     3.7 +#include <xen/softirq.h>
     3.8  #include <xen/time.h>
     3.9  
    3.10 +#define TIME_SLOP      (s32)MICROSECS(50)     /* allow time to slip a bit */
    3.11 +
    3.12  static s_time_t rr_slice = MILLISECS(10);
    3.13  
    3.14  static task_slice_t rr_do_schedule(s_time_t now)
    3.15 @@ -33,7 +36,7 @@ static task_slice_t rr_do_schedule(s_tim
    3.16  
    3.17  static int rr_ctl(struct sched_ctl_cmd *cmd)
    3.18  {
    3.19 -    if(cmd->direction == SCHED_INFO_PUT)
    3.20 +    if ( cmd->direction == SCHED_INFO_PUT )
    3.21      {
    3.22          rr_slice = cmd->u.rrobin.slice;
    3.23      }
    3.24 @@ -50,10 +53,37 @@ static void rr_dump_settings()
    3.25      printk("rr_slice = %llu ", rr_slice);
    3.26  }
    3.27  
    3.28 -static void rr_pause(struct domain *p)
    3.29 +static void rr_sleep(struct domain *d)
    3.30 +{
    3.31 +    if ( test_bit(DF_RUNNING, &d->flags) )
    3.32 +        cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
    3.33 +    else if ( __task_on_runqueue(d) )
    3.34 +        __del_from_runqueue(d);
    3.35 +}
    3.36 +
    3.37 +void rr_wake(struct domain *d)
    3.38  {
    3.39 -    if ( __task_on_runqueue(p) )
    3.40 -        __del_from_runqueue(p);
    3.41 +    struct domain       *curr;
    3.42 +    s_time_t             now, min_time;
    3.43 +    int                  cpu = d->processor;
    3.44 +
    3.45 +    /* If on the runqueue already then someone has done the wakeup work. */
    3.46 +    if ( unlikely(__task_on_runqueue(d)) )
    3.47 +        return;
    3.48 +
    3.49 +    __add_to_runqueue_head(d);
    3.50 +
    3.51 +    now = NOW();
    3.52 +
    3.53 +    curr = schedule_data[cpu].curr;
    3.54 +
    3.55 +    /* Currently-running domain should run at least for ctx_allow. */
    3.56 +    min_time = curr->lastschd + curr->min_slice;
    3.57 +    
    3.58 +    if ( is_idle_task(curr) || (min_time <= now) )
    3.59 +        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
    3.60 +    else if ( schedule_data[cpu].s_timer.expires > (min_time + TIME_SLOP) )
    3.61 +        mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
    3.62  }
    3.63  
    3.64  struct scheduler sched_rrobin_def = {
    3.65 @@ -61,11 +91,11 @@ struct scheduler sched_rrobin_def = {
    3.66      .opt_name = "rrobin",
    3.67      .sched_id = SCHED_RROBIN,
    3.68  
    3.69 -    .wake_up        = __add_to_runqueue_head,
    3.70      .do_schedule    = rr_do_schedule,
    3.71      .control        = rr_ctl,
    3.72      .dump_settings  = rr_dump_settings,
    3.73 -    .pause          = rr_pause,
    3.74 +    .sleep          = rr_sleep,
    3.75 +    .wake           = rr_wake,
    3.76  };
    3.77  
    3.78  
     4.1 --- a/xen/common/schedule.c	Thu Jul 22 18:18:54 2004 +0000
     4.2 +++ b/xen/common/schedule.c	Fri Jul 23 00:38:05 2004 +0000
     4.3 @@ -178,12 +178,7 @@ void domain_sleep(struct domain *d)
     4.4  
     4.5      spin_lock_irqsave(&schedule_lock[cpu], flags);
     4.6      if ( likely(!domain_runnable(d)) )
     4.7 -    {
     4.8 -        if ( test_bit(DF_RUNNING, &d->flags) )
     4.9 -            cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
    4.10 -        else if ( __task_on_runqueue(d) )
    4.11 -            __del_from_runqueue(d);
    4.12 -    }
    4.13 +        SCHED_OP(sleep, d);
    4.14      spin_unlock_irqrestore(&schedule_lock[cpu], flags);
    4.15  
    4.16      /* Synchronous. */
    4.17 @@ -198,53 +193,19 @@ void domain_wake(struct domain *d)
    4.18  {
    4.19      unsigned long       flags;
    4.20      int                 cpu = d->processor;
    4.21 -    struct domain      *curr;
    4.22 -    s_time_t            now, min_time;
    4.23  
    4.24      spin_lock_irqsave(&schedule_lock[cpu], flags);
    4.25 -
    4.26 -    if ( likely(domain_runnable(d)) && likely(!__task_on_runqueue(d)) )
    4.27 +    if ( likely(domain_runnable(d)) )
    4.28      {
    4.29 -        TRACE_2D(TRC_SCHED_WAKE,d->domain, d);
    4.30 -        SCHED_OP(wake_up, d);
    4.31 +        TRACE_2D(TRC_SCHED_WAKE, d->domain, d);
    4.32 +        SCHED_OP(wake, d);
    4.33  #ifdef WAKE_HISTO
    4.34 -        p->wokenup = NOW();
    4.35 +        d->wokenup = NOW();
    4.36  #endif
    4.37 -
    4.38 -        now = NOW();
    4.39 -        curr = schedule_data[cpu].curr;
    4.40 -
    4.41 -        /* Currently-running domain should run at least for ctx_allow. */
    4.42 -        min_time = curr->lastschd + curr->min_slice;
    4.43 -
    4.44 -        if ( is_idle_task(curr) || (min_time <= now) )
    4.45 -            cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
    4.46 -        else if ( schedule_data[cpu].s_timer.expires > (min_time + TIME_SLOP) )
    4.47 -            mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
    4.48      }
    4.49 -
    4.50      spin_unlock_irqrestore(&schedule_lock[cpu], flags);
    4.51  }
    4.52  
    4.53 -/*
    4.54 - * Pausing a domain.
    4.55 - */
    4.56 -void pause_domain(struct domain *domain)
    4.57 -{
    4.58 -	domain_sleep(domain);
    4.59 -	SCHED_OP(pause, domain);	
    4.60 -}
    4.61 -
    4.62 -
    4.63 -/*
    4.64 - * Unpauseing a domain
    4.65 - */
    4.66 -void unpause_domain(struct domain *domain)
    4.67 -{
    4.68 -	SCHED_OP(unpause, domain);
    4.69 -	domain_wake(domain);
    4.70 -}
    4.71 -
    4.72  /* Block the currently-executing domain until a pertinent event occurs. */
    4.73  long do_block(void)
    4.74  {
     5.1 --- a/xen/include/xen/sched-if.h	Thu Jul 22 18:18:54 2004 +0000
     5.2 +++ b/xen/include/xen/sched-if.h	Fri Jul 23 00:38:05 2004 +0000
     5.3 @@ -39,7 +39,8 @@ struct scheduler
     5.4      void         (*add_task)       (struct domain *);
     5.5      void         (*free_task)      (struct domain *);
     5.6      void         (*rem_task)       (struct domain *);
     5.7 -    void         (*wake_up)        (struct domain *);
     5.8 +    void         (*sleep)          (struct domain *);
     5.9 +    void         (*wake)           (struct domain *);
    5.10      void         (*do_block)       (struct domain *);
    5.11      task_slice_t (*do_schedule)    (s_time_t);
    5.12      int          (*control)        (struct sched_ctl_cmd *);
    5.13 @@ -49,8 +50,6 @@ struct scheduler
    5.14      void         (*dump_cpu_state) (int);
    5.15      void         (*dump_runq_el)   (struct domain *);
    5.16      int          (*prn_state)      (int);
    5.17 -    void         (*pause)          (struct domain *);
    5.18 -	void		 (*unpause)		   (struct domain *);
    5.19  };
    5.20  
    5.21  /* per CPU scheduler information */
     6.1 --- a/xen/include/xen/sched.h	Thu Jul 22 18:18:54 2004 +0000
     6.2 +++ b/xen/include/xen/sched.h	Fri Jul 23 00:38:05 2004 +0000
     6.3 @@ -210,8 +210,6 @@ int  sched_id();
     6.4  void init_idle_task(void);
     6.5  void domain_wake(struct domain *d);
     6.6  void domain_sleep(struct domain *d);
     6.7 -void pause_domain(struct domain *d);
     6.8 -void unpause_domain(struct domain *d);
     6.9  
    6.10  void __enter_scheduler(void);
    6.11  
    6.12 @@ -262,14 +260,14 @@ static inline void domain_pause(struct d
    6.13  {
    6.14      ASSERT(d != current);
    6.15      atomic_inc(&d->pausecnt);
    6.16 -    pause_domain(d);
    6.17 +    domain_sleep(d);
    6.18  }
    6.19  
    6.20  static inline void domain_unpause(struct domain *d)
    6.21  {
    6.22      ASSERT(d != current);
    6.23      if ( atomic_dec_and_test(&d->pausecnt) )
    6.24 -        unpause_domain(d);
    6.25 +        domain_wake(d);
    6.26  }
    6.27  
    6.28  static inline void domain_unblock(struct domain *d)
    6.29 @@ -282,13 +280,13 @@ static inline void domain_pause_by_syste
    6.30  {
    6.31      ASSERT(d != current);
    6.32      if ( !test_and_set_bit(DF_CTRLPAUSE, &d->flags) )
    6.33 -        pause_domain(d);
    6.34 +        domain_sleep(d);
    6.35  }
    6.36  
    6.37  static inline void domain_unpause_by_systemcontroller(struct domain *d)
    6.38  {
    6.39      if ( test_and_clear_bit(DF_CTRLPAUSE, &d->flags) )
    6.40 -        unpause_domain(d);
    6.41 +        domain_wake(d);
    6.42  }
    6.43  
    6.44