ia64/xen-unstable

changeset 1884:381b2b637b12

bitkeeper revision 1.1123 (4100e6bayo7vN1TCcMOno-0X4Q6suA)

Merge freefall.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into freefall.cl.cam.ac.uk:/auto/groups/xeno/users/cl349/BK/xeno.bk-ptrw
author cl349@freefall.cl.cam.ac.uk
date Fri Jul 23 10:21:46 2004 +0000 (2004-07-23)
parents ce3ac2fde5dd 51f8d2380c82
children a50c804416c5
files xen/arch/x86/memory.c xen/common/memory.c xen/common/sched_bvt.c xen/common/sched_fair_bvt.c xen/common/sched_rrobin.c xen/common/schedule.c xen/include/xen/sched-if.h xen/include/xen/sched.h
line diff
     1.1 --- a/xen/arch/x86/memory.c	Fri Jul 23 10:20:56 2004 +0000
     1.2 +++ b/xen/arch/x86/memory.c	Fri Jul 23 10:21:46 2004 +0000
     1.3 @@ -164,9 +164,8 @@ static void __invalidate_shadow_ldt(stru
     1.4  }
     1.5  
     1.6  
     1.7 -static inline void invalidate_shadow_ldt(void)
     1.8 +static inline void invalidate_shadow_ldt(struct domain *d)
     1.9  {
    1.10 -    struct domain *d = current;
    1.11      if ( d->mm.shadow_ldt_mapcnt != 0 )
    1.12          __invalidate_shadow_ldt(d);
    1.13  }
    1.14 @@ -387,7 +386,7 @@ static void put_page_from_l1e(l1_pgentry
    1.15          if ( unlikely(((page->type_and_flags & PGT_type_mask) == 
    1.16                         PGT_ldt_page)) &&
    1.17               unlikely(((page->type_and_flags & PGT_count_mask) != 0)) )
    1.18 -            invalidate_shadow_ldt();
    1.19 +            invalidate_shadow_ldt(page->u.domain);
    1.20          put_page(page);
    1.21      }
    1.22  }
    1.23 @@ -764,7 +763,7 @@ static int do_extended_command(unsigned 
    1.24          okay = get_page_and_type_from_pagenr(pfn, PGT_l2_page_table, d);
    1.25          if ( likely(okay) )
    1.26          {
    1.27 -            invalidate_shadow_ldt();
    1.28 +            invalidate_shadow_ldt(d);
    1.29  
    1.30              percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
    1.31              old_base_pfn = pagetable_val(d->mm.pagetable) >> PAGE_SHIFT;
    1.32 @@ -811,7 +810,7 @@ static int do_extended_command(unsigned 
    1.33          else if ( (d->mm.ldt_ents != ents) || 
    1.34                    (d->mm.ldt_base != ptr) )
    1.35          {
    1.36 -            invalidate_shadow_ldt();
    1.37 +            invalidate_shadow_ldt(d);
    1.38              d->mm.ldt_base = ptr;
    1.39              d->mm.ldt_ents = ents;
    1.40              load_LDT(d);
     2.1 --- a/xen/common/memory.c	Fri Jul 23 10:20:56 2004 +0000
     2.2 +++ b/xen/common/memory.c	Fri Jul 23 10:21:46 2004 +0000
     2.3 @@ -80,15 +80,13 @@ void __init init_frametable(void *framet
     2.4  
     2.5  void add_to_domain_alloc_list(unsigned long ps, unsigned long pe)
     2.6  {
     2.7 -    struct pfn_info *pf;
     2.8      unsigned long i;
     2.9      unsigned long flags;
    2.10  
    2.11      spin_lock_irqsave(&free_list_lock, flags);
    2.12      for ( i = ps >> PAGE_SHIFT; i < (pe >> PAGE_SHIFT); i++ )
    2.13      {
    2.14 -        pf = list_entry(&frame_table[i].list, struct pfn_info, list);
    2.15 -        list_add_tail(&pf->list, &free_list);
    2.16 +        list_add_tail(&frame_table[i].list, &free_list);
    2.17          free_pfns++;
    2.18      }
    2.19      spin_unlock_irqrestore(&free_list_lock, flags);
     3.1 --- a/xen/common/sched_bvt.c	Fri Jul 23 10:20:56 2004 +0000
     3.2 +++ b/xen/common/sched_bvt.c	Fri Jul 23 10:21:46 2004 +0000
     3.3 @@ -25,6 +25,7 @@
     3.4  #include <xen/perfc.h>
     3.5  #include <xen/sched-if.h>
     3.6  #include <xen/slab.h>
     3.7 +#include <xen/softirq.h>
     3.8  
     3.9  /* all per-domain BVT-specific scheduling info is stored here */
    3.10  struct bvt_dom_info
    3.11 @@ -145,24 +146,6 @@ void bvt_free_task(struct domain *p)
    3.12  }
    3.13  
    3.14  
    3.15 -void bvt_wake_up(struct domain *p)
    3.16 -{
    3.17 -    struct bvt_dom_info *inf = BVT_INFO(p);
    3.18 -
    3.19 -    ASSERT(inf != NULL);
    3.20 -    
    3.21 -
    3.22 -    /* set the BVT parameters */
    3.23 -    if (inf->avt < CPU_SVT(p->processor))
    3.24 -        inf->avt = CPU_SVT(p->processor);
    3.25 -
    3.26 -    /* deal with warping here */
    3.27 -    inf->warpback  = 1;
    3.28 -    inf->warped    = NOW();
    3.29 -    __calc_evt(inf);
    3.30 -    __add_to_runqueue_head(p);
    3.31 -}
    3.32 -
    3.33  /* 
    3.34   * Block the currently-executing domain until a pertinent event occurs.
    3.35   */
    3.36 @@ -433,10 +416,47 @@ int bvt_init_scheduler()
    3.37      return 0;
    3.38  }
    3.39  
    3.40 -static void bvt_pause(struct domain *p)
    3.41 +static void bvt_sleep(struct domain *d)
    3.42 +{
    3.43 +    if ( test_bit(DF_RUNNING, &d->flags) )
    3.44 +        cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
    3.45 +    else if ( __task_on_runqueue(d) )
    3.46 +        __del_from_runqueue(d);
    3.47 +}
    3.48 +
    3.49 +void bvt_wake(struct domain *d)
    3.50  {
    3.51 -    if( __task_on_runqueue(p) )
    3.52 -        __del_from_runqueue(p);
    3.53 +    struct bvt_dom_info *inf = BVT_INFO(d);
    3.54 +    struct domain       *curr;
    3.55 +    s_time_t             now, min_time;
    3.56 +    int                  cpu = d->processor;
    3.57 +
    3.58 +    /* If on the runqueue already then someone has done the wakeup work. */
    3.59 +    if ( unlikely(__task_on_runqueue(d)) )
    3.60 +        return;
    3.61 +
    3.62 +    __add_to_runqueue_head(d);
    3.63 +
    3.64 +    now = NOW();
    3.65 +
    3.66 +    /* Set the BVT parameters. */
    3.67 +    if ( inf->avt < CPU_SVT(cpu) )
    3.68 +        inf->avt = CPU_SVT(cpu);
    3.69 +
    3.70 +    /* Deal with warping here. */
    3.71 +    inf->warpback  = 1;
    3.72 +    inf->warped    = now;
    3.73 +    __calc_evt(inf);
    3.74 +
    3.75 +    curr = schedule_data[cpu].curr;
    3.76 +
    3.77 +    /* Currently-running domain should run at least for ctx_allow. */
    3.78 +    min_time = curr->lastschd + curr->min_slice;
    3.79 +    
    3.80 +    if ( is_idle_task(curr) || (min_time <= now) )
    3.81 +        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
    3.82 +    else if ( schedule_data[cpu].s_timer.expires > (min_time + TIME_SLOP) )
    3.83 +        mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
    3.84  }
    3.85  
    3.86  struct scheduler sched_bvt_def = {
    3.87 @@ -448,7 +468,6 @@ struct scheduler sched_bvt_def = {
    3.88      .alloc_task     = bvt_alloc_task,
    3.89      .add_task       = bvt_add_task,
    3.90      .free_task      = bvt_free_task,
    3.91 -    .wake_up        = bvt_wake_up,
    3.92      .do_block       = bvt_do_block,
    3.93      .do_schedule    = bvt_do_schedule,
    3.94      .control        = bvt_ctl,
    3.95 @@ -456,6 +475,7 @@ struct scheduler sched_bvt_def = {
    3.96      .dump_settings  = bvt_dump_settings,
    3.97      .dump_cpu_state = bvt_dump_cpu_state,
    3.98      .dump_runq_el   = bvt_dump_runq_el,
    3.99 -    .pause          = bvt_pause,
   3.100 +    .sleep          = bvt_sleep,
   3.101 +    .wake           = bvt_wake,
   3.102  };
   3.103  
     4.1 --- a/xen/common/sched_fair_bvt.c	Fri Jul 23 10:20:56 2004 +0000
     4.2 +++ b/xen/common/sched_fair_bvt.c	Fri Jul 23 10:21:46 2004 +0000
     4.3 @@ -26,20 +26,20 @@
     4.4  #include <xen/perfc.h>
     4.5  #include <xen/sched-if.h>
     4.6  #include <xen/slab.h>
     4.7 +#include <xen/softirq.h>
     4.8  #include <xen/trace.h>
     4.9  
     4.10  /* For tracing - TODO - put all the defines in some common header file */
    4.11  #define TRC_SCHED_FBVT_DO_SCHED             0x00020000
    4.12  #define TRC_SCHED_FBVT_DO_SCHED_UPDATE      0x00020001
    4.13  
    4.14 -
    4.15  /* all per-domain BVT-specific scheduling info is stored here */
    4.16  struct fbvt_dom_info
    4.17  {
    4.18      unsigned long mcu_advance;      /* inverse of weight */
    4.19      u32           avt;              /* actual virtual time */
    4.20      u32           evt;              /* effective virtual time */
    4.21 -    u32		      time_slept;	    /* records amount of time slept, used for scheduling */
    4.22 +    u32           time_slept;       /* amount of time slept */
    4.23      int           warpback;         /* warp?  */
    4.24      long          warp;             /* virtual time warp */
    4.25      long          warpl;            /* warp limit */
    4.26 @@ -50,13 +50,13 @@ struct fbvt_dom_info
    4.27  
    4.28  struct fbvt_cpu_info
    4.29  {
    4.30 -    unsigned long svt; /* XXX check this is unsigned long! */
    4.31 -    u32		      vtb;	    	    /* virtual time bonus */
    4.32 -    u32           r_time;           /* last time to run */  
    4.33 +    unsigned long svt;       /* XXX check this is unsigned long! */
    4.34 +    u32           vtb;       /* virtual time bonus */
    4.35 +    u32           r_time;    /* last time to run */  
    4.36  };
    4.37  
    4.38  
    4.39 -#define FBVT_INFO(p)   ((struct fbvt_dom_info *)(p)->sched_priv)
    4.40 +#define FBVT_INFO(p)  ((struct fbvt_dom_info *)(p)->sched_priv)
    4.41  #define CPU_INFO(cpu) ((struct fbvt_cpu_info *)(schedule_data[cpu]).sched_priv)
    4.42  #define CPU_SVT(cpu)  (CPU_INFO(cpu)->svt)
    4.43  #define LAST_VTB(cpu) (CPU_INFO(cpu)->vtb)
    4.44 @@ -137,7 +137,7 @@ void fbvt_add_task(struct domain *p)
    4.45          inf->avt         = CPU_SVT(p->processor);
    4.46          inf->evt         = CPU_SVT(p->processor);
    4.47          /* Set some default values here. */
    4.48 -		inf->time_slept  = 0;
    4.49 +        inf->time_slept  = 0;
    4.50          inf->warpback    = 0;
    4.51          inf->warp        = 0;
    4.52          inf->warpl       = 0;
    4.53 @@ -157,43 +157,6 @@ void fbvt_free_task(struct domain *p)
    4.54      kmem_cache_free( dom_info_cache, p->sched_priv );
    4.55  }
    4.56  
    4.57 -
    4.58 -void fbvt_wake_up(struct domain *p)
    4.59 -{
    4.60 -    struct fbvt_dom_info *inf = FBVT_INFO(p);
    4.61 -    s32 io_warp;
    4.62 -
    4.63 -    ASSERT(inf != NULL);
    4.64 -    
    4.65 -
    4.66 -    /* set the BVT parameters */
    4.67 -    if (inf->avt < CPU_SVT(p->processor))
    4.68 -    {
    4.69 -		/*
    4.70 -	  	 *We want IO bound processes to gain
    4.71 -		 *dispatch precedence. It is especially for
    4.72 -		 *device driver domains. Therefore AVT should not be updated
    4.73 -		 *to SVT but to a value marginally smaller.
    4.74 -		 *Since frequently sleeping domains have high time_slept
    4.75 -		 *values, the virtual time can be determined as:
    4.76 -		 *SVT - const * TIME_SLEPT
    4.77 -	 	 */
    4.78 -	
    4.79 -		io_warp = (int)(0.5 * inf->time_slept);
    4.80 -		if(io_warp > 1000) io_warp = 1000;
    4.81 -
    4.82 -		ASSERT(inf->time_slept + CPU_SVT(p->processor) > inf->avt + io_warp);
    4.83 -		inf->time_slept += CPU_SVT(p->processor) - inf->avt - io_warp;
    4.84 -        inf->avt = CPU_SVT(p->processor) - io_warp;
    4.85 -    }
    4.86 -
    4.87 -    /* deal with warping here */
    4.88 -    inf->warpback  = 1;
    4.89 -    inf->warped    = NOW();
    4.90 -    __calc_evt(inf);
    4.91 -    __add_to_runqueue_head(p);
    4.92 -}
    4.93 -
    4.94  /* 
    4.95   * Block the currently-executing domain until a pertinent event occurs.
    4.96   */
    4.97 @@ -223,7 +186,7 @@ int fbvt_ctl(struct sched_ctl_cmd *cmd)
    4.98  
    4.99  /* Adjust scheduling parameter for a given domain. */
   4.100  int fbvt_adjdom(struct domain *p,
   4.101 -               struct sched_adjdom_cmd *cmd)
   4.102 +                struct sched_adjdom_cmd *cmd)
   4.103  {
   4.104      struct fbvt_adjdom *params = &cmd->u.fbvt;
   4.105      unsigned long flags;
   4.106 @@ -292,10 +255,10 @@ static task_slice_t fbvt_do_schedule(s_t
   4.107      s32                 mcus;
   4.108      u32                 next_evt, next_prime_evt, min_avt;
   4.109      u32                 sl_decrement;
   4.110 -    struct fbvt_dom_info *prev_inf       = FBVT_INFO(prev),
   4.111 -                        *p_inf          = NULL,
   4.112 -                        *next_inf       = NULL,
   4.113 -                        *next_prime_inf = NULL;
   4.114 +    struct fbvt_dom_info *prev_inf       = FBVT_INFO(prev);
   4.115 +    struct fbvt_dom_info *p_inf          = NULL;
   4.116 +    struct fbvt_dom_info *next_inf       = NULL;
   4.117 +    struct fbvt_dom_info *next_prime_inf = NULL;
   4.118      task_slice_t        ret;
   4.119  
   4.120      ASSERT(prev->sched_priv != NULL);
   4.121 @@ -307,24 +270,25 @@ static task_slice_t fbvt_do_schedule(s_t
   4.122          /* Calculate mcu and update avt. */
   4.123          mcus = (ranfor + MCU - 1) / MCU;
   4.124          
   4.125 -        TRACE_3D(TRC_SCHED_FBVT_DO_SCHED_UPDATE, prev->domain, mcus, LAST_VTB(cpu));
   4.126 +        TRACE_3D(TRC_SCHED_FBVT_DO_SCHED_UPDATE, prev->domain, 
   4.127 +                 mcus, LAST_VTB(cpu));
   4.128      
   4.129          sl_decrement = mcus * LAST_VTB(cpu) / R_TIME(cpu);
   4.130          prev_inf->time_slept -=  sl_decrement;
   4.131          prev_inf->avt += mcus * prev_inf->mcu_advance - sl_decrement;
   4.132    
   4.133          /*if(mcus * prev_inf->mcu_advance < LAST_VTB(cpu))
   4.134 -	    {
   4.135 -	        ASSERT(prev_inf->time_slept >= mcus * prev_inf->mcu_advance);
   4.136 -    	    prev_inf->time_slept -= mcus * prev_inf->mcu_advance;
   4.137 -	    }
   4.138 -	    else
   4.139 -	    {
   4.140 -	        prev_inf->avt += mcus * prev_inf->mcu_advance - LAST_VTB(cpu);
   4.141 -		
   4.142 -	        ASSERT(prev_inf->time_slept >= LAST_VTB(cpu));
   4.143 -	        prev_inf->time_slept -= LAST_VTB(cpu);
   4.144 - 	    }*/
   4.145 +          {
   4.146 +          ASSERT(prev_inf->time_slept >= mcus * prev_inf->mcu_advance);
   4.147 +          prev_inf->time_slept -= mcus * prev_inf->mcu_advance;
   4.148 +          }
   4.149 +          else
   4.150 +          {
   4.151 +          prev_inf->avt += mcus * prev_inf->mcu_advance - LAST_VTB(cpu);
   4.152 +  
   4.153 +          ASSERT(prev_inf->time_slept >= LAST_VTB(cpu));
   4.154 +          prev_inf->time_slept -= LAST_VTB(cpu);
   4.155 +          }*/
   4.156          
   4.157          __calc_evt(prev_inf);
   4.158          
   4.159 @@ -413,7 +377,7 @@ static task_slice_t fbvt_do_schedule(s_t
   4.160      }
   4.161  
   4.162  
   4.163 -   /*
   4.164 +    /*
   4.165       * In here we decide on Virtual Time Bonus. The idea is, for the
   4.166       * domains that have large time_slept values to be allowed to run
   4.167       * for longer. Thus regaining the share of CPU originally allocated.
   4.168 @@ -514,30 +478,85 @@ int fbvt_init_scheduler()
   4.169      return 0;
   4.170  }
   4.171  
   4.172 -static void fbvt_pause(struct domain *p)
   4.173 +static void fbvt_sleep(struct domain *d)
   4.174  {
   4.175 -    if( __task_on_runqueue(p) )
   4.176 -    {
   4.177 -        __del_from_runqueue(p);
   4.178 -    }
   4.179 +    if ( test_bit(DF_RUNNING, &d->flags) )
   4.180 +        cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
   4.181 +    else if ( __task_on_runqueue(d) )
   4.182 +        __del_from_runqueue(d);
   4.183  }
   4.184  
   4.185 -static void fbvt_unpause(struct domain *p)
   4.186 +static void fbvt_wake(struct domain *d)
   4.187  {
   4.188 -	struct fbvt_dom_info *inf = FBVT_INFO(p);
   4.189 +    struct fbvt_dom_info *inf = FBVT_INFO(d);
   4.190 +    struct domain        *curr;
   4.191 +    s_time_t              now, min_time;
   4.192 +    int                   cpu = d->processor;
   4.193 +    s32                   io_warp;
   4.194 +
   4.195 +    /* If on the runqueue already then someone has done the wakeup work. */
   4.196 +    if ( unlikely(__task_on_runqueue(d)) )
   4.197 +        return;
   4.198  
   4.199 -	if ( p->domain == IDLE_DOMAIN_ID )
   4.200 +    __add_to_runqueue_head(d);
   4.201 +
   4.202 +    now = NOW();
   4.203 +
   4.204 +#if 0
   4.205 +    /*
   4.206 +     * XXX KAF: This was fbvt_unpause(). Not sure if it's the right thing
   4.207 +     * to do, in light of the stuff that fbvt_wake_up() does.
   4.208 +     * e.g., setting 'inf->avt = CPU_SVT(cpu);' would make the later test
   4.209 +     * 'inf->avt < CPU_SVT(cpu)' redundant!
   4.210 +     */
   4.211 +    if ( d->domain == IDLE_DOMAIN_ID )
   4.212      {
   4.213          inf->avt = inf->evt = ~0U;
   4.214      } 
   4.215      else 
   4.216      {
   4.217          /* Set avt to system virtual time. */
   4.218 -        inf->avt         = CPU_SVT(p->processor);
   4.219 +        inf->avt = CPU_SVT(cpu);
   4.220          /* Set some default values here. */
   4.221 -		LAST_VTB(p->processor) = 0;
   4.222 -		__calc_evt(inf);
   4.223 +        LAST_VTB(cpu) = 0;
   4.224 +        __calc_evt(inf);
   4.225      }
   4.226 +#endif
   4.227 +
   4.228 +    /* Set the BVT parameters. */
   4.229 +    if ( inf->avt < CPU_SVT(cpu) )
   4.230 +    {
   4.231 +        /*
   4.232 +         * We want IO bound processes to gain dispatch precedence. It is 
    4.233 +         * especially for device driver domains. Therefore AVT should
    4.234 +         * not be updated to SVT but to a value marginally smaller.
   4.235 +         * Since frequently sleeping domains have high time_slept
   4.236 +         * values, the virtual time can be determined as:
   4.237 +         * SVT - const * TIME_SLEPT
   4.238 +         */
   4.239 +        io_warp = (int)(0.5 * inf->time_slept);
   4.240 +        if ( io_warp > 1000 )
   4.241 +            io_warp = 1000;
   4.242 +
   4.243 +        ASSERT(inf->time_slept + CPU_SVT(cpu) > inf->avt + io_warp);
   4.244 +        inf->time_slept += CPU_SVT(cpu) - inf->avt - io_warp;
   4.245 +        inf->avt = CPU_SVT(cpu) - io_warp;
   4.246 +    }
   4.247 +
   4.248 +    /* Deal with warping here. */
   4.249 +    inf->warpback  = 1;
   4.250 +    inf->warped    = now;
   4.251 +    __calc_evt(inf);
   4.252 +
   4.253 +    curr = schedule_data[cpu].curr;
   4.254 +
   4.255 +    /* Currently-running domain should run at least for ctx_allow. */
   4.256 +    min_time = curr->lastschd + curr->min_slice;
   4.257 +    
   4.258 +    if ( is_idle_task(curr) || (min_time <= now) )
   4.259 +        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
   4.260 +    else if ( schedule_data[cpu].s_timer.expires > (min_time + TIME_SLOP) )
   4.261 +        mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
   4.262  }
   4.263  
   4.264  struct scheduler sched_fbvt_def = {
   4.265 @@ -549,7 +568,6 @@ struct scheduler sched_fbvt_def = {
   4.266      .alloc_task     = fbvt_alloc_task,
   4.267      .add_task       = fbvt_add_task,
   4.268      .free_task      = fbvt_free_task,
   4.269 -    .wake_up        = fbvt_wake_up,
   4.270      .do_block       = fbvt_do_block,
   4.271      .do_schedule    = fbvt_do_schedule,
   4.272      .control        = fbvt_ctl,
   4.273 @@ -557,7 +575,7 @@ struct scheduler sched_fbvt_def = {
   4.274      .dump_settings  = fbvt_dump_settings,
   4.275      .dump_cpu_state = fbvt_dump_cpu_state,
   4.276      .dump_runq_el   = fbvt_dump_runq_el,
   4.277 -    .pause          = fbvt_pause,
   4.278 -    .unpause	    = fbvt_unpause,
   4.279 +    .sleep          = fbvt_sleep,
   4.280 +    .wake           = fbvt_wake,
   4.281  };
   4.282  
     5.1 --- a/xen/common/sched_rrobin.c	Fri Jul 23 10:20:56 2004 +0000
     5.2 +++ b/xen/common/sched_rrobin.c	Fri Jul 23 10:21:46 2004 +0000
     5.3 @@ -8,8 +8,11 @@
     5.4  #include <xen/sched-if.h>
     5.5  #include <hypervisor-ifs/sched_ctl.h>
     5.6  #include <xen/ac_timer.h>
     5.7 +#include <xen/softirq.h>
     5.8  #include <xen/time.h>
     5.9  
    5.10 +#define TIME_SLOP      (s32)MICROSECS(50)     /* allow time to slip a bit */
    5.11 +
    5.12  static s_time_t rr_slice = MILLISECS(10);
    5.13  
    5.14  static task_slice_t rr_do_schedule(s_time_t now)
    5.15 @@ -33,7 +36,7 @@ static task_slice_t rr_do_schedule(s_tim
    5.16  
    5.17  static int rr_ctl(struct sched_ctl_cmd *cmd)
    5.18  {
    5.19 -    if(cmd->direction == SCHED_INFO_PUT)
    5.20 +    if ( cmd->direction == SCHED_INFO_PUT )
    5.21      {
    5.22          rr_slice = cmd->u.rrobin.slice;
    5.23      }
    5.24 @@ -50,10 +53,37 @@ static void rr_dump_settings()
    5.25      printk("rr_slice = %llu ", rr_slice);
    5.26  }
    5.27  
    5.28 -static void rr_pause(struct domain *p)
    5.29 +static void rr_sleep(struct domain *d)
    5.30 +{
    5.31 +    if ( test_bit(DF_RUNNING, &d->flags) )
    5.32 +        cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
    5.33 +    else if ( __task_on_runqueue(d) )
    5.34 +        __del_from_runqueue(d);
    5.35 +}
    5.36 +
    5.37 +void rr_wake(struct domain *d)
    5.38  {
    5.39 -    if ( __task_on_runqueue(p) )
    5.40 -        __del_from_runqueue(p);
    5.41 +    struct domain       *curr;
    5.42 +    s_time_t             now, min_time;
    5.43 +    int                  cpu = d->processor;
    5.44 +
    5.45 +    /* If on the runqueue already then someone has done the wakeup work. */
    5.46 +    if ( unlikely(__task_on_runqueue(d)) )
    5.47 +        return;
    5.48 +
    5.49 +    __add_to_runqueue_head(d);
    5.50 +
    5.51 +    now = NOW();
    5.52 +
    5.53 +    curr = schedule_data[cpu].curr;
    5.54 +
    5.55 +    /* Currently-running domain should run at least for ctx_allow. */
    5.56 +    min_time = curr->lastschd + curr->min_slice;
    5.57 +    
    5.58 +    if ( is_idle_task(curr) || (min_time <= now) )
    5.59 +        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
    5.60 +    else if ( schedule_data[cpu].s_timer.expires > (min_time + TIME_SLOP) )
    5.61 +        mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
    5.62  }
    5.63  
    5.64  struct scheduler sched_rrobin_def = {
    5.65 @@ -61,11 +91,11 @@ struct scheduler sched_rrobin_def = {
    5.66      .opt_name = "rrobin",
    5.67      .sched_id = SCHED_RROBIN,
    5.68  
    5.69 -    .wake_up        = __add_to_runqueue_head,
    5.70      .do_schedule    = rr_do_schedule,
    5.71      .control        = rr_ctl,
    5.72      .dump_settings  = rr_dump_settings,
    5.73 -    .pause          = rr_pause,
    5.74 +    .sleep          = rr_sleep,
    5.75 +    .wake           = rr_wake,
    5.76  };
    5.77  
    5.78  
     6.1 --- a/xen/common/schedule.c	Fri Jul 23 10:20:56 2004 +0000
     6.2 +++ b/xen/common/schedule.c	Fri Jul 23 10:21:46 2004 +0000
     6.3 @@ -178,12 +178,7 @@ void domain_sleep(struct domain *d)
     6.4  
     6.5      spin_lock_irqsave(&schedule_lock[cpu], flags);
     6.6      if ( likely(!domain_runnable(d)) )
     6.7 -    {
     6.8 -        if ( test_bit(DF_RUNNING, &d->flags) )
     6.9 -            cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
    6.10 -        else if ( __task_on_runqueue(d) )
    6.11 -            __del_from_runqueue(d);
    6.12 -    }
    6.13 +        SCHED_OP(sleep, d);
    6.14      spin_unlock_irqrestore(&schedule_lock[cpu], flags);
    6.15  
    6.16      /* Synchronous. */
    6.17 @@ -198,53 +193,19 @@ void domain_wake(struct domain *d)
    6.18  {
    6.19      unsigned long       flags;
    6.20      int                 cpu = d->processor;
    6.21 -    struct domain      *curr;
    6.22 -    s_time_t            now, min_time;
    6.23  
    6.24      spin_lock_irqsave(&schedule_lock[cpu], flags);
    6.25 -
    6.26 -    if ( likely(domain_runnable(d)) && likely(!__task_on_runqueue(d)) )
    6.27 +    if ( likely(domain_runnable(d)) )
    6.28      {
    6.29 -        TRACE_2D(TRC_SCHED_WAKE,d->domain, d);
    6.30 -        SCHED_OP(wake_up, d);
    6.31 +        TRACE_2D(TRC_SCHED_WAKE, d->domain, d);
    6.32 +        SCHED_OP(wake, d);
    6.33  #ifdef WAKE_HISTO
    6.34 -        p->wokenup = NOW();
    6.35 +        d->wokenup = NOW();
    6.36  #endif
    6.37 -
    6.38 -        now = NOW();
    6.39 -        curr = schedule_data[cpu].curr;
    6.40 -
    6.41 -        /* Currently-running domain should run at least for ctx_allow. */
    6.42 -        min_time = curr->lastschd + curr->min_slice;
    6.43 -
    6.44 -        if ( is_idle_task(curr) || (min_time <= now) )
    6.45 -            cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
    6.46 -        else if ( schedule_data[cpu].s_timer.expires > (min_time + TIME_SLOP) )
    6.47 -            mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
    6.48      }
    6.49 -
    6.50      spin_unlock_irqrestore(&schedule_lock[cpu], flags);
    6.51  }
    6.52  
    6.53 -/*
    6.54 - * Pausing a domain.
    6.55 - */
    6.56 -void pause_domain(struct domain *domain)
    6.57 -{
    6.58 -	domain_sleep(domain);
    6.59 -	SCHED_OP(pause, domain);	
    6.60 -}
    6.61 -
    6.62 -
    6.63 -/*
    6.64 - * Unpauseing a domain
    6.65 - */
    6.66 -void unpause_domain(struct domain *domain)
    6.67 -{
    6.68 -	SCHED_OP(unpause, domain);
    6.69 -	domain_wake(domain);
    6.70 -}
    6.71 -
    6.72  /* Block the currently-executing domain until a pertinent event occurs. */
    6.73  long do_block(void)
    6.74  {
     7.1 --- a/xen/include/xen/sched-if.h	Fri Jul 23 10:20:56 2004 +0000
     7.2 +++ b/xen/include/xen/sched-if.h	Fri Jul 23 10:21:46 2004 +0000
     7.3 @@ -39,7 +39,8 @@ struct scheduler
     7.4      void         (*add_task)       (struct domain *);
     7.5      void         (*free_task)      (struct domain *);
     7.6      void         (*rem_task)       (struct domain *);
     7.7 -    void         (*wake_up)        (struct domain *);
     7.8 +    void         (*sleep)          (struct domain *);
     7.9 +    void         (*wake)           (struct domain *);
    7.10      void         (*do_block)       (struct domain *);
    7.11      task_slice_t (*do_schedule)    (s_time_t);
    7.12      int          (*control)        (struct sched_ctl_cmd *);
    7.13 @@ -49,8 +50,6 @@ struct scheduler
    7.14      void         (*dump_cpu_state) (int);
    7.15      void         (*dump_runq_el)   (struct domain *);
    7.16      int          (*prn_state)      (int);
    7.17 -    void         (*pause)          (struct domain *);
    7.18 -	void		 (*unpause)		   (struct domain *);
    7.19  };
    7.20  
    7.21  /* per CPU scheduler information */
     8.1 --- a/xen/include/xen/sched.h	Fri Jul 23 10:20:56 2004 +0000
     8.2 +++ b/xen/include/xen/sched.h	Fri Jul 23 10:21:46 2004 +0000
     8.3 @@ -208,8 +208,6 @@ int  sched_id();
     8.4  void init_idle_task(void);
     8.5  void domain_wake(struct domain *d);
     8.6  void domain_sleep(struct domain *d);
     8.7 -void pause_domain(struct domain *d);
     8.8 -void unpause_domain(struct domain *d);
     8.9  
    8.10  void __enter_scheduler(void);
    8.11  
    8.12 @@ -260,14 +258,14 @@ static inline void domain_pause(struct d
    8.13  {
    8.14      ASSERT(d != current);
    8.15      atomic_inc(&d->pausecnt);
    8.16 -    pause_domain(d);
    8.17 +    domain_sleep(d);
    8.18  }
    8.19  
    8.20  static inline void domain_unpause(struct domain *d)
    8.21  {
    8.22      ASSERT(d != current);
    8.23      if ( atomic_dec_and_test(&d->pausecnt) )
    8.24 -        unpause_domain(d);
    8.25 +        domain_wake(d);
    8.26  }
    8.27  
    8.28  static inline void domain_unblock(struct domain *d)
    8.29 @@ -280,13 +278,13 @@ static inline void domain_pause_by_syste
    8.30  {
    8.31      ASSERT(d != current);
    8.32      if ( !test_and_set_bit(DF_CTRLPAUSE, &d->flags) )
    8.33 -        pause_domain(d);
    8.34 +        domain_sleep(d);
    8.35  }
    8.36  
    8.37  static inline void domain_unpause_by_systemcontroller(struct domain *d)
    8.38  {
    8.39      if ( test_and_clear_bit(DF_CTRLPAUSE, &d->flags) )
    8.40 -        unpause_domain(d);
    8.41 +        domain_wake(d);
    8.42  }
    8.43  
    8.44