ia64/xen-unstable

changeset 1979:e9c10b054f93

bitkeeper revision 1.1108.36.1 (4108f279nPgkLZARXvnqXNBEsFkj4Q)

The runqueue management functions have been removed from sched-if.h and moved into the individual schedulers (illustrated by the sketch after the file list below)
author gm281@boulderdash.cl.cam.ac.uk
date Thu Jul 29 12:50:01 2004 +0000 (2004-07-29)
parents 6d2b5ebbf4b6
children 0475742a106f
files xen/common/sched_bvt.c xen/common/sched_fair_bvt.c xen/common/sched_rrobin.c xen/include/xen/sched-if.h
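
The diff below deletes the shared run-queue wrappers from sched-if.h and re-creates them locally in sched_bvt.c, sched_fair_bvt.c and sched_rrobin.c, where each copy works on that scheduler's own RUNLIST/RUNQUEUE macros and is called under its run_lock. As a rough illustration of the pattern (this is not the Xen source: the list primitives are inlined here instead of coming from Xen's <xen/list.h>, and struct dom_info, add_to_runqueue_head, etc. are illustrative names only), the following self-contained C sketch shows how an embedded list node plus a NULLed ->next pointer answers "is this domain on a run queue":

/*
 * Minimal stand-alone sketch of the run-queue pattern used in the diff:
 * every domain embeds a list node, the per-CPU queue is a list head, and
 * a NULL ->next pointer marks "not queued".  Names are hypothetical.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static inline void list_init(struct list_head *h)   { h->next = h->prev = h; }
static inline void list_add(struct list_head *n, struct list_head *h)
{   /* insert n right after h (head insertion) */
    n->next = h->next; n->prev = h;
    h->next->prev = n; h->next = n;
}
static inline void list_del(struct list_head *n)
{   /* unlink n from whatever list it is on */
    n->prev->next = n->next; n->next->prev = n->prev;
}

/* Stand-in for per-domain scheduler state (cf. struct bvt_dom_info). */
struct dom_info {
    struct list_head run_list;   /* node on the per-CPU run queue */
    int domid;
};

static struct list_head runqueue;    /* stand-in for RUNQUEUE(cpu) */

/* The wrapper style the changeset adds to each scheduler. */
static inline void add_to_runqueue_head(struct dom_info *d)
{
    list_add(&d->run_list, &runqueue);
}

static inline void del_from_runqueue(struct dom_info *d)
{
    list_del(&d->run_list);
    d->run_list.next = NULL;         /* sentinel: "not on a run queue" */
}

static inline int task_on_runqueue(struct dom_info *d)
{
    return d->run_list.next != NULL;
}

int main(void)
{
    struct dom_info d = { .run_list = { NULL, NULL }, .domid = 1 };

    list_init(&runqueue);
    printf("queued? %d\n", task_on_runqueue(&d));  /* 0 */
    add_to_runqueue_head(&d);
    printf("queued? %d\n", task_on_runqueue(&d));  /* 1 */
    del_from_runqueue(&d);
    printf("queued? %d\n", task_on_runqueue(&d));  /* 0 */
    return 0;
}

Keeping a private copy of these wrappers in each scheduler lets every scheduler pair them with its own lock (the new comments say run_lock, where the old sched-if.h comment said schedule_lock) and with its own private run_list/runqueue layout, rather than sharing one header-level implementation.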
line diff
     1.1 --- a/xen/common/sched_bvt.c	Wed Jul 28 18:03:08 2004 +0000
     1.2 +++ b/xen/common/sched_bvt.c	Thu Jul 29 12:50:01 2004 +0000
     1.3 @@ -53,8 +53,8 @@ struct bvt_cpu_info
     1.4  
     1.5  #define BVT_INFO(p)   ((struct bvt_dom_info *)(p)->sched_priv)
     1.6  #define CPU_INFO(cpu) ((struct bvt_cpu_info *)(schedule_data[cpu]).sched_priv)
     1.7 -#define RUNLIST(p)    &(BVT_INFO(p)->run_list)
     1.8 -#define RUNQUEUE(cpu) &(CPU_INFO(cpu)->runqueue)
     1.9 +#define RUNLIST(p)    ((struct list_head *)&(BVT_INFO(p)->run_list))
    1.10 +#define RUNQUEUE(cpu) ((struct list_head *)&(CPU_INFO(cpu)->runqueue))
    1.11  #define CPU_SVT(cpu)  (CPU_INFO(cpu)->svt)
    1.12  
    1.13  #define MCU            (s32)MICROSECS(100)    /* Minimum unit */
    1.14 @@ -66,6 +66,32 @@ static s32 ctx_allow = (s32)MILLISECS(5)
    1.15  static xmem_cache_t *dom_info_cache;
    1.16  
    1.17  /*
    1.18 + * Wrappers for run-queue management. Must be called with the run_lock
    1.19 + * held.
    1.20 + */
    1.21 +static inline void __add_to_runqueue_head(struct domain *d)
    1.22 +{
    1.23 +    list_add(RUNLIST(d), RUNQUEUE(d->processor));
    1.24 +}
    1.25 +
    1.26 +static inline void __add_to_runqueue_tail(struct domain *d)
    1.27 +{
    1.28 +    list_add_tail(RUNLIST(d), RUNQUEUE(d->processor));
    1.29 +}
    1.30 +
    1.31 +static inline void __del_from_runqueue(struct domain *d)
    1.32 +{
    1.33 +    struct list_head *runlist = RUNLIST(d);
    1.34 +    list_del(runlist);
    1.35 +    runlist->next = NULL;
    1.36 +}
    1.37 +
    1.38 +static inline int __task_on_runqueue(struct domain *d)
    1.39 +{
    1.40 +    return (RUNLIST(d))->next != NULL;
    1.41 +}
    1.42 +
    1.43 +/*
    1.44   * Calculate the effective virtual time for a domain. Take into account 
    1.45   * warping limits
    1.46   */
    1.47 @@ -152,8 +178,8 @@ int bvt_init_idle_task(struct domain *p)
    1.48      spin_lock_irqsave(&CPU_INFO(p->processor)->run_lock, flags);
    1.49      
    1.50      set_bit(DF_RUNNING, &p->flags);
    1.51 -    if ( !__task_on_runqueue(RUNLIST(p)) )
    1.52 -        __add_to_runqueue_head(RUNLIST(p), RUNQUEUE(p->processor));
    1.53 +    if ( !__task_on_runqueue(p) )
    1.54 +        __add_to_runqueue_head(p);
    1.55          
    1.56      spin_unlock_irqrestore(&CPU_INFO(p->processor)->run_lock, flags);
    1.57  
    1.58 @@ -172,13 +198,13 @@ void bvt_wake(struct domain *d)
    1.59      spin_lock_irqsave(&CPU_INFO(cpu)->run_lock, flags);
    1.60      
    1.61      /* If on the runqueue already then someone has done the wakeup work. */
    1.62 -    if ( unlikely(__task_on_runqueue(RUNLIST(d))) )
    1.63 +    if ( unlikely(__task_on_runqueue(d)) )
    1.64      {
    1.65          spin_unlock_irqrestore(&CPU_INFO(cpu)->run_lock, flags);
    1.66          return;
    1.67      }
    1.68  
    1.69 -    __add_to_runqueue_head(RUNLIST(d), RUNQUEUE(d->processor));
    1.70 +    __add_to_runqueue_head(d);
    1.71  
    1.72      now = NOW();
    1.73  
    1.74 @@ -222,8 +248,8 @@ static void bvt_sleep(struct domain *d)
    1.75          spin_lock_irqsave(&CPU_INFO(d->processor)->run_lock, flags);
    1.76          
    1.77          
    1.78 -        if ( __task_on_runqueue(RUNLIST(d)) )
    1.79 -            __del_from_runqueue(RUNLIST(d));
    1.80 +        if ( __task_on_runqueue(d) )
    1.81 +            __del_from_runqueue(d);
    1.82  
    1.83          spin_unlock_irqrestore(&CPU_INFO(d->processor)->run_lock, flags);    
    1.84      }
    1.85 @@ -347,7 +373,7 @@ static task_slice_t bvt_do_schedule(s_ti
    1.86      ASSERT(prev_inf != NULL);
    1.87      spin_lock_irqsave(&CPU_INFO(cpu)->run_lock, flags);
    1.88  
    1.89 -    ASSERT(__task_on_runqueue(RUNLIST(prev)));
    1.90 +    ASSERT(__task_on_runqueue(prev));
    1.91  
    1.92      if ( likely(!is_idle_task(prev)) ) 
    1.93      {
    1.94 @@ -358,10 +384,10 @@ static task_slice_t bvt_do_schedule(s_ti
    1.95          
    1.96          __calc_evt(prev_inf);
    1.97          
    1.98 -        __del_from_runqueue(RUNLIST(prev));
    1.99 +        __del_from_runqueue(prev);
   1.100          
   1.101          if ( domain_runnable(prev) )
   1.102 -            __add_to_runqueue_tail(RUNLIST(prev), RUNQUEUE(cpu));
   1.103 +            __add_to_runqueue_tail(prev);
   1.104      }
   1.105  
   1.106   
     2.1 --- a/xen/common/sched_fair_bvt.c	Wed Jul 28 18:03:08 2004 +0000
     2.2 +++ b/xen/common/sched_fair_bvt.c	Thu Jul 29 12:50:01 2004 +0000
     2.3 @@ -62,8 +62,8 @@ struct fbvt_cpu_info
     2.4  
     2.5  #define FBVT_INFO(p)  ((struct fbvt_dom_info *)(p)->sched_priv)
     2.6  #define CPU_INFO(cpu) ((struct fbvt_cpu_info *)(schedule_data[cpu]).sched_priv)
     2.7 -#define RUNLIST(p)    (struct list_head *)(&(FBVT_INFO(p)->run_list))
     2.8 -#define RUNQUEUE(cpu) (struct list_head *)&(CPU_INFO(cpu)->runqueue)
     2.9 +#define RUNLIST(p)    ((struct list_head *)&(FBVT_INFO(p)->run_list))
    2.10 +#define RUNQUEUE(cpu) ((struct list_head *)&(CPU_INFO(cpu)->runqueue))
    2.11  #define CPU_SVT(cpu)  (CPU_INFO(cpu)->svt)
    2.12  #define LAST_VTB(cpu) (CPU_INFO(cpu)->vtb)
    2.13  #define R_TIME(cpu)   (CPU_INFO(cpu)->r_time) 
    2.14 @@ -77,6 +77,33 @@ static s32 max_vtb   = (s32)MILLISECS(5)
    2.15  /* SLAB cache for struct fbvt_dom_info objects */
    2.16  static xmem_cache_t *dom_info_cache;
    2.17  
    2.18 +
    2.19 +/*
    2.20 + * Wrappers for run-queue management. Must be called with the run_lock
    2.21 + * held.
    2.22 + */
    2.23 +static inline void __add_to_runqueue_head(struct domain *d)
    2.24 +{
    2.25 +    list_add(RUNLIST(d), RUNQUEUE(d->processor));
    2.26 +}
    2.27 +
    2.28 +static inline void __add_to_runqueue_tail(struct domain *d)
    2.29 +{
    2.30 +    list_add_tail(RUNLIST(d), RUNQUEUE(d->processor));
    2.31 +}
    2.32 +
    2.33 +static inline void __del_from_runqueue(struct domain *d)
    2.34 +{
    2.35 +    struct list_head *runlist = RUNLIST(d);
    2.36 +    list_del(runlist);
    2.37 +    runlist->next = NULL;
    2.38 +}
    2.39 +
    2.40 +static inline int __task_on_runqueue(struct domain *d)
    2.41 +{
    2.42 +    return (RUNLIST(d))->next != NULL;
    2.43 +}
    2.44 +
    2.45  /*
    2.46   * Calculate the effective virtual time for a domain. Take into account 
    2.47   * warping limits
    2.48 @@ -163,8 +190,8 @@ int fbvt_init_idle_task(struct domain *p
    2.49      fbvt_add_task(p);
    2.50      spin_lock_irqsave(&CPU_INFO(p->processor)->run_lock, flags);
    2.51      set_bit(DF_RUNNING, &p->flags);
    2.52 -    if ( !__task_on_runqueue(RUNLIST(p)) )
    2.53 -    __add_to_runqueue_head(RUNLIST(p), RUNQUEUE(p->processor));
    2.54 +    if ( !__task_on_runqueue(p) )
    2.55 +    __add_to_runqueue_head(p);
    2.56      spin_unlock_irqrestore(&CPU_INFO(p->processor)->run_lock, flags);
    2.57  
    2.58      return 0;
    2.59 @@ -183,13 +210,13 @@ static void fbvt_wake(struct domain *d)
    2.60      spin_lock_irqsave(&CPU_INFO(cpu)->run_lock, flags);
    2.61      
    2.62      /* If on the runqueue already then someone has done the wakeup work. */
    2.63 -    if ( unlikely(__task_on_runqueue(RUNLIST(d))) )
    2.64 +    if ( unlikely(__task_on_runqueue(d)) )
    2.65      {
    2.66          spin_unlock_irqrestore(&CPU_INFO(cpu)->run_lock, flags); 
    2.67          return;
    2.68      }    
    2.69      
    2.70 -    __add_to_runqueue_head(RUNLIST(d), RUNQUEUE(cpu));
    2.71 +    __add_to_runqueue_head(d);
    2.72   
    2.73      now = NOW();
    2.74  
    2.75 @@ -270,8 +297,8 @@ static void fbvt_sleep(struct domain *d)
    2.76           /* The runqueue accesses must be protected */
    2.77          spin_lock_irqsave(&CPU_INFO(d->processor)->run_lock, flags);       
    2.78      
    2.79 -        if ( __task_on_runqueue(RUNLIST(d)) )
    2.80 -            __del_from_runqueue(RUNLIST(d));
    2.81 +        if ( __task_on_runqueue(d) )
    2.82 +            __del_from_runqueue(d);
    2.83  
    2.84          spin_unlock_irqrestore(&CPU_INFO(d->processor)->run_lock, flags);
    2.85      }
    2.86 @@ -398,7 +425,7 @@ static task_slice_t fbvt_do_schedule(s_t
    2.87      
    2.88      spin_lock_irqsave(&CPU_INFO(cpu)->run_lock, flags);
    2.89  
    2.90 -    ASSERT(__task_on_runqueue(RUNLIST(prev)));
    2.91 +    ASSERT(__task_on_runqueue(prev));
    2.92  
    2.93      if ( likely(!is_idle_task(prev)) ) 
    2.94      {
    2.95 @@ -428,10 +455,10 @@ static task_slice_t fbvt_do_schedule(s_t
    2.96          
    2.97          __calc_evt(prev_inf);
    2.98          
    2.99 -        __del_from_runqueue(RUNLIST(prev));
   2.100 +        __del_from_runqueue(prev);
   2.101          
   2.102          if ( domain_runnable(prev) )
   2.103 -            __add_to_runqueue_tail(RUNLIST(prev), RUNQUEUE(cpu));
   2.104 +            __add_to_runqueue_tail(prev);
   2.105      }
   2.106  
   2.107      /* We should at least have the idle task */
     3.1 --- a/xen/common/sched_rrobin.c	Wed Jul 28 18:03:08 2004 +0000
     3.2 +++ b/xen/common/sched_rrobin.c	Thu Jul 29 12:50:01 2004 +0000
     3.3 @@ -26,15 +26,38 @@ struct rrobin_dom_info
     3.4  static spinlock_t run_locks[NR_CPUS];
     3.5  
     3.6  #define RR_INFO(d)      ((struct rrobin_dom_info *)d->sched_priv)
     3.7 -#define RUNLIST(d)      (struct list_head *)&(RR_INFO(d)->run_list)
     3.8 +#define RUNLIST(d)      ((struct list_head *)&(RR_INFO(d)->run_list))
     3.9  #define RUNQUEUE(cpu)   RUNLIST(schedule_data[cpu].idle)
    3.10  
    3.11 -// TODO remove following line
    3.12 -static void rr_dump_cpu_state(int cpu);
    3.13 -
    3.14  /* SLAB cache for struct rrobin_dom_info objects */
    3.15  static xmem_cache_t *dom_info_cache;
    3.16  
    3.17 +/*
    3.18 + * Wrappers for run-queue management. Must be called with the run_lock
    3.19 + * held.
    3.20 + */
    3.21 +static inline void __add_to_runqueue_head(struct domain *d)
    3.22 +{
    3.23 +    list_add(RUNLIST(d), RUNQUEUE(d->processor));
    3.24 +}
    3.25 +
    3.26 +static inline void __add_to_runqueue_tail(struct domain *d)
    3.27 +{
    3.28 +    list_add_tail(RUNLIST(d), RUNQUEUE(d->processor));
    3.29 +}
    3.30 +
    3.31 +static inline void __del_from_runqueue(struct domain *d)
    3.32 +{
    3.33 +    struct list_head *runlist = RUNLIST(d);
    3.34 +    list_del(runlist);
    3.35 +    runlist->next = NULL;
    3.36 +}
    3.37 +
    3.38 +static inline int __task_on_runqueue(struct domain *d)
    3.39 +{
    3.40 +    return (RUNLIST(d))->next != NULL;
    3.41 +}
    3.42 +
    3.43  
    3.44  /* Ensures proper initialisation of the dom_info */
    3.45  static void cache_constructor(void *arg1, xmem_cache_t *arg2, unsigned long arg3)
    3.46 @@ -102,8 +125,8 @@ static int rr_init_idle_task(struct doma
    3.47  
    3.48      spin_lock_irqsave(&run_locks[p->processor], flags);
    3.49      set_bit(DF_RUNNING, &p->flags);
    3.50 -    if ( !__task_on_runqueue(RUNLIST(p)) )
    3.51 -         __add_to_runqueue_head(RUNLIST(p), RUNQUEUE(p->processor));
    3.52 +    if ( !__task_on_runqueue(p) )
    3.53 +         __add_to_runqueue_head(p);
    3.54      spin_unlock_irqrestore(&run_locks[p->processor], flags);
    3.55      return 0;
    3.56  }
    3.57 @@ -122,15 +145,15 @@ static task_slice_t rr_do_schedule(s_tim
    3.58      
    3.59      if(!is_idle_task(prev))
    3.60      {
    3.61 -        __del_from_runqueue(RUNLIST(prev));
    3.62 +        __del_from_runqueue(prev);
    3.63      
    3.64          if ( domain_runnable(prev) )
    3.65 -            __add_to_runqueue_tail(RUNLIST(prev), RUNQUEUE(cpu));
    3.66 +            __add_to_runqueue_tail(prev);
    3.67      }
    3.68      
    3.69      spin_unlock_irqrestore(&run_locks[cpu], flags);
    3.70      
    3.71 -    ret.task = list_entry(  RUNQUEUE(cpu).next->next, 
    3.72 +    ret.task = list_entry(  RUNQUEUE(cpu)->next, 
    3.73                              struct rrobin_dom_info, 
    3.74                              run_list)->domain;
    3.75      ret.time = rr_slice;
    3.76 @@ -166,8 +189,8 @@ static void rr_sleep(struct domain *d)
    3.77      else
    3.78      {
    3.79          spin_lock_irqsave(&run_locks[d->processor], flags);
    3.80 -        if ( __task_on_runqueue(RUNLIST(d)) )
    3.81 -            __del_from_runqueue(RUNLIST(d));
    3.82 +        if ( __task_on_runqueue(d) )
    3.83 +            __del_from_runqueue(d);
    3.84          spin_unlock_irqrestore(&run_locks[d->processor], flags);
    3.85      }
    3.86  }
    3.87 @@ -182,13 +205,13 @@ void rr_wake(struct domain *d)
    3.88      spin_lock_irqsave(&run_locks[cpu], flags);
    3.89      
    3.90      /* If on the runqueue already then someone has done the wakeup work. */
    3.91 -    if ( unlikely(__task_on_runqueue(RUNLIST(d))))
    3.92 +    if ( unlikely(__task_on_runqueue(d)))
    3.93      {
    3.94          spin_unlock_irqrestore(&run_locks[cpu], flags);
    3.95          return;
    3.96      }
    3.97  
    3.98 -    __add_to_runqueue_head(RUNLIST(d), RUNQUEUE(cpu));
    3.99 +    __add_to_runqueue_head(d);
   3.100      spin_unlock_irqrestore(&run_locks[cpu], flags);
   3.101  
   3.102      now = NOW();
     4.1 --- a/xen/include/xen/sched-if.h	Wed Jul 28 18:03:08 2004 +0000
     4.2 +++ b/xen/include/xen/sched-if.h	Thu Jul 29 12:50:01 2004 +0000
     4.3 @@ -56,28 +56,4 @@ struct scheduler
     4.4  /* per CPU scheduler information */
     4.5  extern schedule_data_t schedule_data[];
     4.6  
     4.7 -/*
     4.8 - * Wrappers for run-queue management. Must be called with the schedule_lock
     4.9 - * held.
    4.10 - */
    4.11 -static inline void __add_to_runqueue_head(struct list_head *run_list, struct list_head *runqueue)
    4.12 -{
    4.13 -    list_add(run_list, runqueue);
    4.14 -}
    4.15  
    4.16 -static inline void __add_to_runqueue_tail(struct list_head *run_list, struct list_head *runqueue)
    4.17 -{
    4.18 -    list_add_tail(run_list, runqueue);
    4.19 -}
    4.20 -
    4.21 -static inline void __del_from_runqueue(struct list_head *run_list)
    4.22 -{
    4.23 -    list_del(run_list);
    4.24 -    run_list->next = NULL;
    4.25 -}
    4.26 -
    4.27 -static inline int __task_on_runqueue(struct list_head *run_list)
    4.28 -{
    4.29 -    return run_list->next != NULL;
    4.30 -}
    4.31 -