ia64/xen-unstable

changeset 1899:9fd9d87a5a61

bitkeeper revision 1.1108.1.20 (410193bbkbfR1_3KuvxENsFzpMxzdw)

Definition of runqueues pushed from the generic scheduler to the specific schedulers. BVT, FBVT and RRobin fixed to work with the new interface.
author gm281@boulderdash.cl.cam.ac.uk
date Fri Jul 23 22:39:55 2004 +0000 (2004-07-23)
parents fbcefe1a0285
children 1a4ae7399519 b2ef8732a70a 780dd1691d62 f48d42a90681
files xen/common/sched_bvt.c xen/common/sched_fair_bvt.c xen/common/sched_rrobin.c xen/common/schedule.c xen/include/xen/sched-if.h xen/include/xen/sched.h
line diff
     1.1 --- a/xen/common/sched_bvt.c	Fri Jul 23 17:16:11 2004 +0000
     1.2 +++ b/xen/common/sched_bvt.c	Fri Jul 23 22:39:55 2004 +0000
     1.3 @@ -30,25 +30,30 @@
     1.4  /* all per-domain BVT-specific scheduling info is stored here */
     1.5  struct bvt_dom_info
     1.6  {
     1.7 -    unsigned long mcu_advance;      /* inverse of weight */
     1.8 -    u32           avt;              /* actual virtual time */
     1.9 -    u32           evt;              /* effective virtual time */
    1.10 -    int           warpback;         /* warp?  */
    1.11 -    long          warp;             /* virtual time warp */
    1.12 -    long          warpl;            /* warp limit */
    1.13 -    long          warpu;            /* unwarp time requirement */
    1.14 -    s_time_t      warped;           /* time it ran warped last time */
    1.15 -    s_time_t      uwarped;          /* time it ran unwarped last time */
    1.16 +    struct domain       *domain;          /* domain this info belongs to */
    1.17 +    struct list_head    run_list;         /* runqueue list pointers */
    1.18 +    unsigned long       mcu_advance;      /* inverse of weight */
    1.19 +    u32                 avt;              /* actual virtual time */
    1.20 +    u32                 evt;              /* effective virtual time */
    1.21 +    int                 warpback;         /* warp?  */
    1.22 +    long                warp;             /* virtual time warp */
    1.23 +    long                warpl;            /* warp limit */
    1.24 +    long                warpu;            /* unwarp time requirement */
    1.25 +    s_time_t            warped;           /* time it ran warped last time */
    1.26 +    s_time_t            uwarped;          /* time it ran unwarped last time */
    1.27  };
    1.28  
    1.29  struct bvt_cpu_info
    1.30  {
    1.31 -    unsigned long svt; /* XXX check this is unsigned long! */
    1.32 +    struct list_head    runqueue;   /* runqueue for given processor */ 
    1.33 +    unsigned long       svt;        /* XXX check this is unsigned long! */
    1.34  };
    1.35  
    1.36  
    1.37  #define BVT_INFO(p)   ((struct bvt_dom_info *)(p)->sched_priv)
    1.38  #define CPU_INFO(cpu) ((struct bvt_cpu_info *)(schedule_data[cpu]).sched_priv)
    1.39 +#define RUNLIST(p)    &(BVT_INFO(p)->run_list)
    1.40 +#define RUNQUEUE(cpu) &(CPU_INFO(cpu)->runqueue)
    1.41  #define CPU_SVT(cpu)  (CPU_INFO(cpu)->svt)
    1.42  
    1.43  #define MCU            (s32)MICROSECS(100)    /* Minimum unit */
    1.44 @@ -110,12 +115,12 @@ int bvt_alloc_task(struct domain *p)
    1.45  void bvt_add_task(struct domain *p) 
    1.46  {
    1.47      struct bvt_dom_info *inf = BVT_INFO(p);
    1.48 -
    1.49      ASSERT(inf != NULL);
    1.50      ASSERT(p   != NULL);
    1.51  
    1.52      inf->mcu_advance = MCU_ADVANCE;
    1.53 -
    1.54 +    inf->domain = p;
    1.55 +    
    1.56      if ( p->domain == IDLE_DOMAIN_ID )
    1.57      {
    1.58          inf->avt = inf->evt = ~0U;
    1.59 @@ -135,6 +140,23 @@ void bvt_add_task(struct domain *p)
    1.60      return;
    1.61  }
    1.62  
    1.63 +int bvt_init_idle_task(struct domain *p)
    1.64 +{
    1.65 +    unsigned long flags;
    1.66 +
    1.67 +    if(bvt_alloc_task(p) < 0) return -1;
    1.68 +
    1.69 +    bvt_add_task(p);
    1.70 +
    1.71 +    spin_lock_irqsave(&schedule_lock[p->processor], flags);
    1.72 +    set_bit(DF_RUNNING, &p->flags);
    1.73 +    if ( !__task_on_runqueue(RUNLIST(p)) )
    1.74 +        __add_to_runqueue_head(RUNLIST(p), RUNQUEUE(p->processor));
    1.75 +    spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
    1.76 +
    1.77 +    return 0;
    1.78 +}
    1.79 +
    1.80  /**
    1.81   * bvt_free_task - free BVT private structures for a task
    1.82   * @p:             task
    1.83 @@ -234,7 +256,7 @@ int bvt_adjdom(struct domain *p,
    1.84   */
    1.85  static task_slice_t bvt_do_schedule(s_time_t now)
    1.86  {
    1.87 -    struct domain *prev = current, *next = NULL, *next_prime, *p;
    1.88 +    struct domain *prev = current, *next = NULL, *next_prime, *p; 
    1.89      struct list_head   *tmp;
    1.90      int                 cpu = prev->processor;
    1.91      s32                 r_time;     /* time for new dom to run */
    1.92 @@ -259,48 +281,48 @@ static task_slice_t bvt_do_schedule(s_ti
    1.93          
    1.94          __calc_evt(prev_inf);
    1.95          
    1.96 -        __del_from_runqueue(prev);
    1.97 +        __del_from_runqueue(RUNLIST(prev));
    1.98          
    1.99          if ( domain_runnable(prev) )
   1.100 -            __add_to_runqueue_tail(prev);
   1.101 +            __add_to_runqueue_tail(RUNLIST(prev), RUNQUEUE(cpu));
   1.102      }
   1.103  
   1.104 + 
   1.105      /* We should at least have the idle task */
   1.106 -    ASSERT(!list_empty(&schedule_data[cpu].runqueue));
   1.107 +    ASSERT(!list_empty(RUNQUEUE(cpu)));
   1.108  
   1.109      /*
   1.110       * scan through the run queue and pick the task with the lowest evt
   1.111       * *and* the task the second lowest evt.
   1.112       * this code is O(n) but we expect n to be small.
   1.113       */
   1.114 -    next       = schedule_data[cpu].idle;
   1.115 -    next_prime = NULL;
   1.116 +    next_inf        = BVT_INFO(schedule_data[cpu].idle);
   1.117 +    next_prime_inf  = NULL;
   1.118  
   1.119      next_evt       = ~0U;
   1.120      next_prime_evt = ~0U;
   1.121      min_avt        = ~0U;
   1.122  
   1.123 -    list_for_each ( tmp, &schedule_data[cpu].runqueue )
   1.124 +    list_for_each ( tmp, RUNQUEUE(cpu) )
   1.125      {
   1.126 -        p     = list_entry(tmp, struct domain, run_list);
   1.127 -        p_inf = BVT_INFO(p);
   1.128 +        p_inf = list_entry(tmp, struct bvt_dom_info, run_list);
   1.129  
   1.130          if ( p_inf->evt < next_evt )
   1.131          {
   1.132 -            next_prime     = next;
   1.133 -            next_prime_evt = next_evt;
   1.134 -            next = p;
   1.135 -            next_evt = p_inf->evt;
   1.136 +            next_prime_inf  = next_inf;
   1.137 +            next_prime_evt  = next_evt;
   1.138 +            next_inf        = p_inf;
   1.139 +            next_evt        = p_inf->evt;
   1.140          } 
   1.141          else if ( next_prime_evt == ~0U )
   1.142          {
   1.143 -            next_prime_evt = p_inf->evt;
   1.144 -            next_prime     = p;
   1.145 +            next_prime_evt  = p_inf->evt;
   1.146 +            next_prime_inf  = p_inf;
   1.147          } 
   1.148          else if ( p_inf->evt < next_prime_evt )
   1.149          {
   1.150 -            next_prime_evt = p_inf->evt;
   1.151 -            next_prime     = p;
   1.152 +            next_prime_evt  = p_inf->evt;
   1.153 +            next_prime_inf  = p_inf;
   1.154          }
   1.155  
   1.156          /* Determine system virtual time. */
   1.157 @@ -308,6 +330,10 @@ static task_slice_t bvt_do_schedule(s_ti
   1.158              min_avt = p_inf->avt;
   1.159      }
   1.160  
   1.161 +    /* Extract the domain pointers from the dom infos */
   1.162 +    next        = next_inf->domain;
   1.163 +    next_prime  = next_prime_inf->domain;
   1.164 +    
   1.165      /* Update system virtual time. */
   1.166      if ( min_avt != ~0U )
   1.167          CPU_SVT(cpu) = min_avt;
   1.168 @@ -344,9 +370,6 @@ static task_slice_t bvt_do_schedule(s_ti
   1.169          goto sched_done;
   1.170      }
   1.171  
   1.172 -    next_prime_inf = BVT_INFO(next_prime);
   1.173 -    next_inf       = BVT_INFO(next);
   1.174 -
   1.175      /*
   1.176       * If we are here then we have two runnable tasks.
   1.177       * Work out how long 'next' can run till its evt is greater than
   1.178 @@ -382,9 +405,44 @@ static void bvt_dump_settings(void)
   1.179  
   1.180  static void bvt_dump_cpu_state(int i)
   1.181  {
   1.182 +    unsigned long flags;
   1.183 +    struct list_head *list, *queue;
   1.184 +    int loop = 0;
   1.185 +    struct bvt_dom_info *d_inf;
   1.186 +    struct domain *d;
   1.187 +    
   1.188 +    spin_lock_irqsave(&schedule_lock[i], flags);
   1.189      printk("svt=0x%08lX ", CPU_SVT(i));
   1.190 +
   1.191 +    queue = RUNQUEUE(i);
   1.192 +    printk("QUEUE rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
   1.193 +        (unsigned long) queue->next, (unsigned long) queue->prev);
   1.194 +
   1.195 +    list_for_each ( list, queue )
   1.196 +    {
   1.197 +        d_inf = list_entry(list, struct bvt_dom_info, run_list);
   1.198 +        d = d_inf->domain;
   1.199 +        printk("%3d: %u has=%c ", loop++, d->domain,
   1.200 +              test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
   1.201 +        bvt_dump_runq_el(d);
   1.202 +        printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
   1.203 +        printk("         l: %lx n: %lx  p: %lx\n",
   1.204 +            (unsigned long)list, (unsigned long)list->next,
   1.205 +            (unsigned long)list->prev);
   1.206 +    }
   1.207 +    spin_unlock_irqrestore(&schedule_lock[i], flags);        
   1.208  }
   1.209  
    1.210 +/* We use a cache to create the bvt_dom_infos;
    1.211 +   this function makes sure that the run_list
    1.212 +   is initialised properly. The new domain must
    1.213 +   NOT appear to be on the runqueue */
   1.214 +static void cache_constructor(void *arg1, kmem_cache_t *arg2, unsigned long arg3)
   1.215 +{
   1.216 +    struct bvt_dom_info *dom_inf = (struct bvt_dom_info*)arg1;
   1.217 +    dom_inf->run_list.next = NULL;
   1.218 +    dom_inf->run_list.prev = NULL;
   1.219 +}
   1.220  
   1.221  /* Initialise the data structures. */
   1.222  int bvt_init_scheduler()
   1.223 @@ -394,6 +452,8 @@ int bvt_init_scheduler()
   1.224      for ( i = 0; i < NR_CPUS; i++ )
   1.225      {
   1.226          schedule_data[i].sched_priv = kmalloc(sizeof(struct bvt_cpu_info));
   1.227 +        INIT_LIST_HEAD(RUNQUEUE(i));
   1.228 +        
   1.229          if ( schedule_data[i].sched_priv == NULL )
   1.230          {
   1.231              printk("Failed to allocate BVT scheduler per-CPU memory!\n");
   1.232 @@ -405,7 +465,7 @@ int bvt_init_scheduler()
   1.233  
   1.234      dom_info_cache = kmem_cache_create("BVT dom info",
   1.235                                         sizeof(struct bvt_dom_info),
   1.236 -                                       0, 0, NULL, NULL);
   1.237 +                                       0, 0, cache_constructor, NULL);
   1.238  
   1.239      if ( dom_info_cache == NULL )
   1.240      {
   1.241 @@ -420,8 +480,8 @@ static void bvt_sleep(struct domain *d)
   1.242  {
   1.243      if ( test_bit(DF_RUNNING, &d->flags) )
   1.244          cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
   1.245 -    else if ( __task_on_runqueue(d) )
   1.246 -        __del_from_runqueue(d);
   1.247 +    else if ( __task_on_runqueue(RUNLIST(d)) )
   1.248 +        __del_from_runqueue(RUNLIST(d));
   1.249  }
   1.250  
   1.251  void bvt_wake(struct domain *d)
   1.252 @@ -432,10 +492,10 @@ void bvt_wake(struct domain *d)
   1.253      int                  cpu = d->processor;
   1.254  
   1.255      /* If on the runqueue already then someone has done the wakeup work. */
   1.256 -    if ( unlikely(__task_on_runqueue(d)) )
   1.257 +    if ( unlikely(__task_on_runqueue(RUNLIST(d))) )
   1.258          return;
   1.259  
   1.260 -    __add_to_runqueue_head(d);
   1.261 +    __add_to_runqueue_head(RUNLIST(d), RUNQUEUE(d->processor));
   1.262  
   1.263      now = NOW();
   1.264  
   1.265 @@ -465,6 +525,7 @@ struct scheduler sched_bvt_def = {
   1.266      .sched_id = SCHED_BVT,
   1.267      
   1.268      .init_scheduler = bvt_init_scheduler,
   1.269 +    .init_idle_task = bvt_init_idle_task,
   1.270      .alloc_task     = bvt_alloc_task,
   1.271      .add_task       = bvt_add_task,
   1.272      .free_task      = bvt_free_task,
   1.273 @@ -474,7 +535,6 @@ struct scheduler sched_bvt_def = {
   1.274      .adjdom         = bvt_adjdom,
   1.275      .dump_settings  = bvt_dump_settings,
   1.276      .dump_cpu_state = bvt_dump_cpu_state,
   1.277 -    .dump_runq_el   = bvt_dump_runq_el,
   1.278      .sleep          = bvt_sleep,
   1.279      .wake           = bvt_wake,
   1.280  };
     2.1 --- a/xen/common/sched_fair_bvt.c	Fri Jul 23 17:16:11 2004 +0000
     2.2 +++ b/xen/common/sched_fair_bvt.c	Fri Jul 23 22:39:55 2004 +0000
     2.3 @@ -36,28 +36,33 @@
     2.4  /* all per-domain BVT-specific scheduling info is stored here */
     2.5  struct fbvt_dom_info
     2.6  {
     2.7 -    unsigned long mcu_advance;      /* inverse of weight */
     2.8 -    u32           avt;              /* actual virtual time */
     2.9 -    u32           evt;              /* effective virtual time */
    2.10 -    u32           time_slept;       /* amount of time slept */
    2.11 -    int           warpback;         /* warp?  */
    2.12 -    long          warp;             /* virtual time warp */
    2.13 -    long          warpl;            /* warp limit */
    2.14 -    long          warpu;            /* unwarp time requirement */
    2.15 -    s_time_t      warped;           /* time it ran warped last time */
    2.16 -    s_time_t      uwarped;          /* time it ran unwarped last time */
    2.17 +    struct domain       *domain;          /* domain this info belongs to */
    2.18 +    struct list_head    run_list;         /* runqueue pointers */
    2.19 +    unsigned long       mcu_advance;      /* inverse of weight */
    2.20 +    u32                 avt;              /* actual virtual time */
    2.21 +    u32                 evt;              /* effective virtual time */
    2.22 +    u32                 time_slept;       /* amount of time slept */
    2.23 +    int                 warpback;         /* warp?  */
    2.24 +    long                warp;             /* virtual time warp */
    2.25 +    long                warpl;            /* warp limit */
    2.26 +    long                warpu;            /* unwarp time requirement */
    2.27 +    s_time_t            warped;           /* time it ran warped last time */
    2.28 +    s_time_t            uwarped;          /* time it ran unwarped last time */
    2.29  };
    2.30  
    2.31  struct fbvt_cpu_info
    2.32  {
    2.33 -    unsigned long svt;       /* XXX check this is unsigned long! */
    2.34 -    u32           vtb;       /* virtual time bonus */
    2.35 -    u32           r_time;    /* last time to run */  
    2.36 +    struct list_head    runqueue;  /* runqueue for this CPU */
    2.37 +    unsigned long       svt;       /* XXX check this is unsigned long! */
    2.38 +    u32                 vtb;       /* virtual time bonus */
    2.39 +    u32                 r_time;    /* last time to run */  
    2.40  };
    2.41  
    2.42  
    2.43  #define FBVT_INFO(p)  ((struct fbvt_dom_info *)(p)->sched_priv)
    2.44  #define CPU_INFO(cpu) ((struct fbvt_cpu_info *)(schedule_data[cpu]).sched_priv)
    2.45 +#define RUNLIST(p)    (struct list_head *)(&(FBVT_INFO(p)->run_list))
    2.46 +#define RUNQUEUE(cpu) (struct list_head *)&(CPU_INFO(cpu)->runqueue)
    2.47  #define CPU_SVT(cpu)  (CPU_INFO(cpu)->svt)
    2.48  #define LAST_VTB(cpu) (CPU_INFO(cpu)->vtb)
    2.49  #define R_TIME(cpu)   (CPU_INFO(cpu)->r_time) 
    2.50 @@ -127,6 +132,7 @@ void fbvt_add_task(struct domain *p)
    2.51      ASSERT(p   != NULL);
    2.52  
    2.53      inf->mcu_advance = MCU_ADVANCE;
    2.54 +    inf->domain = p;
    2.55      if ( p->domain == IDLE_DOMAIN_ID )
    2.56      {
    2.57          inf->avt = inf->evt = ~0U;
    2.58 @@ -147,6 +153,24 @@ void fbvt_add_task(struct domain *p)
    2.59      return;
    2.60  }
    2.61  
    2.62 +int fbvt_init_idle_task(struct domain *p)
    2.63 +{
    2.64 +    unsigned long flags;
    2.65 +
    2.66 +    if(fbvt_alloc_task(p) < 0) return -1;
    2.67 +
    2.68 +    fbvt_add_task(p);
    2.69 +//printk("< ----- >Initialising idle task for processor %d, address %d, priv %d\n", p->processor, (int)p, (int)p->sched_priv);
    2.70 +    spin_lock_irqsave(&schedule_lock[p->processor], flags);
    2.71 +    set_bit(DF_RUNNING, &p->flags);
    2.72 +    if ( !__task_on_runqueue(RUNLIST(p)) )
    2.73 +    __add_to_runqueue_head(RUNLIST(p), RUNQUEUE(p->processor));
    2.74 +    spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
    2.75 +
    2.76 +    return 0;
    2.77 +}
    2.78 +                                        
    2.79 +
    2.80  /**
    2.81   * fbvt_free_task - free FBVT private structures for a task
    2.82   * @p:             task
    2.83 @@ -261,6 +285,7 @@ static task_slice_t fbvt_do_schedule(s_t
    2.84      struct fbvt_dom_info *next_prime_inf = NULL;
    2.85      task_slice_t        ret;
    2.86  
    2.87 +//if(prev->sched_priv == NULL) printk("----> %d\n", prev->domain);
    2.88      ASSERT(prev->sched_priv != NULL);
    2.89      ASSERT(prev_inf != NULL);
    2.90  
    2.91 @@ -292,48 +317,47 @@ static task_slice_t fbvt_do_schedule(s_t
    2.92          
    2.93          __calc_evt(prev_inf);
    2.94          
    2.95 -        __del_from_runqueue(prev);
    2.96 +        __del_from_runqueue(RUNLIST(prev));
    2.97          
    2.98          if ( domain_runnable(prev) )
    2.99 -            __add_to_runqueue_tail(prev);
   2.100 +            __add_to_runqueue_tail(RUNLIST(prev), RUNQUEUE(cpu));
   2.101      }
   2.102  
   2.103      /* We should at least have the idle task */
   2.104 -    ASSERT(!list_empty(&schedule_data[cpu].runqueue));
   2.105 +    ASSERT(!list_empty(RUNQUEUE(cpu)));
   2.106  
   2.107      /*
   2.108       * scan through the run queue and pick the task with the lowest evt
   2.109       * *and* the task the second lowest evt.
   2.110       * this code is O(n) but we expect n to be small.
   2.111       */
   2.112 -    next       = schedule_data[cpu].idle;
   2.113 -    next_prime = NULL;
   2.114 +    next_inf        = FBVT_INFO(schedule_data[cpu].idle);
   2.115 +    next_prime_inf  = NULL;
   2.116  
   2.117      next_evt       = ~0U;
   2.118      next_prime_evt = ~0U;
   2.119      min_avt        = ~0U;
   2.120  
   2.121 -    list_for_each ( tmp, &schedule_data[cpu].runqueue )
   2.122 +    list_for_each ( tmp, RUNQUEUE(cpu) )
   2.123      {
   2.124 -        p     = list_entry(tmp, struct domain, run_list);
   2.125 -        p_inf = FBVT_INFO(p);
   2.126 +        p_inf = list_entry(tmp, struct fbvt_dom_info, run_list);
   2.127  
   2.128          if ( p_inf->evt < next_evt )
   2.129          {
   2.130 -            next_prime     = next;
   2.131 -            next_prime_evt = next_evt;
   2.132 -            next = p;
   2.133 -            next_evt = p_inf->evt;
   2.134 -        } 
   2.135 +            next_prime_inf  = next_inf;
   2.136 +            next_prime_evt  = next_evt;
   2.137 +            next_inf        = p_inf;
   2.138 +            next_evt        = p_inf->evt;
   2.139 +        }
   2.140          else if ( next_prime_evt == ~0U )
   2.141          {
   2.142 -            next_prime_evt = p_inf->evt;
   2.143 -            next_prime     = p;
   2.144 -        } 
   2.145 +            next_prime_evt  = p_inf->evt;
   2.146 +            next_prime_inf  = p_inf;
   2.147 +        }
   2.148          else if ( p_inf->evt < next_prime_evt )
   2.149          {
   2.150 -            next_prime_evt = p_inf->evt;
   2.151 -            next_prime     = p;
   2.152 +            next_prime_evt  = p_inf->evt;
   2.153 +            next_prime_inf  = p_inf;
   2.154          }
   2.155  
   2.156          /* Determine system virtual time. */
   2.157 @@ -341,6 +365,11 @@ static task_slice_t fbvt_do_schedule(s_t
   2.158              min_avt = p_inf->avt;
   2.159      }
   2.160  
   2.161 +    /* Extract the domain pointers from the dom infos */
   2.162 +    next        = next_inf->domain;
   2.163 +    next_prime  = next_prime_inf->domain;
   2.164 +     
   2.165 +
   2.166      /* Update system virtual time. */
   2.167      if ( min_avt != ~0U )
   2.168          CPU_SVT(cpu) = min_avt;
   2.169 @@ -363,9 +392,6 @@ static task_slice_t fbvt_do_schedule(s_t
   2.170          CPU_SVT(cpu) -= 0xe0000000;
   2.171      }
   2.172  
   2.173 -    next_prime_inf = FBVT_INFO(next_prime);
   2.174 -    next_inf       = FBVT_INFO(next);
   2.175 -    
   2.176      /* check for time_slept overrun for the domain we schedule to run*/
   2.177      if(next_inf->time_slept >= 0xf0000000)
   2.178      {
   2.179 @@ -424,7 +450,7 @@ static task_slice_t fbvt_do_schedule(s_t
   2.180      next->min_slice = ctx_allow;
   2.181      ret.task = next;
   2.182      ret.time = r_time;
   2.183 - 
   2.184 +//printk("NEXT --> domain %d (address %d, processor %d), priv %d\n",next->domain, (int)next, next->processor, (int)next->sched_priv); 
   2.185      return ret;
   2.186  }
   2.187  
   2.188 @@ -432,22 +458,60 @@ static task_slice_t fbvt_do_schedule(s_t
   2.189  static void fbvt_dump_runq_el(struct domain *p)
   2.190  {
   2.191      struct fbvt_dom_info *inf = FBVT_INFO(p);
   2.192 -    
   2.193 -    printk("mcua=%04lu ev=%08u av=%08u sl=%08u",
   2.194 -           inf->mcu_advance, inf->evt, inf->avt, inf->time_slept);
   2.195 +
   2.196 +    printk("mcua=0x%04lX ev=0x%08X av=0x%08X ",
   2.197 +           inf->mcu_advance, inf->evt, inf->avt);
   2.198  }
   2.199  
   2.200  static void fbvt_dump_settings(void)
   2.201  {
   2.202 -    printk("FBVT: mcu=0x%08Xns ctx_allow=0x%08Xns ", (u32)MCU, (s32)ctx_allow );
   2.203 +    printk("BVT: mcu=0x%08Xns ctx_allow=0x%08Xns ", (u32)MCU, (s32)ctx_allow );
   2.204  }
   2.205  
   2.206  static void fbvt_dump_cpu_state(int i)
   2.207  {
   2.208 +    unsigned long flags;
   2.209 +    struct list_head *list, *queue;
   2.210 +    int loop = 0;
   2.211 +    struct fbvt_dom_info *d_inf;
   2.212 +    struct domain *d;
   2.213 +
   2.214 +    spin_lock_irqsave(&schedule_lock[i], flags);
   2.215      printk("svt=0x%08lX ", CPU_SVT(i));
   2.216 +
   2.217 +    queue = RUNQUEUE(i);
   2.218 +    printk("QUEUE rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
   2.219 +        (unsigned long) queue->next, (unsigned long) queue->prev);
   2.220 +
   2.221 +    list_for_each ( list, queue )
   2.222 +    {
   2.223 +        d_inf = list_entry(list, struct fbvt_dom_info, run_list);
   2.224 +        d = d_inf->domain;
   2.225 +        printk("%3d: %u has=%c ", loop++, d->domain,
   2.226 +              test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
   2.227 +        fbvt_dump_runq_el(d);
   2.228 +        printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
   2.229 +        printk("         l: %lx n: %lx  p: %lx\n",
   2.230 +            (unsigned long)list, (unsigned long)list->next,
   2.231 +            (unsigned long)list->prev);
   2.232 +    }
   2.233 +    spin_unlock_irqrestore(&schedule_lock[i], flags);
   2.234  }
   2.235  
   2.236  
    2.237 +/* We use a cache to create the fbvt_dom_infos;
    2.238 +   this function makes sure that the run_list
    2.239 +   is initialised properly. The new domain must
    2.240 +   NOT appear to be on the runqueue */
   2.241 +static void cache_constructor(void *arg1, kmem_cache_t *arg2, unsigned long arg3)
   2.242 +{
   2.243 +    struct fbvt_dom_info *dom_inf = (struct fbvt_dom_info*)arg1;
   2.244 +    dom_inf->run_list.next = NULL;
   2.245 +    dom_inf->run_list.prev = NULL;
   2.246 +}
   2.247 +
   2.248 +                     
   2.249 +
   2.250  /* Initialise the data structures. */
   2.251  int fbvt_init_scheduler()
   2.252  {
   2.253 @@ -456,6 +520,7 @@ int fbvt_init_scheduler()
   2.254      for ( i = 0; i < NR_CPUS; i++ )
   2.255      {
   2.256          schedule_data[i].sched_priv = kmalloc(sizeof(struct fbvt_cpu_info));
   2.257 +        INIT_LIST_HEAD(RUNQUEUE(i));
   2.258          if ( schedule_data[i].sched_priv == NULL )
   2.259          {
   2.260              printk("Failed to allocate FBVT scheduler per-CPU memory!\n");
   2.261 @@ -467,7 +532,7 @@ int fbvt_init_scheduler()
   2.262  
   2.263      dom_info_cache = kmem_cache_create("FBVT dom info",
   2.264                                         sizeof(struct fbvt_dom_info),
   2.265 -                                       0, 0, NULL, NULL);
   2.266 +                                       0, 0, cache_constructor, NULL);
   2.267  
   2.268      if ( dom_info_cache == NULL )
   2.269      {
   2.270 @@ -482,8 +547,8 @@ static void fbvt_sleep(struct domain *d)
   2.271  {
   2.272      if ( test_bit(DF_RUNNING, &d->flags) )
   2.273          cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
   2.274 -    else if ( __task_on_runqueue(d) )
   2.275 -        __del_from_runqueue(d);
   2.276 +    else if ( __task_on_runqueue(RUNLIST(d)) )
   2.277 +        __del_from_runqueue(RUNLIST(d));
   2.278  }
   2.279  
   2.280  static void fbvt_wake(struct domain *d)
   2.281 @@ -494,12 +559,15 @@ static void fbvt_wake(struct domain *d)
   2.282      int                   cpu = d->processor;
   2.283      s32                   io_warp;
   2.284  
   2.285 +//printk("-|--> Adding new domain %d\n",d->domain);
   2.286 +//printk("-|--> Current%d  (address %d, processor %d)  %d\n",current->domain,(int)current, current->processor, (int)current->sched_priv);
   2.287      /* If on the runqueue already then someone has done the wakeup work. */
   2.288 -    if ( unlikely(__task_on_runqueue(d)) )
   2.289 +    if ( unlikely(__task_on_runqueue(RUNLIST(d))) )
   2.290          return;
   2.291 -
   2.292 -    __add_to_runqueue_head(d);
   2.293 -
   2.294 +//printk("----> Not on runqueue\n");
   2.295 +    __add_to_runqueue_head(RUNLIST(d), RUNQUEUE(cpu));
   2.296 +//printk(" ---> %d\n",(int)current->sched_priv);
   2.297 + 
   2.298      now = NOW();
   2.299  
   2.300  #if 0
   2.301 @@ -549,7 +617,8 @@ static void fbvt_wake(struct domain *d)
   2.302      __calc_evt(inf);
   2.303  
   2.304      curr = schedule_data[cpu].curr;
   2.305 -
   2.306 +//printk(" ---> %d\n",(int)current->sched_priv);
   2.307 + 
   2.308      /* Currently-running domain should run at least for ctx_allow. */
   2.309      min_time = curr->lastschd + curr->min_slice;
   2.310      
   2.311 @@ -557,7 +626,8 @@ static void fbvt_wake(struct domain *d)
   2.312          cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
   2.313      else if ( schedule_data[cpu].s_timer.expires > (min_time + TIME_SLOP) )
   2.314          mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
   2.315 -}
   2.316 +//printk(" ---> %d\n",(int)current->sched_priv);
   2.317 +} 
   2.318  
   2.319  struct scheduler sched_fbvt_def = {
   2.320      .name     = "Fair Borrowed Virtual Time",
   2.321 @@ -565,6 +635,7 @@ struct scheduler sched_fbvt_def = {
   2.322      .sched_id = SCHED_FBVT,
   2.323      
   2.324      .init_scheduler = fbvt_init_scheduler,
   2.325 +    .init_idle_task = fbvt_init_idle_task,
   2.326      .alloc_task     = fbvt_alloc_task,
   2.327      .add_task       = fbvt_add_task,
   2.328      .free_task      = fbvt_free_task,
   2.329 @@ -574,7 +645,6 @@ struct scheduler sched_fbvt_def = {
   2.330      .adjdom         = fbvt_adjdom,
   2.331      .dump_settings  = fbvt_dump_settings,
   2.332      .dump_cpu_state = fbvt_dump_cpu_state,
   2.333 -    .dump_runq_el   = fbvt_dump_runq_el,
   2.334      .sleep          = fbvt_sleep,
   2.335      .wake           = fbvt_wake,
   2.336  };
     3.1 --- a/xen/common/sched_rrobin.c	Fri Jul 23 17:16:11 2004 +0000
     3.2 +++ b/xen/common/sched_rrobin.c	Fri Jul 23 22:39:55 2004 +0000
     3.3 @@ -10,30 +10,124 @@
     3.4  #include <xen/ac_timer.h>
     3.5  #include <xen/softirq.h>
     3.6  #include <xen/time.h>
     3.7 +#include <xen/slab.h>
     3.8  
     3.9  #define TIME_SLOP      (s32)MICROSECS(50)     /* allow time to slip a bit */
    3.10  
    3.11  static s_time_t rr_slice = MILLISECS(10);
    3.12  
    3.13 +/* Only runqueue pointers and domain pointer */
    3.14 +struct rrobin_dom_info
    3.15 +{
    3.16 +    struct list_head run_list;
    3.17 +    struct domain    *domain;
    3.18 +};
    3.19 +
    3.20 +#define RR_INFO(d)      ((struct rrobin_dom_info *)d->sched_priv)
    3.21 +#define RUNLIST(d)      (struct list_head *)&(RR_INFO(d)->run_list)
    3.22 +#define RUNQUEUE(cpu)   RUNLIST(schedule_data[cpu].idle)
    3.23 +
    3.24 +// TODO remove following line
    3.25 +static void rr_dump_cpu_state(int cpu);
    3.26 +
    3.27 +/* SLAB cache for struct rrobin_dom_info objects */
    3.28 +static kmem_cache_t *dom_info_cache;
    3.29 +
    3.30 +
    3.31 +/* Ensures proper initialisation of the dom_info */
    3.32 +static void cache_constructor(void *arg1, kmem_cache_t *arg2, unsigned long arg3)
    3.33 +{
    3.34 +    struct rrobin_dom_info *dom_inf = (struct rrobin_dom_info*)arg1;
    3.35 +    dom_inf->run_list.next = NULL;
    3.36 +    dom_inf->run_list.prev = NULL;
    3.37 +}
    3.38 +            
    3.39 +
    3.40 +/* Initialises the runqueues and creates the domain info cache */
    3.41 +static int rr_init_scheduler()
    3.42 +{
    3.43 +    int i;
    3.44 +
    3.45 +    for ( i = 0; i < NR_CPUS; i++ )
    3.46 +        INIT_LIST_HEAD(RUNQUEUE(i));
    3.47 +   
    3.48 +    dom_info_cache = kmem_cache_create("FBVT dom info", 
    3.49 +                                        sizeof(struct rrobin_dom_info), 
    3.50 +                                        0, 0, cache_constructor, NULL);
    3.51 +
    3.52 +    if(dom_info_cache == NULL)
    3.53 +    {
    3.54 +        printk("Could not allocate SLAB cache.\n");
    3.55 +        return -1;
    3.56 +    }
    3.57 +    return 0;                                                                
    3.58 +}
    3.59 +
    3.60 +/* Allocates memory for per domain private scheduling data*/
    3.61 +static int rr_alloc_task(struct domain *d)
    3.62 +{
    3.63 +    d->sched_priv = kmem_cache_alloc(dom_info_cache);
    3.64 +    if ( d->sched_priv == NULL )
    3.65 +        return -1;
    3.66 +
    3.67 +   return 0;
    3.68 +}
    3.69 +
    3.70 +/* Setup the rr_dom_info */
    3.71 +static void rr_add_task(struct domain *p)
    3.72 +{
    3.73 +    struct rrobin_dom_info *inf;
    3.74 +    RR_INFO(p)->domain = p;
    3.75 +    inf = RR_INFO(p);
    3.76 +}
    3.77 +
    3.78 +/* Frees memory used by domain info */
    3.79 +static void rr_free_task(struct domain *p)
    3.80 +{
    3.81 +    ASSERT( p->sched_priv != NULL );
    3.82 +    kmem_cache_free( dom_info_cache, p->sched_priv );
    3.83 +}
    3.84 +
    3.85 +/* Initialises idle task */
    3.86 +static int rr_init_idle_task(struct domain *p)
    3.87 +{
    3.88 +    unsigned long flags;
    3.89 +    if(rr_alloc_task(p) < 0) return -1;
    3.90 +    rr_add_task(p);
    3.91 +
    3.92 +    spin_lock_irqsave(&schedule_lock[p->processor], flags);
    3.93 +    set_bit(DF_RUNNING, &p->flags);
    3.94 +    if ( !__task_on_runqueue(RUNLIST(p)) )
    3.95 +         __add_to_runqueue_head(RUNLIST(p), RUNQUEUE(p->processor));
    3.96 +    spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
    3.97 +    return 0;
    3.98 +}
    3.99 +
   3.100 +
   3.101 +/* Main scheduling function */
   3.102  static task_slice_t rr_do_schedule(s_time_t now)
   3.103  {
   3.104      struct domain *prev = current;
   3.105      int cpu = current->processor;
   3.106 +    
   3.107      task_slice_t ret;
   3.108 - 
   3.109 -    __del_from_runqueue(prev);
   3.110 +
   3.111 +    if(!is_idle_task(prev))
   3.112 +    {
   3.113 +        __del_from_runqueue(RUNLIST(prev));
   3.114      
   3.115 -    if ( domain_runnable(prev) )
   3.116 -      __add_to_runqueue_tail(prev);
   3.117 +        if ( domain_runnable(prev) )
   3.118 +            __add_to_runqueue_tail(RUNLIST(prev), RUNQUEUE(cpu));
   3.119 +    }
   3.120      
   3.121 -    ret.task = list_entry(schedule_data[cpu].runqueue.next,
   3.122 -                    struct domain, run_list);
   3.123 -
   3.124 +    ret.task = list_entry(  RUNQUEUE(cpu).next->next, 
   3.125 +                            struct rrobin_dom_info, 
   3.126 +                            run_list)->domain;
   3.127      ret.time = rr_slice;
   3.128 -
   3.129      return ret;
   3.130  }
   3.131  
   3.132 +/* Set/retrive control parameter(s) */
   3.133  static int rr_ctl(struct sched_ctl_cmd *cmd)
   3.134  {
   3.135      if ( cmd->direction == SCHED_INFO_PUT )
   3.136 @@ -57,8 +151,8 @@ static void rr_sleep(struct domain *d)
   3.137  {
   3.138      if ( test_bit(DF_RUNNING, &d->flags) )
   3.139          cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
   3.140 -    else if ( __task_on_runqueue(d) )
   3.141 -        __del_from_runqueue(d);
   3.142 +    else if ( __task_on_runqueue(RUNLIST(d)) )
   3.143 +        __del_from_runqueue(RUNLIST(d));
   3.144  }
   3.145  
   3.146  void rr_wake(struct domain *d)
   3.147 @@ -68,11 +162,10 @@ void rr_wake(struct domain *d)
   3.148      int                  cpu = d->processor;
   3.149  
   3.150      /* If on the runqueue already then someone has done the wakeup work. */
   3.151 -    if ( unlikely(__task_on_runqueue(d)) )
   3.152 +    if ( unlikely(__task_on_runqueue(RUNLIST(d))))
   3.153          return;
   3.154  
   3.155 -    __add_to_runqueue_head(d);
   3.156 -
   3.157 +    __add_to_runqueue_head(RUNLIST(d), RUNQUEUE(cpu));
   3.158      now = NOW();
   3.159  
   3.160      curr = schedule_data[cpu].curr;
   3.161 @@ -86,14 +179,55 @@ void rr_wake(struct domain *d)
   3.162          mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
   3.163  }
   3.164  
   3.165 +
   3.166 +static void rr_dump_domain(struct domain *d)
   3.167 +{
   3.168 +    printk("%u has=%c ", d->domain,
   3.169 +           test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
   3.170 +    printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
   3.171 +}
   3.172 +
   3.173 +static void rr_dump_cpu_state(int i)
   3.174 +{
   3.175 +    unsigned long flags;
   3.176 +    struct list_head *list, *queue;
   3.177 +    int loop = 0;
   3.178 +    struct rrobin_dom_info *d_inf;
   3.179 +
   3.180 +    spin_lock_irqsave(&schedule_lock[i], flags);
   3.181 +
   3.182 +    queue = RUNQUEUE(i);
   3.183 +    printk("QUEUE rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
   3.184 +        (unsigned long) queue->next, (unsigned long) queue->prev);
   3.185 +
   3.186 +    printk("%3d: ",loop++);
   3.187 +    d_inf = list_entry(queue, struct rrobin_dom_info, run_list);
   3.188 +    rr_dump_domain(d_inf->domain);
   3.189 + 
   3.190 +    list_for_each ( list, queue )
   3.191 +    {
   3.192 +        printk("%3d: ",loop++);
   3.193 +        d_inf = list_entry(list, struct rrobin_dom_info, run_list);
   3.194 +        rr_dump_domain(d_inf->domain);
   3.195 +    }
   3.196 +    spin_unlock_irqrestore(&schedule_lock[i], flags);
   3.197 +}
   3.198 +
   3.199 +
   3.200  struct scheduler sched_rrobin_def = {
   3.201      .name     = "Round-Robin Scheduler",
   3.202      .opt_name = "rrobin",
   3.203      .sched_id = SCHED_RROBIN,
   3.204 -
   3.205 +    
   3.206 +    .init_idle_task = rr_init_idle_task,
   3.207 +    .alloc_task     = rr_alloc_task,
   3.208 +    .add_task       = rr_add_task,
   3.209 +    .free_task      = rr_free_task,
   3.210 +    .init_scheduler = rr_init_scheduler,
   3.211      .do_schedule    = rr_do_schedule,
   3.212      .control        = rr_ctl,
   3.213      .dump_settings  = rr_dump_settings,
   3.214 +    .dump_cpu_state = rr_dump_cpu_state,
   3.215      .sleep          = rr_sleep,
   3.216      .wake           = rr_wake,
   3.217  };
     4.1 --- a/xen/common/schedule.c	Fri Jul 23 17:16:11 2004 +0000
     4.2 +++ b/xen/common/schedule.c	Fri Jul 23 22:39:55 2004 +0000
     4.3 @@ -157,18 +157,10 @@ void sched_rem_domain(struct domain *d)
     4.4  
     4.5  void init_idle_task(void)
     4.6  {
     4.7 -    unsigned long flags;
     4.8      struct domain *d = current;
     4.9  
    4.10 -    if ( SCHED_OP(alloc_task, d) < 0)
    4.11 -        panic("Failed to allocate scheduler private data for idle task");
    4.12 -    SCHED_OP(add_task, d);
    4.13 -
    4.14 -    spin_lock_irqsave(&schedule_lock[d->processor], flags);
    4.15 -    set_bit(DF_RUNNING, &d->flags);
    4.16 -    if ( !__task_on_runqueue(d) )
    4.17 -        __add_to_runqueue_head(d);
    4.18 -    spin_unlock_irqrestore(&schedule_lock[d->processor], flags);
    4.19 +    if ( SCHED_OP(init_idle_task, d) < 0)
    4.20 +        panic("Failed to initialise idle task for processor %d", d->processor);
    4.21  }
    4.22  
    4.23  void domain_sleep(struct domain *d)
    4.24 @@ -193,7 +185,6 @@ void domain_wake(struct domain *d)
    4.25  {
    4.26      unsigned long       flags;
    4.27      int                 cpu = d->processor;
    4.28 -
    4.29      spin_lock_irqsave(&schedule_lock[cpu], flags);
    4.30      if ( likely(domain_runnable(d)) )
    4.31      {
    4.32 @@ -342,7 +333,7 @@ void __enter_scheduler(void)
    4.33      rem_ac_timer(&schedule_data[cpu].s_timer);
    4.34      
    4.35      ASSERT(!in_irq());
    4.36 -    ASSERT(__task_on_runqueue(prev));
    4.37 +    /* TODO: move to scheduler-specific code: ASSERT(__task_on_runqueue(prev)); */
    4.38  
    4.39      if ( test_bit(DF_BLOCKED, &prev->flags) )
    4.40      {
    4.41 @@ -490,7 +481,6 @@ void __init scheduler_init(void)
    4.42  
    4.43      for ( i = 0; i < NR_CPUS; i++ )
    4.44      {
    4.45 -        INIT_LIST_HEAD(&schedule_data[i].runqueue);
    4.46          spin_lock_init(&schedule_lock[i]);
    4.47          schedule_data[i].curr = &idle0_task;
    4.48          
    4.49 @@ -547,31 +537,8 @@ void schedulers_start(void)
    4.50  }
    4.51  
    4.52  
    4.53 -static void dump_rqueue(struct list_head *queue, char *name)
    4.54 -{
    4.55 -    struct list_head *list;
    4.56 -    int loop = 0;
    4.57 -    struct domain *d;
    4.58 -
    4.59 -    printk("QUEUE %s %lx   n: %lx, p: %lx\n", name,  (unsigned long)queue,
    4.60 -           (unsigned long) queue->next, (unsigned long) queue->prev);
    4.61 -
    4.62 -    list_for_each ( list, queue )
    4.63 -    {
    4.64 -        d = list_entry(list, struct domain, run_list);
    4.65 -        printk("%3d: %u has=%c ", loop++, d->domain, 
    4.66 -               test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
    4.67 -        SCHED_OP(dump_runq_el, d);
    4.68 -        printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
    4.69 -        printk("         l: %lx n: %lx  p: %lx\n",
    4.70 -               (unsigned long)list, (unsigned long)list->next,
    4.71 -               (unsigned long)list->prev);
    4.72 -    }
    4.73 -}
    4.74 -
    4.75  void dump_runq(u_char key, void *dev_id, struct pt_regs *regs)
    4.76  {
    4.77 -    unsigned long flags; 
    4.78      s_time_t      now = NOW();
    4.79      int           i;
    4.80  
    4.81 @@ -580,11 +547,8 @@ void dump_runq(u_char key, void *dev_id,
    4.82      printk("NOW=0x%08X%08X\n",  (u32)(now>>32), (u32)now); 
    4.83      for ( i = 0; i < smp_num_cpus; i++ )
    4.84      {
    4.85 -        spin_lock_irqsave(&schedule_lock[i], flags);
    4.86          printk("CPU[%02d] ", i);
    4.87          SCHED_OP(dump_cpu_state,i);
    4.88 -        dump_rqueue(&schedule_data[i].runqueue, "rq"); 
    4.89 -        spin_unlock_irqrestore(&schedule_lock[i], flags);
    4.90      }
    4.91  }
    4.92  
     5.1 --- a/xen/include/xen/sched-if.h	Fri Jul 23 17:16:11 2004 +0000
     5.2 +++ b/xen/include/xen/sched-if.h	Fri Jul 23 22:39:55 2004 +0000
     5.3 @@ -11,7 +11,6 @@
     5.4  
     5.5  typedef struct schedule_data_st
     5.6  {
     5.7 -    struct list_head    runqueue;       /* runqueue */
     5.8      struct domain *curr;           /* current task */
     5.9      struct domain *idle;           /* idle task for this cpu */
    5.10      void *              sched_priv;
    5.11 @@ -35,6 +34,7 @@ struct scheduler
    5.12      unsigned int sched_id;  /* ID for this scheduler             */
    5.13  
    5.14      int          (*init_scheduler) ();
    5.15 +    int          (*init_idle_task) (struct domain *);
    5.16      int          (*alloc_task)     (struct domain *);
    5.17      void         (*add_task)       (struct domain *);
    5.18      void         (*free_task)      (struct domain *);
    5.19 @@ -48,7 +48,6 @@ struct scheduler
    5.20                                      struct sched_adjdom_cmd *);
    5.21      void         (*dump_settings)  (void);
    5.22      void         (*dump_cpu_state) (int);
    5.23 -    void         (*dump_runq_el)   (struct domain *);
    5.24      int          (*prn_state)      (int);
    5.25  };
    5.26  
    5.27 @@ -59,32 +58,24 @@ extern schedule_data_t schedule_data[];
    5.28   * Wrappers for run-queue management. Must be called with the schedule_lock
    5.29   * held.
    5.30   */
    5.31 -static inline void __add_to_runqueue_head(struct domain * p)
    5.32 -{    
    5.33 -    list_add(&p->run_list, &schedule_data[p->processor].runqueue);
    5.34 +static inline void __add_to_runqueue_head(struct list_head *run_list, struct list_head *runqueue)
    5.35 +{
    5.36 +    list_add(run_list, runqueue);
    5.37  }
    5.38  
    5.39 -static inline void __add_to_runqueue_tail(struct domain * p)
    5.40 +static inline void __add_to_runqueue_tail(struct list_head *run_list, struct list_head *runqueue)
    5.41  {
    5.42 -    list_add_tail(&p->run_list, &schedule_data[p->processor].runqueue);
    5.43 +    list_add_tail(run_list, runqueue);
    5.44  }
    5.45  
    5.46 -static inline void __del_from_runqueue(struct domain * p)
    5.47 +static inline void __del_from_runqueue(struct list_head *run_list)
    5.48  {
    5.49 -    list_del(&p->run_list);
    5.50 -    p->run_list.next = NULL;
    5.51 +    list_del(run_list);
    5.52 +    run_list->next = NULL;
    5.53  }
    5.54  
    5.55 -static inline int __task_on_runqueue(struct domain *p)
    5.56 +static inline int __task_on_runqueue(struct list_head *run_list)
    5.57  {
    5.58 -    return p->run_list.next != NULL;
    5.59 +    return run_list->next != NULL;
    5.60  }
    5.61  
    5.62 -#define next_domain(p) \\
    5.63 -        list_entry((p)->run_list.next, struct domain, run_list)
    5.64 -
    5.65 -
    5.66 -static inline int __runqueue_empty(int cpu)
    5.67 -{
    5.68 -    return list_empty(&schedule_data[cpu].runqueue);
    5.69 -}
     6.1 --- a/xen/include/xen/sched.h	Fri Jul 23 17:16:11 2004 +0000
     6.2 +++ b/xen/include/xen/sched.h	Fri Jul 23 22:39:55 2004 +0000
     6.3 @@ -100,7 +100,6 @@ struct domain
     6.4      unsigned int     xenheap_pages;   /* # pages allocated from Xen heap    */
     6.5  
     6.6      /* Scheduling. */
     6.7 -    struct list_head run_list;
     6.8      int              shutdown_code; /* code value from OS (if DF_SHUTDOWN). */
     6.9      s_time_t         lastschd;      /* time this domain was last scheduled */
    6.10      s_time_t         lastdeschd;    /* time this domain was last descheduled */