ia64/xen-unstable

changeset 2596:ebe6012dace7

bitkeeper revision 1.1159.99.5 (416279ea46si6muiQ6MrkeuqUATXmA)

Fixed locking in the remaining schedulers.
author kaf24@freefall.cl.cam.ac.uk
date Tue Oct 05 10:39:38 2004 +0000 (2004-10-05)
parents fe2f4bbcf869
children 92fff25bf21e
files xen/common/Makefile xen/common/sched_atropos.c xen/common/sched_fair_bvt.c xen/common/sched_rrobin.c xen/common/schedule.c
line diff
     1.1 --- a/xen/common/Makefile	Tue Oct 05 09:53:10 2004 +0000
     1.2 +++ b/xen/common/Makefile	Tue Oct 05 10:39:38 2004 +0000
     1.3 @@ -19,14 +19,6 @@ ifneq ($(trace),y)
     1.4  OBJS := $(subst trace.o,,$(OBJS))
     1.5  endif
     1.6  
     1.7 -ifneq ($(broken_schedulers),y)
     1.8 -OBJS := $(subst sched_atropos.o,,$(OBJS))
     1.9 -OBJS := $(subst sched_fair_bvt.o,,$(OBJS))
    1.10 -OBJS := $(subst sched_rrobin.o,,$(OBJS))
    1.11 -else
    1.12 -CFLAGS += -DBROKEN_SCHEDULERS
    1.13 -endif
    1.14 -
    1.15  default: $(OBJS)
    1.16  	$(LD) $(LDFLAGS) -r -o common.o $(OBJS)
    1.17  
     2.1 --- a/xen/common/sched_atropos.c	Tue Oct 05 09:53:10 2004 +0000
     2.2 +++ b/xen/common/sched_atropos.c	Tue Oct 05 10:39:38 2004 +0000
     2.3 @@ -54,10 +54,8 @@ struct at_dom_info
     2.4  /* Atropos-specific per-CPU data */
     2.5  struct at_cpu_info
     2.6  {
     2.7 -    spinlock_t runq_lock;
     2.8 -    struct list_head runq;  /* run queue */
     2.9 -    spinlock_t waitq_lock;
    2.10 -    struct list_head waitq; /* wait queue*/
    2.11 +    struct list_head runq;
    2.12 +    struct list_head waitq;
    2.13  };
    2.14  
    2.15  
    2.16 @@ -71,14 +69,8 @@ struct at_cpu_info
    2.17  
    2.18  static void at_dump_cpu_state(int cpu);
    2.19  
    2.20 -
    2.21 -/* SLAB cache for struct at_dom_info objects */
    2.22  static xmem_cache_t *dom_info_cache;
    2.23  
    2.24 -/*
    2.25 - * Wrappers for run-queue management. Must be called with the run_lock
    2.26 - * held.
    2.27 - */
    2.28  static inline void __add_to_runqueue_head(struct domain *d)
    2.29  {
    2.30      list_add(RUNLIST(d), RUNQ(d->processor));
    2.31 @@ -590,8 +582,6 @@ static int at_init_scheduler()
    2.32              return -1;
    2.33          INIT_LIST_HEAD(WAITQ(i));
    2.34          INIT_LIST_HEAD(RUNQ(i));
    2.35 -        spin_lock_init(&CPU_INFO(i)->runq_lock);       
    2.36 -        spin_lock_init(&CPU_INFO(i)->waitq_lock);        
    2.37      }
    2.38  
    2.39      dom_info_cache = xmem_cache_create("Atropos dom info",
     3.1 --- a/xen/common/sched_fair_bvt.c	Tue Oct 05 09:53:10 2004 +0000
     3.2 +++ b/xen/common/sched_fair_bvt.c	Tue Oct 05 10:39:38 2004 +0000
     3.3 @@ -52,9 +52,8 @@ struct fbvt_dom_info
     3.4  
     3.5  struct fbvt_cpu_info
     3.6  {
     3.7 -    spinlock_t          run_lock;  /* protects runqueue */
     3.8 -    struct list_head    runqueue;  /* runqueue for this CPU */
     3.9 -    unsigned long       svt;       /* XXX check this is unsigned long! */
    3.10 +    struct list_head    runqueue;
    3.11 +    unsigned long       svt;
    3.12      u32                 vtb;       /* virtual time bonus */
    3.13      u32                 r_time;    /* last time to run */  
    3.14  };
    3.15 @@ -74,14 +73,8 @@ struct fbvt_cpu_info
    3.16  static s32 ctx_allow = (s32)MILLISECS(5);     /* context switch allowance */
    3.17  static s32 max_vtb   = (s32)MILLISECS(5);
    3.18  
    3.19 -/* SLAB cache for struct fbvt_dom_info objects */
    3.20  static xmem_cache_t *dom_info_cache;
    3.21  
    3.22 -
    3.23 -/*
    3.24 - * Wrappers for run-queue management. Must be called with the run_lock
    3.25 - * held.
    3.26 - */
    3.27  static inline void __add_to_runqueue_head(struct domain *d)
    3.28  {
    3.29      list_add(RUNLIST(d), RUNQUEUE(d->processor));
    3.30 @@ -140,12 +133,11 @@ static void __calc_evt(struct fbvt_dom_i
    3.31   *
    3.32   * Returns non-zero on failure.
    3.33   */
    3.34 -int fbvt_alloc_task(struct domain *p)
    3.35 +int fbvt_alloc_task(struct domain *d)
    3.36  {
    3.37 -    p->sched_priv = xmem_cache_alloc(dom_info_cache);
    3.38 -    if ( p->sched_priv == NULL )
    3.39 +    if ( (d->sched_priv = xmem_cache_alloc(dom_info_cache)) == NULL )
    3.40          return -1;
    3.41 -    
    3.42 +    memset(d->sched_priv, 0, sizeof(struct fbvt_dom_info));
    3.43      return 0;
    3.44  }
    3.45  
    3.46 @@ -183,64 +175,33 @@ void fbvt_add_task(struct domain *p)
    3.47  
    3.48  int fbvt_init_idle_task(struct domain *p)
    3.49  {
    3.50 -    unsigned long flags;
    3.51 -
    3.52 -    if(fbvt_alloc_task(p) < 0) return -1;
    3.53 +    if ( fbvt_alloc_task(p) < 0 )
    3.54 +        return -1;
    3.55  
    3.56      fbvt_add_task(p);
    3.57 -    spin_lock_irqsave(&CPU_INFO(p->processor)->run_lock, flags);
    3.58 +
    3.59      set_bit(DF_RUNNING, &p->flags);
    3.60      if ( !__task_on_runqueue(p) )
    3.61 -    __add_to_runqueue_head(p);
    3.62 -    spin_unlock_irqrestore(&CPU_INFO(p->processor)->run_lock, flags);
    3.63 +        __add_to_runqueue_head(p);
    3.64  
    3.65      return 0;
    3.66  }
    3.67                                          
    3.68  static void fbvt_wake(struct domain *d)
    3.69  {
    3.70 -    unsigned long        flags;
    3.71      struct fbvt_dom_info *inf = FBVT_INFO(d);
    3.72      struct domain        *curr;
    3.73      s_time_t             now, min_time;
    3.74      int                  cpu = d->processor;
    3.75      s32                  io_warp;
    3.76  
    3.77 -    /* The runqueue accesses must be protected */
    3.78 -    spin_lock_irqsave(&CPU_INFO(cpu)->run_lock, flags);
    3.79 -    
    3.80 -    /* If on the runqueue already then someone has done the wakeup work. */
    3.81      if ( unlikely(__task_on_runqueue(d)) )
    3.82 -    {
    3.83 -        spin_unlock_irqrestore(&CPU_INFO(cpu)->run_lock, flags); 
    3.84          return;
    3.85 -    }    
    3.86 -    
    3.87 +
    3.88      __add_to_runqueue_head(d);
    3.89   
    3.90      now = NOW();
    3.91  
    3.92 -#if 0
    3.93 -    /*
    3.94 -     * XXX KAF: This was fbvt_unpause(). Not sure if it's the right thing
    3.95 -     * to do, in light of the stuff that fbvt_wake_up() does.
    3.96 -     * e.g., setting 'inf->avt = CPU_SVT(cpu);' would make the later test
    3.97 -     * 'inf->avt < CPU_SVT(cpu)' redundant!
    3.98 -     */
    3.99 -    if ( d->domain == IDLE_DOMAIN_ID )
   3.100 -    {
   3.101 -        inf->avt = inf->evt = ~0U;
   3.102 -    } 
   3.103 -    else 
   3.104 -    {
   3.105 -        /* Set avt to system virtual time. */
   3.106 -        inf->avt = CPU_SVT(cpu);
   3.107 -        /* Set some default values here. */
   3.108 -        LAST_VTB(cpu) = 0;
   3.109 -        __calc_evt(inf);
   3.110 -    }
   3.111 -#endif
   3.112 -
   3.113      /* Set the BVT parameters. */
   3.114      if ( inf->avt < CPU_SVT(cpu) )
   3.115      {
   3.116 @@ -265,11 +226,6 @@ static void fbvt_wake(struct domain *d)
   3.117      inf->warpback  = 1;
   3.118      inf->warped    = now;
   3.119      __calc_evt(inf);
   3.120 -    spin_unlock_irqrestore(&CPU_INFO(cpu)->run_lock, flags);
   3.121 -    
   3.122 -    /* Access to schedule_data protected by schedule_lock */
   3.123 -    spin_lock_irqsave(&schedule_data[cpu].schedule_lock, flags);
   3.124 -    
   3.125   
   3.126      curr = schedule_data[cpu].curr;
   3.127   
   3.128 @@ -280,47 +236,34 @@ static void fbvt_wake(struct domain *d)
   3.129          cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
   3.130      else if ( schedule_data[cpu].s_timer.expires > (min_time + TIME_SLOP) )
   3.131          mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
   3.132 -
   3.133 -    spin_unlock_irqrestore(&schedule_data[cpu].schedule_lock, flags);   
   3.134  }
   3.135  
   3.136  
   3.137  static void fbvt_sleep(struct domain *d)
   3.138  {
   3.139 -    unsigned long flags;
   3.140 -
   3.141 -    
   3.142      if ( test_bit(DF_RUNNING, &d->flags) )
   3.143          cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
   3.144 -    else
   3.145 -    {
   3.146 -         /* The runqueue accesses must be protected */
   3.147 -        spin_lock_irqsave(&CPU_INFO(d->processor)->run_lock, flags);       
   3.148 -    
   3.149 -        if ( __task_on_runqueue(d) )
   3.150 -            __del_from_runqueue(d);
   3.151 -
   3.152 -        spin_unlock_irqrestore(&CPU_INFO(d->processor)->run_lock, flags);
   3.153 -    }
   3.154 +    else if ( __task_on_runqueue(d) )
   3.155 +        __del_from_runqueue(d);
   3.156  }
   3.157  
   3.158  
   3.159  /**
   3.160   * fbvt_free_task - free FBVT private structures for a task
   3.161 - * @p:             task
   3.162 + * @d:             task
   3.163   */
   3.164 -void fbvt_free_task(struct domain *p)
   3.165 +void fbvt_free_task(struct domain *d)
   3.166  {
   3.167 -    ASSERT( p->sched_priv != NULL );
   3.168 -    xmem_cache_free( dom_info_cache, p->sched_priv );
   3.169 +    ASSERT(d->sched_priv != NULL);
   3.170 +    xmem_cache_free(dom_info_cache, d->sched_priv);
   3.171  }
   3.172  
   3.173  /* 
   3.174   * Block the currently-executing domain until a pertinent event occurs.
   3.175   */
   3.176 -static void fbvt_do_block(struct domain *p)
   3.177 +static void fbvt_do_block(struct domain *d)
   3.178  {
   3.179 -    FBVT_INFO(p)->warpback = 0; 
   3.180 +    FBVT_INFO(d)->warpback = 0; 
   3.181  }
   3.182  
   3.183  /* Control the scheduler. */
   3.184 @@ -347,7 +290,6 @@ int fbvt_adjdom(struct domain *p,
   3.185                  struct sched_adjdom_cmd *cmd)
   3.186  {
   3.187      struct fbvt_adjdom *params = &cmd->u.fbvt;
   3.188 -    unsigned long flags;
   3.189  
   3.190      if ( cmd->direction == SCHED_INFO_PUT )
   3.191      {
   3.192 @@ -367,7 +309,6 @@ int fbvt_adjdom(struct domain *p,
   3.193          if ( mcu_adv == 0 )
   3.194              return -EINVAL;
   3.195          
   3.196 -        spin_lock_irqsave(&CPU_INFO(p->processor)->run_lock, flags);   
   3.197          inf->mcu_advance = mcu_adv;
   3.198          inf->warp = warp;
   3.199          inf->warpl = warpl;
   3.200 @@ -377,19 +318,14 @@ int fbvt_adjdom(struct domain *p,
   3.201                  "warpl=%ld, warpu=%ld\n",
   3.202                  p->domain, inf->mcu_advance, inf->warp,
   3.203                  inf->warpl, inf->warpu );
   3.204 -
   3.205 -        spin_unlock_irqrestore(&CPU_INFO(p->processor)->run_lock, flags);
   3.206      }
   3.207      else if ( cmd->direction == SCHED_INFO_GET )
   3.208      {
   3.209          struct fbvt_dom_info *inf = FBVT_INFO(p);
   3.210 -
   3.211 -        spin_lock_irqsave(&CPU_INFO(p->processor)->run_lock, flags);   
   3.212          params->mcu_adv = inf->mcu_advance;
   3.213          params->warp    = inf->warp;
   3.214          params->warpl   = inf->warpl;
   3.215          params->warpu   = inf->warpu;
   3.216 -        spin_unlock_irqrestore(&CPU_INFO(p->processor)->run_lock, flags);
   3.217      }
   3.218      
   3.219      return 0;
   3.220 @@ -405,7 +341,6 @@ int fbvt_adjdom(struct domain *p,
   3.221   */
   3.222  static task_slice_t fbvt_do_schedule(s_time_t now)
   3.223  {
   3.224 -    unsigned long flags;
   3.225      struct domain *prev = current, *next = NULL, *next_prime, *p;
   3.226      struct list_head   *tmp;
   3.227      int                 cpu = prev->processor;
   3.228 @@ -422,9 +357,6 @@ static task_slice_t fbvt_do_schedule(s_t
   3.229  
   3.230      ASSERT(prev->sched_priv != NULL);
   3.231      ASSERT(prev_inf != NULL);
   3.232 -    
   3.233 -    spin_lock_irqsave(&CPU_INFO(cpu)->run_lock, flags);
   3.234 -
   3.235      ASSERT(__task_on_runqueue(prev));
   3.236  
   3.237      if ( likely(!is_idle_task(prev)) ) 
   3.238 @@ -503,8 +435,6 @@ static task_slice_t fbvt_do_schedule(s_t
   3.239              min_avt = p_inf->avt;
   3.240      }
   3.241  
   3.242 -    spin_unlock_irqrestore(&CPU_INFO(cpu)->run_lock, flags);
   3.243 -
   3.244      /* Extract the domain pointers from the dom infos */
   3.245      next        = next_inf->domain;
   3.246      next_prime  = next_prime_inf->domain;
   3.247 @@ -517,8 +447,10 @@ static task_slice_t fbvt_do_schedule(s_t
   3.248      /* check for virtual time overrun on this cpu */
   3.249      if ( CPU_SVT(cpu) >= 0xf0000000 )
   3.250      {
   3.251 -        u_long t_flags; 
   3.252 -        write_lock_irqsave(&tasklist_lock, t_flags); 
   3.253 +        ASSERT(!local_irq_is_enabled());
   3.254 +
   3.255 +        write_lock(&tasklist_lock);
   3.256 +
   3.257          for_each_domain ( p )
   3.258          {
   3.259              if ( p->processor == cpu )
   3.260 @@ -528,7 +460,9 @@ static task_slice_t fbvt_do_schedule(s_t
   3.261                  p_inf->avt -= 0xe0000000;
   3.262              }
   3.263          } 
   3.264 -        write_unlock_irqrestore(&tasklist_lock, t_flags); 
   3.265 +
   3.266 +        write_unlock(&tasklist_lock);
   3.267 +
   3.268          CPU_SVT(cpu) -= 0xe0000000;
   3.269      }
   3.270  
   3.271 @@ -608,13 +542,11 @@ static void fbvt_dump_settings(void)
   3.272  
   3.273  static void fbvt_dump_cpu_state(int i)
   3.274  {
   3.275 -    unsigned long flags;
   3.276      struct list_head *list, *queue;
   3.277      int loop = 0;
   3.278      struct fbvt_dom_info *d_inf;
   3.279      struct domain *d;
   3.280  
   3.281 -    spin_lock_irqsave(&CPU_INFO(i)->run_lock, flags);
   3.282      printk("svt=0x%08lX ", CPU_SVT(i));
   3.283  
   3.284      queue = RUNQUEUE(i);
   3.285 @@ -633,23 +565,8 @@ static void fbvt_dump_cpu_state(int i)
   3.286              (unsigned long)list, (unsigned long)list->next,
   3.287              (unsigned long)list->prev);
   3.288      }
   3.289 -    spin_unlock_irqrestore(&CPU_INFO(i)->run_lock, flags);        
   3.290  }
   3.291  
   3.292 -
   3.293 -/* We use cache to create the bvt_dom_infos
   3.294 -   this functions makes sure that the run_list
   3.295 -   is initialised properly. The new domain needs
   3.296 -   NOT to appear as to be on the runqueue */
   3.297 -static void cache_constructor(void *arg1, xmem_cache_t *arg2, unsigned long arg3)
   3.298 -{
   3.299 -    struct fbvt_dom_info *dom_inf = (struct fbvt_dom_info*)arg1;
   3.300 -    dom_inf->run_list.next = NULL;
   3.301 -    dom_inf->run_list.prev = NULL;
   3.302 -}
   3.303 -
   3.304 -                     
   3.305 -
   3.306  /* Initialise the data structures. */
   3.307  int fbvt_init_scheduler()
   3.308  {
   3.309 @@ -666,15 +583,12 @@ int fbvt_init_scheduler()
   3.310          }
   3.311  
   3.312          INIT_LIST_HEAD(RUNQUEUE(i));
   3.313 -        spin_lock_init(&CPU_INFO(i)->run_lock);
   3.314   
   3.315          CPU_SVT(i) = 0; /* XXX do I really need to do this? */
   3.316      }
   3.317  
   3.318 -    dom_info_cache = xmem_cache_create("FBVT dom info",
   3.319 -                                       sizeof(struct fbvt_dom_info),
   3.320 -                                       0, 0, cache_constructor, NULL);
   3.321 -
   3.322 +    dom_info_cache = xmem_cache_create(
   3.323 +        "FBVT dom info", sizeof(struct fbvt_dom_info), 0, 0, NULL, NULL);
   3.324      if ( dom_info_cache == NULL )
   3.325      {
   3.326          printk("FBVT: Failed to allocate domain info SLAB cache");
     4.1 --- a/xen/common/sched_rrobin.c	Tue Oct 05 09:53:10 2004 +0000
     4.2 +++ b/xen/common/sched_rrobin.c	Tue Oct 05 10:39:38 2004 +0000
     4.3 @@ -23,19 +23,12 @@ struct rrobin_dom_info
     4.4      struct domain    *domain;
     4.5  };
     4.6  
     4.7 -static spinlock_t run_locks[NR_CPUS];
     4.8 -
     4.9  #define RR_INFO(d)      ((struct rrobin_dom_info *)d->sched_priv)
    4.10  #define RUNLIST(d)      ((struct list_head *)&(RR_INFO(d)->run_list))
    4.11  #define RUNQUEUE(cpu)   RUNLIST(schedule_data[cpu].idle)
    4.12  
    4.13 -/* SLAB cache for struct rrobin_dom_info objects */
    4.14  static xmem_cache_t *dom_info_cache;
    4.15  
    4.16 -/*
    4.17 - * Wrappers for run-queue management. Must be called with the run_lock
    4.18 - * held.
    4.19 - */
    4.20  static inline void __add_to_runqueue_head(struct domain *d)
    4.21  {
    4.22      list_add(RUNLIST(d), RUNQUEUE(d->processor));
    4.23 @@ -58,92 +51,72 @@ static inline int __task_on_runqueue(str
    4.24      return (RUNLIST(d))->next != NULL;
    4.25  }
    4.26  
    4.27 -
    4.28 -/* Ensures proper initialisation of the dom_info */
    4.29 -static void cache_constructor(void *arg1, xmem_cache_t *arg2, unsigned long arg3)
    4.30 -{
    4.31 -    struct rrobin_dom_info *dom_inf = (struct rrobin_dom_info*)arg1;
    4.32 -    dom_inf->run_list.next = NULL;
    4.33 -    dom_inf->run_list.prev = NULL;
    4.34 -}
    4.35 -            
    4.36 -
    4.37  /* Initialises the runqueues and creates the domain info cache */
    4.38  static int rr_init_scheduler()
    4.39  {
    4.40      int i;
    4.41  
    4.42      for ( i = 0; i < NR_CPUS; i++ )
    4.43 -    {
    4.44          INIT_LIST_HEAD(RUNQUEUE(i));
    4.45 -        spin_lock_init(&run_locks[i]);
    4.46 -    }
    4.47     
    4.48 -    dom_info_cache = xmem_cache_create("FBVT dom info", 
    4.49 -                                        sizeof(struct rrobin_dom_info), 
    4.50 -                                        0, 0, cache_constructor, NULL);
    4.51 -
    4.52 -    if(dom_info_cache == NULL)
    4.53 +    dom_info_cache = xmem_cache_create(
     4.54 +        "RR dom info", sizeof(struct rrobin_dom_info), 0, 0, NULL, NULL);
    4.55 +    if ( dom_info_cache == NULL )
    4.56      {
    4.57          printk("Could not allocate SLAB cache.\n");
    4.58          return -1;
    4.59      }
    4.60 +
    4.61      return 0;                                                                
    4.62  }
    4.63  
    4.64  /* Allocates memory for per domain private scheduling data*/
    4.65  static int rr_alloc_task(struct domain *d)
    4.66  {
    4.67 -    d->sched_priv = xmem_cache_alloc(dom_info_cache);
    4.68 -    if ( d->sched_priv == NULL )
    4.69 +    if ( (d->sched_priv = xmem_cache_alloc(dom_info_cache)) == NULL )
    4.70          return -1;
    4.71 -
    4.72 -   return 0;
    4.73 +    memset(d->sched_priv, 0, sizeof(struct rrobin_dom_info));
    4.74 +    return 0;
    4.75  }
    4.76  
    4.77  /* Setup the rr_dom_info */
    4.78 -static void rr_add_task(struct domain *p)
    4.79 +static void rr_add_task(struct domain *d)
    4.80  {
    4.81      struct rrobin_dom_info *inf;
    4.82 -    RR_INFO(p)->domain = p;
    4.83 -    inf = RR_INFO(p);
    4.84 +    RR_INFO(d)->domain = d;
    4.85 +    inf = RR_INFO(d);
    4.86  }
    4.87  
    4.88  /* Frees memory used by domain info */
    4.89 -static void rr_free_task(struct domain *p)
    4.90 +static void rr_free_task(struct domain *d)
    4.91  {
    4.92 -    ASSERT( p->sched_priv != NULL );
    4.93 -    xmem_cache_free( dom_info_cache, p->sched_priv );
    4.94 +    ASSERT(d->sched_priv != NULL);
    4.95 +    xmem_cache_free(dom_info_cache, d->sched_priv);
    4.96  }
    4.97  
    4.98  /* Initialises idle task */
    4.99 -static int rr_init_idle_task(struct domain *p)
   4.100 +static int rr_init_idle_task(struct domain *d)
   4.101  {
   4.102 -    unsigned long flags;
   4.103 -    if(rr_alloc_task(p) < 0) return -1;
   4.104 -    rr_add_task(p);
   4.105 +    if ( rr_alloc_task(d) < 0 )
   4.106 +        return -1;
   4.107  
   4.108 -    spin_lock_irqsave(&run_locks[p->processor], flags);
   4.109 -    set_bit(DF_RUNNING, &p->flags);
   4.110 -    if ( !__task_on_runqueue(p) )
   4.111 -         __add_to_runqueue_head(p);
   4.112 -    spin_unlock_irqrestore(&run_locks[p->processor], flags);
   4.113 +    rr_add_task(d);
   4.114 +
   4.115 +    set_bit(DF_RUNNING, &d->flags);
   4.116 +    if ( !__task_on_runqueue(d) )
   4.117 +         __add_to_runqueue_head(d);
   4.118 +
   4.119      return 0;
   4.120  }
   4.121  
   4.122 -
   4.123  /* Main scheduling function */
   4.124  static task_slice_t rr_do_schedule(s_time_t now)
   4.125  {
   4.126 -    unsigned long flags;
   4.127      struct domain *prev = current;
   4.128      int cpu = current->processor;
   4.129 -    
   4.130      task_slice_t ret;
   4.131      
   4.132 -    spin_lock_irqsave(&run_locks[cpu], flags);
   4.133 -    
   4.134 -    if(!is_idle_task(prev))
   4.135 +    if ( !is_idle_task(prev) )
   4.136      {
   4.137          __del_from_runqueue(prev);
   4.138      
   4.139 @@ -151,11 +124,9 @@ static task_slice_t rr_do_schedule(s_tim
   4.140              __add_to_runqueue_tail(prev);
   4.141      }
   4.142      
   4.143 -    spin_unlock_irqrestore(&run_locks[cpu], flags);
   4.144 -    
   4.145 -    ret.task = list_entry(  RUNQUEUE(cpu)->next, 
   4.146 -                            struct rrobin_dom_info, 
   4.147 -                            run_list)->domain;
   4.148 +    ret.task = list_entry(RUNQUEUE(cpu)->next, 
   4.149 +                          struct rrobin_dom_info, 
   4.150 +                          run_list)->domain;
   4.151      ret.time = rr_slice;
   4.152      return ret;
   4.153  }
   4.154 @@ -182,47 +153,28 @@ static void rr_dump_settings()
   4.155  
   4.156  static void rr_sleep(struct domain *d)
   4.157  {
   4.158 -    unsigned long flags;
   4.159 -
   4.160      if ( test_bit(DF_RUNNING, &d->flags) )
   4.161          cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
   4.162 -    else
   4.163 -    {
   4.164 -        spin_lock_irqsave(&run_locks[d->processor], flags);
   4.165 -        if ( __task_on_runqueue(d) )
   4.166 -            __del_from_runqueue(d);
   4.167 -        spin_unlock_irqrestore(&run_locks[d->processor], flags);
   4.168 -    }
   4.169 +    else if ( __task_on_runqueue(d) )
   4.170 +        __del_from_runqueue(d);
   4.171  }
   4.172  
   4.173  void rr_wake(struct domain *d)
   4.174  {
   4.175 -    unsigned long       flags;
   4.176      struct domain       *curr;
   4.177      s_time_t            now;
   4.178      int                 cpu = d->processor;
   4.179  
   4.180 -    spin_lock_irqsave(&run_locks[cpu], flags);
   4.181 -    
   4.182 -    /* If on the runqueue already then someone has done the wakeup work. */
   4.183 -    if ( unlikely(__task_on_runqueue(d)))
   4.184 -    {
   4.185 -        spin_unlock_irqrestore(&run_locks[cpu], flags);
   4.186 +    if ( unlikely(__task_on_runqueue(d)) )
   4.187          return;
   4.188 -    }
   4.189  
   4.190      __add_to_runqueue_head(d);
   4.191 -    spin_unlock_irqrestore(&run_locks[cpu], flags);
   4.192  
   4.193      now = NOW();
   4.194  
   4.195 -    spin_lock_irqsave(&schedule_data[cpu].schedule_lock, flags);
   4.196      curr = schedule_data[cpu].curr;
   4.197 - 
   4.198 -    if ( is_idle_task(curr) )
    4.199 +    if ( is_idle_task(curr) )
   4.200          cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
   4.201 -
   4.202 -    spin_unlock_irqrestore(&schedule_data[cpu].schedule_lock, flags);
   4.203  }
   4.204  
   4.205  
   4.206 @@ -235,13 +187,10 @@ static void rr_dump_domain(struct domain
   4.207  
   4.208  static void rr_dump_cpu_state(int i)
   4.209  {
   4.210 -    unsigned long flags;
   4.211      struct list_head *list, *queue;
   4.212      int loop = 0;
   4.213      struct rrobin_dom_info *d_inf;
   4.214  
   4.215 -    spin_lock_irqsave(&run_locks[i], flags);
   4.216 -
   4.217      queue = RUNQUEUE(i);
   4.218      printk("QUEUE rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
   4.219          (unsigned long) queue->next, (unsigned long) queue->prev);
   4.220 @@ -256,7 +205,6 @@ static void rr_dump_cpu_state(int i)
   4.221          d_inf = list_entry(list, struct rrobin_dom_info, run_list);
   4.222          rr_dump_domain(d_inf->domain);
   4.223      }
   4.224 -    spin_unlock_irqrestore(&run_locks[i], flags);
   4.225  }
   4.226  
   4.227  
     5.1 --- a/xen/common/schedule.c	Tue Oct 05 09:53:10 2004 +0000
     5.2 +++ b/xen/common/schedule.c	Tue Oct 05 10:39:38 2004 +0000
     5.3 @@ -73,11 +73,9 @@ extern struct scheduler sched_rrobin_def
     5.4  extern struct scheduler sched_atropos_def;
     5.5  static struct scheduler *schedulers[] = { 
     5.6      &sched_bvt_def,
     5.7 -#ifdef BROKEN_SCHEDULERS
     5.8      &sched_fbvt_def,
     5.9      &sched_rrobin_def,
    5.10      &sched_atropos_def,
    5.11 -#endif
    5.12      NULL
    5.13  };
    5.14