ia64/xen-unstable
changeset 2598:92fff25bf21e
bitkeeper revision 1.1159.1.204 (41627bb83F8FiBATtZXHgsLfOqoj9A)
Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into labyrinth.cl.cam.ac.uk:/auto/anfs/scratch/labyrinth/iap10/xeno-clone/xeno.bk
author    iap10@labyrinth.cl.cam.ac.uk
date      Tue Oct 05 10:47:20 2004 +0000 (2004-10-05)
parents   6ceaf7d959a7 ebe6012dace7
children  0dfd459518e4 170f5ce645fd
files     xen/common/Makefile xen/common/domain.c xen/common/sched_atropos.c xen/common/sched_bvt.c xen/common/sched_fair_bvt.c xen/common/sched_rrobin.c xen/common/schedule.c xen/include/xen/sched.h
line diff
--- a/xen/common/domain.c	Tue Oct 05 10:47:11 2004 +0000
+++ b/xen/common/domain.c	Tue Oct 05 10:47:20 2004 +0000
@@ -39,7 +39,6 @@ struct domain *do_createdomain(domid_t d
     d->domain      = dom_id;
     d->processor   = cpu;
     d->create_time = NOW();
-    spin_lock_init(&d->sleep_lock);
 
     memcpy(&d->thread, &idle0_task.thread, sizeof(d->thread));
 
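The spin_lock_init() removed here pairs with the xen/include/xen/sched.h hunk at the end of this changeset: the per-domain sleep_lock disappears entirely, and the sleep/wake paths in xen/common/schedule.c below serialise on the per-CPU schedule_lock instead. A minimal sketch of the resulting rule, using only names that appear in this diff (editorial illustration, not code from the changeset):

    /* After this changeset: any code touching a CPU's run queue, a
     * domain's sched_priv, or the DF_RUNNING flag must hold that CPU's
     * schedule_lock. */
    unsigned long flags;
    spin_lock_irqsave(&schedule_data[d->processor].schedule_lock, flags);
    /* ... queue/dequeue d, update d->sched_priv ... */
    spin_unlock_irqrestore(&schedule_data[d->processor].schedule_lock, flags);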
--- a/xen/common/sched_atropos.c	Tue Oct 05 10:47:11 2004 +0000
+++ b/xen/common/sched_atropos.c	Tue Oct 05 10:47:20 2004 +0000
@@ -54,10 +54,8 @@ struct at_dom_info
 /* Atropos-specific per-CPU data */
 struct at_cpu_info
 {
-    spinlock_t       runq_lock;
-    struct list_head runq;           /* run queue */
-    spinlock_t       waitq_lock;
-    struct list_head waitq;          /* wait queue*/
+    struct list_head runq;
+    struct list_head waitq;
 };
 
 
@@ -71,14 +69,8 @@ struct at_cpu_info
 
 static void at_dump_cpu_state(int cpu);
 
-
-/* SLAB cache for struct at_dom_info objects */
 static xmem_cache_t *dom_info_cache;
 
-/*
- * Wrappers for run-queue management. Must be called with the run_lock
- * held.
- */
 static inline void __add_to_runqueue_head(struct domain *d)
 {
     list_add(RUNLIST(d), RUNQ(d->processor));
@@ -590,8 +582,6 @@ static int at_init_scheduler()
             return -1;
         INIT_LIST_HEAD(WAITQ(i));
         INIT_LIST_HEAD(RUNQ(i));
-        spin_lock_init(&CPU_INFO(i)->runq_lock);
-        spin_lock_init(&CPU_INFO(i)->waitq_lock);
     }
 
     dom_info_cache = xmem_cache_create("Atropos dom info",
--- a/xen/common/sched_bvt.c	Tue Oct 05 10:47:11 2004 +0000
+++ b/xen/common/sched_bvt.c	Tue Oct 05 10:47:20 2004 +0000
@@ -37,7 +37,7 @@ struct bvt_dom_info
     u32              evt;            /* effective virtual time */
     int              warpback;       /* warp? */
     int              warp;           /* warp set and within the warp
-                                         limits*/
+                                        limits*/
     s32              warp_value;     /* virtual time warp */
     s_time_t         warpl;          /* warp limit */
     struct ac_timer  warp_timer;     /* deals with warpl */
@@ -47,12 +47,10 @@ struct bvt_dom_info
 
 struct bvt_cpu_info
 {
-    spinlock_t       run_lock;       /* protects runqueue */
-    struct list_head runqueue;       /* runqueue for given processor */
-    unsigned long    svt;            /* XXX check this is unsigned long! */
+    struct list_head runqueue;
+    unsigned long    svt;
 };
 
-
 #define BVT_INFO(p)   ((struct bvt_dom_info *)(p)->sched_priv)
 #define CPU_INFO(cpu) ((struct bvt_cpu_info *)(schedule_data[cpu]).sched_priv)
 #define RUNLIST(p)    ((struct list_head *)&(BVT_INFO(p)->run_list))
@@ -64,13 +62,8 @@ struct bvt_cpu_info
 #define TIME_SLOP      (s32)MICROSECS(50)   /* allow time to slip a bit */
 static s32 ctx_allow = (s32)MILLISECS(5);   /* context switch allowance */
 
-/* SLAB cache for struct bvt_dom_info objects */
 static xmem_cache_t *dom_info_cache;
 
-/*
- * Wrappers for run-queue management. Must be called with the run_lock
- * held.
- */
 static inline void __add_to_runqueue_head(struct domain *d)
 {
     list_add(RUNLIST(d), RUNQUEUE(d->processor));
@@ -98,43 +91,42 @@ static inline int __task_on_runqueue(str
 static void warp_timer_fn(unsigned long pointer)
 {
     struct bvt_dom_info *inf = (struct bvt_dom_info *)pointer;
-    unsigned long flags;
+    unsigned int cpu = inf->domain->processor;
 
-    spin_lock_irqsave(&CPU_INFO(inf->domain->processor)->run_lock, flags);
+    spin_lock_irq(&schedule_data[cpu].schedule_lock);
+
     inf->warp = 0;
+
     /* unwarp equal to zero => stop warping */
-    if(inf->warpu == 0)
+    if ( inf->warpu == 0 )
     {
         inf->warpback = 0;
-        goto reschedule;
+        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
     }
 
     /* set unwarp timer */
     inf->unwarp_timer.expires = NOW() + inf->warpu;
     add_ac_timer(&inf->unwarp_timer);
-    spin_unlock_irqrestore(&CPU_INFO(inf->domain->processor)->run_lock, flags);
 
-reschedule:
-    cpu_raise_softirq(inf->domain->processor, SCHEDULE_SOFTIRQ);
+    spin_unlock_irq(&schedule_data[cpu].schedule_lock);
 }
 
 static void unwarp_timer_fn(unsigned long pointer)
 {
-    struct bvt_dom_info *inf = (struct bvt_dom_info *)pointer;
-    unsigned long flags;
+    struct bvt_dom_info *inf = (struct bvt_dom_info *)pointer;
+    unsigned int cpu = inf->domain->processor;
 
-    spin_lock_irqsave(&CPU_INFO(inf->domain->processor)->run_lock, flags);
-    if(inf->warpback)
+    spin_lock_irq(&schedule_data[cpu].schedule_lock);
+
+    if ( inf->warpback )
     {
         inf->warp = 1;
-        cpu_raise_softirq(inf->domain->processor, SCHEDULE_SOFTIRQ);
+        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
     }
-
-    spin_unlock_irqrestore(&CPU_INFO(inf->domain->processor)->run_lock, flags);
+
+    spin_unlock_irq(&schedule_data[cpu].schedule_lock);
 }
 
-
-
 static inline u32 calc_avt(struct domain *d, s_time_t now)
 {
     u32 ranfor, mcus;
@@ -146,15 +138,14 @@ static inline u32 calc_avt(struct domain
     return inf->avt + mcus * inf->mcu_advance;
 }
 
-
 /*
  * Calculate the effective virtual time for a domain. Take into account
  * warping limits
  */
 static inline u32 calc_evt(struct domain *d, u32 avt)
 {
-    struct bvt_dom_info *inf = BVT_INFO(d);
-    /* TODO The warp routines need to be rewritten GM */
+    struct bvt_dom_info *inf = BVT_INFO(d);
+    /* TODO The warp routines need to be rewritten GM */
 
     if ( inf->warp )
         return avt - inf->warp_value;
@@ -168,26 +159,25 @@ static inline u32 calc_evt(struct domain
  *
  * Returns non-zero on failure.
  */
-int bvt_alloc_task(struct domain *p)
+int bvt_alloc_task(struct domain *d)
 {
-    p->sched_priv = xmem_cache_alloc(dom_info_cache);
-    if ( p->sched_priv == NULL )
+    if ( (d->sched_priv = xmem_cache_alloc(dom_info_cache)) == NULL )
         return -1;
-
+    memset(d->sched_priv, 0, sizeof(struct bvt_dom_info));
    return 0;
 }
 
 /*
  * Add and remove a domain
 */
-void bvt_add_task(struct domain *p)
+void bvt_add_task(struct domain *d)
 {
-    struct bvt_dom_info *inf = BVT_INFO(p);
+    struct bvt_dom_info *inf = BVT_INFO(d);
     ASSERT(inf != NULL);
-    ASSERT(p   != NULL);
+    ASSERT(d   != NULL);
 
     inf->mcu_advance = MCU_ADVANCE;
-    inf->domain = p;
+    inf->domain = d;
     inf->warpback    = 0;
     /* Set some default values here. */
     inf->warp        = 0;
@@ -196,135 +186,95 @@ void bvt_add_task(struct domain *p)
     inf->warpu       = MILLISECS(1000);
     /* initialise the timers */
     init_ac_timer(&inf->warp_timer);
-    inf->warp_timer.cpu = p->processor;
+    inf->warp_timer.cpu = d->processor;
     inf->warp_timer.data = (unsigned long)inf;
     inf->warp_timer.function = &warp_timer_fn;
     init_ac_timer(&inf->unwarp_timer);
-    inf->unwarp_timer.cpu = p->processor;
+    inf->unwarp_timer.cpu = d->processor;
     inf->unwarp_timer.data = (unsigned long)inf;
     inf->unwarp_timer.function = &unwarp_timer_fn;
 
-    if ( p->domain == IDLE_DOMAIN_ID )
+    if ( d->domain == IDLE_DOMAIN_ID )
     {
         inf->avt = inf->evt = ~0U;
     }
     else
     {
         /* Set avt and evt to system virtual time. */
-        inf->avt = CPU_SVT(p->processor);
-        inf->evt = CPU_SVT(p->processor);
-    }
-
-    return;
+        inf->avt = CPU_SVT(d->processor);
+        inf->evt = CPU_SVT(d->processor);
+    }
 }
 
 int bvt_init_idle_task(struct domain *p)
 {
-    unsigned long flags;
-
-    if(bvt_alloc_task(p) < 0) return -1;
+    if ( bvt_alloc_task(p) < 0 )
+        return -1;
 
     bvt_add_task(p);
 
-    spin_lock_irqsave(&CPU_INFO(p->processor)->run_lock, flags);
-
     set_bit(DF_RUNNING, &p->flags);
     if ( !__task_on_runqueue(p) )
         __add_to_runqueue_head(p);
 
-    spin_unlock_irqrestore(&CPU_INFO(p->processor)->run_lock, flags);
-
     return 0;
 }
 
 void bvt_wake(struct domain *d)
 {
-    unsigned long flags;
     struct bvt_dom_info *inf = BVT_INFO(d);
     struct domain *curr;
     s_time_t now, r_time;
     int cpu = d->processor;
     u32 curr_evt;
 
-    /* The runqueue accesses must be protected */
-    spin_lock_irqsave(&CPU_INFO(cpu)->run_lock, flags);
-
-    /* If on the runqueue already then someone has done the wakeup work. */
     if ( unlikely(__task_on_runqueue(d)) )
-    {
-        spin_unlock_irqrestore(&CPU_INFO(cpu)->run_lock, flags);
         return;
-    }
 
     __add_to_runqueue_head(d);
 
     now = NOW();
 
     /* Set the BVT parameters. AVT should always be updated
-        if CPU migration ocurred.*/
+       if CPU migration ocurred.*/
     if ( inf->avt < CPU_SVT(cpu) ||
-          unlikely(test_bit(DF_MIGRATED, &d->flags)) )
+         unlikely(test_bit(DF_MIGRATED, &d->flags)) )
         inf->avt = CPU_SVT(cpu);
 
     /* Deal with warping here. */
     inf->evt = calc_evt(d, inf->avt);
-    spin_unlock_irqrestore(&CPU_INFO(cpu)->run_lock, flags);
-
-    /* Access to schedule_data protected by schedule_lock */
-    spin_lock_irqsave(&schedule_data[cpu].schedule_lock, flags);
 
     curr = schedule_data[cpu].curr;
     curr_evt = calc_evt(curr, calc_avt(curr, now));
     /* Calculate the time the current domain would run assuming
       the second smallest evt is of the newly woken domain */
     r_time = curr->lastschd +
-             ((inf->evt - curr_evt) / BVT_INFO(curr)->mcu_advance) +
-             ctx_allow;
+        ((inf->evt - curr_evt) / BVT_INFO(curr)->mcu_advance) +
+        ctx_allow;
 
     if ( is_idle_task(curr) || (inf->evt <= curr_evt) )
         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
     else if ( schedule_data[cpu].s_timer.expires > r_time )
         mod_ac_timer(&schedule_data[cpu].s_timer, r_time);
-
-    spin_unlock_irqrestore(&schedule_data[cpu].schedule_lock, flags);
 }
 
 
 static void bvt_sleep(struct domain *d)
 {
-    unsigned long flags;
-
     if ( test_bit(DF_RUNNING, &d->flags) )
         cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
-    else
-    {
-        /* The runqueue accesses must be protected */
-        spin_lock_irqsave(&CPU_INFO(d->processor)->run_lock, flags);
-
-
-        if ( __task_on_runqueue(d) )
-            __del_from_runqueue(d);
-
-        spin_unlock_irqrestore(&CPU_INFO(d->processor)->run_lock, flags);
-    }
+    else if ( __task_on_runqueue(d) )
+        __del_from_runqueue(d);
 }
 
 /**
  * bvt_free_task - free BVT private structures for a task
- * @p: task
+ * @d: task
 */
-void bvt_free_task(struct domain *p)
+void bvt_free_task(struct domain *d)
 {
-    ASSERT( p->sched_priv != NULL );
-    xmem_cache_free( dom_info_cache, p->sched_priv );
-}
-
-
-/*
- * Block the currently-executing domain until a pertinent event occurs.
- */
-static void bvt_do_block(struct domain *p)
-{
+    ASSERT(d->sched_priv != NULL);
+    xmem_cache_free(dom_info_cache, d->sched_priv);
 }
 
 /* Control the scheduler. */
@@ -333,24 +283,19 @@ int bvt_ctl(struct sched_ctl_cmd *cmd)
     struct bvt_ctl *params = &cmd->u.bvt;
 
     if ( cmd->direction == SCHED_INFO_PUT )
-    {
         ctx_allow = params->ctx_allow;
-    }
     else
-    {
         params->ctx_allow = ctx_allow;
-    }
 
     return 0;
 }
 
 /* Adjust scheduling parameter for a given domain. */
-int bvt_adjdom(struct domain *p,
-               struct sched_adjdom_cmd *cmd)
+int bvt_adjdom(
+    struct domain *d, struct sched_adjdom_cmd *cmd)
 {
     struct bvt_adjdom *params = &cmd->u.bvt;
-    unsigned long flags;
-
+
     if ( cmd->direction == SCHED_INFO_PUT )
     {
         u32 mcu_adv = params->mcu_adv;
@@ -359,27 +304,17 @@ int bvt_adjdom(struct domain *p,
         s_time_t warpl = params->warpl;
         s_time_t warpu = params->warpu;
 
-        struct bvt_dom_info *inf = BVT_INFO(p);
+        struct bvt_dom_info *inf = BVT_INFO(d);
 
         DPRINTK("Get domain %u bvt mcu_adv=%u, warpback=%d, warpvalue=%d, "
                 "warpl=%lld, warpu=%lld\n",
-                p->domain, inf->mcu_advance, inf->warpback, inf->warp_value,
+                d->domain, inf->mcu_advance, inf->warpback, inf->warp_value,
                 inf->warpl, inf->warpu);
 
         /* Sanity -- this can avoid divide-by-zero. */
-        if ( mcu_adv == 0 )
-        {
-            printk("Mcu advance must not be set to 0 (domain %d)\n",p->domain);
+        if ( (mcu_adv == 0) || (warpl < 0) || (warpu < 0) )
            return -EINVAL;
-        }
-        else if ( warpl < 0 || warpu < 0)
-        {
-            printk("Warp limits must be >= 0 (domain %d)\n", p->domain);
-            return -EINVAL;
-        }
-
-
-        spin_lock_irqsave(&CPU_INFO(p->processor)->run_lock, flags);
+
         inf->mcu_advance = mcu_adv;
         inf->warpback = warpback;
         /* The warp should be the same as warpback */
@@ -391,27 +326,23 @@ int bvt_adjdom(struct domain *p,
         /* If the unwarp timer set up it needs to be removed */
         rem_ac_timer(&inf->unwarp_timer);
         /* If we stop warping the warp timer needs to be removed */
-        if(!warpback)
+        if ( !warpback )
            rem_ac_timer(&inf->warp_timer);
 
         DPRINTK("Get domain %u bvt mcu_adv=%u, warpback=%d, warpvalue=%d, "
                 "warpl=%lld, warpu=%lld\n",
-                p->domain, inf->mcu_advance, inf->warpback, inf->warp_value,
+                d->domain, inf->mcu_advance, inf->warpback, inf->warp_value,
                 inf->warpl, inf->warpu);
 
-        spin_unlock_irqrestore(&CPU_INFO(p->processor)->run_lock, flags);
     }
     else if ( cmd->direction == SCHED_INFO_GET )
     {
-        struct bvt_dom_info *inf = BVT_INFO(p);
-
-        spin_lock_irqsave(&CPU_INFO(p->processor)->run_lock, flags);
+        struct bvt_dom_info *inf = BVT_INFO(d);
         params->mcu_adv = inf->mcu_advance;
         params->warpvalue = inf->warp_value;
         params->warpback = inf->warpback;
         params->warpl = inf->warpl;
         params->warpu = inf->warpu;
-        spin_unlock_irqrestore(&CPU_INFO(p->processor)->run_lock, flags);
     }
 
     return 0;
@@ -427,23 +358,19 @@ int bvt_adjdom(struct domain *p,
 */
 static task_slice_t bvt_do_schedule(s_time_t now)
 {
-    unsigned long flags;
-    struct domain *prev = current, *next = NULL, *next_prime, *p;
+    struct domain *prev = current, *next = NULL, *next_prime, *p;
     struct list_head *tmp;
     int                 cpu = prev->processor;
     s32                 r_time;     /* time for new dom to run */
     u32                 next_evt, next_prime_evt, min_avt;
-    struct bvt_dom_info *prev_inf       = BVT_INFO(prev),
-                        *p_inf          = NULL,
-                        *next_inf       = NULL,
-                        *next_prime_inf = NULL;
+    struct bvt_dom_info *prev_inf       = BVT_INFO(prev);
+    struct bvt_dom_info *p_inf          = NULL;
+    struct bvt_dom_info *next_inf       = NULL;
+    struct bvt_dom_info *next_prime_inf = NULL;
     task_slice_t        ret;
 
-
     ASSERT(prev->sched_priv != NULL);
     ASSERT(prev_inf != NULL);
-    spin_lock_irqsave(&CPU_INFO(cpu)->run_lock, flags);
-
     ASSERT(__task_on_runqueue(prev));
 
     if ( likely(!is_idle_task(prev)) )
@@ -511,8 +438,6 @@ static task_slice_t bvt_do_schedule(s_ti
         add_ac_timer(&next_inf->warp_timer);
     }
 
-    spin_unlock_irqrestore(&CPU_INFO(cpu)->run_lock, flags);
-
     /* Extract the domain pointers from the dom infos */
     next = next_inf->domain;
     next_prime = next_prime_inf->domain;
@@ -524,9 +449,9 @@ static task_slice_t bvt_do_schedule(s_ti
     /* check for virtual time overrun on this cpu */
     if ( CPU_SVT(cpu) >= 0xf0000000 )
    {
-        u_long t_flags;
-
-        write_lock_irqsave(&tasklist_lock, t_flags);
+        ASSERT(!local_irq_is_enabled());
+
+        write_lock(&tasklist_lock);
 
         for_each_domain ( p )
         {
@@ -538,7 +463,7 @@ static task_slice_t bvt_do_schedule(s_ti
             }
         }
 
-        write_unlock_irqrestore(&tasklist_lock, t_flags);
+        write_unlock(&tasklist_lock);
 
         CPU_SVT(cpu) -= 0xe0000000;
     }
@@ -591,43 +516,29 @@ static void bvt_dump_settings(void)
 
 static void bvt_dump_cpu_state(int i)
 {
-    unsigned long flags;
     struct list_head *list, *queue;
     int loop = 0;
     struct bvt_dom_info *d_inf;
     struct domain *d;
 
-    spin_lock_irqsave(&CPU_INFO(i)->run_lock, flags);
     printk("svt=0x%08lX ", CPU_SVT(i));
 
     queue = RUNQUEUE(i);
     printk("QUEUE rq %lx   n: %lx, p: %lx\n", (unsigned long)queue,
-            (unsigned long) queue->next, (unsigned long) queue->prev);
+           (unsigned long) queue->next, (unsigned long) queue->prev);
 
     list_for_each ( list, queue )
     {
         d_inf = list_entry(list, struct bvt_dom_info, run_list);
         d = d_inf->domain;
         printk("%3d: %u has=%c ", loop++, d->domain,
-                test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
+               test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
         bvt_dump_runq_el(d);
         printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
         printk("         l: %lx n: %lx  p: %lx\n",
-                (unsigned long)list, (unsigned long)list->next,
-                (unsigned long)list->prev);
+               (unsigned long)list, (unsigned long)list->next,
+               (unsigned long)list->prev);
     }
-    spin_unlock_irqrestore(&CPU_INFO(i)->run_lock, flags);
-}
-
-/* We use cache to create the bvt_dom_infos
-   this functions makes sure that the run_list
-   is initialised properly.
-   Call to __task_on_runqueue needs to return false */
-static void cache_constructor(void *arg1, xmem_cache_t *arg2, unsigned long arg3)
-{
-    struct bvt_dom_info *dom_inf = (struct bvt_dom_info*)arg1;
-    dom_inf->run_list.next = NULL;
-    dom_inf->run_list.prev = NULL;
 }
 
 /* Initialise the data structures. */
@@ -646,15 +557,12 @@ int bvt_init_scheduler()
         }
 
         INIT_LIST_HEAD(RUNQUEUE(i));
-        spin_lock_init(&CPU_INFO(i)->run_lock);
 
         CPU_SVT(i) = 0; /* XXX do I really need to do this? */
     }
 
-    dom_info_cache = xmem_cache_create("BVT dom info",
-                                       sizeof(struct bvt_dom_info),
-                                       0, 0, cache_constructor, NULL);
-
+    dom_info_cache = xmem_cache_create(
+        "BVT dom info", sizeof(struct bvt_dom_info), 0, 0, NULL, NULL);
     if ( dom_info_cache == NULL )
     {
         printk("BVT: Failed to allocate domain info SLAB cache");
@@ -664,8 +572,6 @@ int bvt_init_scheduler()
     return 0;
 }
 
-
-
 struct scheduler sched_bvt_def = {
     .name     = "Borrowed Virtual Time",
     .opt_name = "bvt",
@@ -676,7 +582,6 @@ struct scheduler sched_bvt_def = {
     .alloc_task     = bvt_alloc_task,
     .add_task       = bvt_add_task,
     .free_task      = bvt_free_task,
-    .do_block       = bvt_do_block,
     .do_schedule    = bvt_do_schedule,
     .control        = bvt_ctl,
     .adjdom         = bvt_adjdom,
@@ -685,4 +590,3 @@ struct scheduler sched_bvt_def = {
     .sleep          = bvt_sleep,
     .wake           = bvt_wake,
 };
-
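Beyond switching to schedule_lock, the warp_timer_fn() rewrite removes a lock leak: in the old code the warpu == 0 path jumped to the reschedule: label past the spin_unlock_irqrestore(), returning with run_lock still held. The move from spin_lock_irqsave() to spin_lock_irq() also implies these ac_timer callbacks may assume interrupts are enabled on entry. A stand-alone illustration of the goto hazard and the structured fix (pthread mutexes as stand-ins; none of these names are Xen APIs):

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Shape of the old warp_timer_fn(): the early path skips the unlock. */
    static void leaky(int warpu)
    {
        pthread_mutex_lock(&lock);
        if (warpu == 0)
            goto reschedule;     /* BUG: bypasses the unlock below */
        pthread_mutex_unlock(&lock);
    reschedule:
        ;                        /* "raise softirq" -- lock still held if warpu == 0 */
    }

    /* Shape of the new code: a single exit, the lock is always released. */
    static void fixed(int warpu)
    {
        pthread_mutex_lock(&lock);
        if (warpu == 0)
            ;                    /* raise the softirq while holding the lock */
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        leaky(1);                /* only the safe path */
        fixed(0);
        fixed(1);
        return 0;
    }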
--- a/xen/common/sched_fair_bvt.c	Tue Oct 05 10:47:11 2004 +0000
+++ b/xen/common/sched_fair_bvt.c	Tue Oct 05 10:47:20 2004 +0000
@@ -52,9 +52,8 @@ struct fbvt_dom_info
 
 struct fbvt_cpu_info
 {
-    spinlock_t       run_lock;  /* protects runqueue */
-    struct list_head runqueue;  /* runqueue for this CPU */
-    unsigned long    svt;       /* XXX check this is unsigned long! */
+    struct list_head runqueue;
+    unsigned long    svt;
     u32              vtb;       /* virtual time bonus */
     u32              r_time;    /* last time to run */
 };
@@ -74,14 +73,8 @@ struct fbvt_cpu_info
 static s32 ctx_allow = (s32)MILLISECS(5);   /* context switch allowance */
 static s32 max_vtb   = (s32)MILLISECS(5);
 
-/* SLAB cache for struct fbvt_dom_info objects */
 static xmem_cache_t *dom_info_cache;
 
-
-/*
- * Wrappers for run-queue management. Must be called with the run_lock
- * held.
- */
 static inline void __add_to_runqueue_head(struct domain *d)
 {
     list_add(RUNLIST(d), RUNQUEUE(d->processor));
@@ -140,12 +133,11 @@ static void __calc_evt(struct fbvt_dom_i
 *
 * Returns non-zero on failure.
 */
-int fbvt_alloc_task(struct domain *p)
+int fbvt_alloc_task(struct domain *d)
 {
-    p->sched_priv = xmem_cache_alloc(dom_info_cache);
-    if ( p->sched_priv == NULL )
+    if ( (d->sched_priv = xmem_cache_alloc(dom_info_cache)) == NULL )
         return -1;
-
+    memset(d->sched_priv, 0, sizeof(struct fbvt_dom_info));
    return 0;
 }
 
@@ -183,64 +175,33 @@ void fbvt_add_task(struct domain *p)
 
 int fbvt_init_idle_task(struct domain *p)
 {
-    unsigned long flags;
-
-    if(fbvt_alloc_task(p) < 0) return -1;
+    if ( fbvt_alloc_task(p) < 0 )
+        return -1;
 
     fbvt_add_task(p);
-    spin_lock_irqsave(&CPU_INFO(p->processor)->run_lock, flags);
+
     set_bit(DF_RUNNING, &p->flags);
     if ( !__task_on_runqueue(p) )
-        __add_to_runqueue_head(p);
-    spin_unlock_irqrestore(&CPU_INFO(p->processor)->run_lock, flags);
+        __add_to_runqueue_head(p);
 
     return 0;
 }
 
 static void fbvt_wake(struct domain *d)
 {
-    unsigned long flags;
     struct fbvt_dom_info *inf = FBVT_INFO(d);
     struct domain *curr;
     s_time_t now, min_time;
     int cpu = d->processor;
     s32 io_warp;
 
-    /* The runqueue accesses must be protected */
-    spin_lock_irqsave(&CPU_INFO(cpu)->run_lock, flags);
-
-    /* If on the runqueue already then someone has done the wakeup work. */
     if ( unlikely(__task_on_runqueue(d)) )
-    {
-        spin_unlock_irqrestore(&CPU_INFO(cpu)->run_lock, flags);
         return;
-    }
-
+
     __add_to_runqueue_head(d);
 
     now = NOW();
 
-#if 0
-    /*
-     * XXX KAF: This was fbvt_unpause(). Not sure if it's the right thing
-     * to do, in light of the stuff that fbvt_wake_up() does.
-     * e.g., setting 'inf->avt = CPU_SVT(cpu);' would make the later test
-     * 'inf->avt < CPU_SVT(cpu)' redundant!
-     */
-    if ( d->domain == IDLE_DOMAIN_ID )
-    {
-        inf->avt = inf->evt = ~0U;
-    }
-    else
-    {
-        /* Set avt to system virtual time. */
-        inf->avt = CPU_SVT(cpu);
-        /* Set some default values here. */
-        LAST_VTB(cpu) = 0;
-        __calc_evt(inf);
-    }
-#endif
-
     /* Set the BVT parameters. */
     if ( inf->avt < CPU_SVT(cpu) )
     {
@@ -265,11 +226,6 @@ static void fbvt_wake(struct domain *d)
     inf->warpback  = 1;
     inf->warped    = now;
     __calc_evt(inf);
-    spin_unlock_irqrestore(&CPU_INFO(cpu)->run_lock, flags);
-
-    /* Access to schedule_data protected by schedule_lock */
-    spin_lock_irqsave(&schedule_data[cpu].schedule_lock, flags);
-
 
     curr = schedule_data[cpu].curr;
 
@@ -280,47 +236,34 @@ static void fbvt_wake(struct domain *d)
         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
     else if ( schedule_data[cpu].s_timer.expires > (min_time + TIME_SLOP) )
         mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
-
-    spin_unlock_irqrestore(&schedule_data[cpu].schedule_lock, flags);
 }
 
 
 static void fbvt_sleep(struct domain *d)
 {
-    unsigned long flags;
-
-
     if ( test_bit(DF_RUNNING, &d->flags) )
         cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
-    else
-    {
-        /* The runqueue accesses must be protected */
-        spin_lock_irqsave(&CPU_INFO(d->processor)->run_lock, flags);
-
-        if ( __task_on_runqueue(d) )
-            __del_from_runqueue(d);
-
-        spin_unlock_irqrestore(&CPU_INFO(d->processor)->run_lock, flags);
-    }
+    else if ( __task_on_runqueue(d) )
+        __del_from_runqueue(d);
 }
 
 
 /**
 * fbvt_free_task - free FBVT private structures for a task
- * @p: task
+ * @d: task
 */
-void fbvt_free_task(struct domain *p)
+void fbvt_free_task(struct domain *d)
 {
-    ASSERT( p->sched_priv != NULL );
-    xmem_cache_free( dom_info_cache, p->sched_priv );
+    ASSERT(d->sched_priv != NULL);
+    xmem_cache_free(dom_info_cache, d->sched_priv);
 }
 
 /*
 * Block the currently-executing domain until a pertinent event occurs.
 */
-static void fbvt_do_block(struct domain *p)
+static void fbvt_do_block(struct domain *d)
 {
-    FBVT_INFO(p)->warpback = 0;
+    FBVT_INFO(d)->warpback = 0;
 }
 
 /* Control the scheduler. */
@@ -347,7 +290,6 @@ int fbvt_adjdom(struct domain *p,
                 struct sched_adjdom_cmd *cmd)
 {
     struct fbvt_adjdom *params = &cmd->u.fbvt;
-    unsigned long flags;
 
     if ( cmd->direction == SCHED_INFO_PUT )
     {
@@ -367,7 +309,6 @@ int fbvt_adjdom(struct domain *p,
         if ( mcu_adv == 0 )
             return -EINVAL;
 
-        spin_lock_irqsave(&CPU_INFO(p->processor)->run_lock, flags);
         inf->mcu_advance = mcu_adv;
         inf->warp = warp;
         inf->warpl = warpl;
@@ -377,19 +318,14 @@ int fbvt_adjdom(struct domain *p,
                 "warpl=%ld, warpu=%ld\n",
                 p->domain, inf->mcu_advance, inf->warp,
                 inf->warpl, inf->warpu );
-
-        spin_unlock_irqrestore(&CPU_INFO(p->processor)->run_lock, flags);
     }
     else if ( cmd->direction == SCHED_INFO_GET )
     {
         struct fbvt_dom_info *inf = FBVT_INFO(p);
-
-        spin_lock_irqsave(&CPU_INFO(p->processor)->run_lock, flags);
         params->mcu_adv = inf->mcu_advance;
         params->warp = inf->warp;
         params->warpl = inf->warpl;
         params->warpu = inf->warpu;
-        spin_unlock_irqrestore(&CPU_INFO(p->processor)->run_lock, flags);
     }
 
     return 0;
@@ -405,7 +341,6 @@ int fbvt_adjdom(struct domain *p,
 */
 static task_slice_t fbvt_do_schedule(s_time_t now)
 {
-    unsigned long flags;
     struct domain *prev = current, *next = NULL, *next_prime, *p;
     struct list_head *tmp;
     int cpu = prev->processor;
@@ -422,9 +357,6 @@ static task_slice_t fbvt_do_schedule(s_t
 
     ASSERT(prev->sched_priv != NULL);
     ASSERT(prev_inf != NULL);
-
-    spin_lock_irqsave(&CPU_INFO(cpu)->run_lock, flags);
-
     ASSERT(__task_on_runqueue(prev));
 
     if ( likely(!is_idle_task(prev)) )
@@ -503,8 +435,6 @@ static task_slice_t fbvt_do_schedule(s_t
             min_avt = p_inf->avt;
     }
 
-    spin_unlock_irqrestore(&CPU_INFO(cpu)->run_lock, flags);
-
     /* Extract the domain pointers from the dom infos */
     next = next_inf->domain;
     next_prime = next_prime_inf->domain;
@@ -517,8 +447,10 @@ static task_slice_t fbvt_do_schedule(s_t
     /* check for virtual time overrun on this cpu */
     if ( CPU_SVT(cpu) >= 0xf0000000 )
     {
-        u_long t_flags;
-        write_lock_irqsave(&tasklist_lock, t_flags);
+        ASSERT(!local_irq_is_enabled());
+
+        write_lock(&tasklist_lock);
+
         for_each_domain ( p )
         {
             if ( p->processor == cpu )
@@ -528,7 +460,9 @@ static task_slice_t fbvt_do_schedule(s_t
                 p_inf->avt -= 0xe0000000;
             }
         }
-        write_unlock_irqrestore(&tasklist_lock, t_flags);
+
+        write_unlock(&tasklist_lock);
+
         CPU_SVT(cpu) -= 0xe0000000;
     }
 
@@ -608,13 +542,11 @@ static void fbvt_dump_settings(void)
 
 static void fbvt_dump_cpu_state(int i)
 {
-    unsigned long flags;
     struct list_head *list, *queue;
     int loop = 0;
     struct fbvt_dom_info *d_inf;
     struct domain *d;
 
-    spin_lock_irqsave(&CPU_INFO(i)->run_lock, flags);
     printk("svt=0x%08lX ", CPU_SVT(i));
 
     queue = RUNQUEUE(i);
@@ -633,23 +565,8 @@ static void fbvt_dump_cpu_state(int i)
                (unsigned long)list, (unsigned long)list->next,
                (unsigned long)list->prev);
     }
-    spin_unlock_irqrestore(&CPU_INFO(i)->run_lock, flags);
 }
 
-
-/* We use cache to create the bvt_dom_infos
-   this functions makes sure that the run_list
-   is initialised properly. The new domain needs
-   NOT to appear as to be on the runqueue */
-static void cache_constructor(void *arg1, xmem_cache_t *arg2, unsigned long arg3)
-{
-    struct fbvt_dom_info *dom_inf = (struct fbvt_dom_info*)arg1;
-    dom_inf->run_list.next = NULL;
-    dom_inf->run_list.prev = NULL;
-}
-
-
-
 /* Initialise the data structures. */
 int fbvt_init_scheduler()
 {
@@ -666,15 +583,12 @@ int fbvt_init_scheduler()
         }
 
         INIT_LIST_HEAD(RUNQUEUE(i));
-        spin_lock_init(&CPU_INFO(i)->run_lock);
 
         CPU_SVT(i) = 0; /* XXX do I really need to do this? */
     }
 
-    dom_info_cache = xmem_cache_create("FBVT dom info",
-                                       sizeof(struct fbvt_dom_info),
-                                       0, 0, cache_constructor, NULL);
-
+    dom_info_cache = xmem_cache_create(
+        "FBVT dom info", sizeof(struct fbvt_dom_info), 0, 0, NULL, NULL);
     if ( dom_info_cache == NULL )
     {
         printk("FBVT: Failed to allocate domain info SLAB cache");
--- a/xen/common/sched_rrobin.c	Tue Oct 05 10:47:11 2004 +0000
+++ b/xen/common/sched_rrobin.c	Tue Oct 05 10:47:20 2004 +0000
@@ -23,19 +23,12 @@ struct rrobin_dom_info
     struct domain *domain;
 };
 
-static spinlock_t run_locks[NR_CPUS];
-
 #define RR_INFO(d)    ((struct rrobin_dom_info *)d->sched_priv)
 #define RUNLIST(d)    ((struct list_head *)&(RR_INFO(d)->run_list))
 #define RUNQUEUE(cpu) RUNLIST(schedule_data[cpu].idle)
 
-/* SLAB cache for struct rrobin_dom_info objects */
 static xmem_cache_t *dom_info_cache;
 
-/*
- * Wrappers for run-queue management. Must be called with the run_lock
- * held.
- */
 static inline void __add_to_runqueue_head(struct domain *d)
 {
     list_add(RUNLIST(d), RUNQUEUE(d->processor));
@@ -58,92 +51,72 @@ static inline int __task_on_runqueue(str
     return (RUNLIST(d))->next != NULL;
 }
 
-
-/* Ensures proper initialisation of the dom_info */
-static void cache_constructor(void *arg1, xmem_cache_t *arg2, unsigned long arg3)
-{
-    struct rrobin_dom_info *dom_inf = (struct rrobin_dom_info*)arg1;
-    dom_inf->run_list.next = NULL;
-    dom_inf->run_list.prev = NULL;
-}
-
-
 /* Initialises the runqueues and creates the domain info cache */
 static int rr_init_scheduler()
 {
     int i;
 
     for ( i = 0; i < NR_CPUS; i++ )
-    {
         INIT_LIST_HEAD(RUNQUEUE(i));
-        spin_lock_init(&run_locks[i]);
-    }
 
-    dom_info_cache = xmem_cache_create("FBVT dom info",
-                                       sizeof(struct rrobin_dom_info),
-                                       0, 0, cache_constructor, NULL);
-
-    if(dom_info_cache == NULL)
+    dom_info_cache = xmem_cache_create(
+        "RR dom info", sizeof(struct rrobin_dom_info), 0, 0, 0, NULL);
+    if ( dom_info_cache == NULL )
    {
         printk("Could not allocate SLAB cache.\n");
         return -1;
     }
+
     return 0;
 }
 
 /* Allocates memory for per domain private scheduling data*/
 static int rr_alloc_task(struct domain *d)
 {
-    d->sched_priv = xmem_cache_alloc(dom_info_cache);
-    if ( d->sched_priv == NULL )
+    if ( (d->sched_priv = xmem_cache_alloc(dom_info_cache)) == NULL )
         return -1;
-
-    return 0;
+    memset(d->sched_priv, 0, sizeof(struct rrobin_dom_info));
+    return 0;
 }
 
 /* Setup the rr_dom_info */
-static void rr_add_task(struct domain *p)
+static void rr_add_task(struct domain *d)
 {
     struct rrobin_dom_info *inf;
-    RR_INFO(p)->domain = p;
-    inf = RR_INFO(p);
+    RR_INFO(d)->domain = d;
+    inf = RR_INFO(d);
 }
 
 /* Frees memory used by domain info */
-static void rr_free_task(struct domain *p)
+static void rr_free_task(struct domain *d)
 {
-    ASSERT( p->sched_priv != NULL );
-    xmem_cache_free( dom_info_cache, p->sched_priv );
+    ASSERT(d->sched_priv != NULL);
+    xmem_cache_free(dom_info_cache, d->sched_priv);
 }
 
 /* Initialises idle task */
-static int rr_init_idle_task(struct domain *p)
+static int rr_init_idle_task(struct domain *d)
 {
-    unsigned long flags;
-    if(rr_alloc_task(p) < 0) return -1;
-    rr_add_task(p);
+    if ( rr_alloc_task(d) < 0 )
+        return -1;
 
-    spin_lock_irqsave(&run_locks[p->processor], flags);
-    set_bit(DF_RUNNING, &p->flags);
-    if ( !__task_on_runqueue(p) )
-        __add_to_runqueue_head(p);
-    spin_unlock_irqrestore(&run_locks[p->processor], flags);
+    rr_add_task(d);
+
+    set_bit(DF_RUNNING, &d->flags);
+    if ( !__task_on_runqueue(d) )
+        __add_to_runqueue_head(d);
+
     return 0;
 }
 
-
 /* Main scheduling function */
 static task_slice_t rr_do_schedule(s_time_t now)
 {
-    unsigned long flags;
     struct domain *prev = current;
     int cpu = current->processor;
-
     task_slice_t ret;
 
-    spin_lock_irqsave(&run_locks[cpu], flags);
-
-    if(!is_idle_task(prev))
+    if ( !is_idle_task(prev) )
     {
         __del_from_runqueue(prev);
 
@@ -151,11 +124,9 @@ static task_slice_t rr_do_schedule(s_tim
             __add_to_runqueue_tail(prev);
     }
 
-    spin_unlock_irqrestore(&run_locks[cpu], flags);
-
-    ret.task = list_entry( RUNQUEUE(cpu)->next,
-                           struct rrobin_dom_info,
-                           run_list)->domain;
+    ret.task = list_entry(RUNQUEUE(cpu)->next,
                          struct rrobin_dom_info,
+                          run_list)->domain;
     ret.time = rr_slice;
     return ret;
 }
@@ -182,47 +153,28 @@ static void rr_dump_settings()
 
 static void rr_sleep(struct domain *d)
 {
-    unsigned long flags;
-
     if ( test_bit(DF_RUNNING, &d->flags) )
         cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
-    else
-    {
-        spin_lock_irqsave(&run_locks[d->processor], flags);
-        if ( __task_on_runqueue(d) )
-            __del_from_runqueue(d);
-        spin_unlock_irqrestore(&run_locks[d->processor], flags);
-    }
+    else if ( __task_on_runqueue(d) )
+        __del_from_runqueue(d);
 }
 
 void rr_wake(struct domain *d)
 {
-    unsigned long flags;
     struct domain *curr;
     s_time_t now;
     int cpu = d->processor;
 
-    spin_lock_irqsave(&run_locks[cpu], flags);
-
-    /* If on the runqueue already then someone has done the wakeup work. */
-    if ( unlikely(__task_on_runqueue(d)))
-    {
-        spin_unlock_irqrestore(&run_locks[cpu], flags);
+    if ( unlikely(__task_on_runqueue(d)) )
         return;
-    }
 
     __add_to_runqueue_head(d);
-    spin_unlock_irqrestore(&run_locks[cpu], flags);
 
     now = NOW();
 
-    spin_lock_irqsave(&schedule_data[cpu].schedule_lock, flags);
     curr = schedule_data[cpu].curr;
-
-    if ( is_idle_task(curr) )
+    if ( is_idle_task(curr) )
         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
-
-    spin_unlock_irqrestore(&schedule_data[cpu].schedule_lock, flags);
 }
 
 
@@ -235,13 +187,10 @@ static void rr_dump_domain(struct domain
 
 static void rr_dump_cpu_state(int i)
 {
-    unsigned long flags;
     struct list_head *list, *queue;
     int loop = 0;
     struct rrobin_dom_info *d_inf;
 
-    spin_lock_irqsave(&run_locks[i], flags);
-
     queue = RUNQUEUE(i);
     printk("QUEUE rq %lx   n: %lx, p: %lx\n", (unsigned long)queue,
            (unsigned long) queue->next, (unsigned long) queue->prev);
@@ -256,7 +205,6 @@ static void rr_dump_cpu_state(int i)
         d_inf = list_entry(list, struct rrobin_dom_info, run_list);
         rr_dump_domain(d_inf->domain);
     }
-    spin_unlock_irqrestore(&run_locks[i], flags);
 }
 
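All three schedulers replace their SLAB cache_constructor() with a memset() in *_alloc_task(). The constructor existed only to null run_list so a fresh allocation does not look queued; zeroing the whole object gives the same guarantee, because __task_on_runqueue() (shown above) tests run_list.next != NULL. A self-contained check of that equivalence (the struct and helper are simplified stand-ins, not the Xen definitions):

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    struct list_head { struct list_head *next, *prev; };

    /* Simplified stand-in for struct rrobin_dom_info. */
    struct dom_info {
        struct list_head run_list;
        void *domain;
    };

    /* Mirrors __task_on_runqueue(): queued iff run_list.next != NULL. */
    static int task_on_runqueue(struct dom_info *inf)
    {
        return inf->run_list.next != NULL;
    }

    int main(void)
    {
        /* Like xmem_cache_alloc() without a constructor: contents undefined. */
        struct dom_info *inf = malloc(sizeof(*inf));

        /* The memset() added by this changeset restores the invariant the
         * old constructor provided: a new task is not on any runqueue. */
        memset(inf, 0, sizeof(*inf));
        assert(!task_on_runqueue(inf));

        free(inf);
        return 0;
    }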
--- a/xen/common/schedule.c	Tue Oct 05 10:47:11 2004 +0000
+++ b/xen/common/schedule.c	Tue Oct 05 10:47:20 2004 +0000
@@ -67,16 +67,17 @@ static void fallback_timer_fn(unsigned l
 /* This is global for now so that private implementations can reach it */
 schedule_data_t schedule_data[NR_CPUS];
 
-/*
- * TODO: It would be nice if the schedulers array could get populated
- * automagically without having to hack the code in here.
- */
-extern struct scheduler sched_bvt_def, sched_fbvt_def, sched_rrobin_def, sched_atropos_def;
-static struct scheduler *schedulers[] = { &sched_bvt_def,
-                                          &sched_fbvt_def,
-                                          &sched_rrobin_def,
-                                          &sched_atropos_def,
-                                          NULL};
+extern struct scheduler sched_bvt_def;
+extern struct scheduler sched_fbvt_def;
+extern struct scheduler sched_rrobin_def;
+extern struct scheduler sched_atropos_def;
+static struct scheduler *schedulers[] = {
+    &sched_bvt_def,
+    &sched_fbvt_def,
+    &sched_rrobin_def,
+    &sched_atropos_def,
+    NULL
+};
 
 /* Operations for the current scheduler. */
 static struct scheduler ops;
@@ -155,21 +156,20 @@ void sched_rem_domain(struct domain *d)
 
 void init_idle_task(void)
 {
-    struct domain *d = current;
-
-    if ( SCHED_OP(init_idle_task, d) < 0)
-        panic("Failed to initialise idle task for processor %d",d->processor);
+    if ( SCHED_OP(init_idle_task, current) < 0 )
+        BUG();
 }
 
 void domain_sleep(struct domain *d)
 {
     unsigned long flags;
 
-    /* sleep and wake protected by domain's sleep_lock */
-    spin_lock_irqsave(&d->sleep_lock, flags);
+    spin_lock_irqsave(&schedule_data[d->processor].schedule_lock, flags);
+
     if ( likely(!domain_runnable(d)) )
         SCHED_OP(sleep, d);
-    spin_unlock_irqrestore(&d->sleep_lock, flags);
+
+    spin_unlock_irqrestore(&schedule_data[d->processor].schedule_lock, flags);
 
     /* Synchronous. */
     while ( test_bit(DF_RUNNING, &d->flags) && !domain_runnable(d) )
@@ -181,10 +181,10 @@ void domain_sleep(struct domain *d)
 
 void domain_wake(struct domain *d)
 {
-    unsigned long  flags;
+    unsigned long flags;
 
-    spin_lock_irqsave(&d->sleep_lock, flags);
-
+    spin_lock_irqsave(&schedule_data[d->processor].schedule_lock, flags);
+
     if ( likely(domain_runnable(d)) )
     {
         TRACE_2D(TRC_SCHED_WAKE, d->domain, d);
@@ -196,7 +196,7 @@ void domain_wake(struct domain *d)
 
     clear_bit(DF_MIGRATED, &d->flags);
 
-    spin_unlock_irqrestore(&d->sleep_lock, flags);
+    spin_unlock_irqrestore(&schedule_data[d->processor].schedule_lock, flags);
 }
 
 /* Block the currently-executing domain until a pertinent event occurs. */
@@ -218,7 +218,6 @@ static long do_yield(void)
     return 0;
 }
 
-
 /*
 * Demultiplex scheduler-related hypercalls.
 */
@@ -292,24 +291,25 @@ long sched_ctl(struct sched_ctl_cmd *cmd
 /* Adjust scheduling parameter for a given domain. */
 long sched_adjdom(struct sched_adjdom_cmd *cmd)
 {
-    struct domain *p;
-
+    struct domain *d;
+
     if ( cmd->sched_id != ops.sched_id )
         return -EINVAL;
 
     if ( cmd->direction != SCHED_INFO_PUT && cmd->direction != SCHED_INFO_GET )
         return -EINVAL;
 
-    p = find_domain_by_id(cmd->domain);
-
-    if( p == NULL )
+    d = find_domain_by_id(cmd->domain);
+    if ( d == NULL )
        return -ESRCH;
 
-    TRACE_1D(TRC_SCHED_ADJDOM, p->domain);
+    TRACE_1D(TRC_SCHED_ADJDOM, d->domain);
 
-    SCHED_OP(adjdom, p, cmd);
+    spin_lock_irq(&schedule_data[d->processor].schedule_lock);
+    SCHED_OP(adjdom, d, cmd);
+    spin_unlock_irq(&schedule_data[d->processor].schedule_lock);
 
-    put_domain(p);
+    put_domain(d);
     return 0;
 }
 
@@ -335,7 +335,6 @@ void __enter_scheduler(void)
     rem_ac_timer(&schedule_data[cpu].s_timer);
 
     ASSERT(!in_irq());
-    // TODO - move to specific scheduler ASSERT(__task_on_runqueue(prev));
 
     if ( test_bit(DF_BLOCKED, &prev->flags) )
     {
@@ -362,6 +361,9 @@ void __enter_scheduler(void)
     schedule_data[cpu].s_timer.expires  = now + r_time;
     add_ac_timer(&schedule_data[cpu].s_timer);
 
+    /* Must be protected by the schedule_lock! */
+    set_bit(DF_RUNNING, &next->flags);
+
     spin_unlock_irq(&schedule_data[cpu].schedule_lock);
 
     /* Ensure that the domain has an up-to-date time base. */
@@ -405,7 +407,6 @@ void __enter_scheduler(void)
     * without warning).
     */
     clear_bit(DF_RUNNING, &prev->flags);
-    set_bit(DF_RUNNING, &next->flags);
 
     /* Mark a timer event for the newly-scheduled domain. */
     if ( !is_idle_task(next) )
@@ -549,15 +550,23 @@ void dump_runq(u_char key, void *dev_id,
 {
     s_time_t now = NOW();
     int i;
+    unsigned long flags;
+
+    local_irq_save(flags);
 
     printk("Scheduler: %s (%s)\n", ops.name, ops.opt_name);
     SCHED_OP(dump_settings);
     printk("NOW=0x%08X%08X\n",  (u32)(now>>32), (u32)now);
+
     for ( i = 0; i < smp_num_cpus; i++ )
     {
+        spin_lock(&schedule_data[i].schedule_lock);
         printk("CPU[%02d] ", i);
         SCHED_OP(dump_cpu_state,i);
+        spin_unlock(&schedule_data[i].schedule_lock);
     }
+
+    local_irq_restore(flags);
 }
 
 #if defined(WAKE_HISTO) || defined(BLOCKTIME_HISTO)
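Note also that set_bit(DF_RUNNING, &next->flags) moves from after the unlock into the locked region of __enter_scheduler(). That ordering appears to matter for the rewritten domain_sleep() above, which decides whether to dequeue under schedule_lock and then spins on DF_RUNNING: if the flag only changes while the lock is held, a sleeper cannot observe a half-committed scheduling decision. A sketch of the ordering this enforces (fragment; names taken from the diff):

    spin_lock_irq(&schedule_data[cpu].schedule_lock);
    /* ... ops.do_schedule() picks 'next', s_timer is re-armed ... */
    set_bit(DF_RUNNING, &next->flags);   /* now inside the critical section */
    spin_unlock_irq(&schedule_data[cpu].schedule_lock);
    /* ... context switch; 'prev' is marked not-running afterwards ... */
    clear_bit(DF_RUNNING, &prev->flags);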
--- a/xen/include/xen/sched.h	Tue Oct 05 10:47:11 2004 +0000
+++ b/xen/include/xen/sched.h	Tue Oct 05 10:47:20 2004 +0000
@@ -97,7 +97,6 @@ struct domain
 
     /* Scheduling. */
     int      shutdown_code;  /* code value from OS (if DF_SHUTDOWN). */
-    spinlock_t sleep_lock;   /* wake/sleep lock */
     s_time_t lastschd;       /* time this domain was last scheduled */
     s_time_t lastdeschd;     /* time this domain was last descheduled */
     s_time_t cpu_time;       /* total CPU time received till now */