ia64/xen-unstable
changeset 1927:a83817a4f882
bitkeeper revision 1.1108.23.1 (41063218Lo2jBTfOaHXFy3vKMmTV1Q)
Merge boulderdash.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into boulderdash.cl.cam.ac.uk:/auto/anfs/scratch/boulderdash/gm281/xeno-clone/xeno.bk
| author | gm281@boulderdash.cl.cam.ac.uk |
| --- | --- |
| date | Tue Jul 27 10:44:40 2004 +0000 (2004-07-27) |
| parents | 42f37b71f9b1 f48d42a90681 |
| children | 04ebbad06e8f 28c90122f778 02fb4c801767 |
| files | xen/common/sched_bvt.c xen/common/sched_fair_bvt.c xen/common/sched_rrobin.c xen/common/schedule.c xen/include/xen/sched-if.h xen/include/xen/sched.h |
line diff
--- a/xen/common/sched_bvt.c	Mon Jul 26 20:03:34 2004 +0000
+++ b/xen/common/sched_bvt.c	Tue Jul 27 10:44:40 2004 +0000
@@ -148,11 +148,11 @@ int bvt_init_idle_task(struct domain *p)
 
     bvt_add_task(p);
 
-    spin_lock_irqsave(&schedule_lock[p->processor], flags);
+    spin_lock_irqsave(&schedule_data[p->processor].schedule_lock, flags);
     set_bit(DF_RUNNING, &p->flags);
     if ( !__task_on_runqueue(RUNLIST(p)) )
         __add_to_runqueue_head(RUNLIST(p), RUNQUEUE(p->processor));
-    spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
+    spin_unlock_irqrestore(&schedule_data[p->processor].schedule_lock, flags);
 
     return 0;
 }
@@ -218,7 +218,7 @@ int bvt_adjdom(struct domain *p,
         if ( mcu_adv == 0 )
             return -EINVAL;
 
-        spin_lock_irqsave(&schedule_lock[p->processor], flags);
+        spin_lock_irqsave(&schedule_data[p->processor].schedule_lock, flags);
         inf->mcu_advance = mcu_adv;
         inf->warp = warp;
         inf->warpl = warpl;
@@ -229,18 +229,18 @@ int bvt_adjdom(struct domain *p,
                p->domain, inf->mcu_advance, inf->warp,
                inf->warpl, inf->warpu );
 
-        spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
+        spin_unlock_irqrestore(&schedule_data[p->processor].schedule_lock, flags);
     }
     else if ( cmd->direction == SCHED_INFO_GET )
     {
         struct bvt_dom_info *inf = BVT_INFO(p);
 
-        spin_lock_irqsave(&schedule_lock[p->processor], flags);
+        spin_lock_irqsave(&schedule_data[p->processor].schedule_lock, flags);
         params->mcu_adv = inf->mcu_advance;
         params->warp = inf->warp;
         params->warpl = inf->warpl;
         params->warpu = inf->warpu;
-        spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
+        spin_unlock_irqrestore(&schedule_data[p->processor].schedule_lock, flags);
     }
 
     return 0;
@@ -411,7 +411,7 @@ static void bvt_dump_cpu_state(int i)
     struct bvt_dom_info *d_inf;
     struct domain *d;
 
-    spin_lock_irqsave(&schedule_lock[i], flags);
+    spin_lock_irqsave(&schedule_data[i].schedule_lock, flags);
     printk("svt=0x%08lX ", CPU_SVT(i));
 
     queue = RUNQUEUE(i);
@@ -430,7 +430,7 @@ static void bvt_dump_cpu_state(int i)
                (unsigned long)list, (unsigned long)list->next,
                (unsigned long)list->prev);
     }
-    spin_unlock_irqrestore(&schedule_lock[i], flags);
+    spin_unlock_irqrestore(&schedule_data[i].schedule_lock, flags);
 }
 
 /* We use cache to create the bvt_dom_infos
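Every hunk above makes the same substitution: the lock that used to live in the freestanding `schedule_lock[NR_CPUS]` array is now taken from the per-CPU `schedule_data` slot that holds the state it protects. A minimal user-space sketch of the resulting idiom, with pthread spinlocks standing in for Xen's `spin_lock_irqsave` and a hypothetical two-field struct (illustration only, not Xen code):

```c
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

/* Per-CPU scheduler state, modelled on schedule_data_st: the lock
 * now sits next to the data it protects instead of in a separate
 * global array. */
struct schedule_data {
    pthread_spinlock_t schedule_lock; /* was: spinlock_t schedule_lock[NR_CPUS] */
    void              *curr;          /* protected by schedule_lock */
};

static struct schedule_data schedule_data[NR_CPUS];

int main(void)
{
    int cpu;

    for (cpu = 0; cpu < NR_CPUS; cpu++)
        pthread_spin_init(&schedule_data[cpu].schedule_lock,
                          PTHREAD_PROCESS_PRIVATE);

    /* The new locking idiom used throughout this changeset:
     * index the per-CPU struct, then take its embedded lock. */
    cpu = 0;
    pthread_spin_lock(&schedule_data[cpu].schedule_lock);
    schedule_data[cpu].curr = NULL;
    pthread_spin_unlock(&schedule_data[cpu].schedule_lock);

    puts("locked and unlocked per-CPU schedule_lock");
    return 0;
}
```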
--- a/xen/common/sched_fair_bvt.c	Mon Jul 26 20:03:34 2004 +0000
+++ b/xen/common/sched_fair_bvt.c	Tue Jul 27 10:44:40 2004 +0000
@@ -160,12 +160,11 @@ int fbvt_init_idle_task(struct domain *p
     if(fbvt_alloc_task(p) < 0) return -1;
 
     fbvt_add_task(p);
-//printk("< ----- >Initialising idle task for processor %d, address %d, priv %d\n", p->processor, (int)p, (int)p->sched_priv);
-    spin_lock_irqsave(&schedule_lock[p->processor], flags);
+    spin_lock_irqsave(&schedule_data[p->processor].schedule_lock, flags);
     set_bit(DF_RUNNING, &p->flags);
     if ( !__task_on_runqueue(RUNLIST(p)) )
         __add_to_runqueue_head(RUNLIST(p), RUNQUEUE(p->processor));
-    spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
+    spin_unlock_irqrestore(&schedule_data[p->processor].schedule_lock, flags);
 
     return 0;
 }
@@ -233,7 +232,7 @@ int fbvt_adjdom(struct domain *p,
         if ( mcu_adv == 0 )
             return -EINVAL;
 
-        spin_lock_irqsave(&schedule_lock[p->processor], flags);
+        spin_lock_irqsave(&schedule_data[p->processor].schedule_lock, flags);
         inf->mcu_advance = mcu_adv;
         inf->warp = warp;
         inf->warpl = warpl;
@@ -244,18 +243,20 @@ int fbvt_adjdom(struct domain *p,
                p->domain, inf->mcu_advance, inf->warp,
                inf->warpl, inf->warpu );
 
-        spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
+        spin_unlock_irqrestore(&schedule_data[p->processor].schedule_lock,
+                               flags);
     }
     else if ( cmd->direction == SCHED_INFO_GET )
     {
         struct fbvt_dom_info *inf = FBVT_INFO(p);
 
-        spin_lock_irqsave(&schedule_lock[p->processor], flags);
+        spin_lock_irqsave(&schedule_data[p->processor].schedule_lock, flags);
         params->mcu_adv = inf->mcu_advance;
         params->warp = inf->warp;
         params->warpl = inf->warpl;
         params->warpu = inf->warpu;
-        spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
+        spin_unlock_irqrestore(&schedule_data[p->processor].schedule_lock,
+                               flags);
     }
 
     return 0;
@@ -285,7 +286,6 @@ static task_slice_t fbvt_do_schedule(s_t
     struct fbvt_dom_info *next_prime_inf = NULL;
     task_slice_t ret;
 
-//if(prev->sched_priv == NULL) printk("----> %d\n", prev->domain);
     ASSERT(prev->sched_priv != NULL);
     ASSERT(prev_inf != NULL);
 
@@ -450,7 +450,6 @@ static task_slice_t fbvt_do_schedule(s_t
     next->min_slice = ctx_allow;
     ret.task = next;
     ret.time = r_time;
-//printk("NEXT --> domain %d (address %d, processor %d), priv %d\n",next->domain, (int)next, next->processor, (int)next->sched_priv);
     return ret;
 }
 
@@ -476,7 +475,7 @@ static void fbvt_dump_cpu_state(int i)
     struct fbvt_dom_info *d_inf;
     struct domain *d;
 
-    spin_lock_irqsave(&schedule_lock[i], flags);
+    spin_lock_irqsave(&schedule_data[i].schedule_lock, flags);
     printk("svt=0x%08lX ", CPU_SVT(i));
 
     queue = RUNQUEUE(i);
@@ -495,7 +494,7 @@ static void fbvt_dump_cpu_state(int i)
                (unsigned long)list, (unsigned long)list->next,
                (unsigned long)list->prev);
     }
-    spin_unlock_irqrestore(&schedule_lock[i], flags);
+    spin_unlock_irqrestore(&schedule_data[i].schedule_lock, flags);
 }
 
 
@@ -559,14 +558,10 @@ static void fbvt_wake(struct domain *d)
     int cpu = d->processor;
     s32 io_warp;
 
-//printk("-|--> Adding new domain %d\n",d->domain);
-//printk("-|--> Current%d (address %d, processor %d) \n%d\n",current->domain,(int)current, current->processor, (int)current->sched_priv);
     /* If on the runqueue already then someone has done the wakeup work. */
     if ( unlikely(__task_on_runqueue(RUNLIST(d))) )
         return;
-//printk("----> Not on runqueue\n");
     __add_to_runqueue_head(RUNLIST(d), RUNQUEUE(cpu));
-//printk(" ---> %d\n",(int)current->sched_priv);
 
     now = NOW();
 
@@ -617,7 +612,6 @@ static void fbvt_wake(struct domain *d)
     __calc_evt(inf);
 
     curr = schedule_data[cpu].curr;
-//printk(" ---> %d\n",(int)current->sched_priv);
 
     /* Currently-running domain should run at least for ctx_allow. */
     min_time = curr->lastschd + curr->min_slice;
@@ -626,7 +620,6 @@ static void fbvt_wake(struct domain *d)
         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
     else if ( schedule_data[cpu].s_timer.expires > (min_time + TIME_SLOP) )
         mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
-//printk(" ---> %d\n",(int)current->sched_priv);
 }
 
 struct scheduler sched_fbvt_def = {
--- a/xen/common/sched_rrobin.c	Mon Jul 26 20:03:34 2004 +0000
+++ b/xen/common/sched_rrobin.c	Tue Jul 27 10:44:40 2004 +0000
@@ -95,11 +95,11 @@ static int rr_init_idle_task(struct doma
     if(rr_alloc_task(p) < 0) return -1;
     rr_add_task(p);
 
-    spin_lock_irqsave(&schedule_lock[p->processor], flags);
+    spin_lock_irqsave(&schedule_data[p->processor].schedule_lock, flags);
     set_bit(DF_RUNNING, &p->flags);
     if ( !__task_on_runqueue(RUNLIST(p)) )
         __add_to_runqueue_head(RUNLIST(p), RUNQUEUE(p->processor));
-    spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
+    spin_unlock_irqrestore(&schedule_data[p->processor].schedule_lock, flags);
     return 0;
 }
 
@@ -194,7 +194,7 @@ static void rr_dump_cpu_state(int i)
     int loop = 0;
     struct rrobin_dom_info *d_inf;
 
-    spin_lock_irqsave(&schedule_lock[i], flags);
+    spin_lock_irqsave(&schedule_data[i].schedule_lock, flags);
 
     queue = RUNQUEUE(i);
     printk("QUEUE rq %lx n: %lx, p: %lx\n", (unsigned long)queue,
@@ -210,7 +210,7 @@ static void rr_dump_cpu_state(int i)
         d_inf = list_entry(list, struct rrobin_dom_info, run_list);
         rr_dump_domain(d_inf->domain);
     }
-    spin_unlock_irqrestore(&schedule_lock[i], flags);
+    spin_unlock_irqrestore(&schedule_data[i].schedule_lock, flags);
 }
 
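`rr_dump_cpu_state()` walks the runqueue under the per-CPU lock and recovers each `rrobin_dom_info` from its embedded `run_list` node via `list_entry()`. A self-contained sketch of that intrusive-list walk, with a hand-built two-element queue and a hypothetical `domid` field for output:

```c
#include <stddef.h>
#include <stdio.h>

/* Minimal intrusive list, in the style of Xen/Linux list.h. */
struct list_head { struct list_head *next, *prev; };

#define list_entry(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct rrobin_dom_info {
    struct list_head run_list;  /* linked into the per-CPU runqueue */
    int domid;                  /* hypothetical field for this demo */
};

int main(void)
{
    struct rrobin_dom_info a = { .domid = 1 }, b = { .domid = 2 };
    struct list_head queue;

    /* Hand-build queue -> a -> b -> queue. */
    queue.next = &a.run_list;       a.run_list.prev = &queue;
    a.run_list.next = &b.run_list;  b.run_list.prev = &a.run_list;
    b.run_list.next = &queue;       queue.prev = &b.run_list;

    /* The walk rr_dump_cpu_state() performs under schedule_lock:
     * recover the containing dom_info from each run_list node. */
    for (struct list_head *pos = queue.next; pos != &queue; pos = pos->next) {
        struct rrobin_dom_info *d_inf =
            list_entry(pos, struct rrobin_dom_info, run_list);
        printf("domain %d\n", d_inf->domid);
    }
    return 0;
}
```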
--- a/xen/common/schedule.c	Mon Jul 26 20:03:34 2004 +0000
+++ b/xen/common/schedule.c	Tue Jul 27 10:44:40 2004 +0000
@@ -85,8 +85,6 @@ static struct scheduler ops;
     (( ops.fn != NULL ) ? ops.fn( __VA_ARGS__ )      \
      : (typeof(ops.fn(__VA_ARGS__)))0 )
 
-spinlock_t schedule_lock[NR_CPUS] __cacheline_aligned;
-
 /* Per-CPU periodic timer sends an event to the currently-executing domain. */
 static struct ac_timer t_timer[NR_CPUS];
 
@@ -168,10 +166,10 @@ void domain_sleep(struct domain *d)
     unsigned long flags;
     int cpu = d->processor;
 
-    spin_lock_irqsave(&schedule_lock[cpu], flags);
+    spin_lock_irqsave(&schedule_data[cpu].schedule_lock, flags);
     if ( likely(!domain_runnable(d)) )
         SCHED_OP(sleep, d);
-    spin_unlock_irqrestore(&schedule_lock[cpu], flags);
+    spin_unlock_irqrestore(&schedule_data[cpu].schedule_lock, flags);
 
     /* Synchronous. */
     while ( test_bit(DF_RUNNING, &d->flags) && !domain_runnable(d) )
@@ -185,7 +183,7 @@ void domain_wake(struct domain *d)
 {
     unsigned long flags;
     int cpu = d->processor;
-    spin_lock_irqsave(&schedule_lock[cpu], flags);
+    spin_lock_irqsave(&schedule_data[cpu].schedule_lock, flags);
     if ( likely(domain_runnable(d)) )
     {
         TRACE_2D(TRC_SCHED_WAKE, d->domain, d);
@@ -194,7 +192,7 @@ void domain_wake(struct domain *d)
         d->wokenup = NOW();
 #endif
     }
-    spin_unlock_irqrestore(&schedule_lock[cpu], flags);
+    spin_unlock_irqrestore(&schedule_data[cpu].schedule_lock, flags);
 }
 
 /* Block the currently-executing domain until a pertinent event occurs. */
@@ -326,7 +324,7 @@ void __enter_scheduler(void)
 
     perfc_incrc(sched_run);
 
-    spin_lock_irq(&schedule_lock[cpu]);
+    spin_lock_irq(&schedule_data[cpu].schedule_lock);
 
     now = NOW();
 
@@ -360,7 +358,7 @@ void __enter_scheduler(void)
     schedule_data[cpu].s_timer.expires = now + r_time;
     add_ac_timer(&schedule_data[cpu].s_timer);
 
-    spin_unlock_irq(&schedule_lock[cpu]);
+    spin_unlock_irq(&schedule_data[cpu].schedule_lock);
 
     /* Ensure that the domain has an up-to-date time base. */
     if ( !is_idle_task(next) )
@@ -481,7 +479,7 @@ void __init scheduler_init(void)
 
     for ( i = 0; i < NR_CPUS; i++ )
     {
-        spin_lock_init(&schedule_lock[i]);
+        spin_lock_init(&schedule_data[i].schedule_lock);
         schedule_data[i].curr = &idle0_task;
 
         init_ac_timer(&schedule_data[i].s_timer);
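The context at the top of this file's first hunk shows the `SCHED_OP()` dispatcher: scheduler hooks are optional function pointers, and a `NULL` hook evaluates to a zero of the hook's return type. A compilable sketch of the same trick (`typeof` is the GCC/Clang extension used in the Xen source; the `sleep`/`wake` hooks and `my_sleep` are stand-ins for this demo):

```c
#include <stdio.h>

/* A struct of optional scheduler hooks, as in schedule.c. */
struct scheduler {
    void (*sleep)(int domid);
    int  (*wake)(int domid);
};

static void my_sleep(int domid) { printf("sleep dom %d\n", domid); }

static struct scheduler ops = { .sleep = my_sleep, .wake = NULL };

/* NULL hooks fall through to a zero of the hook's return type,
 * so callers never have to test the pointer themselves. */
#define SCHED_OP(fn, ...)                       \
    ((ops.fn != NULL) ? ops.fn(__VA_ARGS__)     \
                      : (typeof(ops.fn(__VA_ARGS__)))0)

int main(void)
{
    SCHED_OP(sleep, 3);                 /* dispatches to my_sleep() */
    printf("%d\n", SCHED_OP(wake, 3));  /* NULL hook -> 0, no crash */
    return 0;
}
```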
--- a/xen/include/xen/sched-if.h	Mon Jul 26 20:03:34 2004 +0000
+++ b/xen/include/xen/sched-if.h	Tue Jul 27 10:44:40 2004 +0000
@@ -11,8 +11,10 @@
 
 typedef struct schedule_data_st
 {
-    struct domain       *curr;          /* current task */
-    struct domain       *idle;          /* idle task for this cpu */
+    spinlock_t          schedule_lock;  /* spinlock protecting curr pointer
+                                           TODO check this */
+    struct domain       *curr;          /* current task */
+    struct domain       *idle;          /* idle task for this cpu */
     void *              sched_priv;
     struct ac_timer     s_timer;        /* scheduling timer */
 #ifdef BUCKETS
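Embedding `schedule_lock` in `schedule_data_st` keeps the lock on the same cache line(s) as the `curr` pointer it protects; the removed global's `__cacheline_aligned` annotation hints at the false-sharing concern behind the layout. Whether the per-CPU struct itself is cacheline-aligned is not visible in this hunk, so the sketch below assumes a 64-byte line and a placeholder lock field purely to illustrate the layout argument:

```c
#include <stdio.h>
#include <stdalign.h>

#define CACHE_LINE 64                   /* assumed line size for this demo */
#define __cacheline_aligned __attribute__((aligned(CACHE_LINE)))

/* Modelled on schedule_data_st; the int is a placeholder spinlock. */
typedef struct schedule_data_st {
    int   schedule_lock;  /* protects curr (per the new comment) */
    void *curr;           /* current task */
    void *idle;           /* idle task for this cpu */
} __cacheline_aligned schedule_data_t;

static schedule_data_t schedule_data[4];

int main(void)
{
    /* Alignment pads each slot to a full line, so CPUs spinning on
     * different per-CPU locks never bounce the same cache line. */
    printf("sizeof=%zu alignof=%zu\n",
           sizeof(schedule_data_t), alignof(schedule_data_t));
    printf("cpu0 slot %p, cpu1 slot %p\n",
           (void *)&schedule_data[0], (void *)&schedule_data[1]);
    return 0;
}
```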
--- a/xen/include/xen/sched.h	Mon Jul 26 20:03:34 2004 +0000
+++ b/xen/include/xen/sched.h	Tue Jul 27 10:44:40 2004 +0000
@@ -196,8 +196,6 @@ void new_thread(struct domain *d,
 extern unsigned long wait_init_idle;
 #define init_idle() clear_bit(smp_processor_id(), &wait_init_idle);
 
-extern spinlock_t schedule_lock[NR_CPUS] __cacheline_aligned;
-
 #define set_current_state(_s) do { current->state = (_s); } while (0)
 void scheduler_init(void);
 void schedulers_start(void);