ia64/xen-unstable
changeset 11007:5e8c254c9dcd
[XEN] Make per-cpu schedule data explicitly PER_CPU.
Signed-off-by: Keir Fraser <keir@xensource.com>
| author   | kaf24@firebug.cl.cam.ac.uk |
|----------|----------------------------|
| date     | Tue Aug 08 13:55:22 2006 +0100 (2006-08-08) |
| parents  | 7ce412dde1be |
| children | 0caf8d9218cc |
| files    | xen/common/sched_bvt.c xen/common/sched_credit.c xen/common/sched_sedf.c xen/common/schedule.c xen/include/xen/sched-if.h |
line diff
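Every hunk below applies the same mechanical change: the global `schedule_data[NR_CPUS]` array, indexed by CPU number, becomes per-CPU data reached through `per_cpu(schedule_data, cpu)` (or `this_cpu(schedule_data)` for the local CPU), with `DEFINE_PER_CPU`/`DECLARE_PER_CPU` replacing the array definition and its extern declaration. The self-contained sketch here shows only the before/after access pattern; its macros emulate the per-CPU interface with a plain array so the example compiles on its own, which is purely an assumption for illustration. Xen's real macros place each CPU's copy in dedicated per-CPU storage.

```c
/*
 * Sketch of the access-pattern change made by this changeset.
 * The macros below EMULATE the per-CPU interface with an NR_CPUS-sized
 * array so the example is standalone; they are not Xen's implementation.
 */
#include <stdio.h>

#define NR_CPUS 4

#define DEFINE_PER_CPU(type, name)  type per_cpu__##name[NR_CPUS]
#define per_cpu(name, cpu)          (per_cpu__##name[cpu])
#define this_cpu(name)              (per_cpu__##name[current_cpu()])

struct schedule_data {
    void *curr;          /* current task on this CPU */
    unsigned long tick;  /* periodic tick counter    */
};

/* Stand-in for smp_processor_id(); always "CPU 0" in this sketch. */
static unsigned int current_cpu(void) { return 0; }

/* Old style: struct schedule_data schedule_data[NR_CPUS];
 * New style: one instance per CPU, reached through the accessors. */
DEFINE_PER_CPU(struct schedule_data, schedule_data);

int main(void)
{
    unsigned int cpu;

    /* Old: schedule_data[cpu].tick++;  New: */
    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
        per_cpu(schedule_data, cpu).tick++;

    /* Shorthand for the local CPU's copy. */
    this_cpu(schedule_data).tick++;

    printf("cpu0 tick=%lu\n", per_cpu(schedule_data, 0).tick);  /* prints 2 */
    return 0;
}
```

Because the variable keeps its name, call sites change only in how they locate the per-CPU slot, not in what they read or write.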
--- a/xen/common/sched_bvt.c Tue Aug 08 12:04:46 2006 +0100
+++ b/xen/common/sched_bvt.c Tue Aug 08 13:55:22 2006 +0100
@@ -60,7 +60,8 @@ struct bvt_cpu_info
 
 #define BVT_INFO(p) ((struct bvt_dom_info *)(p)->sched_priv)
 #define EBVT_INFO(p) ((struct bvt_vcpu_info *)(p)->sched_priv)
-#define CPU_INFO(cpu) ((struct bvt_cpu_info *)(schedule_data[cpu]).sched_priv)
+#define CPU_INFO(cpu) \
+    ((struct bvt_cpu_info *)(per_cpu(schedule_data, cpu).sched_priv))
 #define RUNLIST(p) ((struct list_head *)&(EBVT_INFO(p)->run_list))
 #define RUNQUEUE(cpu) ((struct list_head *)&(CPU_INFO(cpu)->runqueue))
 #define CPU_SVT(cpu) (CPU_INFO(cpu)->svt)
@@ -203,7 +204,8 @@ static int bvt_init_vcpu(struct vcpu *v)
     /* Allocate per-CPU context if this is the first domain to be added. */
     if ( CPU_INFO(v->processor) == NULL )
     {
-        schedule_data[v->processor].sched_priv = xmalloc(struct bvt_cpu_info);
+        per_cpu(schedule_data, v->processor).sched_priv =
+            xmalloc(struct bvt_cpu_info);
         BUG_ON(CPU_INFO(v->processor) == NULL);
         INIT_LIST_HEAD(RUNQUEUE(v->processor));
         CPU_SVT(v->processor) = 0;
@@ -251,7 +253,7 @@ static void bvt_wake(struct vcpu *v)
     /* Deal with warping here. */
     einf->evt = calc_evt(v, einf->avt);
 
-    curr = schedule_data[cpu].curr;
+    curr = per_cpu(schedule_data, cpu).curr;
     curr_evt = calc_evt(curr, calc_avt(curr, now));
     /* Calculate the time the current domain would run assuming
        the second smallest evt is of the newly woken domain */
@@ -261,14 +263,14 @@ static void bvt_wake(struct vcpu *v)
 
     if ( is_idle_vcpu(curr) || (einf->evt <= curr_evt) )
         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
-    else if ( schedule_data[cpu].s_timer.expires > r_time )
-        set_timer(&schedule_data[cpu].s_timer, r_time);
+    else if ( per_cpu(schedule_data, cpu).s_timer.expires > r_time )
+        set_timer(&per_cpu(schedule_data, cpu).s_timer, r_time);
 }
 
 
 static void bvt_sleep(struct vcpu *v)
 {
-    if ( schedule_data[v->processor].curr == v )
+    if ( per_cpu(schedule_data, v->processor).curr == v )
         cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
     else if ( __task_on_runqueue(v) )
         __del_from_runqueue(v);
@@ -418,7 +420,7 @@ static struct task_slice bvt_do_schedule
      * *and* the task the second lowest evt.
      * this code is O(n) but we expect n to be small.
      */
-    next_einf = EBVT_INFO(schedule_data[cpu].idle);
+    next_einf = EBVT_INFO(per_cpu(schedule_data, cpu).idle);
     next_prime_einf = NULL;
 
     next_evt = ~0U;
--- a/xen/common/sched_credit.c Tue Aug 08 12:04:46 2006 +0100
+++ b/xen/common/sched_credit.c Tue Aug 08 13:55:22 2006 +0100
@@ -55,7 +55,8 @@
 /*
  * Useful macros
  */
-#define CSCHED_PCPU(_c) ((struct csched_pcpu *)schedule_data[_c].sched_priv)
+#define CSCHED_PCPU(_c) \
+    ((struct csched_pcpu *)per_cpu(schedule_data, _c).sched_priv)
 #define CSCHED_VCPU(_vcpu) ((struct csched_vcpu *) (_vcpu)->sched_priv)
 #define CSCHED_DOM(_dom) ((struct csched_dom *) (_dom)->sched_priv)
 #define RUNQ(_cpu) (&(CSCHED_PCPU(_cpu)->runq))
@@ -253,7 +254,8 @@ static inline void
 static inline void
 __runq_tickle(unsigned int cpu, struct csched_vcpu *new)
 {
-    struct csched_vcpu * const cur = CSCHED_VCPU(schedule_data[cpu].curr);
+    struct csched_vcpu * const cur =
+        CSCHED_VCPU(per_cpu(schedule_data, cpu).curr);
     cpumask_t mask;
 
     ASSERT(cur);
@@ -318,10 +320,10 @@ csched_pcpu_init(int cpu)
 
     INIT_LIST_HEAD(&spc->runq);
     spc->runq_sort_last = csched_priv.runq_sort;
-    schedule_data[cpu].sched_priv = spc;
+    per_cpu(schedule_data, cpu).sched_priv = spc;
 
     /* Start off idling... */
-    BUG_ON( !is_idle_vcpu(schedule_data[cpu].curr) );
+    BUG_ON( !is_idle_vcpu(per_cpu(schedule_data, cpu).curr) );
     cpu_set(cpu, csched_priv.idlers);
 
     spin_unlock_irqrestore(&csched_priv.lock, flags);
@@ -533,7 +535,7 @@ csched_vcpu_sleep(struct vcpu *vc)
 
     BUG_ON( is_idle_vcpu(vc) );
 
-    if ( schedule_data[vc->processor].curr == vc )
+    if ( per_cpu(schedule_data, vc->processor).curr == vc )
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
     else if ( __vcpu_on_runq(svc) )
         __runq_remove(svc);
@@ -547,7 +549,7 @@ csched_vcpu_wake(struct vcpu *vc)
 
     BUG_ON( is_idle_vcpu(vc) );
 
-    if ( unlikely(schedule_data[cpu].curr == vc) )
+    if ( unlikely(per_cpu(schedule_data, cpu).curr == vc) )
     {
         CSCHED_STAT_CRANK(vcpu_wake_running);
         return;
@@ -599,7 +601,8 @@ csched_vcpu_set_affinity(struct vcpu *vc
 
         vc->processor = first_cpu(vc->cpu_affinity);
 
-        spin_unlock_irqrestore(&schedule_data[lcpu].schedule_lock, flags);
+        spin_unlock_irqrestore(&per_cpu(schedule_data, lcpu).schedule_lock,
+                               flags);
     }
 
     vcpu_unpause(vc);
@@ -685,7 +688,7 @@ csched_runq_sort(unsigned int cpu)
 
     spc->runq_sort_last = sort_epoch;
 
-    spin_lock_irqsave(&schedule_data[cpu].schedule_lock, flags);
+    spin_lock_irqsave(&per_cpu(schedule_data, cpu).schedule_lock, flags);
 
     runq = &spc->runq;
     elem = runq->next;
@@ -710,7 +713,7 @@ csched_runq_sort(unsigned int cpu)
         elem = next;
     }
 
-    spin_unlock_irqrestore(&schedule_data[cpu].schedule_lock, flags);
+    spin_unlock_irqrestore(&per_cpu(schedule_data, cpu).schedule_lock, flags);
 }
 
 static void
@@ -900,7 +903,7 @@ csched_tick(unsigned int cpu)
      * we could distribute or at the very least cycle the duty.
      */
     if ( (csched_priv.master == cpu) &&
-         (schedule_data[cpu].tick % CSCHED_ACCT_NTICKS) == 0 )
+         (per_cpu(schedule_data, cpu).tick % CSCHED_ACCT_NTICKS) == 0 )
     {
         csched_acct();
     }
@@ -984,7 +987,7 @@ csched_load_balance(int cpu, struct csch
         * cause a deadlock if the peer CPU is also load balancing and trying
        * to lock this CPU.
         */
-        if ( spin_trylock(&schedule_data[peer_cpu].schedule_lock) )
+        if ( spin_trylock(&per_cpu(schedule_data, peer_cpu).schedule_lock) )
        {
 
            spc = CSCHED_PCPU(peer_cpu);
@@ -998,7 +1001,7 @@ csched_load_balance(int cpu, struct csch
                speer = csched_runq_steal(spc, cpu, snext->pri);
            }
 
-            spin_unlock(&schedule_data[peer_cpu].schedule_lock);
+            spin_unlock(&per_cpu(schedule_data, peer_cpu).schedule_lock);
 
            /* Got one! */
            if ( speer )
@@ -1120,11 +1123,11 @@ csched_dump_pcpu(int cpu)
     runq = &spc->runq;
 
     printk(" tick=%lu, sort=%d\n",
-           schedule_data[cpu].tick,
+           per_cpu(schedule_data, cpu).tick,
            spc->runq_sort_last);
 
     /* current VCPU */
-    svc = CSCHED_VCPU(schedule_data[cpu].curr);
+    svc = CSCHED_VCPU(per_cpu(schedule_data, cpu).curr);
     if ( svc )
     {
         printk("\trun: ");
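The comment kept in the csched_load_balance() hunk explains why the peer CPU's schedule_lock is taken with spin_trylock() rather than spin_lock(): two CPUs balancing towards each other while each holds its own lock would otherwise deadlock. Below is a small illustration of that back-off idea using POSIX threads; the lock array, runqueue lengths, and helper names are invented for the example and are not Xen code.

```c
/*
 * Illustration of trylock-based deadlock avoidance between two CPUs that
 * may balance towards each other.  Pthread mutexes stand in for Xen
 * spinlocks; the "runqueue" is just an integer length.
 */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 2

static pthread_mutex_t schedule_lock[NR_CPUS] = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};
static int runq_len[NR_CPUS] = { 0, 3 };

static void load_balance(int cpu, int peer)
{
    /* The balancer holds its own lock (taken here for self-containment). */
    pthread_mutex_lock(&schedule_lock[cpu]);

    /*
     * Blocking on the peer's lock could deadlock if the peer is doing the
     * same thing towards us, so give up rather than wait.
     */
    if ( pthread_mutex_trylock(&schedule_lock[peer]) == 0 )
    {
        if ( runq_len[peer] > 1 )
        {
            runq_len[peer]--;   /* "steal" one unit of work */
            runq_len[cpu]++;
        }
        pthread_mutex_unlock(&schedule_lock[peer]);
    }
    else
        printf("cpu%d: peer busy, skipping\n", cpu);

    pthread_mutex_unlock(&schedule_lock[cpu]);
}

int main(void)
{
    load_balance(0, 1);
    printf("runq: cpu0=%d cpu1=%d\n", runq_len[0], runq_len[1]);
    return 0;
}
```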
--- a/xen/common/sched_sedf.c Tue Aug 08 12:04:46 2006 +0100
+++ b/xen/common/sched_sedf.c Tue Aug 08 13:55:22 2006 +0100
@@ -113,13 +113,14 @@ struct sedf_cpu_info {
 };
 
 #define EDOM_INFO(d) ((struct sedf_vcpu_info *)((d)->sched_priv))
-#define CPU_INFO(cpu) ((struct sedf_cpu_info *)schedule_data[cpu].sched_priv)
+#define CPU_INFO(cpu) \
+    ((struct sedf_cpu_info *)per_cpu(schedule_data, cpu).sched_priv)
 #define LIST(d) (&EDOM_INFO(d)->list)
 #define EXTRALIST(d,i) (&(EDOM_INFO(d)->extralist[i]))
 #define RUNQ(cpu) (&CPU_INFO(cpu)->runnableq)
 #define WAITQ(cpu) (&CPU_INFO(cpu)->waitq)
 #define EXTRAQ(cpu,i) (&(CPU_INFO(cpu)->extraq[i]))
-#define IDLETASK(cpu) ((struct vcpu *)schedule_data[cpu].idle)
+#define IDLETASK(cpu) ((struct vcpu *)per_cpu(schedule_data, cpu).idle)
 
 #define PERIOD_BEGIN(inf) ((inf)->deadl_abs - (inf)->period)
 
@@ -348,11 +349,11 @@ static int sedf_init_vcpu(struct vcpu *v
     inf->vcpu = v;
 
     /* Allocate per-CPU context if this is the first domain to be added. */
-    if ( unlikely(schedule_data[v->processor].sched_priv == NULL) )
+    if ( unlikely(per_cpu(schedule_data, v->processor).sched_priv == NULL) )
     {
-        schedule_data[v->processor].sched_priv =
+        per_cpu(schedule_data, v->processor).sched_priv =
             xmalloc(struct sedf_cpu_info);
-        BUG_ON(schedule_data[v->processor].sched_priv == NULL);
+        BUG_ON(per_cpu(schedule_data, v->processor).sched_priv == NULL);
         memset(CPU_INFO(v->processor), 0, sizeof(*CPU_INFO(v->processor)));
         INIT_LIST_HEAD(WAITQ(v->processor));
         INIT_LIST_HEAD(RUNQ(v->processor));
@@ -847,7 +848,7 @@ static void sedf_sleep(struct vcpu *d)
 
     EDOM_INFO(d)->status |= SEDF_ASLEEP;
 
-    if ( schedule_data[d->processor].curr == d )
+    if ( per_cpu(schedule_data, d->processor).curr == d )
     {
         cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
     }
@@ -1167,9 +1168,9 @@ void sedf_wake(struct vcpu *d)
        Save approximation: Always switch to scheduler!*/
     ASSERT(d->processor >= 0);
     ASSERT(d->processor < NR_CPUS);
-    ASSERT(schedule_data[d->processor].curr);
+    ASSERT(per_cpu(schedule_data, d->processor).curr);
 
-    if ( should_switch(schedule_data[d->processor].curr, d, now) )
+    if ( should_switch(per_cpu(schedule_data, d->processor).curr, d, now) )
         cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
 }
 
--- a/xen/common/schedule.c Tue Aug 08 12:04:46 2006 +0100
+++ b/xen/common/schedule.c Tue Aug 08 13:55:22 2006 +0100
@@ -46,7 +46,7 @@ static void vcpu_timer_fn(void *data);
 static void poll_timer_fn(void *data);
 
 /* This is global for now so that private implementations can reach it */
-struct schedule_data schedule_data[NR_CPUS];
+DEFINE_PER_CPU(struct schedule_data, schedule_data);
 
 extern struct scheduler sched_bvt_def;
 extern struct scheduler sched_sedf_def;
@@ -73,7 +73,7 @@ static inline void vcpu_runstate_change(
     struct vcpu *v, int new_state, s_time_t new_entry_time)
 {
     ASSERT(v->runstate.state != new_state);
-    ASSERT(spin_is_locked(&schedule_data[v->processor].schedule_lock));
+    ASSERT(spin_is_locked(&per_cpu(schedule_data,v->processor).schedule_lock));
 
     v->runstate.time[v->runstate.state] +=
         new_entry_time - v->runstate.state_entry_time;
@@ -107,8 +107,8 @@ int sched_init_vcpu(struct vcpu *v)
 
     if ( is_idle_vcpu(v) )
     {
-        schedule_data[v->processor].curr = v;
-        schedule_data[v->processor].idle = v;
+        per_cpu(schedule_data, v->processor).curr = v;
+        per_cpu(schedule_data, v->processor).idle = v;
         set_bit(_VCPUF_running, &v->vcpu_flags);
     }
 
@@ -500,19 +500,21 @@ long sched_adjdom(struct sched_adjdom_cm
  */
 static void __enter_scheduler(void)
 {
-    struct vcpu *prev = current, *next = NULL;
-    int cpu = smp_processor_id();
-    s_time_t now = NOW();
-    struct task_slice next_slice;
-    s32 r_time;     /* time for new dom to run */
+    struct vcpu *prev = current, *next = NULL;
+    s_time_t now = NOW();
+    struct schedule_data *sd;
+    struct task_slice next_slice;
+    s32 r_time;     /* time for new dom to run */
 
     ASSERT(!in_irq());
 
     perfc_incrc(sched_run);
 
-    spin_lock_irq(&schedule_data[cpu].schedule_lock);
+    sd = &this_cpu(schedule_data);
 
-    stop_timer(&schedule_data[cpu].s_timer);
+    spin_lock_irq(&sd->schedule_lock);
+
+    stop_timer(&sd->s_timer);
 
     /* get policy-specific decision on scheduling... */
     next_slice = ops.do_schedule(now);
@@ -520,13 +522,13 @@ static void __enter_scheduler(void)
     r_time = next_slice.time;
     next = next_slice.task;
 
-    schedule_data[cpu].curr = next;
+    sd->curr = next;
 
-    set_timer(&schedule_data[cpu].s_timer, now + r_time);
+    set_timer(&sd->s_timer, now + r_time);
 
     if ( unlikely(prev == next) )
     {
-        spin_unlock_irq(&schedule_data[cpu].schedule_lock);
+        spin_unlock_irq(&sd->schedule_lock);
         return continue_running(prev);
     }
 
@@ -552,17 +554,17 @@ static void __enter_scheduler(void)
     ASSERT(!test_bit(_VCPUF_running, &next->vcpu_flags));
     set_bit(_VCPUF_running, &next->vcpu_flags);
 
-    spin_unlock_irq(&schedule_data[cpu].schedule_lock);
+    spin_unlock_irq(&sd->schedule_lock);
 
     perfc_incrc(sched_ctx);
 
-    prev->sleep_tick = schedule_data[cpu].tick;
+    prev->sleep_tick = sd->tick;
 
     /* Ensure that the domain has an up-to-date time base. */
     if ( !is_idle_vcpu(next) )
     {
         update_vcpu_system_time(next);
-        if ( next->sleep_tick != schedule_data[cpu].tick )
+        if ( next->sleep_tick != sd->tick )
             send_timer_event(next);
     }
 
@@ -594,7 +596,7 @@ static void t_timer_fn(void *unused)
     struct vcpu *v = current;
    unsigned int cpu = smp_processor_id();
 
-    schedule_data[cpu].tick++;
+    per_cpu(schedule_data, cpu).tick++;
 
     if ( !is_idle_vcpu(v) )
     {
@@ -633,8 +635,8 @@ void __init scheduler_init(void)
 
     for ( i = 0; i < NR_CPUS; i++ )
     {
-        spin_lock_init(&schedule_data[i].schedule_lock);
-        init_timer(&schedule_data[i].s_timer, s_timer_fn, NULL, i);
+        spin_lock_init(&per_cpu(schedule_data, i).schedule_lock);
+        init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
         init_timer(&t_timer[i], t_timer_fn, NULL, i);
     }
 
@@ -676,10 +678,10 @@ void dump_runq(unsigned char key)
 
     for_each_online_cpu ( i )
     {
-        spin_lock(&schedule_data[i].schedule_lock);
+        spin_lock(&per_cpu(schedule_data, i).schedule_lock);
         printk("CPU[%02d] ", i);
-        SCHED_OP(dump_cpu_state,i);
-        spin_unlock(&schedule_data[i].schedule_lock);
+        SCHED_OP(dump_cpu_state, i);
+        spin_unlock(&per_cpu(schedule_data, i).schedule_lock);
     }
 
     local_irq_restore(flags);
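Beyond the one-for-one substitutions, __enter_scheduler() is reworked to fetch the address of the local CPU's schedule_data once, via this_cpu(), and keep it in a local sd pointer, so the cpu variable disappears and later accesses are plain dereferences. A minimal sketch of that caching pattern, again emulating per-CPU storage with an ordinary array purely for illustration:

```c
/*
 * Sketch of the __enter_scheduler() cleanup: derive the per-CPU slot once
 * and work through a local pointer afterwards.  The array-based emulation
 * of this_cpu() is an assumption for illustration, not Xen's macro.
 */
#include <stdio.h>

#define NR_CPUS 4

struct schedule_data {
    unsigned long tick;
    void *curr;
};

static struct schedule_data schedule_data[NR_CPUS];   /* emulated per-CPU data */

/* Stand-in for smp_processor_id(). */
static unsigned int smp_processor_id(void) { return 0; }
#define this_cpu(name) (name[smp_processor_id()])

static void enter_scheduler(void *next)
{
    /* One lookup of the local CPU's data ... */
    struct schedule_data *sd = &this_cpu(schedule_data);

    /* ... then every later access is a plain pointer dereference. */
    sd->curr = next;
    sd->tick++;
}

int main(void)
{
    int dummy_task;
    enter_scheduler(&dummy_task);
    printf("tick=%lu\n", this_cpu(schedule_data).tick);   /* prints 1 */
    return 0;
}
```

The gain is minor but real: the per-CPU address is computed once per scheduling pass instead of on every field access.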
--- a/xen/include/xen/sched-if.h Tue Aug 08 12:04:46 2006 +0100
+++ b/xen/include/xen/sched-if.h Tue Aug 08 13:55:22 2006 +0100
@@ -8,6 +8,8 @@
 #ifndef __XEN_SCHED_IF_H__
 #define __XEN_SCHED_IF_H__
 
+#include <xen/percpu.h>
+
 struct schedule_data {
     spinlock_t schedule_lock;  /* spinlock protecting curr */
     struct vcpu *curr;         /* current task             */
@@ -17,7 +19,7 @@ struct schedule_data {
     unsigned long tick;        /* current periodic 'tick'  */
 } __cacheline_aligned;
 
-extern struct schedule_data schedule_data[];
+DECLARE_PER_CPU(struct schedule_data, schedule_data);
 
 static inline void vcpu_schedule_lock(struct vcpu *v)
 {
@@ -26,10 +28,10 @@ static inline void vcpu_schedule_lock(st
     for ( ; ; )
     {
         cpu = v->processor;
-        spin_lock(&schedule_data[cpu].schedule_lock);
+        spin_lock(&per_cpu(schedule_data, cpu).schedule_lock);
         if ( likely(v->processor == cpu) )
             break;
-        spin_unlock(&schedule_data[cpu].schedule_lock);
+        spin_unlock(&per_cpu(schedule_data, cpu).schedule_lock);
     }
 }
 
@@ -40,7 +42,7 @@ static inline void vcpu_schedule_lock(st
 
 static inline void vcpu_schedule_unlock(struct vcpu *v)
 {
-    spin_unlock(&schedule_data[v->processor].schedule_lock);
+    spin_unlock(&per_cpu(schedule_data, v->processor).schedule_lock);
 }
 
 #define vcpu_schedule_unlock_irq(v) \
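The header now pairs DECLARE_PER_CPU() with the DEFINE_PER_CPU() in schedule.c, playing the role the old extern array declaration did, and pulls in <xen/percpu.h> for the macros. The vcpu_schedule_lock() loop in the hunk is also worth spelling out: a vcpu can migrate to another CPU between reading v->processor and acquiring that CPU's lock, so the field is re-read under the lock and the loop retries on a mismatch. A standalone sketch of that idiom, with pthread mutexes standing in for Xen spinlocks and deliberately simplified types (an illustration, not Xen code):

```c
/*
 * Sketch of the lock/recheck/retry idiom from vcpu_schedule_lock().
 * Pthread mutexes replace Xen spinlocks and the vcpu structure is
 * reduced to the one field the idiom cares about.
 */
#include <pthread.h>

#define NR_CPUS 4

static pthread_mutex_t schedule_lock[NR_CPUS] = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

struct vcpu {
    volatile unsigned int processor;   /* CPU this vcpu currently runs on */
};

static void vcpu_schedule_lock(struct vcpu *v)
{
    unsigned int cpu;

    for ( ; ; )
    {
        cpu = v->processor;
        pthread_mutex_lock(&schedule_lock[cpu]);
        /* Still on the CPU whose lock we hold?  Then we are done. */
        if ( v->processor == cpu )
            break;
        /* Migrated in the meantime: drop the wrong lock and retry. */
        pthread_mutex_unlock(&schedule_lock[cpu]);
    }
}

static void vcpu_schedule_unlock(struct vcpu *v)
{
    pthread_mutex_unlock(&schedule_lock[v->processor]);
}

int main(void)
{
    struct vcpu v = { .processor = 2 };
    vcpu_schedule_lock(&v);
    /* ... v.processor cannot change while its schedule_lock is held ... */
    vcpu_schedule_unlock(&v);
    return 0;
}
```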