ia64/xen-unstable
changeset 19279:a44751edcb76
Fix cpu selection at the time of vCPU allocation
After cpu_[online/offline], the set bits in cpu_online_map may not be
contiguous. Use cycle_cpu() to pick the next one.
Signed-off-by: Xiaowei Yang <xiaowei.yang@intel.com>
author      Keir Fraser <keir.fraser@citrix.com>
date        Fri Mar 06 18:54:09 2009 +0000 (2009-03-06)
parents     3fd8f9b34941
children    728d8dee880b
files       xen/common/domctl.c xen/common/sched_credit.c xen/include/xen/cpumask.h
line diff
--- a/xen/common/domctl.c	Fri Mar 06 14:28:27 2009 +0000
+++ b/xen/common/domctl.c	Fri Mar 06 18:54:09 2009 +0000
@@ -433,7 +433,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
 
             cpu = (i == 0) ?
                 default_vcpu0_location() :
-                (d->vcpu[i-1]->processor + 1) % num_online_cpus();
+                cycle_cpu(d->vcpu[i-1]->processor, cpu_online_map);
 
             if ( alloc_vcpu(d, i, cpu) == NULL )
                 goto maxvcpu_out;
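
Why the replaced expression was wrong: (d->vcpu[i-1]->processor + 1) %
num_online_cpus() indexes online CPUs as if they formed the dense range
0..num_online_cpus()-1, but after a hot-unplug the set bits in
cpu_online_map can be sparse, so the result may name an offline CPU. A
minimal standalone model of the fix (plain C, with a 64-bit word standing
in for cpumask_t; the *_model names are illustrative, not Xen's):

    #include <stdio.h>
    #include <stdint.h>

    #define NR_CPUS 64

    /* Model of next_cpu(): lowest set bit strictly above 'n', or NR_CPUS. */
    static int next_cpu_model(int n, uint64_t mask)
    {
        for (int cpu = n + 1; cpu < NR_CPUS; cpu++)
            if (mask & (1ULL << cpu))
                return cpu;
        return NR_CPUS;
    }

    /* Model of cycle_cpu(): next set bit past 'n', wrapping to the first. */
    static int cycle_cpu_model(int n, uint64_t mask)
    {
        int nxt = next_cpu_model(n, mask);
        if (nxt == NR_CPUS)
            nxt = next_cpu_model(-1, mask);      /* first_cpu() */
        return nxt;
    }

    int main(void)
    {
        /* CPU 1 hot-unplugged: online map is {0, 2, 3}, num_online_cpus() == 3. */
        uint64_t online = (1ULL << 0) | (1ULL << 2) | (1ULL << 3);
        int prev = 0;   /* processor of the previously placed vCPU */

        printf("old: %d\n", (prev + 1) % 3);                /* 1 -- offline CPU! */
        printf("new: %d\n", cycle_cpu_model(prev, online)); /* 2 -- skips the hole */
        printf("wrap: %d\n", cycle_cpu_model(3, online));   /* 0 -- wraps around */
        return 0;
    }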
--- a/xen/common/sched_credit.c	Fri Mar 06 14:28:27 2009 +0000
+++ b/xen/common/sched_credit.c	Fri Mar 06 18:54:09 2009 +0000
@@ -250,15 +250,6 @@ static struct csched_private csched_priv
 static void csched_tick(void *_cpu);
 
 static inline int
-__cycle_cpu(int cpu, const cpumask_t *mask)
-{
-    int nxt = next_cpu(cpu, *mask);
-    if (nxt == NR_CPUS)
-        nxt = first_cpu(*mask);
-    return nxt;
-}
-
-static inline int
 __vcpu_on_runq(struct csched_vcpu *svc)
 {
     return !list_empty(&svc->runq_elem);
@@ -428,7 +419,7 @@ csched_cpu_pick(struct vcpu *vc)
     cpus_and(cpus, cpu_online_map, vc->cpu_affinity);
     cpu = cpu_isset(vc->processor, cpus)
             ? vc->processor
-            : __cycle_cpu(vc->processor, &cpus);
+            : cycle_cpu(vc->processor, cpus);
     ASSERT( !cpus_empty(cpus) && cpu_isset(cpu, cpus) );
 
     /*
@@ -454,7 +445,7 @@ csched_cpu_pick(struct vcpu *vc)
         cpumask_t nxt_idlers;
         int nxt;
 
-        nxt = __cycle_cpu(cpu, &cpus);
+        nxt = cycle_cpu(cpu, cpus);
 
         if ( cpu_isset(cpu, cpu_core_map[nxt]) )
         {
@@ -1128,7 +1119,7 @@ csched_load_balance(int cpu, struct csch
 
     while ( !cpus_empty(workers) )
     {
-        peer_cpu = __cycle_cpu(peer_cpu, &workers);
+        peer_cpu = cycle_cpu(peer_cpu, workers);
         cpu_clear(peer_cpu, workers);
 
         /*
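
The credit scheduler's private __cycle_cpu() helper is hoisted into
cpumask.h with its behaviour unchanged; the new cycle_cpu() macro takes
the mask by value like next_cpu() and first_cpu() and takes its address
itself, which is why the call sites above drop the '&'. The cycle-and-clear
walk in csched_load_balance() then visits every candidate CPU exactly once,
in cyclic order starting just past the balancing CPU. A standalone sketch
of that pattern (same 64-bit-word stand-in for cpumask_t, names illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define NR_CPUS 64

    /* Next set bit past 'n', wrapping around; NR_CPUS if the mask is empty. */
    static int cycle_cpu_model(int n, uint64_t mask)
    {
        for (int i = 1; i <= NR_CPUS; i++)
            if (mask & (1ULL << ((n + i) % NR_CPUS)))
                return (n + i) % NR_CPUS;
        return NR_CPUS;
    }

    int main(void)
    {
        uint64_t workers = (1ULL << 1) | (1ULL << 4) | (1ULL << 6);
        int peer_cpu = 5;   /* the CPU doing the balancing */

        /* Visit each worker exactly once, starting just past peer_cpu. */
        while (workers != 0) {
            peer_cpu = cycle_cpu_model(peer_cpu, workers);
            workers &= ~(1ULL << peer_cpu);   /* cpu_clear(): don't revisit */
            printf("try to steal from CPU %d\n", peer_cpu); /* 6, then 1, then 4 */
        }
        return 0;
    }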
--- a/xen/include/xen/cpumask.h	Fri Mar 06 14:28:27 2009 +0000
+++ b/xen/include/xen/cpumask.h	Fri Mar 06 18:54:09 2009 +0000
@@ -38,6 +38,8 @@
  *
  * int first_cpu(mask)			Number lowest set bit, or NR_CPUS
  * int next_cpu(cpu, mask)		Next cpu past 'cpu', or NR_CPUS
+ * int last_cpu(mask)			Number highest set bit, or NR_CPUS
+ * int cycle_cpu(cpu, mask)		Next cpu cycling from 'cpu', or NR_CPUS
  *
  * cpumask_t cpumask_of_cpu(cpu)	Return cpumask with bit 'cpu' set
  * CPU_MASK_ALL			Initializer - all bits set
@@ -225,12 +227,23 @@ static inline int __next_cpu(int n, cons
 #define last_cpu(src) __last_cpu(&(src), NR_CPUS)
 static inline int __last_cpu(const cpumask_t *srcp, int nbits)
 {
-    int cpu, pcpu = NR_CPUS;
-    for (cpu = first_cpu(*srcp); cpu < NR_CPUS; cpu = next_cpu(cpu, *srcp))
+    int cpu, pcpu = nbits;
+    for (cpu = __first_cpu(srcp, nbits);
+         cpu < nbits;
+         cpu = __next_cpu(cpu, srcp, nbits))
         pcpu = cpu;
     return pcpu;
 }
 
+#define cycle_cpu(n, src) __cycle_cpu((n), &(src), NR_CPUS)
+static inline int __cycle_cpu(int n, const cpumask_t *srcp, int nbits)
+{
+    int nxt = __next_cpu(n, srcp, nbits);
+    if (nxt == nbits)
+        nxt = __first_cpu(srcp, nbits);
+    return nxt;
+}
+
 #define cpumask_of_cpu(cpu)						\
 ({									\
 	typeof(_unused_cpumask_arg_) m;					\
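
Two edge cases implied by the implementation, though not spelled out in
the patch: on an empty mask both __next_cpu() and __first_cpu() return
nbits, so cycle_cpu() yields NR_CPUS and callers must guarantee a
non-empty mask (the ASSERT in csched_cpu_pick() does exactly that); and
when 'n' is the only set bit, cycle_cpu() wraps back around to 'n' itself.
A small self-checking model under the same 64-bit-word stand-in assumptions:

    #include <assert.h>
    #include <stdint.h>

    #define NR_CPUS 64

    /* Lowest set bit strictly above 'n', or NR_CPUS; -1 gives first_cpu(). */
    static int next_cpu_model(int n, uint64_t mask)
    {
        for (int cpu = n + 1; cpu < NR_CPUS; cpu++)
            if (mask & (1ULL << cpu))
                return cpu;
        return NR_CPUS;
    }

    /* Same shape as __cycle_cpu(): try next, wrap to first on overflow. */
    static int cycle_cpu_model(int n, uint64_t mask)
    {
        int nxt = next_cpu_model(n, mask);
        if (nxt == NR_CPUS)
            nxt = next_cpu_model(-1, mask);
        return nxt;
    }

    int main(void)
    {
        assert(cycle_cpu_model(2, 0) == NR_CPUS);           /* empty mask */
        assert(cycle_cpu_model(5, 1ULL << 5) == 5);         /* sole bit: itself */
        assert(cycle_cpu_model(63, (1ULL << 63) | 1) == 0); /* wraps high to low */
        return 0;
    }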