ia64/xen-unstable

xen/include/xen/cpumask.h @ 19848:5839491bbf20

[IA64] Replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use d->max_vcpus instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents 822ea2bf0c54
#ifndef __XEN_CPUMASK_H
#define __XEN_CPUMASK_H

/*
 * Cpumasks provide a bitmap suitable for representing the
 * set of CPUs in a system, one bit position per CPU number.
 *
 * See detailed comments in the file xen/bitmap.h describing the
 * data type on which these cpumasks are based.
 *
 * For details of cpumask_scnprintf() and cpulist_scnprintf(),
 * see bitmap_scnprintf() and bitmap_scnlistprintf() in lib/bitmap.c.
 *
 * The available cpumask operations are:
 *
 * void cpu_set(cpu, mask)              turn on bit 'cpu' in mask
 * void cpu_clear(cpu, mask)            turn off bit 'cpu' in mask
 * void cpus_setall(mask)               set all bits
 * void cpus_clear(mask)                clear all bits
 * int cpu_isset(cpu, mask)             true iff bit 'cpu' set in mask
 * int cpu_test_and_set(cpu, mask)      test and set bit 'cpu' in mask
 *
 * void cpus_and(dst, src1, src2)       dst = src1 & src2  [intersection]
 * void cpus_or(dst, src1, src2)        dst = src1 | src2  [union]
 * void cpus_xor(dst, src1, src2)       dst = src1 ^ src2
 * void cpus_andnot(dst, src1, src2)    dst = src1 & ~src2
 * void cpus_complement(dst, src)       dst = ~src
 *
 * int cpus_equal(mask1, mask2)         Does mask1 == mask2?
 * int cpus_intersects(mask1, mask2)    Do mask1 and mask2 intersect?
 * int cpus_subset(mask1, mask2)        Is mask1 a subset of mask2?
 * int cpus_empty(mask)                 Is mask empty (no bits set)?
 * int cpus_full(mask)                  Is mask full (all bits set)?
 * int cpus_weight(mask)                Hamming weight - number of set bits
 *
 * void cpus_shift_right(dst, src, n)   Shift right
 * void cpus_shift_left(dst, src, n)    Shift left
 *
 * int first_cpu(mask)                  Number of lowest set bit, or NR_CPUS
 * int next_cpu(cpu, mask)              Next cpu past 'cpu', or NR_CPUS
 * int last_cpu(mask)                   Number of highest set bit, or NR_CPUS
 * int cycle_cpu(cpu, mask)             Next cpu cycling from 'cpu', or NR_CPUS
 *
 * cpumask_t cpumask_of_cpu(cpu)        Return cpumask with bit 'cpu' set
 * CPU_MASK_ALL                         Initializer - all bits set
 * CPU_MASK_NONE                        Initializer - no bits set
 * unsigned long *cpus_addr(mask)       Array of unsigned longs in mask
 *
 * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
 * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
 *
 * for_each_cpu_mask(cpu, mask)         for-loop cpu over mask
 *
 * int num_online_cpus()                Number of online CPUs
 * int num_possible_cpus()              Number of all possible CPUs
 * int num_present_cpus()               Number of present CPUs
 *
 * int cpu_online(cpu)                  Is cpu 'cpu' online?
 * int cpu_possible(cpu)                Is cpu 'cpu' possible?
 * int cpu_present(cpu)                 Is cpu 'cpu' present (can schedule)?
 *
 * int any_online_cpu(mask)             First online cpu in mask, or NR_CPUS
 *
 * for_each_cpu(cpu)                    for-loop cpu over cpu_possible_map
 * for_each_online_cpu(cpu)             for-loop cpu over cpu_online_map
 * for_each_present_cpu(cpu)            for-loop cpu over cpu_present_map
 *
 * Subtlety:
 * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
 *    to generate slightly worse code.  Note for example the additional
 *    40 lines of assembly code compiling the "for each possible cpu"
 *    loops buried in the disk_stat_read() macro calls when compiling
 *    drivers/block/genhd.c (arch i386, CONFIG_SMP=y).  So use a simple
 *    one-line #define for cpu_isset(), instead of wrapping an inline
 *    inside a macro, the way we do the other calls.
 */

#include <xen/config.h>
#include <xen/bitmap.h>
#include <xen/kernel.h>

typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
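
/*
 * A minimal usage sketch of the basic operations above (illustrative
 * only; mask_a, mask_b and both are hypothetical local names, not part
 * of this API):
 *
 *      cpumask_t mask_a = CPU_MASK_NONE;       // start empty
 *      cpumask_t mask_b = CPU_MASK_ALL;        // start full
 *      cpumask_t both;
 *
 *      cpu_set(2, mask_a);                     // mask_a now has bit 2 set
 *      cpus_and(both, mask_a, mask_b);         // both = mask_a & mask_b
 *      ASSERT(cpu_isset(2, both) && cpus_weight(both) == 1);
 */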
#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
{
        set_bit(cpu, dstp->bits);
}

#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
{
        clear_bit(cpu, dstp->bits);
}

#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
static inline void __cpus_setall(cpumask_t *dstp, int nbits)
{
        bitmap_fill(dstp->bits, nbits);
}

#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
static inline void __cpus_clear(cpumask_t *dstp, int nbits)
{
        bitmap_zero(dstp->bits, nbits);
}

/* No static inline type checking - see Subtlety (1) above. */
#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)

#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
{
        return test_and_set_bit(cpu, addr->bits);
}

#define cpu_test_and_clear(cpu, cpumask) __cpu_test_and_clear((cpu), &(cpumask))
static inline int __cpu_test_and_clear(int cpu, cpumask_t *addr)
{
        return test_and_clear_bit(cpu, addr->bits);
}
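
/*
 * cpu_test_and_set()/cpu_test_and_clear() are atomic, so they can be
 * used to claim a cpu exactly once among racing callers.  A hedged
 * sketch (claimed_cpus is a hypothetical shared mask, not part of this
 * file):
 *
 *      if ( !cpu_test_and_set(cpu, claimed_cpus) )
 *      {
 *              // only the first caller to claim 'cpu' gets here;
 *              // later callers see the bit already set and get 1 back
 *      }
 */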
#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
                              const cpumask_t *src2p, int nbits)
{
        bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
                             const cpumask_t *src2p, int nbits)
{
        bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
                              const cpumask_t *src2p, int nbits)
{
        bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_andnot(dst, src1, src2) \
        __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
                                 const cpumask_t *src2p, int nbits)
{
        bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
static inline void __cpus_complement(cpumask_t *dstp,
                                     const cpumask_t *srcp, int nbits)
{
        bitmap_complement(dstp->bits, srcp->bits, nbits);
}

#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
static inline int __cpus_equal(const cpumask_t *src1p,
                               const cpumask_t *src2p, int nbits)
{
        return bitmap_equal(src1p->bits, src2p->bits, nbits);
}

#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
static inline int __cpus_intersects(const cpumask_t *src1p,
                                    const cpumask_t *src2p, int nbits)
{
        return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}

#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
static inline int __cpus_subset(const cpumask_t *src1p,
                                const cpumask_t *src2p, int nbits)
{
        return bitmap_subset(src1p->bits, src2p->bits, nbits);
}

#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
{
        return bitmap_empty(srcp->bits, nbits);
}

#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
static inline int __cpus_full(const cpumask_t *srcp, int nbits)
{
        return bitmap_full(srcp->bits, nbits);
}

#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
{
        return bitmap_weight(srcp->bits, nbits);
}

#define cpus_shift_right(dst, src, n) \
        __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_right(cpumask_t *dstp,
                                      const cpumask_t *srcp, int n, int nbits)
{
        bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}

#define cpus_shift_left(dst, src, n) \
        __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_left(cpumask_t *dstp,
                                     const cpumask_t *srcp, int n, int nbits)
{
        bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}

#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
static inline int __first_cpu(const cpumask_t *srcp, int nbits)
{
        return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
}

#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
{
        return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
}

#define last_cpu(src) __last_cpu(&(src), NR_CPUS)
static inline int __last_cpu(const cpumask_t *srcp, int nbits)
{
        int cpu, pcpu = nbits;
        for (cpu = __first_cpu(srcp, nbits);
             cpu < nbits;
             cpu = __next_cpu(cpu, srcp, nbits))
                pcpu = cpu;
        return pcpu;
}

#define cycle_cpu(n, src) __cycle_cpu((n), &(src), NR_CPUS)
static inline int __cycle_cpu(int n, const cpumask_t *srcp, int nbits)
{
        int nxt = __next_cpu(n, srcp, nbits);
        if (nxt == nbits)
                nxt = __first_cpu(srcp, nbits);
        return nxt;
}
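
/*
 * cycle_cpu() wraps around, which makes it convenient for round-robin
 * selection.  A hedged sketch (last_pick is a hypothetical per-caller
 * variable):
 *
 *      last_pick = cycle_cpu(last_pick, cpu_online_map);
 *
 * Successive calls walk the set bits in ascending order and wrap back
 * to the lowest one; if the mask is empty, NR_CPUS is returned.
 */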
/*
 * Special-case data structure for "single bit set only" constant CPU masks.
 *
 * We pre-generate all the 64 (or 32) possible bit positions, with enough
 * padding to the left and the right, and return the constant pointer
 * appropriately offset.
 */
extern const unsigned long
        cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];

static inline const cpumask_t *cpumask_of(unsigned int cpu)
{
        const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
        return (const cpumask_t *)(p - cpu / BITS_PER_LONG);
}

#define cpumask_of_cpu(cpu) (*cpumask_of(cpu))
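
/*
 * A worked example of the offset trick, assuming the pre-generated
 * layout described above (row x+1 holds a single word whose bit x is
 * set, surrounded by zero padding): with BITS_PER_LONG == 64,
 * cpumask_of(70) picks row 1 + 70 % 64 == 7, whose first word is
 * 1UL << 6, and steps the pointer back by 70 / 64 == 1 word.  In the
 * returned mask that word therefore lands at word index 1, i.e. bit
 * 64 + 6 == 70 is the only bit set.
 */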
#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)

#if NR_CPUS <= BITS_PER_LONG

#define CPU_MASK_ALL \
/*(cpumask_t)*/ { { \
        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
} }

#else

#define CPU_MASK_ALL \
/*(cpumask_t)*/ { { \
        [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
} }

#endif

#define CPU_MASK_NONE \
/*(cpumask_t)*/ { { \
        [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
} }

#define CPU_MASK_CPU0 \
/*(cpumask_t)*/ { { \
        [0] = 1UL \
} }

#define cpus_addr(src) ((src).bits)

#define cpumask_scnprintf(buf, len, src) \
        __cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
static inline int __cpumask_scnprintf(char *buf, int len,
                                      const cpumask_t *srcp, int nbits)
{
        return bitmap_scnprintf(buf, len, srcp->bits, nbits);
}

#define cpulist_scnprintf(buf, len, src) \
        __cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
static inline int __cpulist_scnprintf(char *buf, int len,
                                      const cpumask_t *srcp, int nbits)
{
        return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
}
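
/*
 * Output format sketch (see bitmap_scnprintf() and
 * bitmap_scnlistprintf() in lib/bitmap.c for the authoritative
 * definitions): for a mask with cpus 0-3 set, cpumask_scnprintf()
 * emits comma-separated groups of hex digits, e.g. "0000000f" for a
 * small NR_CPUS, while cpulist_scnprintf() emits a human-readable
 * range list such as "0-3".
 */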
#if NR_CPUS > 1
#define for_each_cpu_mask(cpu, mask) \
        for ((cpu) = first_cpu(mask); \
             (cpu) < NR_CPUS; \
             (cpu) = next_cpu((cpu), (mask)))
#else /* NR_CPUS == 1 */
#define for_each_cpu_mask(cpu, mask) for ((cpu) = 0; (cpu) < 1; (cpu)++)
#endif /* NR_CPUS */
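
/*
 * Typical iteration sketch (v->cpu_affinity is used here only as a
 * plausible example mask):
 *
 *      int cpu;
 *      for_each_cpu_mask(cpu, v->cpu_affinity)
 *              printk("vcpu may run on cpu %d\n", cpu);
 *
 * The loop visits set bits in ascending order and terminates once
 * next_cpu() returns NR_CPUS.
 */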
/*
 * The following particular system cpumasks and operations manage
 * possible, present and online cpus.  Each of them is a fixed size
 * bitmap of size NR_CPUS.
 *
 *  #ifdef CONFIG_HOTPLUG_CPU
 *     cpu_possible_map - has bit 'cpu' set iff cpu is populatable
 *     cpu_present_map  - has bit 'cpu' set iff cpu is populated
 *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
 *  #else
 *     cpu_possible_map - has bit 'cpu' set iff cpu is populated
 *     cpu_present_map  - copy of cpu_possible_map
 *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
 *  #endif
 *
 * In either case, NR_CPUS is fixed at compile time, as the static
 * size of these bitmaps.  The cpu_possible_map is fixed at boot
 * time, as the set of CPU ids that might ever be plugged in at any
 * time during the life of that system boot.  The cpu_present_map is
 * dynamic(*), representing which CPUs are currently plugged in.  And
 * cpu_online_map is the dynamic subset of cpu_present_map, indicating
 * those CPUs available for scheduling.
 *
 * If HOTPLUG is enabled, then cpu_possible_map is forced to have
 * all NR_CPUS bits set, otherwise it is just the set of CPUs that
 * ACPI reports present at boot.
 *
 * If HOTPLUG is enabled, then cpu_present_map varies dynamically,
 * depending on what ACPI reports as currently plugged in, otherwise
 * cpu_present_map is just a copy of cpu_possible_map.
 *
 * (*) Well, cpu_present_map is dynamic in the hotplug case.  If not
 *     hotplug, it's a copy of cpu_possible_map, hence fixed at boot.
 *
 * Subtleties:
 * 1) UP arches (NR_CPUS == 1, CONFIG_SMP not defined) hardcode the
 *    assumption that their single CPU is online.  The UP
 *    cpu_{online,possible,present}_maps are placebos.  Changing them
 *    will have no useful effect on the following num_*_cpus()
 *    and cpu_*() macros in the UP case.  This ugliness is a UP
 *    optimization - don't waste any instructions or memory references
 *    asking if you're online or how many CPUs there are if there is
 *    only one CPU.
 * 2) Most SMP arches #define some of these maps to be some
 *    other map specific to that arch.  Therefore, the following
 *    must be #define macros, not inlines.  To see why, examine
 *    the assembly code produced by the following.  Note that
 *    set1() writes phys_x_map, but set2() writes x_map:
 *
 *      int x_map, phys_x_map;
 *      #define set1(a) x_map = a
 *      inline void set2(int a) { x_map = a; }
 *      #define x_map phys_x_map
 *      main(){ set1(3); set2(5); }
 */

extern cpumask_t cpu_possible_map;
extern cpumask_t cpu_online_map;
extern cpumask_t cpu_present_map;

#if NR_CPUS > 1
#define num_online_cpus()       cpus_weight(cpu_online_map)
#define num_possible_cpus()     cpus_weight(cpu_possible_map)
#define num_present_cpus()      cpus_weight(cpu_present_map)
#define cpu_online(cpu)         cpu_isset((cpu), cpu_online_map)
#define cpu_possible(cpu)       cpu_isset((cpu), cpu_possible_map)
#define cpu_present(cpu)        cpu_isset((cpu), cpu_present_map)
#else
#define num_online_cpus()       1
#define num_possible_cpus()     1
#define num_present_cpus()      1
#define cpu_online(cpu)         ((cpu) == 0)
#define cpu_possible(cpu)       ((cpu) == 0)
#define cpu_present(cpu)        ((cpu) == 0)
#endif

#define any_online_cpu(mask) \
({ \
        int cpu; \
        for_each_cpu_mask(cpu, (mask)) \
                if (cpu_online(cpu)) \
                        break; \
        cpu; \
})

#define for_each_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map)
#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
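
/*
 * A hedged sketch tying the pieces together: count the online cpus by
 * hand and pick the first online cpu from an arbitrary mask (mask is
 * a hypothetical local):
 *
 *      int cpu, n = 0;
 *      for_each_online_cpu(cpu)
 *              n++;
 *      ASSERT(n == num_online_cpus());
 *
 *      cpu = any_online_cpu(mask);
 *      if ( cpu == NR_CPUS )
 *              printk("no online cpu in mask\n");
 */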
/* Copy to/from cpumap provided by control tools. */
struct xenctl_cpumap;
void cpumask_to_xenctl_cpumap(
        struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask);
void xenctl_cpumap_to_cpumask(
        cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap);

#endif /* __XEN_CPUMASK_H */