ia64/xen-unstable

annotate xen/include/xen/cpumask.h @ 19800:78962f85c562

IOMMU: Add two generic functions to vendor neutral interface

Add 2 generic functions into the vendor-neutral iommu interface. The
reason is that from changeset 19732, there is only one global flag
"iommu_enabled" that controls iommu enablement for both vtd and amd
systems, so we need different code paths for vtd and amd iommu systems
if this flag has been turned on. Also, the early checking of
"iommu_enabled" in iommu_setup() is removed to prevent iommu
functionalities from being disabled on AMD systems.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 19 08:41:50 2009 +0100 (2009-06-19)
parents 822ea2bf0c54
children
rev   line source
kaf24@5146 1 #ifndef __XEN_CPUMASK_H
kaf24@5146 2 #define __XEN_CPUMASK_H
kaf24@5146 3
kaf24@4804 4 /*
kaf24@5146 5 * Cpumasks provide a bitmap suitable for representing the
kaf24@5146 6 * set of CPU's in a system, one bit position per CPU number.
kaf24@5146 7 *
kaf24@5146 8 * See detailed comments in the file xen/bitmap.h describing the
kaf24@5146 9 * data type on which these cpumasks are based.
kaf24@5146 10 *
kaf24@8520 11 * For details of cpumask_scnprintf() and cpulist_scnprintf(),
kaf24@8520 12 * see bitmap_scnprintf() and bitmap_scnlistprintf() in lib/bitmap.c.
kaf24@5146 13 *
kaf24@5146 14 * The available cpumask operations are:
kaf24@5146 15 *
kaf24@5146 16 * void cpu_set(cpu, mask) turn on bit 'cpu' in mask
kaf24@5146 17 * void cpu_clear(cpu, mask) turn off bit 'cpu' in mask
kaf24@5146 18 * void cpus_setall(mask) set all bits
kaf24@5146 19 * void cpus_clear(mask) clear all bits
kaf24@5146 20 * int cpu_isset(cpu, mask) true iff bit 'cpu' set in mask
kaf24@5146 21 * int cpu_test_and_set(cpu, mask) test and set bit 'cpu' in mask
kaf24@5146 22 *
kaf24@5146 23 * void cpus_and(dst, src1, src2) dst = src1 & src2 [intersection]
kaf24@5146 24 * void cpus_or(dst, src1, src2) dst = src1 | src2 [union]
kaf24@5146 25 * void cpus_xor(dst, src1, src2) dst = src1 ^ src2
kaf24@5146 26 * void cpus_andnot(dst, src1, src2) dst = src1 & ~src2
kaf24@5146 27 * void cpus_complement(dst, src) dst = ~src
kaf24@5146 28 *
kaf24@5146 29 * int cpus_equal(mask1, mask2) Does mask1 == mask2?
kaf24@5146 30 * int cpus_intersects(mask1, mask2) Do mask1 and mask2 intersect?
kaf24@5146 31 * int cpus_subset(mask1, mask2) Is mask1 a subset of mask2?
kaf24@5146 32 * int cpus_empty(mask) Is mask empty (no bits sets)?
kaf24@5146 33 * int cpus_full(mask) Is mask full (all bits sets)?
kaf24@5146 34 * int cpus_weight(mask) Hamming weight - number of set bits
kaf24@5146 35 *
kaf24@5146 36 * void cpus_shift_right(dst, src, n) Shift right
kaf24@5146 37 * void cpus_shift_left(dst, src, n) Shift left
kaf24@5146 38 *
kaf24@8520 39 * int first_cpu(mask) Number lowest set bit, or NR_CPUS
kaf24@8520 40 * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS
keir@19279 41 * int last_cpu(mask) Number highest set bit, or NR_CPUS
keir@19279 42 * int cycle_cpu(cpu, mask) Next cpu cycling from 'cpu', or NR_CPUS
kaf24@5146 43 *
kaf24@5146 44 * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set
kaf24@5146 45 * CPU_MASK_ALL Initializer - all bits set
kaf24@5146 46 * CPU_MASK_NONE Initializer - no bits set
kaf24@5146 47 * unsigned long *cpus_addr(mask) Array of unsigned long's in mask
kaf24@5146 48 *
kaf24@5146 49 * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
kaf24@8520 50 * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
kaf24@5146 51 *
kaf24@5146 52 * for_each_cpu_mask(cpu, mask) for-loop cpu over mask
kaf24@5146 53 *
kaf24@5146 54 * int num_online_cpus() Number of online CPUs
kaf24@5146 55 * int num_possible_cpus() Number of all possible CPUs
kaf24@5146 56 * int num_present_cpus() Number of present CPUs
kaf24@5146 57 *
kaf24@5146 58 * int cpu_online(cpu) Is some cpu online?
kaf24@5146 59 * int cpu_possible(cpu) Is some cpu possible?
kaf24@5146 60 * int cpu_present(cpu) Is some cpu present (can schedule)?
kaf24@5146 61 *
kaf24@5197 62 * int any_online_cpu(mask) First online cpu in mask, or NR_CPUS
kaf24@5146 63 *
kaf24@5146 64 * for_each_cpu(cpu) for-loop cpu over cpu_possible_map
kaf24@5146 65 * for_each_online_cpu(cpu) for-loop cpu over cpu_online_map
kaf24@5146 66 * for_each_present_cpu(cpu) for-loop cpu over cpu_present_map
kaf24@5146 67 *
kaf24@5146 68 * Subtlety:
kaf24@5146 69 * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
kaf24@5146 70 * to generate slightly worse code. Note for example the additional
kaf24@5146 71 * 40 lines of assembly code compiling the "for each possible cpu"
kaf24@5146 72 * loops buried in the disk_stat_read() macros calls when compiling
kaf24@5146 73 * drivers/block/genhd.c (arch i386, CONFIG_SMP=y). So use a simple
kaf24@5146 74 * one-line #define for cpu_isset(), instead of wrapping an inline
kaf24@5146 75 * inside a macro, the way we do the other calls.
kaf24@4804 76 */
kaf24@4804 77
#include <xen/config.h>
#include <xen/bitmap.h>
#include <xen/kernel.h>

/* Fixed-size bitmap with one bit per possible CPU (NR_CPUS bits). */
typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
kaf24@4804 83
#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
/* Turn on bit 'cpu' in *dstp (set_bit bitop on the underlying array). */
static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
{
    set_bit(cpu, dstp->bits);
}

#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
/* Turn off bit 'cpu' in *dstp. */
static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
{
    clear_bit(cpu, dstp->bits);
}

#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
/* Set the first 'nbits' bits of *dstp. */
static inline void __cpus_setall(cpumask_t *dstp, int nbits)
{
    bitmap_fill(dstp->bits, nbits);
}

#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
/* Clear the first 'nbits' bits of *dstp. */
static inline void __cpus_clear(cpumask_t *dstp, int nbits)
{
    bitmap_zero(dstp->bits, nbits);
}

/* No static inline type checking - see Subtlety (1) above. */
#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)

#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
/* Set bit 'cpu' in *addr and return its previous value. */
static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
{
    return test_and_set_bit(cpu, addr->bits);
}

#define cpu_test_and_clear(cpu, cpumask) __cpu_test_and_clear((cpu), &(cpumask))
/* Clear bit 'cpu' in *addr and return its previous value. */
static inline int __cpu_test_and_clear(int cpu, cpumask_t *addr)
{
    return test_and_clear_bit(cpu, addr->bits);
}
kaf24@9775 122
#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
/* *dstp = *src1p & *src2p over the first 'nbits' bits [intersection]. */
static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
                              const cpumask_t *src2p, int nbits)
{
    bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
/* *dstp = *src1p | *src2p over the first 'nbits' bits [union]. */
static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
                             const cpumask_t *src2p, int nbits)
{
    bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
/* *dstp = *src1p ^ *src2p over the first 'nbits' bits. */
static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
                              const cpumask_t *src2p, int nbits)
{
    bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_andnot(dst, src1, src2) \
    __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
/* *dstp = *src1p & ~*src2p over the first 'nbits' bits [set difference]. */
static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
                                 const cpumask_t *src2p, int nbits)
{
    bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
/* *dstp = ~*srcp over the first 'nbits' bits. */
static inline void __cpus_complement(cpumask_t *dstp,
                                     const cpumask_t *srcp, int nbits)
{
    bitmap_complement(dstp->bits, srcp->bits, nbits);
}
kaf24@5146 158
#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
/* True iff the first 'nbits' bits of the two masks are identical. */
static inline int __cpus_equal(const cpumask_t *src1p,
                               const cpumask_t *src2p, int nbits)
{
    return bitmap_equal(src1p->bits, src2p->bits, nbits);
}

#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
/* True iff the two masks share at least one set bit. */
static inline int __cpus_intersects(const cpumask_t *src1p,
                                    const cpumask_t *src2p, int nbits)
{
    return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}

#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
/* True iff every bit set in *src1p is also set in *src2p. */
static inline int __cpus_subset(const cpumask_t *src1p,
                                const cpumask_t *src2p, int nbits)
{
    return bitmap_subset(src1p->bits, src2p->bits, nbits);
}

#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
/* True iff no bit is set in the first 'nbits' bits. */
static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
{
    return bitmap_empty(srcp->bits, nbits);
}

#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
/* True iff all of the first 'nbits' bits are set. */
static inline int __cpus_full(const cpumask_t *srcp, int nbits)
{
    return bitmap_full(srcp->bits, nbits);
}

#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
/* Number of set bits (Hamming weight / population count). */
static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
{
    return bitmap_weight(srcp->bits, nbits);
}
kaf24@5146 197
#define cpus_shift_right(dst, src, n) \
    __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
/* *dstp = *srcp >> n (towards lower cpu numbers). */
static inline void __cpus_shift_right(cpumask_t *dstp,
                                      const cpumask_t *srcp, int n, int nbits)
{
    bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}

#define cpus_shift_left(dst, src, n) \
    __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
/* *dstp = *srcp << n (towards higher cpu numbers). */
static inline void __cpus_shift_left(cpumask_t *dstp,
                                     const cpumask_t *srcp, int n, int nbits)
{
    bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}

#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
/* Lowest set bit in *srcp, clamped to 'nbits' (NR_CPUS) if mask is empty. */
static inline int __first_cpu(const cpumask_t *srcp, int nbits)
{
    return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
}

#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
/* Lowest set bit strictly after bit 'n', or 'nbits' if none remains. */
static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
{
    return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
}
kaf24@5146 225
kfraser@15518 226 #define last_cpu(src) __last_cpu(&(src), NR_CPUS)
kfraser@15518 227 static inline int __last_cpu(const cpumask_t *srcp, int nbits)
kfraser@15518 228 {
keir@19279 229 int cpu, pcpu = nbits;
keir@19279 230 for (cpu = __first_cpu(srcp, nbits);
keir@19279 231 cpu < nbits;
keir@19279 232 cpu = __next_cpu(cpu, srcp, nbits))
kfraser@15518 233 pcpu = cpu;
kfraser@15518 234 return pcpu;
kfraser@15518 235 }
kfraser@15518 236
keir@19279 237 #define cycle_cpu(n, src) __cycle_cpu((n), &(src), NR_CPUS)
keir@19279 238 static inline int __cycle_cpu(int n, const cpumask_t *srcp, int nbits)
keir@19279 239 {
keir@19279 240 int nxt = __next_cpu(n, srcp, nbits);
keir@19279 241 if (nxt == nbits)
keir@19279 242 nxt = __first_cpu(srcp, nbits);
keir@19279 243 return nxt;
keir@19279 244 }
keir@19279 245
/*
 * Special-case data structure for "single bit set only" constant CPU masks.
 *
 * We pre-generate all the 64 (or 32) possible bit positions, with enough
 * padding to the left and the right, and return the constant pointer
 * appropriately offset.
 */
extern const unsigned long
    cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];

/* Return a pointer to a constant mask with only bit 'cpu' set. */
static inline const cpumask_t *cpumask_of(unsigned int cpu)
{
    /* Row (1 + cpu % BITS_PER_LONG) presumably has a bit set at that
     * in-word position; stepping back cpu/BITS_PER_LONG words then lands
     * the bit at overall position 'cpu' — verify against the definition
     * of cpu_bit_bitmap. */
    const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
    return (const cpumask_t *)(p - cpu / BITS_PER_LONG);
}

#define cpumask_of_cpu(cpu) (*cpumask_of(cpu))
kaf24@5146 263
/* Mask covering the valid bits of the final word of an NR_CPUS bitmap. */
#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)

#if NR_CPUS <= BITS_PER_LONG

/* Initializer: all NR_CPUS bits set (single-word case). */
#define CPU_MASK_ALL \
/*(cpumask_t)*/ { { \
    [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
} }

#else

/* Initializer: all NR_CPUS bits set (multi-word case: every word full
 * except the last, which keeps only the valid remaining bits). */
#define CPU_MASK_ALL \
/*(cpumask_t)*/ { { \
    [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
    [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
} }

#endif

/* Initializer: no bits set. */
#define CPU_MASK_NONE \
/*(cpumask_t)*/ { { \
    [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
} }

/* Initializer: only CPU0's bit set. */
#define CPU_MASK_CPU0 \
/*(cpumask_t)*/ { { \
    [0] = 1UL \
} }

/* Raw access to the mask's underlying array of unsigned longs. */
#define cpus_addr(src) ((src).bits)
kaf24@5146 294
#define cpumask_scnprintf(buf, len, src) \
    __cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
/* Format *srcp into buf via bitmap_scnprintf(); returns chars written. */
static inline int __cpumask_scnprintf(char *buf, int len,
                                      const cpumask_t *srcp, int nbits)
{
    return bitmap_scnprintf(buf, len, srcp->bits, nbits);
}

#define cpulist_scnprintf(buf, len, src) \
    __cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
/* Format *srcp as a cpu list via bitmap_scnlistprintf(); returns chars
 * written. */
static inline int __cpulist_scnprintf(char *buf, int len,
                                      const cpumask_t *srcp, int nbits)
{
    return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
}
kaf24@4804 310
#if NR_CPUS > 1
/* Iterate 'cpu' over every set bit in 'mask', in increasing order;
 * terminates with (cpu) == NR_CPUS. */
#define for_each_cpu_mask(cpu, mask) \
    for ((cpu) = first_cpu(mask); \
         (cpu) < NR_CPUS; \
         (cpu) = next_cpu((cpu), (mask)))
#else /* NR_CPUS == 1 */
/* UP build: the only cpu is 0; the mask argument is ignored. */
#define for_each_cpu_mask(cpu, mask) for ((cpu) = 0; (cpu) < 1; (cpu)++)
#endif /* NR_CPUS */
kaf24@5146 319
kaf24@5146 320 /*
kaf24@5146 321 * The following particular system cpumasks and operations manage
kaf24@5146 322 * possible, present and online cpus. Each of them is a fixed size
kaf24@5146 323 * bitmap of size NR_CPUS.
kaf24@5146 324 *
kaf24@5146 325 * #ifdef CONFIG_HOTPLUG_CPU
kfraser@15589 326 * cpu_possible_map - has bit 'cpu' set iff cpu is populatable
kaf24@5146 327 * cpu_present_map - has bit 'cpu' set iff cpu is populated
kaf24@5146 328 * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler
kaf24@5146 329 * #else
kaf24@5146 330 * cpu_possible_map - has bit 'cpu' set iff cpu is populated
kaf24@5146 331 * cpu_present_map - copy of cpu_possible_map
kaf24@5146 332 * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler
kaf24@5146 333 * #endif
kaf24@5146 334 *
kaf24@5146 335 * In either case, NR_CPUS is fixed at compile time, as the static
kaf24@5146 336 * size of these bitmaps. The cpu_possible_map is fixed at boot
kaf24@5146 337 * time, as the set of CPU id's that it is possible might ever
kaf24@5146 338 * be plugged in at anytime during the life of that system boot.
kaf24@5146 339 * The cpu_present_map is dynamic(*), representing which CPUs
kaf24@5146 340 * are currently plugged in. And cpu_online_map is the dynamic
kaf24@5146 341 * subset of cpu_present_map, indicating those CPUs available
kaf24@5146 342 * for scheduling.
kaf24@5146 343 *
kaf24@5146 344 * If HOTPLUG is enabled, then cpu_possible_map is forced to have
kaf24@5146 345 * all NR_CPUS bits set, otherwise it is just the set of CPUs that
kaf24@5146 346 * ACPI reports present at boot.
kaf24@5146 347 *
kaf24@5146 348 * If HOTPLUG is enabled, then cpu_present_map varies dynamically,
kaf24@5146 349 * depending on what ACPI reports as currently plugged in, otherwise
kaf24@5146 350 * cpu_present_map is just a copy of cpu_possible_map.
kaf24@5146 351 *
kaf24@5146 352 * (*) Well, cpu_present_map is dynamic in the hotplug case. If not
kaf24@5146 353 * hotplug, it's a copy of cpu_possible_map, hence fixed at boot.
kaf24@5146 354 *
kaf24@5146 355 * Subtleties:
kaf24@5146 356 * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
kaf24@5146 357 * assumption that their single CPU is online. The UP
kaf24@5146 358 * cpu_{online,possible,present}_maps are placebos. Changing them
kaf24@5146 359 * will have no useful affect on the following num_*_cpus()
kaf24@5146 360 * and cpu_*() macros in the UP case. This ugliness is a UP
kaf24@5146 361 * optimization - don't waste any instructions or memory references
kaf24@5146 362 * asking if you're online or how many CPUs there are if there is
kaf24@5146 363 * only one CPU.
kaf24@5146 364 * 2) Most SMP arch's #define some of these maps to be some
kaf24@5146 365 * other map specific to that arch. Therefore, the following
kaf24@5146 366 * must be #define macros, not inlines. To see why, examine
kaf24@5146 367 * the assembly code produced by the following. Note that
kaf24@5146 368 * set1() writes phys_x_map, but set2() writes x_map:
kaf24@5146 369 * int x_map, phys_x_map;
kaf24@5146 370 * #define set1(a) x_map = a
kaf24@5146 371 * inline void set2(int a) { x_map = a; }
kaf24@5146 372 * #define x_map phys_x_map
kaf24@5146 373 * main(){ set1(3); set2(5); }
kaf24@5146 374 */
kaf24@5146 375
/* The system-wide maps; see the large comment above for their semantics. */
extern cpumask_t cpu_possible_map;
extern cpumask_t cpu_online_map;
extern cpumask_t cpu_present_map;

#if NR_CPUS > 1
#define num_online_cpus() cpus_weight(cpu_online_map)
#define num_possible_cpus() cpus_weight(cpu_possible_map)
#define num_present_cpus() cpus_weight(cpu_present_map)
#define cpu_online(cpu) cpu_isset((cpu), cpu_online_map)
#define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map)
#define cpu_present(cpu) cpu_isset((cpu), cpu_present_map)
#else
/* UP build: the single cpu (0) is unconditionally online/possible/present;
 * the maps are not consulted — see Subtlety (1) above. */
#define num_online_cpus() 1
#define num_possible_cpus() 1
#define num_present_cpus() 1
#define cpu_online(cpu) ((cpu) == 0)
#define cpu_possible(cpu) ((cpu) == 0)
#define cpu_present(cpu) ((cpu) == 0)
#endif

/* First cpu in 'mask' that is online, or NR_CPUS if there is none. */
#define any_online_cpu(mask) \
({ \
    int cpu; \
    for_each_cpu_mask(cpu, (mask)) \
        if (cpu_online(cpu)) \
            break; \
    cpu; \
})

#define for_each_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map)
#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
kaf24@5146 408
kfraser@11296 409 /* Copy to/from cpumap provided by control tools. */
kfraser@11296 410 struct xenctl_cpumap;
kfraser@11296 411 void cpumask_to_xenctl_cpumap(
kfraser@11296 412 struct xenctl_cpumap *enctl_cpumap, cpumask_t *cpumask);
kfraser@11296 413 void xenctl_cpumap_to_cpumask(
kfraser@11296 414 cpumask_t *cpumask, struct xenctl_cpumap *enctl_cpumap);
kfraser@11296 415
kaf24@5146 416 #endif /* __XEN_CPUMASK_H */