ia64/xen-unstable

view xen/include/asm-x86/bitops.h @ 18440:8d982c7a0d30

x86: smp_mb__{before,after}_clear_bit() are no-ops.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Sep 04 14:37:56 2008 +0100 (2008-09-04)
parents fec632d30571
children
line source
1 #ifndef _X86_BITOPS_H
2 #define _X86_BITOPS_H
4 /*
5 * Copyright 1992, Linus Torvalds.
6 */
8 #include <xen/config.h>
10 #ifdef CONFIG_SMP
11 #define LOCK_PREFIX "lock ; "
12 #else
13 #define LOCK_PREFIX ""
14 #endif
16 /*
17 * We specify the memory operand as both input and output because the memory
18 * operand is both read from and written to. Since the operand is in fact a
19 * word array, we also specify "memory" in the clobbers list to indicate that
20 * words other than the one directly addressed by the memory operand may be
21 * modified. We don't use "+m" because the gcc manual says that it should be
22 * used only when the constraint allows the operand to reside in a register.
23 */
25 #define ADDR (*(volatile long *) addr)
26 #define CONST_ADDR (*(const volatile long *) addr)
28 extern void __bitop_bad_size(void);
29 #define bitop_bad_size(addr) (sizeof(*(addr)) < 4)
31 /**
32 * set_bit - Atomically set a bit in memory
33 * @nr: the bit to set
34 * @addr: the address to start counting from
35 *
36 * This function is atomic and may not be reordered. See __set_bit()
37 * if you do not require the atomic guarantees.
38 * Note that @nr may be almost arbitrarily large; this function is not
39 * restricted to acting on a single-word quantity.
40 */
41 static inline void set_bit(int nr, volatile void *addr)
42 {
43 asm volatile (
44 LOCK_PREFIX
45 "btsl %1,%0"
46 : "=m" (ADDR)
47 : "Ir" (nr), "m" (ADDR) : "memory");
48 }
49 #define set_bit(nr, addr) ({ \
50 if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
51 set_bit(nr, addr); \
52 })
54 /**
55 * __set_bit - Set a bit in memory
56 * @nr: the bit to set
57 * @addr: the address to start counting from
58 *
59 * Unlike set_bit(), this function is non-atomic and may be reordered.
60 * If it's called on the same region of memory simultaneously, the effect
61 * may be that only one operation succeeds.
62 */
63 static inline void __set_bit(int nr, volatile void *addr)
64 {
65 asm volatile (
66 "btsl %1,%0"
67 : "=m" (ADDR)
68 : "Ir" (nr), "m" (ADDR) : "memory");
69 }
70 #define __set_bit(nr, addr) ({ \
71 if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
72 __set_bit(nr, addr); \
73 })
75 /**
76 * clear_bit - Clears a bit in memory
77 * @nr: Bit to clear
78 * @addr: Address to start counting from
79 *
80 * clear_bit() is atomic and may not be reordered. However, it does
81 * not contain a memory barrier, so if it is used for locking purposes,
82 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
83 * in order to ensure changes are visible on other processors.
84 */
85 static inline void clear_bit(int nr, volatile void *addr)
86 {
87 asm volatile (
88 LOCK_PREFIX
89 "btrl %1,%0"
90 : "=m" (ADDR)
91 : "Ir" (nr), "m" (ADDR) : "memory");
92 }
93 #define clear_bit(nr, addr) ({ \
94 if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
95 clear_bit(nr, addr); \
96 })
98 /**
99 * __clear_bit - Clears a bit in memory
100 * @nr: Bit to clear
101 * @addr: Address to start counting from
102 *
103 * Unlike clear_bit(), this function is non-atomic and may be reordered.
104 * If it's called on the same region of memory simultaneously, the effect
105 * may be that only one operation succeeds.
106 */
107 static inline void __clear_bit(int nr, volatile void *addr)
108 {
109 asm volatile (
110 "btrl %1,%0"
111 : "=m" (ADDR)
112 : "Ir" (nr), "m" (ADDR) : "memory");
113 }
114 #define __clear_bit(nr, addr) ({ \
115 if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
116 __clear_bit(nr, addr); \
117 })
119 #define smp_mb__before_clear_bit() ((void)0)
120 #define smp_mb__after_clear_bit() ((void)0)
122 /**
123 * __change_bit - Toggle a bit in memory
124 * @nr: the bit to set
125 * @addr: the address to start counting from
126 *
127 * Unlike change_bit(), this function is non-atomic and may be reordered.
128 * If it's called on the same region of memory simultaneously, the effect
129 * may be that only one operation succeeds.
130 */
131 static inline void __change_bit(int nr, volatile void *addr)
132 {
133 asm volatile (
134 "btcl %1,%0"
135 : "=m" (ADDR)
136 : "Ir" (nr), "m" (ADDR) : "memory");
137 }
138 #define __change_bit(nr, addr) ({ \
139 if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
140 __change_bit(nr, addr); \
141 })
143 /**
144 * change_bit - Toggle a bit in memory
145 * @nr: Bit to clear
146 * @addr: Address to start counting from
147 *
148 * change_bit() is atomic and may not be reordered.
149 * Note that @nr may be almost arbitrarily large; this function is not
150 * restricted to acting on a single-word quantity.
151 */
152 static inline void change_bit(int nr, volatile void *addr)
153 {
154 asm volatile (
155 LOCK_PREFIX
156 "btcl %1,%0"
157 : "=m" (ADDR)
158 : "Ir" (nr), "m" (ADDR) : "memory");
159 }
160 #define change_bit(nr, addr) ({ \
161 if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
162 change_bit(nr, addr); \
163 })
165 /**
166 * test_and_set_bit - Set a bit and return its old value
167 * @nr: Bit to set
168 * @addr: Address to count from
169 *
170 * This operation is atomic and cannot be reordered.
171 * It also implies a memory barrier.
172 */
173 static inline int test_and_set_bit(int nr, volatile void *addr)
174 {
175 int oldbit;
177 asm volatile (
178 LOCK_PREFIX
179 "btsl %2,%1\n\tsbbl %0,%0"
180 : "=r" (oldbit), "=m" (ADDR)
181 : "Ir" (nr), "m" (ADDR) : "memory");
182 return oldbit;
183 }
184 #define test_and_set_bit(nr, addr) ({ \
185 if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
186 test_and_set_bit(nr, addr); \
187 })
189 /**
190 * __test_and_set_bit - Set a bit and return its old value
191 * @nr: Bit to set
192 * @addr: Address to count from
193 *
194 * This operation is non-atomic and can be reordered.
195 * If two examples of this operation race, one can appear to succeed
196 * but actually fail. You must protect multiple accesses with a lock.
197 */
198 static inline int __test_and_set_bit(int nr, volatile void *addr)
199 {
200 int oldbit;
202 asm volatile (
203 "btsl %2,%1\n\tsbbl %0,%0"
204 : "=r" (oldbit), "=m" (ADDR)
205 : "Ir" (nr), "m" (ADDR) : "memory");
206 return oldbit;
207 }
208 #define __test_and_set_bit(nr, addr) ({ \
209 if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
210 __test_and_set_bit(nr, addr); \
211 })
213 /**
214 * test_and_clear_bit - Clear a bit and return its old value
215 * @nr: Bit to set
216 * @addr: Address to count from
217 *
218 * This operation is atomic and cannot be reordered.
219 * It also implies a memory barrier.
220 */
221 static inline int test_and_clear_bit(int nr, volatile void *addr)
222 {
223 int oldbit;
225 asm volatile (
226 LOCK_PREFIX
227 "btrl %2,%1\n\tsbbl %0,%0"
228 : "=r" (oldbit), "=m" (ADDR)
229 : "Ir" (nr), "m" (ADDR) : "memory");
230 return oldbit;
231 }
232 #define test_and_clear_bit(nr, addr) ({ \
233 if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
234 test_and_clear_bit(nr, addr); \
235 })
237 /**
238 * __test_and_clear_bit - Clear a bit and return its old value
239 * @nr: Bit to set
240 * @addr: Address to count from
241 *
242 * This operation is non-atomic and can be reordered.
243 * If two examples of this operation race, one can appear to succeed
244 * but actually fail. You must protect multiple accesses with a lock.
245 */
246 static inline int __test_and_clear_bit(int nr, volatile void *addr)
247 {
248 int oldbit;
250 asm volatile (
251 "btrl %2,%1\n\tsbbl %0,%0"
252 : "=r" (oldbit), "=m" (ADDR)
253 : "Ir" (nr), "m" (ADDR) : "memory");
254 return oldbit;
255 }
256 #define __test_and_clear_bit(nr, addr) ({ \
257 if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
258 __test_and_clear_bit(nr, addr); \
259 })
261 /* WARNING: non atomic and it can be reordered! */
262 static inline int __test_and_change_bit(int nr, volatile void *addr)
263 {
264 int oldbit;
266 asm volatile (
267 "btcl %2,%1\n\tsbbl %0,%0"
268 : "=r" (oldbit), "=m" (ADDR)
269 : "Ir" (nr), "m" (ADDR) : "memory");
270 return oldbit;
271 }
272 #define __test_and_change_bit(nr, addr) ({ \
273 if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
274 __test_and_change_bit(nr, addr); \
275 })
277 /**
278 * test_and_change_bit - Change a bit and return its new value
279 * @nr: Bit to set
280 * @addr: Address to count from
281 *
282 * This operation is atomic and cannot be reordered.
283 * It also implies a memory barrier.
284 */
285 static inline int test_and_change_bit(int nr, volatile void *addr)
286 {
287 int oldbit;
289 asm volatile (
290 LOCK_PREFIX
291 "btcl %2,%1\n\tsbbl %0,%0"
292 : "=r" (oldbit), "=m" (ADDR)
293 : "Ir" (nr), "m" (ADDR) : "memory");
294 return oldbit;
295 }
296 #define test_and_change_bit(nr, addr) ({ \
297 if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
298 test_and_change_bit(nr, addr); \
299 })
/* Pure-C bit test, used when @nr is a compile-time constant. */
static inline int constant_test_bit(int nr, const volatile void *addr)
{
    const volatile unsigned int *word = (const volatile unsigned int *)addr;

    /* Select the 32-bit word holding the bit, shift it down, mask to 0/1. */
    return (word[nr >> 5] >> (nr & 31)) & 1;
}
307 static inline int variable_test_bit(int nr, const volatile void *addr)
308 {
309 int oldbit;
311 asm volatile (
312 "btl %2,%1\n\tsbbl %0,%0"
313 : "=r" (oldbit)
314 : "m" (CONST_ADDR), "Ir" (nr) : "memory" );
315 return oldbit;
316 }
318 #define test_bit(nr, addr) ({ \
319 if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
320 (__builtin_constant_p(nr) ? \
321 constant_test_bit((nr),(addr)) : \
322 variable_test_bit((nr),(addr))); \
323 })
/* Out-of-line bit searches for regions larger than one word. */
extern unsigned int __find_first_bit(
    const unsigned long *addr, unsigned int size);
extern unsigned int __find_next_bit(
    const unsigned long *addr, unsigned int size, unsigned int offset);
extern unsigned int __find_first_zero_bit(
    const unsigned long *addr, unsigned int size);
extern unsigned int __find_next_zero_bit(
    const unsigned long *addr, unsigned int size, unsigned int offset);

/*
 * Index of the lowest set bit in @val, or @max if @val is zero
 * (cmovz replaces the bsf result when bsf set ZF on zero input).
 */
static inline unsigned int __scanbit(unsigned long val, unsigned long max)
{
    __asm__ ( "bsf %1,%0 ; cmovz %2,%0"
              : "=&r" (val)
              : "r" (val), "r" (max) );
    return (unsigned int)val;
}
/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit. Single-word regions of known size use inline bsf.
 */
#define find_first_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(*(const unsigned long *)addr, size)) : \
  __find_first_bit(addr,size)))

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @off: The bitnumber to start searching at
 * @size: The maximum size to search
 */
#define find_next_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off) + (__scanbit((*(const unsigned long *)addr) >> (off), size))) : \
  __find_next_bit(addr,size,off)))

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit. Implemented by scanning the complemented word.
 */
#define find_first_zero_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(~*(const unsigned long *)addr, size)) : \
  __find_first_zero_bit(addr,size)))

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @off: The bitnumber to start searching at
 * @size: The maximum size to search
 */
#define find_next_zero_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off)+(__scanbit(~(((*(const unsigned long *)addr)) >> (off)), size))) : \
  __find_next_zero_bit(addr,size,off)))
/**
 * find_first_set_bit - find the first set bit in @word
 * @word: the word to search
 *
 * Returns the bit-number of the first set bit. The input must *not* be zero
 * (bsf leaves the destination undefined for a zero source).
 */
static inline unsigned int find_first_set_bit(unsigned long word)
{
    __asm__ ( "bsf %1,%0"
              : "=r" (word)
              : "r" (word) );
    return (unsigned int)word;
}
/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines: lowest set bit is 1, and 0 means no bit was set.
 */
static inline int ffs(unsigned long x)
{
    long r;

    /* bsf clears ZF when a bit is found; otherwise force r to -1. */
    __asm__ ( "bsf %1,%0\n\t"
              "jnz 1f\n\t"
              "mov $-1,%0\n"
              "1:"
              : "=r" (r)
              : "rm" (x) );

    return (int)r + 1;
}
/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs: highest set bit is numbered from 1,
 * and 0 means no bit was set.
 */
static inline int fls(unsigned long x)
{
    long r;

    /* bsr clears ZF when a bit is found; otherwise force r to -1. */
    __asm__ ( "bsr %1,%0\n\t"
              "jnz 1f\n\t"
              "mov $-1,%0\n"
              "1:"
              : "=r" (r)
              : "rm" (x) );

    return (int)r + 1;
}
435 /**
436 * hweightN - returns the hamming weight of a N-bit word
437 * @x: the word to weigh
438 *
439 * The Hamming Weight of a number is the total number of bits set in it.
440 */
441 #define hweight64(x) generic_hweight64(x)
442 #define hweight32(x) generic_hweight32(x)
443 #define hweight16(x) generic_hweight16(x)
444 #define hweight8(x) generic_hweight8(x)
446 #endif /* _X86_BITOPS_H */