ia64/xen-unstable

view xen/include/asm-x86/bitops.h @ 15812:86a154e1ef5d

[HVM] Shadow: don't shadow the p2m table.
For HVM vcpus with paging disabled, we used to shadow the p2m table,
and skip the p2m lookup to go from gfn to mfn. Instead, we now
provide a simple pagetable that gives a one-to-one mapping of 4GB, and
shadow that, making the translations from gfn to mfn via the p2m.
This removes the paging-disabled special-case code from the shadow
fault handler, and allows us to expand the p2m interface, since all HVM
translations now go through the same p2m lookups.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Fri Aug 31 11:06:22 2007 +0100 (2007-08-31)
parents 36e33da5146b
children 5ccf8bbf8628
#ifndef _X86_BITOPS_H
#define _X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <xen/config.h>

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

/*
 * We specify the memory operand as both input and output because the memory
 * operand is both read from and written to. Since the operand is in fact a
 * word array, we also specify "memory" in the clobbers list to indicate that
 * words other than the one directly addressed by the memory operand may be
 * modified. We don't use "+m" because the gcc manual says that it should be
 * used only when the constraint allows the operand to reside in a register.
 */

#define ADDR (*(volatile long *) addr)
#define CONST_ADDR (*(const volatile long *) addr)
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
}
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
        __asm__(
                "btsl %1,%0"
                :"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
}
/**
 * __clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __clear_bit(int nr, volatile void * addr)
{
        __asm__(
                "btrl %1,%0"
                :"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
}
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__(
                "btcl %1,%0"
                :"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
}
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btcl %1,%0"
                :"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
}
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btsl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
        return oldbit;
}
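
/*
 * Illustrative sketch, not part of the original header: a minimal bit-lock
 * built from the primitives above. The word "flags", the bit number
 * MY_LOCK_BIT and the spin loop are hypothetical; cpu_relax() is assumed to
 * be available from asm/processor.h.
 *
 *     #define MY_LOCK_BIT 0
 *     static unsigned long flags;
 *
 *     // acquire: loop until the old value of the bit was 0
 *     while ( test_and_set_bit(MY_LOCK_BIT, &flags) )
 *         cpu_relax();
 *     // ... critical section ...
 *     // release: clear_bit() itself is not a barrier, so order prior
 *     // stores before making the bit visible again
 *     smp_mb__before_clear_bit();
 *     clear_bit(MY_LOCK_BIT, &flags);
 */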
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__(
                "btsl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
        return oldbit;
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
        return oldbit;
}
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__(
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
        return oldbit;
}
/* WARNING: non-atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__(
                "btcl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
        return oldbit;
}
/**
 * test_and_change_bit - Toggle a bit and return its old value
 * @nr: Bit to toggle
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btcl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
        return oldbit;
}
static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
        return ((1U << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, const volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__(
                "btl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit)
                :"m" (CONST_ADDR),"dIr" (nr));
        return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))
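
/*
 * Illustrative sketch, not part of the original header: basic bitmap use of
 * set_bit()/test_bit()/clear_bit(). The array "pending", the bit numbers and
 * process() are hypothetical.
 *
 *     static unsigned long pending[2];        // 2 * BITS_PER_LONG bits
 *
 *     set_bit(3, pending);                    // atomic set
 *     if ( test_bit(3, pending) )             // constant nr -> constant_test_bit()
 *         clear_bit(3, pending);              // atomic clear
 *     if ( test_bit(nr, pending) )            // variable nr -> variable_test_bit()
 *         process(nr);
 */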
extern unsigned int __find_first_bit(
    const unsigned long *addr, unsigned int size);
extern unsigned int __find_next_bit(
    const unsigned long *addr, unsigned int size, unsigned int offset);
extern unsigned int __find_first_zero_bit(
    const unsigned long *addr, unsigned int size);
extern unsigned int __find_next_zero_bit(
    const unsigned long *addr, unsigned int size, unsigned int offset);
/* return index of first bit set in val or BITS_PER_LONG when no bit is set */
static inline unsigned int __scanbit(unsigned long val)
{
        __asm__ ( "bsf %1,%0" : "=r" (val) : "r" (val), "0" (BITS_PER_LONG) );
        return (unsigned int)val;
}
/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(*(const unsigned long *)addr)) : \
  __find_first_bit(addr,size)))
/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
#define find_next_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off) + (__scanbit((*(const unsigned long *)addr) >> (off)))) : \
  __find_next_bit(addr,size,off)))
/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
#define find_first_zero_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(~*(const unsigned long *)addr)) : \
  __find_first_zero_bit(addr,size)))
/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
#define find_next_zero_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off)+(__scanbit(~(((*(const unsigned long *)addr)) >> (off))))) : \
  __find_next_zero_bit(addr,size,off)))
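
/*
 * Illustrative sketch, not part of the original header: scanning a bitmap
 * with the find_* helpers. "map", NBITS and visit() are hypothetical; when
 * nothing is found the helpers return a value >= the size argument.
 *
 *     unsigned int bit;
 *
 *     // visit every set bit
 *     for ( bit = find_first_bit(map, NBITS);
 *           bit < NBITS;
 *           bit = find_next_bit(map, NBITS, bit + 1) )
 *         visit(bit);
 *
 *     // claim the first free slot, if any
 *     bit = find_first_zero_bit(map, NBITS);
 *     if ( bit < NBITS )
 *         set_bit(bit, map);
 */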
/**
 * find_first_set_bit - find the first set bit in @word
 * @word: the word to search
 *
 * Returns the bit-number of the first set bit. If no bits are set then the
 * result is undefined.
 */
static __inline__ unsigned int find_first_set_bit(unsigned long word)
{
        __asm__ ( "bsf %1,%0" : "=r" (word) : "r" (word) );
        return (unsigned int)word;
}
/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
        __asm__("bsf %1,%0"
                :"=r" (word)
                :"r" (~word));
        return word;
}
/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(unsigned long x)
{
        long r;

        __asm__("bsf %1,%0\n\t"
                "jnz 1f\n\t"
                "mov $-1,%0\n"
                "1:" : "=r" (r) : "rm" (x));
        return (int)r+1;
}
/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs.
 */
static inline int fls(unsigned long x)
{
        long r;

        __asm__("bsr %1,%0\n\t"
                "jnz 1f\n\t"
                "mov $-1,%0\n"
                "1:" : "=r" (r) : "rm" (x));
        return (int)r+1;
}
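
/*
 * Illustrative values, not part of the original header, showing the
 * libc-style 1-based convention of ffs()/fls() versus the 0-based ffz():
 *
 *     ffs(0)    == 0       // no bit set
 *     ffs(1)    == 1       // lowest set bit is bit 0
 *     ffs(0x18) == 4       // lowest set bit is bit 3
 *     fls(0)    == 0
 *     fls(0x18) == 5       // highest set bit is bit 4
 *     ffz(0x07) == 3       // first zero bit is bit 3
 */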
/**
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight64(x) generic_hweight64(x)
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
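
/*
 * Illustrative values, not part of the original header (generic_hweight*()
 * are the portable bit-counting helpers defined elsewhere in the tree):
 *
 *     hweight8(0xF0)        == 4
 *     hweight16(0x0101)     == 2
 *     hweight32(0xffffffff) == 32
 */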
#endif /* _X86_BITOPS_H */