ia64/xen-unstable: extras/mini-os/include/os.h @ 10843:4f6d858ea570

[PCI] Per-device permissive flag (replaces global permissive flag).
Signed-off-by: Chris Bookholt <hap10@tycho.ncsc.mil>

author:  kfraser@localhost.localdomain
date:    Fri Jul 28 12:56:10 2006 +0100
parents: bbea54da02b5

/******************************************************************************
 * os.h
 *
 * Random collection of macros and definitions.
 */

#ifndef _OS_H_
#define _OS_H_

#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
#define __builtin_expect(x, expected_value) (x)
#endif
#define unlikely(x)  __builtin_expect((x),0)

#define smp_processor_id() 0

#ifndef __ASSEMBLY__
#include <types.h>
#include <hypervisor.h>

extern void do_exit(void);
#define BUG do_exit

#endif
#include <xen/xen.h>

#define force_evtchn_callback() ((void)HYPERVISOR_xen_version(0, 0))

#define __KERNEL_CS  FLAT_KERNEL_CS
#define __KERNEL_DS  FLAT_KERNEL_DS
#define __KERNEL_SS  FLAT_KERNEL_SS

#define TRAP_divide_error      0
#define TRAP_debug             1
#define TRAP_nmi               2
#define TRAP_int3              3
#define TRAP_overflow          4
#define TRAP_bounds            5
#define TRAP_invalid_op        6
#define TRAP_no_device         7
#define TRAP_double_fault      8
#define TRAP_copro_seg         9
#define TRAP_invalid_tss      10
#define TRAP_no_segment       11
#define TRAP_stack_error      12
#define TRAP_gp_fault         13
#define TRAP_page_fault       14
#define TRAP_spurious_int     15
#define TRAP_copro_error      16
#define TRAP_alignment_check  17
#define TRAP_machine_check    18
#define TRAP_simd_error       19
#define TRAP_deferred_nmi     31

/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__

extern shared_info_t *HYPERVISOR_shared_info;

void trap_init(void);

/*
 * The use of 'barrier' in the following reflects their use as local-lock
 * operations. Reentrancy must be prevented (e.g., __cli()) /before/ the
 * following critical operations are executed. All critical operations must
 * complete /before/ reentrancy is permitted (e.g., __sti()). The Alpha
 * architecture, for example, also requires these barriers.
 */

#define __cli()                                                             \
do {                                                                        \
    vcpu_info_t *_vcpu;                                                     \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];         \
    _vcpu->evtchn_upcall_mask = 1;                                          \
    barrier();                                                              \
} while (0)

#define __sti()                                                             \
do {                                                                        \
    vcpu_info_t *_vcpu;                                                     \
    barrier();                                                              \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];         \
    _vcpu->evtchn_upcall_mask = 0;                                          \
    barrier(); /* unmask then check (avoid races) */                        \
    if ( unlikely(_vcpu->evtchn_upcall_pending) )                           \
        force_evtchn_callback();                                            \
} while (0)

#define __save_flags(x)                                                     \
do {                                                                        \
    vcpu_info_t *_vcpu;                                                     \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];         \
    (x) = _vcpu->evtchn_upcall_mask;                                        \
} while (0)

#define __restore_flags(x)                                                  \
do {                                                                        \
    vcpu_info_t *_vcpu;                                                     \
    barrier();                                                              \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];         \
    if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                           \
        barrier(); /* unmask then check (avoid races) */                    \
        if ( unlikely(_vcpu->evtchn_upcall_pending) )                       \
            force_evtchn_callback();                                        \
    }                                                                       \
} while (0)

#define safe_halt() ((void)0)

#define __save_and_cli(x)                                                   \
do {                                                                        \
    vcpu_info_t *_vcpu;                                                     \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];         \
    (x) = _vcpu->evtchn_upcall_mask;                                        \
    _vcpu->evtchn_upcall_mask = 1;                                          \
    barrier();                                                              \
} while (0)

#define local_irq_save(x)     __save_and_cli(x)
#define local_irq_restore(x)  __restore_flags(x)
#define local_save_flags(x)   __save_flags(x)
#define local_irq_disable()   __cli()
#define local_irq_enable()    __sti()

#define irqs_disabled()                                                     \
    HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].evtchn_upcall_mask

/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#if defined(__i386__)
#define mb()   __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb()  __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define wmb()  __asm__ __volatile__ ("": : :"memory")
#elif defined(__x86_64__)
#define mb()   __asm__ __volatile__ ("mfence":::"memory")
#define rmb()  __asm__ __volatile__ ("lfence":::"memory")
#define wmb()  __asm__ __volatile__ ("sfence" ::: "memory") /* From CONFIG_UNORDERED_IO (linux) */
#endif
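
/*
 * Illustrative usage sketch (not part of the original os.h): the classic
 * producer/consumer ordering that the wmb()/rmb() barriers above provide.
 * The function names and the 'data'/'ready' word pair are hypothetical.
 */
static inline void example_publish(volatile unsigned long *data,
                                   volatile unsigned long *ready,
                                   unsigned long value)
{
    *data = value;
    wmb();              /* make the payload visible before raising the flag */
    *ready = 1;
}

static inline unsigned long example_consume(volatile unsigned long *data,
                                            volatile unsigned long *ready)
{
    while (*ready == 0)
        ;               /* spin until the flag is observed */
    rmb();              /* order the flag read before the payload read */
    return *data;
}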

#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;
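
/*
 * Illustrative usage sketch (not part of the original os.h): bracketing a
 * critical section with the event-delivery mask macros defined earlier. The
 * function name and the 'counter' parameter are hypothetical.
 */
static inline void example_critical_increment(unsigned long *counter)
{
    unsigned long flags;

    local_irq_save(flags);      /* mask event upcalls, remember the old mask */
    (*counter)++;               /* data that event handlers may also touch */
    local_irq_restore(flags);   /* restore the mask; fires any pending events */
}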

/************************** i386 *******************************/
#if defined (__i386__)

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}
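
/*
 * Illustrative usage sketch (not part of the original os.h): xchg() as an
 * atomic fetch-and-clear of a pending-work word. The function name is
 * hypothetical.
 */
static inline unsigned long example_take_pending(volatile unsigned long *pending)
{
    /* Atomically read the old value and replace it with zero. */
    return xchg(pending, 0UL);
}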

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; it may be
 * reordered on other architectures.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
    int oldbit;

    __asm__ __volatile__( LOCK
                          "btrl %2,%1\n\tsbbl %0,%0"
                          :"=r" (oldbit),"=m" (ADDR)
                          :"Ir" (nr) : "memory");
    return oldbit;
}
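
/*
 * Illustrative usage sketch (not part of the original os.h): consuming a
 * one-shot "work pending" flag with test_and_clear_bit(). The function name
 * is hypothetical.
 */
static inline int example_claim_work(volatile unsigned long *pending, int nr)
{
    /* Non-zero for exactly one caller, even if several race on the same bit. */
    return test_and_clear_bit(nr, pending);
}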

static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
    return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}

static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
{
    int oldbit;

    __asm__ __volatile__(
                          "btl %2,%1\n\tsbbl %0,%0"
                          :"=r" (oldbit)
                          :"m" (ADDR),"Ir" (nr));
    return oldbit;
}

#define test_bit(nr,addr)            \
(__builtin_constant_p(nr) ?          \
 constant_test_bit((nr),(addr)) :    \
 variable_test_bit((nr),(addr)))

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * do not rely on its ordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long * addr)
{
    __asm__ __volatile__( LOCK
                          "btsl %1,%0"
                          :"=m" (ADDR)
                          :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long * addr)
{
    __asm__ __volatile__( LOCK
                          "btrl %1,%0"
                          :"=m" (ADDR)
                          :"Ir" (nr));
}
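
/*
 * Illustrative usage sketch (not part of the original os.h): keeping a small
 * flag word up to date with the atomic bit operations above. The function
 * names and the 'flags' parameter are hypothetical.
 */
static inline void example_mark_busy(volatile unsigned long *flags, int id)
{
    set_bit(id, flags);           /* atomically set flag 'id' */
}

static inline void example_mark_idle(volatile unsigned long *flags, int id)
{
    clear_bit(id, flags);         /* atomically clear flag 'id' */
}

static inline int example_is_busy(volatile unsigned long *flags, int id)
{
    return test_bit(id, flags);   /* plain (non-atomic) read of flag 'id' */
}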

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
    __asm__("bsfl %1,%0"
            :"=r" (word)
            :"rm" (word));
    return word;
}
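
/*
 * Illustrative usage sketch (not part of the original os.h): visiting every
 * set bit in a word with __ffs(), the usual way a pending-event word is
 * demultiplexed. The function name is hypothetical.
 */
static inline void example_for_each_set_bit(unsigned long word,
                                            void (*handler)(unsigned long bit))
{
    while (word != 0) {
        unsigned long bit = __ffs(word);   /* index of the lowest set bit */
        handler(bit);
        word &= ~(1UL << bit);             /* clear it and look for the next */
    }
}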

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
#define ADDR (*(volatile long *) addr)

#define rdtscll(val) \
    __asm__ __volatile__("rdtsc" : "=A" (val))
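
/*
 * Illustrative usage sketch (not part of the original os.h): measuring a code
 * sequence in TSC cycles with rdtscll(). The function name is hypothetical.
 */
static inline unsigned long long example_cycles(void (*fn)(void))
{
    unsigned long long start, end;

    rdtscll(start);     /* read the time-stamp counter before... */
    fn();
    rdtscll(end);       /* ...and after the measured call */
    return end - start;
}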

#elif defined(__x86_64__) /* ifdef __i386__ */
/************************** x86_64 *******************************/

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
#define __xg(x) ((volatile long *)(x))
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %k0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 8:
        __asm__ __volatile__("xchgq %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__( LOCK_PREFIX
                          "btrl %2,%1\n\tsbbl %0,%0"
                          :"=r" (oldbit),"=m" (ADDR)
                          :"dIr" (nr) : "memory");
    return oldbit;
}

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile const void * addr)
{
    int oldbit;

    __asm__ __volatile__(
                          "btl %2,%1\n\tsbbl %0,%0"
                          :"=r" (oldbit)
                          :"m" (ADDR),"dIr" (nr));
    return oldbit;
}

#define test_bit(nr,addr)            \
(__builtin_constant_p(nr) ?          \
 constant_test_bit((nr),(addr)) :    \
 variable_test_bit((nr),(addr)))

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__( LOCK_PREFIX
                          "btsl %1,%0"
                          :"=m" (ADDR)
                          :"dIr" (nr) : "memory");
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__( LOCK_PREFIX
                          "btrl %1,%0"
                          :"=m" (ADDR)
                          :"dIr" (nr));
}

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __ffs(unsigned long word)
{
    __asm__("bsfq %1,%0"
            :"=r" (word)
            :"rm" (word));
    return word;
}

#define ADDR (*(volatile long *) addr)

#define rdtscll(val) do { \
    unsigned int __a,__d; \
    asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
    (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
} while(0)

#define wrmsr(msr,val1,val2) \
    __asm__ __volatile__("wrmsr" \
                         : /* no outputs */ \
                         : "c" (msr), "a" (val1), "d" (val2))

#define wrmsrl(msr,val) wrmsr(msr,(u32)((u64)(val)),((u64)(val))>>32)
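
/*
 * Illustrative usage sketch (not part of the original os.h): wrmsrl() splits
 * a 64-bit value into the low/high halves that the wrmsr instruction expects
 * in %eax/%edx. The function name is hypothetical; 'msr' is whatever
 * model-specific register index the caller has in mind.
 */
static __inline__ void example_write_msr(unsigned int msr, unsigned long long value)
{
    wrmsrl(msr, value);   /* expands to wrmsr(msr, low 32 bits, high 32 bits) */
}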

#else /* ifdef __x86_64__ */
#error "Unsupported architecture"
#endif

/********************* common i386 and x86_64 ****************************/
struct __synch_xchg_dummy { unsigned long a[100]; };
#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))

#define synch_cmpxchg(ptr, old, new) \
((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
                                     (unsigned long)(old), \
                                     (unsigned long)(new), \
                                     sizeof(*(ptr))))

static inline unsigned long __synch_cmpxchg(volatile void *ptr,
                                            unsigned long old,
                                            unsigned long new, int size)
{
    unsigned long prev;
    switch (size) {
    case 1:
        __asm__ __volatile__("lock; cmpxchgb %b1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
    case 2:
        __asm__ __volatile__("lock; cmpxchgw %w1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
#ifdef __x86_64__
    case 4:
        __asm__ __volatile__("lock; cmpxchgl %k1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
    case 8:
        __asm__ __volatile__("lock; cmpxchgq %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
#else
    case 4:
        __asm__ __volatile__("lock; cmpxchgl %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
#endif
    }
    return old;
}
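
/*
 * Illustrative usage sketch (not part of the original os.h): the usual
 * compare-and-swap retry loop built on synch_cmpxchg(), e.g. for a counter
 * living in memory shared with another domain. The function name is
 * hypothetical.
 */
static __inline__ unsigned long example_synch_add(volatile unsigned long *counter,
                                                  unsigned long delta)
{
    unsigned long old, seen;

    do {
        old  = *counter;
        seen = synch_cmpxchg(counter, old, old + delta); /* value actually found */
    } while (seen != old);                               /* retry if we lost a race */

    return old + delta;
}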

static __inline__ void synch_set_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btsl %1,%0"
        : "=m" (ADDR) : "Ir" (nr) : "memory" );
}

static __inline__ void synch_clear_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btrl %1,%0"
        : "=m" (ADDR) : "Ir" (nr) : "memory" );
}

static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btsl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btrl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) &
            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "btl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
    return oldbit;
}

#define synch_test_bit(nr,addr)            \
(__builtin_constant_p(nr) ?                \
 synch_const_test_bit((nr),(addr)) :       \
 synch_var_test_bit((nr),(addr)))
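
/*
 * Illustrative usage sketch (not part of the original os.h): the
 * test-and-clear pattern the synch_* operations above exist for, on a word
 * shared with the hypervisor or another domain. The function name is
 * hypothetical.
 */
static __inline__ int example_consume_shared_flag(volatile void *shared, int nr)
{
    if (!synch_test_bit(nr, shared))
        return 0;                                 /* nothing pending */
    return synch_test_and_clear_bit(nr, shared);  /* claim it atomically */
}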

#endif /* not assembly */
#endif /* _OS_H_ */