extras/mini-os/include/os.h @ 6106:a64ac7fafbf0 (ia64/xen-unstable)

PAE page directories must be below 4GB. Based on a patch from Gerd Knorr.
Signed-off-by: Keir Fraser <keir@xensource.com>
Author: kaf24@firebug.cl.cam.ac.uk
Date:   Thu Aug 11 10:34:32 2005 +0000
/******************************************************************************
 * os.h
 *
 * A random collection of macros and definitions.
 */
#ifndef _OS_H_
#define _OS_H_

#define NULL 0

#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
#define __builtin_expect(x, expected_value) (x)
#endif
#define unlikely(x)  __builtin_expect((x),0)

#define smp_processor_id() 0
#define preempt_disable() ((void)0)
#define preempt_enable_no_resched() ((void)0) /* used by __cli()/__save_and_cli() below */
#define preempt_enable() ((void)0)

#define force_evtchn_callback() ((void)HYPERVISOR_xen_version(0))
#ifndef __ASSEMBLY__
#include <types.h>
#endif
#include <xen/xen.h>

#define __KERNEL_CS  FLAT_KERNEL_CS
#define __KERNEL_DS  FLAT_KERNEL_DS
#define __KERNEL_SS  FLAT_KERNEL_SS

#define TRAP_divide_error      0
#define TRAP_debug             1
#define TRAP_nmi               2
#define TRAP_int3              3
#define TRAP_overflow          4
#define TRAP_bounds            5
#define TRAP_invalid_op        6
#define TRAP_no_device         7
#define TRAP_double_fault      8
#define TRAP_copro_seg         9
#define TRAP_invalid_tss      10
#define TRAP_no_segment       11
#define TRAP_stack_error      12
#define TRAP_gp_fault         13
#define TRAP_page_fault       14
#define TRAP_spurious_int     15
#define TRAP_copro_error      16
#define TRAP_alignment_check  17
#define TRAP_machine_check    18
#define TRAP_simd_error       19
#define TRAP_deferred_nmi     31

/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__

#define pt_regs xen_regs

void trap_init(void);
/*
 * The use of 'barrier' in the following reflects the use of these macros as
 * local-lock operations. Reentrancy must be prevented (e.g., __cli()) /before/
 * the following critical operations are executed. All critical operations must
 * complete /before/ reentrancy is permitted (e.g., __sti()). The Alpha
 * architecture, for example, also requires these barriers.
 */
#define __cli()                                                             \
do {                                                                        \
    vcpu_info_t *_vcpu;                                                     \
    preempt_disable();                                                      \
    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];         \
    _vcpu->evtchn_upcall_mask = 1;                                          \
    preempt_enable_no_resched();                                            \
    barrier();                                                              \
} while (0)

#define __sti()                                                             \
do {                                                                        \
    vcpu_info_t *_vcpu;                                                     \
    barrier();                                                              \
    preempt_disable();                                                      \
    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];         \
    _vcpu->evtchn_upcall_mask = 0;                                          \
    barrier(); /* unmask then check (avoid races) */                        \
    if ( unlikely(_vcpu->evtchn_upcall_pending) )                           \
        force_evtchn_callback();                                            \
    preempt_enable();                                                       \
} while (0)

#define __save_flags(x)                                                     \
do {                                                                        \
    vcpu_info_t *_vcpu;                                                     \
    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];         \
    (x) = _vcpu->evtchn_upcall_mask;                                        \
} while (0)

#define __restore_flags(x)                                                  \
do {                                                                        \
    vcpu_info_t *_vcpu;                                                     \
    barrier();                                                              \
    preempt_disable();                                                      \
    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];         \
    if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                           \
        barrier(); /* unmask then check (avoid races) */                    \
        if ( unlikely(_vcpu->evtchn_upcall_pending) )                       \
            force_evtchn_callback();                                        \
        preempt_enable();                                                   \
    } else                                                                  \
        preempt_enable_no_resched();                                        \
} while (0)

#define safe_halt() ((void)0)

#define __save_and_cli(x)                                                   \
do {                                                                        \
    vcpu_info_t *_vcpu;                                                     \
    preempt_disable();                                                      \
    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];         \
    (x) = _vcpu->evtchn_upcall_mask;                                        \
    _vcpu->evtchn_upcall_mask = 1;                                          \
    preempt_enable_no_resched();                                            \
    barrier();                                                              \
} while (0)

#define local_irq_save(x)     __save_and_cli(x)
#define local_irq_restore(x)  __restore_flags(x)
#define local_save_flags(x)   __save_flags(x)
#define local_irq_disable()   __cli()
#define local_irq_enable()    __sti()

#define irqs_disabled()                                                     \
    HYPERVISOR_shared_info->vcpu_data[smp_processor_id()].evtchn_upcall_mask
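/*
 * Illustrative usage sketch (kept out of the build with #if 0): a critical
 * section that masks event-channel upcalls around an update.  It assumes the
 * HYPERVISOR_shared_info and hypercall declarations from hypervisor.h are in
 * scope, as they are in the .c files that actually use these macros; the
 * function and variable names below are hypothetical.
 */
#if 0
static int example_pending_work;        /* hypothetical shared state */

static void example_do_work_atomically(void)
{
    unsigned long flags;

    local_irq_save(flags);              /* mask upcalls, remember old mask */
    example_pending_work++;             /* cannot race with event handlers */
    local_irq_restore(flags);           /* unmask, delivering any pending event */
}
#endif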
/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define xchg(ptr,v) \
    ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr,
                                       int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}
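/*
 * Illustrative sketch: xchg() atomically swaps a new value into a word and
 * returns the old contents, which is enough for a simple test-and-set style
 * trylock.  The function name and lock convention here are hypothetical.
 */
static __inline__ int example_trylock(volatile unsigned long *lock)
{
    /* Returns 1 if the lock word was 0 (free) and is now 1 (held by us). */
    return xchg(lock, 1UL) == 0;
}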
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__( LOCK_PREFIX
        "btrl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit),"=m" (ADDR)
        :"Ir" (nr) : "memory");
    return oldbit;
}

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit)
        :"m" (ADDR),"Ir" (nr));
    return oldbit;
}

#define test_bit(nr,addr) \
    (__builtin_constant_p(nr) ? \
     constant_test_bit((nr),(addr)) : \
     variable_test_bit((nr),(addr)))
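/*
 * Illustrative sketch: checking and atomically consuming a flag in a bitmap
 * with test_bit()/test_and_clear_bit().  The bitmap and bit number here are
 * hypothetical.
 */
static __inline__ int example_consume_flag(volatile unsigned long *bitmap, int nr)
{
    if ( !test_bit(nr, bitmap) )
        return 0;                         /* flag not set, nothing to do */
    /* The atomic RMW reports whether this caller actually cleared the bit. */
    return test_and_clear_bit(nr, bitmap);
}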
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__( LOCK_PREFIX
        "btsl %1,%0"
        :"=m" (ADDR)
        :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__( LOCK_PREFIX
        "btrl %1,%0"
        :"=m" (ADDR)
        :"Ir" (nr));
}
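/*
 * Illustrative sketch: publishing and retiring a flag with the atomic
 * set_bit()/clear_bit() pair.  The bitmap and helper name are hypothetical.
 */
static __inline__ void example_mark_flag(volatile unsigned long *bitmap,
                                         int nr, int on)
{
    if ( on )
        set_bit(nr, bitmap);    /* atomic read-modify-write of a single bit */
    else
        clear_bit(nr, bitmap);
}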
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
    __asm__ __volatile__(
        LOCK "incl %0"
        :"=m" (v->counter)
        :"m" (v->counter));
}
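/*
 * Illustrative sketch: an atomic_t used as a simple event counter.  The
 * helper names are hypothetical; this header provides no atomic read helper,
 * so the counter word is read directly.
 */
static __inline__ void example_count_event(atomic_t *ctr)
{
    atomic_inc(ctr);            /* increment without losing concurrent updates */
}

static __inline__ int example_read_count(atomic_t *ctr)
{
    return ctr->counter;        /* plain read of the counter word */
}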
#define rdtscll(val) \
    __asm__ __volatile__("rdtsc" : "=A" (val))

static __inline__ unsigned long __ffs(unsigned long word)
{
    __asm__("bsfl %1,%0"
            :"=r" (word)
            :"rm" (word));
    return word;
}
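/*
 * Illustrative sketch: rdtscll() needs a 64-bit lvalue ("=A" binds EDX:EAX),
 * and __ffs() is undefined for a zero argument, so callers check the mask
 * first.  The helper names below are hypothetical.
 */
static __inline__ unsigned long long example_read_tsc(void)
{
    unsigned long long now;
    rdtscll(now);
    return now;
}

static __inline__ unsigned long example_first_set(unsigned long mask)
{
    return (mask != 0) ? __ffs(mask) : ~0UL;   /* ~0UL means "no bit set" */
}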
#define ADDR (*(volatile long *) addr)

static __inline__ void synch_set_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btsl %1,%0"
        : "=m" (ADDR) : "Ir" (nr) : "memory" );
}

static __inline__ void synch_clear_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btrl %1,%0"
        : "=m" (ADDR) : "Ir" (nr) : "memory" );
}

static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btsl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) &
            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "btl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
    return oldbit;
}

#define synch_test_bit(nr,addr) \
    (__builtin_constant_p(nr) ? \
     synch_const_test_bit((nr),(addr)) : \
     synch_var_test_bit((nr),(addr)))
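/*
 * Illustrative sketch: unlike the bit operations above (whose LOCK_PREFIX is
 * empty in this uniprocessor build), the synch_* variants always use a lock
 * prefix, so they are suitable for bits shared with the hypervisor, such as
 * event-channel pending/mask words.  The names below are hypothetical.
 */
static __inline__ int example_claim_shared_bit(volatile unsigned long *shared,
                                               int nr)
{
    /* Returns 1 if this caller set the bit, 0 if it was already set. */
    return !synch_test_and_set_bit(nr, shared);
}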
#endif /* !__ASSEMBLY__ */

#define rdtsc(low,high) \
    __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))

#endif /* _OS_H_ */