extras/mini-os/h/os.h @ 4146:f2d61710e4d9 (ia64/xen-unstable)

bitkeeper revision 1.1236.25.24 (42366e9aQ71LQ8uCB-Y1IwVNqx5eqA)

Merge djm@kirby.fc.hp.com://home/djm/src/xen/xeno-unstable-ia64.bk
into sportsman.spdomain:/home/djm/xeno-unstable-ia64.bk

author   djm@sportsman.spdomain
date     Tue Mar 15 05:11:54 2005 +0000 (2005-03-15)
parents  7561a06348cf
children (none)
/******************************************************************************
 * os.h
 *
 * Random collection of macros and definitions.
 */
#ifndef _OS_H_
#define _OS_H_

#define NULL 0

#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
#define __builtin_expect(x, expected_value) (x)
#endif
#define unlikely(x)  __builtin_expect((x),0)

#define smp_processor_id() 0
#define preempt_disable()  ((void)0)
#define preempt_enable()   ((void)0)

#define force_evtchn_callback() ((void)HYPERVISOR_xen_version(0))

#ifndef __ASSEMBLY__
#include <types.h>
#endif
#include <xen-public/xen.h>

#define __KERNEL_CS  FLAT_KERNEL_CS
#define __KERNEL_DS  FLAT_KERNEL_DS
#define __KERNEL_SS  FLAT_KERNEL_SS
#define TRAP_divide_error      0
#define TRAP_debug             1
#define TRAP_nmi               2
#define TRAP_int3              3
#define TRAP_overflow          4
#define TRAP_bounds            5
#define TRAP_invalid_op        6
#define TRAP_no_device         7
#define TRAP_double_fault      8
#define TRAP_copro_seg         9
#define TRAP_invalid_tss      10
#define TRAP_no_segment       11
#define TRAP_stack_error      12
#define TRAP_gp_fault         13
#define TRAP_page_fault       14
#define TRAP_spurious_int     15
#define TRAP_copro_error      16
#define TRAP_alignment_check  17
#define TRAP_machine_check    18
#define TRAP_simd_error       19
#define TRAP_deferred_nmi     31
/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__

#define pt_regs xen_regs

void trap_init(void);
void dump_regs(struct pt_regs *regs);
/*
 * The use of 'barrier' in the following reflects their use as local-lock
 * operations. Reentrancy must be prevented (e.g., __cli()) /before/ the
 * following critical operations are executed. All critical operations must
 * complete /before/ reentrancy is permitted (e.g., __sti()). The Alpha
 * architecture, for example, also requires these barriers.
 */
#define __cli() \
do { \
    vcpu_info_t *_vcpu; \
    preempt_disable(); \
    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
    _vcpu->evtchn_upcall_mask = 1; \
    preempt_enable_no_resched(); \
    barrier(); \
} while (0)

#define __sti() \
do { \
    vcpu_info_t *_vcpu; \
    barrier(); \
    preempt_disable(); \
    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
    _vcpu->evtchn_upcall_mask = 0; \
    barrier(); /* unmask then check (avoid races) */ \
    if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
        force_evtchn_callback(); \
    preempt_enable(); \
} while (0)
#define __save_flags(x) \
do { \
    vcpu_info_t *_vcpu; \
    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
    (x) = _vcpu->evtchn_upcall_mask; \
} while (0)

#define __restore_flags(x) \
do { \
    vcpu_info_t *_vcpu; \
    barrier(); \
    preempt_disable(); \
    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
    if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
        barrier(); /* unmask then check (avoid races) */ \
        if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
            force_evtchn_callback(); \
        preempt_enable(); \
    } else \
        preempt_enable_no_resched(); \
} while (0)
#define safe_halt() ((void)0)

#define __save_and_cli(x) \
do { \
    vcpu_info_t *_vcpu; \
    preempt_disable(); \
    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
    (x) = _vcpu->evtchn_upcall_mask; \
    _vcpu->evtchn_upcall_mask = 1; \
    preempt_enable_no_resched(); \
    barrier(); \
} while (0)

#define local_irq_save(x)    __save_and_cli(x)
#define local_irq_restore(x) __restore_flags(x)
#define local_save_flags(x)  __save_flags(x)
#define local_irq_disable()  __cli()
#define local_irq_enable()   __sti()

#define irqs_disabled() \
    HYPERVISOR_shared_info->vcpu_data[smp_processor_id()].evtchn_upcall_mask
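/*
 * Illustrative usage sketch (not part of the original header): the
 * local_irq_* macros above mask and restore event-channel upcalls, this
 * guest's equivalent of disabling interrupts.  The names 'flags' and
 * 'shared_counter' below are hypothetical.
 *
 *     unsigned long flags;
 *
 *     local_irq_save(flags);        mask upcalls, remember the old mask
 *     shared_counter++;             protected from event-channel callbacks
 *     local_irq_restore(flags);     restore the mask; if it becomes clear and
 *                                   an event is pending, force the callback
 */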
/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;
#define xchg(ptr,v) \
    ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr,
                                       int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}
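/*
 * Illustrative usage sketch (not part of the original header): xchg()
 * atomically swaps a new value into a word and returns the old contents,
 * which is enough to build a simple test-and-set spin lock.  'lock_word'
 * is a hypothetical variable.
 *
 *     static volatile unsigned long lock_word;
 *
 *     while (xchg(&lock_word, 1) != 0)
 *         ;                         spin until the previous value was 0
 *     ... critical section ...
 *     lock_word = 0;                release the lock
 */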
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__( LOCK_PREFIX
        "btrl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit),"=m" (ADDR)
        :"Ir" (nr) : "memory");
    return oldbit;
}
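/*
 * Illustrative usage sketch (not part of the original header):
 * test_and_clear_bit() consumes a flag exactly once even if it is set
 * concurrently, e.g. from an event-channel upcall.  'pending' and
 * 'handle_event' are hypothetical names.
 *
 *     static unsigned long pending;
 *
 *     if (test_and_clear_bit(0, &pending))
 *         handle_event();           runs only if the bit was previously set
 */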
static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit)
        :"m" (ADDR),"Ir" (nr));
    return oldbit;
}

#define test_bit(nr,addr) \
    (__builtin_constant_p(nr) ? \
     constant_test_bit((nr),(addr)) : \
     variable_test_bit((nr),(addr)))
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__( LOCK_PREFIX
        "btsl %1,%0"
        :"=m" (ADDR)
        :"Ir" (nr));
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__( LOCK_PREFIX
        "btrl %1,%0"
        :"=m" (ADDR)
        :"Ir" (nr));
}
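/*
 * Illustrative usage sketch (not part of the original header): the bit
 * operations above act on an arbitrarily long bitmap, not just a single
 * word.  'irq_mask' and the bit number are hypothetical.
 *
 *     static unsigned long irq_mask[2];    64-bit bitmap on 32-bit x86
 *
 *     set_bit(35, &irq_mask);              atomically sets bit 3 of irq_mask[1]
 *     if (test_bit(35, &irq_mask))
 *         clear_bit(35, &irq_mask);        atomically clears it again
 */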
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
    __asm__ __volatile__(
        LOCK "incl %0"
        :"=m" (v->counter)
        :"m" (v->counter));
}
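/*
 * Illustrative usage sketch (not part of the original header): an atomic_t
 * counter that may be bumped from several contexts.  'nr_events' is a
 * hypothetical variable; since this header provides no atomic_read()
 * helper, the counter field is read directly.
 *
 *     static atomic_t nr_events = { 0 };
 *
 *     atomic_inc(&nr_events);              locked increment
 *     int seen = nr_events.counter;        plain read of the current value
 */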
#define rdtscll(val) \
    __asm__ __volatile__("rdtsc" : "=A" (val))
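/*
 * Illustrative usage sketch (not part of the original header): rdtscll()
 * reads the 64-bit time-stamp counter, so elapsed cycles can be measured by
 * subtracting two samples.  't0', 't1' and 'do_work' are hypothetical.
 *
 *     unsigned long long t0, t1;
 *
 *     rdtscll(t0);
 *     do_work();
 *     rdtscll(t1);
 *     ... t1 - t0 is the number of TSC ticks that do_work() took ...
 */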
#endif /* !__ASSEMBLY__ */

#endif /* _OS_H_ */