ia64/xen-unstable: extras/mini-os/h/os.h @ 4072:ee7313088070

bitkeeper revision 1.1159.258.38 (4230628bX3yukmWYtNDh925BckMoHQ)

Merge ssh://xenbk@gandalf.hpl.hp.com//var/bk/xen-2.0-testing.bk
into tetris.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-2.0-testing.bk

author    iap10@tetris.cl.cam.ac.uk
date      Thu Mar 10 15:06:51 2005 +0000
parents   3f929065a1d1
children  0a4b76b6b5a0
/******************************************************************************
 * os.h
 *
 * A random collection of macros and definitions.
 */
#ifndef _OS_H_
#define _OS_H_

#define NULL 0
/* Somewhere in the middle of the GCC 2.96 development cycle, we implemented
   a mechanism by which the user can annotate likely branch directions and
   expect the blocks to be reordered appropriately. Define __builtin_expect
   to nothing for earlier compilers. */
#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
#define __builtin_expect(x, expected_value) (x)
#endif
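/*
 * Illustrative only (not part of the original header): annotating a rare
 * error path with __builtin_expect lets GCC keep the common case as the
 * straight-line fall-through, e.g.
 *
 *   if ( __builtin_expect(ptr == NULL, 0) )
 *       return;    // cold error path, laid out out of line
 */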
/*
 * These are the segment descriptors provided for us by the hypervisor.
 * For now, these are hardwired -- guest OSes cannot update the GDT
 * or LDT.
 *
 * It shouldn't be hard to support descriptor-table frobbing -- let me
 * know if the BSD or XP ports require flexibility here.
 */
/*
 * These are also defined in xen-public/xen.h but can't be pulled in as
 * they are used in start-of-day assembly. Need to clean up the .h files
 * a bit more...
 */
#ifndef FLAT_RING1_CS
#define FLAT_RING1_CS 0x0819
#define FLAT_RING1_DS 0x0821
#define FLAT_RING3_CS 0x082b
#define FLAT_RING3_DS 0x0833
#endif

#define __KERNEL_CS FLAT_RING1_CS
#define __KERNEL_DS FLAT_RING1_DS
/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__

#include <types.h>
#include <xen-public/xen.h>
/* This struct defines the way the registers are stored on the
   stack during an exception or interrupt. */
struct pt_regs {
    long ebx;
    long ecx;
    long edx;
    long esi;
    long edi;
    long ebp;
    long eax;
    int  xds;
    int  xes;
    long orig_eax;
    long eip;
    int  xcs;
    long eflags;
    long esp;
    int  xss;
};
/* Some function prototypes. */
void trap_init(void);
void dump_regs(struct pt_regs *regs);
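/*
 * Illustrative only (hypothetical handler name, not part of this header):
 * a trap handler receives the saved frame defined above and can report
 * the faulting context via dump_regs():
 *
 *   void do_divide_error(struct pt_regs *regs)
 *   {
 *       dump_regs(regs);    // regs->eip is the faulting instruction
 *   }
 */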
/*
 * STI/CLI equivalents. These basically set and clear the virtual
 * event_enable flag in the shared_info structure. Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */
#define unlikely(x) __builtin_expect((x),0)

#define __save_flags(x)                                                   \
do {                                                                      \
    (x) = test_bit(EVENTS_MASTER_ENABLE_BIT,                              \
                   &HYPERVISOR_shared_info->events_mask);                 \
    barrier();                                                            \
} while (0)

#define __restore_flags(x)                                                \
do {                                                                      \
    shared_info_t *_shared = HYPERVISOR_shared_info;                      \
    if (x) set_bit(EVENTS_MASTER_ENABLE_BIT, &_shared->events_mask);      \
    barrier();                                                            \
    if ( unlikely(_shared->events) && (x) ) do_hypervisor_callback(NULL); \
} while (0)

#define __cli()                                                           \
do {                                                                      \
    clear_bit(EVENTS_MASTER_ENABLE_BIT,                                   \
              &HYPERVISOR_shared_info->events_mask);                      \
    barrier();                                                            \
} while (0)

#define __sti()                                                           \
do {                                                                      \
    shared_info_t *_shared = HYPERVISOR_shared_info;                      \
    set_bit(EVENTS_MASTER_ENABLE_BIT, &_shared->events_mask);             \
    barrier();                                                            \
    if ( unlikely(_shared->events) ) do_hypervisor_callback(NULL);        \
} while (0)

#define cli()            __cli()
#define sti()            __sti()
#define save_flags(x)    __save_flags(x)
#define restore_flags(x) __restore_flags(x)
/* Note: __save_and_cli() and __save_and_sti() are not defined in this
   file; they are expected to be provided elsewhere. */
#define save_and_cli(x)  __save_and_cli(x)
#define save_and_sti(x)  __save_and_sti(x)
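/*
 * Usage sketch (illustrative, not part of the original header): the
 * usual pattern for touching data shared with the event callback is to
 * save the virtual interrupt state, disable delivery, and restore:
 *
 *   unsigned long flags;
 *   save_flags(flags);
 *   cli();
 *   ... manipulate state shared with the event callback ...
 *   restore_flags(flags);   // may call do_hypervisor_callback()
 */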
/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;
#define xchg(ptr,v) \
    ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr,
                                       int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}
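/*
 * Usage sketch (illustrative, not part of the original header): xchg()
 * atomically swaps a new value into *ptr and returns the old one, which
 * is enough to build a simple test-and-set lock. The helper name below
 * is hypothetical.
 */
static __inline__ int demo_try_lock(volatile unsigned long *lock)
{
    /* Returns 1 if we took the lock (i.e. the old value was 0). */
    return xchg(lock, 1UL) == 0;
}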
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__( LOCK_PREFIX
                          "btrl %2,%1\n\tsbbl %0,%0"
                          :"=r" (oldbit),"=m" (ADDR)
                          :"Ir" (nr) : "memory");
    return oldbit;
}
static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) &
            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit)
        :"m" (ADDR),"Ir" (nr));
    return oldbit;
}
#define test_bit(nr,addr)                \
    (__builtin_constant_p(nr) ?          \
     constant_test_bit((nr),(addr)) :    \
     variable_test_bit((nr),(addr)))
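/*
 * Illustrative only (not part of the original header): with a literal
 * bit number, __builtin_constant_p(nr) is true and the dispatch above
 * resolves at compile time to the pure-C constant_test_bit(); a runtime
 * bit number falls back to the btl-based variable_test_bit():
 *
 *   if ( test_bit(EVENTS_MASTER_ENABLE_BIT,
 *                 &HYPERVISOR_shared_info->events_mask) )
 *       ...    // event delivery is currently enabled
 */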
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * (not defined in this header) if you do not require the atomic
 * guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__( LOCK_PREFIX
                          "btsl %1,%0"
                          :"=m" (ADDR)
                          :"Ir" (nr));
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or
 * smp_mb__after_clear_bit() (not defined in this header) to ensure
 * changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__( LOCK_PREFIX
                          "btrl %1,%0"
                          :"=m" (ADDR)
                          :"Ir" (nr));
}
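/*
 * Illustrative only (not part of the original header): set_bit() and
 * clear_bit() are exactly how the __sti()/__cli() macros above flip the
 * virtual master-enable flag:
 *
 *   set_bit(EVENTS_MASTER_ENABLE_BIT,
 *           &HYPERVISOR_shared_info->events_mask);     // enable
 *   clear_bit(EVENTS_MASTER_ENABLE_BIT,
 *             &HYPERVISOR_shared_info->events_mask);   // disable
 */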
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
    __asm__ __volatile__(
        LOCK "incl %0"
        :"=m" (v->counter)
        :"m" (v->counter));
}
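/*
 * Illustrative only (not part of the original header): a counter shared
 * with an event callback can be bumped without any locking:
 *
 *   static atomic_t nr_events = { 0 };
 *   ...
 *   atomic_inc(&nr_events);   // LOCK incl: atomic read-modify-write
 */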
#define rdtscll(val) \
    __asm__ __volatile__("rdtsc" : "=A" (val))
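/*
 * Illustrative only (not part of the original header): the "=A"
 * constraint binds the EDX:EAX pair, so val must be a 64-bit lvalue:
 *
 *   unsigned long long t0, t1;
 *   rdtscll(t0);
 *   ... code under measurement ...
 *   rdtscll(t1);
 *   // elapsed cycles: t1 - t0
 */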
#endif /* !__ASSEMBLY__ */

#endif /* _OS_H_ */