
linux-2.4.29-xen-sparse/include/asm-xen/system.h @ 3887:4385894c52ae

bitkeeper revision 1.1230.2.4 (421a95cepOZORm0EbZfqBeZ6PZ8MwA)

Merge freefall.cl.cam.ac.uk:/auto/groups/xeno/users/cl349/BK/xen-unstable.bk
into freefall.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
author iap10@freefall.cl.cam.ac.uk
date Tue Feb 22 02:15:42 2005 +0000 (2005-02-22)
parents 0a4b76b6b5a0
children 86b610094dd0 a5931595eca4
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/synch_bitops.h>
#include <asm/segment.h>
#include <asm/hypervisor.h>
#include <asm/evtchn.h>

#ifdef __KERNEL__

struct task_struct;
extern void FASTCALL(__switch_to(struct task_struct *prev,
                                 struct task_struct *next));

#define prepare_to_switch() \
do { \
    struct thread_struct *__t = &current->thread; \
    __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (*(int *)&__t->fs) ); \
    __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (*(int *)&__t->gs) ); \
} while (0)
#define switch_to(prev,next,last) do { \
    asm volatile("pushl %%esi\n\t" \
                 "pushl %%edi\n\t" \
                 "pushl %%ebp\n\t" \
                 "movl %%esp,%0\n\t"   /* save ESP */ \
                 "movl %3,%%esp\n\t"   /* restore ESP */ \
                 "movl $1f,%1\n\t"     /* save EIP */ \
                 "pushl %4\n\t"        /* restore EIP */ \
                 "jmp __switch_to\n" \
                 "1:\t" \
                 "popl %%ebp\n\t" \
                 "popl %%edi\n\t" \
                 "popl %%esi\n\t" \
                 :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
                  "=b" (last) \
                 :"m" (next->thread.esp),"m" (next->thread.eip), \
                  "a" (prev), "d" (next), \
                  "b" (prev)); \
} while (0)

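/*
 * Example (illustrative sketch, not part of the original header): the 2.4
 * scheduler is the expected caller of the pair above.  Roughly, schedule()
 * does:
 *
 *     prepare_to_switch();             // save %fs/%gs of the outgoing task
 *     switch_to(prev, next, prev);     // swap stacks/EIP, then jump to
 *                                      // __switch_to(prev, next)
 *
 * 'prev' and 'next' here stand for the outgoing and incoming task_struct
 * pointers chosen by the scheduler; the third argument receives the task we
 * actually switched away from once this task runs again.
 */
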
#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %%dl,%2\n\t" \
        "movb %%dh,%3" \
        :"=&d" (__pr) \
        :"m" (*((addr)+2)), \
         "m" (*((addr)+4)), \
         "m" (*((addr)+7)), \
         "0" (base) \
        ); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %2,%%dh\n\t" \
        "andb $0xf0,%%dh\n\t" \
        "orb %%dh,%%dl\n\t" \
        "movb %%dl,%2" \
        :"=&d" (__lr) \
        :"m" (*(addr)), \
         "m" (*((addr)+6)), \
         "0" (limit) \
        ); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )

static inline unsigned long _get_base(char * addr)
{
    unsigned long __base;
    __asm__("movb %3,%%dh\n\t"
            "movb %2,%%dl\n\t"
            "shll $16,%%edx\n\t"
            "movw %1,%%dx"
            :"=&d" (__base)
            :"m" (*((addr)+2)),
             "m" (*((addr)+4)),
             "m" (*((addr)+7)));
    return __base;
}

#define get_base(ldt) _get_base( ((char *)&(ldt)) )

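/*
 * Example (illustrative sketch, not part of the original header): these
 * helpers operate on an 8-byte segment descriptor whose 32-bit base is
 * scattered over bytes 2-4 and 7.  Assuming an entry such as 2.4's
 * 'struct desc_struct { unsigned long a, b; }', a round trip could look
 * like:
 *
 *     struct desc_struct desc = { 0, 0 };
 *     set_base(desc, 0xc0000000UL);          // write base into bytes 2,3,4,7
 *     unsigned long base = get_base(desc);   // reads back 0xc0000000
 *
 * set_limit() likewise rewrites the 20-bit limit field: it takes a byte
 * count and converts it to 4KB granularity via ((limit)-1)>>12, preserving
 * the flag nibble in byte 6.
 */
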
/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value) \
    asm volatile("\n" \
        "1:\t" \
        "movl %0,%%" #seg "\n" \
        "2:\n" \
        ".section .fixup,\"ax\"\n" \
        "3:\t" \
        "pushl $0\n\t" \
        "popl %%" #seg "\n\t" \
        "jmp 2b\n" \
        ".previous\n" \
        ".section __ex_table,\"a\"\n\t" \
        ".align 4\n\t" \
        ".long 1b,3b\n" \
        ".previous" \
        : :"m" (*(unsigned int *)&(value)))

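/*
 * Example (illustrative sketch, not part of the original header):
 * loadsegment() is the exception-safe way to reload a data segment register
 * from C, e.g. restoring a saved user %fs/%gs value that may have become
 * invalid, roughly:
 *
 *     loadsegment(gs, next->thread.gs);   // bad selector => %gs loaded with 0
 *
 * If the 'movl' faults, the .fixup stub runs instead and loads the null
 * selector, so a stale selector never oopses the kernel.
 */
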
/* NB. 'clts' is done for us by Xen during virtual trap. */
#define clts() ((void)0)
#define stts() (HYPERVISOR_fpu_taskswitch())

#endif /* __KERNEL__ */

static inline unsigned long get_limit(unsigned long segment)
{
    unsigned long __limit;
    __asm__("lsll %1,%0"
            :"=r" (__limit):"r" (segment));
    return __limit+1;
}

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))

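/*
 * Note (added commentary, not in the original header): __xg() exists purely
 * to widen the "m" constraint.  Casting the pointer to a struct of 100 longs
 * tells GCC the asm may touch a large object at that address, which stops it
 * from caching any part of *ptr in registers across the exchange.
 */
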
/*
 * The semantics of XCHGCMP8B are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
                                unsigned int low, unsigned int high)
{
    __asm__ __volatile__ (
        "\n1:\t"
        "movl (%0), %%eax\n\t"
        "movl 4(%0), %%edx\n\t"
        "lock cmpxchg8b (%0)\n\t"
        "jnz 1b"
        : /* no outputs */
        : "D"(ptr),
          "b"(low),
          "c"(high)
        : "ax","dx","memory");
}

static inline void __set_64bit_constant (unsigned long long *ptr,
                                         unsigned long long value)
{
    __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}

#define ll_low(x)   *(((unsigned int*)&(x))+0)
#define ll_high(x)  *(((unsigned int*)&(x))+1)

static inline void __set_64bit_var (unsigned long long *ptr,
                                    unsigned long long value)
{
    __set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
    (__builtin_constant_p(value) ? \
     __set_64bit_constant(ptr, value) : \
     __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
    (__builtin_constant_p(value) ? \
     __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
     __set_64bit(ptr, ll_low(value), ll_high(value)) )

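/*
 * Example (illustrative sketch, not part of the original header): set_64bit()
 * gives 32-bit readers a torn-free view of a 64-bit store, which is the sort
 * of guarantee the PAE pagetable code relies on.  A hypothetical use:
 *
 *     unsigned long long shared_counter;
 *     set_64bit(&shared_counter, 0x0000000100000000ULL);
 *
 * The cmpxchg8b loop above keeps retrying until the old value it sampled
 * into %edx:%eax is still current, at which point the new %ecx:%ebx pair is
 * written in a single atomic 64-bit access.
 */
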
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	   but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
            :"=q" (x)
            :"m" (*__xg(ptr)), "0" (x)
            :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
            :"=r" (x)
            :"m" (*__xg(ptr)), "0" (x)
            :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
            :"=r" (x)
            :"m" (*__xg(ptr)), "0" (x)
            :"memory");
        break;
    }
    return x;
}

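/*
 * Example (illustrative sketch, not part of the original header): the xchg()
 * wrapper picks the 1/2/4-byte variant from sizeof(*(ptr)) and returns the
 * previous contents, so a minimal hand-rolled test-and-set lock could read:
 *
 *     volatile int locked = 0;
 *     while (xchg(&locked, 1) != 0)
 *             ;                       // spin: someone else holds the lock
 *     ...critical section...
 *     locked = 0;                     // release (a real lock adds a barrier)
 *
 * tas(ptr) above is simply the xchg(ptr, 1) spelling of test-and-set.
 */
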
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
    unsigned long prev;
    switch (size) {
    case 1:
        __asm__ __volatile__("lock cmpxchgb %b1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
        return prev;
    case 2:
        __asm__ __volatile__("lock cmpxchgw %w1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
        return prev;
    case 4:
        __asm__ __volatile__("lock cmpxchgl %1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
        return prev;
    }
    return old;
}

#define cmpxchg(ptr,o,n) \
    ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \
                                   (unsigned long)(n),sizeof(*(ptr))))

#else
/* Compiling for a 386 proper. Is it worth implementing via cli/sti? */
#endif

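/*
 * Example (illustrative sketch, not part of the original header): the classic
 * cmpxchg() pattern is an optimistic read-modify-write that retries on
 * contention instead of taking a lock:
 *
 *     static int counter;
 *
 *     void atomic_style_inc(void)
 *     {
 *         int old, new;
 *         do {
 *             old = counter;
 *             new = old + 1;
 *         } while (cmpxchg(&counter, old, new) != old);
 *     }
 *
 * Success is detected exactly as the comment above says: the returned
 * previous value equals the 'old' that was passed in.
 */
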
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPU's follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPU's to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */

#define mb()  __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb() mb()

#ifdef CONFIG_X86_OOSTORE
#define wmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#else
#define wmb() __asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()  mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()  barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

#define set_wmb(var, value) do { var = value; wmb(); } while (0)

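/*
 * Example (illustrative sketch, not part of the original header): the usual
 * producer/consumer pairing of the write and read barriers defined above:
 *
 *     // producer side
 *     data = compute();
 *     wmb();              // make the write to 'data' visible before 'flag'
 *     flag = 1;
 *
 *     // consumer side
 *     while (!flag)
 *             ;           // poll
 *     rmb();              // order the read of 'flag' before the read of 'data'
 *     use(data);
 *
 * The smp_*() forms compile down to plain barrier() on UP kernels, while
 * mb(), rmb() and wmb() stay real fences because devices doing DMA (and,
 * here, the Xen hypervisor) can observe memory even on a uniprocessor guest.
 */
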
#define safe_halt() ((void)0)

/*
 * The use of 'barrier' in the following reflects their use as local-lock
 * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
 * critical operations are executed. All critical operations must complete
 * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
 * includes these barriers, for example.
 */

#define __cli() \
do { \
    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
    barrier(); \
} while (0)

#define __sti() \
do { \
    shared_info_t *_shared = HYPERVISOR_shared_info; \
    barrier(); \
    _shared->vcpu_data[0].evtchn_upcall_mask = 0; \
    barrier(); /* unmask then check (avoid races) */ \
    if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
        force_evtchn_callback(); \
} while (0)

#define __save_flags(x) \
do { \
    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
} while (0)

#define __restore_flags(x) \
do { \
    shared_info_t *_shared = HYPERVISOR_shared_info; \
    barrier(); \
    if ( (_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0 ) { \
        barrier(); /* unmask then check (avoid races) */ \
        if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
            force_evtchn_callback(); \
    } \
} while (0)

#define __save_and_cli(x) \
do { \
    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
    barrier(); \
} while (0)

#define __save_and_sti(x) \
do { \
    shared_info_t *_shared = HYPERVISOR_shared_info; \
    barrier(); \
    (x) = _shared->vcpu_data[0].evtchn_upcall_mask; \
    _shared->vcpu_data[0].evtchn_upcall_mask = 0; \
    barrier(); /* unmask then check (avoid races) */ \
    if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
        force_evtchn_callback(); \
} while (0)

#define local_irq_save(x)     __save_and_cli(x)
#define local_irq_set(x)      __save_and_sti(x)
#define local_irq_restore(x)  __restore_flags(x)
#define local_irq_disable()   __cli()
#define local_irq_enable()    __sti()

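/*
 * Example (illustrative sketch, not part of the original header): under Xen
 * there is no real 'cli'/'sti'; "disabling interrupts" just sets the event
 * channel upcall mask in the shared info page.  Driver code keeps the same
 * idiom it would use on native hardware:
 *
 *     unsigned long flags;
 *
 *     local_irq_save(flags);      // mask event upcalls, remember old mask
 *     ...touch state shared with the event channel handler...
 *     local_irq_restore(flags);   // unmask; force_evtchn_callback() fires
 *                                 // if an event arrived while masked
 */
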
#ifdef CONFIG_SMP
#error no SMP
extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli() __global_cli()
#define sti() __global_sti()
#define save_flags(x) ((x)=__global_save_flags())
#define restore_flags(x) __global_restore_flags(x)
#define save_and_cli(x) do { save_flags(x); cli(); } while(0);
#define save_and_sti(x) do { save_flags(x); sti(); } while(0);

#else

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)
#define save_and_sti(x) __save_and_sti(x)

#endif

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

extern unsigned long dmi_broken;
extern int is_sony_vaio_laptop;

#define BROKEN_ACPI_Sx          0x0001
#define BROKEN_INIT_AFTER_S1    0x0002
#define BROKEN_PNP_BIOS         0x0004

#endif