view linux-2.4.30-xen-sparse/include/asm-xen/system.h @ 5517:10e9028c8e3d

bitkeeper revision 1.1718.1.10 (42b7b19aqOS_1M8I4pIOFjiTPYWV-g)

Merge bk://xenbits.xensource.com/xen-unstable.bk
into spot.cl.cam.ac.uk:C:/Documents and Settings/iap10/xen-unstable.bk
author iap10@spot.cl.cam.ac.uk
date Tue Jun 21 06:20:10 2005 +0000 (2005-06-21)
parents 85fcf3b1b7a5
children 56a63f9f378f
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/synch_bitops.h>
#include <asm/segment.h>
#include <asm/hypervisor.h>
#include <asm/evtchn.h>

#ifdef __KERNEL__
struct task_struct;
extern void FASTCALL(__switch_to(struct task_struct *prev,
                                 struct task_struct *next));

#define prepare_to_switch() \
do { \
    struct thread_struct *__t = &current->thread; \
    __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (*(int *)&__t->fs) ); \
    __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (*(int *)&__t->gs) ); \
} while (0)

#define switch_to(prev,next,last) do { \
    asm volatile("pushl %%esi\n\t" \
                 "pushl %%edi\n\t" \
                 "pushl %%ebp\n\t" \
                 "movl %%esp,%0\n\t" /* save ESP */ \
                 "movl %3,%%esp\n\t" /* restore ESP */ \
                 "movl $1f,%1\n\t" /* save EIP */ \
                 "pushl %4\n\t" /* restore EIP */ \
                 "jmp __switch_to\n" \
                 "1:\t" \
                 "popl %%ebp\n\t" \
                 "popl %%edi\n\t" \
                 "popl %%esi\n\t" \
                 :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
                  "=b" (last) \
                 :"m" (next->thread.esp),"m" (next->thread.eip), \
                  "a" (prev), "d" (next), \
                  "b" (prev)); \
} while (0)
#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %%dl,%2\n\t" \
        "movb %%dh,%3" \
        :"=&d" (__pr) \
        :"m" (*((addr)+2)), \
         "m" (*((addr)+4)), \
         "m" (*((addr)+7)), \
         "0" (base) \
        ); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %2,%%dh\n\t" \
        "andb $0xf0,%%dh\n\t" \
        "orb %%dh,%%dl\n\t" \
        "movb %%dl,%2" \
        :"=&d" (__lr) \
        :"m" (*(addr)), \
         "m" (*((addr)+6)), \
         "0" (limit) \
        ); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )

static inline unsigned long _get_base(char * addr)
{
    unsigned long __base;
    __asm__("movb %3,%%dh\n\t"
            "movb %2,%%dl\n\t"
            "shll $16,%%edx\n\t"
            "movw %1,%%dx"
            :"=&d" (__base)
            :"m" (*((addr)+2)),
             "m" (*((addr)+4)),
             "m" (*((addr)+7)));
    return __base;
}

#define get_base(ldt) _get_base( ((char *)&(ldt)) )
/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg,value) \
    asm volatile("\n" \
        "1:\t" \
        "movl %0,%%" #seg "\n" \
        "2:\n" \
        ".section .fixup,\"ax\"\n" \
        "3:\t" \
        "pushl $0\n\t" \
        "popl %%" #seg "\n\t" \
        "jmp 2b\n" \
        ".previous\n" \
        ".section __ex_table,\"a\"\n\t" \
        ".align 4\n\t" \
        ".long 1b,3b\n" \
        ".previous" \
        : :"m" (*(unsigned int *)&(value)))
/* NB. 'clts' is done for us by Xen during virtual trap. */
#define clts() ((void)0)
#define stts() (HYPERVISOR_fpu_taskswitch(1))

#endif /* __KERNEL__ */
/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 *
 * Taken from 2.6 for Xen.
 */
static inline unsigned long __ffs(unsigned long word)
{
    __asm__("bsfl %1,%0"
            :"=r" (word)
            :"rm" (word));
    return word;
}
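
/*
 * Usage sketch (illustrative; example_lowest_pending is hypothetical):
 * callers must check for zero before using __ffs(), since its result is
 * undefined when no bit is set.
 */
static inline int example_lowest_pending(unsigned long pending)
{
    return pending ? (int)__ffs(pending) : -1;
}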
static inline unsigned long get_limit(unsigned long segment)
{
    unsigned long __limit;
    __asm__("lsll %1,%0"
            :"=r" (__limit):"r" (segment));
    return __limit+1;
}

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
/*
 * The semantics of CMPXCHG8B are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
                                unsigned int low, unsigned int high)
{
    __asm__ __volatile__ (
        "\n1:\t"
        "movl (%0), %%eax\n\t"
        "movl 4(%0), %%edx\n\t"
        "lock cmpxchg8b (%0)\n\t"
        "jnz 1b"
        : /* no outputs */
        : "D"(ptr),
          "b"(low),
          "c"(high)
        : "ax","dx","memory");
}

static inline void __set_64bit_constant (unsigned long long *ptr,
                                         unsigned long long value)
{
    __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}

#define ll_low(x)  *(((unsigned int*)&(x))+0)
#define ll_high(x) *(((unsigned int*)&(x))+1)

static inline void __set_64bit_var (unsigned long long *ptr,
                                    unsigned long long value)
{
    __set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
    (__builtin_constant_p(value) ? \
     __set_64bit_constant(ptr, value) : \
     __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
    (__builtin_constant_p(value) ? \
     __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
     __set_64bit(ptr, ll_low(value), ll_high(value)) )
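
/*
 * Usage sketch (illustrative; example_publish_u64 is hypothetical):
 * set_64bit() stores the full 64-bit value atomically, so a concurrent
 * 64-bit reader never observes a torn, half-updated result.
 */
static inline void example_publish_u64(unsigned long long *slot,
                                       unsigned long long val)
{
    set_64bit(slot, val);
}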
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *         but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}
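
/*
 * Usage sketch (illustrative; example_test_and_set_flag is hypothetical):
 * xchg() atomically stores the new value and returns the old one, so a
 * zero return here means this caller set the flag first.
 */
static inline int example_test_and_set_flag(volatile unsigned int *flag)
{
    return (int)xchg(flag, 1);
}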
/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
    unsigned long prev;
    switch (size) {
    case 1:
        __asm__ __volatile__("lock cmpxchgb %b1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
        return prev;
    case 2:
        __asm__ __volatile__("lock cmpxchgw %w1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
        return prev;
    case 4:
        __asm__ __volatile__("lock cmpxchgl %1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
        return prev;
    }
    return old;
}

#define cmpxchg(ptr,o,n)\
    ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
                                   (unsigned long)(n),sizeof(*(ptr))))
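
/*
 * Usage sketch (illustrative; example_atomic_add_return is hypothetical):
 * a lock-free read-modify-write retries cmpxchg() until no other CPU has
 * changed the value between the read and the update.
 */
static inline unsigned long example_atomic_add_return(unsigned long *p,
                                                      unsigned long delta)
{
    unsigned long old, new;

    do {
        old = *p;
        new = old + delta;
    } while (cmpxchg(p, old, new) != old);

    return new;
}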
#else
/* Compiling for a 386 proper. Is it worth implementing via cli/sti? */
#endif
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
 * nop for these.
 */

#define mb()  __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb() mb()

#ifdef CONFIG_X86_OOSTORE
#define wmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#else
#define wmb() __asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()  mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()  barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

#define set_wmb(var, value) do { var = value; wmb(); } while (0)
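
/*
 * Usage sketch (illustrative; the example_* helpers are hypothetical):
 * a producer orders its data store before the flag store with wmb();
 * the consumer orders the flag load before the data load with rmb().
 */
static inline void example_produce(int *data, volatile int *ready, int v)
{
    *data = v;
    wmb();          /* make the data visible before the flag */
    *ready = 1;
}

static inline int example_consume(int *data, volatile int *ready)
{
    while (!*ready)
        barrier();  /* spin until the producer sets the flag */
    rmb();          /* see the flag before reading the data */
    return *data;
}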
#define safe_halt() ((void)0)

/*
 * The use of 'barrier' in the following reflects their use as local-lock
 * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
 * critical operations are executed. All critical operations must complete
 * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
 * includes these barriers, for example.
 */

#define __cli() \
do { \
    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
    barrier(); \
} while (0)

#define __sti() \
do { \
    shared_info_t *_shared = HYPERVISOR_shared_info; \
    barrier(); \
    _shared->vcpu_data[0].evtchn_upcall_mask = 0; \
    barrier(); /* unmask then check (avoid races) */ \
    if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
        force_evtchn_callback(); \
} while (0)

#define __save_flags(x) \
do { \
    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
} while (0)

#define __restore_flags(x) \
do { \
    shared_info_t *_shared = HYPERVISOR_shared_info; \
    barrier(); \
    if ( (_shared->vcpu_data[0].evtchn_upcall_mask = x) == 0 ) { \
        barrier(); /* unmask then check (avoid races) */ \
        if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
            force_evtchn_callback(); \
    } \
} while (0)
#define __save_and_cli(x) \
do { \
    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
    barrier(); \
} while (0)

#define __save_and_sti(x) \
do { \
    shared_info_t *_shared = HYPERVISOR_shared_info; \
    barrier(); \
    (x) = _shared->vcpu_data[0].evtchn_upcall_mask; \
    _shared->vcpu_data[0].evtchn_upcall_mask = 0; \
    barrier(); /* unmask then check (avoid races) */ \
    if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
        force_evtchn_callback(); \
} while (0)

#define local_irq_save(x)    __save_and_cli(x)
#define local_irq_set(x)     __save_and_sti(x)
#define local_irq_restore(x) __restore_flags(x)
#define local_irq_disable()  __cli()
#define local_irq_enable()   __sti()
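
/*
 * Usage sketch (illustrative; example_update_counter is hypothetical):
 * a short critical section masks event-channel upcalls on this vcpu,
 * just as native code would disable local interrupts.
 */
static inline void example_update_counter(unsigned long *counter)
{
    unsigned long flags;

    local_irq_save(flags);      /* mask upcalls, remember previous mask */
    (*counter)++;               /* critical section */
    local_irq_restore(flags);   /* restore mask; deliver pending events */
}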
#ifdef CONFIG_SMP
#error no SMP
extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli() __global_cli()
#define sti() __global_sti()
#define save_flags(x) ((x)=__global_save_flags())
#define restore_flags(x) __global_restore_flags(x)
#define save_and_cli(x) do { save_flags(x); cli(); } while(0)
#define save_and_sti(x) do { save_flags(x); sti(); } while(0)

#else

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)
#define save_and_sti(x) __save_and_sti(x)

#endif
/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

extern unsigned long dmi_broken;
extern int is_sony_vaio_laptop;

#define BROKEN_ACPI_Sx          0x0001
#define BROKEN_INIT_AFTER_S1    0x0002
#define BROKEN_PNP_BIOS         0x0004

#endif /* __ASM_SYSTEM_H */