ia64/xen-unstable

view linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/system.h @ 13341:3040ba0f2d3d

When booting via xm, only run the bootloader if it's in non-interactive mode:
otherwise we lose the user's named kernel and try to bootload the temporary
file pygrub returned.

Signed-off-by: John Levon <john.levon@sun.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Tue Jan 09 13:24:45 2007 +0000 (2007-01-09)
parents b198bbfeec10
children 4fad820a2233
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <asm/synch_bitops.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/smp_alt.h>

#ifdef __KERNEL__

#ifdef CONFIG_SMP
#define __vcpu_id smp_processor_id()
#else
#define __vcpu_id 0
#endif

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

#define switch_to(prev,next,last) do { \
        unsigned long esi,edi; \
        asm volatile("pushl %%ebp\n\t" \
                "movl %%esp,%0\n\t"	/* save ESP */ \
                "movl %5,%%esp\n\t"	/* restore ESP */ \
                "movl $1f,%1\n\t"	/* save EIP */ \
                "pushl %6\n\t"		/* restore EIP */ \
                "jmp __switch_to\n" \
                "1:\t" \
                "popl %%ebp\n\t" \
                :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
                 "=a" (last),"=S" (esi),"=D" (edi) \
                :"m" (next->thread.esp),"m" (next->thread.eip), \
                 "2" (prev), "d" (next)); \
} while (0)

#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %%dl,%2\n\t" \
        "movb %%dh,%3" \
        :"=&d" (__pr) \
        :"m" (*((addr)+2)), \
         "m" (*((addr)+4)), \
         "m" (*((addr)+7)), \
         "0" (base) \
        ); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %2,%%dh\n\t" \
        "andb $0xf0,%%dh\n\t" \
        "orb %%dh,%%dl\n\t" \
        "movb %%dl,%2" \
        :"=&d" (__lr) \
        :"m" (*(addr)), \
         "m" (*((addr)+6)), \
         "0" (limit) \
        ); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value) \
        asm volatile("\n" \
                "1:\t" \
                "mov %0,%%" #seg "\n" \
                "2:\n" \
                ".section .fixup,\"ax\"\n" \
                "3:\t" \
                "pushl $0\n\t" \
                "popl %%" #seg "\n\t" \
                "jmp 2b\n" \
                ".previous\n" \
                ".section __ex_table,\"a\"\n\t" \
                ".align 4\n\t" \
                ".long 1b,3b\n" \
                ".previous" \
                : :"rm" (value))
/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
        asm volatile("mov %%" #seg ",%0":"=rm" (value))

/*
 * Clear and set 'TS' bit respectively
 */
#define clts() (HYPERVISOR_fpu_taskswitch(0))
#define read_cr0() ({ \
        unsigned int __dummy; \
        __asm__ __volatile__( \
                "movl %%cr0,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy; \
})
#define write_cr0(x) \
        __asm__ __volatile__("movl %0,%%cr0": :"r" (x));

#define read_cr2() \
        (HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2)
#define write_cr2(x) \
        __asm__ __volatile__("movl %0,%%cr2": :"r" (x));

#define read_cr3() ({ \
        unsigned int __dummy; \
        __asm__ ( \
                "movl %%cr3,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy = xen_cr3_to_pfn(__dummy); \
        mfn_to_pfn(__dummy) << PAGE_SHIFT; \
})
#define write_cr3(x) ({ \
        unsigned int __dummy = pfn_to_mfn((x) >> PAGE_SHIFT); \
        __dummy = xen_pfn_to_cr3(__dummy); \
        __asm__ __volatile__("movl %0,%%cr3": :"r" (__dummy)); \
})

#define read_cr4() ({ \
        unsigned int __dummy; \
        __asm__( \
                "movl %%cr4,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy; \
})

#define read_cr4_safe() ({ \
        unsigned int __dummy; \
        /* This could fault if %cr4 does not exist */ \
        __asm__("1: movl %%cr4, %0 \n" \
                "2: \n" \
                ".section __ex_table,\"a\" \n" \
                ".long 1b,2b \n" \
                ".previous \n" \
                : "=r" (__dummy): "0" (0)); \
        __dummy; \
})

#define write_cr4(x) \
        __asm__ __volatile__("movl %0,%%cr4": :"r" (x));
#define stts() (HYPERVISOR_fpu_taskswitch(1))

#endif	/* __KERNEL__ */

#define wbinvd() \
        __asm__ __volatile__ ("wbinvd": : :"memory");

static inline unsigned long get_limit(unsigned long segment)
{
        unsigned long __limit;
        __asm__("lsll %1,%0"
                :"=r" (__limit):"r" (segment));
        return __limit+1;
}

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))

#ifdef CONFIG_X86_CMPXCHG64

/*
 * The semantics of cmpxchg8b are a bit strange, which is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
                unsigned int low, unsigned int high)
{
        __asm__ __volatile__ (
                "\n1:\t"
                "movl (%0), %%eax\n\t"
                "movl 4(%0), %%edx\n\t"
                "lock cmpxchg8b (%0)\n\t"
                "jnz 1b"
                : /* no outputs */
                : "D"(ptr),
                  "b"(low),
                  "c"(high)
                : "ax","dx","memory");
}

static inline void __set_64bit_constant (unsigned long long *ptr,
                unsigned long long value)
{
        __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}

#define ll_low(x)	*(((unsigned int*)&(x))+0)
#define ll_high(x)	*(((unsigned int*)&(x))+1)

static inline void __set_64bit_var (unsigned long long *ptr,
                unsigned long long value)
{
        __set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
        (__builtin_constant_p(value) ? \
         __set_64bit_constant(ptr, value) : \
         __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
        (__builtin_constant_p(value) ? \
         __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
         __set_64bit(ptr, ll_low(value), ll_high(value)) )

#endif
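
/*
 * Illustrative sketch (not part of the original header): set_64bit()
 * gives a tear-free 64-bit store on 32-bit x86 by looping on lock
 * cmpxchg8b, as the comment above describes.  A hypothetical caller
 * publishing a 64-bit timestamp might use it like this.
 */
#if 0	/* example only */
static unsigned long long example_stamp;

static inline void example_publish_stamp(unsigned long long now)
{
        /* concurrent 64-bit readers see either the old or the new value, never a mix */
        set_64bit(&example_stamp, now);
}
#endif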

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 *	   but strictly speaking the primitive is invalid: *ptr is an
 *	   output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
        case 1:
                __asm__ __volatile__("xchgb %b0,%1"
                        :"=q" (x)
                        :"m" (*__xg(ptr)), "0" (x)
                        :"memory");
                break;
        case 2:
                __asm__ __volatile__("xchgw %w0,%1"
                        :"=r" (x)
                        :"m" (*__xg(ptr)), "0" (x)
                        :"memory");
                break;
        case 4:
                __asm__ __volatile__("xchgl %0,%1"
                        :"=r" (x)
                        :"m" (*__xg(ptr)), "0" (x)
                        :"memory");
                break;
        }
        return x;
}
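
/*
 * Illustrative sketch (not part of the original header): xchg() is the
 * usual way to swap a word atomically and observe the previous value,
 * e.g. for a hypothetical hand-rolled test-and-set flag.
 */
#if 0	/* example only */
static volatile unsigned long example_flag;

static inline int example_try_claim(void)
{
        /* non-zero if we were the one who set the flag */
        return xchg(&example_flag, 1UL) == 0;
}
#endif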

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr,o,n) \
        ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \
                (unsigned long)(n),sizeof(*(ptr))))
#endif

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                __asm__ __volatile__(LOCK "cmpxchgb %b1,%2"
                        : "=a"(prev)
                        : "q"(new), "m"(*__xg(ptr)), "0"(old)
                        : "memory");
                return prev;
        case 2:
                __asm__ __volatile__(LOCK "cmpxchgw %w1,%2"
                        : "=a"(prev)
                        : "r"(new), "m"(*__xg(ptr)), "0"(old)
                        : "memory");
                return prev;
        case 4:
                __asm__ __volatile__(LOCK "cmpxchgl %1,%2"
                        : "=a"(prev)
                        : "r"(new), "m"(*__xg(ptr)), "0"(old)
                        : "memory");
                return prev;
        }
        return old;
}
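
/*
 * Illustrative sketch (not part of the original header): per the comment
 * above, success is detected by comparing the returned value with the
 * expected old value.  A hypothetical lock-free counter increment:
 */
#if 0	/* example only */
static inline void example_atomic_inc(volatile unsigned long *ctr)
{
        unsigned long old, seen;

        do {
                old  = *ctr;
                seen = cmpxchg(ctr, old, old + 1);
        } while (seen != old);	/* lost a race: retry against the fresh value */
}
#endif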

#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386.  It may be necessary
 * to simulate the cmpxchg on the 80386 CPU.  For that purpose we define
 * a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
                unsigned long new, int size)
{
        switch (size) {
        case 1:
                return cmpxchg_386_u8(ptr, old, new);
        case 2:
                return cmpxchg_386_u16(ptr, old, new);
        case 4:
                return cmpxchg_386_u32(ptr, old, new);
        }
        return old;
}

#define cmpxchg(ptr,o,n) \
({ \
        __typeof__(*(ptr)) __ret; \
        if (likely(boot_cpu_data.x86 > 3)) \
                __ret = __cmpxchg((ptr), (unsigned long)(o), \
                        (unsigned long)(n), sizeof(*(ptr))); \
        else \
                __ret = cmpxchg_386((ptr), (unsigned long)(o), \
                        (unsigned long)(n), sizeof(*(ptr))); \
        __ret; \
})
#endif

#ifdef CONFIG_X86_CMPXCHG64

static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
                unsigned long long new)
{
        unsigned long long prev;
        __asm__ __volatile__(LOCK "cmpxchg8b %3"
                : "=A"(prev)
                : "b"((unsigned long)new),
                  "c"((unsigned long)(new >> 32)),
                  "m"(*__xg(ptr)),
                  "0"(old)
                : "memory");
        return prev;
}

#define cmpxchg64(ptr,o,n) \
        ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o), \
                (unsigned long long)(n)))

#endif
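
/*
 * Illustrative sketch (not part of the original header): cmpxchg64()
 * follows the same convention as cmpxchg(), only on a 64-bit quantity,
 * e.g. for a hypothetical atomic update of a packed 64-bit descriptor.
 */
#if 0	/* example only */
static inline int example_update_desc(volatile unsigned long long *desc,
                unsigned long long expected,
                unsigned long long replacement)
{
        /* non-zero on success, i.e. nobody changed *desc under us */
        return cmpxchg64(desc, expected, replacement) == expected;
}
#endif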

#ifdef __KERNEL__
struct alt_instr {
        __u8 *instr;		/* original instruction */
        __u8 *replacement;
        __u8  cpuid;		/* cpuid bit set for replacement */
        __u8  instrlen;		/* length of original instruction */
        __u8  replacementlen;	/* length of new instruction, <= instrlen */
        __u8  pad;
};
#endif

/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows the use of optimized instructions even on generic binary
 * kernels.
 *
 * The length of oldinstr must be greater than or equal to the length of
 * newinstr; oldinstr can be padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without volatile and memory clobber.
 */
#define alternative(oldinstr, newinstr, feature) \
        asm volatile ("661:\n\t" oldinstr "\n662:\n" \
                ".section .altinstructions,\"a\"\n" \
                " .align 4\n" \
                " .long 661b\n"			/* label */ \
                " .long 663f\n"			/* new instruction */ \
                " .byte %c0\n"			/* feature bit */ \
                " .byte 662b-661b\n"		/* sourcelen */ \
                " .byte 664f-663f\n"		/* replacementlen */ \
                ".previous\n" \
                ".section .altinstr_replacement,\"ax\"\n" \
                "663:\n\t" newinstr "\n664:\n"	/* replacement */ \
                ".previous" :: "i" (feature) : "memory")

/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * Best is to use constraints that are fixed size (like (%1) ... "r")
 * If you use variable-sized constraints like "m" or "g" in the
 * replacement make sure to pad to the worst-case length.
 */
#define alternative_input(oldinstr, newinstr, feature, input...) \
        asm volatile ("661:\n\t" oldinstr "\n662:\n" \
                ".section .altinstructions,\"a\"\n" \
                " .align 4\n" \
                " .long 661b\n"			/* label */ \
                " .long 663f\n"			/* new instruction */ \
                " .byte %c0\n"			/* feature bit */ \
                " .byte 662b-661b\n"		/* sourcelen */ \
                " .byte 664f-663f\n"		/* replacementlen */ \
                ".previous\n" \
                ".section .altinstr_replacement,\"ax\"\n" \
                "663:\n\t" newinstr "\n664:\n"	/* replacement */ \
                ".previous" :: "i" (feature), ##input)

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
 * nop for these.
 */

/*
 * Actually only lfence would be needed for mb() because all stores done
 * by the kernel should be already ordered. But keep a full barrier for now.
 */

#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_X86_OOSTORE
/* Actually there are no OOO-store-capable CPUs for now that do SSE,
   but make it already a possibility. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#endif
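
/*
 * Illustrative sketch (not part of the original header): the classic
 * pairing of the barriers above.  A hypothetical producer publishes data
 * and then a flag; the consumer checks the flag and then reads the data.
 * wmb()/rmb() keep the two stores and the two loads in program order.
 */
#if 0	/* example only */
static int example_data;
static volatile int example_ready;

static inline void example_produce(int v)
{
        example_data = v;
        wmb();			/* order the data store before the flag store */
        example_ready = 1;
}

static inline int example_consume(int *out)
{
        if (!example_ready)
                return 0;
        rmb();			/* order the flag load before the data load */
        *out = example_data;
        return 1;
}
#endif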

#ifdef CONFIG_SMP
#define smp_wmb()	wmb()
#if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE)
#define smp_alt_mb(instr) \
        __asm__ __volatile__("6667:\nnop\nnop\nnop\nnop\nnop\nnop\n6668:\n" \
                ".section __smp_alternatives,\"a\"\n" \
                ".long 6667b\n" \
                ".long 6673f\n" \
                ".previous\n" \
                ".section __smp_replacements,\"a\"\n" \
                "6673:.byte 6668b-6667b\n" \
                ".byte 6670f-6669f\n" \
                ".byte 6671f-6670f\n" \
                ".byte 0\n" \
                ".byte %c0\n" \
                "6669:lock;addl $0,0(%%esp)\n" \
                "6670:" instr "\n" \
                "6671:\n" \
                ".previous\n" \
                : \
                : "i" (X86_FEATURE_XMM2) \
                : "memory")
#define smp_rmb() smp_alt_mb("lfence")
#define smp_mb()  smp_alt_mb("mfence")
#define set_mb(var, value) do { \
        unsigned long __set_mb_temp; \
        __asm__ __volatile__("6667:movl %1, %0\n6668:\n" \
                ".section __smp_alternatives,\"a\"\n" \
                ".long 6667b\n" \
                ".long 6673f\n" \
                ".previous\n" \
                ".section __smp_replacements,\"a\"\n" \
                "6673: .byte 6668b-6667b\n" \
                ".byte 6670f-6669f\n" \
                ".byte 0\n" \
                ".byte 6671f-6670f\n" \
                ".byte -1\n" \
                "6669: xchg %1, %0\n" \
                "6670:movl %1, %0\n" \
                "6671:\n" \
                ".previous\n" \
                : "=m" (var), "=r" (__set_mb_temp) \
                : "1" (value) \
                : "memory"); } while (0)
#else
#define smp_rmb()	rmb()
#define smp_mb()	mb()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

#define set_wmb(var, value) do { var = value; wmb(); } while (0)

/* interrupt control.. */

/*
 * The use of 'barrier' in the following reflects their use as local-lock
 * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
 * critical operations are executed. All critical operations must complete
 * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
 * includes these barriers, for example.
 */

#define __cli() \
do { \
        vcpu_info_t *_vcpu; \
        preempt_disable(); \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
        _vcpu->evtchn_upcall_mask = 1; \
        preempt_enable_no_resched(); \
        barrier(); \
} while (0)

#define __sti() \
do { \
        vcpu_info_t *_vcpu; \
        barrier(); \
        preempt_disable(); \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
        _vcpu->evtchn_upcall_mask = 0; \
        barrier(); /* unmask then check (avoid races) */ \
        if (unlikely(_vcpu->evtchn_upcall_pending)) \
                force_evtchn_callback(); \
        preempt_enable(); \
} while (0)

#define __save_flags(x) \
do { \
        vcpu_info_t *_vcpu; \
        preempt_disable(); \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
        (x) = _vcpu->evtchn_upcall_mask; \
        preempt_enable(); \
} while (0)

#define __restore_flags(x) \
do { \
        vcpu_info_t *_vcpu; \
        barrier(); \
        preempt_disable(); \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
        if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
                barrier(); /* unmask then check (avoid races) */ \
                if (unlikely(_vcpu->evtchn_upcall_pending)) \
                        force_evtchn_callback(); \
                preempt_enable(); \
        } else \
                preempt_enable_no_resched(); \
} while (0)

void safe_halt(void);
void halt(void);

#define __save_and_cli(x) \
do { \
        vcpu_info_t *_vcpu; \
        preempt_disable(); \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
        (x) = _vcpu->evtchn_upcall_mask; \
        _vcpu->evtchn_upcall_mask = 1; \
        preempt_enable_no_resched(); \
        barrier(); \
} while (0)

#define local_irq_save(x)	__save_and_cli(x)
#define local_irq_restore(x)	__restore_flags(x)
#define local_save_flags(x)	__save_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()
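
/*
 * Illustrative sketch (not part of the original header): the interrupt
 * control macros above virtualize the interrupt flag through the shared
 * vcpu_info event-channel mask rather than cli/sti.  A hypothetical
 * critical section therefore looks exactly as it does on native i386:
 */
#if 0	/* example only */
static inline void example_critical_update(unsigned long *counter)
{
        unsigned long flags;

        local_irq_save(flags);		/* masks event-channel upcalls for this vcpu */
        (*counter)++;			/* no upcall can interleave here */
        local_irq_restore(flags);	/* may end up calling force_evtchn_callback() */
}
#endif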

/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
#define irqs_disabled() \
({      int ___x; \
        vcpu_info_t *_vcpu; \
        preempt_disable(); \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
        ___x = (_vcpu->evtchn_upcall_mask != 0); \
        preempt_enable_no_resched(); \
        ___x; })

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 */
static inline void sched_cacheflush(void)
{
        wbinvd();
}

extern unsigned long arch_align_stack(unsigned long sp);

#endif