linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/synch_bitops.h @ 13341:3040ba0f2d3d

#ifndef __XEN_SYNCH_BITOPS_H__
#define __XEN_SYNCH_BITOPS_H__

/*
 * Copyright 1992, Linus Torvalds.
 * Heavily modified to provide guaranteed strong synchronisation
 * when communicating with Xen or other guest OSes running on other CPUs.
 */

#include <linux/config.h>

#ifdef HAVE_XEN_PLATFORM_COMPAT_H
#include <xen/platform-compat.h>
#endif
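
/* Make the bitmap word at 'addr' available as an lvalue memory operand,
 * so gcc knows the inline asm below reads and writes that location. */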
#define ADDR (*(volatile long *) addr)
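
/*
 * Atomically set, clear, or toggle bit 'nr' at 'addr'.  The lock prefix
 * makes the read-modify-write atomic across CPUs, which is what keeps
 * these safe against Xen or another guest updating the same word
 * concurrently.
 */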
static __inline__ void synch_set_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btsl %1,%0"
        : "+m" (ADDR) : "Ir" (nr) : "memory" );
}

static __inline__ void synch_clear_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btrl %1,%0"
        : "+m" (ADDR) : "Ir" (nr) : "memory" );
}

static __inline__ void synch_change_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btcl %1,%0"
        : "+m" (ADDR) : "Ir" (nr) : "memory" );
}
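
/*
 * The test-and-modify variants rely on bt{s,r,c} leaving the old bit
 * value in the carry flag: "sbbl %0,%0" then materialises CF as 0 or -1,
 * so each function returns non-zero iff the bit was previously set.
 */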
static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btsl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btrl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btcl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}
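
/* Casting 'ptr' through this oversized dummy struct tells gcc that the
 * cmpxchg asm may access a full-width object at that address, without
 * the compiler needing to know the real type behind the void pointer. */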
struct __synch_xchg_dummy { unsigned long a[100]; };
#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))
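
/* Type-generic compare-and-swap: dispatches on sizeof(*ptr) and casts
 * the result back to the pointee type. */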
#define synch_cmpxchg(ptr, old, new) \
((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
                                     (unsigned long)(old), \
                                     (unsigned long)(new), \
                                     sizeof(*(ptr))))

static inline unsigned long __synch_cmpxchg(volatile void *ptr,
                                            unsigned long old,
                                            unsigned long new, int size)
{
    unsigned long prev;
    switch (size) {
    case 1:
        __asm__ __volatile__("lock; cmpxchgb %b1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
    case 2:
        __asm__ __volatile__("lock; cmpxchgw %w1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
#ifdef CONFIG_X86_64
    case 4:
        __asm__ __volatile__("lock; cmpxchgl %k1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
    case 8:
        __asm__ __volatile__("lock; cmpxchgq %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
#else
    case 4:
        __asm__ __volatile__("lock; cmpxchgl %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
#endif
    }
    return old;
}
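
/*
 * Bit tests need no lock prefix: a naturally aligned load is already
 * atomic on x86.  The constant-nr case is open-coded in C so the
 * compiler can fold it; the variable case uses btl.
 */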
static __always_inline int synch_const_test_bit(int nr,
                                                const volatile void * addr)
{
    return ((1UL << (nr & 31)) &
            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "btl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
    return oldbit;
}

#define synch_test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 synch_const_test_bit((nr),(addr)) : \
 synch_var_test_bit((nr),(addr)))
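
/* x86's cmpxchgb/cmpxchgw handle sub-word sizes directly, so the
 * sub-word variant needs no separate emulation and is just an alias. */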
#define synch_cmpxchg_subword synch_cmpxchg

#endif /* __XEN_SYNCH_BITOPS_H__ */
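
For context, a minimal sketch of how a guest typically uses these primitives
against the shared-info event-channel bitmaps (the helper name is
hypothetical; evtchn_mask/evtchn_pending come from Xen's public interface,
whose exact layout may differ between Xen versions):

/* Illustrative only: mask an event channel, then report whether it was
 * already pending.  Atomicity matters because Xen may flip these bits
 * concurrently from another (virtual) CPU. */
static inline int mask_and_test_evtchn(shared_info_t *s, int port)
{
    synch_set_bit(port, &s->evtchn_mask[0]);
    return synch_test_bit(port, &s->evtchn_pending[0]);
}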