ia64/linux-2.6.18-xen.hg: include/asm-m68k/atomic.h @ 452:c7ed6fe5dca0

#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <asm/system.h>	/* local_irq_XXX() */

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

typedef struct { int counter; } atomic_t;
#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))
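/*
 * Note (editorial): atomic_read()/atomic_set() are plain int loads and
 * stores; on these uniprocessor systems an aligned int access cannot be
 * torn by an interrupt, so no special instruction sequence is needed.
 */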
static inline void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "id" (i));
}

static inline void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "id" (i));
}

static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
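/*
 * Illustrative sketch (not part of the original header): the primitives
 * above are typically paired for the "resource counting" the top comment
 * mentions. The names example_users, example_get and example_put are
 * hypothetical, chosen only for this example.
 */
static atomic_t example_users = ATOMIC_INIT(0);

static inline void example_get(void)
{
	atomic_inc(&example_users);
}

static inline int example_put(void)
{
	/* non-zero once the last user has dropped its reference */
	return atomic_dec_and_test(&example_users);
}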
#ifdef CONFIG_RMW_INSNS
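/*
 * With the "casl" (compare-and-swap) instruction available, the
 * *_return variants are built as a retry loop: read the counter,
 * compute the new value, and casl it back, retrying if another
 * context modified the counter in between.
 */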
static inline int atomic_add_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	addl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	subl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */
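/*
 * Without casl, these (uniprocessor-only) systems get atomicity by
 * disabling local interrupts around the read-modify-write sequence.
 */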
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t += i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t -= i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

#endif /* !CONFIG_RMW_INSNS */

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v) : "g" (i));
	return c != 0;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v) : "g" (i));
	return c != 0;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
}
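/*
 * Example: given "unsigned long flags", atomic_set_mask(0x4, &flags)
 * sets bit 2 and atomic_clear_mask(0x4, &flags) clears it again.
 */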
#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	for (;;) {						\
		if (unlikely(c == (u)))				\
			break;					\
		old = atomic_cmpxchg((v), c, c + (a));		\
		if (likely(old == c))				\
			break;					\
		c = old;					\
	}							\
	c != (u);						\
})

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
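/*
 * Illustrative sketch (not part of the original header): the usual idiom
 * for taking a reference only while an object is still live, i.e. its
 * count has not already dropped to zero. The name example_try_get is
 * hypothetical.
 */
static inline int example_try_get(atomic_t *refcount)
{
	/* increment only if the count is non-zero; 0 means "going away" */
	return atomic_inc_not_zero(refcount);
}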
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>
#endif /* __ARCH_M68K_ATOMIC__ */