ia64/linux-2.6.18-xen.hg

view include/asm-h8300/atomic.h @ 452:c7ed6fe5dca0

kexec: don't initialise regions in reserve_memory()

There is no need to initialise efi_memmap_res and boot_param_res in
reserve_memory() for the initial xen domain, as this is done in
machine_kexec_setup_resources() using values from the kexec hypercall.

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 28 10:55:18 2008 +0000 (2008-02-28)
parents 831230e53067

#ifndef __ARCH_H8300_ATOMIC__
#define __ARCH_H8300_ATOMIC__

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 */

typedef struct { int counter; } atomic_t;
#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))
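
/*
 * Usage sketch (hypothetical caller, not part of this header):
 *
 *	static atomic_t nusers = ATOMIC_INIT(0);
 *	...
 *	atomic_set(&nusers, 1);
 *	if (atomic_read(&nusers) > 0)
 *		...
 */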

#include <asm/system.h>
#include <linux/kernel.h>
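
/*
 * The H8/300 port is uniprocessor-only, so "atomic" here means atomic
 * with respect to interrupts: each read-modify-write below runs with
 * interrupts disabled via local_irq_save()/local_irq_restore().
 */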

static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter += i;
	local_irq_restore(flags);
	return ret;
}

#define atomic_add(i, v)		atomic_add_return(i, v)
#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter -= i;
	local_irq_restore(flags);
	return ret;
}

#define atomic_sub(i, v) atomic_sub_return(i, v)

static __inline__ int atomic_inc_return(atomic_t *v)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	v->counter++;
	ret = v->counter;
	local_irq_restore(flags);
	return ret;
}

#define atomic_inc(v) atomic_inc_return(v)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns true if the result is
 * zero, or false for all other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

static __inline__ int atomic_dec_return(atomic_t *v)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	--v->counter;
	ret = v->counter;
	local_irq_restore(flags);
	return ret;
}

#define atomic_dec(v) atomic_dec_return(v)

static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	--v->counter;
	ret = v->counter;
	local_irq_restore(flags);
	return ret == 0;
}
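
/*
 * Typical refcounting pattern built on the primitives above
 * (hypothetical caller, not part of this header):
 *
 *	atomic_inc(&obj->refcnt);
 *	...
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		free_object(obj);
 */

/*
 * atomic_cmpxchg - compare and exchange
 * @v: pointer of type atomic_t
 * @old: expected value
 * @new: value to store if @v holds @old
 *
 * Returns the value of @v before the operation; the store is
 * performed only when that value equals @old.
 */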
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);
	return ret;
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
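
/*
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to @v...
 * @u: ...unless @v is equal to @u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */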
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	local_irq_restore(flags);
	return ret != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
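
/*
 * The mask operations below are open-coded H8/300 assembly:
 * "stc ccr,r1l" saves the condition code register, "orc #0x80,ccr"
 * sets the I bit to mask interrupts, the and/or is applied to the
 * operand through er0, and "ldc r1l,ccr" restores the saved CCR,
 * re-enabling interrupts if they were enabled on entry.
 */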

static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("stc ccr,r1l\n\t"
			     "orc #0x80,ccr\n\t"
			     "mov.l %0,er0\n\t"
			     "and.l %1,er0\n\t"
			     "mov.l er0,%0\n\t"
			     "ldc r1l,ccr"
			     : "=m" (*v) : "g" (~(mask)) : "er0", "er1");
}

static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("stc ccr,r1l\n\t"
			     "orc #0x80,ccr\n\t"
			     "mov.l %0,er0\n\t"
			     "or.l %1,er0\n\t"
			     "mov.l er0,%0\n\t"
			     "ldc r1l,ccr"
			     : "=m" (*v) : "g" (mask) : "er0", "er1");
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>
#endif /* __ARCH_H8300_ATOMIC__ */