ia64/linux-2.6.18-xen.hg

view include/asm-sh64/atomic.h @ 452:c7ed6fe5dca0

kexec: don't initialise regions in reserve_memory()

There is no need to initialise efi_memmap_res and boot_param_res in
reserve_memory() for the initial xen domain, as they are already
initialised in machine_kexec_setup_resources() using values from the
kexec hypercall.

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 28 10:55:18 2008 +0000 (2008-02-28)
parents 831230e53067
children
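To make the changeset description above concrete, here is a rough sketch of the shape of machine_kexec_setup_resources(): it queries the kexec hypercall for each machine-address range and fills in the corresponding struct resource, which is why reserve_memory() need not pre-initialise them. The header path, range constants, and error handling below are assumptions about the Xen kexec interface, not code from this tree.

/* Sketch only -- the range-constant names are assumptions. */
#include <linux/ioport.h>
#include <xen/interface/kexec.h>

extern struct resource efi_memmap_res;
extern struct resource boot_param_res;

static void kexec_setup_res(struct resource *res, int range_id)
{
	xen_kexec_range_t range = { .range = range_id };

	/* Ask the hypervisor for the machine address range of this region. */
	if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range) == 0) {
		res->start = range.start;
		res->end = range.start + range.size - 1;
	}
}

/* Called for the initial domain; reserve_memory() need not touch these. */
static void machine_kexec_setup_resources_sketch(void)
{
	kexec_setup_res(&efi_memmap_res, KEXEC_RANGE_MA_EFI_MEMMAP);
	kexec_setup_res(&boot_param_res, KEXEC_RANGE_MA_BOOT_PARAM);
}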
line source
#ifndef __ASM_SH64_ATOMIC_H
#define __ASM_SH64_ATOMIC_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/atomic.h
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2003 Paul Mundt
 */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))
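/*
 * Illustrative usage (editor's example, not part of the original
 * header; MAX_REQUESTS is hypothetical):
 *
 *	static atomic_t nr_requests = ATOMIC_INIT(0);
 *
 *	atomic_inc(&nr_requests);
 *	if (atomic_read(&nr_requests) > MAX_REQUESTS)
 *		throttle();
 */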
#include <asm/system.h>

/*
 * This header provides no load-locked/store-conditional sequences;
 * instead, each operation is made atomic by disabling interrupts
 * around the read-modify-write of the counter.  That serialises
 * against interrupt handlers on a uniprocessor, which is what this
 * implementation targets.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	v->counter += i;
	local_irq_restore(flags);
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	v->counter -= i;
	local_irq_restore(flags);
}
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int temp;
	unsigned long flags;

	local_irq_save(flags);
	temp = v->counter;
	temp += i;
	v->counter = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	int temp;
	unsigned long flags;

	local_irq_save(flags);
	temp = v->counter;
	temp -= i;
	v->counter = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v)	atomic_add(1, (v))
#define atomic_dec(v)	atomic_sub(1, (v))
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}
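/*
 * Illustrative usage (editor's example, not part of the original
 * header): atomic_cmpxchg() returns the value observed before any
 * update, so callers typically retry in a loop until the compare
 * succeeds, e.g. a saturating increment:
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(&v);
 *		new = (old == INT_MAX) ? old : old + 1;
 *	} while (atomic_cmpxchg(&v, old, new) != old);
 */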
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	local_irq_restore(flags);

	return ret != u;
}
#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	v->counter &= ~mask;
	local_irq_restore(flags);
}

static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	v->counter |= mask;
	local_irq_restore(flags);
}
/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>

#endif /* __ASM_SH64_ATOMIC_H */
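As a usage note (editor's addition, not part of this changeset): the canonical consumer of this API is reference counting, where atomic_inc() takes a reference and atomic_dec_and_test() detects the final put. A minimal sketch, assuming a hypothetical my_object type:

#include <linux/slab.h>
#include <asm/atomic.h>

struct my_object {
	atomic_t refcount;
	/* ... payload ... */
};

static void my_object_get(struct my_object *obj)
{
	atomic_inc(&obj->refcount);
}

static void my_object_put(struct my_object *obj)
{
	/* Free only when the count drops to zero. */
	if (atomic_dec_and_test(&obj->refcount))
		kfree(obj);
}

/*
 * Take a reference only if the object is still live; relies on
 * atomic_add_unless() returning non-zero when the add happened.
 */
static int my_object_get_unless_zero(struct my_object *obj)
{
	return atomic_inc_not_zero(&obj->refcount);
}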