ia64/linux-2.6.18-xen.hg

view include/asm-v850/atomic.h @ 452:c7ed6fe5dca0

kexec: don't initialise regions in reserve_memory()

There is no need to initialise efi_memmap_res and boot_param_res in
reserve_memory() for the initial xen domain, as this is already done in
machine_kexec_setup_resources() using values from the kexec hypercall.

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 28 10:55:18 2008 +0000 (2008-02-28)
parents 831230e53067
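The patch described above touches the ia64 kexec setup code, not this header. As a rough, purely illustrative sketch of the shape of that change (none of the affected code is shown on this page, so the field accesses and values below are assumptions, not the actual ia64/Xen sources):

/*
 * Illustrative only -- not the actual ia64/Xen code.  Previously
 * reserve_memory() did roughly this for the initial xen domain:
 */
	efi_memmap_res.start = ...;	/* from the local boot parameters */
	efi_memmap_res.end   = ...;
	boot_param_res.start = ...;
	boot_param_res.end   = ...;
/*
 * With this change reserve_memory() leaves the two resources untouched,
 * because machine_kexec_setup_resources() sets the same fields from the
 * address ranges returned by the kexec hypercall.
 */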
/*
 * include/asm-v850/atomic.h -- Atomic operations
 *
 * Copyright (C) 2001,02 NEC Corporation
 * Copyright (C) 2001,02 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 */

#ifndef __V850_ATOMIC_H__
#define __V850_ATOMIC_H__

#include <asm/system.h>

#ifdef CONFIG_SMP
#error SMP not supported
#endif

typedef struct { int counter; } atomic_t;

#define ATOMIC_INIT(i) { (i) }

#ifdef __KERNEL__

#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))

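/*
 * This port is uniprocessor-only (see the CONFIG_SMP #error above), so
 * the operations below are made atomic simply by disabling interrupts
 * around a plain read-modify-write of the counter.
 */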
static inline int atomic_add_return (int i, volatile atomic_t *v)
{
	unsigned long flags;
	int res;

	local_irq_save (flags);
	res = v->counter + i;
	v->counter = res;
	local_irq_restore (flags);

	return res;
}

static __inline__ int atomic_sub_return (int i, volatile atomic_t *v)
{
	unsigned long flags;
	int res;

	local_irq_save (flags);
	res = v->counter - i;
	v->counter = res;
	local_irq_restore (flags);

	return res;
}

static __inline__ void atomic_clear_mask (unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	local_irq_save (flags);
	*addr &= ~mask;
	local_irq_restore (flags);
}
#endif

#define atomic_add(i, v) atomic_add_return ((i), (v))
#define atomic_sub(i, v) atomic_sub_return ((i), (v))

#define atomic_dec_return(v) atomic_sub_return (1, (v))
#define atomic_inc_return(v) atomic_add_return (1, (v))
#define atomic_inc(v) atomic_inc_return (v)
#define atomic_dec(v) atomic_dec_return (v)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_sub_and_test(i,v) (atomic_sub_return ((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return (1, (v)) == 0)
#define atomic_add_negative(i,v) (atomic_add_return ((i), (v)) < 0)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	local_irq_restore(flags);

	return ret != u;
}

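/*
 * atomic_add_unless() adds @a to @v unless the counter was already @u,
 * and returns non-zero only if the add was actually performed.
 * atomic_inc_not_zero() below uses it to take a reference only while
 * the count is still non-zero.
 */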
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/* Atomic operations are already serializing on the v850 (they run with
   interrupts disabled), so these are just compiler barriers. */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>

#endif /* __V850_ATOMIC_H__ */
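As a quick illustration of how this interface is typically used, here is a
minimal refcounting sketch built on the operations above. It is not part of
the file: "struct foo", foo_init(), foo_get() and foo_put() are hypothetical
names, and the object is assumed to have been allocated with kmalloc()
(kfree() comes from <linux/slab.h>).

struct foo {
	atomic_t refcount;
	/* ... payload ... */
};

static void foo_init (struct foo *f)
{
	/* The creator starts out holding the only reference. */
	atomic_set (&f->refcount, 1);
}

static int foo_get (struct foo *f)
{
	/* Take a reference only while the object is still live,
	   i.e. the count has not already dropped to zero. */
	return atomic_inc_not_zero (&f->refcount);
}

static void foo_put (struct foo *f)
{
	/* Drop a reference; free the object when the last one goes away. */
	if (atomic_dec_and_test (&f->refcount))
		kfree (f);
}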