ia64/linux-2.6.18-xen.hg

view include/asm-i386/spinlock.h @ 452:c7ed6fe5dca0

kexec: dont initialise regions in reserve_memory()

There is no need to initialise efi_memmap_res and boot_param_res in
reserve_memory() for the initial xen domain as it is done in
machine_kexec_setup_resources() using values from the kexec hypercall.

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 28 10:55:18 2008 +0000 (2008-02-28)
parents 831230e53067
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <linux/compiler.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations. There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#define __raw_spin_is_locked(x) \
	(*(volatile signed char *)(&(x)->slock) <= 0)

#define __raw_spin_lock_string \
	"\n1:\t" \
	LOCK_PREFIX " ; decb %0\n\t" \
	"jns 3f\n" \
	"2:\t" \
	"rep;nop\n\t" \
	"cmpb $0,%0\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	"3:\n\t"

/*
 * NOTE: there's an irqs-on section here, which normally would have to be
 * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use
 * __raw_spin_lock_string_flags().
 */
#define __raw_spin_lock_string_flags \
	"\n1:\t" \
	LOCK_PREFIX " ; decb %0\n\t" \
	"jns 5f\n" \
	"2:\t" \
	"testl $0x200, %1\n\t" \
	"jz 4f\n\t" \
	"sti\n" \
	"3:\t" \
	"rep;nop\n\t" \
	"cmpb $0, %0\n\t" \
	"jle 3b\n\t" \
	"cli\n\t" \
	"jmp 1b\n" \
	"4:\t" \
	"rep;nop\n\t" \
	"cmpb $0, %0\n\t" \
	"jg 1b\n\t" \
	"jmp 4b\n" \
	"5:\n\t"

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory");
}
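
/*
 * Illustrative sketch (not part of the original header): the byte-lock
 * protocol that __raw_spin_lock_string encodes, written in plain C with
 * GCC's __sync builtins instead of hand-coded asm.  The slock byte holds
 * 1 while the lock is free; a locker atomically decrements it and owns
 * the lock iff the old value was positive, otherwise it spins until the
 * unlocker stores 1 again and then retries.  The function name is made
 * up for the example.
 */
static inline void example_byte_spin_lock(volatile signed char *slock)
{
	for (;;) {
		if (__sync_fetch_and_sub(slock, 1) > 0)
			return;			/* old value was 1: lock acquired */
		while (*slock <= 0)
			__asm__ __volatile__("rep;nop");	/* PAUSE, as in the asm above */
	}
}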
/*
 * It is easier for the lock validator if interrupts are not re-enabled
 * in the middle of a lock-acquire. This is a performance feature anyway
 * so we turn it off:
 */
#ifndef CONFIG_PROVE_LOCKING
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory");
}
#endif
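
/*
 * Illustrative sketch (not part of the original header): what
 * __raw_spin_lock_string_flags does, in plain C.  Bit 9 (0x200) of the
 * saved EFLAGS is the interrupt flag; if the caller had interrupts
 * enabled before it saved flags, they are re-enabled with sti while the
 * lock is busy and disabled again with cli before the next acquisition
 * attempt.  The function name is made up, and sti/cli mean this could
 * only run in ring 0.
 */
static inline void example_byte_spin_lock_flags(volatile signed char *slock,
						unsigned long flags)
{
	for (;;) {
		if (__sync_fetch_and_sub(slock, 1) > 0)
			return;				/* lock acquired */
		if (flags & 0x200)
			__asm__ __volatile__("sti");	/* EFLAGS.IF was set: let irqs in while we wait */
		while (*slock <= 0)
			__asm__ __volatile__("rep;nop");
		if (flags & 0x200)
			__asm__ __volatile__("cli");	/* irqs back off before retrying */
	}
}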
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	char oldval;
	__asm__ __volatile__(
		"xchgb %b0,%1"
		:"=q" (oldval), "+m" (lock->slock)
		:"0" (0) : "memory");
	return oldval > 0;
}
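
/*
 * Illustrative sketch (not part of the original header): why the xchgb
 * trylock above works.  Atomically exchanging 0 into the byte both claims
 * the lock and reports its previous state; a positive old value means the
 * lock was free and now belongs to us.  __sync_lock_test_and_set is GCC's
 * atomic-exchange builtin (an xchg on x86); the function name is made up.
 */
static inline int example_byte_spin_trylock(volatile signed char *slock)
{
	return __sync_lock_test_and_set(slock, 0) > 0;
}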
/*
 * __raw_spin_unlock based on writing $1 to the low byte.
 * This method works. Despite all the confusion.
 * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
 * (PPro errata 66, 92)
 */

#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)

#define __raw_spin_unlock_string \
	"movb $1,%0" \
		:"+m" (lock->slock) : : "memory"

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
		__raw_spin_unlock_string
	);
}

#else

#define __raw_spin_unlock_string \
	"xchgb %b0, %1" \
		:"=q" (oldval), "+m" (lock->slock) \
		:"0" (oldval) : "memory"

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	char oldval = 1;

	__asm__ __volatile__(
		__raw_spin_unlock_string
	);
}

#endif

#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores. See
 * semaphore.h for details. -ben
 *
 * the helpers are in arch/i386/kernel/semaphore.c
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_read_can_lock(x)		((int)(x)->lock > 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_write_can_lock(x)		((x)->lock == RW_LOCK_BIAS)

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	__build_read_lock(rw, "__read_lock_failed");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	__build_write_lock(rw, "__write_lock_failed");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
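
/*
 * Illustrative sketch (not part of the original header): the biased-counter
 * arithmetic behind the rwlock trylocks above, with 0x01000000 assumed as a
 * stand-in for RW_LOCK_BIAS (the real constant lives in asm/rwlock.h).
 * An idle lock holds the full bias; each reader subtracts 1, so the count
 * stays positive while only readers are in; a writer subtracts the whole
 * bias, leaving exactly 0 only if there were no readers and no other writer.
 * The sign bit is the "contended" bit the comment above refers to.  Names
 * here are invented for the example and the updates are shown non-atomically
 * for clarity.
 */
#define EXAMPLE_RW_BIAS	0x01000000	/* assumed stand-in for RW_LOCK_BIAS */

static inline int example_read_trylock(int *count)
{
	*count -= 1;			/* atomic_dec() in the real code */
	if (*count >= 0)
		return 1;		/* no writer holds the lock */
	*count += 1;			/* undo: a writer is in or pending */
	return 0;
}

static inline int example_write_trylock(int *count)
{
	*count -= EXAMPLE_RW_BIAS;	/* atomic_sub_and_test() in the real code */
	if (*count == 0)
		return 1;		/* lock was completely idle: we own it */
	*count += EXAMPLE_RW_BIAS;	/* readers or a writer present */
	return 0;
}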
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
				 : "+m" (rw->lock) : : "memory");
}

#endif /* __ASM_SPINLOCK_H */