ia64/linux-2.6.18-xen.hg

view include/asm-i386/semaphore.h @ 452:c7ed6fe5dca0

kexec: don't initialise regions in reserve_memory()

There is no need to initialise efi_memmap_res and boot_param_res in
reserve_memory() for the initial xen domain as it is done in
machine_kexec_setup_resources() using values from the kexec hypercall.

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 28 10:55:18 2008 +0000 (2008-02-28)
parents 831230e53067
children
line source
1 #ifndef _I386_SEMAPHORE_H
2 #define _I386_SEMAPHORE_H
4 #include <linux/linkage.h>
6 #ifdef __KERNEL__
8 /*
9 * SMP- and interrupt-safe semaphores..
10 *
11 * (C) Copyright 1996 Linus Torvalds
12 *
13 * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
14 * the original code and to make semaphore waits
15 * interruptible so that processes waiting on
16 * semaphores can be killed.
17 * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
18 * functions in asm/semaphore-helper.h while fixing a
19 * potential and subtle race discovered by Ulrich Schmid
20 * in down_interruptible(). Since I started to play here I
21 * also implemented the `trylock' semaphore operation.
22 * 1999-07-02 Artur Skawina <skawina@geocities.com>
23 * Optimized "0(ecx)" -> "(ecx)" (the assembler does not
24 * do this). Changed calling sequences from push/jmp to
25 * traditional call/ret.
26 * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
27 * Some hacks to ensure compatibility with recent
28 * GCC snapshots, to avoid stack corruption when compiling
29 * with -fomit-frame-pointer. It's not sure if this will
30 * be fixed in GCC, as our previous implementation was a
31 * bit dubious.
32 *
33 * If you would like to see an analysis of this implementation, please
34 * ftp to gcom.com and download the file
35 * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
36 *
37 */
39 #include <asm/system.h>
40 #include <asm/atomic.h>
41 #include <linux/wait.h>
42 #include <linux/rwsem.h>
/*
 * Counting semaphore.
 *
 * count:    > 0 means free (that many down()s can succeed without
 *           blocking); <= 0 means held, with waiters when negative
 *           (see the sign tests in the down()/up() asm below).
 * sleepers: slow-path bookkeeping; maintained by the C helpers
 *           (see arch/i386/kernel/semaphore.c, referenced below).
 * wait:     queue of tasks blocked in down()/down_interruptible().
 */
struct semaphore {
	atomic_t count;
	int sleepers;
	wait_queue_head_t wait;
};
/*
 * Static initializer for a semaphore with initial count @n.
 * @name is needed because the wait-queue-head initializer must
 * reference the queue's own address.
 */
#define __SEMAPHORE_INITIALIZER(name, n)				\
{									\
	.count		= ATOMIC_INIT(n),				\
	.sleepers	= 0,						\
	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
}

/* Define and statically initialize a semaphore with the given count. */
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

/* Historical mutex API: a binary semaphore, initially unlocked (1)
 * or locked (0). */
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
64 static inline void sema_init (struct semaphore *sem, int val)
65 {
66 /*
67 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
68 *
69 * i'd rather use the more flexible initialization above, but sadly
70 * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
71 */
72 atomic_set(&sem->count, val);
73 sem->sleepers = 0;
74 init_waitqueue_head(&sem->wait);
75 }
/* Initialize @sem as an unlocked mutex: a binary semaphore, count 1. */
static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}
/* Initialize @sem as an already-locked mutex: count 0, so the first
 * down() will block until someone calls up(). */
static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
/*
 * Out-of-line slow-path stubs (see arch/i386/kernel/semaphore.c,
 * referenced in the comment below).  They do not use the normal C
 * calling convention: the inline asm passes the semaphore address
 * in %eax, hence the empty C parameter lists.
 */
fastcall void __down_failed(void /* special register calling convention */);
fastcall int __down_failed_interruptible(void /* params in registers */);
fastcall int __down_failed_trylock(void /* params in registers */);
fastcall void __up_wakeup(void /* special register calling convention */);
/*
 * down - acquire the semaphore (uninterruptible).
 *
 * This is ugly, but we want the default case to fall through.
 * "__down_failed" is a special asm handler that calls the C
 * routine that actually waits.  See arch/i386/kernel/semaphore.c
 *
 * Fast path: LOCK'd decrement of sem->count; if the result stays
 * non-negative we own the semaphore and fall straight through to
 * "1:".  Slow path: the count went negative ("js 2f"), so jump to
 * the out-of-line section, load the semaphore address into %eax
 * (the register convention __down_failed expects), sleep in the C
 * helper, and resume at "1:".  %eax is clobbered, hence "ax" in
 * the clobber list.
 *
 * May sleep; must not be called from atomic context (might_sleep()
 * flags that in debug builds).
 */
static inline void down(struct semaphore * sem)
{
	might_sleep();
	__asm__ __volatile__(
		"# atomic down operation\n\t"
		LOCK_PREFIX "decl %0\n\t" /* --sem->count */
		"js 2f\n"
		"1:\n"
		LOCK_SECTION_START("")
		"2:\tlea %0,%%eax\n\t"
		"call __down_failed\n\t"
		"jmp 1b\n"
		LOCK_SECTION_END
		:"+m" (sem->count)
		:
		:"memory","ax");
}
/*
 * down_interruptible - try to acquire the semaphore, allowing the
 * wait to be interrupted by a signal.
 *
 * Returns 0 if we obtained the semaphore, -EINTR if we were
 * interrupted.  The fast path zeroes the result ("xorl %0,%0");
 * on the slow path the result comes back in %eax from
 * __down_failed_interruptible (note the "=a" output constraint).
 *
 * Same fast/slow-path structure as down(): LOCK'd decrement, jump
 * out of line only when the count goes negative.
 */
static inline int down_interruptible(struct semaphore * sem)
{
	int result;

	might_sleep();
	__asm__ __volatile__(
		"# atomic interruptible down operation\n\t"
		LOCK_PREFIX "decl %1\n\t" /* --sem->count */
		"js 2f\n\t"
		"xorl %0,%0\n"
		"1:\n"
		LOCK_SECTION_START("")
		"2:\tlea %1,%%eax\n\t"
		"call __down_failed_interruptible\n\t"
		"jmp 1b\n"
		LOCK_SECTION_END
		:"=a" (result), "+m" (sem->count)
		:
		:"memory");
	return result;
}
141 /*
142 * Non-blockingly attempt to down() a semaphore.
143 * Returns zero if we acquired it
144 */
145 static inline int down_trylock(struct semaphore * sem)
146 {
147 int result;
149 __asm__ __volatile__(
150 "# atomic interruptible down operation\n\t"
151 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
152 "js 2f\n\t"
153 "xorl %0,%0\n"
154 "1:\n"
155 LOCK_SECTION_START("")
156 "2:\tlea %1,%%eax\n\t"
157 "call __down_failed_trylock\n\t"
158 "jmp 1b\n"
159 LOCK_SECTION_END
160 :"=a" (result), "+m" (sem->count)
161 :
162 :"memory");
163 return result;
164 }
/*
 * up - release the semaphore.
 *
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 *
 * LOCK'd increment of sem->count; if the result is still <= 0
 * ("jle 2f") there are sleepers, so the out-of-line path loads the
 * semaphore address into %eax and calls __up_wakeup.  Never sleeps,
 * so it may be called from interrupt context.  The ".subsection 0"
 * directive returns the assembler to the main text section after
 * the out-of-line stub.
 */
static inline void up(struct semaphore * sem)
{
	__asm__ __volatile__(
		"# atomic up operation\n\t"
		LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
		"jle 2f\n"
		"1:\n"
		LOCK_SECTION_START("")
		"2:\tlea %0,%%eax\n\t"
		"call __up_wakeup\n\t"
		"jmp 1b\n"
		LOCK_SECTION_END
		".subsection 0\n"
		:"+m" (sem->count)
		:
		:"memory","ax");
}
190 #endif
191 #endif