ia64/linux-2.6.18-xen.hg

view include/asm-m68knommu/semaphore.h @ 452:c7ed6fe5dca0

kexec: don't initialise regions in reserve_memory()

There is no need to initialise efi_memmap_res and boot_param_res in
reserve_memory() for the initial xen domain as it is done in
machine_kexec_setup_resources() using values from the kexec hypercall.

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 28 10:55:18 2008 +0000 (2008-02-28)
parents 831230e53067
children
line source
1 #ifndef _M68K_SEMAPHORE_H
2 #define _M68K_SEMAPHORE_H
4 #define RW_LOCK_BIAS 0x01000000
6 #ifndef __ASSEMBLY__
8 #include <linux/linkage.h>
9 #include <linux/wait.h>
10 #include <linux/spinlock.h>
11 #include <linux/rwsem.h>
13 #include <asm/system.h>
14 #include <asm/atomic.h>
16 /*
17 * Interrupt-safe semaphores..
18 *
19 * (C) Copyright 1996 Linus Torvalds
20 *
21 * m68k version by Andreas Schwab
22 */
/*
 * Semaphore object.
 *
 * count:  classic semaphore value — positive means units available,
 *         zero or negative means the semaphore is held/contended.
 * waking: only touched by the out-of-line slow paths (the inline
 *         fast paths below never reference it); presumably tracks
 *         pending wakeups for sleepers — confirm against
 *         arch/m68k/lib/semaphore.S.
 * wait:   queue that contended tasks sleep on.
 *
 * NOTE: field order is ABI here — the inline asm in down()/up()
 * addresses "count" through the raw semaphore pointer ("%a1@"),
 * so "count" must remain the first member.
 */
struct semaphore {
	atomic_t count;
	atomic_t waking;
	wait_queue_head_t wait;
};
/*
 * Build-time initializer: "n" units available, no pending wakeups,
 * and an empty wait queue.  "name" must be the semaphore object
 * itself, since the wait-queue head initializer references
 * (name).wait directly.
 */
#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
	.count = ATOMIC_INIT(n), \
	.waking = ATOMIC_INIT(0), \
	.wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
}

/* Define and statically initialize a semaphore with "count" units. */
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

/* Mutex-style semaphores: start unlocked (1 unit) or locked (0 units). */
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
/*
 * Run-time (re)initialization: give *sem "val" available units.
 * The compound literal reuses __SEMAPHORE_INITIALIZER with *sem as
 * the "name" argument, so the wait-queue head initializer refers to
 * the semaphore's own embedded wait queue, not a temporary.
 */
static inline void sema_init (struct semaphore *sem, int val)
{
	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
}
/* Initialize *sem as an unlocked mutex (one unit available). */
static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}
/* Initialize *sem as an already-locked mutex (zero units available). */
static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
/*
 * Out-of-line slow paths, implemented in assembly (see
 * arch/m68k/lib/semaphore.S, referenced by the comment below).
 * The "__*_failed"/"__up_wakeup" entries do NOT use the normal C
 * calling convention: the inline fast paths hand them the semaphore
 * in %a1 and a resume address in %a0 (pushed on the stack instead,
 * via "pea 1b", in the trylock case).
 */
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
asmlinkage int __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

/* C slow-path workers called from the asm stubs above. */
asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);

/* Serializes the semaphore slow paths (used by the out-of-line code). */
extern spinlock_t semaphore_wake_lock;
/*
 * This is ugly, but we want the default case to fall through.
 * "down_failed" is a special asm handler that calls the C
 * routine that actually waits. See arch/m68k/lib/semaphore.S
 */
/*
 * down(): acquire the semaphore, sleeping until a unit is available.
 *
 * Fast path: load the semaphore pointer into %a1, preload the resume
 * address (label 1:) into %a0, then decrement sem->count in place
 * ("subql #1, %%a1@" — count is the first struct member).  If the
 * result goes negative ("jmi") the semaphore was contended, so branch
 * to the __down_failed stub, which sleeps and eventually resumes at
 * label 1:.  The uncontended case takes no branch at all.
 */
static inline void down(struct semaphore * sem)
{
	might_sleep();
	__asm__ __volatile__(
		"| atomic down operation\n\t"
		"movel %0, %%a1\n\t"
		"lea %%pc@(1f), %%a0\n\t"
		"subql #1, %%a1@\n\t"
		"jmi __down_failed\n"
		"1:"
		: /* no outputs */
		: "g" (sem)
		: "cc", "%a0", "%a1", "memory");
}
/*
 * down_interruptible(): like down(), but the sleep can be interrupted
 * by a signal.
 *
 * Fast path mirrors down(): decrement sem->count; if it stays
 * non-negative, clear %d0 (success) and fall through to label 1:,
 * which stores %d0 into "ret".  On contention ("jmi") control goes to
 * __down_failed_interruptible, which resumes at label 1: — the only
 * code here after that label just copies %d0 to "ret", so the slow
 * path is expected to leave its status (0 or -EINTR, presumably) in
 * %d0 — confirm against arch/m68k/lib/semaphore.S.
 *
 * Returns 0 on success, nonzero if interrupted.
 */
static inline int down_interruptible(struct semaphore * sem)
{
	int ret;

	might_sleep();
	__asm__ __volatile__(
		"| atomic down operation\n\t"
		"movel %1, %%a1\n\t"
		"lea %%pc@(1f), %%a0\n\t"
		"subql #1, %%a1@\n\t"
		"jmi __down_failed_interruptible\n\t"
		"clrl %%d0\n"
		"1: movel %%d0, %0\n"
		: "=d" (ret)
		: "g" (sem)
		: "cc", "%d0", "%a0", "%a1", "memory");
	return(ret);
}
/*
 * down_trylock(): try to acquire the semaphore without sleeping.
 *
 * Decrements sem->count (sem pinned in %a1, result in %d0).  If the
 * count stays non-negative, clear the result (acquired, return 0) and
 * fall through at label 1:.  On contention ("jmi 2f") jump to an
 * out-of-line stub placed in the .text.lock section, which pushes the
 * resume address (label 1b) and jumps to __down_failed_trylock; that
 * helper presumably restores the count and leaves a nonzero result in
 * %d0 — confirm against arch/m68k/lib/semaphore.S.
 *
 * Returns 0 if acquired, nonzero if the semaphore was busy.
 */
static inline int down_trylock(struct semaphore * sem)
{
	register struct semaphore *sem1 __asm__ ("%a1") = sem;
	register int result __asm__ ("%d0");

	__asm__ __volatile__(
		"| atomic down trylock operation\n\t"
		"subql #1,%1@\n\t"
		"jmi 2f\n\t"
		"clrl %0\n"
		"1:\n"
		".section .text.lock,\"ax\"\n"
		".even\n"
		"2:\tpea 1b\n\t"
		"jbra __down_failed_trylock\n"
		".previous"
		: "=d" (result)
		: "a" (sem1)
		: "memory");
	return result;
}
/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
/*
 * up(): release one unit of the semaphore.
 *
 * Increments sem->count in place ("addql #1, %%a1@", sem in %a1,
 * resume address 1: preloaded in %a0).  If the result is still <= 0
 * ("jle"), the count was negative before the increment — i.e. there
 * are sleepers — so branch to __up_wakeup to wake one; otherwise fall
 * through with no taken branch.
 */
static inline void up(struct semaphore * sem)
{
	__asm__ __volatile__(
		"| atomic up operation\n\t"
		"movel %0, %%a1\n\t"
		"lea %%pc@(1f), %%a0\n\t"
		"addql #1, %%a1@\n\t"
		"jle __up_wakeup\n"
		"1:"
		: /* no outputs */
		: "g" (sem)
		: "cc", "%a0", "%a1", "memory");
}
152 #endif /* __ASSEMBLY__ */
154 #endif