ia64/linux-2.6.18-xen.hg

view include/asm-m32r/semaphore.h @ 452:c7ed6fe5dca0

kexec: don't initialise regions in reserve_memory()

There is no need to initialise efi_memmap_res and boot_param_res in
reserve_memory() for the initial xen domain as it is done in
machine_kexec_setup_resources() using values from the kexec hypercall.

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 28 10:55:18 2008 +0000
parents 831230e53067
#ifndef _ASM_M32R_SEMAPHORE_H
#define _ASM_M32R_SEMAPHORE_H

#include <linux/linkage.h>

#ifdef __KERNEL__

/*
 * SMP- and interrupt-safe semaphores.
 *
 * Copyright (C) 1996  Linus Torvalds
 * Copyright (C) 2004, 2006  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/wait.h>
#include <linux/rwsem.h>
#include <asm/assembler.h>
#include <asm/system.h>
#include <asm/atomic.h>

struct semaphore {
	atomic_t count;
	int sleepers;
	wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n)				\
{									\
	.count		= ATOMIC_INIT(n),				\
	.sleepers	= 0,						\
	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
}

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)

static inline void sema_init(struct semaphore *sem, int val)
{
	/*
	 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem), val);
	 *
	 * I'd rather use the more flexible initialization above, but sadly
	 * GCC 2.7.2.3 emits a bogus warning.  EGCS doesn't.  Oh well.
	 */
	atomic_set(&sem->count, val);
	sem->sleepers = 0;
	init_waitqueue_head(&sem->wait);
}

static inline void init_MUTEX(struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED(struct semaphore *sem)
{
	sema_init(sem, 0);
}
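
/*
 * Usage sketch (editor's illustration, not part of the original header;
 * the "foo" names are hypothetical).  DECLARE_MUTEX() sets the count to 1
 * at compile time; sema_init() is the runtime equivalent, and
 * init_MUTEX_LOCKED() starts the count at 0 so the first down() blocks:
 *
 *	static DECLARE_MUTEX(foo_mutex);
 *	static struct semaphore foo_sem;
 *
 *	void foo_setup(void)
 *	{
 *		sema_init(&foo_sem, 1);
 *	}
 */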

asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
asmlinkage int __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

asmlinkage void __down(struct semaphore *sem);
asmlinkage int __down_interruptible(struct semaphore *sem);
asmlinkage int __down_trylock(struct semaphore *sem);
asmlinkage void __up(struct semaphore *sem);

/*
 * Atomically decrement the semaphore's count.  If it goes negative,
 * block the calling thread in the TASK_UNINTERRUPTIBLE state.
 */
static inline void down(struct semaphore *sem)
{
	might_sleep();
	if (unlikely(atomic_dec_return(&sem->count) < 0))
		__down(sem);
}
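
/*
 * Typical mutual-exclusion pattern (editor's sketch; foo_mutex as in the
 * sketch above).  down() may sleep, so it must not be used from contexts
 * that cannot sleep, such as interrupt handlers:
 *
 *	down(&foo_mutex);
 *	... touch the data foo_mutex protects ...
 *	up(&foo_mutex);
 */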

/*
 * Interruptibly try to acquire a semaphore.  If we obtain it, return
 * zero.  If we were interrupted by a signal, return -EINTR.
 */
static inline int down_interruptible(struct semaphore *sem)
{
	int result = 0;

	might_sleep();
	if (unlikely(atomic_dec_return(&sem->count) < 0))
		result = __down_interruptible(sem);

	return result;
}
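
/*
 * Common calling idiom (editor's sketch): a syscall path typically backs
 * out when the sleep is interrupted instead of retrying by hand:
 *
 *	if (down_interruptible(&foo_mutex))
 *		return -ERESTARTSYS;	(or propagate -EINTR to the caller)
 *	...
 *	up(&foo_mutex);
 */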

/*
 * Non-blockingly attempt to down() a semaphore.
 * Returns zero if we acquired it, non-zero otherwise.
 */
static inline int down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	long count;
	int result = 0;

	local_irq_save(flags);

	/*
	 * Atomically decrement the count using the interlocked
	 * LOCK/UNLOCK load-store pair, with interrupts disabled.
	 */
	__asm__ __volatile__ (
		"# down_trylock			\n\t"
		DCACHE_CLEAR("%0", "r4", "%1")
		M32R_LOCK" %0, @%1;		\n\t"
		"addi	%0, #-1;		\n\t"
		M32R_UNLOCK" %0, @%1;		\n\t"
		: "=&r" (count)
		: "r" (&sem->count)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);

	if (unlikely(count < 0))
		result = __down_trylock(sem);

	return result;
}
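
/*
 * Sketch (editor's illustration).  Note the return sense: 0 means the
 * semaphore WAS acquired, non-zero means it was not:
 *
 *	if (down_trylock(&foo_mutex))
 *		return -EBUSY;		(contended, did not acquire)
 *	...
 *	up(&foo_mutex);
 */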

/*
 * Note! This is subtle.  We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
static inline void up(struct semaphore *sem)
{
	if (unlikely(atomic_inc_return(&sem->count) <= 0))
		__up(sem);
}
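
/*
 * Because up() never sleeps, a semaphore declared locked can serve as a
 * simple completion-style signal (editor's sketch, hypothetical name):
 *
 *	static DECLARE_MUTEX_LOCKED(foo_done);
 *
 *	waiting thread:      down(&foo_done);	(blocks until signalled)
 *	signalling context:  up(&foo_done);
 */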

#endif	/* __KERNEL__ */

#endif	/* _ASM_M32R_SEMAPHORE_H */