ia64/xen-unstable

view xen/include/asm-ia64/linux-xen/asm/spinlock.h @ 19659:411ecf6d1f19

[IA64] add ia64 _raw_rw_is_write_locked

This patch fixes the following link error.

xen/common/built_in.o: In function `_rw_is_write_locked':
xen/common/spinlock.c:249: undefined reference to
`_raw_rw_is_write_locked'
make[3]: *** [xen/xen-syms] Error 1

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Keir Fraser <keir.fraser@citrix.com>
date Wed May 27 12:00:32 2009 +0100 (2009-05-27)
parents d669f5d1f876
children
line source
1 #ifndef _ASM_IA64_SPINLOCK_H
2 #define _ASM_IA64_SPINLOCK_H
4 /*
5 * Copyright (C) 1998-2003 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
8 *
9 * This file is used for SMP configurations only.
10 */
12 #include <linux/compiler.h>
13 #include <linux/kernel.h>
15 #include <asm/atomic.h>
16 #include <asm/bitops.h>
17 #include <asm/intrinsics.h>
18 #include <asm/system.h>
20 #define DEBUG_SPINLOCK
/* Basic spinlock: one word, 0 = free, nonzero = held. */
22 typedef struct {
23 volatile unsigned int lock;
24 } raw_spinlock_t;
/* Static initializer; the compound-literal cast is commented out. */
26 #define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 0 }
28 #define _raw_spin_is_locked(x) ((x)->lock != 0)
/* Release: compiler barrier, then a plain store of 0 drops the lock. */
29 #define _raw_spin_unlock(x) do { barrier(); (x)->lock = 0; } while (0)
/* Try-lock: acquire-semantics CAS 0 -> 1; true iff the old value was 0. */
30 #define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
/* Reader/writer lock packed into a single 32-bit word: the low 31 bits
 * count active readers, bit 31 is the writer-held flag.  The whole word
 * is therefore negative (as a signed int) exactly when a writer holds it. */
32 typedef struct {
33 volatile unsigned int read_counter : 31;
34 volatile unsigned int write_lock : 1;
35 } raw_rwlock_t;
36 #define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { 0, 0 }
/* Acquire a read lock: optimistically bump the reader count with an
 * acquire-semantics fetchadd.  If the word went negative a writer holds
 * bit 31: undo the increment (release semantics) and spin with plain
 * volatile reads until the word is non-negative, then retry the fetchadd. */
38 #define _raw_read_lock(rw) \
39 do { \
40 raw_rwlock_t *__read_lock_ptr = (rw); \
41 \
42 while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
43 ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
44 while (*(volatile int *)__read_lock_ptr < 0) \
45 cpu_relax(); \
46 } \
47 } while (0)
/* Drop a read lock: decrement the reader count with release semantics. */
49 #define _raw_read_unlock(rw) \
50 do { \
51 raw_rwlock_t *__read_lock_ptr = (rw); \
52 ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
53 } while (0)
55 #ifdef ASM_SUPPORTED
/* Acquire the write lock (hand-written ia64 asm):
 *   r29 = 1 << 31  (the write_lock bit, built with dep from r0)
 *   spin: ld4 the word; if nonzero (readers or a writer present), branch
 *   back and re-read.  Once it reads 0, attempt cmpxchg4.acq of 0 -> r29
 *   (ar.ccv holds the expected value 0); if the CAS returns nonzero,
 *   someone raced us - go back to spinning. */
56 #define _raw_write_lock(rw) \
57 do { \
58 __asm__ __volatile__ ( \
59 "mov ar.ccv = r0\n" \
60 "dep r29 = -1, r0, 31, 1;;\n" \
61 "1:\n" \
62 "ld4 r2 = [%0];;\n" \
63 "cmp4.eq p0,p7 = r0,r2\n" \
64 "(p7) br.cond.spnt.few 1b \n" \
65 "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n" \
66 "cmp4.eq p0,p7 = r0, r2\n" \
67 "(p7) br.cond.spnt.few 1b;;\n" \
68 :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \
69 } while(0)
/* Non-blocking write-lock attempt: a single cmpxchg4.acq of 0 -> (1<<31).
 * "result" receives the previous value of the word; the expression
 * evaluates true iff it was 0, i.e. the lock was free and is now ours. */
71 #define _raw_write_trylock(rw) \
72 ({ \
73 register long result; \
74 \
75 __asm__ __volatile__ ( \
76 "mov ar.ccv = r0\n" \
77 "dep r29 = -1, r0, 31, 1;;\n" \
78 "cmpxchg4.acq %0 = [%1], r29, ar.ccv\n" \
79 : "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory"); \
80 (result == 0); \
81 })
83 #else /* !ASM_SUPPORTED */
/* C fallback for compilers without inline-asm support.
 * ia64_dep_mi(-1, 0, 31, 1) builds the write bit (1 << 31); spin on a
 * plain read of the word until it is 0, then attempt an acquire CAS of
 * 0 -> write-bit; on a lost race, loop back to spinning. */
85 #define _raw_write_lock(l) \
86 ({ \
87 __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
88 __u32 *ia64_write_lock_ptr = (__u32 *) (l); \
89 do { \
90 while (*ia64_write_lock_ptr) \
91 ia64_barrier(); \
92 ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0); \
93 } while (ia64_val); \
94 })
/* C fallback try-lock: one acquire CAS of 0 -> (1<<31); true iff the
 * previous value was 0 (lock was free and is now write-held). */
96 #define _raw_write_trylock(rw) \
97 ({ \
98 __u64 ia64_val; \
99 __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \
100 ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0); \
101 (ia64_val == 0); \
102 })
104 #endif /* !ASM_SUPPORTED */
/* Read try-lock is not arch-specific here; use the generic helper. */
106 #define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
/* Drop the write lock: barrier, then atomically clear bit 31. */
108 #define _raw_write_unlock(x) \
109 ({ \
110 smp_mb__before_clear_bit(); /* need barrier before releasing lock... */ \
111 clear_bit(31, (x)); \
112 })
/* Locked at all (readers or writer) iff the whole word is nonzero. */
114 #define _raw_rw_is_locked(x) (*(int *)(x) != 0)
/* Write-held iff bit 31 is set; added by this changeset to satisfy the
 * _rw_is_write_locked reference from xen/common/spinlock.c. */
115 #define _raw_rw_is_write_locked(x) (test_bit(31, (x)))
117 #endif /* _ASM_IA64_SPINLOCK_H */