ia64/xen-unstable
changeset 12008:fa4281cb7a5b
[IA64] Move atomic.h from linux/ to linux-xen/ so it can be modified.
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author      awilliam@xenbuild.aw
date        Tue Oct 31 22:25:12 2006 -0700 (2006-10-31)
parents     622bb65e2011
children    37a8dbe24201
files       xen/include/asm-ia64/linux-xen/asm/README.origin
            xen/include/asm-ia64/linux-xen/asm/atomic.h
            xen/include/asm-ia64/linux/asm/README.origin
            xen/include/asm-ia64/linux/asm/atomic.h
line diff
--- a/xen/include/asm-ia64/linux-xen/asm/README.origin  Sun Oct 29 11:18:17 2006 -0700
+++ b/xen/include/asm-ia64/linux-xen/asm/README.origin  Tue Oct 31 22:25:12 2006 -0700
@@ -7,6 +7,7 @@
 
 acpi.h          -> linux/include/asm-ia64/acpi.h
 asmmacro.h      -> linux/include/asm-ia64/asmmacro.h
+atomic.h        -> linux/include/asm-ia64/atomic.h
 cache.h         -> linux/include/asm-ia64/cache.h
 gcc_intrin.h    -> linux/include/asm-ia64/gcc_intrin.h
 ia64regs.h      -> linux/include/asm-ia64/ia64regs.h
--- /dev/null  Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/asm/atomic.h  Tue Oct 31 22:25:12 2006 -0700
@@ -0,0 +1,183 @@
+#ifndef _ASM_IA64_ATOMIC_H
+#define _ASM_IA64_ATOMIC_H
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ *
+ * NOTE: don't mess with the types below! The "unsigned long" and
+ * "int" types were carefully placed so as to ensure proper operation
+ * of the macros.
+ *
+ * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#include <linux/types.h>
+
+#include <asm/intrinsics.h>
+
+/*
+ * On IA-64, counter must always be volatile to ensure that that the
+ * memory accesses are ordered.
+ */
+typedef struct { volatile __s32 counter; } atomic_t;
+typedef struct { volatile __s64 counter; } atomic64_t;
+
+#define ATOMIC_INIT(i)          ((atomic_t) { (i) })
+#define ATOMIC64_INIT(i)        ((atomic64_t) { (i) })
+
+#define atomic_read(v)          ((v)->counter)
+#define atomic64_read(v)        ((v)->counter)
+
+#define atomic_set(v,i)         (((v)->counter) = (i))
+#define atomic64_set(v,i)       (((v)->counter) = (i))
+
+static __inline__ int
+ia64_atomic_add (int i, atomic_t *v)
+{
+        __s32 old, new;
+        CMPXCHG_BUGCHECK_DECL
+
+        do {
+                CMPXCHG_BUGCHECK(v);
+                old = atomic_read(v);
+                new = old + i;
+        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
+        return new;
+}
+
+static __inline__ int
+ia64_atomic64_add (__s64 i, atomic64_t *v)
+{
+        __s64 old, new;
+        CMPXCHG_BUGCHECK_DECL
+
+        do {
+                CMPXCHG_BUGCHECK(v);
+                old = atomic_read(v);
+                new = old + i;
+        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
+        return new;
+}
+
+static __inline__ int
+ia64_atomic_sub (int i, atomic_t *v)
+{
+        __s32 old, new;
+        CMPXCHG_BUGCHECK_DECL
+
+        do {
+                CMPXCHG_BUGCHECK(v);
+                old = atomic_read(v);
+                new = old - i;
+        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
+        return new;
+}
+
+static __inline__ int
+ia64_atomic64_sub (__s64 i, atomic64_t *v)
+{
+        __s64 old, new;
+        CMPXCHG_BUGCHECK_DECL
+
+        do {
+                CMPXCHG_BUGCHECK(v);
+                old = atomic_read(v);
+                new = old - i;
+        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
+        return new;
+}
+
+#define atomic_add_return(i,v) \
+({ \
+        int __ia64_aar_i = (i); \
+        (__builtin_constant_p(i) \
+         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4) \
+             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16) \
+             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4) \
+             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
+                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
+                : ia64_atomic_add(__ia64_aar_i, v); \
+})
+
+#define atomic64_add_return(i,v) \
+({ \
+        long __ia64_aar_i = (i); \
+        (__builtin_constant_p(i) \
+         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4) \
+             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16) \
+             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4) \
+             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
+                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
+                : ia64_atomic64_add(__ia64_aar_i, v); \
+})
+
+/*
+ * Atomically add I to V and return TRUE if the resulting value is
+ * negative.
+ */
+static __inline__ int
+atomic_add_negative (int i, atomic_t *v)
+{
+        return atomic_add_return(i, v) < 0;
+}
+
+static __inline__ int
+atomic64_add_negative (__s64 i, atomic64_t *v)
+{
+        return atomic64_add_return(i, v) < 0;
+}
+
+#define atomic_sub_return(i,v) \
+({ \
+        int __ia64_asr_i = (i); \
+        (__builtin_constant_p(i) \
+         && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4) \
+             || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16) \
+             || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4) \
+             || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
+                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
+                : ia64_atomic_sub(__ia64_asr_i, v); \
+})
+
+#define atomic64_sub_return(i,v) \
+({ \
+        long __ia64_asr_i = (i); \
+        (__builtin_constant_p(i) \
+         && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4) \
+             || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16) \
+             || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4) \
+             || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
+                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
+                : ia64_atomic64_sub(__ia64_asr_i, v); \
+})
+
+#define atomic_dec_return(v)            atomic_sub_return(1, (v))
+#define atomic_inc_return(v)            atomic_add_return(1, (v))
+#define atomic64_dec_return(v)          atomic64_sub_return(1, (v))
+#define atomic64_inc_return(v)          atomic64_add_return(1, (v))
+
+#define atomic_sub_and_test(i,v)        (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v)          (atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v)          (atomic_add_return(1, (v)) == 0)
+#define atomic64_sub_and_test(i,v)      (atomic64_sub_return((i), (v)) == 0)
+#define atomic64_dec_and_test(v)        (atomic64_sub_return(1, (v)) == 0)
+#define atomic64_inc_and_test(v)        (atomic64_add_return(1, (v)) == 0)
+
+#define atomic_add(i,v)                 atomic_add_return((i), (v))
+#define atomic_sub(i,v)                 atomic_sub_return((i), (v))
+#define atomic_inc(v)                   atomic_add(1, (v))
+#define atomic_dec(v)                   atomic_sub(1, (v))
+
+#define atomic64_add(i,v)               atomic64_add_return((i), (v))
+#define atomic64_sub(i,v)               atomic64_sub_return((i), (v))
+#define atomic64_inc(v)                 atomic64_add(1, (v))
+#define atomic64_dec(v)                 atomic64_sub(1, (v))
+
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec()     barrier()
+#define smp_mb__after_atomic_dec()      barrier()
+#define smp_mb__before_atomic_inc()     barrier()
+#define smp_mb__after_atomic_inc()      barrier()
+
+#endif /* _ASM_IA64_ATOMIC_H */
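Note on the code being added above: the ia64_atomic_add/sub helpers all follow the same pattern — read the counter, compute the new value, then retry with an acquire cmpxchg until no other CPU raced in between — while the *_return macros bypass that loop with IA-64's fetchadd instruction only when the addend is a constant the instruction accepts (±1, ±4, ±8, ±16). The following is a minimal, hypothetical sketch of that retry loop in portable C, not part of this changeset: the demo_* names are invented for illustration and the GCC __atomic builtins merely stand in for the ia64_cmpxchg intrinsic.

/* Illustrative sketch only -- not part of this changeset.  GCC __atomic
 * builtins stand in for the IA-64 ia64_cmpxchg(acq, ...) intrinsic and
 * the demo_* names are hypothetical. */
#include <stdint.h>

typedef struct { volatile int32_t counter; } demo_atomic_t;

static inline int32_t demo_atomic_add(int32_t i, demo_atomic_t *v)
{
        int32_t old, new_val;

        do {
                /* Snapshot the counter and compute the desired value. */
                old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
                new_val = old + i;
                /* Publish it only if no other CPU changed the counter in
                 * the meantime; otherwise loop and try again.  Acquire
                 * ordering mirrors the "acq" completer used above. */
        } while (!__atomic_compare_exchange_n(&v->counter, &old, new_val,
                                              0, __ATOMIC_ACQUIRE,
                                              __ATOMIC_RELAXED));
        return new_val;
}

The constant-only fast path exists because fetchadd encodes its increment as an immediate limited to those eight values; any other addend has to fall back to the cmpxchg loop.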
--- a/xen/include/asm-ia64/linux/asm/README.origin  Sun Oct 29 11:18:17 2006 -0700
+++ b/xen/include/asm-ia64/linux/asm/README.origin  Tue Oct 31 22:25:12 2006 -0700
@@ -4,7 +4,6 @@
 # needs to be changed, move it to ../linux-xen and follow
 # the instructions in the README there.
 
-atomic.h        -> linux/include/asm-ia64/atomic.h
 bitops.h        -> linux/include/asm-ia64/bitops.h
 break.h         -> linux/include/asm-ia64/break.h
 byteorder.h     -> linux/include/asm-ia64/byteorder.h
--- a/xen/include/asm-ia64/linux/asm/atomic.h  Sun Oct 29 11:18:17 2006 -0700
+++ /dev/null  Thu Jan 01 00:00:00 1970 +0000
@@ -1,183 +0,0 @@
-#ifndef _ASM_IA64_ATOMIC_H
-#define _ASM_IA64_ATOMIC_H
-
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- *
- * NOTE: don't mess with the types below! The "unsigned long" and
- * "int" types were carefully placed so as to ensure proper operation
- * of the macros.
- *
- * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-#include <linux/types.h>
-
-#include <asm/intrinsics.h>
-
-/*
- * On IA-64, counter must always be volatile to ensure that that the
- * memory accesses are ordered.
- */
-typedef struct { volatile __s32 counter; } atomic_t;
-typedef struct { volatile __s64 counter; } atomic64_t;
-
-#define ATOMIC_INIT(i)          ((atomic_t) { (i) })
-#define ATOMIC64_INIT(i)        ((atomic64_t) { (i) })
-
-#define atomic_read(v)          ((v)->counter)
-#define atomic64_read(v)        ((v)->counter)
-
-#define atomic_set(v,i)         (((v)->counter) = (i))
-#define atomic64_set(v,i)       (((v)->counter) = (i))
-
-static __inline__ int
-ia64_atomic_add (int i, atomic_t *v)
-{
-        __s32 old, new;
-        CMPXCHG_BUGCHECK_DECL
-
-        do {
-                CMPXCHG_BUGCHECK(v);
-                old = atomic_read(v);
-                new = old + i;
-        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
-        return new;
-}
-
-static __inline__ int
-ia64_atomic64_add (__s64 i, atomic64_t *v)
-{
-        __s64 old, new;
-        CMPXCHG_BUGCHECK_DECL
-
-        do {
-                CMPXCHG_BUGCHECK(v);
-                old = atomic_read(v);
-                new = old + i;
-        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
-        return new;
-}
-
-static __inline__ int
-ia64_atomic_sub (int i, atomic_t *v)
-{
-        __s32 old, new;
-        CMPXCHG_BUGCHECK_DECL
-
-        do {
-                CMPXCHG_BUGCHECK(v);
-                old = atomic_read(v);
-                new = old - i;
-        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
-        return new;
-}
-
-static __inline__ int
-ia64_atomic64_sub (__s64 i, atomic64_t *v)
-{
-        __s64 old, new;
-        CMPXCHG_BUGCHECK_DECL
-
-        do {
-                CMPXCHG_BUGCHECK(v);
-                old = atomic_read(v);
-                new = old - i;
-        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
-        return new;
-}
-
-#define atomic_add_return(i,v) \
-({ \
-        int __ia64_aar_i = (i); \
-        (__builtin_constant_p(i) \
-         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4) \
-             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16) \
-             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4) \
-             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
-                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
-                : ia64_atomic_add(__ia64_aar_i, v); \
-})
-
-#define atomic64_add_return(i,v) \
-({ \
-        long __ia64_aar_i = (i); \
-        (__builtin_constant_p(i) \
-         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4) \
-             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16) \
-             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4) \
-             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
-                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
-                : ia64_atomic64_add(__ia64_aar_i, v); \
-})
-
-/*
- * Atomically add I to V and return TRUE if the resulting value is
- * negative.
- */
-static __inline__ int
-atomic_add_negative (int i, atomic_t *v)
-{
-        return atomic_add_return(i, v) < 0;
-}
-
-static __inline__ int
-atomic64_add_negative (__s64 i, atomic64_t *v)
-{
-        return atomic64_add_return(i, v) < 0;
-}
-
-#define atomic_sub_return(i,v) \
-({ \
-        int __ia64_asr_i = (i); \
-        (__builtin_constant_p(i) \
-         && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4) \
-             || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16) \
-             || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4) \
-             || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
-                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
-                : ia64_atomic_sub(__ia64_asr_i, v); \
-})
-
-#define atomic64_sub_return(i,v) \
-({ \
-        long __ia64_asr_i = (i); \
-        (__builtin_constant_p(i) \
-         && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4) \
-             || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16) \
-             || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4) \
-             || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
-                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
-                : ia64_atomic64_sub(__ia64_asr_i, v); \
-})
-
-#define atomic_dec_return(v)            atomic_sub_return(1, (v))
-#define atomic_inc_return(v)            atomic_add_return(1, (v))
-#define atomic64_dec_return(v)          atomic64_sub_return(1, (v))
-#define atomic64_inc_return(v)          atomic64_add_return(1, (v))
-
-#define atomic_sub_and_test(i,v)        (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v)          (atomic_sub_return(1, (v)) == 0)
-#define atomic_inc_and_test(v)          (atomic_add_return(1, (v)) == 0)
-#define atomic64_sub_and_test(i,v)      (atomic64_sub_return((i), (v)) == 0)
-#define atomic64_dec_and_test(v)        (atomic64_sub_return(1, (v)) == 0)
-#define atomic64_inc_and_test(v)        (atomic64_add_return(1, (v)) == 0)
-
-#define atomic_add(i,v)                 atomic_add_return((i), (v))
-#define atomic_sub(i,v)                 atomic_sub_return((i), (v))
-#define atomic_inc(v)                   atomic_add(1, (v))
-#define atomic_dec(v)                   atomic_sub(1, (v))
-
-#define atomic64_add(i,v)               atomic64_add_return((i), (v))
-#define atomic64_sub(i,v)               atomic64_sub_return((i), (v))
-#define atomic64_inc(v)                 atomic64_add(1, (v))
-#define atomic64_dec(v)                 atomic64_sub(1, (v))
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()     barrier()
-#define smp_mb__after_atomic_dec()      barrier()
-#define smp_mb__before_atomic_inc()     barrier()
-#define smp_mb__after_atomic_inc()      barrier()
-
-#endif /* _ASM_IA64_ATOMIC_H */
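For context, here is a hedged sketch of how callers typically drive this API — the usual reference-count idiom built on atomic_set/atomic_inc/atomic_dec_and_test. The demo_* macros are hypothetical stand-ins written with GCC builtins so the example compiles on its own; the real header maps the same operation names onto the IA-64 intrinsics instead.

/* Hypothetical usage sketch -- not part of this changeset.  Plain C with
 * GCC builtins so it stands alone; real callers use the atomic_t API from
 * the header moved above. */
#include <stdio.h>

typedef struct { volatile int counter; } demo_atomic_t;

#define demo_atomic_set(v, i)  __atomic_store_n(&(v)->counter, (i), __ATOMIC_RELAXED)
#define demo_atomic_inc(v)     __atomic_add_fetch(&(v)->counter, 1, __ATOMIC_ACQ_REL)
/* True only when this decrement drops the counter to zero. */
#define demo_atomic_dec_and_test(v) \
        (__atomic_sub_fetch(&(v)->counter, 1, __ATOMIC_ACQ_REL) == 0)

int main(void)
{
        demo_atomic_t refcount;

        demo_atomic_set(&refcount, 1);          /* initial reference          */
        demo_atomic_inc(&refcount);             /* a second user takes a ref  */

        if (demo_atomic_dec_and_test(&refcount))
                printf("freed\n");              /* not reached: one ref left  */
        if (demo_atomic_dec_and_test(&refcount))
                printf("freed\n");              /* last reference released    */
        return 0;
}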