ia64/xen-unstable

changeset 5192:2df0e546014d

bitkeeper revision 1.1583 (42983f5dF7YXNy2i-9EFwNZ_eZer4g)

Clean up bitops.h: all the basic atomic bitops access a 32-bit
memory location, even on x86/64.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat May 28 09:52:29 2005 +0000 (2005-05-28)
parents b9f77360e9fc
children 94a63704b6c4
files xen/include/asm-x86/bitops.h
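
Before the diff, a short sketch of what the new fixed-width semantics amount to. This is an illustrative C rendering, not code from the tree: the hypothetical set_bit_sketch() and test_and_set_bit_sketch() names below are made up, and the plain C bodies are not atomic (the real header uses lock-prefixed "btsl"). They only spell out what the patched "btsl"/"sbbl" sequences do, namely treat the target as an array of 32-bit words on both x86/32 and x86/64.

#include <stdint.h>

/* Illustrative only: bit 'nr' lives at bit (nr & 31) of 32-bit word (nr >> 5). */
static inline void set_bit_sketch(int nr, volatile void *addr)
{
    volatile uint32_t *word = (volatile uint32_t *)addr + (nr >> 5);
    *word |= (uint32_t)1 << (nr & 31);     /* what "btsl %1,%0" performs */
}

static inline int test_and_set_bit_sketch(int nr, volatile void *addr)
{
    volatile uint32_t *word = (volatile uint32_t *)addr + (nr >> 5);
    uint32_t mask = (uint32_t)1 << (nr & 31);
    int oldbit = (*word & mask) != 0;      /* "btsl" copies the old bit into CF... */

    *word |= mask;                         /* ...and then sets it */
    return oldbit;                         /* the asm yields 0 or -1 via "sbbl";
                                              callers only test for nonzero */
}
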
line diff
     1.1 --- a/xen/include/asm-x86/bitops.h	Sat May 28 09:23:56 2005 +0000
     1.2 +++ b/xen/include/asm-x86/bitops.h	Sat May 28 09:52:29 2005 +0000
     1.3 @@ -38,10 +38,10 @@
     1.4   * Note that @nr may be almost arbitrarily large; this function is not
     1.5   * restricted to acting on a single-word quantity.
     1.6   */
     1.7 -static __inline__ void set_bit(long nr, volatile void * addr)
     1.8 +static __inline__ void set_bit(int nr, volatile void * addr)
     1.9  {
    1.10  	__asm__ __volatile__( LOCK_PREFIX
    1.11 -		"bts"__OS" %1,%0"
    1.12 +		"btsl %1,%0"
    1.13  		:"=m" (ADDR)
    1.14  		:"dIr" (nr));
    1.15  }
    1.16 @@ -55,10 +55,10 @@ static __inline__ void set_bit(long nr, 
    1.17   * If it's called on the same region of memory simultaneously, the effect
    1.18   * may be that only one operation succeeds.
    1.19   */
    1.20 -static __inline__ void __set_bit(long nr, volatile void * addr)
    1.21 +static __inline__ void __set_bit(int nr, volatile void * addr)
    1.22  {
    1.23  	__asm__(
    1.24 -		"bts"__OS" %1,%0"
    1.25 +		"btsl %1,%0"
    1.26  		:"=m" (ADDR)
    1.27  		:"dIr" (nr));
    1.28  }
    1.29 @@ -73,10 +73,10 @@ static __inline__ void __set_bit(long nr
    1.30   * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
    1.31   * in order to ensure changes are visible on other processors.
    1.32   */
    1.33 -static __inline__ void clear_bit(long nr, volatile void * addr)
    1.34 +static __inline__ void clear_bit(int nr, volatile void * addr)
    1.35  {
    1.36  	__asm__ __volatile__( LOCK_PREFIX
    1.37 -		"btr"__OS" %1,%0"
    1.38 +		"btrl %1,%0"
    1.39  		:"=m" (ADDR)
    1.40  		:"dIr" (nr));
    1.41  }
    1.42 @@ -92,10 +92,10 @@ static __inline__ void clear_bit(long nr
    1.43   * If it's called on the same region of memory simultaneously, the effect
    1.44   * may be that only one operation succeeds.
    1.45   */
    1.46 -static __inline__ void __change_bit(long nr, volatile void * addr)
    1.47 +static __inline__ void __change_bit(int nr, volatile void * addr)
    1.48  {
    1.49  	__asm__ __volatile__(
    1.50 -		"btc"__OS" %1,%0"
    1.51 +		"btcl %1,%0"
    1.52  		:"=m" (ADDR)
    1.53  		:"dIr" (nr));
    1.54  }
    1.55 @@ -109,10 +109,10 @@ static __inline__ void __change_bit(long
    1.56   * Note that @nr may be almost arbitrarily large; this function is not
    1.57   * restricted to acting on a single-word quantity.
    1.58   */
    1.59 -static __inline__ void change_bit(long nr, volatile void * addr)
    1.60 +static __inline__ void change_bit(int nr, volatile void * addr)
    1.61  {
    1.62  	__asm__ __volatile__( LOCK_PREFIX
    1.63 -		"btc"__OS" %1,%0"
    1.64 +		"btcl %1,%0"
    1.65  		:"=m" (ADDR)
    1.66  		:"dIr" (nr));
    1.67  }
    1.68 @@ -125,12 +125,12 @@ static __inline__ void change_bit(long n
    1.69   * This operation is atomic and cannot be reordered.  
    1.70   * It also implies a memory barrier.
    1.71   */
    1.72 -static __inline__ int test_and_set_bit(long nr, volatile void * addr)
    1.73 +static __inline__ int test_and_set_bit(int nr, volatile void * addr)
    1.74  {
    1.75 -	long oldbit;
    1.76 +	int oldbit;
    1.77  
    1.78  	__asm__ __volatile__( LOCK_PREFIX
    1.79 -		"bts"__OS" %2,%1\n\tsbb"__OS" %0,%0"
    1.80 +		"btsl %2,%1\n\tsbbl %0,%0"
    1.81  		:"=r" (oldbit),"=m" (ADDR)
    1.82  		:"dIr" (nr) : "memory");
    1.83  	return oldbit;
    1.84 @@ -145,12 +145,12 @@ static __inline__ int test_and_set_bit(l
    1.85   * If two examples of this operation race, one can appear to succeed
    1.86   * but actually fail.  You must protect multiple accesses with a lock.
    1.87   */
    1.88 -static __inline__ int __test_and_set_bit(long nr, volatile void * addr)
    1.89 +static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
    1.90  {
    1.91 -	long oldbit;
    1.92 +	int oldbit;
    1.93  
    1.94  	__asm__(
    1.95 -		"bts"__OS" %2,%1\n\tsbb"__OS" %0,%0"
    1.96 +		"btsl %2,%1\n\tsbbl %0,%0"
    1.97  		:"=r" (oldbit),"=m" (ADDR)
    1.98  		:"dIr" (nr));
    1.99  	return oldbit;
   1.100 @@ -164,12 +164,12 @@ static __inline__ int __test_and_set_bit
   1.101   * This operation is atomic and cannot be reordered.  
   1.102   * It also implies a memory barrier.
   1.103   */
   1.104 -static __inline__ int test_and_clear_bit(long nr, volatile void * addr)
   1.105 +static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
   1.106  {
   1.107 -	long oldbit;
   1.108 +	int oldbit;
   1.109  
   1.110  	__asm__ __volatile__( LOCK_PREFIX
   1.111 -		"btr"__OS" %2,%1\n\tsbb"__OS" %0,%0"
   1.112 +		"btrl %2,%1\n\tsbbl %0,%0"
   1.113  		:"=r" (oldbit),"=m" (ADDR)
   1.114  		:"dIr" (nr) : "memory");
   1.115  	return oldbit;
   1.116 @@ -184,24 +184,24 @@ static __inline__ int test_and_clear_bit
   1.117   * If two examples of this operation race, one can appear to succeed
   1.118   * but actually fail.  You must protect multiple accesses with a lock.
   1.119   */
   1.120 -static __inline__ int __test_and_clear_bit(long nr, volatile void * addr)
   1.121 +static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
   1.122  {
   1.123 -	long oldbit;
   1.124 +	int oldbit;
   1.125  
   1.126  	__asm__(
   1.127 -		"btr"__OS" %2,%1\n\tsbb"__OS" %0,%0"
   1.128 +		"btrl %2,%1\n\tsbbl %0,%0"
   1.129  		:"=r" (oldbit),"=m" (ADDR)
   1.130  		:"dIr" (nr));
   1.131  	return oldbit;
   1.132  }
   1.133  
   1.134  /* WARNING: non atomic and it can be reordered! */
   1.135 -static __inline__ int __test_and_change_bit(long nr, volatile void * addr)
   1.136 +static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
   1.137  {
   1.138 -	long oldbit;
   1.139 +	int oldbit;
   1.140  
   1.141  	__asm__ __volatile__(
   1.142 -		"btc"__OS" %2,%1\n\tsbb"__OS" %0,%0"
   1.143 +		"btcl %2,%1\n\tsbbl %0,%0"
   1.144  		:"=r" (oldbit),"=m" (ADDR)
   1.145  		:"dIr" (nr) : "memory");
   1.146  	return oldbit;
   1.147 @@ -215,29 +215,29 @@ static __inline__ int __test_and_change_
   1.148   * This operation is atomic and cannot be reordered.  
   1.149   * It also implies a memory barrier.
   1.150   */
   1.151 -static __inline__ int test_and_change_bit(long nr, volatile void * addr)
   1.152 +static __inline__ int test_and_change_bit(int nr, volatile void * addr)
   1.153  {
   1.154 -	long oldbit;
   1.155 +	int oldbit;
   1.156  
   1.157  	__asm__ __volatile__( LOCK_PREFIX
   1.158 -		"btc"__OS" %2,%1\n\tsbb"__OS" %0,%0"
   1.159 +		"btcl %2,%1\n\tsbbl %0,%0"
   1.160  		:"=r" (oldbit),"=m" (ADDR)
   1.161  		:"dIr" (nr) : "memory");
   1.162  	return oldbit;
   1.163  }
   1.164  
   1.165  
   1.166 -static __inline__ int constant_test_bit(long nr, const volatile void * addr)
   1.167 +static __inline__ int constant_test_bit(int nr, const volatile void * addr)
   1.168  {
   1.169  	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
   1.170  }
   1.171  
   1.172 -static __inline__ int variable_test_bit(long nr, volatile void * addr)
   1.173 +static __inline__ int variable_test_bit(int nr, volatile void * addr)
   1.174  {
   1.175 -	long oldbit;
   1.176 +	int oldbit;
   1.177  
   1.178  	__asm__ __volatile__(
   1.179 -		"bt"__OS" %2,%1\n\tsbb"__OS" %0,%0"
   1.180 +		"btl %2,%1\n\tsbbl %0,%0"
   1.181  		:"=r" (oldbit)
   1.182  		:"m" (ADDR),"dIr" (nr));
   1.183  	return oldbit;