ia64/xen-unstable

changeset 19459:cd6b3af19191

x86: Clean up atomic.h comments and asm specifiers.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Mar 31 11:47:20 2009 +0100 (2009-03-31)
parents 80ecfc3d6a8e
children 2269d8704139
files xen/include/asm-x86/atomic.h
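Both changes below are cosmetic: the stale "24 bits" remarks are dropped from
the comments, and the inline-assembly specifiers are shortened. Under GNU C,
asm and volatile are equivalent to the reserved spellings __asm__ and
__volatile__; the underscored forms are only needed under strict ISO modes
(e.g. -std=c89), which the Xen build does not use, so the two spellings
generate identical code. A minimal sketch of the equivalence (illustrative
only, not from the tree; inc_old/inc_new are made-up names):

    /* Equivalent under GNU C: the underscored keywords exist so that
     * strict-ISO code can still use inline assembly.  The "=m" output
     * plus "m" input constraint pair mirrors the idiom used throughout
     * this file. */
    static inline void inc_old(int *x)
    {
        __asm__ __volatile__ ( "lock; incl %0" : "=m" (*x) : "m" (*x) );
    }

    static inline void inc_new(int *x)
    {
        asm volatile ( "lock; incl %0" : "=m" (*x) : "m" (*x) );
    }

The separate "=m" output and "m" input constraints, rather than a single
"+m", are an older idiom from before "+m" was reliably supported by GCC;
both forms tell the compiler that the asm reads and rewrites the operand
in memory.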
line diff
--- a/xen/include/asm-x86/atomic.h	Tue Mar 31 11:41:13 2009 +0100
+++ b/xen/include/asm-x86/atomic.h	Tue Mar 31 11:47:20 2009 +0100
@@ -23,8 +23,7 @@ typedef struct { int counter; } atomic_t
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically reads the value of @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
  */
 #define _atomic_read(v)		((v).counter)
 #define atomic_read(v)		(*(volatile int *)&((v)->counter))
@@ -34,8 +33,7 @@ typedef struct { int counter; } atomic_t
  * @v: pointer of type atomic_t
  * @i: required value
  *
- * Atomically sets the value of @v to @i.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
  */
 #define _atomic_set(v,i)	(((v).counter) = (i))
 #define atomic_set(v,i)		(*(volatile int *)&((v)->counter) = (i))
@@ -45,12 +43,11 @@ typedef struct { int counter; } atomic_t
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
- * Atomically adds @i to @v.  Note that the guaranteed useful range
- * of an atomic_t is only 24 bits.
+ * Atomically adds @i to @v.
  */
 static __inline__ void atomic_add(int i, atomic_t *v)
 {
-	__asm__ __volatile__(
+	asm volatile(
 		LOCK "addl %1,%0"
 		:"=m" (*(volatile int *)&v->counter)
 		:"ir" (i), "m" (*(volatile int *)&v->counter));
@@ -61,12 +58,11 @@ static __inline__ void atomic_add(int i,
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
- * Atomically subtracts @i from @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically subtracts @i from @v.
  */
 static __inline__ void atomic_sub(int i, atomic_t *v)
 {
-	__asm__ __volatile__(
+	asm volatile(
 		LOCK "subl %1,%0"
 		:"=m" (*(volatile int *)&v->counter)
 		:"ir" (i), "m" (*(volatile int *)&v->counter));
@@ -79,14 +75,13 @@ static __inline__ void atomic_sub(int i,
  *
  * Atomically subtracts @i from @v and returns
  * true if the result is zero, or false for all
- * other cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
+	asm volatile(
 		LOCK "subl %2,%0; sete %1"
 		:"=m" (*(volatile int *)&v->counter), "=qm" (c)
 		:"ir" (i), "m" (*(volatile int *)&v->counter) : "memory");
@@ -97,12 +92,11 @@ static __inline__ int atomic_sub_and_tes
  * atomic_inc - increment atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically increments @v by 1.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically increments @v by 1.
  */
 static __inline__ void atomic_inc(atomic_t *v)
 {
-	__asm__ __volatile__(
+	asm volatile(
 		LOCK "incl %0"
 		:"=m" (*(volatile int *)&v->counter)
 		:"m" (*(volatile int *)&v->counter));
@@ -112,12 +106,11 @@ static __inline__ void atomic_inc(atomic
  * atomic_dec - decrement atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically decrements @v by 1.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically decrements @v by 1.
 */
 static __inline__ void atomic_dec(atomic_t *v)
 {
-	__asm__ __volatile__(
+	asm volatile(
 		LOCK "decl %0"
 		:"=m" (*(volatile int *)&v->counter)
 		:"m" (*(volatile int *)&v->counter));
@@ -129,14 +122,13 @@ static __inline__ void atomic_dec(atomic
  *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
- * cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * cases.
 */
 static __inline__ int atomic_dec_and_test(atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
+	asm volatile(
 		LOCK "decl %0; sete %1"
 		:"=m" (*(volatile int *)&v->counter), "=qm" (c)
 		:"m" (*(volatile int *)&v->counter) : "memory");
@@ -149,14 +141,13 @@ static __inline__ int atomic_dec_and_tes
  *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
- * other cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
 */
 static __inline__ int atomic_inc_and_test(atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
+	asm volatile(
 		LOCK "incl %0; sete %1"
 		:"=m" (*(volatile int *)&v->counter), "=qm" (c)
 		:"m" (*(volatile int *)&v->counter) : "memory");
@@ -170,14 +161,13 @@ static __inline__ int atomic_inc_and_tes
  *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
- * result is greater than or equal to zero.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * result is greater than or equal to zero.
 */
 static __inline__ int atomic_add_negative(int i, atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
+	asm volatile(
 		LOCK "addl %2,%0; sets %1"
 		:"=m" (*(volatile int *)&v->counter), "=qm" (c)
 		:"ir" (i), "m" (*(volatile int *)&v->counter) : "memory");
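For context, the deleted "24 bits" note was inherited from old Linux headers:
the original SPARC port embedded a byte-sized spinlock in the counter word,
leaving only 24 bits for the value. On x86 the counter is a full 32-bit int,
so the caveat never applied here. A minimal sketch of typical use of these
primitives, e.g. reference counting (illustrative only; free_object() is a
hypothetical destructor, and the include path assumes the Xen tree):

    /* Illustrative reference counting on top of the primitives above;
     * not part of this changeset. */
    #include <asm/atomic.h>

    struct object {
        atomic_t refcnt;
    };

    static void free_object(struct object *obj);   /* hypothetical */

    static void get_object(struct object *obj)
    {
        atomic_inc(&obj->refcnt);                  /* take a reference */
    }

    static void put_object(struct object *obj)
    {
        /* LOCK "decl" is an atomic read-modify-write, and "sete" captures
         * whether this particular decrement hit zero, so exactly one
         * caller observes the final reference going away. */
        if ( atomic_dec_and_test(&obj->refcnt) )
            free_object(obj);
    }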