xenbits.xensource.com Git - people/vhanquez/xen.git/commitdiff
x86: Define atomic_{read,write}{8,16,32,64} accessor functions.
author:    Keir Fraser <keir@xen.org>
           Thu, 16 Dec 2010 20:10:32 +0000 (20:10 +0000)
committer: Keir Fraser <keir@xen.org>
           Thu, 16 Dec 2010 20:10:32 +0000 (20:10 +0000)
These absolutely guarantee to read/write a uint*_t with a single atomic
processor instruction.

Also re-define atomic_read/atomic_write (act on atomic_t) similarly.

Signed-off-by: Keir Fraser <keir@xen.org>
xen-unstable changeset:   22564:aa33ab320f7e
xen-unstable date:        Thu Dec 16 19:29:08 2010 +0000

xen/include/asm-x86/atomic.h
xen/include/asm-x86/x86_32/system.h
xen/include/asm-x86/x86_64/system.h

index 17becc5a7e283252df49f6feec3bc11cb2e70ac1..3578c33568faf1df29d65ee915bfa61385fc75fb 100644 (file)
 #define LOCK ""
 #endif
 
+/*
+ * Emit a static inline 'name' that loads a 'type' from memory with one
+ * MOV instruction (suffix 'size'); naturally aligned x86 loads up to the
+ * machine word are guaranteed atomic.  'reg' is the output constraint,
+ * 'barrier' an optional clobber list.
+ */
+#define build_atomic_read(name, size, type, reg, barrier) \
+static inline type name(const volatile type *addr) \
+{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
+:"m" (*(volatile type *)addr) barrier); return ret; }
+
+/*
+ * Emit a static inline 'name' that stores a 'type' to memory with one
+ * MOV instruction (suffix 'size'); naturally aligned x86 stores up to the
+ * machine word are guaranteed atomic.  'reg' is the input constraint,
+ * 'barrier' an optional clobber list.
+ */
+#define build_atomic_write(name, size, type, reg, barrier) \
+static inline void name(volatile type *addr, type val) \
+{ asm volatile("mov" size " %0,%1": :reg (val), \
+"m" (*(volatile type *)addr) barrier); }
+
+/* 8/16/32-bit accessors; "q" restricts 8-bit operands to byte-addressable
+ * registers (a/b/c/d) as required on x86-32. */
+build_atomic_read(atomic_read8, "b", uint8_t, "=q", )
+build_atomic_read(atomic_read16, "w", uint16_t, "=r", )
+build_atomic_read(atomic_read32, "l", uint32_t, "=r", )
+build_atomic_read(atomic_read_int, "l", int, "=r", )
+
+build_atomic_write(atomic_write8, "b", uint8_t, "q", )
+build_atomic_write(atomic_write16, "w", uint16_t, "r", )
+build_atomic_write(atomic_write32, "l", uint32_t, "r", )
+build_atomic_write(atomic_write_int, "l", int, "r", )
+
+#ifdef __x86_64__
+build_atomic_read(atomic_read64, "q", uint64_t, "=r", )
+build_atomic_write(atomic_write64, "q", uint64_t, "r", )
+#else
+/* No single 64-bit MOV on x86-32: emulate with LOCK CMPXCHG8B. */
+static inline uint64_t atomic_read64(const volatile uint64_t *addr)
+{
+    uint64_t *__addr = (uint64_t *)addr;
+    /* cmpxchg8b(p, 0, 0) reads *p atomically; it stores 0 only if
+     * *p was already 0, so the value is never changed. */
+    return __cmpxchg8b(__addr, 0, 0);
+}
+static inline void atomic_write64(volatile uint64_t *addr, uint64_t val)
+{
+    uint64_t old = *addr, new, *__addr = (uint64_t *)addr;
+    /* Retry until CMPXCHG8B succeeds, i.e. returns the expected 'old'.
+     * NB: the result must be assigned to 'new' and compared with 'old';
+     * '(old = ...) != old' is a self-comparison that is always false,
+     * which would skip the retry loop and lose racing updates. */
+    while ( (new = __cmpxchg8b(__addr, old, val)) != old )
+        old = new;
+}
+#endif
+
+#undef build_atomic_read
+#undef build_atomic_write
+
 /*
  * NB. I've pushed the volatile qualifier into the operations. This allows
  * fast accessors such as _atomic_read() and _atomic_set() which don't give
@@ -26,7 +66,7 @@ typedef struct { int counter; } atomic_t;
  * Atomically reads the value of @v.
  */
 #define _atomic_read(v)                ((v).counter)
-#define atomic_read(v)         (*(volatile int *)&((v)->counter))
+#define atomic_read(v)         atomic_read_int(&((v)->counter)) /* one MOV */
 
 /**
  * atomic_set - set atomic variable
@@ -36,7 +76,7 @@ typedef struct { int counter; } atomic_t;
  * Atomically sets the value of @v to @i.
  */ 
 #define _atomic_set(v,i)       (((v).counter) = (i))
-#define atomic_set(v,i)                (*(volatile int *)&((v)->counter) = (i))
+#define atomic_set(v,i)                atomic_write_int(&((v)->counter), (i)) /* one MOV */
 
 /**
  * atomic_add - add integer to atomic variable
index 56ef751ec7fbe06e15b962e358fc7c377d67b0cc..0cde9f8933e25a2285924288170b1507299e1208 100644 (file)
@@ -91,13 +91,6 @@ static always_inline unsigned long long __cmpxchg8b(
     _rc;                                                                \
 })
 
-static inline void atomic_write64(uint64_t *p, uint64_t v)
-{
-    uint64_t w = *p, x;
-    while ( (x = __cmpxchg8b(p, w, v)) != w )
-        w = x;
-}
-
 #define mb()                    \
     asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
 
index fa9b3118b063b4a78e086c40d70b7c286915f25d..d4623ba0fec6762e04c0d964def809e8c01457c1 100644 (file)
     _rc;                                                                \
 })
 
-static inline void atomic_write64(uint64_t *p, uint64_t v)
-{
-    *p = v;
-}
-
 #define mb()                    \
     asm volatile ( "mfence" : : : "memory" )