x86: simplify a few macros / inline functions
author     Jan Beulich <jbeulich@suse.com>
           Fri, 8 May 2015 09:07:42 +0000 (11:07 +0200)
committer  Jan Beulich <jbeulich@suse.com>
           Fri, 8 May 2015 09:07:42 +0000 (11:07 +0200)
Drop pointless casts and write_atomic()'s bogus and unused "return
value".

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
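
In effect, write_atomic() becomes statement-only: the macro used to end
with "__x;", so the whole statement expression evaluated to the value
written, a "return value" no caller ever used. The per-case (uintN_t)
casts go too, since the generated write_uN_atomic() helpers take typed
parameters and the implicit conversion already narrows x_. A minimal
usage sketch of the resulting contract (the caller below is
hypothetical, not from the patch):

    unsigned int counter;

    void example(void)
    {
        write_atomic(&counter, 42u);            /* statement only after this patch */
        unsigned int v = read_atomic(&counter); /* still yields typeof(*p) */
        (void)v;
    }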
xen/include/asm-x86/atomic.h
xen/include/asm-x86/system.h

diff --git a/xen/include/asm-x86/atomic.h b/xen/include/asm-x86/atomic.h
index aadded0695d7e070922270df712e2ef20f85e8dd..52af08392b19e8e2fcecb36984724569c7b0a94e 100644
@@ -17,12 +17,11 @@ static inline void name(volatile type *addr, type val) \
 build_read_atomic(read_u8_atomic, "b", uint8_t, "=q", )
 build_read_atomic(read_u16_atomic, "w", uint16_t, "=r", )
 build_read_atomic(read_u32_atomic, "l", uint32_t, "=r", )
+build_read_atomic(read_u64_atomic, "q", uint64_t, "=r", )
 
 build_write_atomic(write_u8_atomic, "b", uint8_t, "q", )
 build_write_atomic(write_u16_atomic, "w", uint16_t, "r", )
 build_write_atomic(write_u32_atomic, "l", uint32_t, "r", )
-
-build_read_atomic(read_u64_atomic, "q", uint64_t, "=r", )
 build_write_atomic(write_u64_atomic, "q", uint64_t, "r", )
 
 #undef build_read_atomic
@@ -30,29 +29,28 @@ build_write_atomic(write_u64_atomic, "q", uint64_t, "r", )
 
 void __bad_atomic_size(void);
 
-#define read_atomic(p) ({                                               \
-    unsigned long x_;                                                   \
-    switch ( sizeof(*(p)) ) {                                           \
-    case 1: x_ = read_u8_atomic((uint8_t *)(p)); break;                 \
-    case 2: x_ = read_u16_atomic((uint16_t *)(p)); break;               \
-    case 4: x_ = read_u32_atomic((uint32_t *)(p)); break;               \
-    case 8: x_ = read_u64_atomic((uint64_t *)(p)); break;               \
-    default: x_ = 0; __bad_atomic_size(); break;                        \
-    }                                                                   \
-    (typeof(*(p)))x_;                                                   \
+#define read_atomic(p) ({                                 \
+    unsigned long x_;                                     \
+    switch ( sizeof(*(p)) ) {                             \
+    case 1: x_ = read_u8_atomic((uint8_t *)(p)); break;   \
+    case 2: x_ = read_u16_atomic((uint16_t *)(p)); break; \
+    case 4: x_ = read_u32_atomic((uint32_t *)(p)); break; \
+    case 8: x_ = read_u64_atomic((uint64_t *)(p)); break; \
+    default: x_ = 0; __bad_atomic_size(); break;          \
+    }                                                     \
+    (typeof(*(p)))x_;                                     \
 })
 
-#define write_atomic(p, x) ({                                           \
-    typeof(*(p)) __x = (x);                                             \
-    unsigned long x_ = (unsigned long)__x;                              \
-    switch ( sizeof(*(p)) ) {                                           \
-    case 1: write_u8_atomic((uint8_t *)(p), (uint8_t)x_); break;        \
-    case 2: write_u16_atomic((uint16_t *)(p), (uint16_t)x_); break;     \
-    case 4: write_u32_atomic((uint32_t *)(p), (uint32_t)x_); break;     \
-    case 8: write_u64_atomic((uint64_t *)(p), (uint64_t)x_); break;     \
-    default: __bad_atomic_size(); break;                                \
-    }                                                                   \
-    __x;                                                                \
+#define write_atomic(p, x) ({                             \
+    typeof(*(p)) __x = (x);                               \
+    unsigned long x_ = (unsigned long)__x;                \
+    switch ( sizeof(*(p)) ) {                             \
+    case 1: write_u8_atomic((uint8_t *)(p), x_); break;   \
+    case 2: write_u16_atomic((uint16_t *)(p), x_); break; \
+    case 4: write_u32_atomic((uint32_t *)(p), x_); break; \
+    case 8: write_u64_atomic((uint64_t *)(p), x_); break; \
+    default: __bad_atomic_size(); break;                  \
+    }                                                     \
 })
 
 /*
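
For context, build_read_atomic() / build_write_atomic() (defined just
above the first hunk; the visible context line shows the write variant's
"static inline void name(volatile type *addr, type val)" signature)
generate one inline helper per access width. Roughly what the 32-bit
pair expands to, reconstructed from those signatures rather than quoted
from the header:

    static inline uint32_t read_u32_atomic(const volatile uint32_t *addr)
    {
        uint32_t ret;
        /* Single 32-bit load; the "l" suffix and "=r" constraint come
         * from the macro arguments on the build_read_atomic() line. */
        asm volatile ( "movl %1,%0" : "=r" (ret) : "m" (*addr) );
        return ret;
    }

    static inline void write_u32_atomic(volatile uint32_t *addr, uint32_t val)
    {
        /* Single 32-bit store; val is already uint32_t here. */
        asm volatile ( "movl %1,%0" : "=m" (*addr) : "r" (val) );
    }

Because write_u32_atomic()'s second parameter is already uint32_t, the
caller-side (uint32_t)x_ cast removed above was redundant: the prototype
performs the same conversion.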
diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
index efe721cb44b57c8b2d3ec0837f4b0dcb02e4cba9..9fb70f570482ada69f0676a47bc528008cd6caa1 100644
@@ -41,25 +41,25 @@ static always_inline unsigned long __xchg(
     case 1:
         asm volatile ( "xchgb %b0,%1"
                        : "=q" (x)
-                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "m" (*__xg(ptr)), "0" (x)
                        : "memory" );
         break;
     case 2:
         asm volatile ( "xchgw %w0,%1"
                        : "=r" (x)
-                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "m" (*__xg(ptr)), "0" (x)
                        : "memory" );
         break;
     case 4:
         asm volatile ( "xchgl %k0,%1"
                        : "=r" (x)
-                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "m" (*__xg(ptr)), "0" (x)
                        : "memory" );
         break;
     case 8:
         asm volatile ( "xchgq %0,%1"
                        : "=r" (x)
-                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "m" (*__xg(ptr)), "0" (x)
                        : "memory" );
         break;
     }
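
The (volatile void *) casts dropped here were pointless because
__xchg()'s ptr parameter is itself declared volatile void *, so each
cast merely reasserted the parameter's own type. Callers normally reach
this function through Xen's xchg() wrapper macro; a hedged usage sketch
(swap_in() is a hypothetical caller):

    static unsigned long swap_in(unsigned long *slot, unsigned long val)
    {
        /* Atomically store val into *slot, returning the previous contents. */
        return xchg(slot, val);
    }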
@@ -81,28 +81,28 @@ static always_inline unsigned long __cmpxchg(
     case 1:
         asm volatile ( "lock; cmpxchgb %b1,%2"
                        : "=a" (prev)
-                       : "q" (new), "m" (*__xg((volatile void *)ptr)),
+                       : "q" (new), "m" (*__xg(ptr)),
                        "0" (old)
                        : "memory" );
         return prev;
     case 2:
         asm volatile ( "lock; cmpxchgw %w1,%2"
                        : "=a" (prev)
-                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
+                       : "r" (new), "m" (*__xg(ptr)),
                        "0" (old)
                        : "memory" );
         return prev;
     case 4:
         asm volatile ( "lock; cmpxchgl %k1,%2"
                        : "=a" (prev)
-                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
+                       : "r" (new), "m" (*__xg(ptr)),
                        "0" (old)
                        : "memory" );
         return prev;
     case 8:
         asm volatile ( "lock; cmpxchgq %1,%2"
                        : "=a" (prev)
-                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
+                       : "r" (new), "m" (*__xg(ptr)),
                        "0" (old)
                        : "memory" );
         return prev;
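
The same reasoning applies to __cmpxchg(): ptr already arrives as
volatile void *, so the per-width casts removed above were no-ops. A
hedged usage sketch via the public cmpxchg() macro (try_claim() is a
hypothetical caller):

    static int try_claim(unsigned long *flag)
    {
        /* Atomically move *flag from 0 to 1; cmpxchg() returns the value
         * previously in *flag, so observing 0 means we won the claim. */
        return cmpxchg(flag, 0UL, 1UL) == 0UL;
    }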