build_read_atomic(read_u8_atomic, "b", uint8_t, "=q", )
build_read_atomic(read_u16_atomic, "w", uint16_t, "=r", )
build_read_atomic(read_u32_atomic, "l", uint32_t, "=r", )
+build_read_atomic(read_u64_atomic, "q", uint64_t, "=r", )
build_write_atomic(write_u8_atomic, "b", uint8_t, "q", )
build_write_atomic(write_u16_atomic, "w", uint16_t, "r", )
build_write_atomic(write_u32_atomic, "l", uint32_t, "r", )
-
-build_read_atomic(read_u64_atomic, "q", uint64_t, "=r", )
build_write_atomic(write_u64_atomic, "q", uint64_t, "r", )
#undef build_read_atomic
#undef build_write_atomic
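/*
 * Deliberately never defined: a read_atomic()/write_atomic() use with an
 * unhandled operand size leaves this call in the object file, turning the
 * mistake into a link-time failure.
 */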
void __bad_atomic_size(void);
#define read_atomic(p) ({ \
    unsigned long x_; \
    switch ( sizeof(*(p)) ) { \
    case 1: x_ = read_u8_atomic((uint8_t *)(p)); break; \
    case 2: x_ = read_u16_atomic((uint16_t *)(p)); break; \
    case 4: x_ = read_u32_atomic((uint32_t *)(p)); break; \
    case 8: x_ = read_u64_atomic((uint64_t *)(p)); break; \
    default: x_ = 0; __bad_atomic_size(); break; \
    } \
    (typeof(*(p)))x_; \
})
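/*
 * Illustrative use, not part of the patch: the final (typeof(*(p)))x_
 * makes read_atomic() an expression of the pointee's type, so a
 * hypothetical caller can write e.g.
 *
 *     uint16_t idx = read_atomic(&ring->req_prod);
 *
 * where 'ring' and 'req_prod' are made-up names for some shared state.
 */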
-#define write_atomic(p, x) ({ \
- typeof(*(p)) __x = (x); \
- unsigned long x_ = (unsigned long)__x; \
- switch ( sizeof(*(p)) ) { \
- case 1: write_u8_atomic((uint8_t *)(p), (uint8_t)x_); break; \
- case 2: write_u16_atomic((uint16_t *)(p), (uint16_t)x_); break; \
- case 4: write_u32_atomic((uint32_t *)(p), (uint32_t)x_); break; \
- case 8: write_u64_atomic((uint64_t *)(p), (uint64_t)x_); break; \
- default: __bad_atomic_size(); break; \
- } \
- __x; \
+#define write_atomic(p, x) ({ \
+ typeof(*(p)) __x = (x); \
+ unsigned long x_ = (unsigned long)__x; \
+ switch ( sizeof(*(p)) ) { \
+ case 1: write_u8_atomic((uint8_t *)(p), x_); break; \
+ case 2: write_u16_atomic((uint16_t *)(p), x_); break; \
+ case 4: write_u32_atomic((uint32_t *)(p), x_); break; \
+ case 8: write_u64_atomic((uint64_t *)(p), x_); break; \
+ default: __bad_atomic_size(); break; \
+ } \
})
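/*
 * Illustrative consequence of dropping the trailing __x: write_atomic()
 * no longer yields a value, so it can only appear as a statement:
 *
 *     write_atomic(&ring->req_prod, idx);        // fine
 *     idx = write_atomic(&ring->req_prod, idx);  // now a build error
 *
 * ('ring' and 'req_prod' are the same made-up names as above.)
 */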
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 */
static always_inline unsigned long __xchg(
    unsigned long x, volatile void *ptr, int size)
{
    switch ( size )
    {
case 1:
asm volatile ( "xchgb %b0,%1"
: "=q" (x)
- : "m" (*__xg((volatile void *)ptr)), "0" (x)
+ : "m" (*__xg(ptr)), "0" (x)
: "memory" );
break;
case 2:
asm volatile ( "xchgw %w0,%1"
: "=r" (x)
- : "m" (*__xg((volatile void *)ptr)), "0" (x)
+ : "m" (*__xg(ptr)), "0" (x)
: "memory" );
break;
case 4:
asm volatile ( "xchgl %k0,%1"
: "=r" (x)
- : "m" (*__xg((volatile void *)ptr)), "0" (x)
+ : "m" (*__xg(ptr)), "0" (x)
: "memory" );
break;
case 8:
asm volatile ( "xchgq %0,%1"
: "=r" (x)
- : "m" (*__xg((volatile void *)ptr)), "0" (x)
+ : "m" (*__xg(ptr)), "0" (x)
: "memory" );
break;
    }

    return x;
}
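/*
 * For context, the wrapper that dispatches into the switch above; a
 * sketch assuming the conventional Xen xchg() definition:
 *
 *     #define xchg(ptr,v) \
 *         ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
 *                                     sizeof(*(ptr))))
 */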
static always_inline unsigned long __cmpxchg(
    volatile void *ptr, unsigned long old, unsigned long new, int size)
{
    unsigned long prev;

    switch ( size )
    {
    case 1:
asm volatile ( "lock; cmpxchgb %b1,%2"
: "=a" (prev)
- : "q" (new), "m" (*__xg((volatile void *)ptr)),
+ : "q" (new), "m" (*__xg(ptr)),
"0" (old)
: "memory" );
return prev;
case 2:
asm volatile ( "lock; cmpxchgw %w1,%2"
: "=a" (prev)
- : "r" (new), "m" (*__xg((volatile void *)ptr)),
+ : "r" (new), "m" (*__xg(ptr)),
"0" (old)
: "memory" );
return prev;
case 4:
asm volatile ( "lock; cmpxchgl %k1,%2"
: "=a" (prev)
- : "r" (new), "m" (*__xg((volatile void *)ptr)),
+ : "r" (new), "m" (*__xg(ptr)),
"0" (old)
: "memory" );
return prev;
case 8:
asm volatile ( "lock; cmpxchgq %1,%2"
: "=a" (prev)
- : "r" (new), "m" (*__xg((volatile void *)ptr)),
+ : "r" (new), "m" (*__xg(ptr)),
"0" (old)
: "memory" );
        return prev;
    }

    return old;
}
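/*
 * Illustrative compare-and-swap retry loop on top of this helper,
 * assuming the conventional cmpxchg() wrapper macro and a hypothetical
 * shared 'counter':
 *
 *     unsigned long old, new;
 *
 *     do {
 *         old = read_atomic(&counter);
 *         new = old + 1;
 *     } while ( cmpxchg(&counter, old, new) != old );
 */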