static inline void atomic_add(int i, atomic_t *v)
{
asm volatile (
- "lock; addl %1,%0"
+ "lock addl %1,%0"
: "=m" (*(volatile int *)&v->counter)
: "ir" (i), "m" (*(volatile int *)&v->counter) );
}
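/*
 * Minimal standalone sketch (not part of the patch): the same LOCK-prefixed
 * add written with "lock" as a true instruction prefix, as the hunks above
 * switch to.  Both "lock; addl" and "lock addl" assemble to the same machine
 * code with GNU as.  The my_* names below are hypothetical stand-ins for the
 * real atomic_t/atomic_add(), used only so this fragment compiles on its own.
 */
typedef struct { int counter; } my_atomic_t;

static inline void my_atomic_add(int i, my_atomic_t *v)
{
    /* "+m" folds the separate "=m" output / "m" input pair used above. */
    asm volatile ( "lock addl %1,%0"
                   : "+m" (v->counter)
                   : "ir" (i) );
}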
static inline void atomic_sub(int i, atomic_t *v)
{
asm volatile (
- "lock; subl %1,%0"
+ "lock subl %1,%0"
: "=m" (*(volatile int *)&v->counter)
: "ir" (i), "m" (*(volatile int *)&v->counter) );
}
{
bool c;
- asm volatile ( "lock; subl %[i], %[counter]\n\t"
+ asm volatile ( "lock subl %[i], %[counter]\n\t"
ASM_FLAG_OUT(, "setz %[zf]\n\t")
: [counter] "+m" (*(volatile int *)&v->counter),
[zf] ASM_FLAG_OUT("=@ccz", "=qm") (c)
static inline void atomic_inc(atomic_t *v)
{
asm volatile (
- "lock; incl %0"
+ "lock incl %0"
: "=m" (*(volatile int *)&v->counter)
: "m" (*(volatile int *)&v->counter) );
}
{
bool c;
- asm volatile ( "lock; incl %[counter]\n\t"
+ asm volatile ( "lock incl %[counter]\n\t"
ASM_FLAG_OUT(, "setz %[zf]\n\t")
: [counter] "+m" (*(volatile int *)&v->counter),
[zf] ASM_FLAG_OUT("=@ccz", "=qm") (c)
static inline void atomic_dec(atomic_t *v)
{
asm volatile (
- "lock; decl %0"
+ "lock decl %0"
: "=m" (*(volatile int *)&v->counter)
: "m" (*(volatile int *)&v->counter) );
}
{
bool c;
- asm volatile ( "lock; decl %[counter]\n\t"
+ asm volatile ( "lock decl %[counter]\n\t"
ASM_FLAG_OUT(, "setz %[zf]\n\t")
: [counter] "+m" (*(volatile int *)&v->counter),
[zf] ASM_FLAG_OUT("=@ccz", "=qm") (c)
{
bool c;
- asm volatile ( "lock; addl %[i], %[counter]\n\t"
+ asm volatile ( "lock addl %[i], %[counter]\n\t"
ASM_FLAG_OUT(, "sets %[sf]\n\t")
: [counter] "+m" (*(volatile int *)&v->counter),
[sf] ASM_FLAG_OUT("=@ccs", "=qm") (c)
*/
static inline void set_bit(int nr, volatile void *addr)
{
- asm volatile ( "lock; btsl %1,%0"
+ asm volatile ( "lock btsl %1,%0"
: "+m" (ADDR) : "Ir" (nr) : "memory");
}
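/*
 * Minimal standalone sketch (not part of the patch), assuming a plain cast
 * in place of Xen's ADDR macro: an atomic bit set built on a LOCK-prefixed
 * BTS, again with "lock" written as an instruction prefix.  my_set_bit is a
 * hypothetical name used only for illustration.
 */
static inline void my_set_bit(int nr, volatile void *addr)
{
    asm volatile ( "lock btsl %1,%0"
                   : "+m" (*(volatile int *)addr)
                   : "Ir" (nr)
                   : "memory" );
}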
#define set_bit(nr, addr) ({ \
*/
static inline void clear_bit(int nr, volatile void *addr)
{
- asm volatile ( "lock; btrl %1,%0"
+ asm volatile ( "lock btrl %1,%0"
: "+m" (ADDR) : "Ir" (nr) : "memory");
}
#define clear_bit(nr, addr) ({ \
*/
static inline void change_bit(int nr, volatile void *addr)
{
- asm volatile ( "lock; btcl %1,%0"
+ asm volatile ( "lock btcl %1,%0"
: "+m" (ADDR) : "Ir" (nr) : "memory");
}
#define change_bit(nr, addr) ({ \
{
int oldbit;
- asm volatile ( "lock; btsl %[nr], %[addr]\n\t"
+ asm volatile ( "lock btsl %[nr], %[addr]\n\t"
ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t")
: [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit),
[addr] "+m" (ADDR) : [nr] "Ir" (nr) : "memory" );
{
int oldbit;
- asm volatile ( "lock; btrl %[nr], %[addr]\n\t"
+ asm volatile ( "lock btrl %[nr], %[addr]\n\t"
ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t")
: [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit),
[addr] "+m" (ADDR) : [nr] "Ir" (nr) : "memory" );
{
int oldbit;
- asm volatile ( "lock; btcl %[nr], %[addr]\n\t"
+ asm volatile ( "lock btcl %[nr], %[addr]\n\t"
ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t")
: [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit),
[addr] "+m" (ADDR) : [nr] "Ir" (nr) : "memory" );
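/*
 * Minimal standalone sketch (not part of the patch): a test-and-set using
 * the GCC 6+/Clang flag-output constraint "=@ccc" to read CF directly,
 * which is the branch ASM_FLAG_OUT() selects when the compiler supports
 * flag outputs; otherwise the hunks above fall back to the explicit
 * "sbbl %[old], %[old]".  my_test_and_set_bit is a hypothetical name.
 */
static inline int my_test_and_set_bit(int nr, volatile void *addr)
{
    int oldbit;

    asm volatile ( "lock btsl %[nr], %[addr]"
                   : [old] "=@ccc" (oldbit),
                     [addr] "+m" (*(volatile int *)addr)
                   : [nr] "Ir" (nr)
                   : "memory" );

    return oldbit;
}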
#define _raw_read_unlock(l) \
BUILD_BUG_ON(sizeof((l)->lock) != 4); /* Clang doesn't support %z in asm. */ \
- asm volatile ( "lock; decl %0" : "+m" ((l)->lock) :: "memory" )
+ asm volatile ( "lock decl %0" : "+m" ((l)->lock) :: "memory" )
/*
* On x86 the only reordering is of reads with older writes. In the