rwlock: allow arch to override read_unlock() atomic
author     Jan Beulich <jbeulich@suse.com>
           Fri, 9 Jan 2015 16:31:05 +0000 (17:31 +0100)
committer  Jan Beulich <jbeulich@suse.com>
           Fri, 9 Jan 2015 16:31:05 +0000 (17:31 +0100)
On x86, LOCK DEC is cheaper than LOCK CMPXCHG and doesn't require a
retry loop around it.
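
(Illustrative aside, not part of the commit: the trade-off described above can be seen with plain C11 atomics. The helper names and the use of <stdatomic.h> below are made up for this sketch; Xen uses its own cmpxchg() and inline assembly instead.)

/* Sketch only: releasing a read lock means decrementing a reader count. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t readers;

/* Portable fallback style: a compare-and-swap loop that must retry
 * whenever another CPU changed the count in the meantime. */
static void read_unlock_cmpxchg(void)
{
    uint32_t x = atomic_load(&readers);

    /* On failure, x is refreshed with the current value; try again. */
    while ( !atomic_compare_exchange_weak(&readers, &x, x - 1) )
        ;
}

/* x86-friendly style: a single atomic decrement, no retry loop
 * (compilers typically emit a LOCK-prefixed instruction for this). */
static void read_unlock_dec(void)
{
    atomic_fetch_sub(&readers, 1);
}

int main(void)
{
    atomic_store(&readers, 2);      /* pretend two readers hold the lock */
    read_unlock_cmpxchg();
    read_unlock_dec();
    printf("readers left: %u\n", (unsigned)atomic_load(&readers));
    return 0;
}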

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
xen/common/spinlock.c
xen/include/asm-x86/spinlock.h

diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 13340a3f61cfccc4d469ff1c1c79156b0304e6fd..f6925ec5f76353309ae2c1b891bbcf336e45846b 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -333,14 +333,18 @@ int _read_trylock(rwlock_t *lock)
     return 1;
 }
 
-void _read_unlock(rwlock_t *lock)
-{
-    uint32_t x, y;
+#ifndef _raw_read_unlock
+# define _raw_read_unlock(l) do {                      \
+    uint32_t x = (l)->lock, y;                         \
+    while ( (y = cmpxchg(&(l)->lock, x, x - 1)) != x ) \
+        x = y;                                         \
+} while (0)
+#endif
 
+inline void _read_unlock(rwlock_t *lock)
+{
     preempt_enable();
-    x = lock->lock;
-    while ( (y = cmpxchg(&lock->lock, x, x-1)) != x )
-        x = y;
+    _raw_read_unlock(lock);
 }
 
 void _read_unlock_irq(rwlock_t *lock)
diff --git a/xen/include/asm-x86/spinlock.h b/xen/include/asm-x86/spinlock.h
index 06d9b048b27739a804aee548568cd0d2e96ac68f..757e20b86131505f9ed5f2abf0b3006223608acf 100644
--- a/xen/include/asm-x86/spinlock.h
+++ b/xen/include/asm-x86/spinlock.h
@@ -31,4 +31,7 @@ static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
     return (oldval > 0);
 }
 
+#define _raw_read_unlock(l) \
+    asm volatile ( "lock; dec%z0 %0" : "+m" ((l)->lock) :: "memory" )
+
 #endif /* __ASM_SPINLOCK_H */
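
For reference, a standalone sketch of how the two hunks fit together (hypothetical layout, with GCC's __sync_val_compare_and_swap standing in for Xen's cmpxchg() and a stub rwlock_t so it compiles on its own): the architecture header gets the first chance to define _raw_read_unlock(), and the generic cmpxchg loop is compiled only when it has not.

#include <stdint.h>
#include <stdio.h>

/* Stub lock type for the sketch; Xen's rwlock_t carries more state. */
typedef struct { volatile uint32_t lock; } rwlock_t;

/* What the asm-x86 header now provides: one locked decrement. */
#if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
# define _raw_read_unlock(l) \
    asm volatile ( "lock; dec%z0 %0" : "+m" ((l)->lock) :: "memory" )
#endif

/* Generic fallback in common code: used only when no arch override
 * exists (guarded by #ifndef, exactly as in the patch above). */
#ifndef _raw_read_unlock
# define _raw_read_unlock(l) do {                                    \
    uint32_t x = (l)->lock, y;                                       \
    while ( (y = __sync_val_compare_and_swap(&(l)->lock, x, x - 1))  \
            != x )                                                   \
        x = y;                                                       \
} while (0)
#endif

int main(void)
{
    rwlock_t lock = { .lock = 1 };  /* one reader currently holds it */

    _raw_read_unlock(&lock);
    printf("lock word after read_unlock: %u\n", (unsigned)lock.lock);
    return 0;
}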