#ifndef __RWLOCK_H__
#define __RWLOCK_H__
#include <xen/percpu.h>
+#include <xen/smp.h>
#include <xen/spinlock.h>
#include <asm/atomic.h>
#define DEFINE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
#define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)
-/*
- * Writer states & reader shift and bias.
- *
- * Writer field is 8 bit to allow for potential optimisation, see
- * _write_unlock().
- */
-#define _QW_WAITING 1 /* A writer is waiting */
-#define _QW_LOCKED 0xff /* A writer holds the lock */
-#define _QW_WMASK 0xff /* Writer mask.*/
-#define _QR_SHIFT 8 /* Reader count shift */
+/* Writer states & reader shift and bias. */
+#define _QW_CPUMASK 0xfffU /* Writer CPU mask */
+#define _QW_SHIFT 12 /* Writer flags shift */
+#define _QW_WAITING (1U << _QW_SHIFT) /* A writer is waiting */
+#define _QW_LOCKED (3U << _QW_SHIFT) /* A writer holds the lock */
+#define _QW_WMASK (3U << _QW_SHIFT) /* Writer mask */
+#define _QR_SHIFT 14 /* Reader count shift */
#define _QR_BIAS (1U << _QR_SHIFT)
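/*
 * Worked example (illustrative, not part of the patch): under the new
 * layout, bits 0-11 hold the write owner's CPU, bits 12-13 the writer
 * state, and bits 14 and up the reader count.  A write lock taken by
 * CPU 5 stores _QW_LOCKED | 5 == 0x3005, and one reader bias on top of
 * that yields 0x3005 + _QR_BIAS == 0x7005.  A hypothetical decoding
 * helper (assuming printk is visible at this point) might look like:
 */
static inline void _decode_cnts(unsigned int cnts)
{
    printk("readers=%u writer-state=%#x writer-cpu=%u\n",
           cnts >> _QR_SHIFT,
           (cnts & _QW_WMASK) >> _QW_SHIFT,
           cnts & _QW_CPUMASK);
}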
void queue_read_lock_slowpath(rwlock_t *lock);
void queue_write_lock_slowpath(rwlock_t *lock);
+static inline bool _is_write_locked_by_me(unsigned int cnts)
+{
+    BUILD_BUG_ON(_QW_CPUMASK < NR_CPUS);
+    return (cnts & _QW_WMASK) == _QW_LOCKED &&
+           (cnts & _QW_CPUMASK) == smp_processor_id();
+}
+
+static inline bool _can_read_lock(unsigned int cnts)
+{
+    return !(cnts & _QW_WMASK) || _is_write_locked_by_me(cnts);
+}
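/*
 * Illustrative sequence (not part of the patch): because
 * _can_read_lock() also accepts a lock that is write-locked by the
 * local CPU, code that already holds the write lock may take the read
 * lock recursively without deadlocking, e.g.:
 *
 *     write_lock(&l);      cnts == _QW_LOCKED | smp_processor_id()
 *     read_lock(&l);       fast path succeeds: this CPU is the writer
 *     read_unlock(&l);
 *     write_unlock(&l);
 */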
+
/*
 * _read_trylock - try to acquire read lock of a queue rwlock.
 * @lock : Pointer to queue rwlock structure.
 * Return: 1 if lock acquired, 0 if failed.
 */
static inline int _read_trylock(rwlock_t *lock)
{
    u32 cnts;

    cnts = atomic_read(&lock->cnts);
-    if ( likely(!(cnts & _QW_WMASK)) )
+    if ( likely(_can_read_lock(cnts)) )
    {
        cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
-        if ( likely(!(cnts & _QW_WMASK)) )
+        if ( likely(_can_read_lock(cnts)) )
            return 1;
        atomic_sub(_QR_BIAS, &lock->cnts);
    }
    return 0;
}

/*
 * _read_lock - acquire read lock of a queue rwlock.
 * @lock : Pointer to queue rwlock structure.
 */
static inline void _read_lock(rwlock_t *lock)
{
    u32 cnts;

    cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
-    if ( likely(!(cnts & _QW_WMASK)) )
+    if ( likely(_can_read_lock(cnts)) )
        return;

    /* The slowpath will decrement the reader count, if necessary. */
    queue_read_lock_slowpath(lock);
}

static inline int _rw_is_locked(rwlock_t *lock)
{
    return atomic_read(&lock->cnts);
}
+static inline unsigned int _write_lock_val(void)
+{
+    return _QW_LOCKED | smp_processor_id();
+}
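/*
 * Worked value (illustrative): on CPU 2, _write_lock_val() evaluates
 * to _QW_LOCKED | 2 == 0x3002, so the cmpxchg below both marks the
 * lock write-locked and records its owner in a single atomic update.
 */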
+
/*
 * queue_write_lock - acquire write lock of a queue rwlock.
 * @lock : Pointer to queue rwlock structure.
 */
static inline void _write_lock(rwlock_t *lock)
{
    /* Optimize for the unfair lock case where the fair flag is 0. */
-    if ( atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0 )
+    if ( atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0 )
        return;

    queue_write_lock_slowpath(lock);
}

static inline int _write_trylock(rwlock_t *lock)
{
    u32 cnts;

    cnts = atomic_read(&lock->cnts);
    if ( unlikely(cnts) )
        return 0;

-    return likely(atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0);
+    return likely(atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0);
}
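/*
 * Note (illustrative): unlike _read_trylock(), _write_trylock() does
 * not allow recursion; cnts must be exactly zero, so it fails even if
 * the local CPU already holds the lock as a reader or as the writer.
 */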
static inline void _write_unlock(rwlock_t *lock)
{
-    /*
-     * If the writer field is atomic, it can be cleared directly.
-     * Otherwise, an atomic subtraction will be used to clear it.
-     */
-    atomic_sub(_QW_LOCKED, &lock->cnts);
+    ASSERT(_is_write_locked_by_me(atomic_read(&lock->cnts)));
+    atomic_and(~(_QW_CPUMASK | _QW_WMASK), &lock->cnts);
}
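/*
 * Worked example (illustrative): if CPU 5 holds the write lock while a
 * waiting reader has already added its bias, cnts == 0x7005.  The
 * atomic_and() above clears the owner and lock bits (mask 0x3fff) but
 * preserves the reader count, leaving 0x4000; a plain write of zero
 * would lose that reader bias, and the old atomic_sub(_QW_LOCKED)
 * would leave stale owner bits behind.
 *
 * Usage sketch (hypothetical helper, not part of the patch), combining
 * _write_trylock() with _write_unlock():
 */
static inline bool _try_update(rwlock_t *lock, void (*update)(rwlock_t *))
{
    if ( !_write_trylock(lock) )
        return false;    /* lock busy: caller retries later */
    update(lock);
    _write_unlock(lock);
    return true;
}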
static inline void _write_unlock_irq(rwlock_t *lock)