#include <asm/processor.h>
#include <asm/atomic.h>
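+/*
+ * Per-CPU scratch cpumask used by _percpu_write_lock() to track which CPUs
+ * may still hold a fast path read lock on the resource being write locked.
+ */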
+static DEFINE_PER_CPU(cpumask_t, percpu_rwlock_readers);
+
#ifndef NDEBUG
static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);
return (lock->lock == RW_WRITE_FLAG); /* writer in critical section? */
}
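+/*
+ * Writer slow path: take the underlying rwlock to exclude other writers and
+ * slow path readers, set writer_activating so that new readers fall back to
+ * read_lock(), then spin until no online CPU's per_cpudata pointer still
+ * references this lock, i.e. until all fast path readers have drained.
+ */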
+void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
+ percpu_rwlock_t *percpu_rwlock)
+{
+ unsigned int cpu;
+ cpumask_t *rwlock_readers = &this_cpu(percpu_rwlock_readers);
+
+ /* Validate the correct per_cpudata variable has been provided. */
+ _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+ /*
+ * First take the write lock to protect against other writers or slow
+ * path readers.
+ */
+ write_lock(&percpu_rwlock->rwlock);
+
+    /* Set writer_activating so that new readers switch to read_lock(). */
+ percpu_rwlock->writer_activating = 1;
+ smp_mb();
+
+ /* Using a per cpu cpumask is only safe if there is no nesting. */
+ ASSERT(!in_irq());
+ cpumask_copy(rwlock_readers, &cpu_online_map);
+
+ /* Check if there are any percpu readers in progress on this rwlock. */
+ for ( ; ; )
+ {
+ for_each_cpu(cpu, rwlock_readers)
+ {
+ /*
+ * Remove any percpu readers not contending on this rwlock
+ * from our check mask.
+ */
+ if ( per_cpu_ptr(per_cpudata, cpu) != percpu_rwlock )
+ __cpumask_clear_cpu(cpu, rwlock_readers);
+ }
+ /* Check if we've cleared all percpu readers from check mask. */
+ if ( cpumask_empty(rwlock_readers) )
+ break;
+ /* Give the coherency fabric a break. */
+ cpu_relax();
+    }
+}
+
#ifdef LOCK_PROFILE
struct lock_profile_anc {
#include <asm/system.h>
#include <asm/spinlock.h>
+#include <asm/types.h>
+#include <xen/percpu.h>
#ifndef NDEBUG
struct lock_debug {
#define rw_is_locked(l) _rw_is_locked(l)
#define rw_is_write_locked(l) _rw_is_write_locked(l)
+typedef struct percpu_rwlock percpu_rwlock_t;
+
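+/*
+ * A percpu rwlock pairs a conventional rwlock, used by writers and by slow
+ * path readers, with a writer_activating flag which tells readers to stop
+ * using the per-CPU fast path.  In debug builds, percpu_owner records the
+ * per-CPU variable the lock was initialised with, so that mismatched usage
+ * can be caught by _percpu_rwlock_owner_check().
+ */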
+struct percpu_rwlock {
+ rwlock_t rwlock;
+ bool_t writer_activating;
+#ifndef NDEBUG
+ percpu_rwlock_t **percpu_owner;
+#endif
+};
+
+#ifndef NDEBUG
+#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0, owner }
+static inline void _percpu_rwlock_owner_check(percpu_rwlock_t **per_cpudata,
+ percpu_rwlock_t *percpu_rwlock)
+{
+ ASSERT(per_cpudata == percpu_rwlock->percpu_owner);
+}
+#else
+#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0 }
+#define _percpu_rwlock_owner_check(data, lock) ((void)0)
+#endif
+
+#define DEFINE_PERCPU_RWLOCK_RESOURCE(l, owner) \
+ percpu_rwlock_t l = PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner))
+#define percpu_rwlock_resource_init(l, owner) \
+ (*(l) = (percpu_rwlock_t)PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner)))
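+/*
+ * Run-time initialisation sketch, using the hypothetical names foo, my_lock
+ * and my_rwlock_owner: a resource embedded in an allocated structure can be
+ * initialised with
+ *
+ *     percpu_rwlock_resource_init(&foo->my_lock, my_rwlock_owner);
+ *
+ * where my_rwlock_owner is the per-CPU variable, declared with
+ * DEFINE_PERCPU_RWLOCK_GLOBAL() below, which every user of my_lock must
+ * also pass to the percpu_{read,write}_{lock,unlock}() wrappers.
+ */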
+
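+/*
+ * Reader fast path: publish this lock in the caller's per-CPU pointer so a
+ * writer can see that a read is in progress.  If the pointer is already in
+ * use by another percpu rwlock, fall back to holding the underlying
+ * read_lock(); if a writer is activating, wait on the read_lock() for
+ * fairness before continuing on the fast path.
+ */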
+static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
+ percpu_rwlock_t *percpu_rwlock)
+{
+ /* Validate the correct per_cpudata variable has been provided. */
+ _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+ /* We cannot support recursion on the same lock. */
+ ASSERT(this_cpu_ptr(per_cpudata) != percpu_rwlock);
+    /*
+     * Detect using a second percpu_rwlock_t simultaneously and fall back
+     * to the standard read_lock.
+     */
+    if ( unlikely(this_cpu_ptr(per_cpudata) != NULL) )
+ {
+ read_lock(&percpu_rwlock->rwlock);
+ return;
+ }
+
+ /* Indicate this cpu is reading. */
+ this_cpu_ptr(per_cpudata) = percpu_rwlock;
+ smp_mb();
+ /* Check if a writer is waiting. */
+ if ( unlikely(percpu_rwlock->writer_activating) )
+ {
+ /* Let the waiting writer know we aren't holding the lock. */
+ this_cpu_ptr(per_cpudata) = NULL;
+ /* Wait using the read lock to keep the lock fair. */
+ read_lock(&percpu_rwlock->rwlock);
+ /* Set the per CPU data again and continue. */
+ this_cpu_ptr(per_cpudata) = percpu_rwlock;
+ /* Drop the read lock because we don't need it anymore. */
+ read_unlock(&percpu_rwlock->rwlock);
+ }
+}
+
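+/*
+ * Reader unlock: clear the per-CPU pointer on the fast path, or drop the
+ * underlying read_lock() if this CPU had fallen back to the slow path.
+ */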
+static inline void _percpu_read_unlock(percpu_rwlock_t **per_cpudata,
+ percpu_rwlock_t *percpu_rwlock)
+{
+ /* Validate the correct per_cpudata variable has been provided. */
+ _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+    /* Verify the read lock was taken for this lock. */
+    ASSERT(this_cpu_ptr(per_cpudata) != NULL);
+    /*
+     * Detect using a second percpu_rwlock_t simultaneously and fall back
+     * to the standard read_unlock.
+     */
+    if ( unlikely(this_cpu_ptr(per_cpudata) != percpu_rwlock) )
+ {
+ read_unlock(&percpu_rwlock->rwlock);
+ return;
+ }
+ this_cpu_ptr(per_cpudata) = NULL;
+ smp_wmb();
+}
+
+/* Don't inline percpu write lock as it's a complex function. */
+void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
+ percpu_rwlock_t *percpu_rwlock);
+
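+/*
+ * Writer unlock: clear writer_activating so that readers may use the fast
+ * path again, then release the underlying rwlock.
+ */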
+static inline void _percpu_write_unlock(percpu_rwlock_t **per_cpudata,
+ percpu_rwlock_t *percpu_rwlock)
+{
+ /* Validate the correct per_cpudata variable has been provided. */
+ _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+ ASSERT(percpu_rwlock->writer_activating);
+ percpu_rwlock->writer_activating = 0;
+ write_unlock(&percpu_rwlock->rwlock);
+}
+
+#define percpu_rw_is_write_locked(l) _rw_is_write_locked(&((l)->rwlock))
+
+#define percpu_read_lock(percpu, lock) \
+ _percpu_read_lock(&get_per_cpu_var(percpu), lock)
+#define percpu_read_unlock(percpu, lock) \
+ _percpu_read_unlock(&get_per_cpu_var(percpu), lock)
+#define percpu_write_lock(percpu, lock) \
+ _percpu_write_lock(&get_per_cpu_var(percpu), lock)
+#define percpu_write_unlock(percpu, lock) \
+ _percpu_write_unlock(&get_per_cpu_var(percpu), lock)
+
+#define DEFINE_PERCPU_RWLOCK_GLOBAL(name) DEFINE_PER_CPU(percpu_rwlock_t *, \
+ name)
+#define DECLARE_PERCPU_RWLOCK_GLOBAL(name) DECLARE_PER_CPU(percpu_rwlock_t *, \
+ name)
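+/*
+ * Illustrative usage, with the hypothetical names my_rwlock_owner and
+ * my_lock:
+ *
+ *     DEFINE_PERCPU_RWLOCK_GLOBAL(my_rwlock_owner);
+ *     static DEFINE_PERCPU_RWLOCK_RESOURCE(my_lock, my_rwlock_owner);
+ *
+ *     percpu_read_lock(my_rwlock_owner, &my_lock);
+ *     ... read side critical section ...
+ *     percpu_read_unlock(my_rwlock_owner, &my_lock);
+ *
+ *     percpu_write_lock(my_rwlock_owner, &my_lock);
+ *     ... write side critical section ...
+ *     percpu_write_unlock(my_rwlock_owner, &my_lock);
+ *
+ * The owner variable passed to the wrappers must be the one the resource
+ * was initialised with; debug builds enforce this through
+ * _percpu_rwlock_owner_check().
+ */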
+
#endif /* __SPINLOCK_H__ */