xenbits.xensource.com Git - people/liuw/libxenctrl-split/xen.git/commitdiff
spinlock: move rwlock API and per-cpu rwlocks into their own files
author     Jennifer Herbert <jennifer.herbert@citrix.com>
Wed, 3 Feb 2016 13:09:09 +0000 (14:09 +0100)
committer  Jan Beulich <jbeulich@suse.com>
Wed, 3 Feb 2016 13:09:09 +0000 (14:09 +0100)
In preparation for a replacement read-write lock implementation, move
the API and the per-cpu read-write locks into their own files.

Signed-off-by: Jennifer Herbert <jennifer.herbert@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
xen/arch/x86/mm/mem_sharing.c
xen/common/Makefile
xen/common/rwlock.c [new file with mode: 0644]
xen/common/spinlock.c
xen/include/asm-x86/mm.h
xen/include/xen/grant_table.h
xen/include/xen/rwlock.h [new file with mode: 0644]
xen/include/xen/sched.h
xen/include/xen/spinlock.h
xen/xsm/flask/ss/services.c

index a95e10585b20dc6f35205b910a9f4d0526f42aea..a5224239df76f69d97d86fed44aa4ad1eed9980f 100644 (file)
@@ -23,6 +23,7 @@
 #include <xen/types.h>
 #include <xen/domain_page.h>
 #include <xen/spinlock.h>
+#include <xen/rwlock.h>
 #include <xen/mm.h>
 #include <xen/grant_table.h>
 #include <xen/sched.h>
index 4df71eeb1b5752c3a43b283648be11fee606b583..6e82b33057bdefa7c86bcb5569a037f0abfeff6e 100644 (file)
@@ -30,6 +30,7 @@ obj-y += rangeset.o
 obj-y += radix-tree.o
 obj-y += rbtree.o
 obj-y += rcupdate.o
+obj-y += rwlock.o
 obj-$(CONFIG_SCHED_ARINC653) += sched_arinc653.o
 obj-$(CONFIG_SCHED_CREDIT) += sched_credit.o
 obj-$(CONFIG_SCHED_CREDIT2) += sched_credit2.o
diff --git a/xen/common/rwlock.c b/xen/common/rwlock.c
new file mode 100644 (file)
index 0000000..410d4dc
--- /dev/null
@@ -0,0 +1,47 @@
+#include <xen/rwlock.h>
+#include <xen/irq.h>
+
+static DEFINE_PER_CPU(cpumask_t, percpu_rwlock_readers);
+
+void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
+                percpu_rwlock_t *percpu_rwlock)
+{
+    unsigned int cpu;
+    cpumask_t *rwlock_readers = &this_cpu(percpu_rwlock_readers);
+
+    /* Validate the correct per_cpudata variable has been provided. */
+    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+    /*
+     * First take the write lock to protect against other writers or slow
+     * path readers.
+     */
+    write_lock(&percpu_rwlock->rwlock);
+
+    /* Now set the global variable so that readers start using read_lock. */
+    percpu_rwlock->writer_activating = 1;
+    smp_mb();
+
+    /* Using a per cpu cpumask is only safe if there is no nesting. */
+    ASSERT(!in_irq());
+    cpumask_copy(rwlock_readers, &cpu_online_map);
+
+    /* Check if there are any percpu readers in progress on this rwlock. */
+    for ( ; ; )
+    {
+        for_each_cpu(cpu, rwlock_readers)
+        {
+            /*
+             * Remove any percpu readers not contending on this rwlock
+             * from our check mask.
+             */
+            if ( per_cpu_ptr(per_cpudata, cpu) != percpu_rwlock )
+                __cpumask_clear_cpu(cpu, rwlock_readers);
+        }
+        /* Check if we've cleared all percpu readers from check mask. */
+        if ( cpumask_empty(rwlock_readers) )
+            break;
+        /* Give the coherency fabric a break. */
+        cpu_relax();
+    };
+}
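
For readers unfamiliar with this primitive: _percpu_write_lock() pairs with the per-CPU reader fast path added in xen/include/xen/rwlock.h below. The writer first takes the backing rwlock to exclude other writers and slow-path readers, then raises writer_activating so that new readers divert onto that rwlock, and finally spins until no online CPU still advertises an in-progress fast-path read of this particular lock. The standalone C11/pthreads model below is editorial illustration only, not part of this patch; the model_* names and the one-flag-per-thread layout are invented stand-ins for Xen's per-CPU pointer and barrier primitives.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define NCPUS 4

static pthread_rwlock_t slow_rwlock = PTHREAD_RWLOCK_INITIALIZER; /* backing rwlock */
static atomic_bool writer_activating;      /* models percpu_rwlock->writer_activating */
static atomic_uintptr_t reading[NCPUS];    /* models the per-CPU per_cpudata pointer */
static int lock_identity;                  /* stands in for the percpu_rwlock_t's address */

static void model_read_lock(unsigned int cpu)
{
    /* Fast path: advertise the read, then re-check for an activating writer. */
    atomic_store(&reading[cpu], (uintptr_t)&lock_identity);
    atomic_thread_fence(memory_order_seq_cst);            /* smp_mb() */
    if ( atomic_load(&writer_activating) )
    {
        /* Slow path: retract the claim and queue on the backing rwlock instead. */
        atomic_store(&reading[cpu], 0);
        pthread_rwlock_rdlock(&slow_rwlock);
        atomic_store(&reading[cpu], (uintptr_t)&lock_identity);
        pthread_rwlock_unlock(&slow_rwlock);
    }
}

static void model_read_unlock(unsigned int cpu)
{
    atomic_store(&reading[cpu], 0);
}

static void model_write_lock(void)
{
    /* Exclude other writers and slow-path readers. */
    pthread_rwlock_wrlock(&slow_rwlock);
    /* Divert new readers onto the slow path. */
    atomic_store(&writer_activating, true);
    atomic_thread_fence(memory_order_seq_cst);
    /* Drain: wait until no "CPU" still advertises a fast-path read of this lock. */
    for ( unsigned int cpu = 0; cpu < NCPUS; cpu++ )
        while ( atomic_load(&reading[cpu]) == (uintptr_t)&lock_identity )
            ;                                             /* cpu_relax() in Xen */
}

static void model_write_unlock(void)
{
    atomic_store(&writer_activating, false);
    pthread_rwlock_unlock(&slow_rwlock);
}

int main(void)
{
    model_write_lock();   /* exclusive section: no model_read_lock() can complete here */
    model_write_unlock();
    model_read_lock(0);   /* uncontended read: touches only this thread's slot */
    model_read_unlock(0);
    return 0;
}

Either the reader's store to its per-CPU slot becomes visible to the writer's drain loop, or the reader observes writer_activating and falls back to the backing rwlock; the two seq_cst fences stand in for the smp_mb() pairing in the real code.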
index bab1f95ff1f40bef24e1a859232228878bcb55d5..7b0cf6cdba423474f53a7a588b67eb4995e9bd00 100644 (file)
@@ -10,8 +10,6 @@
 #include <asm/processor.h>
 #include <asm/atomic.h>
 
-static DEFINE_PER_CPU(cpumask_t, percpu_rwlock_readers);
-
 #ifndef NDEBUG
 
 static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);
@@ -494,49 +492,6 @@ int _rw_is_write_locked(rwlock_t *lock)
     return (lock->lock == RW_WRITE_FLAG); /* writer in critical section? */
 }
 
-void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
-                percpu_rwlock_t *percpu_rwlock)
-{
-    unsigned int cpu;
-    cpumask_t *rwlock_readers = &this_cpu(percpu_rwlock_readers);
-
-    /* Validate the correct per_cpudata variable has been provided. */
-    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
-
-    /* 
-     * First take the write lock to protect against other writers or slow 
-     * path readers.
-     */
-    write_lock(&percpu_rwlock->rwlock);
-
-    /* Now set the global variable so that readers start using read_lock. */
-    percpu_rwlock->writer_activating = 1;
-    smp_mb();
-
-    /* Using a per cpu cpumask is only safe if there is no nesting. */
-    ASSERT(!in_irq());
-    cpumask_copy(rwlock_readers, &cpu_online_map);
-
-    /* Check if there are any percpu readers in progress on this rwlock. */
-    for ( ; ; )
-    {
-        for_each_cpu(cpu, rwlock_readers)
-        {
-            /* 
-             * Remove any percpu readers not contending on this rwlock
-             * from our check mask.
-             */
-            if ( per_cpu_ptr(per_cpudata, cpu) != percpu_rwlock )
-                __cpumask_clear_cpu(cpu, rwlock_readers);
-        }
-        /* Check if we've cleared all percpu readers from check mask. */
-        if ( cpumask_empty(rwlock_readers) )
-            break;
-        /* Give the coherency fabric a break. */
-        cpu_relax();
-    };
-}
-
 #ifdef LOCK_PROFILE
 
 struct lock_profile_anc {
index 759841466211753d0f62b1cee84aa0760fb97106..4560debcf9fa50545f07076d645da973575491f9 100644 (file)
@@ -5,6 +5,7 @@
 #include <xen/config.h>
 #include <xen/list.h>
 #include <xen/spinlock.h>
+#include <xen/rwlock.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
 #include <asm/x86_emulate.h>
index b4f064e36292145d093b8d6f3e969183d3e29ebf..32b0ecd435000c52a70599da82daea1d826bb60f 100644 (file)
@@ -23,6 +23,7 @@
 #ifndef __XEN_GRANT_TABLE_H__
 #define __XEN_GRANT_TABLE_H__
 
+#include <xen/rwlock.h>
 #include <public/grant_table.h>
 #include <asm/page.h>
 #include <asm/grant_table.h>
diff --git a/xen/include/xen/rwlock.h b/xen/include/xen/rwlock.h
new file mode 100644 (file)
index 0000000..9d87783
--- /dev/null
@@ -0,0 +1,150 @@
+#ifndef __RWLOCK_H__
+#define __RWLOCK_H__
+
+#include <xen/spinlock.h>
+
+#define read_lock(l)                  _read_lock(l)
+#define read_lock_irq(l)              _read_lock_irq(l)
+#define read_lock_irqsave(l, f)                                 \
+    ({                                                          \
+        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
+        ((f) = _read_lock_irqsave(l));                          \
+    })
+
+#define read_unlock(l)                _read_unlock(l)
+#define read_unlock_irq(l)            _read_unlock_irq(l)
+#define read_unlock_irqrestore(l, f)  _read_unlock_irqrestore(l, f)
+#define read_trylock(l)               _read_trylock(l)
+
+#define write_lock(l)                 _write_lock(l)
+#define write_lock_irq(l)             _write_lock_irq(l)
+#define write_lock_irqsave(l, f)                                \
+    ({                                                          \
+        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
+        ((f) = _write_lock_irqsave(l));                         \
+    })
+#define write_trylock(l)              _write_trylock(l)
+
+#define write_unlock(l)               _write_unlock(l)
+#define write_unlock_irq(l)           _write_unlock_irq(l)
+#define write_unlock_irqrestore(l, f) _write_unlock_irqrestore(l, f)
+
+#define rw_is_locked(l)               _rw_is_locked(l)
+#define rw_is_write_locked(l)         _rw_is_write_locked(l)
+
+
+typedef struct percpu_rwlock percpu_rwlock_t;
+
+struct percpu_rwlock {
+    rwlock_t            rwlock;
+    bool_t              writer_activating;
+#ifndef NDEBUG
+    percpu_rwlock_t     **percpu_owner;
+#endif
+};
+
+#ifndef NDEBUG
+#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0, owner }
+static inline void _percpu_rwlock_owner_check(percpu_rwlock_t **per_cpudata,
+                                         percpu_rwlock_t *percpu_rwlock)
+{
+    ASSERT(per_cpudata == percpu_rwlock->percpu_owner);
+}
+#else
+#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0 }
+#define _percpu_rwlock_owner_check(data, lock) ((void)0)
+#endif
+
+#define DEFINE_PERCPU_RWLOCK_RESOURCE(l, owner) \
+    percpu_rwlock_t l = PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner))
+#define percpu_rwlock_resource_init(l, owner) \
+    (*(l) = (percpu_rwlock_t)PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner)))
+
+static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
+                                         percpu_rwlock_t *percpu_rwlock)
+{
+    /* Validate the correct per_cpudata variable has been provided. */
+    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+    /* We cannot support recursion on the same lock. */
+    ASSERT(this_cpu_ptr(per_cpudata) != percpu_rwlock);
+    /*
+     * Detect using a second percpu_rwlock_t simultaneously and fall back
+     * to standard read_lock.
+     */
+    if ( unlikely(this_cpu_ptr(per_cpudata) != NULL ) )
+    {
+        read_lock(&percpu_rwlock->rwlock);
+        return;
+    }
+
+    /* Indicate this cpu is reading. */
+    this_cpu_ptr(per_cpudata) = percpu_rwlock;
+    smp_mb();
+    /* Check if a writer is waiting. */
+    if ( unlikely(percpu_rwlock->writer_activating) )
+    {
+        /* Let the waiting writer know we aren't holding the lock. */
+        this_cpu_ptr(per_cpudata) = NULL;
+        /* Wait using the read lock to keep the lock fair. */
+        read_lock(&percpu_rwlock->rwlock);
+        /* Set the per CPU data again and continue. */
+        this_cpu_ptr(per_cpudata) = percpu_rwlock;
+        /* Drop the read lock because we don't need it anymore. */
+        read_unlock(&percpu_rwlock->rwlock);
+    }
+}
+
+static inline void _percpu_read_unlock(percpu_rwlock_t **per_cpudata,
+                percpu_rwlock_t *percpu_rwlock)
+{
+    /* Validate the correct per_cpudata variable has been provided. */
+    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+    /* Verify the read lock was taken for this lock */
+    ASSERT(this_cpu_ptr(per_cpudata) != NULL);
+    /*
+     * Detect using a second percpu_rwlock_t simultaneously and fall back
+     * to standard read_unlock.
+     */
+    if ( unlikely(this_cpu_ptr(per_cpudata) != percpu_rwlock ) )
+    {
+        read_unlock(&percpu_rwlock->rwlock);
+        return;
+    }
+    this_cpu_ptr(per_cpudata) = NULL;
+    smp_wmb();
+}
+
+/* Don't inline percpu write lock as it's a complex function. */
+void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
+                        percpu_rwlock_t *percpu_rwlock);
+
+static inline void _percpu_write_unlock(percpu_rwlock_t **per_cpudata,
+                percpu_rwlock_t *percpu_rwlock)
+{
+    /* Validate the correct per_cpudata variable has been provided. */
+    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+    ASSERT(percpu_rwlock->writer_activating);
+    percpu_rwlock->writer_activating = 0;
+    write_unlock(&percpu_rwlock->rwlock);
+}
+
+#define percpu_rw_is_write_locked(l)         _rw_is_write_locked(&((l)->rwlock))
+
+#define percpu_read_lock(percpu, lock) \
+    _percpu_read_lock(&get_per_cpu_var(percpu), lock)
+#define percpu_read_unlock(percpu, lock) \
+    _percpu_read_unlock(&get_per_cpu_var(percpu), lock)
+#define percpu_write_lock(percpu, lock) \
+    _percpu_write_lock(&get_per_cpu_var(percpu), lock)
+#define percpu_write_unlock(percpu, lock) \
+    _percpu_write_unlock(&get_per_cpu_var(percpu), lock)
+
+#define DEFINE_PERCPU_RWLOCK_GLOBAL(name) DEFINE_PER_CPU(percpu_rwlock_t *, \
+                                                         name)
+#define DECLARE_PERCPU_RWLOCK_GLOBAL(name) DECLARE_PER_CPU(percpu_rwlock_t *, \
+                                                           name)
+
+#endif /* __RWLOCK_H__ */
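
For context, here is a minimal usage sketch of the API this header now exposes. It is editorial illustration only, not part of this patch: the owner name my_rwlock_percpu, the lock name my_resource_lock and the two functions are invented, and the snippet assumes the normal Xen build environment.

#include <xen/rwlock.h>

/* One per-CPU owner pointer, shared by every lock of this class. */
static DEFINE_PERCPU_RWLOCK_GLOBAL(my_rwlock_percpu);

/* A lock instance tied to that per-CPU owner. */
static DEFINE_PERCPU_RWLOCK_RESOURCE(my_resource_lock, my_rwlock_percpu);

static void reader_side(void)
{
    /* Common case: the only shared state touched is this CPU's own pointer. */
    percpu_read_lock(my_rwlock_percpu, &my_resource_lock);
    /* ... read-side critical section ... */
    percpu_read_unlock(my_rwlock_percpu, &my_resource_lock);
}

static void writer_side(void)
{
    /* Blocks until every CPU's in-progress fast-path read has drained. */
    percpu_write_lock(my_rwlock_percpu, &my_resource_lock);
    /* ... exclusive critical section ... */
    percpu_write_unlock(my_rwlock_percpu, &my_resource_lock);
}

A lock embedded in a dynamically allocated structure would instead be set up at run time with percpu_rwlock_resource_init(&lock, my_rwlock_percpu), using the same per-CPU owner.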
index 587074574d1a1ff22b14ce3f755424e8ad180569..b47a3fe7cd43bdc47e6b715662998bbde467f24d 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <xen/types.h>
 #include <xen/spinlock.h>
+#include <xen/rwlock.h>
 #include <xen/shared.h>
 #include <xen/timer.h>
 #include <xen/rangeset.h>
index 22c4fc2b2236ea972bac1d3dae82794f66258a3c..765db51a43d48ddc5bbf2fb74079288b633285c0 100644 (file)
@@ -236,147 +236,4 @@ int _rw_is_write_locked(rwlock_t *lock);
 #define spin_lock_recursive(l)        _spin_lock_recursive(l)
 #define spin_unlock_recursive(l)      _spin_unlock_recursive(l)
 
-#define read_lock(l)                  _read_lock(l)
-#define read_lock_irq(l)              _read_lock_irq(l)
-#define read_lock_irqsave(l, f)                                 \
-    ({                                                          \
-        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
-        ((f) = _read_lock_irqsave(l));                          \
-    })
-
-#define read_unlock(l)                _read_unlock(l)
-#define read_unlock_irq(l)            _read_unlock_irq(l)
-#define read_unlock_irqrestore(l, f)  _read_unlock_irqrestore(l, f)
-#define read_trylock(l)               _read_trylock(l)
-
-#define write_lock(l)                 _write_lock(l)
-#define write_lock_irq(l)             _write_lock_irq(l)
-#define write_lock_irqsave(l, f)                                \
-    ({                                                          \
-        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
-        ((f) = _write_lock_irqsave(l));                         \
-    })
-#define write_trylock(l)              _write_trylock(l)
-
-#define write_unlock(l)               _write_unlock(l)
-#define write_unlock_irq(l)           _write_unlock_irq(l)
-#define write_unlock_irqrestore(l, f) _write_unlock_irqrestore(l, f)
-
-#define rw_is_locked(l)               _rw_is_locked(l)
-#define rw_is_write_locked(l)         _rw_is_write_locked(l)
-
-typedef struct percpu_rwlock percpu_rwlock_t;
-
-struct percpu_rwlock {
-    rwlock_t            rwlock;
-    bool_t              writer_activating;
-#ifndef NDEBUG
-    percpu_rwlock_t     **percpu_owner;
-#endif
-};
-
-#ifndef NDEBUG
-#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0, owner }
-static inline void _percpu_rwlock_owner_check(percpu_rwlock_t **per_cpudata,
-                                         percpu_rwlock_t *percpu_rwlock)
-{
-    ASSERT(per_cpudata == percpu_rwlock->percpu_owner);
-}
-#else
-#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0 }
-#define _percpu_rwlock_owner_check(data, lock) ((void)0)
-#endif
-
-#define DEFINE_PERCPU_RWLOCK_RESOURCE(l, owner) \
-    percpu_rwlock_t l = PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner))
-#define percpu_rwlock_resource_init(l, owner) \
-    (*(l) = (percpu_rwlock_t)PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner)))
-
-static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
-                                         percpu_rwlock_t *percpu_rwlock)
-{
-    /* Validate the correct per_cpudata variable has been provided. */
-    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
-
-    /* We cannot support recursion on the same lock. */
-    ASSERT(this_cpu_ptr(per_cpudata) != percpu_rwlock);
-    /* 
-     * Detect using a second percpu_rwlock_t simulatenously and fallback
-     * to standard read_lock.
-     */
-    if ( unlikely(this_cpu_ptr(per_cpudata) != NULL ) )
-    {
-        read_lock(&percpu_rwlock->rwlock);
-        return;
-    }
-
-    /* Indicate this cpu is reading. */
-    this_cpu_ptr(per_cpudata) = percpu_rwlock;
-    smp_mb();
-    /* Check if a writer is waiting. */
-    if ( unlikely(percpu_rwlock->writer_activating) )
-    {
-        /* Let the waiting writer know we aren't holding the lock. */
-        this_cpu_ptr(per_cpudata) = NULL;
-        /* Wait using the read lock to keep the lock fair. */
-        read_lock(&percpu_rwlock->rwlock);
-        /* Set the per CPU data again and continue. */
-        this_cpu_ptr(per_cpudata) = percpu_rwlock;
-        /* Drop the read lock because we don't need it anymore. */
-        read_unlock(&percpu_rwlock->rwlock);
-    }
-}
-
-static inline void _percpu_read_unlock(percpu_rwlock_t **per_cpudata,
-                percpu_rwlock_t *percpu_rwlock)
-{
-    /* Validate the correct per_cpudata variable has been provided. */
-    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
-
-    /* Verify the read lock was taken for this lock */
-    ASSERT(this_cpu_ptr(per_cpudata) != NULL);
-    /* 
-     * Detect using a second percpu_rwlock_t simulatenously and fallback
-     * to standard read_unlock.
-     */
-    if ( unlikely(this_cpu_ptr(per_cpudata) != percpu_rwlock ) )
-    {
-        read_unlock(&percpu_rwlock->rwlock);
-        return;
-    }
-    this_cpu_ptr(per_cpudata) = NULL;
-    smp_wmb();
-}
-
-/* Don't inline percpu write lock as it's a complex function. */
-void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
-                        percpu_rwlock_t *percpu_rwlock);
-
-static inline void _percpu_write_unlock(percpu_rwlock_t **per_cpudata,
-                percpu_rwlock_t *percpu_rwlock)
-{
-    /* Validate the correct per_cpudata variable has been provided. */
-    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
-
-    ASSERT(percpu_rwlock->writer_activating);
-    percpu_rwlock->writer_activating = 0;
-    write_unlock(&percpu_rwlock->rwlock);
-}
-
-#define percpu_rw_is_write_locked(l)         _rw_is_write_locked(&((l)->rwlock))
-
-#define percpu_read_lock(percpu, lock) \
-    _percpu_read_lock(&get_per_cpu_var(percpu), lock)
-#define percpu_read_unlock(percpu, lock) \
-    _percpu_read_unlock(&get_per_cpu_var(percpu), lock)
-#define percpu_write_lock(percpu, lock) \
-    _percpu_write_lock(&get_per_cpu_var(percpu), lock)
-#define percpu_write_unlock(percpu, lock) \
-    _percpu_write_unlock(&get_per_cpu_var(percpu), lock)
-
-#define DEFINE_PERCPU_RWLOCK_GLOBAL(name) DEFINE_PER_CPU(percpu_rwlock_t *, \
-                                                         name)
-#define DECLARE_PERCPU_RWLOCK_GLOBAL(name) DECLARE_PER_CPU(percpu_rwlock_t *, \
-                                                         name)
-
 #endif /* __SPINLOCK_H__ */
index f31d7d704ccaa15980e400cdd2778bb4b883ef83..9da358b295c60e5231c4795ab321017ad6c5f765 100644 (file)
@@ -40,6 +40,7 @@
 #include <xen/xmalloc.h>
 #include <xen/string.h>
 #include <xen/spinlock.h>
+#include <xen/rwlock.h>
 #include <xen/errno.h>
 #include "flask.h"
 #include "avc.h"