Revert "x86,arm: remove asm/spinlock.h from all architectures"
author    Jan Beulich <jbeulich@suse.com>    Thu, 28 May 2015 09:59:34 +0000 (11:59 +0200)
committer Jan Beulich <jbeulich@suse.com>    Thu, 28 May 2015 09:59:34 +0000 (11:59 +0200)
This reverts commit e62e49e6d5d4e8d22f3df0b75443ede65a812435 as
its prerequisite 45fcc4568c is going to be reverted.

xen/arch/arm/README.LinuxPrimitives
xen/include/asm-arm/arm32/spinlock.h [new file with mode: 0644]
xen/include/asm-arm/arm64/spinlock.h [new file with mode: 0644]
xen/include/asm-arm/spinlock.h [new file with mode: 0644]
xen/include/asm-x86/spinlock.h [new file with mode: 0644]
xen/include/xen/spinlock.h

diff --git a/xen/arch/arm/README.LinuxPrimitives b/xen/arch/arm/README.LinuxPrimitives
index 3115f51d47c6081935aaf1f90361974bcf3f68cf..7f33fc723a0ccf69a3149550cbb75097510e4842 100644
--- a/xen/arch/arm/README.LinuxPrimitives
+++ b/xen/arch/arm/README.LinuxPrimitives
@@ -25,6 +25,16 @@ linux/arch/arm64/include/asm/atomic.h   xen/include/asm-arm/arm64/atomic.h
 
 ---------------------------------------------------------------------
 
+spinlocks: last sync @ v3.16-rc6 (last commit: 95c4189689f9)
+
+linux/arch/arm64/include/asm/spinlock.h xen/include/asm-arm/arm64/spinlock.h
+
+Skipped:
+  5686b06 arm64: lockref: add support for lockless lockrefs using cmpxchg
+  52ea2a5 arm64: locks: introduce ticket-based spinlock implementation
+
+---------------------------------------------------------------------
+
 mem*: last sync @ v3.16-rc6 (last commit: d875c9b37240)
 
 linux/arch/arm64/lib/memchr.S           xen/arch/arm/arm64/lib/memchr.S
@@ -93,6 +103,24 @@ linux/arch/arm/include/asm/atomic.h     xen/include/asm-arm/arm32/atomic.h
 
 ---------------------------------------------------------------------
 
+spinlocks: last sync: 15e7e5c1ebf5
+
+linux/arch/arm/include/asm/spinlock.h   xen/include/asm-arm/arm32/spinlock.h
+
+*** Linux has switched to ticket locks but we still use bitlocks.
+
+resync to v3.14-rc7:
+
+  7c8746a ARM: 7955/1: spinlock: ensure we have a compiler barrier before sev
+  0cbad9c ARM: 7854/1: lockref: add support for lockless lockrefs using cmpxchg64
+  9bb17be ARM: locks: prefetch the destination word for write prior to strex
+  27a8479 ARM: smp_on_up: move inline asm ALT_SMP patching macro out of spinlock.h
+  00efaa0 ARM: 7812/1: rwlocks: retry trylock operation if strex fails on free lock
+  afa31d8 ARM: 7811/1: locks: use early clobber in arch_spin_trylock
+  73a6fdc ARM: spinlock: use inner-shareable dsb variant prior to sev instruction
+
+---------------------------------------------------------------------
+
 mem*: last sync @ v3.16-rc6 (last commit: d98b90ea22b0)
 
 linux/arch/arm/lib/copy_template.S      xen/arch/arm/arm32/lib/copy_template.S
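The remark above that Linux has switched to ticket locks while this code keeps
a test-and-set "bitlock" is about fairness: with test-and-set, waiters can
overtake one another indefinitely, whereas a ticket lock grants the lock in
FIFO order. A minimal C11 sketch of the two schemes for comparison
(illustrative only, assuming generic <stdatomic.h> atomics; this is neither
the Xen nor the Linux implementation):

    #include <stdatomic.h>

    /* Test-and-set lock, the scheme the arm32 header below implements:
     * whoever wins the atomic exchange owns the lock; there is no
     * ordering among waiters. */
    typedef struct { atomic_uint lock; } tas_lock_t;

    static void tas_lock(tas_lock_t *l)
    {
        /* Spin until we swap 0 -> 1; acquire orders later accesses. */
        while ( atomic_exchange_explicit(&l->lock, 1, memory_order_acquire) )
            ;
    }

    static void tas_unlock(tas_lock_t *l)
    {
        atomic_store_explicit(&l->lock, 0, memory_order_release);
    }

    /* Ticket lock, the scheme Linux moved to: each CPU draws a ticket
     * and waits for its number to come up, so the lock is granted FIFO. */
    typedef struct { atomic_uint next, owner; } ticket_lock_t;

    static void ticket_lock(ticket_lock_t *l)
    {
        unsigned int me = atomic_fetch_add_explicit(&l->next, 1,
                                                    memory_order_relaxed);
        while ( atomic_load_explicit(&l->owner, memory_order_acquire) != me )
            ;   /* a real port would wfe/pause here */
    }

    static void ticket_unlock(ticket_lock_t *l)
    {
        atomic_fetch_add_explicit(&l->owner, 1, memory_order_release);
    }

The skipped arm64 commit 52ea2a5 listed above is exactly this ticket scheme,
which Xen chose not to pull in.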
diff --git a/xen/include/asm-arm/arm32/spinlock.h b/xen/include/asm-arm/arm32/spinlock.h
new file mode 100644
index 0000000..bc0343c
--- /dev/null
+++ b/xen/include/asm-arm/arm32/spinlock.h
@@ -0,0 +1,66 @@
+#ifndef __ASM_ARM32_SPINLOCK_H
+#define __ASM_ARM32_SPINLOCK_H
+
+static inline void dsb_sev(void)
+{
+    __asm__ __volatile__ (
+        "dsb\n"
+        "sev\n"
+        );
+}
+
+typedef struct {
+    volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define _RAW_SPIN_LOCK_UNLOCKED { 0 }
+
+#define _raw_spin_is_locked(x)          ((x)->lock != 0)
+
+static always_inline void _raw_spin_unlock(raw_spinlock_t *lock)
+{
+    ASSERT(_raw_spin_is_locked(lock));
+
+    smp_mb();
+
+    __asm__ __volatile__(
+"   str     %1, [%0]\n"
+    :
+    : "r" (&lock->lock), "r" (0)
+    : "cc");
+
+    dsb_sev();
+}
+
+static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
+{
+    unsigned long contended, res;
+
+    do {
+        __asm__ __volatile__(
+    "   ldrex   %0, [%2]\n"
+    "   teq     %0, #0\n"
+    "   strexeq %1, %3, [%2]\n"
+    "   movne   %1, #0\n"
+        : "=&r" (contended), "=r" (res)
+        : "r" (&lock->lock), "r" (1)
+        : "cc");
+    } while (res);
+
+    if (!contended) {
+        smp_mb();
+        return 1;
+    } else {
+        return 0;
+    }
+}
+
+#endif /* __ASM_ARM32_SPINLOCK_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
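The arm32 header above supplies only the trylock and unlock primitives; the
generic layer (xen/include/xen/spinlock.h, re-wired at the bottom of this
diff) turns them into a full lock operation by spinning on the trylock hook.
A hedged sketch of that calling pattern (example_spin_lock is a hypothetical
name, not Xen's actual common code):

    /* Hypothetical caller built on the primitives above: spin until
     * _raw_spin_trylock() succeeds.  A real arm32 build would execute
     * wfe in the loop and be woken by the dsb_sev() in
     * _raw_spin_unlock(). */
    static inline void example_spin_lock(raw_spinlock_t *lock)
    {
        while ( !_raw_spin_trylock(lock) )
            cpu_relax();
    }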
diff --git a/xen/include/asm-arm/arm64/spinlock.h b/xen/include/asm-arm/arm64/spinlock.h
new file mode 100644
index 0000000..5ae034d
--- /dev/null
+++ b/xen/include/asm-arm/arm64/spinlock.h
@@ -0,0 +1,63 @@
+/*
+ * Derived from Linux arm64 spinlock.h which is:
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ARM64_SPINLOCK_H
+#define __ASM_ARM64_SPINLOCK_H
+
+typedef struct {
+    volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define _RAW_SPIN_LOCK_UNLOCKED { 0 }
+
+#define _raw_spin_is_locked(x)          ((x)->lock != 0)
+
+static always_inline void _raw_spin_unlock(raw_spinlock_t *lock)
+{
+    ASSERT(_raw_spin_is_locked(lock));
+
+    asm volatile(
+        "       stlr    %w1, %0\n"
+        : "=Q" (lock->lock) : "r" (0) : "memory");
+}
+
+static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
+{
+    unsigned int tmp;
+
+    asm volatile(
+        "2:     ldaxr   %w0, %1\n"
+        "       cbnz    %w0, 1f\n"
+        "       stxr    %w0, %w2, %1\n"
+        "       cbnz    %w0, 2b\n"
+        "1:\n"
+        : "=&r" (tmp), "+Q" (lock->lock)
+        : "r" (1)
+        : "cc", "memory");
+
+    return !tmp;
+}
+
+#endif /* __ASM_ARM64_SPINLOCK_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
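Unlike the arm32 variant, the arm64 code above needs no separate barriers or
sev: ldaxr is a load-acquire (later accesses cannot move before it), stlr is
a store-release (earlier accesses cannot move after it), and the store that
frees the lock clears the exclusive monitor, which is what wakes a waiter
parked in wfe. The same ordering contract expressed in portable C11, as an
illustrative sketch (the c11_* names are assumptions, not Xen API):

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct { atomic_uint lock; } c11_spinlock_t;

    static bool c11_spin_trylock(c11_spinlock_t *l)
    {
        unsigned int expected = 0;

        /* Acquire on success, like ldaxr: the critical section cannot
         * float above the lock acquisition. */
        return atomic_compare_exchange_strong_explicit(
            &l->lock, &expected, 1,
            memory_order_acquire, memory_order_relaxed);
    }

    static void c11_spin_unlock(c11_spinlock_t *l)
    {
        /* Release, like stlr: the critical section cannot float below
         * the store that frees the lock. */
        atomic_store_explicit(&l->lock, 0, memory_order_release);
    }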
diff --git a/xen/include/asm-arm/spinlock.h b/xen/include/asm-arm/spinlock.h
new file mode 100644
index 0000000..a064f73
--- /dev/null
+++ b/xen/include/asm-arm/spinlock.h
@@ -0,0 +1,23 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <xen/config.h>
+#include <xen/lib.h>
+
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/spinlock.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/spinlock.h>
+#else
+# error "unknown ARM variant"
+#endif
+
+#endif /* __ASM_SPINLOCK_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-x86/spinlock.h b/xen/include/asm-x86/spinlock.h
new file mode 100644
index 0000000..757e20b
--- /dev/null
+++ b/xen/include/asm-x86/spinlock.h
@@ -0,0 +1,37 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <asm/atomic.h>
+
+typedef struct {
+    volatile s16 lock;
+} raw_spinlock_t;
+
+#define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 1 }
+
+#define _raw_spin_is_locked(x) ((x)->lock <= 0)
+
+static always_inline void _raw_spin_unlock(raw_spinlock_t *lock)
+{
+    ASSERT(_raw_spin_is_locked(lock));
+    asm volatile (
+        "movw $1,%0" 
+        : "=m" (lock->lock) : : "memory" );
+}
+
+static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
+{
+    s16 oldval;
+    asm volatile (
+        "xchgw %w0,%1"
+        :"=r" (oldval), "=m" (lock->lock)
+        :"0" ((s16)0) : "memory" );
+    return (oldval > 0);
+}
+
+#define _raw_read_unlock(l) \
+    asm volatile ( "lock; dec%z0 %0" : "+m" ((l)->lock) :: "memory" )
+
+#endif /* __ASM_SPINLOCK_H */
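On x86, xchg with a memory operand is implicitly locked, so the trylock above
needs no lock prefix. The protocol also differs from the ARM headers: the
lock word starts at 1 (free), trylock swaps in 0 and succeeds only if the old
value was positive, and unlock rewrites 1. The same protocol in portable C11,
as an illustrative sketch (names are assumptions, not the Xen interface):

    #include <stdatomic.h>

    typedef struct { atomic_short lock; } xchg_lock_t;

    static int xchg_trylock(xchg_lock_t *l)
    {
        /* Swap in 0 ("held"); succeed only if the word was positive,
         * i.e. the lock was free. */
        return atomic_exchange_explicit(&l->lock, 0,
                                        memory_order_acquire) > 0;
    }

    static void xchg_unlock(xchg_lock_t *l)
    {
        /* Rewrite 1 ("free"); release ordering stands in for the
         * "memory" clobber on the movw above. */
        atomic_store_explicit(&l->lock, 1, memory_order_release);
    }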
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index 9286543beab5a67c19ede8f71f0ef55a22bc9942..fb0438e5425cd822eb42f18b695dd5ee991f0ad2 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -2,6 +2,7 @@
 #define __SPINLOCK_H__
 
 #include <asm/system.h>
+#include <asm/spinlock.h>
 
 #ifndef NDEBUG
 struct lock_debug {