xen/arm64: cmpxchg: Simplify the cmpxchg implementation
author    Julien Grall <julien.grall@arm.com>
          Wed, 22 May 2019 20:37:53 +0000 (13:37 -0700)
committer Julien Grall <julien.grall@arm.com>
          Fri, 14 Jun 2019 13:38:40 +0000 (14:38 +0100)
The only difference between each case of the cmpxchg is the size of the
access used. Rather than duplicating the code, provide a macro to generate
each case.

This makes the code easier to read and modify.
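
For illustration, the 1-byte instantiation __CMPXCHG_CASE(w, b, 1) in the
hunk below expands to roughly the following (a sketch of the preprocessor
output with whitespace tidied, not code added by this patch):

    static inline unsigned long __cmpxchg_case_1(volatile void *ptr,
                                                 unsigned long old,
                                                 unsigned long new)
    {
        unsigned long res, oldval;

        do {
            asm volatile("// __cmpxchg_case_1\n"
            "       ldxrb   %w1, %2\n"      /* load-exclusive byte      */
            "       mov     %w0, #0\n"      /* res = 0: assume success  */
            "       cmp     %w1, %w3\n"     /* oldval == old ?          */
            "       b.ne    1f\n"           /* mismatch: skip the store */
            "       stxrb   %w0, %w4, %2\n" /* store-exclusive byte     */
            "1:\n"
            : "=&r" (res), "=&r" (oldval),
              "+Q" (*(unsigned long *)ptr)
            : "Ir" (old), "r" (new)
            : "cc");
        } while (res);                      /* retry if stxrb failed    */

        return oldval;
    }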

This is part of XSA-295.

Signed-off-by: Julien Grall <julien.grall@arm.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Stefano Stabellini <stefanos@xilinx.com>
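
In the new helpers, ldxr/stxr are AArch64 load-exclusive/store-exclusive
instructions: stxr writes 0 to its status register when the store succeeds
and 1 when the exclusive monitor was lost, so the do { ... } while (res)
loop retries until the store goes through or the comparison fails (b.ne
skips the store with res already 0, ending the loop). Ignoring atomicity,
each generated helper computes the following; cmpxchg_sketch() is a
hypothetical illustration, not part of the patch:

    /* Non-atomic sketch of what __cmpxchg_case_N computes; the real
     * code gets its atomicity from the ldxr/stxr pair. */
    static unsigned long cmpxchg_sketch(unsigned long *ptr,
                                        unsigned long old,
                                        unsigned long new)
    {
        unsigned long oldval = *ptr;    /* ldxr: observe current value */

        if (oldval == old)              /* cmp/b.ne                    */
            *ptr = new;                 /* stxr: publish new value     */

        return oldval;                  /* success iff oldval == old   */
    }
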
xen/include/asm-arm/arm64/cmpxchg.h

index ae42b2f5ff4cf850165845505b50bfdcdae7ee75..393fbca6a5ac5ee0ddebfb19f78351d6f6797fbe 100644 (file)
@@ -61,80 +61,54 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
        __ret; \
 })
 
-extern void __bad_cmpxchg(volatile void *ptr, int size);
+extern unsigned long __bad_cmpxchg(volatile void *ptr, int size);
+
+#define __CMPXCHG_CASE(w, sz, name)                                    \
+static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,  \
+                                                 unsigned long old,    \
+                                                 unsigned long new)    \
+{                                                                      \
+       unsigned long res, oldval;                                      \
+                                                                       \
+       do {                                                            \
+               asm volatile("// __cmpxchg_case_" #name "\n"            \
+               "       ldxr" #sz "     %" #w "1, %2\n"                 \
+               "       mov     %w0, #0\n"                              \
+               "       cmp     %" #w "1, %" #w "3\n"                   \
+               "       b.ne    1f\n"                                   \
+               "       stxr" #sz "     %w0, %" #w "4, %2\n"            \
+               "1:\n"                                                  \
+               : "=&r" (res), "=&r" (oldval),                          \
+                 "+Q" (*(unsigned long *)ptr)                          \
+               : "Ir" (old), "r" (new)                                 \
+               : "cc");                                                \
+       } while (res);                                                  \
+                                                                       \
+       return oldval;                                                  \
+}
+
+__CMPXCHG_CASE(w, b, 1)
+__CMPXCHG_CASE(w, h, 2)
+__CMPXCHG_CASE(w,  , 4)
+__CMPXCHG_CASE( ,  , 8)
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
 {
-       unsigned long oldval = 0, res;
-
        switch (size) {
        case 1:
-               do {
-                       asm volatile("// __cmpxchg1\n"
-                       "       ldxrb   %w1, %2\n"
-                       "       mov     %w0, #0\n"
-                       "       cmp     %w1, %w3\n"
-                       "       b.ne    1f\n"
-                       "       stxrb   %w0, %w4, %2\n"
-                       "1:\n"
-                               : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
-                               : "Ir" (old), "r" (new)
-                               : "cc");
-               } while (res);
-               break;
-
+               return __cmpxchg_case_1(ptr, old, new);
        case 2:
-               do {
-                       asm volatile("// __cmpxchg2\n"
-                       "       ldxrh   %w1, %2\n"
-                       "       mov     %w0, #0\n"
-                       "       cmp     %w1, %w3\n"
-                       "       b.ne    1f\n"
-                       "       stxrh   %w0, %w4, %2\n"
-                       "1:\n"
-                               : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
-                               : "Ir" (old), "r" (new)
-                               : "cc");
-               } while (res);
-               break;
-
+               return __cmpxchg_case_2(ptr, old, new);
        case 4:
-               do {
-                       asm volatile("// __cmpxchg4\n"
-                       "       ldxr    %w1, %2\n"
-                       "       mov     %w0, #0\n"
-                       "       cmp     %w1, %w3\n"
-                       "       b.ne    1f\n"
-                       "       stxr    %w0, %w4, %2\n"
-                       "1:\n"
-                               : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
-                               : "Ir" (old), "r" (new)
-                               : "cc");
-               } while (res);
-               break;
-
+               return __cmpxchg_case_4(ptr, old, new);
        case 8:
-               do {
-                       asm volatile("// __cmpxchg8\n"
-                       "       ldxr    %1, %2\n"
-                       "       mov     %w0, #0\n"
-                       "       cmp     %1, %3\n"
-                       "       b.ne    1f\n"
-                       "       stxr    %w0, %4, %2\n"
-                       "1:\n"
-                               : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
-                               : "Ir" (old), "r" (new)
-                               : "cc");
-               } while (res);
-               break;
-
+               return __cmpxchg_case_8(ptr, old, new);
        default:
-               __bad_cmpxchg(ptr, size);
-               oldval = 0;
+               return __bad_cmpxchg(ptr, size);
        }
 
-       return oldval;
+       ASSERT_UNREACHABLE();
 }
 
 static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,