xenbits.xensource.com Git - xen.git/commitdiff
common: avoid atomic read-modify-write accesses in map_vcpu_info()
author Jan Beulich <jbeulich@suse.com>
Tue, 12 Mar 2019 13:40:56 +0000 (14:40 +0100)
committer Julien Grall <julien.grall@arm.com>
Fri, 14 Jun 2019 14:45:59 +0000 (15:45 +0100)
There's no need to set the evtchn_pending_sel bits one by one. Simply
write full words with all ones.

For Arm this requires extending write_atomic() to also handle 64-bit
values; for symmetry read_atomic() gets adjusted as well.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Julien Grall <julien.grall@arm.com>
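
A minimal stand-alone sketch of the idea, with GCC __atomic builtins standing in for Xen's set_bit()/write_atomic() and invented demo_* names (this is an illustration, not Xen code):

#include <stdint.h>
#include <stdio.h>

#define DEMO_BITS_PER_WORD 64        /* stand-in for BITS_PER_EVTCHN_WORD(d) */

int main(void)
{
    uint64_t sel_loop = 0, sel_store = 0;

    /* Old pattern: one atomic read-modify-write per selector bit. */
    for ( unsigned int i = 0; i < DEMO_BITS_PER_WORD; i++ )
        __atomic_fetch_or(&sel_loop, UINT64_C(1) << i, __ATOMIC_RELAXED);

    /* New pattern: a single full-width store of all ones. */
    __atomic_store_n(&sel_store, ~UINT64_C(0), __ATOMIC_RELAXED);

    /* Both leave every bit set; only the second avoids the RMW cycles. */
    printf("%#llx %#llx\n",
           (unsigned long long)sel_loop, (unsigned long long)sel_store);
    return 0;
}

Either way every selector bit ends up set and the guest merely sees a spurious event; the single store simply avoids issuing BITS_PER_EVTCHN_WORD atomic read-modify-write operations.
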
xen/common/domain.c
xen/include/asm-arm/atomic.h

index 314f880a9252b3627da457955923cf0375b7dcfd..740163ee77dba4c462722ccb2687d987190c39ce 100644
@@ -1153,7 +1153,6 @@ int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset)
     void *mapping;
     vcpu_info_t *new_info;
     struct page_info *page;
-    int i;
 
     if ( offset > (PAGE_SIZE - sizeof(vcpu_info_t)) )
         return -EINVAL;
@@ -1206,8 +1205,12 @@ int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset)
      * Mark everything as being pending just to make sure nothing gets
      * lost.  The domain will get a spurious event, but it can cope.
      */
-    for ( i = 0; i < BITS_PER_EVTCHN_WORD(d); i++ )
-        set_bit(i, &vcpu_info(v, evtchn_pending_sel));
+#ifdef CONFIG_COMPAT
+    if ( !has_32bit_shinfo(d) )
+        write_atomic(&new_info->native.evtchn_pending_sel, ~0);
+    else
+#endif
+        write_atomic(&vcpu_info(v, evtchn_pending_sel), ~0);
     vcpu_mark_events_pending(v);
 
     return 0;
index df9de6a74a9ae4e258a037a0e9f8d29cf5e24c66..4bfa71356cd099f4e88fb8ac7ddfc9a659eb67da 100644
@@ -56,6 +56,19 @@ build_atomic_write(write_int_atomic, "",  WORD, int, "r")
 #if defined (CONFIG_ARM_64)
 build_atomic_read(read_u64_atomic, "", "", uint64_t, "=r")
 build_atomic_write(write_u64_atomic, "", "", uint64_t, "r")
+#elif defined (CONFIG_ARM_32)
+static inline uint64_t read_u64_atomic(const volatile uint64_t *addr)
+{
+    uint64_t val;
+
+    asm volatile ( "ldrd %0,%H0,%1" : "=r" (val) : "m" (*addr) );
+
+    return val;
+}
+static inline void write_u64_atomic(volatile uint64_t *addr, uint64_t val)
+{
+    asm volatile ( "strd %1,%H1,%0" : "=m" (*addr) : "r" (val) );
+}
 #endif
 
 build_add_sized(add_u8_sized, "b", BYTE, uint8_t, "ri")
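
For readers without an arm32 toolchain, a rough stand-alone equivalent of what the new helpers provide; GCC __atomic builtins are used here as portable stand-ins for the hand-written ldrd/strd asm, and the demo_* names are invented:

#include <stdint.h>

/* Portable stand-ins: a 64-bit load and store each performed as a single
 * access, so other observers never see a torn value.  The Xen helpers above
 * achieve this on arm32 with ldrd/strd on a register pair; a generic
 * compiler may instead emit exclusives or call libatomic, depending on the
 * target. */
static inline uint64_t demo_read_u64_atomic(const volatile uint64_t *addr)
{
    return __atomic_load_n(addr, __ATOMIC_RELAXED);
}

static inline void demo_write_u64_atomic(volatile uint64_t *addr, uint64_t val)
{
    __atomic_store_n(addr, val, __ATOMIC_RELAXED);
}

With the real helpers in place, the 8-byte cases added to read_atomic()/write_atomic() below can dispatch to a genuine 64-bit access instead of falling into __bad_atomic_size().
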
@@ -70,6 +83,7 @@ void __bad_atomic_size(void);
     case 1: __x = (typeof(*p))read_u8_atomic((uint8_t *)p); break;      \
     case 2: __x = (typeof(*p))read_u16_atomic((uint16_t *)p); break;    \
     case 4: __x = (typeof(*p))read_u32_atomic((uint32_t *)p); break;    \
+    case 8: __x = (typeof(*p))read_u64_atomic((uint64_t *)p); break;    \
     default: __x = 0; __bad_atomic_size(); break;                       \
     }                                                                   \
     __x;                                                                \
@@ -81,6 +95,7 @@ void __bad_atomic_size(void);
     case 1: write_u8_atomic((uint8_t *)p, (uint8_t)__x); break;         \
     case 2: write_u16_atomic((uint16_t *)p, (uint16_t)__x); break;      \
     case 4: write_u32_atomic((uint32_t *)p, (uint32_t)__x); break;      \
+    case 8: write_u64_atomic((uint64_t *)p, (uint64_t)__x); break;      \
     default: __bad_atomic_size(); break;                                \
     }                                                                   \
     __x;                                                                \
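
A hedged usage sketch of the switch-on-sizeof dispatch these macros perform, written as free-standing C (the demo_* names are invented and GCC __atomic builtins replace the per-size asm helpers; the 1- and 2-byte cases are elided for brevity):

#include <stdint.h>
#include <stdio.h>

static inline void demo_write_u32(uint32_t *p, uint32_t v)
{
    __atomic_store_n(p, v, __ATOMIC_RELAXED);
}

static inline void demo_write_u64(uint64_t *p, uint64_t v)
{
    __atomic_store_n(p, v, __ATOMIC_RELAXED);
}

/* Condensed imitation of write_atomic(): pick the access width from the
 * size of the referenced object. */
#define demo_write_atomic(p, x) do {                                   \
    typeof(*(p)) x_ = (x);                                             \
    switch ( sizeof(*(p)) )                                            \
    {                                                                  \
    case 4: demo_write_u32((uint32_t *)(p), (uint32_t)x_); break;      \
    case 8: demo_write_u64((uint64_t *)(p), (uint64_t)x_); break;      \
    }                                                                  \
} while ( 0 )

int main(void)
{
    uint64_t pending_sel = 0;   /* stand-in for a 64-bit evtchn_pending_sel */

    /* Resolves to the (new) 8-byte case: one full-width store of all ones. */
    demo_write_atomic(&pending_sel, ~UINT64_C(0));

    printf("%#llx\n", (unsigned long long)pending_sel);
    return 0;
}
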