Add atomic_and() to x86 and Arm. This performs an atomic AND of the
provided mask against an atomic_t variable.
Requested-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Julien Grall <julien@xen.org>
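
For context, a minimal usage sketch (illustrative only, not part of the
patch): atomic_and() is typically used to atomically clear bits in a
shared flag word. The flag name and variable below are hypothetical.

    /* Illustrative sketch, not part of this patch. EXAMPLE_FLAG_BUSY and
     * example_flags are made-up names; ATOMIC_INIT() is the initializer
     * already provided by the same atomic headers. */
    #define EXAMPLE_FLAG_BUSY  (1 << 0)

    static atomic_t example_flags = ATOMIC_INIT(EXAMPLE_FLAG_BUSY);

    static void example_clear_busy(void)
    {
        /* ANDing with the complement atomically clears the bit. */
        atomic_and(~EXAMPLE_FLAG_BUSY, &example_flags);
    }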
return result;
}
+static inline void atomic_and(int m, atomic_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic_and\n"
+"1: ldrex %0, [%3]\n"
+" and %0, %0, %4\n"
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (m)
+ : "cc");
+}
+
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
int oldval;
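
For comparison only (not part of the patch), the same operation could be
built on the existing atomic_cmpxchg() shown in the context above; the
ldrex/strex loop in the hunk avoids the extra compare. The helper name
below is hypothetical, and atomic_read() is assumed from the same header.

    /* Sketch: atomic AND expressed via atomic_cmpxchg(), for comparison
     * with the LL/SC loop added above. */
    static inline void atomic_and_via_cmpxchg(int m, atomic_t *v)
    {
        int old = atomic_read(v), cur;

        for ( ; ; )
        {
            cur = atomic_cmpxchg(v, old, old & m);
            if ( cur == old )
                break;      /* exchange succeeded, AND is visible */
            old = cur;      /* v changed under us; retry with new value */
        }
    }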
return result;
}
+static inline void atomic_and(int m, atomic_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ asm volatile("// atomic_and\n"
+"1: ldxr %w0, %2\n"
+" and %w0, %w0, %w3\n"
+" stxr %w1, %w0, %2\n"
+" cbnz %w1, 1b"
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ : "Ir" (m));
+}
+
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
unsigned long tmp;
return c;
}
+static inline void atomic_and(int m, atomic_t *v)
+{
+ asm volatile (
+ "lock andl %1, %0"
+ : "+m" (*(volatile int *)&v->counter)
+ : "ir" (m) );
+}
+
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#endif /* __ARCH_X86_ATOMIC__ */
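
For readers less familiar with the inline assembly, all three hunks
implement an atomic fetch-and-AND without a return value. A portable C11
sketch of the same semantics (illustrative only, not Xen code) follows;
relaxed ordering is used purely for illustration, noting that the x86
lock prefix additionally provides full hardware ordering.

    /* Standalone C11 sketch of the operation, for illustration only. */
    #include <stdatomic.h>

    static void c11_atomic_and(int m, _Atomic int *v)
    {
        /* Atomically v &= m; the old value is discarded, matching the
         * void atomic_and() helpers added above. */
        atomic_fetch_and_explicit(v, m, memory_order_relaxed);
    }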