From: Julien Grall
Date: Sat, 10 Oct 2015 21:44:57 +0000 (+0100)
Subject: x86/xen: Consolidate synch_bitops.h in a single file
X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=refs%2Fheads%2Fconsole-x86-v1;p=people%2Fjulieng%2Ffreebsd.git

x86/xen: Consolidate synch_bitops.h in a single file
---

diff --git a/sys/amd64/include/xen/synch_bitops.h b/sys/amd64/include/xen/synch_bitops.h
index 746687aa91bd..42436041bdaa 100644
--- a/sys/amd64/include/xen/synch_bitops.h
+++ b/sys/amd64/include/xen/synch_bitops.h
@@ -1,129 +1,6 @@
 #ifndef __XEN_SYNCH_BITOPS_H__
 #define __XEN_SYNCH_BITOPS_H__
 
-/*
- * Copyright 1992, Linus Torvalds.
- * Heavily modified to provide guaranteed strong synchronisation
- * when communicating with Xen or other guest OSes running on other CPUs.
- */
-
-
-#define ADDR (*(volatile long *) addr)
-
-static __inline__ void synch_set_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ (
-        "lock btsl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ void synch_clear_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ (
-        "lock btrl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ void synch_change_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ (
-        "lock btcl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "lock btsl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "lock btrl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-
-    __asm__ __volatile__ (
-        "lock btcl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-struct __synch_xchg_dummy { unsigned long a[100]; };
-#define __synch_xg(x) ((volatile struct __synch_xchg_dummy *)(x))
-
-#define synch_cmpxchg(ptr, old, new) \
-((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
-                                     (unsigned long)(old), \
-                                     (unsigned long)(new), \
-                                     sizeof(*(ptr))))
-
-static inline unsigned long __synch_cmpxchg(volatile void *ptr,
-                                            unsigned long old,
-                                            unsigned long new, int size)
-{
-    unsigned long prev;
-    switch (size) {
-    case 1:
-        __asm__ __volatile__("lock; cmpxchgb %b1,%2"
-                             : "=a"(prev)
-                             : "q"(new), "m"(*__synch_xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-    case 2:
-        __asm__ __volatile__("lock; cmpxchgw %w1,%2"
-                             : "=a"(prev)
-                             : "q"(new), "m"(*__synch_xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-    case 4:
-        __asm__ __volatile__("lock; cmpxchgl %k1,%2"
-                             : "=a"(prev)
-                             : "q"(new), "m"(*__synch_xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-    case 8:
-        __asm__ __volatile__("lock; cmpxchgq %1,%2"
-                             : "=a"(prev)
-                             : "q"(new), "m"(*__synch_xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-    }
-    return old;
-}
-
-static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
-{
-    return ((1UL << (nr & 31)) &
-            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "btl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
-    return oldbit;
-}
-
-#define synch_test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- synch_const_test_bit((nr),(addr)) : \
- synch_var_test_bit((nr),(addr)))
+#include <x86/xen/synch_bitops.h>
 
 #endif /* __XEN_SYNCH_BITOPS_H__ */
diff --git a/sys/i386/include/xen/synch_bitops.h b/sys/i386/include/xen/synch_bitops.h
index 1b61542bc7a0..42436041bdaa 100644
--- a/sys/i386/include/xen/synch_bitops.h
+++ b/sys/i386/include/xen/synch_bitops.h
@@ -1,139 +1,6 @@
 #ifndef __XEN_SYNCH_BITOPS_H__
 #define __XEN_SYNCH_BITOPS_H__
 
-/*
- * Copyright 1992, Linus Torvalds.
- * Heavily modified to provide guaranteed strong synchronisation
- * when communicating with Xen or other guest OSes running on other CPUs.
- */
-
-
-#define ADDR (*(volatile long *) addr)
-
-static __inline__ void synch_set_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ (
-        "lock btsl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ void synch_clear_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ (
-        "lock btrl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ void synch_change_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ (
-        "lock btcl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "lock btsl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "lock btrl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-
-    __asm__ __volatile__ (
-        "lock btcl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-struct __synch_xchg_dummy { unsigned long a[100]; };
-#define __synch_xg(x) ((volatile struct __synch_xchg_dummy *)(x))
-
-#define synch_cmpxchg(ptr, old, new) \
-((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
-                                     (unsigned long)(old), \
-                                     (unsigned long)(new), \
-                                     sizeof(*(ptr))))
-
-static inline unsigned long __synch_cmpxchg(volatile void *ptr,
-                                            unsigned long old,
-                                            unsigned long new, int size)
-{
-    unsigned long prev;
-    switch (size) {
-    case 1:
-        __asm__ __volatile__("lock; cmpxchgb %b1,%2"
-                             : "=a"(prev)
-                             : "q"(new), "m"(*__synch_xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-    case 2:
-        __asm__ __volatile__("lock; cmpxchgw %w1,%2"
-                             : "=a"(prev)
-                             : "q"(new), "m"(*__synch_xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-#ifdef CONFIG_X86_64
-    case 4:
-        __asm__ __volatile__("lock; cmpxchgl %k1,%2"
-                             : "=a"(prev)
-                             : "q"(new), "m"(*__synch_xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-    case 8:
-        __asm__ __volatile__("lock; cmpxchgq %1,%2"
-                             : "=a"(prev)
-                             : "q"(new), "m"(*__synch_xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-#else
-    case 4:
-        __asm__ __volatile__("lock; cmpxchgl %1,%2"
-                             : "=a"(prev)
-                             : "q"(new), "m"(*__synch_xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-#endif
-    }
-    return old;
-}
-
-static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
-{
-    return ((1UL << (nr & 31)) &
-            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "btl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
-    return oldbit;
-}
-
-#define synch_test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- synch_const_test_bit((nr),(addr)) : \
- synch_var_test_bit((nr),(addr)))
+#include <x86/xen/synch_bitops.h>
 
 #endif /* __XEN_SYNCH_BITOPS_H__ */
diff --git a/sys/x86/include/xen/synch_bitops.h b/sys/x86/include/xen/synch_bitops.h
new file mode 100644
index 000000000000..b1ec13cd153c
--- /dev/null
+++ b/sys/x86/include/xen/synch_bitops.h
@@ -0,0 +1,139 @@
+#ifndef __MACHINE_X86_XEN_SYNCH_BITOPS_H__
+#define __MACHINE_X86_XEN_SYNCH_BITOPS_H__
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ * Heavily modified to provide guaranteed strong synchronisation
+ * when communicating with Xen or other guest OSes running on other CPUs.
+ */
+
+
+#define ADDR (*(volatile long *) addr)
+
+static __inline__ void synch_set_bit(int nr, volatile void * addr)
+{
+    __asm__ __volatile__ (
+        "lock btsl %1,%0"
+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
+{
+    __asm__ __volatile__ (
+        "lock btrl %1,%0"
+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_change_bit(int nr, volatile void * addr)
+{
+    __asm__ __volatile__ (
+        "lock btcl %1,%0"
+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+    __asm__ __volatile__ (
+        "lock btsl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+    return oldbit;
+}
+
+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+    __asm__ __volatile__ (
+        "lock btrl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+    return oldbit;
+}
+
+static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+
+    __asm__ __volatile__ (
+        "lock btcl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+    return oldbit;
+}
+
+struct __synch_xchg_dummy { unsigned long a[100]; };
+#define __synch_xg(x) ((volatile struct __synch_xchg_dummy *)(x))
+
+#define synch_cmpxchg(ptr, old, new) \
+((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
+                                     (unsigned long)(old), \
+                                     (unsigned long)(new), \
+                                     sizeof(*(ptr))))
+
+static inline unsigned long __synch_cmpxchg(volatile void *ptr,
+                                            unsigned long old,
+                                            unsigned long new, int size)
+{
+    unsigned long prev;
+    switch (size) {
+    case 1:
+        __asm__ __volatile__("lock; cmpxchgb %b1,%2"
+                             : "=a"(prev)
+                             : "q"(new), "m"(*__synch_xg(ptr)),
+                               "0"(old)
+                             : "memory");
+        return prev;
+    case 2:
+        __asm__ __volatile__("lock; cmpxchgw %w1,%2"
+                             : "=a"(prev)
+                             : "q"(new), "m"(*__synch_xg(ptr)),
+                               "0"(old)
+                             : "memory");
+        return prev;
+#ifdef __amd64__
+    case 4:
+        __asm__ __volatile__("lock; cmpxchgl %k1,%2"
+                             : "=a"(prev)
+                             : "q"(new), "m"(*__synch_xg(ptr)),
+                               "0"(old)
+                             : "memory");
+        return prev;
+    case 8:
+        __asm__ __volatile__("lock; cmpxchgq %1,%2"
+                             : "=a"(prev)
+                             : "q"(new), "m"(*__synch_xg(ptr)),
+                               "0"(old)
+                             : "memory");
+        return prev;
+#else
+    case 4:
+        __asm__ __volatile__("lock; cmpxchgl %1,%2"
+                             : "=a"(prev)
+                             : "q"(new), "m"(*__synch_xg(ptr)),
+                               "0"(old)
+                             : "memory");
+        return prev;
+#endif
+    }
+    return old;
+}
+
+static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
+{
+    return ((1UL << (nr & 31)) &
+            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+    __asm__ __volatile__ (
+        "btl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
+    return oldbit;
+}
+
+#define synch_test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ synch_const_test_bit((nr),(addr)) : \
+ synch_var_test_bit((nr),(addr)))
+
+#endif /* __MACHINE_X86_XEN_SYNCH_BITOPS_H__ */
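
For context: every primitive above uses a LOCK-prefixed instruction because the
word being updated is shared with the hypervisor or with guests on other CPUs,
so a plain read-modify-write could lose a concurrent update. The sketch below
is illustrative only and not part of this commit: drain_pending and handle are
made-up names, and it assumes a FreeBSD kernel build where the consolidated
header is reachable as <x86/xen/synch_bitops.h>.

#include <x86/xen/synch_bitops.h>

/* One word of pending bits, set concurrently by another CPU or domain. */
static volatile unsigned long pending;

static void
drain_pending(void (*handle)(int bit))
{
	int bit;

	for (bit = 0; bit < 32; bit++) {
		/*
		 * synch_test_and_clear_bit is a LOCKed btrl: it clears the
		 * bit and reports whether it was set in one atomic step,
		 * so a bit set concurrently by a producer is never dropped.
		 */
		if (synch_test_and_clear_bit(bit, &pending))
			handle(bit);
	}
}

The test-and-clear form, rather than synch_test_bit followed by
synch_clear_bit, is what makes the consume step race-free against a remote
writer.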