ia64/xen-unstable

linux-2.6-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h @ 8534:da7873110bbb

Tiny bootstrap cleanup.

Signed-off-by: Keir Fraser <keir@xensource.com>

author:  kaf24@firebug.cl.cam.ac.uk
date:    Mon Jan 09 19:46:46 2006 +0100
parents: 06d84bf87159
#ifndef __XEN_SYNCH_BITOPS_H__
#define __XEN_SYNCH_BITOPS_H__

/*
 * Copyright 1992, Linus Torvalds.
 * Heavily modified to provide guaranteed strong synchronisation
 * when communicating with Xen or other guest OSes running on other CPUs.
 */

#include <linux/config.h>

#define ADDR (*(volatile long *) addr)
static __inline__ void synch_set_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btsl %1,%0"
        : "=m" (ADDR) : "Ir" (nr) : "memory" );
}

static __inline__ void synch_clear_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btrl %1,%0"
        : "=m" (ADDR) : "Ir" (nr) : "memory" );
}

static __inline__ void synch_change_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btcl %1,%0"
        : "=m" (ADDR) : "Ir" (nr) : "memory" );
}
static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btsl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btrl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btcl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}
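
/*
 * Illustrative usage sketch (not part of the original header): the
 * test-and-* operations above atomically update a bit and report its
 * previous value, the usual pattern for consuming event bits shared
 * with Xen.  The bitmap name, size and handler below are hypothetical.
 *
 *     static unsigned long pending[2];        // hypothetical shared bitmap
 *
 *     static void drain_pending(void)
 *     {
 *         int bit;
 *         for ( bit = 0; bit < 64; bit++ )
 *             if ( synch_test_and_clear_bit(bit, pending) )
 *                 handle_event(bit);          // hypothetical handler
 *     }
 */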
struct __synch_xchg_dummy { unsigned long a[100]; };
#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))

#define synch_cmpxchg(ptr, old, new) \
((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
                                     (unsigned long)(old), \
                                     (unsigned long)(new), \
                                     sizeof(*(ptr))))
static inline unsigned long __synch_cmpxchg(volatile void *ptr,
                                            unsigned long old,
                                            unsigned long new, int size)
{
    unsigned long prev;
    switch (size) {
    case 1:
        __asm__ __volatile__("lock; cmpxchgb %b1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
    case 2:
        __asm__ __volatile__("lock; cmpxchgw %w1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
#ifdef CONFIG_X86_64
    case 4:
        __asm__ __volatile__("lock; cmpxchgl %k1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
    case 8:
        __asm__ __volatile__("lock; cmpxchgq %1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
#else
    case 4:
        __asm__ __volatile__("lock; cmpxchgl %1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
#endif
    }
    return old;
}
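
/*
 * Illustrative usage sketch (not part of the original header): a typical
 * compare-and-swap retry loop built on synch_cmpxchg().  The counter name
 * is hypothetical; synch_cmpxchg() returns the value previously stored at
 * *ptr, so the update succeeded only when that value equals 'old'.
 *
 *     static volatile unsigned int shared_counter;   // hypothetical
 *
 *     static void shared_counter_increment(void)
 *     {
 *         unsigned int old, prev;
 *         do {
 *             old  = shared_counter;
 *             prev = synch_cmpxchg(&shared_counter, old, old + 1);
 *         } while ( prev != old );
 *     }
 */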
static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) &
            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "btl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
    return oldbit;
}

#define synch_test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 synch_const_test_bit((nr),(addr)) : \
 synch_var_test_bit((nr),(addr)))
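
/*
 * Illustrative note (not part of the original header): synch_test_bit()
 * uses __builtin_constant_p() to pick the plain C test when the bit number
 * is a compile-time constant and the inline-asm BT test otherwise, e.g.:
 *
 *     if ( synch_test_bit(0, pending) )      // constant nr: C expression
 *         ...;
 *     if ( synch_test_bit(port, pending) )   // variable nr: btl/sbbl asm
 *         ...;
 *
 * 'pending' and 'port' are hypothetical names used only for illustration.
 */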
#endif /* __XEN_SYNCH_BITOPS_H__ */