ia64/xen-unstable: view xen/include/asm-x86/atomic.h @ 15812:86a154e1ef5d

[HVM] Shadow: don't shadow the p2m table.
For HVM vcpus with paging disabled, we used to shadow the p2m table,
and skip the p2m lookup to go from gfn to mfn. Instead, we now
provide a simple pagetable that gives a one-to-one mapping of 4GB, and
shadow that, making the translations from gfn to mfn via the p2m.
This removes the paging-disabled special-case code from the shadow
fault handler, and allows us to expand the p2m interface, since all HVM
translations now go through the same p2m lookups.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Fri Aug 31 11:06:22 2007 +0100 (2007-08-31)
parents 0a4b76b6b5a0
children cd6b3af19191
#ifndef __ARCH_X86_ATOMIC__
#define __ARCH_X86_ATOMIC__

#include <xen/config.h>
#include <asm/system.h>

#ifdef CONFIG_SMP
#define LOCK "lock ; "
#else
#define LOCK ""
#endif

/*
 * NB. I've pushed the volatile qualifier into the operations. This allows
 * fast accessors such as _atomic_read() and _atomic_set() which don't give
 * the compiler a fit.
 */
typedef struct { int counter; } atomic_t;

#define ATOMIC_INIT(i) { (i) }
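
/*
 * Usage sketch (illustrative only, not part of the original header):
 * an atomic_t is declared like any other object and can be statically
 * initialised with ATOMIC_INIT. The name 'example_nr_events' is made
 * up for illustration.
 */
static __inline__ int example_atomic_init(void)
{
    atomic_t example_nr_events = ATOMIC_INIT(0); /* counter starts at 0 */
    return example_nr_events.counter;            /* plain, non-atomic read */
}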
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define _atomic_read(v) ((v).counter)
#define atomic_read(v) (*(volatile int *)&((v)->counter))

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define _atomic_set(v,i) (((v).counter) = (i))
#define atomic_set(v,i) (*(volatile int *)&((v)->counter) = (i))
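
/*
 * Usage sketch (illustrative only, not part of the original header):
 * atomic_set()/atomic_read() force a volatile access to the counter,
 * while the underscore variants are plain accesses for callers that
 * already hold whatever lock protects the object. The names below are
 * made up for illustration.
 */
static __inline__ int example_read_set(atomic_t *shared)
{
    atomic_set(shared, 42);     /* volatile store through the pointer */
    return atomic_read(shared); /* volatile load of the current value */
}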
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v. Note that the guaranteed useful range
 * of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
    __asm__ __volatile__(
        LOCK "addl %1,%0"
        :"=m" (*(volatile int *)&v->counter)
        :"ir" (i), "m" (*(volatile int *)&v->counter));
}
/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
    __asm__ __volatile__(
        LOCK "subl %1,%0"
        :"=m" (*(volatile int *)&v->counter)
        :"ir" (i), "m" (*(volatile int *)&v->counter));
}
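
/*
 * Usage sketch (illustrative only, not part of the original header):
 * adjust a shared counter from concurrent CPUs without a lock. The
 * names 'example_account' and 'stats' are made up for illustration.
 */
static __inline__ void example_account(atomic_t *stats, int nr_done)
{
    atomic_add(nr_done, stats); /* locked add; safe against other CPUs */
    atomic_sub(1, stats);       /* locked subtract of a constant */
}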
/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
    unsigned char c;

    __asm__ __volatile__(
        LOCK "subl %2,%0; sete %1"
        :"=m" (*(volatile int *)&v->counter), "=qm" (c)
        :"ir" (i), "m" (*(volatile int *)&v->counter) : "memory");
    return c;
}
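
/*
 * Usage sketch (illustrative only, not part of the original header):
 * drop several references in one locked operation and detect when the
 * count reaches zero. 'example_put_many' is made up for illustration.
 */
static __inline__ int example_put_many(atomic_t *refcnt, int nr)
{
    int last_ref_gone = atomic_sub_and_test(nr, refcnt);
    return last_ref_gone; /* non-zero iff the count is now exactly 0 */
}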
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
    __asm__ __volatile__(
        LOCK "incl %0"
        :"=m" (*(volatile int *)&v->counter)
        :"m" (*(volatile int *)&v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
    __asm__ __volatile__(
        LOCK "decl %0"
        :"=m" (*(volatile int *)&v->counter)
        :"m" (*(volatile int *)&v->counter));
}
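
/*
 * Usage sketch (illustrative only, not part of the original header):
 * atomic_inc()/atomic_dec() are the usual way to take and drop a
 * reference when the caller does not need the resulting value.
 * 'example_get_ref' is made up for illustration.
 */
static __inline__ void example_get_ref(atomic_t *refcnt)
{
    atomic_inc(refcnt); /* locked increment; no return value needed */
}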
/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
    unsigned char c;

    __asm__ __volatile__(
        LOCK "decl %0; sete %1"
        :"=m" (*(volatile int *)&v->counter), "=qm" (c)
        :"m" (*(volatile int *)&v->counter) : "memory");
    return c != 0;
}
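
/*
 * Usage sketch (illustrative only, not part of the original header):
 * the classic reference-count release pattern. Exactly one caller sees
 * atomic_dec_and_test() return true and frees the object. The names
 * 'example_put_ref' and 'destroy' are made up for illustration.
 */
static __inline__ void example_put_ref(
    atomic_t *refcnt, void (*destroy)(void *), void *obj)
{
    if ( atomic_dec_and_test(refcnt) ) /* true only for the last reference */
        destroy(obj);
}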
/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
    unsigned char c;

    __asm__ __volatile__(
        LOCK "incl %0; sete %1"
        :"=m" (*(volatile int *)&v->counter), "=qm" (c)
        :"m" (*(volatile int *)&v->counter) : "memory");
    return c != 0;
}
/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
    unsigned char c;

    __asm__ __volatile__(
        LOCK "addl %2,%0; sets %1"
        :"=m" (*(volatile int *)&v->counter), "=qm" (c)
        :"ir" (i), "m" (*(volatile int *)&v->counter) : "memory");
    return c;
}
/**
 * atomic_compareandswap - compare and exchange atomic variable
 * @old: value expected to be in @v
 * @new: value to store in @v if it currently holds @old
 * @v: pointer of type atomic_t
 *
 * Atomically compares @v with @old and, if they match, stores @new.
 * Returns the value that was previously in @v.
 */
static __inline__ atomic_t atomic_compareandswap(
    atomic_t old, atomic_t new, atomic_t *v)
{
    atomic_t rc;
    rc.counter =
        __cmpxchg(&v->counter, old.counter, new.counter, sizeof(int));
    return rc;
}
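
/*
 * Usage sketch (illustrative only, not part of the original header):
 * a lock-free update loop built on atomic_compareandswap(). The CAS is
 * retried whenever another CPU modifies the counter between the read
 * and the exchange. 'example_saturating_inc' is made up for
 * illustration.
 */
static __inline__ void example_saturating_inc(atomic_t *v, int limit)
{
    atomic_t old, new, seen;

    do {
        old.counter = atomic_read(v);
        if ( old.counter >= limit )          /* already at the cap */
            return;
        new.counter = old.counter + 1;
        seen = atomic_compareandswap(old, new, v);
    } while ( seen.counter != old.counter ); /* lost a race; retry */
}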

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
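
/*
 * Usage sketch (illustrative only, not part of the original header):
 * on x86 the locked instructions above already serialise, so these
 * macros reduce to compiler barriers; portable code still writes the
 * pairing explicitly. 'example_ordered_dec' is made up for
 * illustration.
 */
static __inline__ void example_ordered_dec(atomic_t *v)
{
    smp_mb__before_atomic_dec(); /* order earlier accesses before the dec */
    atomic_dec(v);
    smp_mb__after_atomic_dec();  /* order the dec before later accesses */
}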

#endif /* __ARCH_X86_ATOMIC__ */