ia64/xen-unstable

view xen/include/asm-x86/system.h @ 15812:86a154e1ef5d

[HVM] Shadow: don't shadow the p2m table.
For HVM vcpus with paging disabled, we used to shadow the p2m table,
and skip the p2m lookup to go from gfn to mfn. Instead, we now
provide a simple pagetable that gives a one-to-one mapping of 4GB, and
shadow that, making the translations from gfn to mfn via the p2m.
This removes the paging-disabled special-case code from the shadow
fault handler, and allows us to expand the p2m interface, since all HVM
translations now go through the same p2m lookups.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Fri Aug 31 11:06:22 2007 +0100 (2007-08-31)
parents 79053138b35c
children ca495837a722
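
A minimal sketch of the idea in the change description above (not Xen code; every name below is hypothetical): a one-to-one mapping of the low 4GB can be expressed, for instance, as 2MB superpage entries in which entry i maps guest-physical range i straight back onto itself, so the real machine frame behind each guest frame is still found separately through the p2m lookup.

#include <stdint.h>

#define EX_SUPERPAGE_SHIFT  21                        /* 2MB entries */
#define EX_ENTRIES_FOR_4GB  (1u << (32 - EX_SUPERPAGE_SHIFT))
#define EX_FLAGS_P_RW_PS    0x83u                     /* present | writable | large page */

static uint64_t ex_identity_map[EX_ENTRIES_FOR_4GB];

static void ex_build_identity_map(void)
{
    uint32_t i;

    /* Entry i maps guest-physical address (i << 21) onto itself. */
    for ( i = 0; i < EX_ENTRIES_FOR_4GB; i++ )
        ex_identity_map[i] = ((uint64_t)i << EX_SUPERPAGE_SHIFT) | EX_FLAGS_P_RW_PS;
}
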
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <xen/config.h>
#include <xen/types.h>
#include <asm/bitops.h>

#define read_segment_register(name) \
({  u16 __sel; \
    __asm__ __volatile__ ( "movw %%" STR(name) ",%0" : "=r" (__sel) ); \
    __sel; \
})

#define wbinvd() \
    __asm__ __volatile__ ("wbinvd": : :"memory");

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has side effects, so the volatile attribute is necessary;
 *         strictly speaking, *ptr should also be listed as an output of
 *         the asm. --ANK
 */
static always_inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
#if defined(__i386__)
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
#elif defined(__x86_64__)
    case 4:
        __asm__ __volatile__("xchgl %k0,%1"
                             :"=r" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
    case 8:
        __asm__ __volatile__("xchgq %0,%1"
                             :"=r" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
#endif
    }
    return x;
}
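
/*
 * Illustrative usage only (not part of this header; the helper name below
 * is hypothetical). xchg() atomically stores a new value and returns the
 * previous contents, which is enough to build a simple test-and-set flag.
 */
static inline int example_test_and_set(volatile unsigned long *flag)
{
    /* Non-zero iff we were the one to flip the flag from 0 to 1. */
    return xchg(flag, 1UL) == 0;
}
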
/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD. (An illustrative retry loop
 * follows the cmpxchg() definitions below.)
 */

static always_inline unsigned long __cmpxchg(
    volatile void *ptr, unsigned long old, unsigned long new, int size)
{
    unsigned long prev;
    switch (size) {
    case 1:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
    case 2:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
#if defined(__i386__)
    case 4:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
#elif defined(__x86_64__)
    case 4:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
    case 8:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
#endif
    }
    return old;
}

#define __HAVE_ARCH_CMPXCHG

#if BITS_PER_LONG == 64

#define cmpxchg(ptr,o,n) \
    ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \
                                   (unsigned long)(n),sizeof(*(ptr))))
#else

static always_inline unsigned long long __cmpxchg8b(
    volatile void *ptr, unsigned long long old, unsigned long long new)
{
    unsigned long long prev;
    __asm__ __volatile__ (
        LOCK_PREFIX "cmpxchg8b %3"
        : "=A" (prev)
        : "c" ((u32)(new>>32)), "b" ((u32)new),
          "m" (*__xg((volatile void *)ptr)), "0" (old)
        : "memory" );
    return prev;
}

#define cmpxchg(ptr,o,n) \
({ \
    __typeof__(*(ptr)) __prev; \
    switch ( sizeof(*(ptr)) ) { \
    case 8: \
        __prev = ((__typeof__(*(ptr)))__cmpxchg8b( \
            (ptr), \
            (unsigned long long)(o), \
            (unsigned long long)(n))); \
        break; \
    default: \
        __prev = ((__typeof__(*(ptr)))__cmpxchg( \
            (ptr), \
            (unsigned long)(o), \
            (unsigned long)(n), \
            sizeof(*(ptr)))); \
        break; \
    } \
    __prev; \
})

#endif
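
/*
 * Illustrative usage only (not part of this header; the helper name below
 * is hypothetical). cmpxchg() returns the value actually found in memory,
 * so the update succeeded iff that matches the value read beforehand;
 * otherwise another CPU got there first and we simply retry.
 */
static inline void example_atomic_add(volatile unsigned long *counter,
                                      unsigned long delta)
{
    unsigned long old, seen;

    do {
        old  = *counter;
        seen = cmpxchg(counter, old, old + delta);
    } while ( seen != old );
}
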
/*
 * This operation changes the value at location _p from _o to _n.
 * If the access causes a fault then we return 1, otherwise we return 0.
 * If no fault occurs then _o is updated to the value we saw at _p. If this
 * is the same as the initial value of _o then _n is written to location _p.
 * (An illustrative wrapper follows the definitions below.)
 */
#ifdef __i386__
#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype) \
    __asm__ __volatile__ ( \
        "1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n" \
        "2:\n" \
        ".section .fixup,\"ax\"\n" \
        "3: movl $1,%1\n" \
        " jmp 2b\n" \
        ".previous\n" \
        ".section __ex_table,\"a\"\n" \
        " .align 4\n" \
        " .long 1b,3b\n" \
        ".previous" \
        : "=a" (_o), "=r" (_rc) \
        : _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
        : "memory");
#define cmpxchg_user(_p,_o,_n) \
({ \
    int _rc; \
    switch ( sizeof(*(_p)) ) { \
    case 1: \
        __cmpxchg_user(_p,_o,_n,"b","b","q"); \
        break; \
    case 2: \
        __cmpxchg_user(_p,_o,_n,"w","w","r"); \
        break; \
    case 4: \
        __cmpxchg_user(_p,_o,_n,"l","","r"); \
        break; \
    case 8: \
        __asm__ __volatile__ ( \
            "1: " LOCK_PREFIX "cmpxchg8b %4\n" \
            "2:\n" \
            ".section .fixup,\"ax\"\n" \
            "3: movl $1,%1\n" \
            " jmp 2b\n" \
            ".previous\n" \
            ".section __ex_table,\"a\"\n" \
            " .align 4\n" \
            " .long 1b,3b\n" \
            ".previous" \
            : "=A" (_o), "=r" (_rc) \
            : "c" ((u32)((u64)(_n)>>32)), "b" ((u32)(_n)), \
              "m" (*__xg((volatile void *)(_p))), "0" (_o), "1" (0) \
            : "memory"); \
        break; \
    } \
    _rc; \
})
#else
#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype) \
    __asm__ __volatile__ ( \
        "1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n" \
        "2:\n" \
        ".section .fixup,\"ax\"\n" \
        "3: movl $1,%1\n" \
        " jmp 2b\n" \
        ".previous\n" \
        ".section __ex_table,\"a\"\n" \
        " .align 8\n" \
        " .quad 1b,3b\n" \
        ".previous" \
        : "=a" (_o), "=r" (_rc) \
        : _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
        : "memory");
#define cmpxchg_user(_p,_o,_n) \
({ \
    int _rc; \
    switch ( sizeof(*(_p)) ) { \
    case 1: \
        __cmpxchg_user(_p,_o,_n,"b","b","q"); \
        break; \
    case 2: \
        __cmpxchg_user(_p,_o,_n,"w","w","r"); \
        break; \
    case 4: \
        __cmpxchg_user(_p,_o,_n,"l","k","r"); \
        break; \
    case 8: \
        __cmpxchg_user(_p,_o,_n,"q","","r"); \
        break; \
    } \
    _rc; \
})
#endif
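
/*
 * Illustrative usage only (not part of this header; the helper name below
 * is hypothetical). cmpxchg_user() evaluates to non-zero if the access
 * faulted; otherwise its 'old' argument has been updated with the value
 * seen at the target, so the swap happened iff that still equals what we
 * expected.
 */
static inline int example_update_word(unsigned long *uptr,
                                      unsigned long expected,
                                      unsigned long desired)
{
    unsigned long seen = expected;

    if ( cmpxchg_user(uptr, seen, desired) )
        return -1;                         /* the access faulted */
    return (seen == expected) ? 0 : 1;     /* 0: swapped, 1: lost the race */
}
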
#if defined(__i386__)
#define mb()  __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#elif defined(__x86_64__)
#define mb()  __asm__ __volatile__ ("mfence":::"memory")
#define rmb() __asm__ __volatile__ ("lfence":::"memory")
#endif
#define wmb() __asm__ __volatile__ ("": : :"memory")

#ifdef CONFIG_SMP
#define smp_mb()  mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#else
#define smp_mb()  barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#endif

#define set_mb(var, value)  do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

/* interrupt control.. */
#if defined(__i386__)
#define __save_flags(x) __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */)
#define __restore_flags(x) __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc")
#elif defined(__x86_64__)
#define __save_flags(x) do { __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
#define __restore_flags(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
#endif
#define __cli() __asm__ __volatile__("cli": : :"memory")
#define __sti() __asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; interrupts are not recognized until after the
 * instruction following sti, so sti;hlt cannot miss a wakeup interrupt */
#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
/* used when interrupts are already enabled or to shut down the processor */
#define halt() __asm__ __volatile__("hlt": : :"memory")
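
/*
 * Illustrative only (a hypothetical idle loop, not part of this header).
 * Interrupts are recognized only after the instruction that follows sti,
 * so the sti;hlt pair cannot miss a wakeup interrupt arriving in between.
 */
static inline void example_idle_loop(void)
{
    for ( ; ; )
    {
        __cli();
        /* ... check for pending work here and break out if there is any ... */
        safe_halt();    /* re-enables interrupts, then halts until one arrives */
    }
}
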
/* For spinlocks etc */
#if defined(__i386__)
#define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
#define local_irq_restore(x) __restore_flags(x)
#elif defined(__x86_64__)
#define local_irq_save(x) do { __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
#define local_irq_restore(x) __asm__ __volatile__("# local_irq_restore \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory")
#endif
#define local_irq_disable() __cli()
#define local_irq_enable() __sti()

static inline int local_irq_is_enabled(void)
{
    unsigned long flags;
    __save_flags(flags);
    return !!(flags & (1<<9)); /* EFLAGS_IF */
}

#define BROKEN_ACPI_Sx        0x0001
#define BROKEN_INIT_AFTER_S1  0x0002

#endif