linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/system.h
Repository: ia64/xen-unstable, changeset 11221:7c9d7fc3dce5

[HVM] Fix SMBIOS entry point copy destination.
Spotted by Xiaowei Yang <xiaowei.yang@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>

author    kaf24@localhost.localdomain
date      Sat Aug 19 12:06:36 2006 +0100
parents   e58e04589d11
children  4fad820a2233
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/synch_bitops.h>
#include <asm/hypervisor.h>
#include <xen/interface/arch-x86_64.h>

#ifdef __KERNEL__

#ifdef CONFIG_SMP
#define __vcpu_id smp_processor_id()
#else
#define __vcpu_id 0
#endif

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define __STR(x) #x
#define STR(x) __STR(x)

#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\n\t"

#define __EXTRA_CLOBBER \
        ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"

#define switch_to(prev,next,last) \
        asm volatile(SAVE_CONTEXT                                             \
                     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */   \
                     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
                     "call __switch_to\n\t"                                   \
                     ".globl thread_return\n"                                 \
                     "thread_return:\n\t"                                     \
                     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"                   \
                     "movq %P[thread_info](%%rsi),%%r8\n\t"                   \
                     LOCK "btr %[tif_fork],%P[ti_flags](%%r8)\n\t"            \
                     "movq %%rax,%%rdi\n\t"                                   \
                     "jc ret_from_fork\n\t"                                   \
                     RESTORE_CONTEXT                                          \
                     : "=a" (last)                                            \
                     : [next] "S" (next), [prev] "D" (prev),                  \
                       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
                       [ti_flags] "i" (offsetof(struct thread_info, flags)),  \
                       [tif_fork] "i" (TIF_FORK),                             \
                       [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
                       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
                     : "memory", "cc" __EXTRA_CLOBBER)
extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)                          \
        asm volatile("\n"                               \
                "1:\t"                                  \
                "movl %k0,%%" #seg "\n"                 \
                "2:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "3:\t"                                  \
                "movl %1,%%" #seg "\n\t"                \
                "jmp 2b\n"                              \
                ".previous\n"                           \
                ".section __ex_table,\"a\"\n\t"         \
                ".align 8\n\t"                          \
                ".quad 1b,3b\n"                         \
                ".previous"                             \
                : :"r" (value), "r" (0))
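
/*
 * Usage sketch (not part of the original header): load a selector into %fs.
 * If the selector is invalid, the exception-table fixup above loads the
 * null selector instead of faulting the caller.  'demo_set_fs' and 'sel'
 * are hypothetical names used only for illustration.
 */
#if 0
static inline void demo_set_fs(unsigned short sel)
{
        loadsegment(fs, sel);
}
#endif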
#define set_debug(value,register) \
        __asm__("movq %0,%%db" #register \
                : /* no output */ \
                :"r" ((unsigned long) value))

#ifdef __KERNEL__
struct alt_instr {
        __u8 *instr;            /* original instruction */
        __u8 *replacement;
        __u8  cpuid;            /* cpuid bit set for replacement */
        __u8  instrlen;         /* length of original instruction */
        __u8  replacementlen;   /* length of new instruction, <= instrlen */
        __u8  pad[5];
};
#endif
/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows the use of optimized instructions even on generic binary
 * kernels.
 *
 * The length of oldinstr must be greater than or equal to the length of
 * newinstr; oldinstr can be padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without the volatile qualifier and memory clobber.
 */
#define alternative(oldinstr, newinstr, feature)                        \
        asm volatile ("661:\n\t" oldinstr "\n662:\n"                    \
                      ".section .altinstructions,\"a\"\n"               \
                      " .align 8\n"                                     \
                      " .quad 661b\n"            /* label */            \
                      " .quad 663f\n"            /* new instruction */  \
                      " .byte %c0\n"             /* feature bit */      \
                      " .byte 662b-661b\n"       /* sourcelen */        \
                      " .byte 664f-663f\n"       /* replacementlen */   \
                      ".previous\n"                                     \
                      ".section .altinstr_replacement,\"ax\"\n"         \
                      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
                      ".previous" :: "i" (feature) : "memory")
/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * - No memory clobber here.
 * - Argument numbers start with 1.
 * - It is best to use constraints that are fixed in size (like (%1) ... "r");
 *   if you use variable-sized constraints like "m" or "g" in the
 *   replacement, make sure to pad to the worst-case length.
 */
#define alternative_input(oldinstr, newinstr, feature, input...)        \
        asm volatile ("661:\n\t" oldinstr "\n662:\n"                    \
                      ".section .altinstructions,\"a\"\n"               \
                      " .align 8\n"                                     \
                      " .quad 661b\n"            /* label */            \
                      " .quad 663f\n"            /* new instruction */  \
                      " .byte %c0\n"             /* feature bit */      \
                      " .byte 662b-661b\n"       /* sourcelen */        \
                      " .byte 664f-663f\n"       /* replacementlen */   \
                      ".previous\n"                                     \
                      ".section .altinstr_replacement,\"ax\"\n"         \
                      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
                      ".previous" :: "i" (feature), ##input)
/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, feature, output, input...)   \
        asm volatile ("661:\n\t" oldinstr "\n662:\n"                    \
                      ".section .altinstructions,\"a\"\n"               \
                      " .align 8\n"                                     \
                      " .quad 661b\n"            /* label */            \
                      " .quad 663f\n"            /* new instruction */  \
                      " .byte %c[feat]\n"        /* feature bit */      \
                      " .byte 662b-661b\n"       /* sourcelen */        \
                      " .byte 664f-663f\n"       /* replacementlen */   \
                      ".previous\n"                                     \
                      ".section .altinstr_replacement,\"ax\"\n"         \
                      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
                      ".previous" : output : [feat] "i" (feature), ##input)
/*
 * Clear and set 'TS' bit respectively
 */
#define clts() (HYPERVISOR_fpu_taskswitch(0))

static inline unsigned long read_cr0(void)
{
        unsigned long cr0;
        asm volatile("movq %%cr0,%0" : "=r" (cr0));
        return cr0;
}

static inline void write_cr0(unsigned long val)
{
        asm volatile("movq %0,%%cr0" :: "r" (val));
}

/* Under Xen, %cr3 holds a machine address; convert it to a pseudo-physical
 * address before returning it to common code. */
#define read_cr3() ({ \
        unsigned long __dummy; \
        asm("movq %%cr3,%0" : "=r" (__dummy)); \
        machine_to_phys(__dummy); \
})

static inline unsigned long read_cr4(void)
{
        unsigned long cr4;
        asm("movq %%cr4,%0" : "=r" (cr4));
        return cr4;
}

static inline void write_cr4(unsigned long val)
{
        asm volatile("movq %0,%%cr4" :: "r" (val));
}

#define stts() (HYPERVISOR_fpu_taskswitch(1))
#define wbinvd() \
        __asm__ __volatile__ ("wbinvd": : :"memory");

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
static inline void sched_cacheflush(void)
{
        wbinvd();
}

#endif  /* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))
#define __xg(x) ((volatile long *)(x))

static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
        *ptr = val;
}

#define _set_64bit set_64bit

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *         but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
        case 1:
                __asm__ __volatile__("xchgb %b0,%1"
                        :"=q" (x)
                        :"m" (*__xg(ptr)), "0" (x)
                        :"memory");
                break;
        case 2:
                __asm__ __volatile__("xchgw %w0,%1"
                        :"=r" (x)
                        :"m" (*__xg(ptr)), "0" (x)
                        :"memory");
                break;
        case 4:
                __asm__ __volatile__("xchgl %k0,%1"
                        :"=r" (x)
                        :"m" (*__xg(ptr)), "0" (x)
                        :"memory");
                break;
        case 8:
                __asm__ __volatile__("xchgq %0,%1"
                        :"=r" (x)
                        :"m" (*__xg(ptr)), "0" (x)
                        :"memory");
                break;
        }
        return x;
}
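
/*
 * Usage sketch (not part of the original header): atomically swap in a new
 * pointer and get the previous one back in a single step; as noted above,
 * xchg has locked semantics even without an explicit "lock" prefix.
 * 'demo_current' and 'demo_publish' are hypothetical names.
 */
#if 0
static void *demo_current;

static void *demo_publish(void *new_ptr)
{
        return xchg(&demo_current, new_ptr);
}
#endif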
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
                        : "=a"(prev)
                        : "q"(new), "m"(*__xg(ptr)), "0"(old)
                        : "memory");
                return prev;
        case 2:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
                        : "=a"(prev)
                        : "r"(new), "m"(*__xg(ptr)), "0"(old)
                        : "memory");
                return prev;
        case 4:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
                        : "=a"(prev)
                        : "r"(new), "m"(*__xg(ptr)), "0"(old)
                        : "memory");
                return prev;
        case 8:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
                        : "=a"(prev)
                        : "r"(new), "m"(*__xg(ptr)), "0"(old)
                        : "memory");
                return prev;
        }
        return old;
}

#define cmpxchg(ptr,o,n)\
        ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
                                       (unsigned long)(n),sizeof(*(ptr))))
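
/*
 * Usage sketch (not part of the original header): the usual compare-and-swap
 * retry loop.  As the comment above says, success is detected by comparing
 * the value returned by cmpxchg() with the 'old' value that was passed in.
 * 'demo_counter' and 'demo_add' are hypothetical names.
 */
#if 0
static unsigned long demo_counter;

static void demo_add(unsigned long delta)
{
        unsigned long old, new;

        do {
                old = demo_counter;
                new = old + delta;
        } while (cmpxchg(&demo_counter, old, new) != old);
}
#endif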
#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      do {} while(0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do {} while(0)
#endif

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()    asm volatile("mfence":::"memory")
#define rmb()   asm volatile("lfence":::"memory")

#ifdef CONFIG_UNORDERED_IO
#define wmb()   asm volatile("sfence" ::: "memory")
#else
#define wmb()   asm volatile("" ::: "memory")
#endif
#define read_barrier_depends()  do {} while(0)
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
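
/*
 * Usage sketch (not part of the original header): a producer fills in data
 * and then raises a flag; the consumer tests the flag and then reads the
 * data.  smp_wmb() orders the two stores and smp_rmb() orders the two loads
 * (both collapse to compiler barriers on UP builds).  'demo_data' and
 * 'demo_ready' are hypothetical variables.
 */
#if 0
static int demo_data;
static int demo_ready;

static void demo_producer(int v)
{
        demo_data = v;
        smp_wmb();              /* publish the data before the flag */
        demo_ready = 1;
}

static int demo_consumer(void)
{
        if (!demo_ready)
                return -1;
        smp_rmb();              /* see the flag before reading the data */
        return demo_data;
}
#endif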
#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

/*
 * The use of 'barrier' in the following reflects their use as local-lock
 * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
 * critical operations are executed. All critical operations must complete
 * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
 * includes these barriers, for example.
 */

#define __cli()                                                         \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        preempt_disable();                                              \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];          \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        preempt_enable_no_resched();                                    \
        barrier();                                                      \
} while (0)

#define __sti()                                                         \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        preempt_disable();                                              \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];          \
        _vcpu->evtchn_upcall_mask = 0;                                  \
        barrier(); /* unmask then check (avoid races) */                \
        if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
                force_evtchn_callback();                                \
        preempt_enable();                                               \
} while (0)

#define __save_flags(x)                                                 \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        preempt_disable();                                              \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];          \
        (x) = _vcpu->evtchn_upcall_mask;                                \
        preempt_enable();                                               \
} while (0)

#define __restore_flags(x)                                              \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        preempt_disable();                                              \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];          \
        if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
                barrier(); /* unmask then check (avoid races) */        \
                if ( unlikely(_vcpu->evtchn_upcall_pending) )           \
                        force_evtchn_callback();                        \
                preempt_enable();                                       \
        } else                                                          \
                preempt_enable_no_resched();                            \
} while (0)

#define __save_and_cli(x)                                               \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        preempt_disable();                                              \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];          \
        (x) = _vcpu->evtchn_upcall_mask;                                \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        preempt_enable_no_resched();                                    \
        barrier();                                                      \
} while (0)

#define local_irq_save(x)       __save_and_cli(x)
#define local_irq_restore(x)    __restore_flags(x)
#define local_save_flags(x)     __save_flags(x)
#define local_irq_disable()     __cli()
#define local_irq_enable()      __sti()

/* Cannot use preempt_enable() here as we would recurse in preempt_schedule(). */
#define irqs_disabled()                                                 \
({      int ___x;                                                       \
        vcpu_info_t *_vcpu;                                             \
        preempt_disable();                                              \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];          \
        ___x = (_vcpu->evtchn_upcall_mask != 0);                        \
        preempt_enable_no_resched();                                    \
        ___x; })
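
/*
 * Usage sketch (not part of the original header): the standard
 * save/disable/restore pattern.  Under Xen these macros toggle the vcpu's
 * evtchn_upcall_mask instead of executing cli/sti, and __restore_flags()
 * delivers any event that became pending while the mask was set.
 * 'demo_count' and 'demo_bump' are hypothetical names.
 */
#if 0
static unsigned long demo_count;

static void demo_bump(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* mask event-channel upcalls */
        demo_count++;
        local_irq_restore(flags);       /* unmask; fire callback if events pended */
}
#endif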
void safe_halt(void);
void halt(void);

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);

#endif