linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/system.h @ 7394:6cf6f8783282 (direct-io.hg)

Fix local_irq_save() and irqs_disabled() to be preemption-safe.
Signed-off-by: Keir Fraser <keir@xensource.com>

author    kaf24@firebug.cl.cam.ac.uk
date      Sun Oct 16 11:45:51 2005 +0100
parents   06d84bf87159
children  36ab34f1c31e
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/synch_bitops.h>
#include <asm/hypervisor.h>
#include <asm-xen/xen-public/arch-x86_64.h>

#ifdef __KERNEL__

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define __STR(x) #x
#define STR(x) __STR(x)

#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT "pushfq ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popfq\n\t"

#define __EXTRA_CLOBBER \
        ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"

#define switch_to(prev,next,last) \
        asm volatile(SAVE_CONTEXT \
                "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
                "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
                "call __switch_to\n\t" \
                ".globl thread_return\n" \
                "thread_return:\n\t" \
                "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
                "movq %P[thread_info](%%rsi),%%r8\n\t" \
                LOCK "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
                "movq %%rax,%%rdi\n\t" \
                "jc ret_from_fork\n\t" \
                RESTORE_CONTEXT \
                : "=a" (last) \
                : [next] "S" (next), [prev] "D" (prev), \
                  [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
                  [ti_flags] "i" (offsetof(struct thread_info, flags)), \
                  [tif_fork] "i" (TIF_FORK), \
                  [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
                  [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
                : "memory", "cc" __EXTRA_CLOBBER)
extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg,value) \
        asm volatile("\n" \
                "1:\t" \
                "movl %k0,%%" #seg "\n" \
                "2:\n" \
                ".section .fixup,\"ax\"\n" \
                "3:\t" \
                "movl %1,%%" #seg "\n\t" \
                "jmp 2b\n" \
                ".previous\n" \
                ".section __ex_table,\"a\"\n\t" \
                ".align 8\n\t" \
                ".quad 1b,3b\n" \
                ".previous" \
                : :"r" (value), "r" (0))
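/*
 * Illustrative sketch (not part of the original header): loadsegment() is
 * typically used when reloading a potentially stale or user-supplied selector,
 * e.g. %fs during a context switch.  If the selector is invalid, the fault is
 * caught by the fixup entry above and the register is loaded with the null
 * selector instead.  The field name 'thread.fsindex' and the helper name are
 * assumed here purely for the sake of the example.
 */
#if 0   /* example only, not compiled */
static inline void example_load_user_fs(struct task_struct *next)
{
        /* A faulting selector falls back to loading 0 (the null selector). */
        loadsegment(fs, next->thread.fsindex);
}
#endif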
#define set_debug(value,register) \
        __asm__("movq %0,%%db" #register \
                : /* no output */ \
                :"r" ((unsigned long) value))

#ifdef __KERNEL__
struct alt_instr {
        __u8 *instr;            /* original instruction */
        __u8 *replacement;
        __u8  cpuid;            /* cpuid bit set for replacement */
        __u8  instrlen;         /* length of original instruction */
        __u8  replacementlen;   /* length of new instruction, <= instrlen */
        __u8  pad[5];
};
#endif
/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows the use of optimized instructions even on generic binary
 * kernels.
 *
 * The length of oldinstr must be greater than or equal to the length of
 * newinstr; it can be padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without volatile and the memory clobber.
 */
#define alternative(oldinstr, newinstr, feature) \
        asm volatile ("661:\n\t" oldinstr "\n662:\n" \
                ".section .altinstructions,\"a\"\n" \
                " .align 8\n" \
                " .quad 661b\n"            /* label */ \
                " .quad 663f\n"            /* new instruction */ \
                " .byte %c0\n"             /* feature bit */ \
                " .byte 662b-661b\n"       /* sourcelen */ \
                " .byte 664f-663f\n"       /* replacementlen */ \
                ".previous\n" \
                ".section .altinstr_replacement,\"ax\"\n" \
                "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
                ".previous" :: "i" (feature) : "memory")
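/*
 * Illustrative sketch (not part of the original header): a write barrier that
 * is a plain locked add on older CPUs but is patched to SFENCE when the CPU
 * advertises SSE.  X86_FEATURE_XMM is assumed to be available from
 * <asm/cpufeature.h>; this mirrors how i386 builds its memory barriers and is
 * shown here only as a usage example of alternative().
 */
#if 0   /* example only, not compiled */
#define example_wmb() \
        alternative("lock; addl $0,0(%%rsp)", "sfence", X86_FEATURE_XMM)
#endif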
/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 *   No memory clobber here.
 *   Argument numbers start with 1.
 *   Best is to use constraints that are fixed size (like (%1) ... "r").
 *   If you use variable-sized constraints like "m" or "g" in the
 *   replacement, make sure to pad to the worst-case length.
 */
#define alternative_input(oldinstr, newinstr, feature, input...) \
        asm volatile ("661:\n\t" oldinstr "\n662:\n" \
                ".section .altinstructions,\"a\"\n" \
                " .align 8\n" \
                " .quad 661b\n"            /* label */ \
                " .quad 663f\n"            /* new instruction */ \
                " .byte %c0\n"             /* feature bit */ \
                " .byte 662b-661b\n"       /* sourcelen */ \
                " .byte 664f-663f\n"       /* replacementlen */ \
                ".previous\n" \
                ".section .altinstr_replacement,\"ax\"\n" \
                "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
                ".previous" :: "i" (feature), ##input)
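/*
 * Illustrative sketch (not part of the original header): a prefetch helper
 * that is a multi-byte nop on CPUs without SSE and is patched to PREFETCHNTA
 * where X86_FEATURE_XMM is set.  ASM_NOP5 and X86_FEATURE_XMM are assumed to
 * come from <asm/processor.h> / <asm/cpufeature.h>; this mirrors the i386
 * prefetch() pattern and is shown only as a usage example of
 * alternative_input() (inputs are numbered from %1).
 */
#if 0   /* example only, not compiled */
static inline void example_prefetch(const void *x)
{
        alternative_input(ASM_NOP5,
                          "prefetchnta (%1)",
                          X86_FEATURE_XMM,
                          "r" (x));
}
#endif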
/*
 * Clear and set 'TS' bit respectively
 */
#define clts() (HYPERVISOR_fpu_taskswitch(0))

static inline unsigned long read_cr0(void)
{
        unsigned long cr0;
        asm volatile("movq %%cr0,%0" : "=r" (cr0));
        return cr0;
}

static inline void write_cr0(unsigned long val)
{
        asm volatile("movq %0,%%cr0" :: "r" (val));
}

static inline unsigned long read_cr3(void)
{
        unsigned long cr3;
        asm("movq %%cr3,%0" : "=r" (cr3));
        return cr3;
}

static inline unsigned long read_cr4(void)
{
        unsigned long cr4;
        asm("movq %%cr4,%0" : "=r" (cr4));
        return cr4;
}

static inline void write_cr4(unsigned long val)
{
        asm volatile("movq %0,%%cr4" :: "r" (val));
}

#define stts() (HYPERVISOR_fpu_taskswitch(1))

#define wbinvd() \
        __asm__ __volatile__ ("wbinvd": : :"memory");

#endif  /* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

#define __xg(x) ((volatile long *)(x))

extern inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
        *ptr = val;
}

#define _set_64bit set_64bit
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary;
 * strictly speaking the asm constraints are not quite right, since *ptr is
 * really an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
        case 1:
                __asm__ __volatile__("xchgb %b0,%1"
                        :"=q" (x)
                        :"m" (*__xg(ptr)), "0" (x)
                        :"memory");
                break;
        case 2:
                __asm__ __volatile__("xchgw %w0,%1"
                        :"=r" (x)
                        :"m" (*__xg(ptr)), "0" (x)
                        :"memory");
                break;
        case 4:
                __asm__ __volatile__("xchgl %k0,%1"
                        :"=r" (x)
                        :"m" (*__xg(ptr)), "0" (x)
                        :"memory");
                break;
        case 8:
                __asm__ __volatile__("xchgq %0,%1"
                        :"=r" (x)
                        :"m" (*__xg(ptr)), "0" (x)
                        :"memory");
                break;
        }
        return x;
}
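/*
 * Illustrative sketch (not part of the original header): xchg() atomically
 * swaps a new value into *ptr and returns the old contents, which makes it a
 * convenient building block for a simple test-and-set style lock.  The names
 * example_trylock/example_unlock are invented for this sketch.
 */
#if 0   /* example only, not compiled */
static inline int example_trylock(unsigned long *lock)
{
        /* Non-zero return means we took the lock (old value was 0). */
        return xchg(lock, 1UL) == 0;
}

static inline void example_unlock(unsigned long *lock)
{
        barrier();      /* keep critical-section accesses before the release */
        *lock = 0;
}
#endif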
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
                        : "=a"(prev)
                        : "q"(new), "m"(*__xg(ptr)), "0"(old)
                        : "memory");
                return prev;
        case 2:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
                        : "=a"(prev)
                        : "q"(new), "m"(*__xg(ptr)), "0"(old)
                        : "memory");
                return prev;
        case 4:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
                        : "=a"(prev)
                        : "q"(new), "m"(*__xg(ptr)), "0"(old)
                        : "memory");
                return prev;
        case 8:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
                        : "=a"(prev)
                        : "q"(new), "m"(*__xg(ptr)), "0"(old)
                        : "memory");
                return prev;
        }
        return old;
}

#define cmpxchg(ptr,o,n)\
        ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
                                       (unsigned long)(n),sizeof(*(ptr))))
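/*
 * Illustrative sketch (not part of the original header): the classic
 * compare-and-swap retry loop.  The update only succeeds if nobody else
 * modified the word in between; otherwise the loop re-reads and retries.
 * example_atomic_add is an invented name for this sketch.
 */
#if 0   /* example only, not compiled */
static inline void example_atomic_add(unsigned long *counter, unsigned long delta)
{
        unsigned long old, new;

        do {
                old = *counter;         /* snapshot the current value */
                new = old + delta;      /* compute the desired value */
                /* cmpxchg() returns the value it found; equality means success */
        } while (cmpxchg(counter, old, new) != old);
}
#endif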
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() do {} while(0)
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do {} while(0)
#endif

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")

#ifdef CONFIG_UNORDERED_IO
#define wmb() asm volatile("sfence" ::: "memory")
#else
#define wmb() asm volatile("" ::: "memory")
#endif
#define read_barrier_depends() do {} while(0)
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
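/*
 * Illustrative sketch (not part of the original header): a minimal
 * producer/consumer pairing of wmb()/rmb().  The writer publishes the data
 * before setting the flag; the reader checks the flag before consuming the
 * data.  The variable and function names are invented for this sketch.
 */
#if 0   /* example only, not compiled */
static unsigned long example_data;
static int example_ready;

static inline void example_publish(unsigned long v)
{
        example_data = v;
        wmb();                  /* data must be visible before the flag */
        example_ready = 1;
}

static inline int example_consume(unsigned long *out)
{
        if (!example_ready)
                return 0;
        rmb();                  /* flag read must not pass the data read */
        *out = example_data;
        return 1;
}
#endif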
#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

/*
 * The use of 'barrier' in the following macros reflects their use as
 * local-lock operations. Reentrancy must be prevented (e.g., by __cli())
 * /before/ the following critical operations are executed. All critical
 * operations must complete /before/ reentrancy is permitted again (e.g.,
 * by __sti()). The Alpha architecture, for example, also includes these
 * barriers.
 */
#define __cli() \
do { \
        vcpu_info_t *_vcpu; \
        preempt_disable(); \
        _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 1; \
        preempt_enable_no_resched(); \
        barrier(); \
} while (0)

#define __sti() \
do { \
        vcpu_info_t *_vcpu; \
        barrier(); \
        preempt_disable(); \
        _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 0; \
        barrier(); /* unmask then check (avoid races) */ \
        if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
                force_evtchn_callback(); \
        preempt_enable(); \
} while (0)

#define __save_flags(x) \
do { \
        vcpu_info_t *_vcpu; \
        preempt_disable(); \
        _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        (x) = _vcpu->evtchn_upcall_mask; \
        preempt_enable(); \
} while (0)

#define __restore_flags(x) \
do { \
        vcpu_info_t *_vcpu; \
        barrier(); \
        preempt_disable(); \
        _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
                barrier(); /* unmask then check (avoid races) */ \
                if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
                        force_evtchn_callback(); \
                preempt_enable(); \
        } else \
                preempt_enable_no_resched(); \
} while (0)

#define safe_halt() ((void)0)

#define __save_and_cli(x) \
do { \
        vcpu_info_t *_vcpu; \
        preempt_disable(); \
        _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        (x) = _vcpu->evtchn_upcall_mask; \
        _vcpu->evtchn_upcall_mask = 1; \
        preempt_enable_no_resched(); \
        barrier(); \
} while (0)

void cpu_idle_wait(void);

#define local_irq_save(x) __save_and_cli(x)
#define local_irq_restore(x) __restore_flags(x)
#define local_save_flags(x) __save_flags(x)
#define local_irq_disable() __cli()
#define local_irq_enable() __sti()
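/*
 * Illustrative sketch (not part of the original header): the usual pattern for
 * a short critical section that must not be interrupted by event-channel
 * upcalls.  On Xen these macros mask/unmask the per-vcpu upcall mask rather
 * than executing cli/sti.  The names example_state and example_update are
 * invented for this sketch.
 */
#if 0   /* example only, not compiled */
static unsigned long example_state;

static inline void example_update(unsigned long v)
{
        unsigned long flags;

        local_irq_save(flags);          /* mask upcalls, remember old mask */
        example_state = v;              /* critical section */
        local_irq_restore(flags);       /* restore the previous mask */
}
#endif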
/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
#define irqs_disabled() \
({      int ___x; \
        vcpu_info_t *_vcpu; \
        preempt_disable(); \
        _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        ___x = (_vcpu->evtchn_upcall_mask != 0); \
        preempt_enable_no_resched(); \
        ___x; })

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

#define HAVE_EAT_KEY
void eat_key(void);

extern unsigned long arch_align_stack(unsigned long sp);

#endif