ia64/xen-unstable

extras/mini-os/arch/x86/x86_64.S @ 16434:d46265d21dc5

[Mini-OS] Fix x86 arch_switch_thread

Fix x86 arch_switch_thread by making it pure assembly.
The x86_64 version was missing general-register clobbers, and BP should
theoretically be clobbered too, but gcc will not accept it in the clobber
list, so the only simple safe solution is to use pure assembly.

Signed-off-by: Samuel Thibault <samuel.thibault@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Nov 23 16:23:28 2007 +0000 (2007-11-23)
parents f28d36628de8
children 2c52520f3284
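
For context, the pre-fix arch_switch_thread did the switch with GCC extended
inline asm from C. The sketch below is illustrative only -- the function name,
the struct layout and the exact instruction sequence are assumptions chosen to
mirror the new __arch_switch_threads, not the removed Mini-OS code -- but it
shows why that approach is fragile: every register the switched-to code may
trash has to appear in the clobber list, and %rbp cannot be listed there while
gcc uses it as the frame pointer.

/* Illustrative sketch only, not the removed Mini-OS code. */
struct switch_ctx {
    unsigned long sp;           /* saved stack pointer   */
    unsigned long ip;           /* saved resume address  */
};

static inline void inline_asm_switch(struct switch_ctx *prev,
                                     struct switch_ctx *next)
{
    __asm__ __volatile__ (
        "movq %%rsp, 0(%0)\n\t"   /* save the current stack pointer       */
        "leaq 1f(%%rip), %%rax\n\t"
        "movq %%rax, 8(%0)\n\t"   /* save the resume address (label 1)    */
        "movq 0(%1), %%rsp\n\t"   /* switch to the next thread's stack    */
        "jmp  *8(%1)\n\t"         /* jump to its saved resume address     */
        "1:"                      /* a switched-back thread resumes here  */
        : "+D" (prev), "+S" (next)
        :
        /* Everything the other thread may have trashed must be listed here
         * (the original bug was that some registers were missing), and %rbp
         * cannot be listed at all while gcc uses it as the frame pointer. */
        : "rax", "rbx", "rcx", "rdx", "r8", "r9", "r10", "r11",
          "r12", "r13", "r14", "r15", "memory", "cc");
}

Even a complete clobber list cannot tell gcc that %rbp and any
frame-pointer-relative spills are stale after the switch, which is why the fix
moves the whole operation into the pure-assembly __arch_switch_threads at the
bottom of the file below.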
#include <os.h>
#include <xen/features.h>

.section __xen_guest
        .ascii  "GUEST_OS=Mini-OS"
        .ascii  ",XEN_VER=xen-3.0"
        .ascii  ",VIRT_BASE=0x0"        /* &_text from minios_x86_64.lds */
        .ascii  ",ELF_PADDR_OFFSET=0x0"
        .ascii  ",HYPERCALL_PAGE=0x2"
        .ascii  ",LOADER=generic"
        .byte   0
.text

#define ENTRY(X) .globl X ; X :
.globl _start, shared_info, hypercall_page
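
/* Kernel entry point: Xen enters here with a pointer to the start_info
   page in %rsi.  Set up and align the boot stack, then hand that pointer
   to start_kernel() as its first argument. */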
_start:
        cld
        movq stack_start(%rip),%rsp
        andq $(~(8192-1)), %rsp
        movq %rsi,%rdi
        call start_kernel

stack_start:
        .quad stack+(2*8192)

        /* Unpleasant -- the PTE that maps this page is actually overwritten */
        /* to map the real shared-info page! :-) */
        .org 0x1000
shared_info:

        .org 0x2000
hypercall_page:

        .org 0x3000

/* Offsets into shared_info_t. */
#define evtchn_upcall_pending           /* 0 */
#define evtchn_upcall_mask              1
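
/* NMI_MASK is the Xen "NMIs masked" flag kept in the top bit of the saved
   EFLAGS word.  RDI, ORIG_RAX and EFLAGS are byte offsets into the
   exception frame built by error_entry below. */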
NMI_MASK = 0x80000000

#define RDI 112
#define ORIG_RAX 120       /* + error_code */
#define EFLAGS 144
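
/* SAVE_REST/RESTORE_REST spill and reload the six callee-saved registers
   (%rbx, %rbp, %r12-%r15) on the stack, below the saved argument registers. */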
#define REST_SKIP 6*8
.macro SAVE_REST
        subq $REST_SKIP,%rsp
#       CFI_ADJUST_CFA_OFFSET   REST_SKIP
        movq %rbx,5*8(%rsp)
#       CFI_REL_OFFSET  rbx,5*8
        movq %rbp,4*8(%rsp)
#       CFI_REL_OFFSET  rbp,4*8
        movq %r12,3*8(%rsp)
#       CFI_REL_OFFSET  r12,3*8
        movq %r13,2*8(%rsp)
#       CFI_REL_OFFSET  r13,2*8
        movq %r14,1*8(%rsp)
#       CFI_REL_OFFSET  r14,1*8
        movq %r15,(%rsp)
#       CFI_REL_OFFSET  r15,0*8
.endm

.macro RESTORE_REST
        movq (%rsp),%r15
#       CFI_RESTORE r15
        movq 1*8(%rsp),%r14
#       CFI_RESTORE r14
        movq 2*8(%rsp),%r13
#       CFI_RESTORE r13
        movq 3*8(%rsp),%r12
#       CFI_RESTORE r12
        movq 4*8(%rsp),%rbp
#       CFI_RESTORE rbp
        movq 5*8(%rsp),%rbx
#       CFI_RESTORE rbx
        addq $REST_SKIP,%rsp
#       CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
.endm
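
/* RESTORE_ARGS reloads the caller-saved/argument registers from the
   exception frame; each skip* parameter leaves the corresponding register
   untouched, and addskip extra bytes are popped together with the frame. */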
#define ARG_SKIP 9*8
.macro RESTORE_ARGS skiprax=0,addskip=0,skiprcx=0,skipr11=0,skipr8910=0,skiprdx=0
        .if \skipr11
        .else
        movq (%rsp),%r11
#       CFI_RESTORE r11
        .endif
        .if \skipr8910
        .else
        movq 1*8(%rsp),%r10
#       CFI_RESTORE r10
        movq 2*8(%rsp),%r9
#       CFI_RESTORE r9
        movq 3*8(%rsp),%r8
#       CFI_RESTORE r8
        .endif
        .if \skiprax
        .else
        movq 4*8(%rsp),%rax
#       CFI_RESTORE rax
        .endif
        .if \skiprcx
        .else
        movq 5*8(%rsp),%rcx
#       CFI_RESTORE rcx
        .endif
        .if \skiprdx
        .else
        movq 6*8(%rsp),%rdx
#       CFI_RESTORE rdx
        .endif
        movq 7*8(%rsp),%rsi
#       CFI_RESTORE rsi
        movq 8*8(%rsp),%rdi
#       CFI_RESTORE rdi
        .if ARG_SKIP+\addskip > 0
        addq $ARG_SKIP+\addskip,%rsp
#       CFI_ADJUST_CFA_OFFSET   -(ARG_SKIP+\addskip)
        .endif
.endm
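
/* Return to the interrupted context.  The common case is a direct iretq
   (raising the CS/SS RPL to 3 unless the guest has
   XENFEAT_supervisor_mode_kernel); if the Xen NMI-mask flag is set in the
   saved EFLAGS we must return through the HYPERVISOR_iret hypercall. */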
.macro HYPERVISOR_IRET flag
#       testb $3,1*8(%rsp)      /* Don't need to do that in Mini-OS, as */
#       jnz   2f                /* there is no userspace? */
        testl $NMI_MASK,2*8(%rsp)
        jnz   2f

        testb $1,(xen_features+XENFEAT_supervisor_mode_kernel)
        jnz   1f

        /* Direct iret to kernel space. Correct CS and SS. */
        orb   $3,1*8(%rsp)
        orb   $3,4*8(%rsp)
1:      iretq

2:      /* Slow iret via hypervisor. */
        andl  $~NMI_MASK, 16(%rsp)
        pushq $\flag
        jmp   hypercall_page + (__HYPERVISOR_iret * 32)
.endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
ENTRY(error_entry)
#       _frame RDI
        /* rdi slot contains rax, oldrax contains error code */
        cld
        subq  $14*8,%rsp
#       CFI_ADJUST_CFA_OFFSET   (14*8)
        movq %rsi,13*8(%rsp)
#       CFI_REL_OFFSET  rsi,RSI
        movq 14*8(%rsp),%rsi    /* load rax from rdi slot */
        movq %rdx,12*8(%rsp)
#       CFI_REL_OFFSET  rdx,RDX
        movq %rcx,11*8(%rsp)
#       CFI_REL_OFFSET  rcx,RCX
        movq %rsi,10*8(%rsp)    /* store rax */
#       CFI_REL_OFFSET  rax,RAX
        movq %r8, 9*8(%rsp)
#       CFI_REL_OFFSET  r8,R8
        movq %r9, 8*8(%rsp)
#       CFI_REL_OFFSET  r9,R9
        movq %r10,7*8(%rsp)
#       CFI_REL_OFFSET  r10,R10
        movq %r11,6*8(%rsp)
#       CFI_REL_OFFSET  r11,R11
        movq %rbx,5*8(%rsp)
#       CFI_REL_OFFSET  rbx,RBX
        movq %rbp,4*8(%rsp)
#       CFI_REL_OFFSET  rbp,RBP
        movq %r12,3*8(%rsp)
#       CFI_REL_OFFSET  r12,R12
        movq %r13,2*8(%rsp)
#       CFI_REL_OFFSET  r13,R13
        movq %r14,1*8(%rsp)
#       CFI_REL_OFFSET  r14,R14
        movq %r15,(%rsp)
#       CFI_REL_OFFSET  r15,R15
#if 0
        cmpl $__KERNEL_CS,CS(%rsp)
        je   error_kernelspace
#endif
error_call_handler:
        movq %rdi, RDI(%rsp)
        movq %rsp,%rdi
        movq ORIG_RAX(%rsp),%rsi        # get error code
        movq $-1,ORIG_RAX(%rsp)
        call *%rax
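
/* zeroentry/errorentry wire an exception vector to its C handler: they pop
   the %rcx/%r11 pair that Xen pushes on exception entry, make sure an error
   code (CPU-supplied or zero) and the old %rax are on the stack, load the
   handler address into %rax and jump to error_entry. */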
.macro zeroentry sym
#       INTR_FRAME
        movq (%rsp),%rcx
        movq 8(%rsp),%r11
        addq $0x10,%rsp         /* skip rcx and r11 */
        pushq $0                /* push error code/oldrax */
#       CFI_ADJUST_CFA_OFFSET 8
        pushq %rax              /* push real oldrax to the rdi slot */
#       CFI_ADJUST_CFA_OFFSET 8
        leaq  \sym(%rip),%rax
        jmp error_entry
#       CFI_ENDPROC
.endm

.macro errorentry sym
#       XCPT_FRAME
        movq (%rsp),%rcx
        movq 8(%rsp),%r11
        addq $0x10,%rsp         /* rsp points to the error code */
        pushq %rax
#       CFI_ADJUST_CFA_OFFSET 8
        leaq  \sym(%rip),%rax
        jmp error_entry
#       CFI_ENDPROC
.endm
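
/* Event-mask helpers: block/unblock event delivery and test for pending
   events through the evtchn_upcall_mask/evtchn_upcall_pending bytes of the
   shared info page (vcpu 0's vcpu_info sits at its start). */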
#define XEN_GET_VCPU_INFO(reg)  movq HYPERVISOR_shared_info,reg
#define XEN_PUT_VCPU_INFO(reg)
#define XEN_PUT_VCPU_INFO_fixup
#define XEN_LOCKED_BLOCK_EVENTS(reg)    movb $1,evtchn_upcall_mask(reg)
#define XEN_LOCKED_UNBLOCK_EVENTS(reg)  movb $0,evtchn_upcall_mask(reg)
#define XEN_TEST_PENDING(reg)   testb $0xFF,evtchn_upcall_pending(reg)

#define XEN_BLOCK_EVENTS(reg)   XEN_GET_VCPU_INFO(reg)          ; \
                                XEN_LOCKED_BLOCK_EVENTS(reg)    ; \
                                XEN_PUT_VCPU_INFO(reg)

#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg)          ; \
                                XEN_LOCKED_UNBLOCK_EVENTS(reg)  ; \
                                XEN_PUT_VCPU_INFO(reg)
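
/* Event upcall from Xen.  hypervisor_callback is entered like an exception
   (via zeroentry); hypervisor_callback2 then switches to the IRQ stack kept
   at %gs:8 (nesting counted at %gs:0) and calls do_hypervisor_callback with
   a pointer to the saved register frame. */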
ENTRY(hypervisor_callback)
        zeroentry hypervisor_callback2

ENTRY(hypervisor_callback2)
        movq %rdi, %rsp
11:     movq %gs:8,%rax
        incl %gs:0
        cmovzq %rax,%rsp
        pushq %rdi
        call do_hypervisor_callback
        popq %rsp
        decl %gs:0
        jmp error_exit
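
/* Return path that re-enables event delivery.  Between scrit and ecrit the
   frame is unwound with events unmasked, so the window is critical: if an
   event becomes pending in it (14:), events are re-blocked, the callee-saved
   registers are re-saved and we loop back to 11: to handle it. */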
#       ALIGN
restore_all_enable_events:
        XEN_UNBLOCK_EVENTS(%rsi)        # %rsi is already set up...

scrit:  /**** START OF CRITICAL REGION ****/
        XEN_TEST_PENDING(%rsi)
        jnz  14f                        # process more events if necessary...
        XEN_PUT_VCPU_INFO(%rsi)
        RESTORE_ARGS 0,8,0
        HYPERVISOR_IRET 0

14:     XEN_LOCKED_BLOCK_EVENTS(%rsi)
        XEN_PUT_VCPU_INFO(%rsi)
        SAVE_REST
        movq %rsp,%rdi                  # set the argument again
        jmp  11b
ecrit:  /**** END OF CRITICAL REGION ****/
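
/* Common exception/event return: if the interrupted context had EFLAGS.IF
   set but events are currently masked, return via restore_all_enable_events
   so pending events get delivered; otherwise restore and iret directly. */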
retint_kernel:
retint_restore_args:
        movl EFLAGS-REST_SKIP(%rsp), %eax
        shr $9, %eax                    # EAX[0] == IRET_EFLAGS.IF
        XEN_GET_VCPU_INFO(%rsi)
        andb evtchn_upcall_mask(%rsi),%al
        andb $1,%al                     # EAX[0] == IRET_EFLAGS.IF & event_mask
        jnz restore_all_enable_events   #        != 0 => enable event delivery
        XEN_PUT_VCPU_INFO(%rsi)

        RESTORE_ARGS 0,8,0
        HYPERVISOR_IRET 0

error_exit:
        RESTORE_REST
        /* cli */
        XEN_BLOCK_EVENTS(%rsi)
        jmp retint_kernel
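
/* Failsafe callback: Xen bounces here if it cannot complete a return to the
   guest (e.g. a bad segment selector on iret).  Mini-OS just discards the
   %rcx/%r11 pair Xen pushed and retries the iret. */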
ENTRY(failsafe_callback)
        popq  %rcx
        popq  %r11
        iretq
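
/* Exception stubs: each vector is routed to its do_* C handler through
   zeroentry (CPU supplies no error code) or errorentry (CPU pushes one). */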
ENTRY(coprocessor_error)
        zeroentry do_coprocessor_error

ENTRY(simd_coprocessor_error)
        zeroentry do_simd_coprocessor_error

ENTRY(device_not_available)
        zeroentry do_device_not_available

ENTRY(debug)
#       INTR_FRAME
#       CFI_ADJUST_CFA_OFFSET 8
        zeroentry do_debug
#       CFI_ENDPROC

ENTRY(int3)
#       INTR_FRAME
#       CFI_ADJUST_CFA_OFFSET 8
        zeroentry do_int3
#       CFI_ENDPROC

ENTRY(overflow)
        zeroentry do_overflow

ENTRY(bounds)
        zeroentry do_bounds

ENTRY(invalid_op)
        zeroentry do_invalid_op

ENTRY(coprocessor_segment_overrun)
        zeroentry do_coprocessor_segment_overrun

ENTRY(invalid_TSS)
        errorentry do_invalid_TSS

ENTRY(segment_not_present)
        errorentry do_segment_not_present

/* runs on exception stack */
ENTRY(stack_segment)
#       XCPT_FRAME
        errorentry do_stack_segment
#       CFI_ENDPROC

ENTRY(general_protection)
        errorentry do_general_protection

ENTRY(alignment_check)
        errorentry do_alignment_check

ENTRY(divide_error)
        zeroentry do_divide_error

ENTRY(spurious_interrupt_bug)
        zeroentry do_spurious_interrupt_bug

ENTRY(page_fault)
        errorentry do_page_fault
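
/* First frame of a newly created thread: thread creation leaves the thread
   function and its argument on the new stack.  Pop them, clear %rbp so
   stack walks terminate here, run the function, then call exit_thread. */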
ENTRY(thread_starter)
        popq %rdi
        popq %rbx
        pushq $0
        xorq %rbp,%rbp
        call *%rbx
        call exit_thread
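
/* __arch_switch_threads: the pure-assembly context switch this changeset
   introduces.  %rdi points at the outgoing thread's save area, %rsi at the
   incoming one's.  Callee-saved registers are pushed on the old stack,
   %rsp and a resume %rip (label 1) are saved at offsets 0 and 8, then the
   next thread's saved %rsp is loaded and its saved %rip is jumped to via
   push+ret; a thread switched back in resumes at 1: and pops its registers. */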
ENTRY(__arch_switch_threads)
        pushq %rbp
        pushq %rbx
        pushq %r12
        pushq %r13
        pushq %r14
        pushq %r15
        movq %rsp, (%rdi)               /* save RSP */
        movq (%rsi), %rsp               /* restore RSP */
        movq $1f, 8(%rdi)               /* save RIP */
        pushq 8(%rsi)                   /* restore RIP */
        ret
1:
        popq %r15
        popq %r14
        popq %r13
        popq %r12
        popq %rbx
        popq %rbp
        ret