/*
 * extras/mini-os/x86_64.S @ 10784:97cc7ed90061 (xen-unstable)
 *
 * [IA64] fix merge error with xen-unstable.hg
 *
 * Signed-off-by: Alex Williamson <alex.williamson@hp.com>
 * Author: awilliam@xenbuild.aw
 * Date:   Tue Jul 25 12:39:01 2006 -0600
 */

#include <os.h>
#include <xen/features.h>

.section __xen_guest
        .ascii  "GUEST_OS=Mini-OS"
        .ascii  ",XEN_VER=xen-3.0"
        .ascii  ",VIRT_BASE=0x0"        /* &_text from minios_x86_64.lds */
        .ascii  ",ELF_PADDR_OFFSET=0x0"
        .ascii  ",HYPERCALL_PAGE=0x2"
        .ascii  ",LOADER=generic"
        .byte   0
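
# The __xen_guest section is not mapped at run time; the domain builder
# parses it as one NUL-terminated string of comma-separated key=value
# pairs.  HYPERCALL_PAGE=0x2 asks Xen to install its hypercall stubs in
# page 2 of the image, matching the hypercall_page label placed at offset
# 0x2000 below.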

.text

#define ENTRY(X) .globl X ; X :
.globl _start, shared_info, hypercall_page

_start:
        cld
        movq stack_start(%rip),%rsp
        movq %rsi,%rdi
        call start_kernel

stack_start:
        .quad stack+8192
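
# Xen enters a PV guest with a pointer to the start_info page in %rsi;
# moving it to %rdi makes it the first argument to start_kernel(si).
# stack_start holds the top of an 8 KiB stack object (defined elsewhere
# in Mini-OS) since the stack grows downwards.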

        /* Unpleasant -- the PTE that maps this page is actually overwritten */
        /* to map the real shared-info page! :-) */
        .org 0x1000
shared_info:
        .org 0x2000

hypercall_page:
        .org 0x3000
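
# Xen fills the hypercall page with one 32-byte stub per hypercall, so
# hypercall N lives at hypercall_page + N * 32.  A sketch of a direct
# invocation (illustrative only -- Mini-OS normally goes through its
# hypercall wrapper headers):
#
#         movq $0,%rdi                    # SCHEDOP_yield
#         call hypercall_page + (__HYPERVISOR_sched_op * 32)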

/* Offsets into shared_info_t. */
#define evtchn_upcall_pending   /* 0 */
#define evtchn_upcall_mask      1
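
# These are byte offsets into the vcpu_info record at the start of the
# shared info page (layout assumed from xen/interface/xen.h):
#
#     struct vcpu_info {
#         uint8_t evtchn_upcall_pending;   /* offset 0 */
#         uint8_t evtchn_upcall_mask;      /* offset 1 */
#         ...
#     };
#
# evtchn_upcall_pending deliberately expands to nothing, so the operand
# evtchn_upcall_pending(reg) assembles as (reg), i.e. offset 0.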

NMI_MASK = 0x80000000

#define RDI       112
#define ORIG_RAX  120       /* + error_code */
#define EFLAGS    144

#define REST_SKIP 6*8
.macro SAVE_REST
        subq $REST_SKIP,%rsp
#       CFI_ADJUST_CFA_OFFSET REST_SKIP
        movq %rbx,5*8(%rsp)
#       CFI_REL_OFFSET rbx,5*8
        movq %rbp,4*8(%rsp)
#       CFI_REL_OFFSET rbp,4*8
        movq %r12,3*8(%rsp)
#       CFI_REL_OFFSET r12,3*8
        movq %r13,2*8(%rsp)
#       CFI_REL_OFFSET r13,2*8
        movq %r14,1*8(%rsp)
#       CFI_REL_OFFSET r14,1*8
        movq %r15,(%rsp)
#       CFI_REL_OFFSET r15,0*8
.endm
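
# SAVE_REST extends the frame built by error_entry with the six
# callee-saved registers; RESTORE_REST below is its exact inverse.  The
# completed frame mirrors Linux's struct pt_regs: r15 at the lowest
# address up through rbx, then the argument registers, orig_rax, and the
# hardware iret words -- which is where the RDI (112), ORIG_RAX (120) and
# EFLAGS (144) byte offsets above come from.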

.macro RESTORE_REST
        movq (%rsp),%r15
#       CFI_RESTORE r15
        movq 1*8(%rsp),%r14
#       CFI_RESTORE r14
        movq 2*8(%rsp),%r13
#       CFI_RESTORE r13
        movq 3*8(%rsp),%r12
#       CFI_RESTORE r12
        movq 4*8(%rsp),%rbp
#       CFI_RESTORE rbp
        movq 5*8(%rsp),%rbx
#       CFI_RESTORE rbx
        addq $REST_SKIP,%rsp
#       CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
.endm

#define ARG_SKIP 9*8
.macro RESTORE_ARGS skiprax=0,addskip=0,skiprcx=0,skipr11=0,skipr8910=0,skiprdx=0
        .if \skipr11
        .else
        movq (%rsp),%r11
#       CFI_RESTORE r11
        .endif
        .if \skipr8910
        .else
        movq 1*8(%rsp),%r10
#       CFI_RESTORE r10
        movq 2*8(%rsp),%r9
#       CFI_RESTORE r9
        movq 3*8(%rsp),%r8
#       CFI_RESTORE r8
        .endif
        .if \skiprax
        .else
        movq 4*8(%rsp),%rax
#       CFI_RESTORE rax
        .endif
        .if \skiprcx
        .else
        movq 5*8(%rsp),%rcx
#       CFI_RESTORE rcx
        .endif
        .if \skiprdx
        .else
        movq 6*8(%rsp),%rdx
#       CFI_RESTORE rdx
        .endif
        movq 7*8(%rsp),%rsi
#       CFI_RESTORE rsi
        movq 8*8(%rsp),%rdi
#       CFI_RESTORE rdi
        .if ARG_SKIP+\addskip > 0
        addq $ARG_SKIP+\addskip,%rsp
#       CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
        .endif
.endm
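
# RESTORE_ARGS undoes the argument-register half of the frame.  Each
# skip* flag leaves that register untouched (for paths that must preserve
# a value), and addskip pops extra slots beyond the nine argument
# registers: the "RESTORE_ARGS 0,8,0" used below adds 8 bytes so that the
# orig_rax/error-code slot is discarded too, leaving just the hardware
# iret frame on the stack.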

.macro HYPERVISOR_IRET flag
#       testb $3,1*8(%rsp)      /* Don't need to do that in Mini-os, as */
#       jnz   2f                /* there is no userspace? */
        testl $NMI_MASK,2*8(%rsp)
        jnz   2f

        testb $1,(xen_features+XENFEAT_supervisor_mode_kernel)
        jnz   1f

        /* Direct iret to kernel space. Correct CS and SS. */
        orb   $3,1*8(%rsp)
        orb   $3,4*8(%rsp)
1:      iretq

2:      /* Slow iret via hypervisor. */
        andl  $~NMI_MASK, 16(%rsp)
        pushq $\flag
        jmp   hypercall_page + (__HYPERVISOR_iret * 32)
.endm
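
# On entry the stack holds only the hardware frame (rip, cs, eflags, rsp,
# ss).  Bit 31 of the saved eflags (NMI_MASK) is not a real hardware flag:
# it marks "NMIs blocked", and if set the return must go through the
# hypervisor.  On the fast path, CS and SS get their RPL forced to 3
# because an x86_64 PV guest kernel runs in ring 3 -- unless the
# XENFEAT_supervisor_mode_kernel feature (from <xen/features.h>) says the
# kernel really is in ring 0, in which case a plain iretq suffices.  The
# slow path pushes \flag and enters the 32-byte iret stub in the
# hypercall page.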

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
ENTRY(error_entry)
#       _frame RDI
        /* rdi slot contains rax, oldrax contains error code */
        cld
        subq $14*8,%rsp
#       CFI_ADJUST_CFA_OFFSET (14*8)
        movq %rsi,13*8(%rsp)
#       CFI_REL_OFFSET rsi,RSI
        movq 14*8(%rsp),%rsi    /* load rax from rdi slot */
        movq %rdx,12*8(%rsp)
#       CFI_REL_OFFSET rdx,RDX
        movq %rcx,11*8(%rsp)
#       CFI_REL_OFFSET rcx,RCX
        movq %rsi,10*8(%rsp)    /* store rax */
#       CFI_REL_OFFSET rax,RAX
        movq %r8, 9*8(%rsp)
#       CFI_REL_OFFSET r8,R8
        movq %r9, 8*8(%rsp)
#       CFI_REL_OFFSET r9,R9
        movq %r10,7*8(%rsp)
#       CFI_REL_OFFSET r10,R10
        movq %r11,6*8(%rsp)
#       CFI_REL_OFFSET r11,R11
        movq %rbx,5*8(%rsp)
#       CFI_REL_OFFSET rbx,RBX
        movq %rbp,4*8(%rsp)
#       CFI_REL_OFFSET rbp,RBP
        movq %r12,3*8(%rsp)
#       CFI_REL_OFFSET r12,R12
        movq %r13,2*8(%rsp)
#       CFI_REL_OFFSET r13,R13
        movq %r14,1*8(%rsp)
#       CFI_REL_OFFSET r14,R14
        movq %r15,(%rsp)
#       CFI_REL_OFFSET r15,R15
#if 0
        cmpl $__KERNEL_CS,CS(%rsp)
        je error_kernelspace
#endif
error_call_handler:
        movq %rdi, RDI(%rsp)
        movq %rsp,%rdi
        movq ORIG_RAX(%rsp),%rsi        # get error code
        movq $-1,ORIG_RAX(%rsp)
        call *%rax
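
# By the time error_entry is reached (from the zeroentry/errorentry stubs
# below), the stack already carries the iret frame, the error code in the
# orig_rax slot and the interrupted %rax in the rdi slot; the code above
# completes a pt_regs-style frame, then calls the C handler whose address
# arrived in %rax, passing %rdi = frame pointer and %rsi = error code --
# i.e. a signature like void do_page_fault(struct pt_regs *regs,
# unsigned long error_code) (handler names assumed from traps.c).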

.macro zeroentry sym
#       INTR_FRAME
        movq (%rsp),%rcx
        movq 8(%rsp),%r11
        addq $0x10,%rsp         /* skip rcx and r11 */
        pushq $0                /* push error code/oldrax */
#       CFI_ADJUST_CFA_OFFSET 8
        pushq %rax              /* push real oldrax to the rdi slot */
#       CFI_ADJUST_CFA_OFFSET 8
        leaq \sym(%rip),%rax
        jmp error_entry
#       CFI_ENDPROC
.endm

.macro errorentry sym
#       XCPT_FRAME
        movq (%rsp),%rcx
        movq 8(%rsp),%r11
        addq $0x10,%rsp         /* rsp points to the error code */
        pushq %rax
#       CFI_ADJUST_CFA_OFFSET 8
        leaq \sym(%rip),%rax
        jmp error_entry
#       CFI_ENDPROC
.endm
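
# For a PV guest, Xen saves %rcx and %r11 on top of the exception frame
# (mirroring what syscall clobbers); both stubs therefore reload that
# pair and drop its slots before building the common frame.  zeroentry
# serves exceptions that deliver no hardware error code and pushes a
# literal 0 in its place; errorentry serves those where the error code is
# already on the stack.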

#define XEN_GET_VCPU_INFO(reg)  movq HYPERVISOR_shared_info,reg
#define XEN_PUT_VCPU_INFO(reg)
#define XEN_PUT_VCPU_INFO_fixup
#define XEN_LOCKED_BLOCK_EVENTS(reg)    movb $1,evtchn_upcall_mask(reg)
#define XEN_LOCKED_UNBLOCK_EVENTS(reg)  movb $0,evtchn_upcall_mask(reg)
#define XEN_TEST_PENDING(reg)   testb $0xFF,evtchn_upcall_pending(reg)

#define XEN_BLOCK_EVENTS(reg)   XEN_GET_VCPU_INFO(reg)          ; \
                                XEN_LOCKED_BLOCK_EVENTS(reg)    ; \
                                XEN_PUT_VCPU_INFO(reg)

#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg)          ; \
                                XEN_LOCKED_UNBLOCK_EVENTS(reg)  ; \
                                XEN_PUT_VCPU_INFO(reg)
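
# Event-channel masking is the PV analogue of the interrupt flag:
# XEN_BLOCK_EVENTS stands in for cli and XEN_UNBLOCK_EVENTS for sti.
# Mini-OS runs a single vcpu whose vcpu_info sits at offset 0 of the
# shared info page, so "getting" the vcpu_info is just loading the
# HYPERVISOR_shared_info pointer and "putting" it back is a no-op.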

ENTRY(hypervisor_callback)
        zeroentry hypervisor_callback2

ENTRY(hypervisor_callback2)
        movq %rdi, %rsp
11:     movq %gs:8,%rax
        incl %gs:0
        cmovzq %rax,%rsp
        pushq %rdi
        call do_hypervisor_callback
        popq %rsp
        decl %gs:0
        jmp error_exit
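
# hypervisor_callback is the event-channel upcall registered with Xen; it
# reuses the exception path, so hypervisor_callback2 is reached from
# error_entry with %rdi pointing at the saved-register frame, which it
# adopts as its stack.  The counter at %gs:0 and the alternate stack
# pointer at %gs:8 follow Linux's irq-stack nesting scheme (assumed
# here): the count is bumped and, when this is the outermost event,
# execution moves to the dedicated stack before calling the C dispatcher
# do_hypervisor_callback(regs).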

# ALIGN
restore_all_enable_events:
        XEN_UNBLOCK_EVENTS(%rsi)        # %rsi is already set up...

scrit:  /**** START OF CRITICAL REGION ****/
        XEN_TEST_PENDING(%rsi)
        jnz 14f                         # process more events if necessary...
        XEN_PUT_VCPU_INFO(%rsi)
        RESTORE_ARGS 0,8,0
        HYPERVISOR_IRET 0

14:     XEN_LOCKED_BLOCK_EVENTS(%rsi)
        XEN_PUT_VCPU_INFO(%rsi)
        SAVE_REST
        movq %rsp,%rdi                  # set the argument again
        jmp 11b
ecrit:  /**** END OF CRITICAL REGION ****/
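
# This is the classic PV event race: the instant the mask is cleared, a
# new event may be signalled.  If one is already pending, events are
# re-blocked, the callee-saved registers are re-saved with SAVE_REST, and
# control loops back to 11: to dispatch it.  The scrit/ecrit labels
# document the bounds of the region in which an upcall could interrupt
# the register restore.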

retint_kernel:
retint_restore_args:
        movl EFLAGS-REST_SKIP(%rsp), %eax
        shr $9, %eax                    # EAX[0] == IRET_EFLAGS.IF
        XEN_GET_VCPU_INFO(%rsi)
        andb evtchn_upcall_mask(%rsi),%al
        andb $1,%al                     # EAX[0] == IRET_EFLAGS.IF & event_mask
        jnz restore_all_enable_events   # != 0 => enable event delivery
        XEN_PUT_VCPU_INFO(%rsi)

        RESTORE_ARGS 0,8,0
        HYPERVISOR_IRET 0
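
# Bit 9 of the saved eflags is IF, so after the shift bit 0 of %eax holds
# the virtual interrupt-enable state being returned to.  If that state is
# "enabled" while events are still masked (IF & mask != 0), the mask must
# be lifted -- and any pending events drained -- before returning.  The
# EFLAGS-REST_SKIP offset accounts for RESTORE_REST having already popped
# the callee-saved registers on the error_exit path.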

error_exit:
        RESTORE_REST
        /* cli */
        XEN_BLOCK_EVENTS(%rsi)
        jmp retint_kernel

ENTRY(failsafe_callback)
        popq %rcx
        popq %r11
        iretq
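
# Xen bounces through failsafe_callback when completing an iret into the
# guest would fault (a bad segment selector, for instance).  Mini-OS has
# no user segments to repair, so it simply discards the %rcx/%r11 pair
# Xen saved and retries the iretq.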

ENTRY(coprocessor_error)
        zeroentry do_coprocessor_error

ENTRY(simd_coprocessor_error)
        zeroentry do_simd_coprocessor_error

ENTRY(device_not_available)
        zeroentry do_device_not_available

ENTRY(debug)
#       INTR_FRAME
#       CFI_ADJUST_CFA_OFFSET 8
        zeroentry do_debug
#       CFI_ENDPROC

ENTRY(int3)
#       INTR_FRAME
#       CFI_ADJUST_CFA_OFFSET 8
        zeroentry do_int3
#       CFI_ENDPROC

ENTRY(overflow)
        zeroentry do_overflow

ENTRY(bounds)
        zeroentry do_bounds

ENTRY(invalid_op)
        zeroentry do_invalid_op

ENTRY(coprocessor_segment_overrun)
        zeroentry do_coprocessor_segment_overrun

ENTRY(invalid_TSS)
        errorentry do_invalid_TSS

ENTRY(segment_not_present)
        errorentry do_segment_not_present

/* runs on exception stack */
ENTRY(stack_segment)
#       XCPT_FRAME
        errorentry do_stack_segment
#       CFI_ENDPROC

ENTRY(general_protection)
        errorentry do_general_protection

ENTRY(alignment_check)
        errorentry do_alignment_check

ENTRY(divide_error)
        zeroentry do_divide_error

ENTRY(spurious_interrupt_bug)
        zeroentry do_spurious_interrupt_bug

ENTRY(page_fault)
        errorentry do_page_fault

ENTRY(thread_starter)
        popq %rdi
        popq %rbx
        call *%rbx
        call exit_thread
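
# thread_starter is where newly created threads begin: the thread setup
# code (create_thread in sched.c, assumed) leaves the thread's argument
# and its entry function on the fresh stack, so the two pops land them in
# %rdi and %rbx for the indirect call; should the function ever return,
# exit_thread reaps the thread.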