ia64/xen-unstable

view extras/mini-os/entry.S @ 810:3f44ecdcb631

bitkeeper revision 1.499 (3f867c85oOyUdtcboCzrLgktKtvdgA)

ac_timer.h, ac_timer.c:
Xen ac timers now use a heap to find earliest timeout.
author kaf24@scramble.cl.cam.ac.uk
date Fri Oct 10 09:31:49 2003 +0000 (2003-10-10)
parents 34473973889b
children 71f9c171157e
line source
1 /*
2 * linux/arch/i386/entry.S
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * Adjusted for XenoLinux use by K A Frasier
7 * Adjusted for Xen minimal os by R Neugebauer
8 */
11 #include <os.h>
# Byte offsets into the register frame built by SAVE_ALL / error_code.
# EBX..EAX are the pushed general registers (ebx pushed last, so it is
# lowest), DS/ES the saved data segments, ORIG_EAX the error-code slot,
# and EIP..OLDSS the hardware-supplied trap frame (OLDESP/OLDSS are
# present only when the trap came from an outer ring).
13 EBX = 0x00
14 ECX = 0x04
15 EDX = 0x08
16 ESI = 0x0C
17 EDI = 0x10
18 EBP = 0x14
19 EAX = 0x18
20 DS = 0x1C
21 ES = 0x20
22 ORIG_EAX = 0x24
23 EIP = 0x28
24 CS = 0x2C
25 EFLAGS = 0x30
26 OLDESP = 0x34
27 OLDSS = 0x38
# EFLAGS bit masks: carry, interrupt-enable, nested-task.
29 CF_MASK = 0x00000001
30 IF_MASK = 0x00000200
31 NT_MASK = 0x00004000
33 /* Declare a globally-visible label */
34 #define ENTRY(X) .globl X ; X :
/* Identity macro, kept so code copied from Linux assembles unchanged. */
36 /* A Linux hangover. Just ignore it. */
37 #define SYMBOL_NAME(X) X
/*
 * SAVE_ALL: finish building the register frame after the caller has
 * already pushed the ORIG_EAX slot (an error code or a dummy value).
 * Pushes the data segments and general registers in exactly the order
 * matching the offsets defined above, then loads the kernel data
 * segment into %ds/%es so C handlers can run.  'cld' establishes the
 * direction-flag state C code expects.
 * (No comments inside the macro: lines are joined by '\' continuations.)
 */
39 #define SAVE_ALL \
40 cld; \
41 pushl %es; \
42 pushl %ds; \
43 pushl %eax; \
44 pushl %ebp; \
45 pushl %edi; \
46 pushl %esi; \
47 pushl %edx; \
48 pushl %ecx; \
49 pushl %ebx; \
50 movl $(__KERNEL_DS),%edx; \
51 movl %edx,%ds; \
52 movl %edx,%es;
/*
 * RESTORE_ALL: pop the frame built by SAVE_ALL and return to the
 * interrupted context.  The segment reloads (labels 1/2) and the iret
 * (label 3) can fault on a bad saved selector or return frame, so each
 * site has an __ex_table entry mapping it to a fixup: 4/5 overwrite
 * the offending saved selector with 0 and retry the pop, while 6
 * handles a faulting iret by reloading %ds/%es from %ss and calling
 * do_exit(11) -- that fault is not recoverable here.  The
 * 'addl $4,%esp' discards the ORIG_EAX (error code) slot.
 * (No comments inside the macro: lines are joined by '\' continuations.)
 */
54 #define RESTORE_ALL \
55 popl %ebx; \
56 popl %ecx; \
57 popl %edx; \
58 popl %esi; \
59 popl %edi; \
60 popl %ebp; \
61 popl %eax; \
62 1: popl %ds; \
63 2: popl %es; \
64 addl $4,%esp; \
65 3: iret; \
66 .section .fixup,"ax"; \
67 4: movl $0,(%esp); \
68 jmp 1b; \
69 5: movl $0,(%esp); \
70 jmp 2b; \
71 6: pushl %ss; \
72 popl %ds; \
73 pushl %ss; \
74 popl %es; \
75 pushl $11; \
76 call do_exit; \
77 .previous; \
78 .section __ex_table,"a";\
79 .align 4; \
80 .long 1b,4b; \
81 .long 2b,5b; \
82 .long 3b,6b; \
83 .previous
# Divide-error trap: the CPU pushes no error code, so push a dummy 0,
# then the C handler address, and fall into the common error_code path.
85 ENTRY(divide_error)
86 pushl $0 # no error code
87 pushl $ SYMBOL_NAME(do_divide_error)
88 .align 4
# Common trap entry.  On arrival the stack holds (top down):
#   [handler address][error code][EIP][CS][EFLAGS]...
# After the pushes below, the handler address occupies the ES slot and
# the error code the ORIG_EAX slot; both are fetched into %edi/%esi and
# then overwritten (ORIG_EAX := -1, ES := saved %es) so the frame
# becomes a normal pt_regs.
89 error_code:
90 pushl %ds
91 pushl %eax
92 xorl %eax,%eax
93 pushl %ebp
94 pushl %edi
95 pushl %esi
96 pushl %edx
97 decl %eax # eax = -1
98 pushl %ecx
99 pushl %ebx
100 cld
101 movl %es,%ecx
102 movl ORIG_EAX(%esp), %esi # get the error code
103 movl ES(%esp), %edi # get the function address
104 movl %eax, ORIG_EAX(%esp)
105 movl %ecx, ES(%esp)
106 movl %esp,%edx
107 pushl %esi # push the error code
108 pushl %edx # push the pt_regs pointer
109 movl $(__KERNEL_DS),%edx
110 movl %edx,%ds
111 movl %edx,%es
# Invoke do_XXX(struct pt_regs *regs, long error_code) and drop the two
# arguments.  Execution then falls through into ret_to_user_tests below.
112 call *%edi
113 addl $8,%esp
115 # These are the tests Linux makes before exiting the OS back to userland.
116 # At this point preemption may occur, or signals may get delivered.
# In mini-os the reschedule/signal checks are stubbed out: just jump to
# safesti to re-enable event callbacks and return.
117 ret_to_user_tests:
118 # cmpl $0,need_resched(%ebx)
119 # jne reschedule
120 # cmpl $0,sigpending(%ebx)
121 # je safesti
122 jmp safesti
# Common exception exit: the low two bits of the saved CS hold the ring
# being returned to; bit 1 set means ring 2 or 3, which takes the slow
# path through ret_to_user_tests.  Otherwise unwind immediately.
125 ret_from_exception:
126 movb CS(%esp),%cl
127 test $2,%cl # slow return to ring 2 or 3
128 jne ret_to_user_tests
129 RESTORE_ALL
131 # A note on the "critical region" in our callback handler.
132 # We want to avoid stacking callback handlers due to events occurring
133 # during handling of the last event. To do this, we keep events disabled
134 # until we've done all processing. HOWEVER, we must enable events before
135 # popping the stack frame (can't be done atomically) and so it would still
136 # be possible to get enough handler activations to overflow the stack.
137 # Although unlikely, bugs of that kind are hard to track down, so we'd
138 # like to avoid the possibility.
139 # So, on entry to the handler we detect whether we interrupted an
140 # existing activation in its critical region -- if so, we pop the current
141 # activation and restart the handler using the previous one.
142 ENTRY(hypervisor_callback)
143 pushl %eax
144 SAVE_ALL
# If the interrupted EIP lies inside [scrit, ecrit) we re-entered while
# a previous activation was mid-exit; merge the two stack frames (see
# the comment block above) instead of stacking a new activation.
145 movl EIP(%esp),%eax
146 cmpl $scrit,%eax
147 jb 11f
148 cmpl $ecrit,%eax
149 jb critical_region_fixup
150 11: push %esp
151 call do_hypervisor_callback
152 add $4,%esp
153 movl SYMBOL_NAME(HYPERVISOR_shared_info),%esi
154 xorl %eax,%eax
155 movb CS(%esp),%cl
156 test $2,%cl # slow return to ring 2 or 3
157 jne ret_to_user_tests
158 safesti:btsl $31,4(%esi) # reenable event callbacks
# Critical region: events are re-enabled but the frame is not yet
# popped, so a nested callback can arrive anywhere in here.  The fixup
# table below encodes, per instruction byte, how far the exit had got.
159 scrit: /**** START OF CRITICAL REGION ****/
160 cmpl %eax,(%esi) # %eax is 0 here: any events pending in shared_info?
161 jne 14f # process more events if necessary...
162 RESTORE_ALL
# NOTE(review): btrl with %eax==0 clears bit 0 of shared_info+4, yet
# the fixup table comment below reads 'btrl $31,4(%esi)' -- confirm
# which bit (and which encoding length) is actually intended.
163 14: btrl %eax,4(%esi)
164 jmp 11b
165 ecrit: /**** END OF CRITICAL REGION ****/
166 # [How we do the fixup]. We want to merge the current stack frame with the
167 # just-interrupted frame. How we do this depends on where in the critical
168 # region the interrupted handler was executing, and so how many saved
169 # registers are in each frame. We do this quickly using the lookup table
170 # 'critical_fixup_table'. For each byte offset in the critical region, it
171 # provides the number of bytes which have already been popped from the
172 # interrupted stack frame.
173 critical_region_fixup:
# %eax holds the interrupted EIP; turn it into a byte offset within the
# critical region and look up how many frame bytes were already popped.
174 addl $critical_fixup_table-scrit,%eax
175 movzbl (%eax),%eax # %eax contains num bytes popped
176 mov %esp,%esi
177 add %eax,%esi # %esi points at end of src region
178 mov %esp,%edi
179 add $0x34,%edi # %edi points at end of dst region (0x34 = frame size through EFLAGS)
180 mov %eax,%ecx
181 shr $2,%ecx # convert bytes to dword count
182 je 16f # skip loop if nothing to copy
# Copy the still-unpopped part of the interrupted frame downwards so the
# two frames merge into one contiguous pt_regs.
183 15: subl $4,%esi # pre-decrementing copy loop
184 subl $4,%edi
185 movl (%esi),%eax
186 movl %eax,(%edi)
187 loop 15b
188 16: movl %edi,%esp # final %edi is top of merged stack
189 jmp 11b # restart event processing on the merged frame
# One byte per instruction byte of the critical region: the number of
# frame bytes already popped when a nested callback hit at that offset.
# Must stay exactly in sync with the instruction encodings between
# scrit and ecrit -- any code change there invalidates this table.
191 critical_fixup_table:
192 .byte 0x00,0x00 # cmpl %eax,(%esi)
193 .byte 0x00,0x00 # jne 14f
194 .byte 0x00 # pop %ebx
195 .byte 0x04 # pop %ecx
196 .byte 0x08 # pop %edx
197 .byte 0x0c # pop %esi
198 .byte 0x10 # pop %edi
199 .byte 0x14 # pop %ebp
200 .byte 0x18 # pop %eax
201 .byte 0x1c # pop %ds
202 .byte 0x20 # pop %es
203 .byte 0x24,0x24,0x24 # add $4,%esp
204 .byte 0x28 # iret
205 .byte 0x00,0x00,0x00,0x00,0x00 # btrl $31,4(%esi)
# NOTE(review): the instruction at label 14 is 'btrl %eax,4(%esi)'
# (4-byte encoding) while the comment above says 'btrl $31,4(%esi)'
# (5-byte encoding); one of the two looks stale -- verify the table
# still matches the assembled bytes.
206 .byte 0x00,0x00 # jmp 11b
208 # Hypervisor uses this for application faults while it executes.
209 ENTRY(failsafe_callback)
# Xen has pushed the saved segment selectors; reload them one at a
# time.  Each pop (and the final iret) can fault on a bad selector;
# the __ex_table entries below map each faulting site to a fixup that
# zeroes the bad selector and retries, except the iret fault (10),
# which rebuilds %ds/%es from %ss and calls do_exit(11).
210 1: pop %ds
211 2: pop %es
212 3: pop %fs
213 4: pop %gs
214 5: iret
# NOTE(review): the trailing '\' continuations below look like
# leftovers from a macro-style copy; with the ';' separators they
# appear not to change behaviour -- confirm before touching.
215 .section .fixup,"ax"; \
216 6: movl $0,(%esp); \
217 jmp 1b; \
218 7: movl $0,(%esp); \
219 jmp 2b; \
220 8: movl $0,(%esp); \
221 jmp 3b; \
222 9: movl $0,(%esp); \
223 jmp 4b; \
224 10: pushl %ss; \
225 popl %ds; \
226 pushl %ss; \
227 popl %es; \
228 pushl $11; \
229 call do_exit; \
230 .previous; \
231 .section __ex_table,"a";\
232 .align 4; \
233 .long 1b,6b; \
234 .long 2b,7b; \
235 .long 3b,8b; \
236 .long 4b,9b; \
237 .long 5b,10b; \
238 .previous
# FPU/SIMD faults carry no hardware error code: push a dummy 0, then
# the C handler, and take the common error_code path.
240 ENTRY(coprocessor_error)
241 pushl $0
242 pushl $ SYMBOL_NAME(do_coprocessor_error)
243 jmp error_code
245 ENTRY(simd_coprocessor_error)
246 pushl $0
247 pushl $ SYMBOL_NAME(do_simd_coprocessor_error)
248 jmp error_code
# Device-not-available (#NM): lazy-FPU hook.  The restore call is
# stubbed out here; -1 marks the ORIG_EAX slot as "not an error code".
250 ENTRY(device_not_available)
251 pushl $-1 # mark this as an int
252 SAVE_ALL
253 #call SYMBOL_NAME(math_state_restore)
254 jmp ret_from_exception
# Simple trap stubs.  Traps for which the CPU pushes no error code push
# a dummy 0 first; all then push their C handler and share error_code.
256 ENTRY(debug)
257 pushl $0
258 pushl $ SYMBOL_NAME(do_debug)
259 jmp error_code
261 ENTRY(int3)
262 pushl $0
263 pushl $ SYMBOL_NAME(do_int3)
264 jmp error_code
266 ENTRY(overflow)
267 pushl $0
268 pushl $ SYMBOL_NAME(do_overflow)
269 jmp error_code
271 ENTRY(bounds)
272 pushl $0
273 pushl $ SYMBOL_NAME(do_bounds)
274 jmp error_code
276 ENTRY(invalid_op)
277 pushl $0
278 pushl $ SYMBOL_NAME(do_invalid_op)
279 jmp error_code
281 ENTRY(coprocessor_segment_overrun)
282 pushl $0
283 pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun)
284 jmp error_code
# The following traps arrive with a CPU-supplied error code already on
# the stack, so only the handler address is pushed.
286 ENTRY(double_fault)
287 pushl $ SYMBOL_NAME(do_double_fault)
288 jmp error_code
290 ENTRY(invalid_TSS)
291 pushl $ SYMBOL_NAME(do_invalid_TSS)
292 jmp error_code
294 ENTRY(segment_not_present)
295 pushl $ SYMBOL_NAME(do_segment_not_present)
296 jmp error_code
298 ENTRY(stack_segment)
299 pushl $ SYMBOL_NAME(do_stack_segment)
300 jmp error_code
302 ENTRY(general_protection)
303 pushl $ SYMBOL_NAME(do_general_protection)
304 jmp error_code
306 ENTRY(alignment_check)
307 pushl $ SYMBOL_NAME(do_alignment_check)
308 jmp error_code
310 # This handler is special, because it gets an extra value on its stack,
311 # which is the linear faulting address.
# Stack on entry (top down): [faulting address][error code][EIP][CS]...
# Same frame-building dance as error_code, except the ES slot initially
# holds the faulting address rather than a handler pointer, and the C
# handler takes three arguments.
312 ENTRY(page_fault)
313 pushl %ds
314 pushl %eax
315 xorl %eax,%eax
316 pushl %ebp
317 pushl %edi
318 pushl %esi
319 pushl %edx
320 decl %eax # eax = -1
321 pushl %ecx
322 pushl %ebx
323 cld
324 movl %es,%ecx
325 movl ORIG_EAX(%esp), %esi # get the error code
326 movl ES(%esp), %edi # get the faulting address
# Turn the frame into a normal pt_regs: ORIG_EAX := -1, ES := saved %es.
327 movl %eax, ORIG_EAX(%esp)
328 movl %ecx, ES(%esp)
329 movl %esp,%edx
330 pushl %edi # push the faulting address
331 pushl %esi # push the error code
332 pushl %edx # push the pt_regs pointer
333 movl $(__KERNEL_DS),%edx
334 movl %edx,%ds
335 movl %edx,%es
# do_page_fault(regs, error_code, address); then drop the 3 arguments.
336 call SYMBOL_NAME(do_page_fault)
337 addl $12,%esp
338 jmp ret_from_exception
# Machine-check and spurious-interrupt traps: no CPU error code, so
# push a dummy 0 and share the common error_code path.
340 ENTRY(machine_check)
341 pushl $0
342 pushl $ SYMBOL_NAME(do_machine_check)
343 jmp error_code
345 ENTRY(spurious_interrupt_bug)
346 pushl $0
347 pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
348 jmp error_code