ia64/xen-unstable

view extras/mini-os/x86_32.S @ 4146:f2d61710e4d9

bitkeeper revision 1.1236.25.24 (42366e9aQ71LQ8uCB-Y1IwVNqx5eqA)

Merge djm@kirby.fc.hp.com://home/djm/src/xen/xeno-unstable-ia64.bk
into sportsman.spdomain:/home/djm/xeno-unstable-ia64.bk
author djm@sportsman.spdomain
date Tue Mar 15 05:11:54 2005 +0000 (2005-03-15)
parents 279cb235be66
children a6914c2c15cf
line source
1 #include <os.h>
# The __xen_guest section is metadata read by the Xen domain builder:
# it declares the guest ABI version, loader type, and pagetable mode.
3 .section __xen_guest
4 .asciz "XEN_VER=3.0,LOADER=generic,PT_MODE_WRITABLE"
5 .text
7 .globl _start, shared_info
# Domain entry point: switch to a known stack and enter C code.
# %esi is passed through untouched to start_kernel — presumably the
# start-info page pointer supplied by Xen; verify against start_kernel.
9 _start:
10 cld
11 lss stack_start,%esp # load SS:ESP from the far pointer below
12 push %esi # forwarded as start_kernel's first (cdecl) argument
13 call start_kernel
# Far-pointer operand for lss: 32-bit offset (top of the 8KB stack)
# followed by the stack-segment selector.
15 stack_start:
16 .long stack+8192, __KERNEL_SS
18 /* Unpleasant -- the PTE that maps this page is actually overwritten */
19 /* to map the real shared-info page! :-) */
# Reserve one page-aligned page in the image for the label; at runtime
# only the page mapping is replaced, not this placeholder's contents.
20 .org 0x1000
21 shared_info:
22 .org 0x2000
# Byte offsets into the trap-stack frame built by the exception entry
# code (9 saved 4-byte slots below these: ebx,ecx,edx,esi,edi,ebp,eax,
# ds, then es at 0x20; the two slots above hold stub-pushed values and
# the CPU-pushed EIP/CS).
24 ES = 0x20
25 ORIG_EAX = 0x24
26 EIP = 0x28
27 CS = 0x2C
# Declare a global entry label.
29 #define ENTRY(X) .globl X ; X :
# Save the full register frame (push order matches the offsets above)
# and switch the data segment registers to the kernel selectors.
31 #define SAVE_ALL \
32 cld; \
33 pushl %es; \
34 pushl %ds; \
35 pushl %eax; \
36 pushl %ebp; \
37 pushl %edi; \
38 pushl %esi; \
39 pushl %edx; \
40 pushl %ecx; \
41 pushl %ebx; \
42 movl $(__KERNEL_DS),%edx; \
43 movl %edx,%ds; \
44 movl %edx,%es;
# Inverse of SAVE_ALL: restore registers, discard the error-code slot
# (addl $4), and return from the trap.  NOTE(review): the trailing
# backslash after "iret;" continues the macro onto the following line
# (originally a blank line) — confirm against the pristine source.
46 #define RESTORE_ALL \
47 popl %ebx; \
48 popl %ecx; \
49 popl %edx; \
50 popl %esi; \
51 popl %edi; \
52 popl %ebp; \
53 popl %eax; \
54 popl %ds; \
55 popl %es; \
56 addl $4,%esp; \
57 iret; \
59 ENTRY(divide_error)
60 pushl $0 # no error code
61 pushl $do_divide_error
# Common exception path.  On entry the stack holds (low to high): the C
# handler address and the error code (both pushed by the stub, or by the
# CPU for faults that supply one), then EIP/CS/EFLAGS.  Build the rest
# of the register frame by hand (same layout as SAVE_ALL), then call the
# handler as handler(pt_regs *, error_code).
62 do_exception:
63 pushl %ds
64 pushl %eax
65 xorl %eax,%eax
66 pushl %ebp
67 pushl %edi
68 pushl %esi
69 pushl %edx
70 decl %eax # eax = -1
71 pushl %ecx
72 pushl %ebx
73 cld
74 movl %es,%ecx
75 movl ORIG_EAX(%esp), %esi # get the error code
76 movl ES(%esp), %edi # get the function address
# Overwrite the two stub-pushed slots so the frame matches SAVE_ALL's
# layout: ORIG_EAX <- -1, ES <- the saved %es value.
77 movl %eax, ORIG_EAX(%esp)
78 movl %ecx, ES(%esp)
79 movl %esp,%edx
80 pushl %esi # push the error code
81 pushl %edx # push the pt_regs pointer
82 movl $(__KERNEL_DS),%edx
83 movl %edx,%ds
84 movl %edx,%es
85 call *%edi
86 addl $8,%esp # pop both arguments; fall through to the return path
# Return path shared by all exception handlers.
89 ret_from_exception:
90 movb CS(%esp),%cl
91 test $2,%cl # slow return to ring 2 or 3
92 jne safesti # re-enable event callbacks before leaving the kernel
93 RESTORE_ALL
95 # A note on the "critical region" in our callback handler.
96 # We want to avoid stacking callback handlers due to events occurring
97 # during handling of the last event. To do this, we keep events disabled
98 # until we've done all processing. HOWEVER, we must enable events before
99 # popping the stack frame (can't be done atomically) and so it would still
100 # be possible to get enough handler activations to overflow the stack.
101 # Although unlikely, bugs of that kind are hard to track down, so we'd
102 # like to avoid the possibility.
103 # So, on entry to the handler we detect whether we interrupted an
104 # existing activation in its critical region -- if so, we pop the current
105 # activation and restart the handler using the previous one.
106 ENTRY(hypervisor_callback)
107 pushl %eax
108 SAVE_ALL
# Did we interrupt this handler inside its own critical region?  If so,
# merge the two stack frames before proceeding (see the fixup code and
# the comment block above).
109 movl EIP(%esp),%eax
110 cmpl $scrit,%eax
111 jb 11f
112 cmpl $ecrit,%eax
113 jb critical_region_fixup
114 11: push %esp
# NOTE(review): the call into the C event dispatcher is commented out in
# this revision, leaving the handler with no effect beyond re-enabling
# callbacks — possibly a transcription artifact; verify against the
# repository history.
115 # call do_hypervisor_callback
116 add $4,%esp
117 movl HYPERVISOR_shared_info,%esi
118 xorl %eax,%eax # %eax = 0 for the compare inside the critical region
119 movb CS(%esp),%cl
120 test $2,%cl # slow return to ring 2 or 3
# NOTE(review): both outcomes of this branch land on safesti (the jump
# target is also the fall-through), so the test is redundant here —
# confirm intent; compare the equivalent test in ret_from_exception.
121 jne safesti
122 safesti:btsl $31,4(%esi) # reenable event callbacks
# The instruction byte offsets from scrit to ecrit must stay in exact
# sync with critical_fixup_table below.
123 scrit: /**** START OF CRITICAL REGION ****/
124 cmpl %eax,(%esi) # presumably the pending-events word; verify layout
125 jne 14f # process more events if necessary...
126 RESTORE_ALL
127 14: btrl %eax,4(%esi)
128 jmp 11b
129 ecrit: /**** END OF CRITICAL REGION ****/
130 # [How we do the fixup]. We want to merge the current stack frame with the
131 # just-interrupted frame. How we do this depends on where in the critical
132 # region the interrupted handler was executing, and so how many saved
133 # registers are in each frame. We do this quickly using the lookup table
134 # 'critical_fixup_table'. For each byte offset in the critical region, it
135 # provides the number of bytes which have already been popped from the
136 # interrupted stack frame.
137 critical_region_fixup:
# %eax holds the interrupted EIP, already known to lie inside
# [scrit, ecrit); use its offset to index the per-byte fixup table.
138 addl $critical_fixup_table-scrit,%eax
139 movzbl (%eax),%eax # %eax contains num bytes popped
140 mov %esp,%esi
141 add %eax,%esi # %esi points at end of src region
# 0x34 = 13 slots of 4 bytes: the 9 saved registers, the ORIG_EAX slot,
# and the CPU's EIP/CS/EFLAGS — i.e. one complete trap frame.
142 mov %esp,%edi
143 add $0x34,%edi # %edi points at end of dst region
144 mov %eax,%ecx
145 shr $2,%ecx # convert the byte count to a dword count for the loop
146 je 16f # skip loop if nothing to copy
147 15: subl $4,%esi # pre-decrementing copy loop
148 subl $4,%edi
149 movl (%esi),%eax
150 movl %eax,(%edi)
151 loop 15b
152 16: movl %edi,%esp # final %edi is top of merged stack
153 jmp 11b
# One table byte per code byte of the critical region: the number of
# frame bytes the interrupted activation had already popped when it was
# hit at that exact byte offset.  Must be regenerated whenever the
# critical-region instructions change.
155 critical_fixup_table:
156 .byte 0x00,0x00 # cmpl %eax,(%esi)
157 .byte 0x00,0x00 # jne 14f
158 .byte 0x00 # pop %ebx
159 .byte 0x04 # pop %ecx
160 .byte 0x08 # pop %edx
161 .byte 0x0c # pop %esi
162 .byte 0x10 # pop %edi
163 .byte 0x14 # pop %ebp
164 .byte 0x18 # pop %eax
165 .byte 0x1c # pop %ds
166 .byte 0x20 # pop %es
167 .byte 0x24,0x24,0x24 # add $4,%esp
168 .byte 0x28 # iret
# NOTE(review): the code at label 14 reads "btrl %eax,4(%esi)" (4 bytes)
# while this entry budgets 5 bytes for the immediate form "btrl $31".
# The slack is harmless — the trailing entries are all 0x00 — but the
# two should be reconciled; verify against the assembled byte lengths.
169 .byte 0x00,0x00,0x00,0x00,0x00 # btrl $31,4(%esi)
170 .byte 0x00,0x00 # jmp 11b
172 # Hypervisor uses this for application faults while it executes.
# Presumably Xen has pushed replacement ds/es/fs/gs values plus an iret
# frame before invoking this callback — verify against the hypervisor
# interface; we simply reload the selectors and resume.
173 ENTRY(failsafe_callback)
174 pop %ds
175 pop %es
176 pop %fs
177 pop %gs
178 iret
# Trap stubs for exceptions that do not supply a hardware error code:
# push a dummy 0 plus the C handler address, then join do_exception.
180 ENTRY(coprocessor_error)
181 pushl $0
182 pushl $do_coprocessor_error
183 jmp do_exception
185 ENTRY(simd_coprocessor_error)
186 pushl $0
187 pushl $do_simd_coprocessor_error
188 jmp do_exception
# No handler at all: return to the interrupted code immediately.
190 ENTRY(device_not_available)
191 iret
193 ENTRY(debug)
194 pushl $0
195 pushl $do_debug
196 jmp do_exception
198 ENTRY(int3)
199 pushl $0
200 pushl $do_int3
201 jmp do_exception
203 ENTRY(overflow)
204 pushl $0
205 pushl $do_overflow
206 jmp do_exception
208 ENTRY(bounds)
209 pushl $0
210 pushl $do_bounds
211 jmp do_exception
213 ENTRY(invalid_op)
214 pushl $0
215 pushl $do_invalid_op
216 jmp do_exception
218 ENTRY(coprocessor_segment_overrun)
219 pushl $0
220 pushl $do_coprocessor_segment_overrun
221 jmp do_exception
# Trap stubs for exceptions where an error code is already on the stack:
# push only the C handler address (no dummy 0), then join do_exception.
223 ENTRY(double_fault)
224 pushl $do_double_fault
225 jmp do_exception
227 ENTRY(invalid_TSS)
228 pushl $do_invalid_TSS
229 jmp do_exception
231 ENTRY(segment_not_present)
232 pushl $do_segment_not_present
233 jmp do_exception
235 ENTRY(stack_segment)
236 pushl $do_stack_segment
237 jmp do_exception
239 ENTRY(general_protection)
240 pushl $do_general_protection
241 jmp do_exception
243 ENTRY(alignment_check)
244 pushl $do_alignment_check
245 jmp do_exception
247 # This handler is special, because it gets an extra value on its stack,
248 # which is the linear faulting address.
# Same hand-built frame as do_exception, but the slot at offset ES holds
# the faulting address (per the comment above) instead of a handler
# address, and the C handler takes three arguments.
249 ENTRY(page_fault)
250 pushl %ds
251 pushl %eax
252 xorl %eax,%eax
253 pushl %ebp
254 pushl %edi
255 pushl %esi
256 pushl %edx
257 decl %eax # eax = -1
258 pushl %ecx
259 pushl %ebx
260 cld
261 movl %es,%ecx
262 movl ORIG_EAX(%esp), %esi # get the error code
263 movl ES(%esp), %edi # get the faulting address
# Normalize the frame exactly as do_exception does:
# ORIG_EAX <- -1, ES <- the saved %es value.
264 movl %eax, ORIG_EAX(%esp)
265 movl %ecx, ES(%esp)
266 movl %esp,%edx
267 pushl %edi # push the faulting address
268 pushl %esi # push the error code
269 pushl %edx # push the pt_regs pointer
270 movl $(__KERNEL_DS),%edx
271 movl %edx,%ds
272 movl %edx,%es
273 call do_page_fault # do_page_fault(regs, error_code, address)
274 addl $12,%esp # pop the three arguments
275 jmp ret_from_exception
# Final two stubs; neither supplies an error code, so push a dummy 0
# and the C handler address, then join do_exception.
277 ENTRY(machine_check)
278 pushl $0
279 pushl $do_machine_check
280 jmp do_exception
282 ENTRY(spurious_interrupt_bug)
283 pushl $0
284 pushl $do_spurious_interrupt_bug
285 jmp do_exception