direct-io.hg

view extras/mini-os/x86_32.S @ 8693:491a8798945e

Remove shadow-translate Linux patches for now. We'll merge this stuff
in piecemeal.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Jan 28 12:09:45 2006 +0100 (2006-01-28)
parents 198828cc103b
children 323d40eefbce
line source
# Mini-OS x86-32 low-level entry and trap code for a Xen 3.0 guest.
# GAS/AT&T syntax.  NOTE(review): the leading decimal number on each
# line below is viewer line numbering from the hg web export, not
# part of the original source text.
1 #include <os.h>
2 #include <xen/arch-x86_32.h>
# NUL-terminated key=value string parsed by the Xen domain builder to
# identify and configure this guest image.
5 .section __xen_guest
6 .ascii "GUEST_OS=Mini-OS"
7 .ascii ",XEN_VER=xen-3.0"
8 .ascii ",LOADER=generic"
9 .ascii ",PT_MODE_WRITABLE"
10 .byte 0
11 .text
13 .globl _start, shared_info
# Kernel entry point.  NOTE(review): %esi is pushed as the sole
# argument to start_kernel -- presumably Xen enters with %esi holding
# the start_info pointer; confirm against the Mini-OS C side.
15 _start:
16 cld                     # C ABI expects a clear direction flag
17 lss stack_start,%esp    # load SS:ESP together from the pair below
18 push %esi               # argument for start_kernel
19 call start_kernel
# 48-bit far-pointer operand for lss: 32-bit offset, then 16-bit selector.
21 stack_start:
22 .long stack+8192, __KERNEL_SS
24 /* Unpleasant -- the PTE that maps this page is actually overwritten */
25 /* to map the real shared-info page! :-) */
26 .org 0x1000
27 shared_info:
28 .org 0x2000
# Byte offsets into the trap stack frame: eight saved registers
# (0x00-0x1c) sit below these slots.  ES is the slot where a trap
# stub's pushed handler address initially lands before it is replaced
# by the real saved %es; ORIG_EAX likewise holds the error code before
# being overwritten with -1 (see do_exception / page_fault).
31 ES = 0x20
32 ORIG_EAX = 0x24
33 EIP = 0x28
34 CS = 0x2C
# Declare X global and define it as a label.  (No inline comment on
# the #define itself: a stray '#' would be a CPP stringize operator.)
36 #define ENTRY(X) .globl X ; X :
# Build the trap register frame: push %es,%ds and all GPRs (nine
# values = 0x24 bytes), then load the flat kernel data selector into
# %ds/%es.  Clobbers %edx.  No inline comments below: the trailing
# backslash must remain the last character of each continued line.
38 #define SAVE_ALL \
39 cld; \
40 pushl %es; \
41 pushl %ds; \
42 pushl %eax; \
43 pushl %ebp; \
44 pushl %edi; \
45 pushl %esi; \
46 pushl %edx; \
47 pushl %ecx; \
48 pushl %ebx; \
49 movl $(__KERNEL_DS),%edx; \
50 movl %edx,%ds; \
51 movl %edx,%es;
# Undo SAVE_ALL in reverse order, discard the ORIG_EAX/error-code slot
# (addl $4,%esp), and return from the trap with iret.
53 #define RESTORE_ALL \
54 popl %ebx; \
55 popl %ecx; \
56 popl %edx; \
57 popl %esi; \
58 popl %edi; \
59 popl %ebp; \
60 popl %eax; \
61 popl %ds; \
62 popl %es; \
63 addl $4,%esp; \
64 iret; \
66 ENTRY(divide_error)
67 pushl $0 # no error code
68 pushl $do_divide_error
# Common exception path.  On entry the stack holds (top first): the C
# handler address, the error code (or a $0 dummy), then the iret
# frame.  Build a pt_regs-style frame: after eight pushes the handler
# address sits at offset ES and the error code at ORIG_EAX; both are
# read into registers, then the slots are rewritten so ES holds the
# saved %es and ORIG_EAX holds -1.  Finally call handler(regs, error).
69 do_exception:
70 pushl %ds
71 pushl %eax
72 xorl %eax, %eax
73 pushl %ebp
74 pushl %edi
75 pushl %esi
76 pushl %edx
77 decl %eax # eax = -1
78 pushl %ecx
79 pushl %ebx
80 cld
81 movl %es, %ecx
82 movl ES(%esp), %edi # get the function address
83 movl ORIG_EAX(%esp), %edx # get the error code
84 movl %eax, ORIG_EAX(%esp) # orig_eax slot := -1
85 movl %ecx, ES(%esp) # handler slot := saved %es
86 movl $(__KERNEL_DS), %ecx
87 movl %ecx, %ds
88 movl %ecx, %es
89 movl %esp,%eax # pt_regs pointer
90 pushl %edx # arg 2: error code
91 pushl %eax # arg 1: pt_regs pointer
92 call *%edi
93 addl $8,%esp # pop args; falls through to ret_from_exception
# NOTE(review): dead alternative version of the sequence above, kept
# commented out in the original.
95 /* pushl %ds
96 pushl %eax
97 xorl %eax,%eax
98 pushl %ebp
99 pushl %edi
100 pushl %esi
101 pushl %edx
102 decl %eax # eax = -1
103 pushl %ecx
104 pushl %ebx
105 cld
106 movl %es,%ecx
107 movl ORIG_EAX(%esp), %esi # get the error code
108 movl ES(%esp), %edi # get the function address
109 movl %eax, ORIG_EAX(%esp)
110 movl %ecx, ES(%esp)
111 movl %esp,%edx
112 pushl %esi # push the error code
113 pushl %edx # push the pt_regs pointer
114 movl $(__KERNEL_DS),%edx
115 movl %edx,%ds
116 movl %edx,%es
117 call *%edi
118 addl $8,%esp */
# Common exit from exception handling: if we are returning to ring 2
# or 3 (bit 1 of the saved CS selector set), jump to safesti to
# re-enable event callbacks first; otherwise restore and iret directly.
121 ret_from_exception:
122 movb CS(%esp),%cl
123 test $2,%cl # slow return to ring 2 or 3
124 jne safesti
125 RESTORE_ALL
127 # A note on the "critical region" in our callback handler.
128 # We want to avoid stacking callback handlers due to events occurring
129 # during handling of the last event. To do this, we keep events disabled
130 # until we've done all processing. HOWEVER, we must enable events before
131 # popping the stack frame (can't be done atomically) and so it would still
132 # be possible to get enough handler activations to overflow the stack.
133 # Although unlikely, bugs of that kind are hard to track down, so we'd
134 # like to avoid the possibility.
135 # So, on entry to the handler we detect whether we interrupted an
136 # existing activation in its critical region -- if so, we pop the current
137 # activation and restart the handler using the previous one.
138 ENTRY(hypervisor_callback)
139 pushl %eax # dummy ORIG_EAX slot so the frame matches do_exception
140 SAVE_ALL
141 movl EIP(%esp),%eax
# Did we interrupt the critical region below?  If so, merge frames.
142 cmpl $scrit,%eax
143 jb 11f
144 cmpl $ecrit,%eax
145 jb critical_region_fixup
146 11: push %esp # pt_regs pointer for the C dispatcher
147 call do_hypervisor_callback
148 add $4,%esp
149 movl HYPERVISOR_shared_info,%esi
150 xorl %eax,%eax
151 movb CS(%esp),%cl
# NOTE(review): this test is vestigial -- the jne target is also the
# fall-through address, so both outcomes reach safesti.
152 test $2,%cl # slow return to ring 2 or 3
153 jne safesti
154 safesti:movb $0,1(%esi) # reenable event callbacks
155 scrit: /**** START OF CRITICAL REGION ****/
156 testb $0xFF,(%esi) # more events pending?
157 jnz 14f # process more events if necessary...
158 RESTORE_ALL
159 14: movb $1,1(%esi) # mask event callbacks again and re-dispatch
160 jmp 11b
161 ecrit: /**** END OF CRITICAL REGION ****/
162 # [How we do the fixup]. We want to merge the current stack frame with the
163 # just-interrupted frame. How we do this depends on where in the critical
164 # region the interrupted handler was executing, and so how many saved
165 # registers are in each frame. We do this quickly using the lookup table
166 # 'critical_fixup_table'. For each byte offset in the critical region, it
167 # provides the number of bytes which have already been popped from the
168 # interrupted stack frame.
169 critical_region_fixup:
170 addl $critical_fixup_table-scrit,%eax
171 movzbl (%eax),%eax # %eax contains num bytes popped
172 mov %esp,%esi
173 add %eax,%esi # %esi points at end of src region
174 mov %esp,%edi
175 add $0x34,%edi # %edi points at end of dst region (0x34 = full frame)
176 mov %eax,%ecx
177 shr $2,%ecx # convert byte count to dword count
178 je 16f # skip loop if nothing to copy
179 15: subl $4,%esi # pre-decrementing copy loop
180 subl $4,%edi
181 movl (%esi),%eax
182 movl %eax,(%edi)
183 loop 15b
184 16: movl %edi,%esp # final %edi is top of merged stack
185 jmp 11b
# One table byte per instruction byte of the critical region, giving
# how many bytes of the interrupted frame had already been popped at
# that exact point.  Entries must track the RESTORE_ALL encoding.
187 critical_fixup_table:
188 .byte 0x00,0x00,0x00 # testb $0xff,(%esi)
189 .byte 0x00,0x00 # jne 14f
190 .byte 0x00 # pop %ebx
191 .byte 0x04 # pop %ecx
192 .byte 0x08 # pop %edx
193 .byte 0x0c # pop %esi
194 .byte 0x10 # pop %edi
195 .byte 0x14 # pop %ebp
196 .byte 0x18 # pop %eax
197 .byte 0x1c # pop %ds
198 .byte 0x20 # pop %es
199 .byte 0x24,0x24,0x24 # add $4,%esp
200 .byte 0x28 # iret
201 .byte 0x00,0x00,0x00,0x00 # movb $1,1(%esi)
202 .byte 0x00,0x00 # jmp 11b
204 # Hypervisor uses this for application faults while it executes.
# NOTE(review): the pops imply Xen pushed the faulting %ds/%es/%fs/%gs
# selector values below the iret frame; reload them and return.
205 ENTRY(failsafe_callback)
206 pop %ds
207 pop %es
208 pop %fs
209 pop %gs
210 iret
# Trap entry stubs.  Exceptions without a CPU-supplied error code push
# a $0 dummy so every frame has the same shape; each then pushes its C
# handler's address and joins the common do_exception path.
212 ENTRY(coprocessor_error)
213 pushl $0
214 pushl $do_coprocessor_error
215 jmp do_exception
217 ENTRY(simd_coprocessor_error)
218 pushl $0
219 pushl $do_simd_coprocessor_error
220 jmp do_exception
# NOTE(review): #NM is silently ignored -- presumably no lazy FPU
# switching in Mini-OS; confirm before relying on FPU trap handling.
222 ENTRY(device_not_available)
223 iret
225 ENTRY(debug)
226 pushl $0
227 pushl $do_debug
228 jmp do_exception
230 ENTRY(int3)
231 pushl $0
232 pushl $do_int3
233 jmp do_exception
235 ENTRY(overflow)
236 pushl $0
237 pushl $do_overflow
238 jmp do_exception
240 ENTRY(bounds)
241 pushl $0
242 pushl $do_bounds
243 jmp do_exception
245 ENTRY(invalid_op)
246 pushl $0
247 pushl $do_invalid_op
248 jmp do_exception
250 ENTRY(coprocessor_segment_overrun)
251 pushl $0
252 pushl $do_coprocessor_segment_overrun
253 jmp do_exception
# The remaining faults deliver a hardware error code, so no dummy push.
255 ENTRY(invalid_TSS)
256 pushl $do_invalid_TSS
257 jmp do_exception
259 ENTRY(segment_not_present)
260 pushl $do_segment_not_present
261 jmp do_exception
263 ENTRY(stack_segment)
264 pushl $do_stack_segment
265 jmp do_exception
267 ENTRY(general_protection)
268 pushl $do_general_protection
269 jmp do_exception
271 ENTRY(alignment_check)
272 pushl $do_alignment_check
273 jmp do_exception
275 # This handler is special, because it gets an extra value on its stack,
276 # which is the linear faulting address.
277 # fastcall register usage: %eax = pt_regs, %edx = error code,
278 # %ecx = fault address
# Frame building mirrors do_exception, except the extra value (fault
# address) occupies the ES slot and the error code the ORIG_EAX slot;
# both are pulled into registers before the slots are rewritten with
# the saved %es and -1.  Arguments pass in registers, not the stack.
279 ENTRY(page_fault)
280 pushl %ds
281 pushl %eax
282 xorl %eax, %eax
283 pushl %ebp
284 pushl %edi
285 pushl %esi
286 pushl %edx
287 decl %eax /* eax = -1 */
288 pushl %ecx
289 pushl %ebx
290 cld
291 movl %es,%edi
292 movl ES(%esp), %ecx /* get the faulting address */
293 movl ORIG_EAX(%esp), %edx /* get the error code */
294 movl %eax, ORIG_EAX(%esp) /* orig_eax slot := -1 */
295 movl %edi, ES(%esp) /* slot := saved %es */
296 movl $(__KERNEL_DS),%eax
297 movl %eax, %ds
298 movl %eax, %es
299 movl %esp,%eax /* pt_regs pointer */
300 call do_page_fault
301 jmp ret_from_exception
# Final stubs: dummy error code plus handler address, then join the
# common do_exception path.
303 ENTRY(machine_check)
304 pushl $0
305 pushl $do_machine_check
306 jmp do_exception
308 ENTRY(spurious_interrupt_bug)
309 pushl $0
310 pushl $do_spurious_interrupt_bug
311 jmp do_exception