ia64/xen-unstable: extras/mini-os/x86_32.S @ 6806:4ad19fe76d50

Store dom0 store ring-ref in store.
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>

author    cl349@firebug.cl.cam.ac.uk
date      Tue Sep 13 15:32:38 2005 +0000
parents   a83ac0806d6b
children  198828cc103b
#include <os.h>
#include <xen/arch-x86_32.h>

.section __xen_guest
.asciz "XEN_VER=3.0,LOADER=generic,PT_MODE_WRITABLE"
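# (Comment added for context.) The __xen_guest section is not mapped at run
# time; the Xen domain builder reads this string while constructing the
# domain to learn the expected hypervisor ABI version, the loader type and
# the pagetable mode (here, writable pagetables).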
.text

.globl _start, shared_info

_start:
        cld
        lss stack_start,%esp
        push %esi
        call start_kernel

stack_start:
        .long stack+8192, __KERNEL_SS
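# (Comment added for clarity.) stack_start is a 48-bit far-pointer operand:
# the 'lss stack_start,%esp' above loads %esp with stack+8192 and %ss with
# __KERNEL_SS in one instruction, pointing the stack at the top of the 8KB
# 'stack' buffer (defined elsewhere in Mini-OS) before calling start_kernel
# with the start-info pointer that Xen passes in %esi.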
        /* Unpleasant -- the PTE that maps this page is actually overwritten */
        /* to map the real shared-info page! :-) */
        .org 0x1000
shared_info:
        .org 0x2000
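# (Sketch added for clarity, not part of the original file.) The page
# reserved above is typically remapped from C setup code with a hypercall
# along these lines, assuming the usual Mini-OS/Xen interface:
#
#   HYPERVISOR_update_va_mapping((unsigned long)shared_info,
#                                __pte(start_info->shared_info | 7),
#                                UVMF_INVLPG);
#
# after which accesses through 'shared_info' reach the real machine page
# that Xen shares with the guest.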
ES       = 0x20
ORIG_EAX = 0x24
EIP      = 0x28
CS       = 0x2C

#define ENTRY(X) .globl X ; X :
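# (Example added for clarity.) ENTRY just declares a global label, so e.g.
# ENTRY(divide_error) expands to:
#
#   .globl divide_error ; divide_error :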
#define SAVE_ALL \
        cld; \
        pushl %es; \
        pushl %ds; \
        pushl %eax; \
        pushl %ebp; \
        pushl %edi; \
        pushl %esi; \
        pushl %edx; \
        pushl %ecx; \
        pushl %ebx; \
        movl $(__KERNEL_DS),%edx; \
        movl %edx,%ds; \
        movl %edx,%es;
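# (Comment added for clarity.) Together with the one word pushed by the
# caller before SAVE_ALL (saved %eax in hypervisor_callback, filling the
# ORIG_EAX slot) and the iret frame supplied by Xen, SAVE_ALL produces this
# layout, which the offsets defined above index:
#
#   0x00 %ebx    0x10 %edi    0x20 %es        0x2C cs
#   0x04 %ecx    0x14 %ebp    0x24 orig_eax   0x30 eflags
#   0x08 %edx    0x18 %eax    0x28 eip
#   0x0C %esi    0x1C %ds
#
# The frame is 0x34 bytes up to and including eflags, which is why the
# critical-region fixup below uses $0x34 as the size of a complete frame.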
#define RESTORE_ALL \
        popl %ebx; \
        popl %ecx; \
        popl %edx; \
        popl %esi; \
        popl %edi; \
        popl %ebp; \
        popl %eax; \
        popl %ds; \
        popl %es; \
        addl $4,%esp; /* skip the orig_eax/error-code slot */ \
        iret;
ENTRY(divide_error)
        pushl $0                # no error code
        pushl $do_divide_error
do_exception:
        pushl %ds
        pushl %eax
        xorl %eax, %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        decl %eax               # eax = -1
        pushl %ecx
        pushl %ebx
        cld
        movl %es, %ecx
        movl ES(%esp), %edi     # get the function address
        movl ORIG_EAX(%esp), %edx # get the error code
        movl %eax, ORIG_EAX(%esp)
        movl %ecx, ES(%esp)
        movl $(__KERNEL_DS), %ecx
        movl %ecx, %ds
        movl %ecx, %es
        movl %esp,%eax          # pt_regs pointer
        pushl %edx
        pushl %eax
        call *%edi
        addl $8,%esp
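# (Note added for clarity.) The commented-out block below is a disabled
# variant of the do_exception sequence above, kept in the source; execution
# simply falls through it into ret_from_exception.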
/*      pushl %ds
        pushl %eax
        xorl %eax,%eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        decl %eax               # eax = -1
        pushl %ecx
        pushl %ebx
        cld
        movl %es,%ecx
        movl ORIG_EAX(%esp), %esi # get the error code
        movl ES(%esp), %edi     # get the function address
        movl %eax, ORIG_EAX(%esp)
        movl %ecx, ES(%esp)
        movl %esp,%edx
        pushl %esi              # push the error code
        pushl %edx              # push the pt_regs pointer
        movl $(__KERNEL_DS),%edx
        movl %edx,%ds
        movl %edx,%es
        call *%edi
        addl $8,%esp */
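# (Comment added for clarity.) Bit 1 of the saved %cs RPL is set only when
# returning to ring 2 or 3: under Xen/x86-32 the guest kernel runs in ring 1
# (RPL 01b) and applications in ring 3 (RPL 11b), so the 'test $2' below
# picks out returns to user context, which must re-enable event delivery
# first.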
ret_from_exception:
        movb CS(%esp),%cl
        test $2,%cl             # slow return to ring 2 or 3
        jne  safesti
        RESTORE_ALL
# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
ENTRY(hypervisor_callback)
        pushl %eax
        SAVE_ALL
        movl EIP(%esp),%eax
        cmpl $scrit,%eax
        jb   11f
        cmpl $ecrit,%eax
        jb   critical_region_fixup
11:     push %esp
        call do_hypervisor_callback
        add  $4,%esp
        movl HYPERVISOR_shared_info,%esi
        xorl %eax,%eax
        movb CS(%esp),%cl
        test $2,%cl             # slow return to ring 2 or 3
        jne  safesti            # (both paths land at safesti either way)
safesti:movb $0,1(%esi)         # reenable event callbacks
scrit:  /**** START OF CRITICAL REGION ****/
        testb $0xFF,(%esi)
        jnz  14f                # process more events if necessary...
        RESTORE_ALL
14:     movb $1,1(%esi)
        jmp  11b
ecrit:  /**** END OF CRITICAL REGION ****/
# [How we do the fixup]. We want to merge the current stack frame with the
# just-interrupted frame. How we do this depends on where in the critical
# region the interrupted handler was executing, and so how many saved
# registers are in each frame. We do this quickly using the lookup table
# 'critical_fixup_table'. For each byte offset in the critical region, it
# provides the number of bytes which have already been popped from the
# interrupted stack frame.
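# (Worked example added for clarity.) Suppose the interrupted activation was
# about to execute RESTORE_ALL's 'popl %esi', so %ebx, %ecx and %edx had
# already been popped. The table entry for that offset is 0x0c. The fixup
# below then copies the bottom 0x0c bytes of the new frame (the freshly
# saved %ebx/%ecx/%edx) up so that they sit directly beneath the 0x34-0x0c
# bytes remaining of the old frame, points %esp at the result, and restarts
# at 11:. The merged frame is once again a complete 0x34-byte frame.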
critical_region_fixup:
        addl $critical_fixup_table-scrit,%eax
        movzbl (%eax),%eax      # %eax contains num bytes popped
        mov  %esp,%esi
        add  %eax,%esi          # %esi points at end of src region
        mov  %esp,%edi
        add  $0x34,%edi         # %edi points at end of dst region
        mov  %eax,%ecx
        shr  $2,%ecx            # convert bytes to words
        je   16f                # skip loop if nothing to copy
15:     subl $4,%esi            # pre-decrementing copy loop
        subl $4,%edi
        movl (%esi),%eax
        movl %eax,(%edi)
        loop 15b
16:     movl %edi,%esp          # final %edi is top of merged stack
        jmp  11b
critical_fixup_table:
        .byte 0x00,0x00,0x00    # testb $0xff,(%esi)
        .byte 0x00,0x00         # jnz 14f
        .byte 0x00              # pop %ebx
        .byte 0x04              # pop %ecx
        .byte 0x08              # pop %edx
        .byte 0x0c              # pop %esi
        .byte 0x10              # pop %edi
        .byte 0x14              # pop %ebp
        .byte 0x18              # pop %eax
        .byte 0x1c              # pop %ds
        .byte 0x20              # pop %es
        .byte 0x24,0x24,0x24    # add $4,%esp
        .byte 0x28              # iret
        .byte 0x00,0x00,0x00,0x00 # movb $1,1(%esi)
        .byte 0x00,0x00         # jmp 11b
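# (Note added for clarity.) The table holds one entry per *byte* of code in
# the critical region, so multi-byte instructions repeat their entry: the
# 3-byte 'testb $0xff,(%esi)' gets three 0x00 entries and the 3-byte
# 'add $4,%esp' gets three 0x24 entries. Each value is the number of bytes
# already popped from the interrupted frame at that point.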
# The hypervisor uses this callback for faults it hits while executing on
# the guest's behalf, e.g. when restoring the guest's segment registers on
# return; the saved selectors are on the stack, so reload them and retry.
ENTRY(failsafe_callback)
        pop  %ds
        pop  %es
        pop  %fs
        pop  %gs
        iret
ENTRY(coprocessor_error)
        pushl $0
        pushl $do_coprocessor_error
        jmp  do_exception

ENTRY(simd_coprocessor_error)
        pushl $0
        pushl $do_simd_coprocessor_error
        jmp  do_exception

ENTRY(device_not_available)
        iret
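# (Note added for clarity.) device_not_available (#NM) is simply ignored
# above, presumably because Mini-OS does no lazy FPU context switching, so
# there is nothing to do and the handler returns immediately.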
ENTRY(debug)
        pushl $0
        pushl $do_debug
        jmp  do_exception

ENTRY(int3)
        pushl $0
        pushl $do_int3
        jmp  do_exception

ENTRY(overflow)
        pushl $0
        pushl $do_overflow
        jmp  do_exception

ENTRY(bounds)
        pushl $0
        pushl $do_bounds
        jmp  do_exception

ENTRY(invalid_op)
        pushl $0
        pushl $do_invalid_op
        jmp  do_exception

ENTRY(coprocessor_segment_overrun)
        pushl $0
        pushl $do_coprocessor_segment_overrun
        jmp  do_exception

# The following exceptions supply their own error code, so no dummy $0
# is pushed.
ENTRY(invalid_TSS)
        pushl $do_invalid_TSS
        jmp  do_exception

ENTRY(segment_not_present)
        pushl $do_segment_not_present
        jmp  do_exception

ENTRY(stack_segment)
        pushl $do_stack_segment
        jmp  do_exception

ENTRY(general_protection)
        pushl $do_general_protection
        jmp  do_exception

ENTRY(alignment_check)
        pushl $do_alignment_check
        jmp  do_exception
# This handler is special, because it gets an extra value on its stack,
# which is the linear faulting address.
# fastcall register usage: %eax = pt_regs, %edx = error code,
# %ecx = fault address
ENTRY(page_fault)
        pushl %ds
        pushl %eax
        xorl %eax, %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        decl %eax               /* eax = -1 */
        pushl %ecx
        pushl %ebx
        cld
        movl %es,%edi
        movl ES(%esp), %ecx     /* get the faulting address */
        movl ORIG_EAX(%esp), %edx /* get the error code */
        movl %eax, ORIG_EAX(%esp)
        movl %edi, ES(%esp)
        movl $(__KERNEL_DS),%eax
        movl %eax, %ds
        movl %eax, %es
        movl %esp,%eax          /* pt_regs pointer */
        call do_page_fault
        jmp  ret_from_exception
ENTRY(machine_check)
        pushl $0
        pushl $do_machine_check
        jmp  do_exception

ENTRY(spurious_interrupt_bug)
        pushl $0
        pushl $do_spurious_interrupt_bug
        jmp  do_exception