ia64/xen-unstable: extras/mini-os/x86_32.S @ 8988:49c02a7a92dd

Remove TBF_SLOW_IRET hack from x86/64 Xen return-to-guest path.
Guest should set up flags for itself in its own NMI handler.

Signed-off-by: Keir Fraser <keir@xensource.com>
Author:   kaf24@firebug.cl.cam.ac.uk
Date:     Thu Feb 23 18:30:43 2006 +0100
Parents:  323d40eefbce
Children: 8c21c8ea5fff

#include <os.h>
#include <xen/arch-x86_32.h>

.section __xen_guest
        .ascii  "GUEST_OS=Mini-OS"
        .ascii  ",XEN_VER=xen-3.0"
        .ascii  ",HYPERCALL_PAGE=0x2"
        .ascii  ",LOADER=generic"
        .ascii  ",PT_MODE_WRITABLE"
        .byte   0
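
        /* The strings above form a single NUL-terminated key=value list
         * that the Xen domain builder parses when loading the image; e.g.
         * HYPERCALL_PAGE=0x2 asks Xen to fill the page at offset 0x2000
         * (the hypercall_page symbol below) with hypercall stubs. */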

.text

.globl _start, shared_info, hypercall_page

_start:
        cld
        lss stack_start,%esp
        push %esi
        call start_kernel

stack_start:
        .long stack+8192, __KERNEL_SS
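        /* lss loads %esp from the first long and %ss from the word that
         * follows, switching to the top of the 8KB boot stack (the stack
         * array is defined in C) in one step; %esi holds the start-of-day
         * info pointer supplied by the domain builder and is passed on to
         * start_kernel by the push above. */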

        /* Unpleasant -- the PTE that maps this page is actually overwritten
         * to map the real shared-info page! :-) */
        .org 0x1000
shared_info:

        .org 0x2000
hypercall_page:

        .org 0x3000
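
# Offsets into the trap frame built by the entry paths below. After the
# error-code slot is pushed and SAVE_ALL runs (or after do_exception's
# hand-built equivalent), the frame is, from %esp upward:
#
#       0x00 ebx   0x04 ecx   0x08 edx   0x0C esi   0x10 edi   0x14 ebp
#       0x18 eax   0x1C ds    0x20 es    0x24 orig_eax/error code
#       0x28 eip   0x2C cs    0x30 eflags       (0x34 bytes in total)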
ES              = 0x20
ORIG_EAX        = 0x24
EIP             = 0x28
CS              = 0x2C

#define ENTRY(X) .globl X ; X :

#define SAVE_ALL \
        cld; \
        pushl %es; \
        pushl %ds; \
        pushl %eax; \
        pushl %ebp; \
        pushl %edi; \
        pushl %esi; \
        pushl %edx; \
        pushl %ecx; \
        pushl %ebx; \
        movl $(__KERNEL_DS),%edx; \
        movl %edx,%ds; \
        movl %edx,%es;
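
# RESTORE_ALL undoes SAVE_ALL in reverse; the addl $4,%esp discards the
# orig_eax/error-code slot so that iret finds eip/cs/eflags on top.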
#define RESTORE_ALL \
        popl %ebx; \
        popl %ecx; \
        popl %edx; \
        popl %esi; \
        popl %edi; \
        popl %ebp; \
        popl %eax; \
        popl %ds; \
        popl %es; \
        addl $4,%esp; \
        iret;

ENTRY(divide_error)
        pushl $0                        # no error code
        pushl $do_divide_error
# do_exception builds a pt_regs frame by hand. On entry the two words
# below the iret frame are the handler address (the ES slot, 0x20) and
# the error code (the ORIG_EAX slot, 0x24); once both are read out, those
# slots are overwritten with the saved %es and -1, completing the frame
# layout described above.
do_exception:
        pushl %ds
        pushl %eax
        xorl %eax, %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        decl %eax                       # eax = -1
        pushl %ecx
        pushl %ebx
        cld
        movl %es, %ecx
        movl ES(%esp), %edi             # get the function address
        movl ORIG_EAX(%esp), %edx       # get the error code
        movl %eax, ORIG_EAX(%esp)       # orig_eax = -1
        movl %ecx, ES(%esp)             # save %es in its frame slot
        movl $(__KERNEL_DS), %ecx
        movl %ecx, %ds
        movl %ecx, %es
        movl %esp,%eax                  # pt_regs pointer
        pushl %edx                      # error code argument
        pushl %eax                      # pt_regs argument
        call *%edi
        addl $8,%esp
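        # fall through to ret_from_exception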

ret_from_exception:
        movb CS(%esp),%cl
        test $2,%cl                     # slow return to ring 2 or 3
        jne safesti                     # (guest kernel runs in ring 1, apps in ring 3)
        RESTORE_ALL

# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
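#
# In outline (a C-like sketch, not assembled code; "vcpu0" stands for the
# first vcpu_info slot of shared_info, where byte 0 is
# evtchn_upcall_pending and byte 1 is evtchn_upcall_mask):
#
#       for (;;) {
#               do_hypervisor_callback(regs);   /* events masked */
#               vcpu0->evtchn_upcall_mask = 0;  /* safesti */
#               /* scrit..ecrit: an event arriving here nests; the fixup
#                * below merges the two activations and loops to 11: */
#               if (!vcpu0->evtchn_upcall_pending)
#                       break;                  /* RESTORE_ALL */
#               vcpu0->evtchn_upcall_mask = 1;  /* 14: re-mask */
#       }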

ENTRY(hypervisor_callback)
        pushl %eax
        SAVE_ALL
        movl EIP(%esp),%eax
        cmpl $scrit,%eax
        jb 11f
        cmpl $ecrit,%eax
        jb critical_region_fixup
11:     push %esp                       # pt_regs pointer
        call do_hypervisor_callback
        add $4,%esp
        movl HYPERVISOR_shared_info,%esi
        xorl %eax,%eax
        movb CS(%esp),%cl
        test $2,%cl                     # slow return to ring 2 or 3
        jne safesti                     # (branch target is also the fall-through)
safesti:movb $0,1(%esi)                 # reenable event callbacks (clear upcall mask)
scrit:  /**** START OF CRITICAL REGION ****/
        testb $0xFF,(%esi)              # any events pending?
        jnz 14f                         # process more events if necessary...
        RESTORE_ALL
14:     movb $1,1(%esi)                 # re-mask event callbacks
        jmp 11b
ecrit:  /**** END OF CRITICAL REGION ****/

# [How we do the fixup]. We want to merge the current stack frame with the
# just-interrupted frame. How we do this depends on where in the critical
# region the interrupted handler was executing, and so how many saved
# registers are in each frame. We do this quickly using the lookup table
# 'critical_fixup_table'. For each byte offset in the critical region, it
# provides the number of bytes which have already been popped from the
# interrupted stack frame.
critical_region_fixup:
        addl $critical_fixup_table-scrit,%eax
        movzbl (%eax),%eax              # %eax contains num bytes popped
        mov %esp,%esi
        add %eax,%esi                   # %esi points at end of src region
        mov %esp,%edi
        add $0x34,%edi                  # %edi points at end of dst region
        mov %eax,%ecx
        shr $2,%ecx                     # convert bytes to words
        je 16f                          # skip loop if nothing to copy
15:     subl $4,%esi                    # pre-decrementing copy loop
        subl $4,%edi
        movl (%esi),%eax
        movl %eax,(%edi)
        loop 15b
16:     movl %edi,%esp                  # final %edi is top of merged stack
        jmp 11b

critical_fixup_table:
        .byte 0x00,0x00,0x00            # testb $0xFF,(%esi)
        .byte 0x00,0x00                 # jnz 14f
        .byte 0x00                      # pop %ebx
        .byte 0x04                      # pop %ecx
        .byte 0x08                      # pop %edx
        .byte 0x0c                      # pop %esi
        .byte 0x10                      # pop %edi
        .byte 0x14                      # pop %ebp
        .byte 0x18                      # pop %eax
        .byte 0x1c                      # pop %ds
        .byte 0x20                      # pop %es
        .byte 0x24,0x24,0x24            # add $4,%esp
        .byte 0x28                      # iret
        .byte 0x00,0x00,0x00,0x00       # movb $1,1(%esi)
        .byte 0x00,0x00                 # jmp 11b
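
# Worked example (a sketch): suppose the nested event arrives when EIP is
# at "pop %esi", i.e. %ebx/%ecx/%edx of the interrupted frame were already
# popped. The table yields 0x0c, so the bottom 0x0c bytes of the current
# frame (fresh copies of ebx/ecx/edx) are copied up to sit directly below
# the interrupted frame's remaining 0x34-0x0c bytes, %esp is set to the
# base of that merged 0x34-byte frame, and processing restarts at 11:.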

# Hypervisor uses this for application faults while it executes.
# Xen enters here if it faulted while returning to the guest (e.g. on a
# bad segment selector), with the four data segment selectors on the stack.
ENTRY(failsafe_callback)
        pop %ds
        pop %es
        pop %fs
        pop %gs
        iret

ENTRY(coprocessor_error)
        pushl $0
        pushl $do_coprocessor_error
        jmp do_exception

ENTRY(simd_coprocessor_error)
        pushl $0
        pushl $do_simd_coprocessor_error
        jmp do_exception

ENTRY(device_not_available)
        iret

ENTRY(debug)
        pushl $0
        pushl $do_debug
        jmp do_exception

ENTRY(int3)
        pushl $0
        pushl $do_int3
        jmp do_exception

ENTRY(overflow)
        pushl $0
        pushl $do_overflow
        jmp do_exception

ENTRY(bounds)
        pushl $0
        pushl $do_bounds
        jmp do_exception

ENTRY(invalid_op)
        pushl $0
        pushl $do_invalid_op
        jmp do_exception

ENTRY(coprocessor_segment_overrun)
        pushl $0
        pushl $do_coprocessor_segment_overrun
        jmp do_exception

# The following faults arrive with a hardware-supplied error code already
# on the stack, so no dummy $0 is pushed.
ENTRY(invalid_TSS)
        pushl $do_invalid_TSS
        jmp do_exception

ENTRY(segment_not_present)
        pushl $do_segment_not_present
        jmp do_exception

ENTRY(stack_segment)
        pushl $do_stack_segment
        jmp do_exception

ENTRY(general_protection)
        pushl $do_general_protection
        jmp do_exception

ENTRY(alignment_check)
        pushl $do_alignment_check
        jmp do_exception

# This handler is special, because it gets an extra value on its stack,
# which is the linear faulting address.
# fastcall register usage: %eax = pt_regs, %edx = error code,
# %ecx = fault address
ENTRY(page_fault)
        pushl %ds
        pushl %eax
        xorl %eax, %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        decl %eax                       /* eax = -1 */
        pushl %ecx
        pushl %ebx
        cld
        movl %es,%edi
        movl ES(%esp), %ecx             /* get the faulting address */
        movl ORIG_EAX(%esp), %edx       /* get the error code */
        movl %eax, ORIG_EAX(%esp)
        movl %edi, ES(%esp)
        movl $(__KERNEL_DS),%eax
        movl %eax, %ds
        movl %eax, %es
        movl %esp,%eax                  /* pt_regs pointer */
        call do_page_fault
        jmp ret_from_exception

ENTRY(machine_check)
        pushl $0
        pushl $do_machine_check
        jmp do_exception

ENTRY(spurious_interrupt_bug)
        pushl $0
        pushl $do_spurious_interrupt_bug
        jmp do_exception