ia64/xen-unstable

view extras/mini-os/arch/x86/x86_32.S @ 16434:d46265d21dc5

[Mini-OS] Fix x86 arch_switch_thread

Fix x86 arch_switch_thread by making it pure assembly.
There were missing general-register clobbers for x86_64, and BP should
theoretically be clobbered too, but gcc does not accept that, so the
only simple, safe solution is to use pure assembly.

Signed-off-by: Samuel Thibault <samuel.thibault@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Nov 23 16:23:28 2007 +0000 (2007-11-23)
parents f28d36628de8
children 8f6640070a86
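
In practical terms, the x86_32 switch is now an ordinary external routine,
__arch_switch_threads at the end of this file, which saves and restores the
callee-saved registers (%ebp, %ebx, %esi, %edi) itself instead of relying on
an inline-asm clobber list. A hedged sketch of the C-side interface it
implies follows; the struct and macro names are assumptions in the style of
Mini-OS, not taken from this file, and only the adjacent sp/ip word pair is
actually fixed by the assembly:

    /* Sketch only: __arch_switch_threads stores ESP at offset 0 and EIP at
     * offset 4 of the pointer it is given, so the thread structure needs two
     * adjacent words there. */
    struct thread_sp_ip {
        unsigned long sp;   /* written at  (%ecx), read at  (%edx) */
        unsigned long ip;   /* written at 4(%ecx), read at 4(%edx) */
    };

    void __arch_switch_threads(unsigned long *prev_sp, unsigned long *next_sp);

    #define arch_switch_threads(prev, next) \
            __arch_switch_threads(&(prev)->sp, &(next)->sp)
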
line source
#include <os.h>
#include <xen/arch-x86_32.h>

.section __xen_guest
        .ascii  "GUEST_OS=Mini-OS"
        .ascii  ",XEN_VER=xen-3.0"
        .ascii  ",VIRT_BASE=0x0"        /* &_text from minios_x86_32.lds */
        .ascii  ",ELF_PADDR_OFFSET=0x0"
        .ascii  ",HYPERCALL_PAGE=0x2"
#ifdef CONFIG_X86_PAE
        .ascii  ",PAE=yes"
#else
        .ascii  ",PAE=no"
#endif
        .ascii  ",LOADER=generic"
        .byte   0
.text

.globl _start, shared_info, hypercall_page

_start:
        cld
        lss stack_start,%esp
        andl $(~(8192-1)), %esp
        push %esi
        call start_kernel

stack_start:
        .long stack+(2*8192), __KERNEL_SS

        /* Unpleasant -- the PTE that maps this page is actually overwritten */
        /* to map the real shared-info page! :-) */
        .org 0x1000
shared_info:
        .org 0x2000

hypercall_page:
        .org 0x3000

ES       = 0x20
ORIG_EAX = 0x24
EIP      = 0x28
CS       = 0x2C
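
These offsets index the trap frame built by SAVE_ALL below, together with the
error-code word and the EIP/CS/EFLAGS words pushed before the handler runs
(in do_exception the es and orig_eax slots briefly hold the handler address
and the error code before being rewritten). A hedged sketch of that layout as
a C struct, for orientation only; the struct and field names do not appear in
this file:

    /* Sketch only: frame layout implied by SAVE_ALL and the offsets above. */
    struct saved_frame {
        unsigned long ebx;        /* 0x00 */
        unsigned long ecx;        /* 0x04 */
        unsigned long edx;        /* 0x08 */
        unsigned long esi;        /* 0x0c */
        unsigned long edi;        /* 0x10 */
        unsigned long ebp;        /* 0x14 */
        unsigned long eax;        /* 0x18 */
        unsigned long ds;         /* 0x1c */
        unsigned long es;         /* 0x20  -- ES above       */
        unsigned long orig_eax;   /* 0x24  -- ORIG_EAX above */
        unsigned long eip;        /* 0x28  -- EIP above      */
        unsigned long cs;         /* 0x2c  -- CS above       */
        unsigned long eflags;     /* 0x30 */
    };
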
#define ENTRY(X) .globl X ; X :

#define SAVE_ALL \
        cld; \
        pushl %es; \
        pushl %ds; \
        pushl %eax; \
        pushl %ebp; \
        pushl %edi; \
        pushl %esi; \
        pushl %edx; \
        pushl %ecx; \
        pushl %ebx; \
        movl $(__KERNEL_DS),%edx; \
        movl %edx,%ds; \
        movl %edx,%es;

#define RESTORE_ALL \
        popl %ebx; \
        popl %ecx; \
        popl %edx; \
        popl %esi; \
        popl %edi; \
        popl %ebp; \
        popl %eax; \
        popl %ds; \
        popl %es; \
        addl $4,%esp; \
        iret;

ENTRY(divide_error)
        pushl $0                        # no error code
        pushl $do_divide_error
do_exception:
        pushl %ds
        pushl %eax
        xorl %eax, %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        decl %eax                       # eax = -1
        pushl %ecx
        pushl %ebx
        cld
        movl %es, %ecx
        movl ES(%esp), %edi             # get the function address
        movl ORIG_EAX(%esp), %edx       # get the error code
        movl %eax, ORIG_EAX(%esp)
        movl %ecx, ES(%esp)
        movl $(__KERNEL_DS), %ecx
        movl %ecx, %ds
        movl %ecx, %es
        movl %esp,%eax                  # pt_regs pointer
        pushl %edx
        pushl %eax
        call *%edi
        jmp ret_from_exception

ret_from_exception:
        movb CS(%esp),%cl
        addl $8,%esp
        RESTORE_ALL

# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
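
Restated in C for clarity (a sketch only; the real check is the pair of
cmpl/jb instructions below, and scrit/ecrit are the labels bracketing the
critical region):

    /* Sketch: the saved EIP lies in the critical region iff scrit <= eip < ecrit. */
    extern char scrit[], ecrit[];

    static int in_critical_region(unsigned long saved_eip)
    {
        return saved_eip >= (unsigned long) scrit &&
               saved_eip <  (unsigned long) ecrit;
    }
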
ENTRY(hypervisor_callback)
        pushl %eax
        SAVE_ALL
        movl EIP(%esp),%eax
        cmpl $scrit,%eax
        jb   11f
        cmpl $ecrit,%eax
        jb   critical_region_fixup
11:     push %esp
        xorl %ebp,%ebp
        call do_hypervisor_callback
        add  $4,%esp
        movl HYPERVISOR_shared_info,%esi
        xorl %eax,%eax
        movb CS(%esp),%cl
        test $2,%cl                     # slow return to ring 2 or 3
        jne  safesti
safesti:movb $0,1(%esi)                 # reenable event callbacks
scrit:  /**** START OF CRITICAL REGION ****/
        testb $0xFF,(%esi)
        jnz  14f                        # process more events if necessary...
        RESTORE_ALL
14:     movb $1,1(%esi)
        jmp  11b
ecrit:  /**** END OF CRITICAL REGION ****/

# [How we do the fixup]. We want to merge the current stack frame with the
# just-interrupted frame. How we do this depends on where in the critical
# region the interrupted handler was executing, and so how many saved
# registers are in each frame. We do this quickly using the lookup table
# 'critical_fixup_table'. For each byte offset in the critical region, it
# provides the number of bytes which have already been popped from the
# interrupted stack frame.
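
A hedged C sketch of the merge performed below; the 0x34 constant is the size
of the full SAVE_ALL frame plus the error-code word and EIP/CS/EFLAGS, and
the function name is illustrative only:

    /* Sketch: collapse the current frame into the interrupted one. */
    static unsigned long *merge_frames(unsigned long *esp, unsigned int bytes_popped)
    {
        unsigned long *src = (unsigned long *)((char *)esp + bytes_popped); /* end of src */
        unsigned long *dst = (unsigned long *)((char *)esp + 0x34);         /* end of dst */
        unsigned int n = bytes_popped / 4;                                   /* dwords to copy */

        while (n--)
            *--dst = *--src;           /* pre-decrementing copy, as below */
        return dst;                    /* new %esp for the merged frame   */
    }
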
critical_region_fixup:
        addl $critical_fixup_table-scrit,%eax
        movzbl (%eax),%eax              # %eax contains num bytes popped
        mov  %esp,%esi
        add  %eax,%esi                  # %esi points at end of src region
        mov  %esp,%edi
        add  $0x34,%edi                 # %edi points at end of dst region
        mov  %eax,%ecx
        shr  $2,%ecx                    # convert bytes to words
        je   16f                        # skip loop if nothing to copy
15:     subl $4,%esi                    # pre-decrementing copy loop
        subl $4,%edi
        movl (%esi),%eax
        movl %eax,(%edi)
        loop 15b
16:     movl %edi,%esp                  # final %edi is top of merged stack
        jmp  11b

critical_fixup_table:
        .byte 0x00,0x00,0x00            # testb $0xff,(%esi)
        .byte 0x00,0x00                 # jne  14f
        .byte 0x00                      # pop  %ebx
        .byte 0x04                      # pop  %ecx
        .byte 0x08                      # pop  %edx
        .byte 0x0c                      # pop  %esi
        .byte 0x10                      # pop  %edi
        .byte 0x14                      # pop  %ebp
        .byte 0x18                      # pop  %eax
        .byte 0x1c                      # pop  %ds
        .byte 0x20                      # pop  %es
        .byte 0x24,0x24,0x24            # add  $4,%esp
        .byte 0x28                      # iret
        .byte 0x00,0x00,0x00,0x00       # movb $1,1(%esi)
        .byte 0x00,0x00                 # jmp  11b

# Hypervisor uses this for application faults while it executes.
ENTRY(failsafe_callback)
        pop  %ds
        pop  %es
        pop  %fs
        pop  %gs
        iret

ENTRY(coprocessor_error)
        pushl $0
        pushl $do_coprocessor_error
        jmp do_exception

ENTRY(simd_coprocessor_error)
        pushl $0
        pushl $do_simd_coprocessor_error
        jmp do_exception

ENTRY(device_not_available)
        iret

ENTRY(debug)
        pushl $0
        pushl $do_debug
        jmp do_exception

ENTRY(int3)
        pushl $0
        pushl $do_int3
        jmp do_exception

ENTRY(overflow)
        pushl $0
        pushl $do_overflow
        jmp do_exception

ENTRY(bounds)
        pushl $0
        pushl $do_bounds
        jmp do_exception

ENTRY(invalid_op)
        pushl $0
        pushl $do_invalid_op
        jmp do_exception

ENTRY(coprocessor_segment_overrun)
        pushl $0
        pushl $do_coprocessor_segment_overrun
        jmp do_exception

ENTRY(invalid_TSS)
        pushl $do_invalid_TSS
        jmp do_exception

ENTRY(segment_not_present)
        pushl $do_segment_not_present
        jmp do_exception

ENTRY(stack_segment)
        pushl $do_stack_segment
        jmp do_exception

ENTRY(general_protection)
        pushl $do_general_protection
        jmp do_exception

ENTRY(alignment_check)
        pushl $do_alignment_check
        jmp do_exception

ENTRY(page_fault)
        pushl $do_page_fault
        jmp do_exception

ENTRY(machine_check)
        pushl $0
        pushl $do_machine_check
        jmp do_exception

ENTRY(spurious_interrupt_bug)
        pushl $0
        pushl $do_spurious_interrupt_bug
        jmp do_exception

ENTRY(thread_starter)
        popl %eax
        popl %ebx
        pushl $0
        xorl %ebp,%ebp
        pushl %eax
        call *%ebx
        call exit_thread
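
thread_starter expects a freshly created thread's stack to carry the argument
word on top with the function pointer directly beneath it: it pops the
argument into %eax and the function into %ebx, pushes a zero word, clears
%ebp (presumably to terminate backtraces), calls the function with the
argument, and falls into exit_thread if the function returns. A hedged sketch
of the setup side; the function, field and size names are assumptions, not
taken from this file, and only the on-stack order is fixed by the assembly:

    /* Sketch only: how a creator might lay out a new stack for thread_starter. */
    struct thread_sketch {
        char         *stack;   /* base of the thread's stack area              */
        unsigned long sp;      /* saved ESP, consumed by __arch_switch_threads */
        unsigned long ip;      /* saved EIP, initially thread_starter          */
    };

    extern void thread_starter(void);
    #define SKETCH_STACK_SIZE (2 * 8192)   /* illustrative size only */

    static void sketch_prepare_stack(struct thread_sketch *t,
                                     void (*function)(void *), void *data)
    {
        unsigned long *sp = (unsigned long *)(t->stack + SKETCH_STACK_SIZE);

        *--sp = (unsigned long) function;   /* popped second, into %ebx */
        *--sp = (unsigned long) data;       /* popped first, into %eax  */

        t->sp = (unsigned long) sp;
        t->ip = (unsigned long) thread_starter;
    }
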
ENTRY(__arch_switch_threads)
        movl 4(%esp), %ecx              /* prev */
        movl 8(%esp), %edx              /* next */
        pushl %ebp
        pushl %ebx
        pushl %esi
        pushl %edi
        movl %esp, (%ecx)               /* save ESP */
        movl (%edx), %esp               /* restore ESP */
        movl $1f, 4(%ecx)               /* save EIP */
        pushl 4(%edx)                   /* restore EIP */
        ret
1:
        popl %edi
        popl %esi
        popl %ebx
        popl %ebp
        ret