ia64/xen-unstable

view extras/mini-os/arch/x86/x86_32.S @ 17042:a905c582a406

Add stubdomain support. See stubdom/README for usage details.

- Move PAGE_SIZE and STACK_SIZE into __PAGE_SIZE and __STACK_SIZE in
arch_limits.h so as to permit getting them from there without
pulling all the internal Mini-OS defines.
- Setup a xen-elf cross-compilation environment in stubdom/cross-root
- Add a POSIX layer on top of Mini-OS by linking against the newlib C
library and lwIP, and implementing the Unixish part in mini-os/lib/sys.c
- Cross-compile zlib and libpci too.
- Add an xs.h-compatible layer on top of Mini-OS' xenbus.
- Cross-compile libxc with an additional xc_minios.c and a few things
disabled.
- Cross-compile ioemu with an additional block-vbd, but without sound,
tpm and other details. A few hacks are needed:
- Align ide and scsi buffers at least on sector size to permit
direct transmission to the block backend. While we are at it, just
page-align it to possibly save a segment. Also, limit the scsi
buffer size because of limitations of the block paravirtualization
protocol.
- Allocate big tables dynamically rather than letting them go to
bss: when Mini-OS gets installed in memory, bss is not lazily
allocated, and doing so during Mini-OS is unnecessarily tricky while
we can simply use malloc.
- Had to change the Mini-OS compilation somehow, so as to export
Mini-OS compilation flags to the Makefiles of libxc and ioemu.

Signed-off-by: Samuel Thibault <samuel.thibault@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Feb 12 14:35:39 2008 +0000 (2008-02-12)
parents 8f6640070a86
children 8bd776540ab3
line source
# Mini-OS x86-32 boot image header. The __xen_guest section is a
# NUL-terminated key=value string read by the Xen domain builder to
# decide how to load this kernel image.
1 #include <os.h>
2 #include <arch_limits.h>
3 #include <xen/arch-x86_32.h>
5 .section __xen_guest
6 .ascii "GUEST_OS=Mini-OS"
7 .ascii ",XEN_VER=xen-3.0"
8 .ascii ",VIRT_BASE=0x0" /* &_text from minios_x86_32.lds */
9 .ascii ",ELF_PADDR_OFFSET=0x0"
10 .ascii ",HYPERCALL_PAGE=0x2" /* page index: matches the .org 0x2000 hypercall_page below */
11 #ifdef CONFIG_X86_PAE
12 .ascii ",PAE=yes"
13 #else
14 .ascii ",PAE=no"
15 #endif
16 .ascii ",LOADER=generic"
17 .byte 0 /* NUL terminator for the guest-info string */
18 .text
20 .globl _start, shared_info, hypercall_page
# Kernel entry point: establish a known stack, then enter C code.
22 _start:
23 cld # C ABI expects the direction flag clear
24 lss stack_start,%esp # load SS:ESP from the far-pointer at stack_start below
25 andl $(~(__STACK_SIZE-1)), %esp # round ESP down to a __STACK_SIZE boundary
26 push %esi # NOTE(review): %esi presumably holds the Xen start_info pointer handed to the guest -- confirm against the domain-builder ABI
27 call start_kernel # never returns
# Far pointer consumed by the lss above: 32-bit offset, then 16-bit SS.
29 stack_start:
30 .long stack+(2*__STACK_SIZE), __KERNEL_SS # initial stack top, kernel stack selector
32 /* Unpleasant -- the PTE that maps this page is actually overwritten */
33 /* to map the real shared-info page! :-) */
34 .org 0x1000 # page-aligned placeholder; remapped to Xen's shared_info page
35 shared_info:
36 .org 0x2000 # page Xen fills with hypercall stubs (HYPERCALL_PAGE=0x2 above)
38 hypercall_page:
39 .org 0x3000
# Byte offsets into the trap frame built by SAVE_ALL / do_exception.
# Nine registers are saved below these slots (9*4 = 0x24 bytes), so:
41 ES = 0x20 # saved %es slot (in do_exception it briefly holds the handler address)
42 ORIG_EAX = 0x24 # error-code slot pushed before the frame (later overwritten with -1)
43 EIP = 0x28 # interrupted EIP, pushed by the CPU/Xen
44 CS = 0x2C # interrupted CS
46 #define ENTRY(X) .globl X ; X : # declare and define a global entry point
# Build the trap frame: segment registers, then the general-purpose
# registers, then switch %ds/%es to the kernel data segment. Push order
# must match the ES/ORIG_EAX/EIP/CS offsets defined above.
48 #define SAVE_ALL \
49 cld; \
50 pushl %es; \
51 pushl %ds; \
52 pushl %eax; \
53 pushl %ebp; \
54 pushl %edi; \
55 pushl %esi; \
56 pushl %edx; \
57 pushl %ecx; \
58 pushl %ebx; \
59 movl $(__KERNEL_DS),%edx; \
60 movl %edx,%ds; \
61 movl %edx,%es;
# Unwind the SAVE_ALL frame. The addl skips the error-code slot so that
# iret pops the EIP/CS/EFLAGS pushed at exception entry.
63 #define RESTORE_ALL \
64 popl %ebx; \
65 popl %ecx; \
66 popl %edx; \
67 popl %esi; \
68 popl %edi; \
69 popl %ebp; \
70 popl %eax; \
71 popl %ds; \
72 popl %es; \
73 addl $4,%esp; \
74 iret;
76 ENTRY(divide_error)
77 pushl $0 # no error code
78 pushl $do_divide_error # C handler address; fall through to the common path
# Common exception path. On entry the stack holds (top down): handler
# address, error code, EIP, CS, EFLAGS. The pushes below hand-build the
# same frame as SAVE_ALL; the handler address happens to sit in the ES
# slot until it is swapped for the real %es value. The xorl/decl pair
# (interleaved with the pushes) builds eax = -1.
79 do_exception:
80 pushl %ds
81 pushl %eax
82 xorl %eax, %eax
83 pushl %ebp
84 pushl %edi
85 pushl %esi
86 pushl %edx
87 decl %eax # eax = -1
88 pushl %ecx
89 pushl %ebx
90 cld
91 movl %es, %ecx
92 movl ES(%esp), %edi # get the function address
93 movl ORIG_EAX(%esp), %edx # get the error code
94 movl %eax, ORIG_EAX(%esp) # mark the slot consumed: orig_eax = -1
95 movl %ecx, ES(%esp) # store the real %es into its frame slot
96 movl $(__KERNEL_DS), %ecx
97 movl %ecx, %ds
98 movl %ecx, %es
99 movl %esp,%eax # pt_regs pointer
100 pushl %edx # arg 2: error code
101 pushl %eax # arg 1: pt_regs pointer
102 call *%edi # do_<exception>(regs, error_code)
103 jmp ret_from_exception
105 ret_from_exception:
106 movb CS(%esp),%cl # NOTE(review): read before the two call args are popped, so this loads a different slot than CS, and %cl is never used afterwards -- looks vestigial; confirm before relying on it
107 addl $8,%esp # drop the two handler arguments
108 RESTORE_ALL
110 # A note on the "critical region" in our callback handler.
111 # We want to avoid stacking callback handlers due to events occurring
112 # during handling of the last event. To do this, we keep events disabled
113 # until we've done all processing. HOWEVER, we must enable events before
114 # popping the stack frame (can't be done atomically) and so it would still
115 # be possible to get enough handler activations to overflow the stack.
116 # Although unlikely, bugs of that kind are hard to track down, so we'd
117 # like to avoid the possibility.
118 # So, on entry to the handler we detect whether we interrupted an
119 # existing activation in its critical region -- if so, we pop the current
120 # activation and restart the handler using the previous one.
121 ENTRY(hypervisor_callback)
122 pushl %eax # dummy error-code slot so the frame matches do_exception's layout
123 SAVE_ALL
124 movl EIP(%esp),%eax
125 cmpl $scrit,%eax # did we interrupt the scrit..ecrit critical region?
126 jb 11f
127 cmpl $ecrit,%eax
128 jb critical_region_fixup
129 11: push %esp # arg: pt_regs pointer
130 xorl %ebp,%ebp # terminate the frame-pointer chain
131 call do_hypervisor_callback
132 add $4,%esp
133 movl HYPERVISOR_shared_info,%esi
134 xorl %eax,%eax
135 movb CS(%esp),%cl
136 test $2,%cl # slow return to ring 2 or 3
137 jne safesti # NOTE(review): branch target is the fall-through, so this test/jne has no effect -- looks vestigial
138 safesti:movb $0,1(%esi) # reenable event callbacks (clear the upcall mask byte)
139 scrit: /**** START OF CRITICAL REGION ****/
140 testb $0xFF,(%esi) # any events pending?
141 jnz 14f # process more events if necessary...
142 RESTORE_ALL
143 14: movb $1,1(%esi) # re-mask events and loop back to process them
144 jmp 11b
145 ecrit: /**** END OF CRITICAL REGION ****/
146 # [How we do the fixup]. We want to merge the current stack frame with the
147 # just-interrupted frame. How we do this depends on where in the critical
148 # region the interrupted handler was executing, and so how many saved
149 # registers are in each frame. We do this quickly using the lookup table
150 # 'critical_fixup_table'. For each byte offset in the critical region, it
151 # provides the number of bytes which have already been popped from the
152 # interrupted stack frame.
153 critical_region_fixup:
154 addl $critical_fixup_table-scrit,%eax # index the table by byte offset into the region
155 movzbl (%eax),%eax # %eax contains num bytes popped
156 mov %esp,%esi
157 add %eax,%esi # %esi points at end of src region
158 mov %esp,%edi
159 add $0x34,%edi # %edi points at end of dst region (0x34 = full frame: 13 dwords)
160 mov %eax,%ecx
161 shr $2,%ecx # convert byte count to dword count (original comment had it backwards)
162 je 16f # skip loop if nothing to copy
163 15: subl $4,%esi # pre-decrementing copy loop
164 subl $4,%edi
165 movl (%esi),%eax
166 movl %eax,(%edi)
167 loop 15b
168 16: movl %edi,%esp # final %edi is top of merged stack
169 jmp 11b
# One table byte per instruction byte of scrit..ecrit; entries must stay
# in lock-step with the exact encodings of the instructions above.
171 critical_fixup_table:
172 .byte 0x00,0x00,0x00 # testb $0xff,(%esi)
173 .byte 0x00,0x00 # jne 14f
174 .byte 0x00 # pop %ebx
175 .byte 0x04 # pop %ecx
176 .byte 0x08 # pop %edx
177 .byte 0x0c # pop %esi
178 .byte 0x10 # pop %edi
179 .byte 0x14 # pop %ebp
180 .byte 0x18 # pop %eax
181 .byte 0x1c # pop %ds
182 .byte 0x20 # pop %es
183 .byte 0x24,0x24,0x24 # add $4,%esp
184 .byte 0x28 # iret
185 .byte 0x00,0x00,0x00,0x00 # movb $1,1(%esi)
186 .byte 0x00,0x00 # jmp 11b
188 # Hypervisor uses this for application faults while it executes.
# NOTE(review): the pops assume Xen pushed the four data-segment
# selectors on this frame before invoking us -- confirm against the Xen
# failsafe-callback ABI.
189 ENTRY(failsafe_callback)
190 pop %ds
191 pop %es
192 pop %fs
193 pop %gs
194 iret
# Exception entry stubs. Vectors whose frame carries no hardware error
# code push a dummy 0 so every handler sees the same layout; vectors
# with a real error code (invalid_TSS, segment_not_present,
# stack_segment, general_protection, alignment_check, page_fault) push
# only the C handler address. All jump into the common do_exception path.
196 ENTRY(coprocessor_error)
197 pushl $0
198 pushl $do_coprocessor_error
199 jmp do_exception
201 ENTRY(simd_coprocessor_error)
202 pushl $0
203 pushl $do_simd_coprocessor_error
204 jmp do_exception
206 ENTRY(device_not_available)
207 iret # no handler: return to the interrupted code immediately
209 ENTRY(debug)
210 pushl $0
211 pushl $do_debug
212 jmp do_exception
214 ENTRY(int3)
215 pushl $0
216 pushl $do_int3
217 jmp do_exception
219 ENTRY(overflow)
220 pushl $0
221 pushl $do_overflow
222 jmp do_exception
224 ENTRY(bounds)
225 pushl $0
226 pushl $do_bounds
227 jmp do_exception
229 ENTRY(invalid_op)
230 pushl $0
231 pushl $do_invalid_op
232 jmp do_exception
235 ENTRY(coprocessor_segment_overrun)
236 pushl $0
237 pushl $do_coprocessor_segment_overrun
238 jmp do_exception
241 ENTRY(invalid_TSS)
242 pushl $do_invalid_TSS
243 jmp do_exception
246 ENTRY(segment_not_present)
247 pushl $do_segment_not_present
248 jmp do_exception
251 ENTRY(stack_segment)
252 pushl $do_stack_segment
253 jmp do_exception
256 ENTRY(general_protection)
257 pushl $do_general_protection
258 jmp do_exception
261 ENTRY(alignment_check)
262 pushl $do_alignment_check
263 jmp do_exception
266 ENTRY(page_fault)
267 pushl $do_page_fault
268 jmp do_exception
270 ENTRY(machine_check)
271 pushl $0
272 pushl $do_machine_check
273 jmp do_exception
276 ENTRY(spurious_interrupt_bug)
277 pushl $0
278 pushl $do_spurious_interrupt_bug
279 jmp do_exception
# First code run by a newly created thread. The creating code left the
# thread function's argument and the function pointer on the new stack.
283 ENTRY(thread_starter)
284 popl %eax # thread function argument
285 popl %ebx # thread function pointer
286 pushl $0 # fake return address: terminates backtraces
287 xorl %ebp,%ebp # terminate the frame-pointer chain
288 pushl %eax # pass the argument per the cdecl convention
289 call *%ebx
290 call exit_thread # tear the thread down if its function returns
# __arch_switch_threads(prev, next): context switch. prev/next point at
# per-thread save areas whose first word holds ESP and second word EIP
# (seen from the (%ecx)/4(%ecx) accesses below). Only callee-saved
# registers need saving; the caller already spilled the rest.
292 ENTRY(__arch_switch_threads)
293 movl 4(%esp), %ecx /* prev */
294 movl 8(%esp), %edx /* next */
295 pushl %ebp
296 pushl %ebx
297 pushl %esi
298 pushl %edi # callee-saved registers go on the outgoing thread's stack
299 movl %esp, (%ecx) /* save ESP */
300 movl (%edx), %esp /* restore ESP */
301 movl $1f, 4(%ecx) /* save EIP */
302 pushl 4(%edx) /* restore EIP */
303 ret # "return" into the next thread at its saved EIP
# Resume point: a thread switched back in continues here and unwinds
# the callee-saved registers it pushed above.
304 1:
305 popl %edi
306 popl %esi
307 popl %ebx
308 popl %ebp
309 ret