ia64/xen-unstable

view extras/mini-os/x86_32.S @ 9788:bdcc838b9a72

Add small memory warning message to domain configuration examples.

Signed-off-by: Daniel Stekloff <dsteklof@us.ibm.com>
author stekloff@dyn9047022152.beaverton.ibm.com
date Wed Apr 19 22:58:24 2006 +0100 (2006-04-19)
parents 8c21c8ea5fff
children f6507937cb7c
line source
1 #include <os.h>
2 #include <xen/arch-x86_32.h>
# Xen guest header: a NUL-terminated key=value string in its own
# section, read by the Xen domain builder to identify and load this
# kernel image (HYPERCALL_PAGE=0x2 matches the page reserved below).
4 .section __xen_guest
5 .ascii "GUEST_OS=Mini-OS"
6 .ascii ",XEN_VER=xen-3.0"
7 .ascii ",HYPERCALL_PAGE=0x2"
8 .ascii ",LOADER=generic"
9 .ascii ",PT_MODE_WRITABLE"
10 .byte 0
11 .text
13 .globl _start, shared_info, hypercall_page
# Kernel entry point. The domain builder enters here with %esi holding
# a pointer to the start_info structure, which is passed on the stack
# to start_kernel().
15 _start:
16 cld
# lss loads %ss:%esp together from the 48-bit pointer at stack_start.
17 lss stack_start,%esp
18 push %esi
19 call start_kernel
# NOTE(review): nothing follows the call -- presumably start_kernel()
# never returns; otherwise execution would fall into the data below.
# 48-bit far pointer: 32-bit offset (top of the 8KB stack), then the
# 16-bit __KERNEL_SS selector.
21 stack_start:
22 .long stack+8192, __KERNEL_SS
24 /* Unpleasant -- the PTE that maps this page is actually overwritten */
25 /* to map the real shared-info page! :-) */
# One page-aligned page reserved at offset 0x1000 for the Xen shared
# info page (remapped by the hypervisor per the comment above).
26 .org 0x1000
27 shared_info:
# One page at offset 0x2000, filled in by Xen with hypercall stubs
# (this is page 0x2, matching HYPERCALL_PAGE=0x2 in the guest header).
28 .org 0x2000
30 hypercall_page:
31 .org 0x3000
# Byte offsets into the register frame saved on the stack by the
# exception entry paths below.
# NOTE(review): these values match the 8-register frames built by
# do_exception/page_fault (where the %ds push lands at 0x1c); SAVE_ALL
# additionally pushes %es, giving a frame one slot larger -- confirm
# that EIP/CS index the intended slots for the SAVE_ALL-based paths.
33 ES = 0x1c
34 ORIG_EAX = 0x20
35 EIP = 0x24
36 CS = 0x28
# ENTRY(X): declare X global and emit its label.
38 #define ENTRY(X) .globl X ; X :
# SAVE_ALL: save the segment and general-purpose registers (%es, %ds,
# %eax, %ebp, %edi, %esi, %edx, %ecx, %ebx -- %ebx ends up at the top
# of the stack) and switch %ds/%es to the kernel data segment.
# Comments cannot appear inside the line-continued #define below.
40 #define SAVE_ALL \
41 cld; \
42 pushl %es; \
43 pushl %ds; \
44 pushl %eax; \
45 pushl %ebp; \
46 pushl %edi; \
47 pushl %esi; \
48 pushl %edx; \
49 pushl %ecx; \
50 pushl %ebx; \
51 movl $(__KERNEL_DS),%edx; \
52 movl %edx,%ds; \
53 movl %edx,%es;
# RESTORE_ALL: inverse of SAVE_ALL -- pop the general-purpose and
# segment registers, discard one remaining dword (the dummy error
# code / orig_eax slot), and return to the interrupted context.
55 #define RESTORE_ALL \
56 popl %ebx; \
57 popl %ecx; \
58 popl %edx; \
59 popl %esi; \
60 popl %edi; \
61 popl %ebp; \
62 popl %eax; \
63 popl %ds; \
64 popl %es; \
65 addl $4,%esp; \
66 iret; \
# Divide error (#DE): the CPU pushes no error code, so push a dummy 0
# plus the C handler's address, then fall into the common path below.
68 ENTRY(divide_error)
69 pushl $0 # no error code
70 pushl $do_divide_error
# Common exception path. On entry the stack holds (downward): iret
# frame, error code (or dummy 0), handler address. The pushes below
# complete the saved-register frame by hand.
71 do_exception:
72 pushl %ds
73 pushl %eax
74 xorl %eax, %eax
75 pushl %ebp
76 pushl %edi
77 pushl %esi
78 pushl %edx
79 decl %eax # eax = -1
80 pushl %ecx
81 pushl %ebx
82 cld
83 movl %es, %ecx
# Exchange the two stub-pushed slots for the saved %es and an
# orig_eax of -1.
# NOTE(review): with ES = 0x1c this appears to index the slot where
# %ds was just pushed (8 pushes place the handler address at 0x20),
# not the handler address -- verify the offset constants.
84 movl ES(%esp), %edi # get the function address
85 movl ORIG_EAX(%esp), %edx # get the error code
86 movl %eax, ORIG_EAX(%esp)
87 movl %ecx, ES(%esp)
88 movl $(__KERNEL_DS), %ecx
89 movl %ecx, %ds
90 movl %ecx, %es
91 movl %esp,%eax # pt_regs pointer
# Stack-call convention: do_<trap>(pt_regs *regs, long error_code).
92 pushl %edx
93 pushl %eax
94 call *%edi
95 addl $8,%esp
# Common exit: the low two bits of the saved CS give the ring of the
# interrupted code; ring 2/3 returns go via safesti to re-enable
# event delivery first.
97 ret_from_exception:
98 movb CS(%esp),%cl
99 test $2,%cl # slow return to ring 2 or 3
100 jne safesti
101 RESTORE_ALL
103 # A note on the "critical region" in our callback handler.
104 # We want to avoid stacking callback handlers due to events occurring
105 # during handling of the last event. To do this, we keep events disabled
106 # until we've done all processing. HOWEVER, we must enable events before
107 # popping the stack frame (can't be done atomically) and so it would still
108 # be possible to get enough handler activations to overflow the stack.
109 # Although unlikely, bugs of that kind are hard to track down, so we'd
110 # like to avoid the possibility.
111 # So, on entry to the handler we detect whether we interrupted an
112 # existing activation in its critical region -- if so, we pop the current
113 # activation and restart the handler using the previous one.
# Asynchronous event upcall from Xen. The initial push fills the
# dummy error-code slot so SAVE_ALL yields the standard frame layout.
114 ENTRY(hypervisor_callback)
115 pushl %eax
116 SAVE_ALL
# If the interrupted EIP lies inside [scrit, ecrit) we interrupted a
# partially-unwound frame; merge the two frames before re-entering.
117 movl EIP(%esp),%eax
118 cmpl $scrit,%eax
119 jb 11f
120 cmpl $ecrit,%eax
121 jb critical_region_fixup
122 11: push %esp
123 call do_hypervisor_callback
124 add $4,%esp
125 movl HYPERVISOR_shared_info,%esi
126 xorl %eax,%eax
127 movb CS(%esp),%cl
# NOTE(review): this ring test is dead code -- both the jump and the
# fall-through land on safesti, the very next instruction.
128 test $2,%cl # slow return to ring 2 or 3
129 jne safesti
130 safesti:movb $0,1(%esi) # reenable event callbacks
# Between scrit and ecrit, events are enabled while the frame is
# being popped; an upcall landing here is repaired by
# critical_region_fixup using critical_fixup_table below.
131 scrit: /**** START OF CRITICAL REGION ****/
132 testb $0xFF,(%esi)
133 jnz 14f # process more events if necessary...
134 RESTORE_ALL
135 14: movb $1,1(%esi)
136 jmp 11b
137 ecrit: /**** END OF CRITICAL REGION ****/
138 # [How we do the fixup]. We want to merge the current stack frame with the
139 # just-interrupted frame. How we do this depends on where in the critical
140 # region the interrupted handler was executing, and so how many saved
141 # registers are in each frame. We do this quickly using the lookup table
142 # 'critical_fixup_table'. For each byte offset in the critical region, it
143 # provides the number of bytes which have already been popped from the
144 # interrupted stack frame.
# Merge the current frame with the interrupted one. On entry %eax is
# the interrupted EIP (inside scrit..ecrit); the lookup table gives,
# per byte offset, how many bytes the interrupted activation had
# already popped from its frame.
145 critical_region_fixup:
146 addl $critical_fixup_table-scrit,%eax
147 movzbl (%eax),%eax # %eax contains num bytes popped
148 mov %esp,%esi
149 add %eax,%esi # %esi points at end of src region
# 0x34 = 13 dwords: 9 SAVE_ALL registers + orig_eax + eip/cs/eflags.
150 mov %esp,%edi
151 add $0x34,%edi # %edi points at end of dst region
152 mov %eax,%ecx
153 shr $2,%ecx # convert byte count to dwords (original comment had this reversed)
154 je 16f # skip loop if nothing to copy
# Copy the not-yet-popped part of the interrupted frame upward over
# its already-popped slots, highest address first.
155 15: subl $4,%esi # pre-decrementing copy loop
156 subl $4,%edi
157 movl (%esi),%eax
158 movl %eax,(%edi)
159 loop 15b
160 16: movl %edi,%esp # final %edi is top of merged stack
161 jmp 11b
# Bytes-popped lookup table, indexed by (interrupted EIP - scrit).
# One entry per instruction byte of the critical region; each entry
# is the number of frame bytes RESTORE_ALL had popped at that point.
# The table must be kept in exact sync with the instruction encodings
# between scrit and ecrit above.
163 critical_fixup_table:
164 .byte 0x00,0x00,0x00 # testb $0xff,(%esi)
165 .byte 0x00,0x00 # jne 14f
166 .byte 0x00 # pop %ebx
167 .byte 0x04 # pop %ecx
168 .byte 0x08 # pop %edx
169 .byte 0x0c # pop %esi
170 .byte 0x10 # pop %edi
171 .byte 0x14 # pop %ebp
172 .byte 0x18 # pop %eax
173 .byte 0x1c # pop %ds
174 .byte 0x20 # pop %es
175 .byte 0x24,0x24,0x24 # add $4,%esp
176 .byte 0x28 # iret
177 .byte 0x00,0x00,0x00,0x00 # movb $1,1(%esi)
178 .byte 0x00,0x00 # jmp 11b
180 # Hypervisor uses this for application faults while it executes.
# NOTE(review): assumes Xen pushed the saved %ds/%es/%fs/%gs selectors
# below the iret frame; reload them and return to the faulting code.
181 ENTRY(failsafe_callback)
182 pop %ds
183 pop %es
184 pop %fs
185 pop %gs
186 iret
# Trap entry stubs, all sharing do_exception above. Traps for which
# the CPU pushes no error code push a dummy 0 first to keep the frame
# layout uniform.
188 ENTRY(coprocessor_error)
189 pushl $0
190 pushl $do_coprocessor_error
191 jmp do_exception
193 ENTRY(simd_coprocessor_error)
194 pushl $0
195 pushl $do_simd_coprocessor_error
196 jmp do_exception
# Device-not-available (#NM) is ignored entirely: return immediately.
198 ENTRY(device_not_available)
199 iret
201 ENTRY(debug)
202 pushl $0
203 pushl $do_debug
204 jmp do_exception
206 ENTRY(int3)
207 pushl $0
208 pushl $do_int3
209 jmp do_exception
211 ENTRY(overflow)
212 pushl $0
213 pushl $do_overflow
214 jmp do_exception
216 ENTRY(bounds)
217 pushl $0
218 pushl $do_bounds
219 jmp do_exception
221 ENTRY(invalid_op)
222 pushl $0
223 pushl $do_invalid_op
224 jmp do_exception
226 ENTRY(coprocessor_segment_overrun)
227 pushl $0
228 pushl $do_coprocessor_segment_overrun
229 jmp do_exception
# From here on an error code is already on the stack, so only the
# handler address is pushed.
231 ENTRY(invalid_TSS)
232 pushl $do_invalid_TSS
233 jmp do_exception
235 ENTRY(segment_not_present)
236 pushl $do_segment_not_present
237 jmp do_exception
239 ENTRY(stack_segment)
240 pushl $do_stack_segment
241 jmp do_exception
243 ENTRY(general_protection)
244 pushl $do_general_protection
245 jmp do_exception
247 ENTRY(alignment_check)
248 pushl $do_alignment_check
249 jmp do_exception
251 # This handler is special, because it gets an extra value on its stack,
252 # which is the linear faulting address.
253 # fastcall register usage: %eax = pt_regs, %edx = error code,
254 # %ecx = fault address
# NOTE(review): despite the "fastcall" comment above, the call below
# passes its arguments on the stack, and no fault address is read from
# %cr2 or the stack here -- only the error code and pt_regs pointer.
255 ENTRY(page_fault)
256 pushl %ds
257 pushl %eax
258 xorl %eax, %eax
259 pushl %ebp
260 pushl %edi
261 pushl %esi
262 pushl %edx
263 decl %eax /* eax = -1 */
264 pushl %ecx
265 pushl %ebx
266 cld
# Fetch the CPU-pushed error code, replace that slot with orig_eax
# = -1, and stash the saved %es at offset ES.
267 movl ORIG_EAX(%esp), %edi
268 movl %eax, ORIG_EAX(%esp)
269 movl %es, %ecx
270 movl %ecx, ES(%esp)
271 movl $(__KERNEL_DS),%eax
272 movl %eax, %ds
273 movl %eax, %es
# Stack args: do_page_fault(regs, error_code). Note the regs pointer
# is taken after the error-code push, so it points at that slot.
274 pushl %edi
275 movl %esp, %eax
276 pushl %eax
277 call do_page_fault
278 jmp ret_from_exception
# Machine check (#MC) and spurious-interrupt: no CPU error code, so
# push a dummy 0 and dispatch through the common exception path.
280 ENTRY(machine_check)
281 pushl $0
282 pushl $do_machine_check
283 jmp do_exception
285 ENTRY(spurious_interrupt_bug)
286 pushl $0
287 pushl $do_spurious_interrupt_bug
288 jmp do_exception