direct-io.hg

view xen/arch/x86/x86_32/entry.S @ 3280:dda5ab69e74a

bitkeeper revision 1.1159.1.477 (41bf20d2wgoxIqhcE0nzBC8W-yFPhg)

sync w/ head.
author cl349@arcadians.cl.cam.ac.uk
date Tue Dec 14 17:20:18 2004 +0000 (2004-12-14)
parents fd0d4d8e6193 da409d40699a
children b9ab4345fd1b
line source
1 /*
2 * Hypercall and fault low-level handling routines.
3 *
4 * Copyright (c) 2002-2004, K A Fraser
5 * Copyright (c) 1991, 1992 Linus Torvalds
6 *
7 * Calling back to a guest OS:
8 * ===========================
9 *
10 * First, we require that all callbacks (either via a supplied
11 * interrupt-descriptor-table, or via the special event or failsafe callbacks
12 * in the shared-info-structure) are to ring 1. This just makes life easier,
13 * in that it means we don't have to do messy GDT/LDT lookups to find
14 * out which the privilege-level of the return code-selector. That code
15 * would just be a hassle to write, and would need to account for running
16 * off the end of the GDT/LDT, for example. For all callbacks we check
17 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that
18 * we're safe as we don't allow a guest OS to install ring-0 privileges into the
19 * GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are to
20 * ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1
21 * rather than the correct ring) and bad things are bound to ensue -- IRET is
22 * likely to fault, and we may end up killing the domain (no harm can
23 * come to Xen, though).
24 *
25 * When doing a callback, we check if the return CS is in ring 0. If so,
26 * callback is delayed until next return to ring != 0.
27 * If return CS is in ring 1, then we create a callback frame
28 * starting at return SS/ESP. The base of the frame does an intra-privilege
29 * interrupt-return.
30 * If return CS is in ring > 1, we create a callback frame starting
31 * at SS/ESP taken from appropriate section of the current TSS. The base
32 * of the frame does an inter-privilege interrupt-return.
33 *
34 * Note that the "failsafe callback" uses a special stackframe:
35 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
36 * return_CS, return_EFLAGS[, return_ESP, return_SS] }
37 * That is, original values for DS/ES/FS/GS are placed on stack rather than
38 * in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have them
39 * saved/restored in guest OS. Furthermore, if we load them we may cause
40 * a fault if they are invalid, which is a hassle to deal with. We avoid
41 * that problem if we don't load them :-) This property allows us to use
42 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
43 * on return to ring != 0, we can simply package it up as a return via
44 * the failsafe callback, and let the guest OS sort it out (perhaps by
45 * killing an application process). Note that we also do this for any
46 * faulting IRET -- just let the guest OS handle it via the event
47 * callback.
48 *
49 * We terminate a domain in the following cases:
50 * - creating a callback stack frame (due to bad ring-1 stack).
51 * - faulting IRET on entry to failsafe callback handler.
52 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
53 * handler in good order (absolutely no faults allowed!).
54 */
56 #include <xen/config.h>
57 #include <xen/errno.h>
58 #include <xen/softirq.h>
59 #include <asm/asm_defns.h>
60 #include <public/xen.h>
62 #define GET_CURRENT(reg) \
63 movl $8192-4, reg; \
64 orl %esp, reg; \
65 andl $~3,reg; \
66 movl (reg),reg;
68 ALIGN
69 restore_all_guest:
70 testb $TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
71 jnz failsafe_callback
72 testl $X86_EFLAGS_VM,XREGS_eflags(%esp)
73 jnz restore_all_vm86
74 FLT1: movl XREGS_ds(%esp),%ds
75 FLT2: movl XREGS_es(%esp),%es
76 FLT3: movl XREGS_fs(%esp),%fs
77 FLT4: movl XREGS_gs(%esp),%gs
78 restore_all_vm86:
79 popl %ebx
80 popl %ecx
81 popl %edx
82 popl %esi
83 popl %edi
84 popl %ebp
85 popl %eax
86 addl $4,%esp
87 FLT5: iret
88 .section .fixup,"ax"
89 FIX5: subl $28,%esp
90 pushl 28(%esp) # error_code/entry_vector
91 movl %eax,XREGS_eax+4(%esp)
92 movl %ebp,XREGS_ebp+4(%esp)
93 movl %edi,XREGS_edi+4(%esp)
94 movl %esi,XREGS_esi+4(%esp)
95 movl %edx,XREGS_edx+4(%esp)
96 movl %ecx,XREGS_ecx+4(%esp)
97 movl %ebx,XREGS_ebx+4(%esp)
98 FIX1: SET_XEN_SEGMENTS(a)
99 movl %eax,%fs
100 movl %eax,%gs
101 sti
102 popl %esi
103 pushfl # EFLAGS
104 movl $__HYPERVISOR_CS,%eax
105 pushl %eax # CS
106 movl $DBLFLT1,%eax
107 pushl %eax # EIP
108 pushl %esi # error_code/entry_vector
109 jmp error_code
110 DBLFLT1:GET_CURRENT(%ebx)
111 jmp test_all_events
112 DBLFIX1:GET_CURRENT(%ebx)
113 testb $TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
114 jnz domain_crash # cannot reenter failsafe code
115 orb $TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
116 jmp test_all_events # will return via failsafe code
117 .previous
118 .section __pre_ex_table,"a"
119 .long FLT1,FIX1
120 .long FLT2,FIX1
121 .long FLT3,FIX1
122 .long FLT4,FIX1
123 .long FLT5,FIX5
124 .previous
125 .section __ex_table,"a"
126 .long DBLFLT1,DBLFIX1
127 .previous
129 /* No special register assumptions */
130 failsafe_callback:
131 GET_CURRENT(%ebx)
132 andb $~TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
133 leal EDOMAIN_trap_bounce(%ebx),%edx
134 movl EDOMAIN_failsafe_addr(%ebx),%eax
135 movl %eax,TRAPBOUNCE_eip(%edx)
136 movl EDOMAIN_failsafe_sel(%ebx),%eax
137 movw %ax,TRAPBOUNCE_cs(%edx)
138 movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
139 call create_bounce_frame
140 popl %ebx
141 popl %ecx
142 popl %edx
143 popl %esi
144 popl %edi
145 popl %ebp
146 popl %eax
147 addl $4,%esp
148 FLT6: iret
149 .section .fixup,"ax"
150 FIX6: pushl %ebx
151 GET_CURRENT(%ebx)
152 orb $TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
153 pop %ebx
154 jmp FIX5
155 .section __pre_ex_table,"a"
156 .long FLT6,FIX6
157 .previous
159 ALIGN
160 restore_all_xen:
161 popl %ebx
162 popl %ecx
163 popl %edx
164 popl %esi
165 popl %edi
166 popl %ebp
167 popl %eax
168 addl $4,%esp
169 iret
171 ALIGN
172 ENTRY(hypercall)
173 subl $4,%esp
174 SAVE_ALL(b)
175 sti
176 GET_CURRENT(%ebx)
177 andl $(NR_hypercalls-1),%eax
178 call *SYMBOL_NAME(hypercall_table)(,%eax,4)
180 ret_from_hypercall:
181 movl %eax,XREGS_eax(%esp) # save the return value
183 test_all_events:
184 xorl %ecx,%ecx
185 notl %ecx
186 cli # tests must not race interrupts
187 /*test_softirqs:*/
188 movl EDOMAIN_processor(%ebx),%eax
189 shl $6,%eax # sizeof(irq_cpustat) == 64
190 test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
191 jnz process_softirqs
192 /*test_guest_events:*/
193 movl EDOMAIN_vcpu_info(%ebx),%eax
194 testb $0xFF,VCPUINFO_upcall_mask(%eax)
195 jnz restore_all_guest
196 testb $0xFF,VCPUINFO_upcall_pending(%eax)
197 jz restore_all_guest
198 /*process_guest_events:*/
199 leal EDOMAIN_trap_bounce(%ebx),%edx
200 movl EDOMAIN_event_addr(%ebx),%eax
201 movl %eax,TRAPBOUNCE_eip(%edx)
202 movl EDOMAIN_event_sel(%ebx),%eax
203 movw %ax,TRAPBOUNCE_cs(%edx)
204 movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
205 call create_bounce_frame
206 movl EDOMAIN_vcpu_info(%ebx),%eax
207 movb $1,VCPUINFO_upcall_mask(%eax) # Upcalls are masked during delivery
208 jmp restore_all_guest
210 ALIGN
211 process_softirqs:
212 sti
213 call SYMBOL_NAME(do_softirq)
214 jmp test_all_events
216 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
217 /* {EIP, CS, EFLAGS, [ESP, SS]} */
218 /* %edx == trap_bounce, %ebx == task_struct */
219 /* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp. */
220 create_bounce_frame:
221 movl XREGS_eflags+4(%esp),%ecx
222 movb XREGS_cs+4(%esp),%cl
223 testl $(2|X86_EFLAGS_VM),%ecx
224 jz ring1 /* jump if returning to an existing ring-1 activation */
225 /* obtain ss/esp from TSS -- no current ring-1 activations */
226 movl EDOMAIN_processor(%ebx),%eax
227 /* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
228 movl %eax, %ecx
229 shll $7, %ecx
230 shll $13, %eax
231 addl %ecx,%eax
232 addl $init_tss + 12,%eax
233 movl (%eax),%esi /* tss->esp1 */
234 FLT7: movl 4(%eax),%gs /* tss->ss1 */
235 testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
236 jz nvm86_1
237 subl $16,%esi /* push ES/DS/FS/GS (VM86 stack frame) */
238 movl XREGS_es+4(%esp),%eax
239 FLT8: movl %eax,%gs:(%esi)
240 movl XREGS_ds+4(%esp),%eax
241 FLT9: movl %eax,%gs:4(%esi)
242 movl XREGS_fs+4(%esp),%eax
243 FLT10: movl %eax,%gs:8(%esi)
244 movl XREGS_gs+4(%esp),%eax
245 FLT11: movl %eax,%gs:12(%esi)
246 nvm86_1:subl $8,%esi /* push SS/ESP (inter-priv iret) */
247 movl XREGS_esp+4(%esp),%eax
248 FLT12: movl %eax,%gs:(%esi)
249 movl XREGS_ss+4(%esp),%eax
250 FLT13: movl %eax,%gs:4(%esi)
251 jmp 1f
252 ring1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
253 movl XREGS_esp+4(%esp),%esi
254 FLT14: movl XREGS_ss+4(%esp),%gs
255 1: /* Construct a stack frame: EFLAGS, CS/EIP */
256 subl $12,%esi
257 movl XREGS_eip+4(%esp),%eax
258 FLT15: movl %eax,%gs:(%esi)
259 movl XREGS_cs+4(%esp),%eax
260 FLT16: movl %eax,%gs:4(%esi)
261 movl XREGS_eflags+4(%esp),%eax
262 FLT17: movl %eax,%gs:8(%esi)
263 movb TRAPBOUNCE_flags(%edx),%cl
264 test $TBF_EXCEPTION_ERRCODE,%cl
265 jz 1f
266 subl $4,%esi # push error_code onto guest frame
267 movl TRAPBOUNCE_error_code(%edx),%eax
268 FLT18: movl %eax,%gs:(%esi)
269 testb $TBF_EXCEPTION_CR2,%cl
270 jz 2f
271 subl $4,%esi # push %cr2 onto guest frame
272 movl TRAPBOUNCE_cr2(%edx),%eax
273 FLT19: movl %eax,%gs:(%esi)
274 1: testb $TBF_FAILSAFE,%cl
275 jz 2f
276 subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
277 testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
278 jz nvm86_2
279 xorl %eax,%eax # VM86: we write zero selector values
280 FLT20: movl %eax,%gs:(%esi)
281 FLT21: movl %eax,%gs:4(%esi)
282 FLT22: movl %eax,%gs:8(%esi)
283 FLT23: movl %eax,%gs:12(%esi)
284 jmp 2f
285 nvm86_2:movl XREGS_ds+4(%esp),%eax # non-VM86: write real selector values
286 FLT24: movl %eax,%gs:(%esi)
287 movl XREGS_es+4(%esp),%eax
288 FLT25: movl %eax,%gs:4(%esi)
289 movl XREGS_fs+4(%esp),%eax
290 FLT26: movl %eax,%gs:8(%esi)
291 movl XREGS_gs+4(%esp),%eax
292 FLT27: movl %eax,%gs:12(%esi)
293 2: movb $0,TRAPBOUNCE_flags(%edx)
294 testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
295 jz nvm86_3
296 xorl %eax,%eax /* zero DS-GS, just as a real CPU would */
297 movl %eax,XREGS_ds+4(%esp)
298 movl %eax,XREGS_es+4(%esp)
299 movl %eax,XREGS_fs+4(%esp)
300 movl %eax,XREGS_gs+4(%esp)
301 nvm86_3:/* Rewrite our stack frame and return to ring 1. */
302 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
303 andl $0xfffcbeff,XREGS_eflags+4(%esp)
304 movl %gs,XREGS_ss+4(%esp)
305 movl %esi,XREGS_esp+4(%esp)
306 movzwl TRAPBOUNCE_cs(%edx),%eax
307 movl %eax,XREGS_cs+4(%esp)
308 movl TRAPBOUNCE_eip(%edx),%eax
309 movl %eax,XREGS_eip+4(%esp)
310 ret
311 .section .fixup,"ax"
312 FIX7: sti
313 popl %esi
314 addl $4,%esp # Discard create_b_frame return address
315 pushfl # EFLAGS
316 movl $__HYPERVISOR_CS,%eax
317 pushl %eax # CS
318 movl $DBLFLT2,%eax
319 pushl %eax # EIP
320 pushl %esi # error_code/entry_vector
321 jmp error_code
322 DBLFLT2:jmp process_guest_exception_and_events
323 .previous
324 .section __pre_ex_table,"a"
325 .long FLT7,FIX7 , FLT8,FIX7 , FLT9,FIX7 , FLT10,FIX7
326 .long FLT11,FIX7 , FLT12,FIX7 , FLT13,FIX7 , FLT14,FIX7
327 .long FLT15,FIX7 , FLT16,FIX7 , FLT17,FIX7 , FLT18,FIX7
328 .long FLT19,FIX7 , FLT20,FIX7 , FLT21,FIX7 , FLT22,FIX7
329 .long FLT23,FIX7 , FLT24,FIX7 , FLT25,FIX7 , FLT26,FIX7 , FLT27,FIX7
330 .previous
331 .section __ex_table,"a"
332 .long DBLFLT2,domain_crash
333 .previous
335 ALIGN
336 process_guest_exception_and_events:
337 leal EDOMAIN_trap_bounce(%ebx),%edx
338 testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
339 jz test_all_events
340 cli # create_bounce_frame needs CLI for pre-exceptions to work
341 call create_bounce_frame
342 jmp test_all_events
344 ALIGN
345 ENTRY(ret_from_intr)
346 GET_CURRENT(%ebx)
347 movl XREGS_eflags(%esp),%eax
348 movb XREGS_cs(%esp),%al
349 testl $(3|X86_EFLAGS_VM),%eax
350 jnz test_all_events
351 jmp restore_all_xen
353 ENTRY(divide_error)
354 pushl $TRAP_divide_error<<16
355 ALIGN
356 error_code:
357 SAVE_ALL_NOSEGREGS(a)
358 SET_XEN_SEGMENTS(a)
359 testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%esp)
360 jz exception_with_ints_disabled
361 1: sti # re-enable interrupts
362 xorl %eax,%eax
363 movw XREGS_entry_vector(%esp),%ax
364 movl %esp,%edx
365 pushl %edx # push the xen_regs pointer
366 GET_CURRENT(%ebx)
367 call *SYMBOL_NAME(exception_table)(,%eax,4)
368 addl $4,%esp
369 movl XREGS_eflags(%esp),%eax
370 movb XREGS_cs(%esp),%al
371 testl $(3|X86_EFLAGS_VM),%eax
372 jz restore_all_xen
373 jmp process_guest_exception_and_events
375 exception_with_ints_disabled:
376 movl XREGS_eflags(%esp),%eax
377 movb XREGS_cs(%esp),%al
378 testl $(3|X86_EFLAGS_VM),%eax # interrupts disabled outside Xen?
379 jnz 1b # it really does happen!
380 # (e.g., DOM0 X server)
381 pushl XREGS_eip(%esp)
382 call search_pre_exception_table
383 addl $4,%esp
384 testl %eax,%eax # no fixup code for faulting EIP?
385 jz FATAL_exception_with_ints_disabled
386 movl %eax,XREGS_eip(%esp)
387 movl %esp,%esi
388 subl $4,%esp
389 movl %esp,%edi
390 movl $XREGS_kernel_sizeof/4,%ecx
391 rep; movsl # make room for error_code/entry_vector
392 movl XREGS_error_code(%esp),%eax # error_code/entry_vector
393 movl %eax,XREGS_kernel_sizeof(%esp)
394 jmp restore_all_xen # return to fixup code
396 FATAL_exception_with_ints_disabled:
397 xorl %esi,%esi
398 movw XREGS_entry_vector(%esp),%si
399 movl %esp,%edx
400 pushl %edx # push the xen_regs pointer
401 pushl %esi # push the trapnr (entry vector)
402 call SYMBOL_NAME(fatal_trap)
403 ud2
405 ENTRY(coprocessor_error)
406 pushl $TRAP_copro_error<<16
407 jmp error_code
409 ENTRY(simd_coprocessor_error)
410 pushl $TRAP_simd_error<<16
411 jmp error_code
413 ENTRY(device_not_available)
414 pushl $TRAP_no_device<<16
415 jmp error_code
417 ENTRY(debug)
418 pushl $TRAP_debug<<16
419 jmp error_code
421 ENTRY(int3)
422 pushl $TRAP_int3<<16
423 jmp error_code
425 ENTRY(overflow)
426 pushl $TRAP_overflow<<16
427 jmp error_code
429 ENTRY(bounds)
430 pushl $TRAP_bounds<<16
431 jmp error_code
433 ENTRY(invalid_op)
434 pushl $TRAP_invalid_op<<16
435 jmp error_code
437 ENTRY(coprocessor_segment_overrun)
438 pushl $TRAP_copro_seg<<16
439 jmp error_code
441 ENTRY(invalid_TSS)
442 movw $TRAP_invalid_tss,2(%esp)
443 jmp error_code
445 ENTRY(segment_not_present)
446 movw $TRAP_no_segment,2(%esp)
447 jmp error_code
449 ENTRY(stack_segment)
450 movw $TRAP_stack_error,2(%esp)
451 jmp error_code
453 ENTRY(general_protection)
454 movw $TRAP_gp_fault,2(%esp)
455 jmp error_code
457 ENTRY(alignment_check)
458 movw $TRAP_alignment_check,2(%esp)
459 jmp error_code
461 ENTRY(page_fault)
462 movw $TRAP_page_fault,2(%esp)
463 jmp error_code
465 ENTRY(machine_check)
466 pushl $TRAP_machine_check<<16
467 jmp error_code
469 ENTRY(spurious_interrupt_bug)
470 pushl $TRAP_spurious_int<<16
471 jmp error_code
473 ENTRY(nmi)
474 # Save state but do not trash the segment registers!
475 # We may otherwise be unable to reload them or copy them to ring 1.
476 pushl %eax
477 SAVE_ALL_NOSEGREGS(a)
479 # Check for hardware problems.
480 inb $0x61,%al
481 testb $0x80,%al
482 jne nmi_parity_err
483 testb $0x40,%al
484 jne nmi_io_err
485 movl %eax,%ebx
487 # Okay, its almost a normal NMI tick. We can only process it if:
488 # A. We are the outermost Xen activation (in which case we have
489 # the selectors safely saved on our stack)
490 # B. DS-GS all contain sane Xen values.
491 # In all other cases we bail without touching DS-GS, as we have
492 # interrupted an enclosing Xen activation in tricky prologue or
493 # epilogue code.
494 movl XREGS_eflags(%esp),%eax
495 movb XREGS_cs(%esp),%al
496 testl $(3|X86_EFLAGS_VM),%eax
497 jnz do_watchdog_tick
498 movl XREGS_ds(%esp),%eax
499 cmpw $(__HYPERVISOR_DS),%ax
500 jne restore_all_xen
501 movl XREGS_es(%esp),%eax
502 cmpw $(__HYPERVISOR_DS),%ax
503 jne restore_all_xen
504 movl XREGS_fs(%esp),%eax
505 cmpw $(__HYPERVISOR_DS),%ax
506 jne restore_all_xen
507 movl XREGS_gs(%esp),%eax
508 cmpw $(__HYPERVISOR_DS),%ax
509 jne restore_all_xen
511 do_watchdog_tick:
512 movl $(__HYPERVISOR_DS),%edx
513 movl %edx,%ds
514 movl %edx,%es
515 movl %esp,%edx
516 pushl %ebx # reason
517 pushl %edx # regs
518 call SYMBOL_NAME(do_nmi)
519 addl $8,%esp
520 movl XREGS_eflags(%esp),%eax
521 movb XREGS_cs(%esp),%al
522 testl $(3|X86_EFLAGS_VM),%eax
523 jz restore_all_xen
524 GET_CURRENT(%ebx)
525 jmp restore_all_guest
527 nmi_parity_err:
528 # Clear and disable the parity-error line
529 andb $0xf,%al
530 orb $0x4,%al
531 outb %al,$0x61
532 cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
533 je restore_all_xen
534 bts $0,%ss:SYMBOL_NAME(nmi_softirq_reason)
535 bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
536 cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
537 je restore_all_xen
538 movl $(__HYPERVISOR_DS),%edx # nmi=fatal
539 movl %edx,%ds
540 movl %edx,%es
541 movl %esp,%edx
542 push %edx
543 call SYMBOL_NAME(mem_parity_error)
544 addl $4,%esp
545 jmp ret_from_intr
547 nmi_io_err:
548 # Clear and disable the I/O-error line
549 andb $0xf,%al
550 orb $0x8,%al
551 outb %al,$0x61
552 cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
553 je restore_all_xen
554 bts $1,%ss:SYMBOL_NAME(nmi_softirq_reason)
555 bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
556 cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
557 je restore_all_xen
558 movl $(__HYPERVISOR_DS),%edx # nmi=fatal
559 movl %edx,%ds
560 movl %edx,%es
561 movl %esp,%edx
562 push %edx
563 call SYMBOL_NAME(io_check_error)
564 addl $4,%esp
565 jmp ret_from_intr
568 ENTRY(setup_vm86_frame)
569 # Copies the entire stack frame forwards by 16 bytes.
570 .macro copy_vm86_words count=18
571 .if \count
572 pushl ((\count-1)*4)(%esp)
573 popl ((\count-1)*4)+16(%esp)
574 copy_vm86_words "(\count-1)"
575 .endif
576 .endm
577 copy_vm86_words
578 addl $16,%esp
579 ret
581 do_switch_vm86:
582 # Discard the return address
583 addl $4,%esp
585 movl XREGS_eflags(%esp),%edx
587 # GS:ESI == Ring-1 stack activation
588 movl XREGS_esp(%esp),%esi
589 VFLT1: movl XREGS_ss(%esp),%gs
591 # ES:EDI == Ring-0 stack activation
592 leal XREGS_eip(%esp),%edi
594 # Restore the hypercall-number-clobbered EAX on our stack frame
595 VFLT2: movl %gs:(%esi),%eax
596 movl %eax,XREGS_eax(%esp)
597 addl $4,%esi
599 # Copy the VM86 activation from the ring-1 stack to the ring-0 stack
600 movl $(XREGS_user_sizeof-XREGS_eip)/4,%ecx
601 VFLT3: movl %gs:(%esi),%eax
602 stosl
603 addl $4,%esi
604 loop VFLT3
606 # Fix up EFLAGS
607 andl $~X86_EFLAGS_IOPL,XREGS_eflags(%esp)
608 andl $X86_EFLAGS_IOPL,%edx # Ignore attempts to change EFLAGS.IOPL
609 jnz 1f
610 orl $X86_EFLAGS_IF,%edx # EFLAGS.IOPL=0 => no messing with EFLAGS.IF
611 1: orl $X86_EFLAGS_VM,%edx # Force EFLAGS.VM
612 orl %edx,XREGS_eflags(%esp)
614 jmp test_all_events
616 .section __ex_table,"a"
617 .long VFLT1,domain_crash
618 .long VFLT2,domain_crash
619 .long VFLT3,domain_crash
620 .previous
622 .data
624 ENTRY(exception_table)
625 .long SYMBOL_NAME(do_divide_error)
626 .long SYMBOL_NAME(do_debug)
627 .long 0 # nmi
628 .long SYMBOL_NAME(do_int3)
629 .long SYMBOL_NAME(do_overflow)
630 .long SYMBOL_NAME(do_bounds)
631 .long SYMBOL_NAME(do_invalid_op)
632 .long SYMBOL_NAME(math_state_restore)
633 .long 0 # double fault
634 .long SYMBOL_NAME(do_coprocessor_segment_overrun)
635 .long SYMBOL_NAME(do_invalid_TSS)
636 .long SYMBOL_NAME(do_segment_not_present)
637 .long SYMBOL_NAME(do_stack_segment)
638 .long SYMBOL_NAME(do_general_protection)
639 .long SYMBOL_NAME(do_page_fault)
640 .long SYMBOL_NAME(do_spurious_interrupt_bug)
641 .long SYMBOL_NAME(do_coprocessor_error)
642 .long SYMBOL_NAME(do_alignment_check)
643 .long SYMBOL_NAME(do_machine_check)
644 .long SYMBOL_NAME(do_simd_coprocessor_error)
646 ENTRY(hypercall_table)
647 .long SYMBOL_NAME(do_set_trap_table) /* 0 */
648 .long SYMBOL_NAME(do_mmu_update)
649 .long SYMBOL_NAME(do_set_gdt)
650 .long SYMBOL_NAME(do_stack_switch)
651 .long SYMBOL_NAME(do_set_callbacks)
652 .long SYMBOL_NAME(do_fpu_taskswitch) /* 5 */
653 .long SYMBOL_NAME(do_sched_op)
654 .long SYMBOL_NAME(do_dom0_op)
655 .long SYMBOL_NAME(do_set_debugreg)
656 .long SYMBOL_NAME(do_get_debugreg)
657 .long SYMBOL_NAME(do_update_descriptor) /* 10 */
658 .long SYMBOL_NAME(do_set_fast_trap)
659 .long SYMBOL_NAME(do_dom_mem_op)
660 .long SYMBOL_NAME(do_multicall)
661 .long SYMBOL_NAME(do_update_va_mapping)
662 .long SYMBOL_NAME(do_set_timer_op) /* 15 */
663 .long SYMBOL_NAME(do_event_channel_op)
664 .long SYMBOL_NAME(do_xen_version)
665 .long SYMBOL_NAME(do_console_io)
666 .long SYMBOL_NAME(do_physdev_op)
667 .long SYMBOL_NAME(do_grant_table_op) /* 20 */
668 .long SYMBOL_NAME(do_vm_assist)
669 .long SYMBOL_NAME(do_update_va_mapping_otherdomain)
670 .long SYMBOL_NAME(do_switch_vm86)
671 .long SYMBOL_NAME(do_boot_vcpu)
672 .rept NR_hypercalls-((.-hypercall_table)/4)
673 .long SYMBOL_NAME(do_ni_hypercall)
674 .endr