ia64/xen-unstable: xen/arch/x86/x86_32/entry.S @ 6538:84ee014ebd41

Merge xen-vtx-unstable.hg

author:   adsharma@los-vmm.sc.intel.com
date:     Wed Aug 17 12:34:38 2005 -0800
parents:  23979fb12c49 57b3fdca5dae
children: 99914b54f7bf
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2002-2004, K A Fraser
 * Copyright (c) 1991, 1992 Linus Torvalds
 *
 * Calling back to a guest OS:
 * ===========================
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find
 * out what the privilege level of the return code-selector is. That code
 * would just be a hassle to write, and would need to account for running
 * off the end of the GDT/LDT, for example. For all callbacks we check
 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that
 * we're safe as we don't allow a guest OS to install ring-0 privileges into
 * the GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are
 * to ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1
 * rather than the correct ring) and bad things are bound to ensue -- IRET is
 * likely to fault, and we may end up killing the domain (no harm can
 * come to Xen, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 *     { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *       return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, original values for DS/ES/FS/GS are placed on the stack rather
 * than in DS/ES/FS/GS themselves. Why? It saves us loading them, only to
 * have them saved/restored in the guest OS. Furthermore, if we load them we
 * may cause a fault if they are invalid, which is a hassle to deal with. We
 * avoid that problem if we don't load them :-) This property allows us to use
 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
 * on return to ring != 0, we can simply package it up as a return via
 * the failsafe callback, and let the guest OS sort it out (perhaps by
 * killing an application process). Note that we also do this for any
 * faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - creating a callback stack frame (due to a bad ring-1 stack).
 *  - a faulting IRET on entry to the failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
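
/*
 * Illustrative sketch (not part of the build): the failsafe-callback frame
 * described above, viewed as a hypothetical C structure laid out from the
 * guest stack pointer towards higher addresses. The struct and field names
 * are invented for illustration; Xen defines no structure by this name.
 *
 *     struct failsafe_frame {
 *         u32 return_ds, return_es, return_fs, return_gs;
 *         u32 return_eip;
 *         u32 return_cs;
 *         u32 return_eflags;
 *         u32 return_esp, return_ss;   // present only for inter-privilege returns
 *     };
 */
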
#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

#define GET_CURRENT(reg)          \
        movl $STACK_SIZE-4, reg;  \
        orl  %esp, reg;           \
        andl $~3,reg;             \
        movl (reg),reg;
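
/*
 * Illustrative sketch (not part of the build): GET_CURRENT() relies on the
 * current struct vcpu pointer being kept in the topmost word of the per-CPU
 * stack. Assuming STACK_SIZE is a power of two, a rough C equivalent (the
 * helper name is hypothetical) is:
 *
 *     struct vcpu *get_current_from(unsigned long esp)
 *     {
 *         unsigned long top = (esp | (STACK_SIZE - 4)) & ~3UL;
 *         return *(struct vcpu **)top;
 *     }
 */
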
#ifdef CONFIG_VMX
/*
 * At VM-exit time the processor saves the guest selectors, esp, eip,
 * and eflags. Therefore we don't save them, but simply decrement
 * the kernel stack pointer to make it consistent with the stack frame
 * at usual interruption time. Host eflags is not saved by VMX, so we set
 * it to a fixed value.
 *
 * We also need the room, especially because the orig_eax field is used
 * by do_IRQ(). Compared with cpu_user_regs, we skip pushing the following:
 *     (10)  u32 gs;
 *     (9)   u32 fs;
 *     (8)   u32 ds;
 *     (7)   u32 es;
 *  <- get_stack_bottom() (= HOST_ESP)
 *     (6)   u32 ss;
 *     (5)   u32 esp;
 *     (4)   u32 eflags;
 *     (3)   u32 cs;
 *     (2)   u32 eip;
 *     (2/1) u16 entry_vector;
 *     (1/1) u16 error_code;
 * However, get_stack_bottom() actually returns 20 bytes before the real
 * bottom of the stack to allow space for: domain pointer, DS, ES, FS, GS.
 * Therefore, we effectively skip 6 registers.
 */
#define VMX_MONITOR_EFLAGS      0x202 /* IF on */
#define NR_SKIPPED_REGS         6     /* See the above explanation */
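
/*
 * Note on the layout above: with NR_SKIPPED_REGS == 6, the
 * "subl $(NR_SKIPPED_REGS*4), %esp" in VMX_SAVE_ALL_NOSEGREGS steps over the
 * six words that are not pushed here -- ss, esp, eflags, cs, eip and the
 * error_code/entry_vector pair -- so that the subsequent pushes land on the
 * general-purpose-register slots of cpu_user_regs. At that point 0xc(%esp)
 * addresses the skipped eflags slot, which is explicitly zeroed to mark the
 * frame as a VMX guest frame.
 */
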
#define VMX_SAVE_ALL_NOSEGREGS                  \
        pushl $VMX_MONITOR_EFLAGS;              \
        popf;                                   \
        subl $(NR_SKIPPED_REGS*4), %esp;        \
        movl $0, 0xc(%esp); /* eflags==0 identifies cpu_user_regs as VMX guest */ \
        pushl %eax;                             \
        pushl %ebp;                             \
        pushl %edi;                             \
        pushl %esi;                             \
        pushl %edx;                             \
        pushl %ecx;                             \
        pushl %ebx;

#define VMX_RESTORE_ALL_NOSEGREGS               \
        popl %ebx;                              \
        popl %ecx;                              \
        popl %edx;                              \
        popl %esi;                              \
        popl %edi;                              \
        popl %ebp;                              \
        popl %eax;                              \
        addl $(NR_SKIPPED_REGS*4), %esp

ENTRY(vmx_asm_vmexit_handler)
        /* selectors are restored/saved by VMX */
        VMX_SAVE_ALL_NOSEGREGS
        call vmx_vmexit_handler
        jmp  vmx_asm_do_resume

.macro vmx_asm_common launch initialized
1:
/* vmx_test_all_events */
        .if \initialized
        GET_CURRENT(%ebx)
/*test_all_events:*/
        xorl %ecx,%ecx
        notl %ecx
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl VCPU_processor(%ebx),%eax
        shl  $IRQSTAT_shift,%eax
        test %ecx,irq_stat(%eax,1)
        jnz  2f

/* vmx_restore_all_guest */
        call load_cr2
        .endif
        VMX_RESTORE_ALL_NOSEGREGS
        /*
         * Check if we are going back to VMX-based VM.
         * By this time, all the setups in the VMCS must be complete.
         */
        .if \launch
        /* VMLAUNCH */
        .byte 0x0f,0x01,0xc2
        pushf
        call vm_launch_fail
        .else
        /* VMRESUME */
        .byte 0x0f,0x01,0xc3
        pushf
        call vm_resume_fail
        .endif
        /* Should never reach here */
        hlt

        ALIGN
        .if \initialized
2:
/* vmx_process_softirqs */
        sti
        call do_softirq
        jmp  1b
        ALIGN
        .endif
.endm

ENTRY(vmx_asm_do_launch)
        vmx_asm_common 1 0

ENTRY(vmx_asm_do_resume)
        vmx_asm_common 0 1

ENTRY(vmx_asm_do_relaunch)
        vmx_asm_common 1 1
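
/*
 * The three entry points above are the same macro body parameterised in two
 * ways:
 *   vmx_asm_do_launch   -- launch=1, initialized=0: VMLAUNCH, no event checks
 *   vmx_asm_do_resume   -- launch=0, initialized=1: event/softirq checks, then VMRESUME
 *   vmx_asm_do_relaunch -- launch=1, initialized=1: event/softirq checks, then VMLAUNCH
 */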

#endif

        ALIGN
restore_all_guest:
        testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
        jnz   restore_all_vm86
FLT1:   mov   UREGS_ds(%esp),%ds
FLT2:   mov   UREGS_es(%esp),%es
FLT3:   mov   UREGS_fs(%esp),%fs
FLT4:   mov   UREGS_gs(%esp),%gs
restore_all_vm86:
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        addl  $4,%esp
FLT5:   iret
.section .fixup,"ax"
FIX5:   subl  $28,%esp
        pushl 28(%esp)                  # error_code/entry_vector
        movl  %eax,UREGS_eax+4(%esp)
        movl  %ebp,UREGS_ebp+4(%esp)
        movl  %edi,UREGS_edi+4(%esp)
        movl  %esi,UREGS_esi+4(%esp)
        movl  %edx,UREGS_edx+4(%esp)
        movl  %ecx,UREGS_ecx+4(%esp)
        movl  %ebx,UREGS_ebx+4(%esp)
FIX1:   SET_XEN_SEGMENTS(a)
        movl  %eax,%fs
        movl  %eax,%gs
        sti
        popl  %esi
        pushfl                          # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                      # CS
        movl  $DBLFLT1,%eax
        pushl %eax                      # EIP
        pushl %esi                      # error_code/entry_vector
        jmp   error_code
DBLFLT1:GET_CURRENT(%ebx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%ebx)
        leal  VCPU_trap_bounce(%ebx),%edx
        movl  VCPU_failsafe_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  VCPU_failsafe_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
        call  create_bounce_frame
        xorl  %eax,%eax
        movl  %eax,UREGS_ds(%esp)
        movl  %eax,UREGS_es(%esp)
        movl  %eax,UREGS_fs(%esp)
        movl  %eax,UREGS_gs(%esp)
        jmp   test_all_events
.previous
.section __pre_ex_table,"a"
        .long FLT1,FIX1
        .long FLT2,FIX1
        .long FLT3,FIX1
        .long FLT4,FIX1
        .long FLT5,FIX5
.previous
.section __ex_table,"a"
        .long DBLFLT1,failsafe_callback
.previous
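
/*
 * The __pre_ex_table entries above are consulted by
 * search_pre_exception_table() (see exception_with_ints_disabled below) when
 * one of the FLT1-FLT4 selector loads or the final IRET (FLT5) faults on the
 * way back to the guest. The FIX1/FIX5 fixups rebuild a proper exception
 * frame and re-raise the fault as if it had occurred at DBLFLT1; the ordinary
 * __ex_table entry for DBLFLT1 then redirects execution to failsafe_callback,
 * which bounces the fault to the guest's registered failsafe handler.
 */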

        ALIGN
restore_all_xen:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
        iret

        ALIGN
ENTRY(hypercall)
        subl $4,%esp
        SAVE_ALL(b)
        sti
        GET_CURRENT(%ebx)
        andl $(NR_hypercalls-1),%eax
        PERFC_INCR(PERFC_hypercalls, %eax)
        call *hypercall_table(,%eax,4)
        movl %eax,UREGS_eax(%esp)       # save the return value
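
/*
 * Illustrative sketch (not part of the build): the dispatch above, in C.
 * EAX carries the hypercall number; the guest registers saved by SAVE_ALL sit
 * directly above the return address, so they double as the handler's C
 * argument list, and the handler's return value is written back into the
 * saved EAX slot so the guest sees it on the return path. Roughly:
 *
 *     regs->eax = hypercall_table[regs->eax & (NR_hypercalls - 1)]
 *                     (regs->ebx, regs->ecx, regs->edx, regs->esi, regs->edi);
 */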

test_all_events:
        xorl  %ecx,%ecx
        notl  %ecx
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%ebx),%eax
        shl   $IRQSTAT_shift,%eax
        test  %ecx,irq_stat(%eax,1)
        jnz   process_softirqs
/*test_guest_events:*/
        movl  VCPU_vcpu_info(%ebx),%eax
        testb $0xFF,VCPUINFO_upcall_mask(%eax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%eax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leal  VCPU_trap_bounce(%ebx),%edx
        movl  VCPU_event_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  VCPU_event_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call  create_bounce_frame
        jmp   test_all_events
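
/*
 * Illustrative sketch (not part of the build): the tests above, in C.
 * Field and helper names are illustrative and follow the VCPU_ and
 * VCPUINFO_ offsets used by the assembly:
 *
 *     if ( softirq_pending(v->processor) )
 *         do_softirq();                                  // process_softirqs, then retry
 *     else if ( !vcpu_info->upcall_mask && vcpu_info->upcall_pending )
 *         bounce_to_guest(v->event_addr, v->event_sel);  // via create_bounce_frame
 *     else
 *         return_to_guest();                             // restore_all_guest
 */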

        ALIGN
process_softirqs:
        sti
        call do_softirq
        jmp  test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:           */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                                       */
/* %edx == trap_bounce, %ebx == struct vcpu                             */
/* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp.    */
create_bounce_frame:
        movl  UREGS_eflags+4(%esp),%ecx
        movb  UREGS_cs+4(%esp),%cl
        testl $(2|X86_EFLAGS_VM),%ecx
        jz    ring1 /* jump if returning to an existing ring-1 activation */
        movl  VCPU_kernel_sp(%ebx),%esi
FLT6:   mov   VCPU_kernel_ss(%ebx),%gs
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz    nvm86_1
        subl  $16,%esi                  /* push ES/DS/FS/GS (VM86 stack frame) */
        movl  UREGS_es+4(%esp),%eax
FLT7:   movl  %eax,%gs:(%esi)
        movl  UREGS_ds+4(%esp),%eax
FLT8:   movl  %eax,%gs:4(%esi)
        movl  UREGS_fs+4(%esp),%eax
FLT9:   movl  %eax,%gs:8(%esi)
        movl  UREGS_gs+4(%esp),%eax
FLT10:  movl  %eax,%gs:12(%esi)
nvm86_1:subl  $8,%esi                   /* push SS/ESP (inter-priv iret) */
        movl  UREGS_esp+4(%esp),%eax
FLT11:  movl  %eax,%gs:(%esi)
        movl  UREGS_ss+4(%esp),%eax
FLT12:  movl  %eax,%gs:4(%esi)
        jmp   1f
ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl  UREGS_esp+4(%esp),%esi
FLT13:  mov   UREGS_ss+4(%esp),%gs
1:      /* Construct a stack frame: EFLAGS, CS/EIP */
        movb  TRAPBOUNCE_flags(%edx),%cl
        subl  $12,%esi
        movl  UREGS_eip+4(%esp),%eax
FLT14:  movl  %eax,%gs:(%esi)
        movl  VCPU_vcpu_info(%ebx),%eax
        pushl VCPUINFO_upcall_mask(%eax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                       # TBF_INTERRUPT -> set upcall mask
        orb   %ch,VCPUINFO_upcall_mask(%eax)
        popl  %eax
        shll  $16,%eax                  # Bits 16-23: saved_upcall_mask
        movw  UREGS_cs+4(%esp),%ax      # Bits  0-15: CS
FLT15:  movl  %eax,%gs:4(%esi)
        movl  UREGS_eflags+4(%esp),%eax
FLT16:  movl  %eax,%gs:8(%esi)
        test  $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subl  $4,%esi                   # push error_code onto guest frame
        movl  TRAPBOUNCE_error_code(%edx),%eax
FLT17:  movl  %eax,%gs:(%esi)
        testb $TBF_EXCEPTION_CR2,%cl
        jz    2f
        subl  $4,%esi                   # push %cr2 onto guest frame
        movl  TRAPBOUNCE_cr2(%edx),%eax
FLT18:  movl  %eax,%gs:(%esi)
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subl  $16,%esi                  # add DS/ES/FS/GS to failsafe stack frame
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz    nvm86_2
        xorl  %eax,%eax                 # VM86: we write zero selector values
FLT19:  movl  %eax,%gs:(%esi)
FLT20:  movl  %eax,%gs:4(%esi)
FLT21:  movl  %eax,%gs:8(%esi)
FLT22:  movl  %eax,%gs:12(%esi)
        jmp   2f
nvm86_2:movl  UREGS_ds+4(%esp),%eax     # non-VM86: write real selector values
FLT23:  movl  %eax,%gs:(%esi)
        movl  UREGS_es+4(%esp),%eax
FLT24:  movl  %eax,%gs:4(%esi)
        movl  UREGS_fs+4(%esp),%eax
FLT25:  movl  %eax,%gs:8(%esi)
        movl  UREGS_gs+4(%esp),%eax
FLT26:  movl  %eax,%gs:12(%esi)
2:      testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz    nvm86_3
        xorl  %eax,%eax                 /* zero DS-GS, just as a real CPU would */
        movl  %eax,UREGS_ds+4(%esp)
        movl  %eax,UREGS_es+4(%esp)
        movl  %eax,UREGS_fs+4(%esp)
        movl  %eax,UREGS_gs+4(%esp)
nvm86_3:/* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl  $0xfffcbeff,UREGS_eflags+4(%esp)
        mov   %gs,UREGS_ss+4(%esp)
        movl  %esi,UREGS_esp+4(%esp)
        movzwl TRAPBOUNCE_cs(%edx),%eax
        movl  %eax,UREGS_cs+4(%esp)
        movl  TRAPBOUNCE_eip(%edx),%eax
        test  %eax,%eax
        jz    domain_crash_synchronous
        movl  %eax,UREGS_eip+4(%esp)
        movb  $0,TRAPBOUNCE_flags(%edx)
        ret
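
/*
 * Resulting guest-stack frame, from the new guest ESP towards higher
 * addresses (bracketed items are optional, controlled by TRAPBOUNCE_flags):
 *
 *     [ DS, ES, FS, GS ]   TBF_FAILSAFE only
 *     [ CR2 ]              TBF_EXCEPTION_CR2 only
 *     [ error_code ]       TBF_EXCEPTION_ERRCODE only
 *       EIP
 *       CS                 (bits 16-23 carry the saved upcall mask)
 *       EFLAGS
 *     [ ESP, SS ]          only when the interrupted context was not in
 *                          ring 1 (inter-privilege return, or VM86)
 */
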
.section __ex_table,"a"
        .long FLT6,domain_crash_synchronous ,  FLT7,domain_crash_synchronous
        .long FLT8,domain_crash_synchronous ,  FLT9,domain_crash_synchronous
        .long FLT10,domain_crash_synchronous , FLT11,domain_crash_synchronous
        .long FLT12,domain_crash_synchronous , FLT13,domain_crash_synchronous
        .long FLT14,domain_crash_synchronous , FLT15,domain_crash_synchronous
        .long FLT16,domain_crash_synchronous , FLT17,domain_crash_synchronous
        .long FLT18,domain_crash_synchronous , FLT19,domain_crash_synchronous
        .long FLT20,domain_crash_synchronous , FLT21,domain_crash_synchronous
        .long FLT22,domain_crash_synchronous , FLT23,domain_crash_synchronous
        .long FLT24,domain_crash_synchronous , FLT25,domain_crash_synchronous
        .long FLT26,domain_crash_synchronous
.previous

        ALIGN
process_guest_exception_and_events:
        leal  VCPU_trap_bounce(%ebx),%edx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
        jz    test_all_events
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   test_all_events
        jmp   restore_all_xen

ENTRY(divide_error)
        pushl $TRAP_divide_error<<16
        ALIGN
error_code:
        SAVE_ALL_NOSEGREGS(a)
        SET_XEN_SEGMENTS(a)
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
        jz    exception_with_ints_disabled
        sti                             # re-enable interrupts
        xorl  %eax,%eax
        movw  UREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
        pushl %edx                      # push the cpu_user_regs pointer
        GET_CURRENT(%ebx)
        PERFC_INCR(PERFC_exceptions, %eax)
        call  *exception_table(,%eax,4)
        addl  $4,%esp
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        jmp   process_guest_exception_and_events
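
/*
 * Illustrative sketch (not part of the build): the common exception path
 * above, in C. entry_vector indexes exception_table (defined at the end of
 * this file):
 *
 *     exception_table[regs->entry_vector](regs);
 *     if ( (regs->cs & 3) || (regs->eflags & X86_EFLAGS_VM) )
 *         process_guest_exception_and_events();   // interrupted guest context
 *     else
 *         restore_all_xen();                       // interrupted Xen itself
 */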

exception_with_ints_disabled:
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax   # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        pushl %esp
        call  search_pre_exception_table
        addl  $4,%esp
        testl %eax,%eax                 # no fixup code for faulting EIP?
        jz    FATAL_exception_with_ints_disabled
        movl  %eax,UREGS_eip(%esp)
        movl  %esp,%esi
        subl  $4,%esp
        movl  %esp,%edi
        movl  $UREGS_kernel_sizeof/4,%ecx
        rep;  movsl                     # make room for error_code/entry_vector
        movl  UREGS_error_code(%esp),%eax # error_code/entry_vector
        movl  %eax,UREGS_kernel_sizeof(%esp)
        jmp   restore_all_xen           # return to fixup code
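
/*
 * The "rep; movsl" above shifts the saved register frame down the stack by
 * one word and then stores a copy of error_code/entry_vector just above it,
 * so that after restore_all_xen pops the frame and IRETs to the fixup
 * address, that word is left on top of the stack exactly where the FIX1/FIX5
 * fixup code expects to find it.
 */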

FATAL_exception_with_ints_disabled:
        xorl  %esi,%esi
        movw  UREGS_entry_vector(%esp),%si
        movl  %esp,%edx
        pushl %edx                      # push the cpu_user_regs pointer
        pushl %esi                      # push the trapnr (entry vector)
        call  fatal_trap
        ud2

ENTRY(coprocessor_error)
        pushl $TRAP_copro_error<<16
        jmp   error_code

ENTRY(simd_coprocessor_error)
        pushl $TRAP_simd_error<<16
        jmp   error_code

ENTRY(device_not_available)
        pushl $TRAP_no_device<<16
        jmp   error_code

ENTRY(debug)
        pushl $TRAP_debug<<16
        jmp   error_code

ENTRY(int3)
        pushl $TRAP_int3<<16
        jmp   error_code

ENTRY(overflow)
        pushl $TRAP_overflow<<16
        jmp   error_code

ENTRY(bounds)
        pushl $TRAP_bounds<<16
        jmp   error_code

ENTRY(invalid_op)
        pushl $TRAP_invalid_op<<16
        jmp   error_code

ENTRY(coprocessor_segment_overrun)
        pushl $TRAP_copro_seg<<16
        jmp   error_code

ENTRY(invalid_TSS)
        movw  $TRAP_invalid_tss,2(%esp)
        jmp   error_code

ENTRY(segment_not_present)
        movw  $TRAP_no_segment,2(%esp)
        jmp   error_code

ENTRY(stack_segment)
        movw  $TRAP_stack_error,2(%esp)
        jmp   error_code

ENTRY(general_protection)
        movw  $TRAP_gp_fault,2(%esp)
        jmp   error_code

ENTRY(alignment_check)
        movw  $TRAP_alignment_check,2(%esp)
        jmp   error_code

ENTRY(page_fault)
        movw  $TRAP_page_fault,2(%esp)
        jmp   error_code

ENTRY(machine_check)
        pushl $TRAP_machine_check<<16
        jmp   error_code

ENTRY(spurious_interrupt_bug)
        pushl $TRAP_spurious_int<<16
        jmp   error_code

ENTRY(nmi)
        # Save state but do not trash the segment registers!
        # We may otherwise be unable to reload them or copy them to ring 1.
        pushl %eax
        SAVE_ALL_NOSEGREGS(a)

        # Check for hardware problems.
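        # (Port 0x61 is the ISA NMI status/control port: bit 7 set indicates
        #  a memory parity error, bit 6 set indicates an I/O check error.)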
        inb   $0x61,%al
        testb $0x80,%al
        jne   nmi_parity_err
        testb $0x40,%al
        jne   nmi_io_err
        movl  %eax,%ebx

        # Okay, it's almost a normal NMI tick. We can only process it if:
        #  A. We are the outermost Xen activation (in which case we have
        #     the selectors safely saved on our stack)
        #  B. DS and ES contain sane Xen values.
        # In all other cases we bail without touching DS-GS, as we have
        # interrupted an enclosing Xen activation in tricky prologue or
        # epilogue code.
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   do_watchdog_tick
        movl  %ds,%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   defer_nmi
        movl  %es,%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   defer_nmi

do_watchdog_tick:
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        pushl %ebx                      # reason
        pushl %edx                      # regs
        call  do_nmi
        addl  $8,%esp
        jmp   ret_from_intr

defer_nmi:
        movl  $FIXMAP_apic_base,%eax
        # apic_wait_icr_idle()
1:      movl  %ss:APIC_ICR(%eax),%ebx
        testl $APIC_ICR_BUSY,%ebx
        jnz   1b
        # __send_IPI_shortcut(APIC_DEST_SELF, TRAP_deferred_nmi)
        movl  $(APIC_DM_FIXED | APIC_DEST_SELF | APIC_DEST_LOGICAL | \
                TRAP_deferred_nmi),%ss:APIC_ICR(%eax)
        jmp   restore_all_xen

nmi_parity_err:
        # Clear and disable the parity-error line
        andb  $0xf,%al
        orb   $0x4,%al
        outb  %al,$0x61
        cmpb  $'i',%ss:opt_nmi          # nmi=ignore
        je    nmi_out
        bts   $0,%ss:nmi_softirq_reason
        bts   $NMI_SOFTIRQ,%ss:irq_stat
        cmpb  $'d',%ss:opt_nmi          # nmi=dom0
        je    nmi_out
        movl  $(__HYPERVISOR_DS),%edx   # nmi=fatal
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        push  %edx
        call  mem_parity_error
        addl  $4,%esp
nmi_out:movl  %ss:UREGS_eflags(%esp),%eax
        movb  %ss:UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        GET_CURRENT(%ebx)
        jmp   test_all_events

nmi_io_err:
        # Clear and disable the I/O-error line
        andb  $0xf,%al
        orb   $0x8,%al
        outb  %al,$0x61
        cmpb  $'i',%ss:opt_nmi          # nmi=ignore
        je    nmi_out
        bts   $1,%ss:nmi_softirq_reason
        bts   $NMI_SOFTIRQ,%ss:irq_stat
        cmpb  $'d',%ss:opt_nmi          # nmi=dom0
        je    nmi_out
        movl  $(__HYPERVISOR_DS),%edx   # nmi=fatal
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        push  %edx
        call  io_check_error
        addl  $4,%esp
        jmp   nmi_out

ENTRY(setup_vm86_frame)
        # Copies the entire stack frame forwards by 16 bytes.
        .macro copy_vm86_words count=18
        .if \count
        pushl ((\count-1)*4)(%esp)
        popl  ((\count-1)*4)+16(%esp)
        copy_vm86_words "(\count-1)"
        .endif
        .endm
        copy_vm86_words
        addl $16,%esp
        ret
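
/*
 * Illustrative sketch (not part of the build): the unrolled macro above is
 * roughly equivalent to the following, where "esp" is the stack pointer on
 * entry (so the copied words include the return address):
 *
 *     memmove((void *)(esp + 16), (void *)esp, 18 * 4);
 *     esp += 16;      // matches the "addl $16,%esp"
 *
 * i.e. the bottom 18 words of the stack are moved 16 bytes towards higher
 * addresses, and %esp is raised so that it points at the relocated frame
 * (the relocated return address is then popped by the ret).
 */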

do_arch_sched_op:
        # Ensure we return success even if we return via schedule_tail()
        xorl %eax,%eax
        movl %eax,UREGS_eax+4(%esp)
        jmp  do_sched_op

do_switch_vm86:
        # Discard the return address
        addl $4,%esp

        # GS:ESI == Ring-1 stack activation
        movl UREGS_esp(%esp),%esi
VFLT1:  mov  UREGS_ss(%esp),%gs

        # ES:EDI == Ring-0 stack activation
        leal UREGS_eip(%esp),%edi

        # Restore the hypercall-number-clobbered EAX on our stack frame
VFLT2:  movl %gs:(%esi),%eax
        movl %eax,UREGS_eax(%esp)
        addl $4,%esi

        # Copy the VM86 activation from the ring-1 stack to the ring-0 stack
        movl $(UREGS_user_sizeof-UREGS_eip)/4,%ecx
VFLT3:  movl %gs:(%esi),%eax
        stosl
        addl $4,%esi
        loop VFLT3

        # Fix up EFLAGS: IOPL=0, IF=1, VM=1
        andl $~X86_EFLAGS_IOPL,UREGS_eflags(%esp)
        orl  $X86_EFLAGS_IF|X86_EFLAGS_VM,UREGS_eflags(%esp)

        jmp  test_all_events

.section __ex_table,"a"
        .long VFLT1,domain_crash_synchronous
        .long VFLT2,domain_crash_synchronous
        .long VFLT3,domain_crash_synchronous
.previous

.data

ENTRY(exception_table)
        .long do_divide_error
        .long do_debug
        .long 0 # nmi
        .long do_int3
        .long do_overflow
        .long do_bounds
        .long do_invalid_op
        .long math_state_restore
        .long 0 # double fault
        .long do_coprocessor_segment_overrun
        .long do_invalid_TSS
        .long do_segment_not_present
        .long do_stack_segment
        .long do_general_protection
        .long do_page_fault
        .long do_spurious_interrupt_bug
        .long do_coprocessor_error
        .long do_alignment_check
        .long do_machine_check
        .long do_simd_coprocessor_error

ENTRY(hypercall_table)
        .long do_set_trap_table     /*  0 */
        .long do_mmu_update
        .long do_set_gdt
        .long do_stack_switch
        .long do_set_callbacks
        .long do_fpu_taskswitch     /*  5 */
        .long do_arch_sched_op
        .long do_dom0_op
        .long do_set_debugreg
        .long do_get_debugreg
        .long do_update_descriptor  /* 10 */
        .long do_ni_hypercall
        .long do_dom_mem_op
        .long do_multicall
        .long do_update_va_mapping
        .long do_set_timer_op       /* 15 */
        .long do_event_channel_op
        .long do_xen_version
        .long do_console_io
        .long do_physdev_op
        .long do_grant_table_op     /* 20 */
        .long do_vm_assist
        .long do_update_va_mapping_otherdomain
        .long do_switch_vm86
        .long do_boot_vcpu
        .long do_ni_hypercall       /* 25 */
        .long do_mmuext_op
        .long do_acm_op             /* 27 */
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long do_ni_hypercall
        .endr