ia64/xen-unstable

xen/arch/x86/x86_32/entry.S @ 4696:e686528abbfc

bitkeeper revision 1.1389.3.1 (42714dabVSywx2XWGjgw2J54ZylwYg)

Ensure block/yield hypercalls always return a sane return code.

Ensure callers of __enter_scheduler take appropriate arch-specific
action if no context switch occurs (callers from arch/x86 do not
expect to return from a call into the scheduler).

This fixes wildly unintuitive behaviour of do_block() for the
VMX team.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Apr 28 20:55:07 2005 +0000 (2005-04-28)
parents 38a02ee9a9c8
children 123bd8c4b408
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2002-2004, K A Fraser
 * Copyright (c) 1991, 1992 Linus Torvalds
 *
 * Calling back to a guest OS:
 * ===========================
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find
 * out the privilege level of the return code-selector. That code
 * would just be a hassle to write, and would need to account for running
 * off the end of the GDT/LDT, for example. For all callbacks we check
 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that
 * we're safe, as we don't allow a guest OS to install ring-0 privileges into
 * the GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are
 * to ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1
 * rather than the correct ring) and bad things are bound to ensue -- IRET is
 * likely to fault, and we may end up killing the domain (no harm can
 * come to Xen, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 *   { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *     return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, original values for DS/ES/FS/GS are placed on the stack rather
 * than in DS/ES/FS/GS themselves. Why? It saves us loading them, only to
 * have them saved/restored in the guest OS. Furthermore, if we load them we
 * may cause a fault if they are invalid, which is a hassle to deal with. We
 * avoid that problem if we don't load them :-) This property allows us to
 * use the failsafe callback as a fallback: if we ever fault on loading
 * DS/ES/FS/GS on return to ring != 0, we can simply package it up as a
 * return via the failsafe callback, and let the guest OS sort it out
 * (perhaps by killing an application process). Note that we also do this
 * for any faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain if a fault occurs in the following cases:
 *  - while creating a callback stack frame (due to a bad ring-1 stack).
 *  - on a faulting IRET on entry to the failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
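/*
 * Illustrative sketch only (not part of the original source): given the
 * failsafe frame described above, with return_DS at the lowest address, a
 * guest's failsafe handler could in principle be as simple as reloading the
 * saved selectors and returning (hypothetical guest-side code):
 *
 *     failsafe_handler:
 *             popl %ds                # saved return_DS
 *             popl %es                # saved return_ES
 *             popl %fs                # saved return_FS
 *             popl %gs                # saved return_GS
 *             iret                    # return_EIP/CS/EFLAGS[,ESP,SS]
 *
 * As noted above, the guest is expected to sort out whatever made the
 * selectors bad in the first place (perhaps by killing the offending
 * application process).
 */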
#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

#define GET_CURRENT(reg)         \
        movl $STACK_SIZE-4, reg; \
        orl  %esp, reg;          \
        andl $~3,reg;            \
        movl (reg),reg;
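/*
 * GET_CURRENT(reg): computes (%esp | (STACK_SIZE-4)) & ~3, i.e. the address
 * of the topmost word of the current per-CPU hypervisor stack, and loads the
 * value stored there into reg. The assumption, consistent with how the
 * result is used below, is that this top-of-stack slot holds the current
 * exec_domain pointer.
 */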
#ifdef CONFIG_VMX
/*
 * At VMExit time the processor saves the guest selectors, esp, eip,
 * and eflags. Therefore we don't save them, but simply decrement
 * the kernel stack pointer to make it consistent with the stack frame
 * at usual interruption time. The host's eflags are not saved by VMX,
 * so we set them to a fixed value.
 *
 * We also need the room, especially because the orig_eax field is used
 * by do_IRQ(). Compared with cpu_user_regs, we skip pushing the following:
 *   (10)  u32 gs;
 *   (9)   u32 fs;
 *   (8)   u32 ds;
 *   (7)   u32 es;
 *         <- get_stack_bottom() (= HOST_ESP)
 *   (6)   u32 ss;
 *   (5)   u32 esp;
 *   (4)   u32 eflags;
 *   (3)   u32 cs;
 *   (2)   u32 eip;
 *   (2/1) u16 entry_vector;
 *   (1/1) u16 error_code;
 * However, get_stack_bottom() actually returns 20 bytes before the real
 * bottom of the stack to allow space for:
 * domain pointer, DS, ES, FS, GS. Therefore, we effectively skip 6 registers.
 */
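/*
 * In other words (an interpretation of the layout above): items (1)-(6) --
 * ss, esp, eflags, cs, eip and the entry_vector/error_code word -- are the
 * six skipped slots, so NR_SKIPPED_REGS*4 = 24 bytes are reserved below
 * HOST_ESP before the general-purpose registers are pushed, keeping the
 * resulting frame layout compatible with cpu_user_regs as used by do_IRQ().
 */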
#define VMX_MONITOR_EFLAGS 0x202 /* IF on */
#define NR_SKIPPED_REGS    6     /* See the above explanation */
#define VMX_SAVE_ALL_NOSEGREGS \
        pushl $VMX_MONITOR_EFLAGS; \
        popf; \
        subl $(NR_SKIPPED_REGS*4), %esp; \
        movl $0, 0xc(%esp); /* eflags==0 identifies cpu_user_regs as VMX guest */ \
        pushl %eax; \
        pushl %ebp; \
        pushl %edi; \
        pushl %esi; \
        pushl %edx; \
        pushl %ecx; \
        pushl %ebx;

ENTRY(vmx_asm_vmexit_handler)
        /* selectors are restored/saved by VMX */
        VMX_SAVE_ALL_NOSEGREGS
        call SYMBOL_NAME(vmx_vmexit_handler)
        jmp vmx_asm_do_resume

ENTRY(vmx_asm_do_launch)
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $(NR_SKIPPED_REGS*4), %esp
        /* VMLAUNCH */
        .byte 0x0f,0x01,0xc2
        pushf
        call SYMBOL_NAME(vm_launch_fail)
        hlt
        ALIGN

ENTRY(vmx_asm_do_resume)
vmx_test_all_events:
        GET_CURRENT(%ebx)
/*test_all_events:*/
        xorl %ecx,%ecx
        notl %ecx
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl EDOMAIN_processor(%ebx),%eax
        shl $IRQSTAT_shift,%eax
        test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
        jnz vmx_process_softirqs

vmx_restore_all_guest:
        call SYMBOL_NAME(load_cr2)
        /*
         * Check if we are going back to a VMX-based VM.
         * By this time, all VMCS setup must be complete.
         */
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $(NR_SKIPPED_REGS*4), %esp
        /* VMRESUME */
        .byte 0x0f,0x01,0xc3
        pushf
        call SYMBOL_NAME(vm_resume_fail)
        /* Should never reach here */
        hlt

        ALIGN
vmx_process_softirqs:
        sti
        call SYMBOL_NAME(do_softirq)
        jmp vmx_test_all_events
#endif
        ALIGN
restore_all_guest:
        testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
        jnz restore_all_vm86
FLT1:   movl UREGS_ds(%esp),%ds
FLT2:   movl UREGS_es(%esp),%es
FLT3:   movl UREGS_fs(%esp),%fs
FLT4:   movl UREGS_gs(%esp),%gs
restore_all_vm86:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
FLT5:   iret
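/*
 * FLT1-FLT5 above can fault on a bad guest selector or a bad IRET frame.
 * Their pre-exception fixups (__pre_ex_table below) redirect such faults to
 * FIX1/FIX5, which rebuild a register frame and re-dispatch the fault
 * through the normal exception path so that it can ultimately be bounced to
 * the guest via its failsafe callback, as described in the header comment.
 */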
.section .fixup,"ax"
FIX5:   subl $28,%esp
        pushl 28(%esp)                  # error_code/entry_vector
        movl %eax,UREGS_eax+4(%esp)
        movl %ebp,UREGS_ebp+4(%esp)
        movl %edi,UREGS_edi+4(%esp)
        movl %esi,UREGS_esi+4(%esp)
        movl %edx,UREGS_edx+4(%esp)
        movl %ecx,UREGS_ecx+4(%esp)
        movl %ebx,UREGS_ebx+4(%esp)
FIX1:   SET_XEN_SEGMENTS(a)
        movl %eax,%fs
        movl %eax,%gs
        sti
        popl %esi
        pushfl                          # EFLAGS
        movl $__HYPERVISOR_CS,%eax
        pushl %eax                      # CS
        movl $DBLFLT1,%eax
        pushl %eax                      # EIP
        pushl %esi                      # error_code/entry_vector
        jmp error_code
DBLFLT1:GET_CURRENT(%ebx)
        jmp test_all_events
failsafe_callback:
        GET_CURRENT(%ebx)
        leal EDOMAIN_trap_bounce(%ebx),%edx
        movl EDOMAIN_failsafe_addr(%ebx),%eax
        movl %eax,TRAPBOUNCE_eip(%edx)
        movl EDOMAIN_failsafe_sel(%ebx),%eax
        movw %ax,TRAPBOUNCE_cs(%edx)
        movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
        call create_bounce_frame
        xorl %eax,%eax
        movl %eax,UREGS_ds(%esp)
        movl %eax,UREGS_es(%esp)
        movl %eax,UREGS_fs(%esp)
        movl %eax,UREGS_gs(%esp)
        jmp test_all_events
.previous
.section __pre_ex_table,"a"
        .long FLT1,FIX1
        .long FLT2,FIX1
        .long FLT3,FIX1
        .long FLT4,FIX1
        .long FLT5,FIX5
.previous
.section __ex_table,"a"
        .long DBLFLT1,failsafe_callback
.previous
        ALIGN
restore_all_xen:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
        iret

        ALIGN
ENTRY(hypercall)
        subl $4,%esp
        SAVE_ALL(b)
        sti
        GET_CURRENT(%ebx)
        andl $(NR_hypercalls-1),%eax
        PERFC_INCR(PERFC_hypercalls, %eax)
        call *SYMBOL_NAME(hypercall_table)(,%eax,4)
        movl %eax,UREGS_eax(%esp)       # save the return value
test_all_events:
        xorl %ecx,%ecx
        notl %ecx
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl EDOMAIN_processor(%ebx),%eax
        shl $IRQSTAT_shift,%eax
        test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
        jnz process_softirqs
/*test_guest_events:*/
        movl EDOMAIN_vcpu_info(%ebx),%eax
        testb $0xFF,VCPUINFO_upcall_mask(%eax)
        jnz restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%eax)
        jz restore_all_guest
/*process_guest_events:*/
        sti
        leal EDOMAIN_trap_bounce(%ebx),%edx
        movl EDOMAIN_event_addr(%ebx),%eax
        movl %eax,TRAPBOUNCE_eip(%edx)
        movl EDOMAIN_event_sel(%ebx),%eax
        movw %ax,TRAPBOUNCE_cs(%edx)
        movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call create_bounce_frame
        movl EDOMAIN_vcpu_info(%ebx),%eax
        movb $1,VCPUINFO_upcall_mask(%eax) # Upcalls are masked during delivery
        jmp test_all_events

        ALIGN
process_softirqs:
        sti
        call SYMBOL_NAME(do_softirq)
        jmp test_all_events
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                             */
/* %edx == trap_bounce, %ebx == struct exec_domain            */
/* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp. */
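/*
 * Summarising the code below: the frame written to the guest stack runs,
 * from highest to lowest address: the four VM86 data selectors (VM86 guests
 * only), SS/ESP (unless bouncing to an existing ring-1 activation), EFLAGS,
 * CS, EIP, then optionally an error code, optionally %cr2, and, for failsafe
 * bounces, the saved DS/ES/FS/GS selectors.
 */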
create_bounce_frame:
        movl UREGS_eflags+4(%esp),%ecx
        movb UREGS_cs+4(%esp),%cl
        testl $(2|X86_EFLAGS_VM),%ecx
        jz ring1 /* jump if returning to an existing ring-1 activation */
        movl EDOMAIN_kernel_sp(%ebx),%esi
FLT6:   movl EDOMAIN_kernel_ss(%ebx),%gs
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz nvm86_1
        subl $16,%esi                   /* push ES/DS/FS/GS (VM86 stack frame) */
        movl UREGS_es+4(%esp),%eax
FLT7:   movl %eax,%gs:(%esi)
        movl UREGS_ds+4(%esp),%eax
FLT8:   movl %eax,%gs:4(%esi)
        movl UREGS_fs+4(%esp),%eax
FLT9:   movl %eax,%gs:8(%esi)
        movl UREGS_gs+4(%esp),%eax
FLT10:  movl %eax,%gs:12(%esi)
nvm86_1:subl $8,%esi                    /* push SS/ESP (inter-priv iret) */
        movl UREGS_esp+4(%esp),%eax
FLT11:  movl %eax,%gs:(%esi)
        movl UREGS_ss+4(%esp),%eax
FLT12:  movl %eax,%gs:4(%esi)
        jmp 1f
ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl UREGS_esp+4(%esp),%esi
FLT13:  movl UREGS_ss+4(%esp),%gs
1:      /* Construct a stack frame: EFLAGS, CS/EIP */
        subl $12,%esi
        movl UREGS_eip+4(%esp),%eax
FLT14:  movl %eax,%gs:(%esi)
        movl UREGS_cs+4(%esp),%eax
FLT15:  movl %eax,%gs:4(%esi)
        movl UREGS_eflags+4(%esp),%eax
FLT16:  movl %eax,%gs:8(%esi)
        movb TRAPBOUNCE_flags(%edx),%cl
        test $TBF_EXCEPTION_ERRCODE,%cl
        jz 1f
        subl $4,%esi                    # push error_code onto guest frame
        movl TRAPBOUNCE_error_code(%edx),%eax
FLT17:  movl %eax,%gs:(%esi)
        testb $TBF_EXCEPTION_CR2,%cl
        jz 2f
        subl $4,%esi                    # push %cr2 onto guest frame
        movl TRAPBOUNCE_cr2(%edx),%eax
FLT18:  movl %eax,%gs:(%esi)
1:      testb $TBF_FAILSAFE,%cl
        jz 2f
        subl $16,%esi                   # add DS/ES/FS/GS to failsafe stack frame
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz nvm86_2
        xorl %eax,%eax                  # VM86: we write zero selector values
FLT19:  movl %eax,%gs:(%esi)
FLT20:  movl %eax,%gs:4(%esi)
FLT21:  movl %eax,%gs:8(%esi)
FLT22:  movl %eax,%gs:12(%esi)
        jmp 2f
nvm86_2:movl UREGS_ds+4(%esp),%eax      # non-VM86: write real selector values
FLT23:  movl %eax,%gs:(%esi)
        movl UREGS_es+4(%esp),%eax
FLT24:  movl %eax,%gs:4(%esi)
        movl UREGS_fs+4(%esp),%eax
FLT25:  movl %eax,%gs:8(%esi)
        movl UREGS_gs+4(%esp),%eax
FLT26:  movl %eax,%gs:12(%esi)
2:      testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz nvm86_3
        xorl %eax,%eax                  /* zero DS-GS, just as a real CPU would */
        movl %eax,UREGS_ds+4(%esp)
        movl %eax,UREGS_es+4(%esp)
        movl %eax,UREGS_fs+4(%esp)
        movl %eax,UREGS_gs+4(%esp)
nvm86_3:/* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl $0xfffcbeff,UREGS_eflags+4(%esp)
        movl %gs,UREGS_ss+4(%esp)
        movl %esi,UREGS_esp+4(%esp)
        movzwl TRAPBOUNCE_cs(%edx),%eax
        movl %eax,UREGS_cs+4(%esp)
        movl TRAPBOUNCE_eip(%edx),%eax
        movl %eax,UREGS_eip+4(%esp)
        movb $0,TRAPBOUNCE_flags(%edx)
        ret
.section __ex_table,"a"
        .long FLT6,domain_crash_synchronous ,  FLT7,domain_crash_synchronous
        .long FLT8,domain_crash_synchronous ,  FLT9,domain_crash_synchronous
        .long FLT10,domain_crash_synchronous , FLT11,domain_crash_synchronous
        .long FLT12,domain_crash_synchronous , FLT13,domain_crash_synchronous
        .long FLT14,domain_crash_synchronous , FLT15,domain_crash_synchronous
        .long FLT16,domain_crash_synchronous , FLT17,domain_crash_synchronous
        .long FLT18,domain_crash_synchronous , FLT19,domain_crash_synchronous
        .long FLT20,domain_crash_synchronous , FLT21,domain_crash_synchronous
        .long FLT22,domain_crash_synchronous , FLT23,domain_crash_synchronous
        .long FLT24,domain_crash_synchronous , FLT25,domain_crash_synchronous
        .long FLT26,domain_crash_synchronous
.previous
        ALIGN
process_guest_exception_and_events:
        leal EDOMAIN_trap_bounce(%ebx),%edx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
        jz test_all_events
        call create_bounce_frame
        jmp test_all_events

        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movl UREGS_eflags(%esp),%eax
        movb UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz test_all_events
        jmp restore_all_xen
ENTRY(divide_error)
        pushl $TRAP_divide_error<<16
        ALIGN
error_code:
        SAVE_ALL_NOSEGREGS(a)
        SET_XEN_SEGMENTS(a)
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
        jz exception_with_ints_disabled
        sti                             # re-enable interrupts
        xorl %eax,%eax
        movw UREGS_entry_vector(%esp),%ax
        movl %esp,%edx
        pushl %edx                      # push the cpu_user_regs pointer
        GET_CURRENT(%ebx)
        PERFC_INCR(PERFC_exceptions, %eax)
        call *SYMBOL_NAME(exception_table)(,%eax,4)
        addl $4,%esp
        movl UREGS_eflags(%esp),%eax
        movb UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz restore_all_xen
        jmp process_guest_exception_and_events

exception_with_ints_disabled:
        movl UREGS_eflags(%esp),%eax
        movb UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax   # interrupts disabled outside Xen?
        jnz FATAL_exception_with_ints_disabled
        pushl %esp
        call search_pre_exception_table
        addl $4,%esp
        testl %eax,%eax                 # no fixup code for faulting EIP?
        jz FATAL_exception_with_ints_disabled
        movl %eax,UREGS_eip(%esp)
        movl %esp,%esi
        subl $4,%esp
        movl %esp,%edi
        movl $UREGS_kernel_sizeof/4,%ecx
        rep; movsl                      # make room for error_code/entry_vector
        movl UREGS_error_code(%esp),%eax # error_code/entry_vector
        movl %eax,UREGS_kernel_sizeof(%esp)
        jmp restore_all_xen             # return to fixup code

FATAL_exception_with_ints_disabled:
        xorl %esi,%esi
        movw UREGS_entry_vector(%esp),%si
        movl %esp,%edx
        pushl %edx                      # push the cpu_user_regs pointer
        pushl %esi                      # push the trapnr (entry vector)
        call SYMBOL_NAME(fatal_trap)
        ud2
ENTRY(coprocessor_error)
        pushl $TRAP_copro_error<<16
        jmp error_code

ENTRY(simd_coprocessor_error)
        pushl $TRAP_simd_error<<16
        jmp error_code

ENTRY(device_not_available)
        pushl $TRAP_no_device<<16
        jmp error_code

ENTRY(debug)
        pushl $TRAP_debug<<16
        jmp error_code

ENTRY(int3)
        pushl $TRAP_int3<<16
        jmp error_code

ENTRY(overflow)
        pushl $TRAP_overflow<<16
        jmp error_code

ENTRY(bounds)
        pushl $TRAP_bounds<<16
        jmp error_code

ENTRY(invalid_op)
        pushl $TRAP_invalid_op<<16
        jmp error_code

ENTRY(coprocessor_segment_overrun)
        pushl $TRAP_copro_seg<<16
        jmp error_code

ENTRY(invalid_TSS)
        movw $TRAP_invalid_tss,2(%esp)
        jmp error_code

ENTRY(segment_not_present)
        movw $TRAP_no_segment,2(%esp)
        jmp error_code

ENTRY(stack_segment)
        movw $TRAP_stack_error,2(%esp)
        jmp error_code

ENTRY(general_protection)
        movw $TRAP_gp_fault,2(%esp)
        jmp error_code

ENTRY(alignment_check)
        movw $TRAP_alignment_check,2(%esp)
        jmp error_code

ENTRY(page_fault)
        movw $TRAP_page_fault,2(%esp)
        jmp error_code

ENTRY(machine_check)
        pushl $TRAP_machine_check<<16
        jmp error_code

ENTRY(spurious_interrupt_bug)
        pushl $TRAP_spurious_int<<16
        jmp error_code
ENTRY(nmi)
        # Save state but do not trash the segment registers!
        # We may otherwise be unable to reload them or copy them to ring 1.
        pushl %eax
        SAVE_ALL_NOSEGREGS(a)

        # Check for hardware problems.
        inb $0x61,%al
        testb $0x80,%al
        jne nmi_parity_err
        testb $0x40,%al
        jne nmi_io_err
        movl %eax,%ebx

        # Okay, it's almost a normal NMI tick. We can only process it if:
        #  A. We are the outermost Xen activation (in which case we have
        #     the selectors safely saved on our stack)
        #  B. DS and ES contain sane Xen values.
        # In all other cases we bail without touching DS-GS, as we have
        # interrupted an enclosing Xen activation in tricky prologue or
        # epilogue code.
        movl UREGS_eflags(%esp),%eax
        movb UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz do_watchdog_tick
        movl %ds,%eax
        cmpw $(__HYPERVISOR_DS),%ax
        jne defer_nmi
        movl %es,%eax
        cmpw $(__HYPERVISOR_DS),%ax
        jne defer_nmi

do_watchdog_tick:
        movl $(__HYPERVISOR_DS),%edx
        movl %edx,%ds
        movl %edx,%es
        movl %esp,%edx
        pushl %ebx                      # reason
        pushl %edx                      # regs
        call SYMBOL_NAME(do_nmi)
        addl $8,%esp
        jmp ret_from_intr

defer_nmi:
        movl $FIXMAP_apic_base,%eax
        # apic_wait_icr_idle()
1:      movl %ss:APIC_ICR(%eax),%ebx
        testl $APIC_ICR_BUSY,%ebx
        jnz 1b
        # __send_IPI_shortcut(APIC_DEST_SELF, TRAP_deferred_nmi)
        movl $(APIC_DM_FIXED | APIC_DEST_SELF | APIC_DEST_LOGICAL | \
               TRAP_deferred_nmi),%ss:APIC_ICR(%eax)
        jmp restore_all_xen

nmi_parity_err:
        # Clear and disable the parity-error line
        andb $0xf,%al
        orb $0x4,%al
        outb %al,$0x61
        cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
        je nmi_out
        bts $0,%ss:SYMBOL_NAME(nmi_softirq_reason)
        bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
        cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
        je nmi_out
        movl $(__HYPERVISOR_DS),%edx    # nmi=fatal
        movl %edx,%ds
        movl %edx,%es
        movl %esp,%edx
        push %edx
        call SYMBOL_NAME(mem_parity_error)
        addl $4,%esp
nmi_out:movl %ss:UREGS_eflags(%esp),%eax
        movb %ss:UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz restore_all_xen
        movl $(__HYPERVISOR_DS),%edx
        movl %edx,%ds
        movl %edx,%es
        GET_CURRENT(%ebx)
        jmp test_all_events

nmi_io_err:
        # Clear and disable the I/O-error line
        andb $0xf,%al
        orb $0x8,%al
        outb %al,$0x61
        cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
        je nmi_out
        bts $1,%ss:SYMBOL_NAME(nmi_softirq_reason)
        bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
        cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
        je nmi_out
        movl $(__HYPERVISOR_DS),%edx    # nmi=fatal
        movl %edx,%ds
        movl %edx,%es
        movl %esp,%edx
        push %edx
        call SYMBOL_NAME(io_check_error)
        addl $4,%esp
        jmp nmi_out
ENTRY(setup_vm86_frame)
        # Copies the entire stack frame forwards by 16 bytes.
        # (The recursive .macro below expands at assembly time into 18
        #  pushl/popl pairs, each copying one dword 16 bytes higher.)
        .macro copy_vm86_words count=18
        .if \count
        pushl ((\count-1)*4)(%esp)
        popl ((\count-1)*4)+16(%esp)
        copy_vm86_words "(\count-1)"
        .endif
        .endm
        copy_vm86_words
        addl $16,%esp
        ret
do_arch_sched_op:
        # Ensure we return success even if we return via schedule_tail()
        xorl %eax,%eax
        movl %eax,UREGS_eax+4(%esp)
        jmp SYMBOL_NAME(do_sched_op)
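        # (See the changeset description above: a call into the scheduler may
        #  never return to this point -- e.g. when the next context is entered
        #  via schedule_tail() -- so the saved UREGS_eax is seeded with 0
        #  first, ensuring block/yield hypercalls still return a sane code to
        #  the guest.)
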
do_switch_vm86:
        # Discard the return address
        addl $4,%esp

        # GS:ESI == Ring-1 stack activation
        movl UREGS_esp(%esp),%esi
VFLT1:  movl UREGS_ss(%esp),%gs

        # ES:EDI == Ring-0 stack activation
        leal UREGS_eip(%esp),%edi

        # Restore the hypercall-number-clobbered EAX on our stack frame
VFLT2:  movl %gs:(%esi),%eax
        movl %eax,UREGS_eax(%esp)
        addl $4,%esi

        # Copy the VM86 activation from the ring-1 stack to the ring-0 stack
        movl $(UREGS_user_sizeof-UREGS_eip)/4,%ecx
VFLT3:  movl %gs:(%esi),%eax
        stosl
        addl $4,%esi
        loop VFLT3

        # Fix up EFLAGS: IOPL=0, IF=1, VM=1
        andl $~X86_EFLAGS_IOPL,UREGS_eflags(%esp)
        orl $X86_EFLAGS_IF|X86_EFLAGS_VM,UREGS_eflags(%esp)

        jmp test_all_events

.section __ex_table,"a"
        .long VFLT1,domain_crash_synchronous
        .long VFLT2,domain_crash_synchronous
        .long VFLT3,domain_crash_synchronous
.previous
.data

ENTRY(exception_table)
        .long SYMBOL_NAME(do_divide_error)
        .long SYMBOL_NAME(do_debug)
        .long 0 # nmi
        .long SYMBOL_NAME(do_int3)
        .long SYMBOL_NAME(do_overflow)
        .long SYMBOL_NAME(do_bounds)
        .long SYMBOL_NAME(do_invalid_op)
        .long SYMBOL_NAME(math_state_restore)
        .long 0 # double fault
        .long SYMBOL_NAME(do_coprocessor_segment_overrun)
        .long SYMBOL_NAME(do_invalid_TSS)
        .long SYMBOL_NAME(do_segment_not_present)
        .long SYMBOL_NAME(do_stack_segment)
        .long SYMBOL_NAME(do_general_protection)
        .long SYMBOL_NAME(do_page_fault)
        .long SYMBOL_NAME(do_spurious_interrupt_bug)
        .long SYMBOL_NAME(do_coprocessor_error)
        .long SYMBOL_NAME(do_alignment_check)
        .long SYMBOL_NAME(do_machine_check)
        .long SYMBOL_NAME(do_simd_coprocessor_error)
ENTRY(hypercall_table)
        .long SYMBOL_NAME(do_set_trap_table)     /*  0 */
        .long SYMBOL_NAME(do_mmu_update)
        .long SYMBOL_NAME(do_set_gdt)
        .long SYMBOL_NAME(do_stack_switch)
        .long SYMBOL_NAME(do_set_callbacks)
        .long SYMBOL_NAME(do_fpu_taskswitch)     /*  5 */
        .long SYMBOL_NAME(do_arch_sched_op)
        .long SYMBOL_NAME(do_dom0_op)
        .long SYMBOL_NAME(do_set_debugreg)
        .long SYMBOL_NAME(do_get_debugreg)
        .long SYMBOL_NAME(do_update_descriptor)  /* 10 */
        .long SYMBOL_NAME(do_set_fast_trap)
        .long SYMBOL_NAME(do_dom_mem_op)
        .long SYMBOL_NAME(do_multicall)
        .long SYMBOL_NAME(do_update_va_mapping)
        .long SYMBOL_NAME(do_set_timer_op)       /* 15 */
        .long SYMBOL_NAME(do_event_channel_op)
        .long SYMBOL_NAME(do_xen_version)
        .long SYMBOL_NAME(do_console_io)
        .long SYMBOL_NAME(do_physdev_op)
        .long SYMBOL_NAME(do_grant_table_op)     /* 20 */
        .long SYMBOL_NAME(do_vm_assist)
        .long SYMBOL_NAME(do_update_va_mapping_otherdomain)
        .long SYMBOL_NAME(do_switch_vm86)
        .long SYMBOL_NAME(do_boot_vcpu)
        .long SYMBOL_NAME(do_ni_hypercall)       /* 25 */
        .long SYMBOL_NAME(do_mmuext_op)
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long SYMBOL_NAME(do_ni_hypercall)
        .endr
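        # The .rept/.endr block above pads the table out to NR_hypercalls
        # entries with do_ni_hypercall, so the "andl $(NR_hypercalls-1),%eax"
        # mask in ENTRY(hypercall) can never index past a defined entry.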