xen/arch/x86/x86_32/entry.S @ changeset 6756:f752e0c873a6 (ia64/xen-unstable)
author: cl349@firebug.cl.cam.ac.uk
date:   Mon Sep 12 12:32:20 2005 +0000
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2002-2004, K A Fraser
 * Copyright (c) 1991, 1992 Linus Torvalds
 *
 * Calling back to a guest OS:
 * ===========================
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find
 * out what the privilege level of the return code selector is. That code
 * would just be a hassle to write, and would need to account for running
 * off the end of the GDT/LDT, for example. For all callbacks we check
 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that
 * we're safe, as we don't allow a guest OS to install ring-0 privileges into
 * the GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are
 * to ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1
 * rather than the correct ring) and bad things are bound to ensue -- IRET is
 * likely to fault, and we may end up killing the domain (no harm can
 * come to Xen, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *   return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, the original values of DS/ES/FS/GS are placed on the stack rather
 * than in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have
 * them saved/restored in the guest OS. Furthermore, if we load them we may
 * cause a fault if they are invalid, which is a hassle to deal with. We avoid
 * that problem if we don't load them :-) This property allows us to use
 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
 * on return to ring != 0, we can simply package it up as a return via
 * the failsafe callback, and let the guest OS sort it out (perhaps by
 * killing an application process). Note that we also do this for any
 * faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - creating a callback stack frame (due to a bad ring-1 stack).
 *  - faulting IRET on entry to the failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
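
/*
 * Illustrative only -- a rough C view of the failsafe callback frame listed
 * above, as the guest's ring-1 handler sees it at its new %esp (lowest
 * address first). The struct and field names are invented for this sketch;
 * the real layout is produced by the assembly in create_bounce_frame below.
 *
 *     struct failsafe_frame_sketch {
 *         uint32_t ds, es, fs, gs;   // saved data-segment selectors
 *         uint32_t eip;
 *         uint32_t cs;               // bits 16-23 carry saved_upcall_mask
 *         uint32_t eflags;
 *         uint32_t esp, ss;          // present only for inter-privilege returns
 *     };
 */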

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

#define GET_GUEST_REGS(reg)                     \
        movl $~(STACK_SIZE-1),reg;              \
        andl %esp,reg;                          \
        orl  $(STACK_SIZE-CPUINFO_sizeof),reg;

#define GET_CURRENT(reg)                        \
        movl $STACK_SIZE-4, reg;                \
        orl  %esp, reg;                         \
        andl $~3,reg;                           \
        movl (reg),reg;
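
/*
 * Illustrative only -- what the two macros above compute, written as C.
 * The macros assume each physical CPU has a STACK_SIZE-aligned stack whose
 * topmost word holds the 'current' vcpu pointer and whose top CPUINFO_sizeof
 * bytes hold the saved guest register frame. Helper names such as read_esp()
 * are invented for this sketch.
 *
 *     unsigned long esp = read_esp();              // current stack pointer
 *
 *     // GET_GUEST_REGS(reg)
 *     struct cpu_user_regs *guest_regs =
 *         (void *)((esp & ~(STACK_SIZE - 1)) | (STACK_SIZE - CPUINFO_sizeof));
 *
 *     // GET_CURRENT(reg): 'current' is stored in the last word of the stack.
 *     struct vcpu *curr =
 *         *(struct vcpu **)((esp | (STACK_SIZE - 4)) & ~3UL);
 */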

#ifdef CONFIG_VMX
/*
 * At VMExit time the processor saves the guest selectors, esp, eip,
 * and eflags. Therefore we don't save them, but simply decrement
 * the kernel stack pointer to make it consistent with the stack frame
 * at usual interruption time. The host's eflags is not saved by VMX,
 * so we set it to a fixed value.
 *
 * We also need the room, especially because the orig_eax field is used
 * by do_IRQ(). Compared to cpu_user_regs, we skip pushing the following:
 *  (10)  u32 gs;
 *  (9)   u32 fs;
 *  (8)   u32 ds;
 *  (7)   u32 es;
 *        <- get_stack_bottom() (= HOST_ESP)
 *  (6)   u32 ss;
 *  (5)   u32 esp;
 *  (4)   u32 eflags;
 *  (3)   u32 cs;
 *  (2)   u32 eip;
 *  (2/1) u16 entry_vector;
 *  (1/1) u16 error_code;
 * However, get_stack_bottom() actually returns 20 bytes before the real
 * bottom of the stack to allow space for:
 * domain pointer, DS, ES, FS, GS. Therefore, we effectively skip 6 registers.
 */
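
/*
 * Illustrative only -- the accounting behind NR_SKIPPED_REGS below, written
 * out as a C sketch. The field list and the 20-byte offset come from the
 * comment above; this is not the real cpu_user_regs declaration from
 * public/xen.h.
 *
 *     struct vmexit_frame_tail_sketch {   // ascending addresses
 *         uint16_t error_code;            // (1/1)
 *         uint16_t entry_vector;          // (2/1)
 *         uint32_t eip, cs, eflags;       // (2)(3)(4): saved by VMX hardware
 *         uint32_t esp, ss;               // (5)(6):    saved by VMX hardware
 *         // <- get_stack_bottom() (= HOST_ESP) points here
 *         uint32_t es, ds, fs, gs;        // (7)-(10):  not pushed on VMExit
 *     };
 *
 *     // get_stack_bottom() already sits 20 bytes (domain pointer, DS, ES,
 *     // FS, GS) below the true stack bottom, so the software only needs to
 *     // leave room for ss, esp, eflags, cs, eip and the packed
 *     // error_code/entry_vector word: six 32-bit slots, hence
 *     // NR_SKIPPED_REGS == 6 and "subl $(NR_SKIPPED_REGS*4), %esp".
 */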

#define VMX_MONITOR_EFLAGS 0x202 /* IF on */
#define NR_SKIPPED_REGS    6     /* See the above explanation */
#define VMX_SAVE_ALL_NOSEGREGS                                  \
        pushl $VMX_MONITOR_EFLAGS;                              \
        popf;                                                   \
        subl $(NR_SKIPPED_REGS*4), %esp;                        \
        movl $0, 0xc(%esp); /* eflags==0 identifies cpu_user_regs as VMX guest */ \
        pushl %eax;                                             \
        pushl %ebp;                                             \
        pushl %edi;                                             \
        pushl %esi;                                             \
        pushl %edx;                                             \
        pushl %ecx;                                             \
        pushl %ebx;

#define VMX_RESTORE_ALL_NOSEGREGS                               \
        popl %ebx;                                              \
        popl %ecx;                                              \
        popl %edx;                                              \
        popl %esi;                                              \
        popl %edi;                                              \
        popl %ebp;                                              \
        popl %eax;                                              \
        addl $(NR_SKIPPED_REGS*4), %esp

ENTRY(vmx_asm_vmexit_handler)
        /* selectors are restored/saved by VMX */
        VMX_SAVE_ALL_NOSEGREGS
#ifdef TRACE_BUFFER
        call trace_vmexit
#endif
        call vmx_vmexit_handler
        jmp  vmx_asm_do_resume

.macro vmx_asm_common launch initialized
1:
/* vmx_test_all_events */
        .if \initialized
        GET_CURRENT(%ebx)
/*test_all_events:*/
        xorl %ecx,%ecx
        notl %ecx
        cli                            # tests must not race interrupts
/*test_softirqs:*/
        movl VCPU_processor(%ebx),%eax
        shl  $IRQSTAT_shift,%eax
        test %ecx,irq_stat(%eax,1)
        jnz  2f

/* vmx_restore_all_guest */
        call vmx_intr_assist
        call load_cr2
#ifdef TRACE_BUFFER
        call trace_vmentry
#endif
        .endif
        VMX_RESTORE_ALL_NOSEGREGS
        /*
         * Check if we are going back to a VMX-based VM.
         * By this time, all the setup in the VMCS must be complete.
         */
        .if \launch
        /* VMLAUNCH */
        .byte 0x0f,0x01,0xc2
        pushf
        call vm_launch_fail
        .else
        /* VMRESUME */
        .byte 0x0f,0x01,0xc3
        pushf
        call vm_resume_fail
        .endif
        /* Should never reach here */
        hlt

        ALIGN
        .if \initialized
2:
/* vmx_process_softirqs */
        sti
        call do_softirq
        jmp  1b
        ALIGN
        .endif
.endm

ENTRY(vmx_asm_do_launch)
        vmx_asm_common 1 0

ENTRY(vmx_asm_do_resume)
        vmx_asm_common 0 1

ENTRY(vmx_asm_do_relaunch)
        vmx_asm_common 1 1

#endif /* CONFIG_VMX */
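
/*
 * Illustrative only -- the control flow that vmx_asm_common expands to, as
 * C-like pseudocode. The function names match the symbols called above; the
 * helpers and the loop structure are a sketch, not real Xen C code.
 *
 *     for (;;) {
 *         if (initialized) {
 *             struct vcpu *v = get_current();
 *             local_irq_disable();                 // cli: must not race interrupts
 *             if (softirq_pending(v->processor)) { // irq_stat check
 *                 local_irq_enable();              // sti
 *                 do_softirq();
 *                 continue;                        // jmp 1b
 *             }
 *             vmx_intr_assist();
 *             load_cr2();
 *         }
 *         restore_guest_gprs();                    // VMX_RESTORE_ALL_NOSEGREGS
 *         if (launch)
 *             VMLAUNCH();                          // .byte 0x0f,0x01,0xc2
 *         else
 *             VMRESUME();                          // .byte 0x0f,0x01,0xc3
 *         vm_entry_failed();                       // vm_launch_fail / vm_resume_fail,
 *     }                                            // then hlt
 */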

        ALIGN
restore_all_guest:
        testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
        jnz   restore_all_vm86
FLT1:   mov   UREGS_ds(%esp),%ds
FLT2:   mov   UREGS_es(%esp),%es
FLT3:   mov   UREGS_fs(%esp),%fs
FLT4:   mov   UREGS_gs(%esp),%gs
restore_all_vm86:
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        addl  $4,%esp
FLT5:   iret
.section .fixup,"ax"
FIX5:   subl  $28,%esp
        pushl 28(%esp)                 # error_code/entry_vector
        movl  %eax,UREGS_eax+4(%esp)
        movl  %ebp,UREGS_ebp+4(%esp)
        movl  %edi,UREGS_edi+4(%esp)
        movl  %esi,UREGS_esi+4(%esp)
        movl  %edx,UREGS_edx+4(%esp)
        movl  %ecx,UREGS_ecx+4(%esp)
        movl  %ebx,UREGS_ebx+4(%esp)
FIX1:   SET_XEN_SEGMENTS(a)
        movl  %eax,%fs
        movl  %eax,%gs
        sti
        popl  %esi
        pushfl                         # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                     # CS
        movl  $DBLFLT1,%eax
        pushl %eax                     # EIP
        pushl %esi                     # error_code/entry_vector
        jmp   error_code
DBLFLT1:GET_CURRENT(%ebx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%ebx)
        leal  VCPU_trap_bounce(%ebx),%edx
        movl  VCPU_failsafe_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  VCPU_failsafe_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
        call  create_bounce_frame
        xorl  %eax,%eax
        movl  %eax,UREGS_ds(%esp)
        movl  %eax,UREGS_es(%esp)
        movl  %eax,UREGS_fs(%esp)
        movl  %eax,UREGS_gs(%esp)
        jmp   test_all_events
.previous
.section __pre_ex_table,"a"
        .long FLT1,FIX1
        .long FLT2,FIX1
        .long FLT3,FIX1
        .long FLT4,FIX1
        .long FLT5,FIX5
.previous
.section __ex_table,"a"
        .long DBLFLT1,failsafe_callback
.previous
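
/*
 * Illustrative only -- what the __pre_ex_table entries above mean. If one of
 * the FLTn instructions faults while interrupts are disabled, the exception
 * path (exception_with_ints_disabled, below) searches this table for the
 * faulting EIP and, when it finds a match, resumes at the corresponding FIXn
 * address instead of treating the fault as fatal. A rough C model, with
 * invented symbol names:
 *
 *     struct pre_ex_entry_sketch { unsigned long fault_eip, fixup_eip; };
 *     extern struct pre_ex_entry_sketch pre_ex_start[], pre_ex_end[];
 *
 *     unsigned long search_pre_exception_table_sketch(unsigned long eip)
 *     {
 *         struct pre_ex_entry_sketch *e;
 *         for (e = pre_ex_start; e < pre_ex_end; e++)
 *             if (e->fault_eip == eip)
 *                 return e->fixup_eip;    // continue at the fixup address
 *         return 0;                       // no fixup: the fault is fatal
 *     }
 */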

        ALIGN
restore_all_xen:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
        iret

        ALIGN
ENTRY(hypercall)
        subl  $4,%esp
        SAVE_ALL(b)
        sti
        GET_CURRENT(%ebx)
        andl  $(NR_hypercalls-1),%eax
        PERFC_INCR(PERFC_hypercalls, %eax)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        pushl %eax
        pushl UREGS_eip+4(%esp)
        pushl 28(%esp)                 # EBP
        pushl 28(%esp)                 # EDI
        pushl 28(%esp)                 # ESI
        pushl 28(%esp)                 # EDX
        pushl 28(%esp)                 # ECX
        pushl 28(%esp)                 # EBX
        movzb hypercall_args_table(,%eax,1),%ecx
        leal  (%esp,%ecx,4),%edi
        subl  $6,%ecx
        negl  %ecx
        movl  %eax,%esi
        movl  $0xDEADBEEF,%eax
        rep   stosl
        movl  %esi,%eax
#endif
        call  *hypercall_table(,%eax,4)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs used by this hypercall. */
        addl  $24,%esp                 # Shadow parameters
        popl  %ecx                     # Shadow EIP
        cmpl  %ecx,UREGS_eip(%esp)
        popl  %ecx                     # Shadow hypercall index
        jne   skip_clobber             # If EIP has changed then don't clobber
        movzb hypercall_args_table(,%ecx,1),%ecx
        movl  %esp,%edi
        movl  %eax,%esi
        movl  $0xDEADBEEF,%eax
        rep   stosl
        movl  %esi,%eax
skip_clobber:
#endif
        movl  %eax,UREGS_eax(%esp)     # save the return value
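
/*
 * Illustrative only -- the intent of the two NDEBUG blocks above, as a C
 * sketch. hypercall_args_table[] (defined at the end of this file) records
 * how many argument registers each hypercall uses; in debug builds the unused
 * ones are poisoned before the call and the used ones after it, so guests
 * cannot rely on parameter registers surviving a hypercall. The variable
 * names and the six-argument prototype are assumptions for this sketch only.
 *
 *     typedef unsigned long (*hcall_fn_t)(unsigned long, unsigned long,
 *                                         unsigned long, unsigned long,
 *                                         unsigned long, unsigned long);
 *
 *     unsigned int op = regs->eax & (NR_hypercalls - 1);
 *     unsigned int nr = hypercall_args_table[op];          // 0..6 args used
 *     unsigned long saved_eip = regs->eip;
 *
 *     // Debug: poison argument slots nr..5 with 0xDEADBEEF before the call.
 *
 *     regs->eax = ((hcall_fn_t)hypercall_table[op])
 *                     (regs->ebx, regs->ecx, regs->edx,
 *                      regs->esi, regs->edi, regs->ebp);
 *
 *     // Debug: if the hypercall did not change the guest EIP (i.e. it did
 *     // not restart or bounce itself), poison argument slots 0..nr-1 too.
 *     if (regs->eip == saved_eip)
 *         poison_used_args(regs, nr);                      // invented helper
 */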

test_all_events:
        xorl  %ecx,%ecx
        notl  %ecx
        cli                            # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%ebx),%eax
        shl   $IRQSTAT_shift,%eax
        test  %ecx,irq_stat(%eax,1)
        jnz   process_softirqs
/*test_guest_events:*/
        movl  VCPU_vcpu_info(%ebx),%eax
        testb $0xFF,VCPUINFO_upcall_mask(%eax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%eax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leal  VCPU_trap_bounce(%ebx),%edx
        movl  VCPU_event_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  VCPU_event_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
process_softirqs:
        sti
        call  do_softirq
        jmp   test_all_events
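
/*
 * Illustrative only -- the decision sequence implemented by test_all_events,
 * as C-like pseudocode. The field names mirror the VCPU_* and VCPUINFO_*
 * offsets used above; the helper names are invented for this sketch.
 *
 *     for (;;) {
 *         local_irq_disable();                    // cli
 *         if (softirq_pending(v->processor)) {    // irq_stat check
 *             local_irq_enable();                 // process_softirqs
 *             do_softirq();
 *             continue;
 *         }
 *         if (vcpu_info->upcall_mask)             // guest has upcalls masked
 *             break;                              // restore_all_guest
 *         if (!vcpu_info->upcall_pending)         // nothing to deliver
 *             break;                              // restore_all_guest
 *         local_irq_enable();                     // sti
 *         set_trap_bounce(v, v->event_sel, v->event_addr, TBF_INTERRUPT);
 *         create_bounce_frame();                  // deliver the event upcall
 *     }
 *     // fall through to restore_all_guest
 */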

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                              */
/* %edx == trap_bounce, %ebx == struct vcpu                    */
/* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp. */
create_bounce_frame:
        movl  UREGS_eflags+4(%esp),%ecx
        movb  UREGS_cs+4(%esp),%cl
        testl $(2|X86_EFLAGS_VM),%ecx
        jz    ring1 /* jump if returning to an existing ring-1 activation */
        movl  VCPU_kernel_sp(%ebx),%esi
FLT6:   mov   VCPU_kernel_ss(%ebx),%gs
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz    nvm86_1
        subl  $16,%esi                 /* push ES/DS/FS/GS (VM86 stack frame) */
        movl  UREGS_es+4(%esp),%eax
FLT7:   movl  %eax,%gs:(%esi)
        movl  UREGS_ds+4(%esp),%eax
FLT8:   movl  %eax,%gs:4(%esi)
        movl  UREGS_fs+4(%esp),%eax
FLT9:   movl  %eax,%gs:8(%esi)
        movl  UREGS_gs+4(%esp),%eax
FLT10:  movl  %eax,%gs:12(%esi)
nvm86_1:subl  $8,%esi                  /* push SS/ESP (inter-priv iret) */
        movl  UREGS_esp+4(%esp),%eax
FLT11:  movl  %eax,%gs:(%esi)
        movl  UREGS_ss+4(%esp),%eax
FLT12:  movl  %eax,%gs:4(%esi)
        jmp   1f
ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl  UREGS_esp+4(%esp),%esi
FLT13:  mov   UREGS_ss+4(%esp),%gs
1:      /* Construct a stack frame: EFLAGS, CS/EIP */
        movb  TRAPBOUNCE_flags(%edx),%cl
        subl  $12,%esi
        movl  UREGS_eip+4(%esp),%eax
FLT14:  movl  %eax,%gs:(%esi)
        movl  VCPU_vcpu_info(%ebx),%eax
        pushl VCPUINFO_upcall_mask(%eax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                      # TBF_INTERRUPT -> set upcall mask
        orb   %ch,VCPUINFO_upcall_mask(%eax)
        popl  %eax
        shll  $16,%eax                 # Bits 16-23: saved_upcall_mask
        movw  UREGS_cs+4(%esp),%ax     # Bits  0-15: CS
FLT15:  movl  %eax,%gs:4(%esi)
        movl  UREGS_eflags+4(%esp),%eax
FLT16:  movl  %eax,%gs:8(%esi)
        test  $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subl  $4,%esi                  # push error_code onto guest frame
        movl  TRAPBOUNCE_error_code(%edx),%eax
FLT17:  movl  %eax,%gs:(%esi)
        testb $TBF_EXCEPTION_CR2,%cl
        jz    2f
        subl  $4,%esi                  # push %cr2 onto guest frame
        movl  TRAPBOUNCE_cr2(%edx),%eax
FLT18:  movl  %eax,%gs:(%esi)
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subl  $16,%esi                 # add DS/ES/FS/GS to failsafe stack frame
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz    nvm86_2
        xorl  %eax,%eax                # VM86: we write zero selector values
FLT19:  movl  %eax,%gs:(%esi)
FLT20:  movl  %eax,%gs:4(%esi)
FLT21:  movl  %eax,%gs:8(%esi)
FLT22:  movl  %eax,%gs:12(%esi)
        jmp   2f
nvm86_2:movl  UREGS_ds+4(%esp),%eax    # non-VM86: write real selector values
FLT23:  movl  %eax,%gs:(%esi)
        movl  UREGS_es+4(%esp),%eax
FLT24:  movl  %eax,%gs:4(%esi)
        movl  UREGS_fs+4(%esp),%eax
FLT25:  movl  %eax,%gs:8(%esi)
        movl  UREGS_gs+4(%esp),%eax
FLT26:  movl  %eax,%gs:12(%esi)
2:      testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz    nvm86_3
        xorl  %eax,%eax                /* zero DS-GS, just as a real CPU would */
        movl  %eax,UREGS_ds+4(%esp)
        movl  %eax,UREGS_es+4(%esp)
        movl  %eax,UREGS_fs+4(%esp)
        movl  %eax,UREGS_gs+4(%esp)
nvm86_3:/* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl  $0xfffcbeff,UREGS_eflags+4(%esp)
        mov   %gs,UREGS_ss+4(%esp)
        movl  %esi,UREGS_esp+4(%esp)
        movzwl TRAPBOUNCE_cs(%edx),%eax
        movl  %eax,UREGS_cs+4(%esp)
        movl  TRAPBOUNCE_eip(%edx),%eax
        test  %eax,%eax
        jz    domain_crash_synchronous
        movl  %eax,UREGS_eip+4(%esp)
        movb  $0,TRAPBOUNCE_flags(%edx)
        ret
.section __ex_table,"a"
        .long FLT6,domain_crash_synchronous ,  FLT7,domain_crash_synchronous
        .long FLT8,domain_crash_synchronous ,  FLT9,domain_crash_synchronous
        .long FLT10,domain_crash_synchronous , FLT11,domain_crash_synchronous
        .long FLT12,domain_crash_synchronous , FLT13,domain_crash_synchronous
        .long FLT14,domain_crash_synchronous , FLT15,domain_crash_synchronous
        .long FLT16,domain_crash_synchronous , FLT17,domain_crash_synchronous
        .long FLT18,domain_crash_synchronous , FLT19,domain_crash_synchronous
        .long FLT20,domain_crash_synchronous , FLT21,domain_crash_synchronous
        .long FLT22,domain_crash_synchronous , FLT23,domain_crash_synchronous
        .long FLT24,domain_crash_synchronous , FLT25,domain_crash_synchronous
        .long FLT26,domain_crash_synchronous
.previous
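
/*
 * Illustrative only -- the guest-stack frame that create_bounce_frame builds,
 * as a C sketch that mirrors the stores above. 'tb' stands for the trap_bounce
 * in %edx, 'regs' for the interrupted guest frame; the helper and field names
 * are assumptions of this sketch, not real declarations.
 *
 *     uint32_t *sp = inter_priv ? (uint32_t *)v->kernel_sp   // ring > 1 or VM86
 *                               : (uint32_t *)regs->esp;     // existing ring-1 stack
 *     if (vm86) {                          // VM86 iret frame: segments on top
 *         *--sp = regs->gs; *--sp = regs->fs;
 *         *--sp = regs->ds; *--sp = regs->es;
 *     }
 *     if (inter_priv) { *--sp = regs->ss; *--sp = regs->esp; }
 *     *--sp = regs->eflags;
 *     *--sp = (saved_upcall_mask << 16) | regs->cs;  // bits 16-23: upcall mask
 *     *--sp = regs->eip;
 *     if (tb->flags & TBF_EXCEPTION_ERRCODE) {
 *         *--sp = tb->error_code;
 *         if (tb->flags & TBF_EXCEPTION_CR2)
 *             *--sp = tb->cr2;
 *     }
 *     if (tb->flags & TBF_FAILSAFE) {      // zeros instead when in VM86 mode
 *         *--sp = regs->gs; *--sp = regs->fs;
 *         *--sp = regs->es; *--sp = regs->ds;
 *     }
 *     regs->ss  = inter_priv ? v->kernel_ss : regs->ss;
 *     regs->esp = (unsigned long)sp;       // rewrite our own frame so the
 *     regs->cs  = tb->cs;                  // next IRET enters the handler
 *     regs->eip = tb->eip;                 // (with TF/VM/RF/NT cleared in
 *                                          //  regs->eflags, as noted above)
 */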

        ALIGN
process_guest_exception_and_events:
        leal  VCPU_trap_bounce(%ebx),%edx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
        jz    test_all_events
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   test_all_events
        jmp   restore_all_xen

ENTRY(divide_error)
        pushl $TRAP_divide_error<<16
        ALIGN
error_code:
        SAVE_ALL_NOSEGREGS(a)
        SET_XEN_SEGMENTS(a)
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
        jz    exception_with_ints_disabled
        sti                            # re-enable interrupts
        xorl  %eax,%eax
        movw  UREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
        pushl %edx                     # push the cpu_user_regs pointer
        GET_CURRENT(%ebx)
        PERFC_INCR(PERFC_exceptions, %eax)
        call  *exception_table(,%eax,4)
        addl  $4,%esp
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        jmp   process_guest_exception_and_events

exception_with_ints_disabled:
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax  # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        pushl %esp
        call  search_pre_exception_table
        addl  $4,%esp
        testl %eax,%eax                # no fixup code for faulting EIP?
        jz    FATAL_exception_with_ints_disabled
        movl  %eax,UREGS_eip(%esp)
        movl  %esp,%esi
        subl  $4,%esp
        movl  %esp,%edi
        movl  $UREGS_kernel_sizeof/4,%ecx
        rep;  movsl                    # make room for error_code/entry_vector
        movl  UREGS_error_code(%esp),%eax # error_code/entry_vector
        movl  %eax,UREGS_kernel_sizeof(%esp)
        jmp   restore_all_xen          # return to fixup code

FATAL_exception_with_ints_disabled:
        xorl  %esi,%esi
        movw  UREGS_entry_vector(%esp),%si
        movl  %esp,%edx
        pushl %edx                     # push the cpu_user_regs pointer
        pushl %esi                     # push the trapnr (entry vector)
        call  fatal_trap
        ud2

ENTRY(coprocessor_error)
        pushl $TRAP_copro_error<<16
        jmp   error_code

ENTRY(simd_coprocessor_error)
        pushl $TRAP_simd_error<<16
        jmp   error_code

ENTRY(device_not_available)
        pushl $TRAP_no_device<<16
        jmp   error_code

ENTRY(debug)
        pushl $TRAP_debug<<16
        jmp   error_code

ENTRY(int3)
        pushl $TRAP_int3<<16
        jmp   error_code

ENTRY(overflow)
        pushl $TRAP_overflow<<16
        jmp   error_code

ENTRY(bounds)
        pushl $TRAP_bounds<<16
        jmp   error_code

ENTRY(invalid_op)
        pushl $TRAP_invalid_op<<16
        jmp   error_code

ENTRY(coprocessor_segment_overrun)
        pushl $TRAP_copro_seg<<16
        jmp   error_code

ENTRY(invalid_TSS)
        movw  $TRAP_invalid_tss,2(%esp)
        jmp   error_code

ENTRY(segment_not_present)
        movw  $TRAP_no_segment,2(%esp)
        jmp   error_code

ENTRY(stack_segment)
        movw  $TRAP_stack_error,2(%esp)
        jmp   error_code

ENTRY(general_protection)
        movw  $TRAP_gp_fault,2(%esp)
        jmp   error_code

ENTRY(alignment_check)
        movw  $TRAP_alignment_check,2(%esp)
        jmp   error_code

ENTRY(page_fault)
        movw  $TRAP_page_fault,2(%esp)
        jmp   error_code

ENTRY(machine_check)
        pushl $TRAP_machine_check<<16
        jmp   error_code

ENTRY(spurious_interrupt_bug)
        pushl $TRAP_spurious_int<<16
        jmp   error_code
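
/*
 * Illustrative only -- why some stubs above use "pushl $TRAP_x<<16" while
 * others use "movw $TRAP_y,2(%esp)". The top stack slot is a packed pair
 * (matching the error_code/entry_vector fields of cpu_user_regs), roughly:
 *
 *     struct packed_vector_sketch {
 *         uint16_t error_code;     // low half: hardware error code (or 0)
 *         uint16_t entry_vector;   // high half: trap number
 *     };
 *
 * Traps for which the CPU pushes no error code build the whole 32-bit word
 * themselves (vector << 16, error code 0); traps where the CPU has already
 * pushed an error code only overwrite the upper 16 bits at 2(%esp) with the
 * vector.
 */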

ENTRY(nmi)
        # Save state but do not trash the segment registers!
        # We may otherwise be unable to reload them or copy them to ring 1.
        pushl %eax
        SAVE_ALL_NOSEGREGS(a)

        # Check for hardware problems.
        inb   $0x61,%al
        testb $0x80,%al
        jne   nmi_parity_err
        testb $0x40,%al
        jne   nmi_io_err
        movl  %eax,%ebx

        # Okay, it's almost a normal NMI tick. We can only process it if:
        #  A. We are the outermost Xen activation (in which case we have
        #     the selectors safely saved on our stack).
        #  B. DS and ES contain sane Xen values.
        # In all other cases we bail without touching DS-GS, as we have
        # interrupted an enclosing Xen activation in tricky prologue or
        # epilogue code.
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   do_watchdog_tick
        movl  %ds,%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   defer_nmi
        movl  %es,%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   defer_nmi

do_watchdog_tick:
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        pushl %ebx                     # reason
        pushl %edx                     # regs
        call  do_nmi
        addl  $8,%esp
        jmp   ret_from_intr

defer_nmi:
        movl  $FIXMAP_apic_base,%eax
        # apic_wait_icr_idle()
1:      movl  %ss:APIC_ICR(%eax),%ebx
        testl $APIC_ICR_BUSY,%ebx
        jnz   1b
        # __send_IPI_shortcut(APIC_DEST_SELF, TRAP_deferred_nmi)
        movl  $(APIC_DM_FIXED | APIC_DEST_SELF | APIC_DEST_LOGICAL | \
                TRAP_deferred_nmi),%ss:APIC_ICR(%eax)
        jmp   restore_all_xen

nmi_parity_err:
        # Clear and disable the parity-error line
        andb  $0xf,%al
        orb   $0x4,%al
        outb  %al,$0x61
        cmpb  $'i',%ss:opt_nmi         # nmi=ignore
        je    nmi_out
        bts   $0,%ss:nmi_softirq_reason
        bts   $NMI_SOFTIRQ,%ss:irq_stat
        cmpb  $'d',%ss:opt_nmi         # nmi=dom0
        je    nmi_out
        movl  $(__HYPERVISOR_DS),%edx  # nmi=fatal
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        push  %edx
        call  mem_parity_error
        addl  $4,%esp
nmi_out:movl  %ss:UREGS_eflags(%esp),%eax
        movb  %ss:UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        GET_CURRENT(%ebx)
        jmp   test_all_events

nmi_io_err:
        # Clear and disable the I/O-error line
        andb  $0xf,%al
        orb   $0x8,%al
        outb  %al,$0x61
        cmpb  $'i',%ss:opt_nmi         # nmi=ignore
        je    nmi_out
        bts   $1,%ss:nmi_softirq_reason
        bts   $NMI_SOFTIRQ,%ss:irq_stat
        cmpb  $'d',%ss:opt_nmi         # nmi=dom0
        je    nmi_out
        movl  $(__HYPERVISOR_DS),%edx  # nmi=fatal
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        push  %edx
        call  io_check_error
        addl  $4,%esp
        jmp   nmi_out
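
/*
 * Illustrative only -- the system control port (0x61) handling used by the
 * NMI paths above, as a C sketch. On reads, bit 7 reports a memory parity
 * error and bit 6 an I/O channel check; writing bit 2 or bit 3 clears and
 * disables the corresponding source, which is what the andb/orb/outb
 * sequences do. The helper names below are invented.
 *
 *     uint8_t reason = inb(0x61);
 *     if (reason & 0x80)
 *         nmi_parity_error(reason);        // nmi_parity_err
 *     else if (reason & 0x40)
 *         nmi_io_check_error(reason);      // nmi_io_err
 *     else
 *         nmi_watchdog_tick();             // do_watchdog_tick / defer_nmi
 *
 *     // e.g. in the parity path:
 *     outb((reason & 0x0f) | 0x04, 0x61);  // clear + disable the parity line
 */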

ENTRY(setup_vm86_frame)
        # Copies the entire stack frame forwards by 16 bytes.
        .macro copy_vm86_words count=18
        .if \count
        pushl ((\count-1)*4)(%esp)
        popl  ((\count-1)*4)+16(%esp)
        copy_vm86_words "(\count-1)"
        .endif
        .endm
        copy_vm86_words
        addl  $16,%esp
        ret
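
/*
 * Illustrative only -- what the recursive copy_vm86_words macro expands to,
 * as plain C: the 18 words at the stack pointer (including the return
 * address) are slid 16 bytes towards higher addresses, highest word first so
 * the overlapping copy is safe, and %esp is then advanced to match.
 *
 *     void setup_vm86_frame_sketch(uint32_t *esp)
 *     {
 *         int i;
 *         for (i = 17; i >= 0; i--)     // equivalent to
 *             esp[i + 4] = esp[i];      // memmove(esp + 4, esp, 18 * 4)
 *         // ...followed by "addl $16,%esp" in the assembly.
 *     }
 */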

do_arch_sched_op:
        # Ensure we return success even if we return via schedule_tail()
        xorl %eax,%eax
        GET_GUEST_REGS(%ecx)
        movl %eax,UREGS_eax(%ecx)
        jmp  do_sched_op

do_switch_vm86:
        # Reset the stack pointer
        GET_GUEST_REGS(%ecx)
        movl %ecx,%esp

        # GS:ESI == Ring-1 stack activation
        movl UREGS_esp(%esp),%esi
VFLT1:  mov  UREGS_ss(%esp),%gs

        # ES:EDI == Ring-0 stack activation
        leal UREGS_eip(%esp),%edi

        # Restore the hypercall-number-clobbered EAX on our stack frame
VFLT2:  movl %gs:(%esi),%eax
        movl %eax,UREGS_eax(%esp)
        addl $4,%esi

        # Copy the VM86 activation from the ring-1 stack to the ring-0 stack
        movl $(UREGS_user_sizeof-UREGS_eip)/4,%ecx
VFLT3:  movl %gs:(%esi),%eax
        stosl
        addl $4,%esi
        loop VFLT3

        # Fix up EFLAGS: IOPL=0, IF=1, VM=1
        andl $~X86_EFLAGS_IOPL,UREGS_eflags(%esp)
        orl  $X86_EFLAGS_IF|X86_EFLAGS_VM,UREGS_eflags(%esp)

        jmp  test_all_events

.section __ex_table,"a"
        .long VFLT1,domain_crash_synchronous
        .long VFLT2,domain_crash_synchronous
        .long VFLT3,domain_crash_synchronous
.previous
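
/*
 * Illustrative only -- the do_switch_vm86 path above, as C-like pseudocode.
 * The guest passes a replacement register image on its ring-1 stack, and Xen
 * copies it over the frame it will IRET from. The names mirror the UREGS_*
 * offsets; the helpers and the memcpy framing are assumptions of this sketch.
 *
 *     struct cpu_user_regs *regs = guest_cpu_user_regs();
 *     const uint32_t *src = (const uint32_t *)regs->esp;  // ring-1 stack (GS:ESI)
 *
 *     regs->eax = *src++;          // EAX was clobbered by the hypercall number
 *
 *     // Copy EIP, CS, EFLAGS, ESP, SS, ES, DS, FS, GS from the guest image.
 *     memcpy(&regs->eip, src, UREGS_user_sizeof - UREGS_eip);
 *
 *     regs->eflags &= ~X86_EFLAGS_IOPL;                    // IOPL = 0
 *     regs->eflags |= X86_EFLAGS_IF | X86_EFLAGS_VM;       // IF = 1, VM = 1
 */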

.data

ENTRY(exception_table)
        .long do_divide_error
        .long do_debug
        .long 0 # nmi
        .long do_int3
        .long do_overflow
        .long do_bounds
        .long do_invalid_op
        .long math_state_restore
        .long 0 # double fault
        .long do_coprocessor_segment_overrun
        .long do_invalid_TSS
        .long do_segment_not_present
        .long do_stack_segment
        .long do_general_protection
        .long do_page_fault
        .long do_spurious_interrupt_bug
        .long do_coprocessor_error
        .long do_alignment_check
        .long do_machine_check
        .long do_simd_coprocessor_error

ENTRY(hypercall_table)
        .long do_set_trap_table     /*  0 */
        .long do_mmu_update
        .long do_set_gdt
        .long do_stack_switch
        .long do_set_callbacks
        .long do_fpu_taskswitch     /*  5 */
        .long do_arch_sched_op
        .long do_dom0_op
        .long do_set_debugreg
        .long do_get_debugreg
        .long do_update_descriptor  /* 10 */
        .long do_ni_hypercall
        .long do_memory_op
        .long do_multicall
        .long do_update_va_mapping
        .long do_set_timer_op       /* 15 */
        .long do_event_channel_op
        .long do_xen_version
        .long do_console_io
        .long do_physdev_op
        .long do_grant_table_op     /* 20 */
        .long do_vm_assist
        .long do_update_va_mapping_otherdomain
        .long do_switch_vm86
        .long do_boot_vcpu
        .long do_ni_hypercall       /* 25 */
        .long do_mmuext_op
        .long do_acm_op             /* 27 */
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long do_ni_hypercall
        .endr

ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table */                /*  0 */
        .byte 4 /* do_mmu_update */
        .byte 2 /* do_set_gdt */
        .byte 2 /* do_stack_switch */
        .byte 4 /* do_set_callbacks */
        .byte 1 /* do_fpu_taskswitch */                /*  5 */
        .byte 2 /* do_arch_sched_op */
        .byte 1 /* do_dom0_op */
        .byte 2 /* do_set_debugreg */
        .byte 1 /* do_get_debugreg */
        .byte 4 /* do_update_descriptor */             /* 10 */
        .byte 0 /* do_ni_hypercall */
        .byte 2 /* do_memory_op */
        .byte 2 /* do_multicall */
        .byte 4 /* do_update_va_mapping */
        .byte 2 /* do_set_timer_op */                  /* 15 */
        .byte 1 /* do_event_channel_op */
        .byte 2 /* do_xen_version */
        .byte 3 /* do_console_io */
        .byte 1 /* do_physdev_op */
        .byte 3 /* do_grant_table_op */                /* 20 */
        .byte 2 /* do_vm_assist */
        .byte 5 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_switch_vm86 */
        .byte 2 /* do_boot_vcpu */
        .byte 0 /* do_ni_hypercall */                  /* 25 */
        .byte 4 /* do_mmuext_op */
        .byte 1 /* do_acm_op */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall */
        .endr