xen/arch/x86/x86_32/entry.S @ 3761:118e0a3af9b0 (ia64/xen-unstable)

bitkeeper revision 1.1159.1.564 (420b44edsb8XzPev-TiGW16GSsCW6g)

More x86_64 stuff. Added hypercalls to register a user-space pagetable,
modify FS/GS base addresses, and switch to user mode. User mode switches
back to kernel mode automatically on executing the SYSCALL instruction.
Still to do:
  1. getdomaininfo needs to include pagetable_user
  2. get writable and shadow pagetables working
  3. testing
Signed-off-by: keir.fraser@cl.cam.ac.uk

author:   kaf24@scramble.cl.cam.ac.uk
date:     Thu Feb 10 11:26:37 2005 +0000
parents:  2318bacbb21e
children: 0a4b76b6b5a0
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2002-2004, K A Fraser
 * Copyright (c) 1991, 1992 Linus Torvalds
 *
 * Calling back to a guest OS:
 * ===========================
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find
 * out the privilege level of the return code-selector. That code
 * would just be a hassle to write, and would need to account for running
 * off the end of the GDT/LDT, for example. For all callbacks we check
 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that
 * we're safe, as we don't allow a guest OS to install ring-0 privileges into
 * the GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are
 * to ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1
 * rather than the correct ring) and bad things are bound to ensue -- IRET is
 * likely to fault, and we may end up killing the domain (no harm can
 * come to Xen, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 *    { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *      return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, original values for DS/ES/FS/GS are placed on the stack rather
 * than in DS/ES/FS/GS themselves. Why? It saves us loading them, only to
 * have them saved/restored in the guest OS. Furthermore, if we load them we
 * may cause a fault if they are invalid, which is a hassle to deal with. We
 * avoid that problem if we don't load them :-) This property allows us to
 * use the failsafe callback as a fallback: if we ever fault on loading
 * DS/ES/FS/GS on return to ring != 0, we can simply package it up as a
 * return via the failsafe callback, and let the guest OS sort it out
 * (perhaps by killing an application process). Note that we also do this
 * for any faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - creating a callback stack frame (due to a bad ring-1 stack).
 *  - faulting IRET on entry to the failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

#define GET_CURRENT(reg)         \
        movl $STACK_SIZE-4, reg; \
        orl  %esp, reg;          \
        andl $~3,reg;            \
        movl (reg),reg;
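/*
 * GET_CURRENT exploits the fact that each CPU stack is assumed to be
 * STACK_SIZE-aligned and that the pointer to the currently-executing domain
 * is stored in the stack's last word: OR-ing %esp with STACK_SIZE-4 yields
 * the address of that word (the AND clears the low two bits in case %esp
 * was not word-aligned), and the final load fetches the pointer itself.
 */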
#ifdef CONFIG_VMX
/*
 * At VMExit time the processor saves the guest selectors, esp, eip,
 * and eflags. Therefore we don't save them, but simply decrement
 * the kernel stack pointer to make it consistent with the stack frame
 * at usual interruption time. The host's eflags are not saved by VMX,
 * so we set them to a fixed value.
 *
 * We also need the room, especially because the orig_eax field is used
 * by do_IRQ(). Compared to xen_regs, we skip pushing the following:
 *   (10) u32 gs;
 *   (9)  u32 fs;
 *   (8)  u32 ds;
 *   (7)  u32 es;
 *       <- get_stack_bottom() (= HOST_ESP)
 *   (6)  u32 ss;
 *   (5)  u32 esp;
 *   (4)  u32 eflags;
 *   (3)  u32 cs;
 *   (2)  u32 eip;
 *   (2/1) u16 entry_vector;
 *   (1/1) u16 error_code;
 * However, get_stack_bottom() actually returns 20 bytes before the real
 * bottom of the stack to allow space for:
 * domain pointer, DS, ES, FS, GS. Therefore, we effectively skip 6 registers.
 */
#define VMX_MONITOR_EFLAGS 0x202 /* IF on */
#define NR_SKIPPED_REGS    6     /* See the above explanation */
#define VMX_SAVE_ALL_NOSEGREGS \
        pushl $VMX_MONITOR_EFLAGS; \
        popf; \
        subl $(NR_SKIPPED_REGS*4), %esp; \
        pushl %eax; \
        pushl %ebp; \
        pushl %edi; \
        pushl %esi; \
        pushl %edx; \
        pushl %ecx; \
        pushl %ebx;
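/*
 * Note: the pushes above lay out the head of a struct xen_regs image
 * (ebx at the lowest address), so C handlers entered with this stack
 * pointer can treat the frame as a partial xen_regs -- the skipped
 * selector/iret words are the fields VMX saves in the VMCS instead.
 */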
ENTRY(vmx_asm_vmexit_handler)
        /* selectors are restored/saved by VMX */
        VMX_SAVE_ALL_NOSEGREGS
        call SYMBOL_NAME(vmx_vmexit_handler)
        jmp  vmx_asm_do_resume

ENTRY(vmx_asm_do_launch)
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $(NR_SKIPPED_REGS*4), %esp
        /* VMLAUNCH */
        .byte 0x0f,0x01,0xc2
        pushf
        call SYMBOL_NAME(vm_launch_fail)
        hlt
        ALIGN

ENTRY(vmx_asm_do_resume)
vmx_test_all_events:
        GET_CURRENT(%ebx)
/* test_all_events: */
        xorl %ecx,%ecx
        notl %ecx
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl EDOMAIN_processor(%ebx),%eax
        shl  $6,%eax                    # sizeof(irq_cpustat) == 64
        test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
        jnz  vmx_process_softirqs
vmx_restore_all_guest:
        call SYMBOL_NAME(load_cr2)
        /*
         * Check if we are going back to a VMX-based VM.
         * By this point, all VMCS setup must be complete.
         */
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $(NR_SKIPPED_REGS*4), %esp
        /* VMRESUME */
        .byte 0x0f,0x01,0xc3
        pushf
        call SYMBOL_NAME(vm_resume_fail)
        /* Should never reach here */
        hlt

        ALIGN
vmx_process_softirqs:
        sti
        call SYMBOL_NAME(do_softirq)
        jmp  vmx_test_all_events
#endif
        ALIGN
restore_all_guest:
        btr  $_TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
        jc   failsafe_callback
        testl $X86_EFLAGS_VM,XREGS_eflags(%esp)
        jnz  restore_all_vm86
FLT1:   movl XREGS_ds(%esp),%ds
FLT2:   movl XREGS_es(%esp),%es
FLT3:   movl XREGS_fs(%esp),%fs
FLT4:   movl XREGS_gs(%esp),%gs
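        /*
         * VM86 frames skip the selector reloads above: the IRET below
         * restores DS/ES/FS/GS from the extended VM86 stack frame itself.
         */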
restore_all_vm86:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
FLT5:   iret
.section .fixup,"ax"
FIX5:   subl  $28,%esp
        pushl 28(%esp)                  # error_code/entry_vector
        movl  %eax,XREGS_eax+4(%esp)
        movl  %ebp,XREGS_ebp+4(%esp)
        movl  %edi,XREGS_edi+4(%esp)
        movl  %esi,XREGS_esi+4(%esp)
        movl  %edx,XREGS_edx+4(%esp)
        movl  %ecx,XREGS_ecx+4(%esp)
        movl  %ebx,XREGS_ebx+4(%esp)
FIX1:   SET_XEN_SEGMENTS(a)
        movl  %eax,%fs
        movl  %eax,%gs
        sti
        popl  %esi
        pushfl                          # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                      # CS
        movl  $DBLFLT1,%eax
        pushl %eax                      # EIP
        pushl %esi                      # error_code/entry_vector
        jmp   error_code
DBLFLT1:GET_CURRENT(%ebx)
        jmp   test_all_events
DBLFIX1:GET_CURRENT(%ebx)
        bts   $_TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
        jc    domain_crash              # cannot reenter failsafe code
        jmp   test_all_events           # will return via failsafe code
.previous
.section __pre_ex_table,"a"
        .long FLT1,FIX1
        .long FLT2,FIX1
        .long FLT3,FIX1
        .long FLT4,FIX1
        .long FLT5,FIX5
.previous
.section __ex_table,"a"
        .long DBLFLT1,DBLFIX1
.previous
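/*
 * __pre_ex_table maps faulting EIPs to fixup code for faults that can hit
 * with guest state half-restored (the segment loads and the final IRET);
 * it is what search_pre_exception_table() consults when a fault arrives
 * with interrupts disabled. __ex_table is the ordinary fixup table.
 */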
/* No special register assumptions */
failsafe_callback:
        GET_CURRENT(%ebx)
        leal EDOMAIN_trap_bounce(%ebx),%edx
        movl EDOMAIN_failsafe_addr(%ebx),%eax
        movl %eax,TRAPBOUNCE_eip(%edx)
        movl EDOMAIN_failsafe_sel(%ebx),%eax
        movw %ax,TRAPBOUNCE_cs(%edx)
        movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
        call create_bounce_frame
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
FLT6:   iret
.section .fixup,"ax"
FIX6:   pushl %ebx
        GET_CURRENT(%ebx)
        orb   $TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
        pop   %ebx
        jmp   FIX5
.section __pre_ex_table,"a"
        .long FLT6,FIX6
.previous
        ALIGN
restore_all_xen:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
        iret

        ALIGN
ENTRY(hypercall)
        subl $4,%esp
        SAVE_ALL(b)
        sti
        GET_CURRENT(%ebx)
        andl $(NR_hypercalls-1),%eax
        call *SYMBOL_NAME(hypercall_table)(,%eax,4)
        movl %eax,XREGS_eax(%esp)       # save the return value
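        /*
         * Dispatch note: the guest passes the hypercall index in EAX
         * (masked into range above, so a bogus index lands on a padding
         * do_ni_hypercall entry rather than stray memory), with arguments
         * in EBX/ECX/EDX/ESI/EDI; SAVE_ALL(b) has already laid those
         * registers out on the stack where the C handler expects its
         * parameters. The return value is stored back into the saved EAX
         * slot so the guest sees it in EAX after the IRET.
         */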
test_all_events:
        xorl %ecx,%ecx
        notl %ecx
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl EDOMAIN_processor(%ebx),%eax
        shl  $6,%eax                    # sizeof(irq_cpustat) == 64
        test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
        jnz  process_softirqs
/*test_guest_events:*/
        movl EDOMAIN_vcpu_info(%ebx),%eax
        testb $0xFF,VCPUINFO_upcall_mask(%eax)
        jnz  restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%eax)
        jz   restore_all_guest
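        /*
         * An event upcall is delivered only if one is pending and upcalls
         * are not masked: upcall_mask acts as a virtual IF flag for the
         * guest, and is set again below for the duration of delivery.
         */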
/*process_guest_events:*/
        leal EDOMAIN_trap_bounce(%ebx),%edx
        movl EDOMAIN_event_addr(%ebx),%eax
        movl %eax,TRAPBOUNCE_eip(%edx)
        movl EDOMAIN_event_sel(%ebx),%eax
        movw %ax,TRAPBOUNCE_cs(%edx)
        movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call create_bounce_frame
        movl EDOMAIN_vcpu_info(%ebx),%eax
        movb $1,VCPUINFO_upcall_mask(%eax) # Upcalls are masked during delivery
        jmp  restore_all_guest

        ALIGN
process_softirqs:
        sti
        call SYMBOL_NAME(do_softirq)
        jmp  test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                                        */
/* %edx == trap_bounce, %ebx == task_struct                              */
/* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp.     */
create_bounce_frame:
        movl XREGS_eflags+4(%esp),%ecx
        movb XREGS_cs+4(%esp),%cl
        testl $(2|X86_EFLAGS_VM),%ecx
        jz   ring1  /* jump if returning to an existing ring-1 activation */
        /* obtain ss/esp from TSS -- no current ring-1 activations */
        movl EDOMAIN_processor(%ebx),%eax
        /* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
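        /* (8320 = 8192 + 128 = (1<<13) + (1<<7), hence two shifts + add) */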
        movl %eax, %ecx
        shll $7, %ecx
        shll $13, %eax
        addl %ecx,%eax
        addl $init_tss + 12,%eax
        movl (%eax),%esi                /* tss->esp1 */
FLT7:   movl 4(%eax),%gs                /* tss->ss1  */
        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
        jz   nvm86_1
        subl $16,%esi                   /* push ES/DS/FS/GS (VM86 stack frame) */
        movl XREGS_es+4(%esp),%eax
FLT8:   movl %eax,%gs:(%esi)
        movl XREGS_ds+4(%esp),%eax
FLT9:   movl %eax,%gs:4(%esi)
        movl XREGS_fs+4(%esp),%eax
FLT10:  movl %eax,%gs:8(%esi)
        movl XREGS_gs+4(%esp),%eax
FLT11:  movl %eax,%gs:12(%esi)
nvm86_1:subl $8,%esi                    /* push SS/ESP (inter-priv iret) */
        movl XREGS_esp+4(%esp),%eax
FLT12:  movl %eax,%gs:(%esi)
        movl XREGS_ss+4(%esp),%eax
FLT13:  movl %eax,%gs:4(%esi)
        jmp  1f
ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl XREGS_esp+4(%esp),%esi
FLT14:  movl XREGS_ss+4(%esp),%gs
1:      /* Construct a stack frame: EFLAGS, CS/EIP */
        subl $12,%esi
        movl XREGS_eip+4(%esp),%eax
FLT15:  movl %eax,%gs:(%esi)
        movl XREGS_cs+4(%esp),%eax
FLT16:  movl %eax,%gs:4(%esi)
        movl XREGS_eflags+4(%esp),%eax
FLT17:  movl %eax,%gs:8(%esi)
        movb TRAPBOUNCE_flags(%edx),%cl
        test $TBF_EXCEPTION_ERRCODE,%cl
        jz   1f
        subl $4,%esi                    # push error_code onto guest frame
        movl TRAPBOUNCE_error_code(%edx),%eax
FLT18:  movl %eax,%gs:(%esi)
        testb $TBF_EXCEPTION_CR2,%cl
        jz   2f
        subl $4,%esi                    # push %cr2 onto guest frame
        movl TRAPBOUNCE_cr2(%edx),%eax
FLT19:  movl %eax,%gs:(%esi)
1:      testb $TBF_FAILSAFE,%cl
        jz   2f
        subl $16,%esi                   # add DS/ES/FS/GS to failsafe stack frame
        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
        jz   nvm86_2
        xorl %eax,%eax                  # VM86: we write zero selector values
FLT20:  movl %eax,%gs:(%esi)
FLT21:  movl %eax,%gs:4(%esi)
FLT22:  movl %eax,%gs:8(%esi)
FLT23:  movl %eax,%gs:12(%esi)
        jmp  2f
nvm86_2:movl XREGS_ds+4(%esp),%eax      # non-VM86: write real selector values
FLT24:  movl %eax,%gs:(%esi)
        movl XREGS_es+4(%esp),%eax
FLT25:  movl %eax,%gs:4(%esi)
        movl XREGS_fs+4(%esp),%eax
FLT26:  movl %eax,%gs:8(%esi)
        movl XREGS_gs+4(%esp),%eax
FLT27:  movl %eax,%gs:12(%esi)
2:      movb $0,TRAPBOUNCE_flags(%edx)
        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
        jz   nvm86_3
        xorl %eax,%eax                  /* zero DS-GS, just as a real CPU would */
        movl %eax,XREGS_ds+4(%esp)
        movl %eax,XREGS_es+4(%esp)
        movl %eax,XREGS_fs+4(%esp)
        movl %eax,XREGS_gs+4(%esp)
nvm86_3:/* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl $0xfffcbeff,XREGS_eflags+4(%esp)
        movl %gs,XREGS_ss+4(%esp)
        movl %esi,XREGS_esp+4(%esp)
        movzwl TRAPBOUNCE_cs(%edx),%eax
        movl %eax,XREGS_cs+4(%esp)
        movl TRAPBOUNCE_eip(%edx),%eax
        movl %eax,XREGS_eip+4(%esp)
        ret
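        /*
         * On return, the saved frame's CS:EIP and SS:ESP now describe the
         * guest's callback handler and the frame just built, so the normal
         * restore_all_guest path IRETs straight into that handler.
         */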
.section .fixup,"ax"
FIX7:   sti
        popl  %esi
        addl  $4,%esp                   # Discard create_b_frame return address
        pushfl                          # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                      # CS
        movl  $DBLFLT2,%eax
        pushl %eax                      # EIP
        pushl %esi                      # error_code/entry_vector
        jmp   error_code
DBLFLT2:jmp   process_guest_exception_and_events
.previous
.section __pre_ex_table,"a"
        .long FLT7,FIX7  , FLT8,FIX7  , FLT9,FIX7  , FLT10,FIX7
        .long FLT11,FIX7 , FLT12,FIX7 , FLT13,FIX7 , FLT14,FIX7
        .long FLT15,FIX7 , FLT16,FIX7 , FLT17,FIX7 , FLT18,FIX7
        .long FLT19,FIX7 , FLT20,FIX7 , FLT21,FIX7 , FLT22,FIX7
        .long FLT23,FIX7 , FLT24,FIX7 , FLT25,FIX7 , FLT26,FIX7 , FLT27,FIX7
.previous
.section __ex_table,"a"
        .long DBLFLT2,domain_crash
.previous
        ALIGN
process_guest_exception_and_events:
        leal EDOMAIN_trap_bounce(%ebx),%edx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
        jz   test_all_events
        cli  # create_bounce_frame needs CLI for pre-exceptions to work
        call create_bounce_frame
        jmp  test_all_events

        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movl XREGS_eflags(%esp),%eax
        movb XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz  test_all_events
        jmp  restore_all_xen

ENTRY(divide_error)
        pushl $TRAP_divide_error<<16
        ALIGN
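/*
 * error_code expects a word on the stack whose low 16 bits hold the error
 * code and whose high 16 bits hold the entry vector. Stubs for exceptions
 * without a CPU-pushed error code push the whole word (vector<<16, error
 * code zero); stubs for exceptions where the CPU does push one (e.g.
 * invalid_TSS below) write only the vector into the high half with a movw.
 */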
error_code:
        SAVE_ALL_NOSEGREGS(a)
        SET_XEN_SEGMENTS(a)
        testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%esp)
        jz    exception_with_ints_disabled
1:      sti                             # re-enable interrupts
        xorl  %eax,%eax
        movw  XREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
        pushl %edx                      # push the xen_regs pointer
        GET_CURRENT(%ebx)
        call  *SYMBOL_NAME(exception_table)(,%eax,4)
        addl  $4,%esp
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        jmp   process_guest_exception_and_events

exception_with_ints_disabled:
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax   # interrupts disabled outside Xen?
        jnz   1b                        # it really does happen!
                                        #  (e.g., DOM0 X server)
        pushl XREGS_eip(%esp)
        call  search_pre_exception_table
        addl  $4,%esp
        testl %eax,%eax                 # no fixup code for faulting EIP?
        jz    FATAL_exception_with_ints_disabled
        movl  %eax,XREGS_eip(%esp)
        movl  %esp,%esi
        subl  $4,%esp
        movl  %esp,%edi
        movl  $XREGS_kernel_sizeof/4,%ecx
        rep;  movsl                     # make room for error_code/entry_vector
        movl  XREGS_error_code(%esp),%eax # error_code/entry_vector
        movl  %eax,XREGS_kernel_sizeof(%esp)
        jmp   restore_all_xen           # return to fixup code

FATAL_exception_with_ints_disabled:
        xorl  %esi,%esi
        movw  XREGS_entry_vector(%esp),%si
        movl  %esp,%edx
        pushl %edx                      # push the xen_regs pointer
        pushl %esi                      # push the trapnr (entry vector)
        call  SYMBOL_NAME(fatal_trap)
        ud2
ENTRY(coprocessor_error)
        pushl $TRAP_copro_error<<16
        jmp   error_code

ENTRY(simd_coprocessor_error)
        pushl $TRAP_simd_error<<16
        jmp   error_code

ENTRY(device_not_available)
        pushl $TRAP_no_device<<16
        jmp   error_code

ENTRY(debug)
        pushl $TRAP_debug<<16
        jmp   error_code

ENTRY(int3)
        pushl $TRAP_int3<<16
        jmp   error_code

ENTRY(overflow)
        pushl $TRAP_overflow<<16
        jmp   error_code

ENTRY(bounds)
        pushl $TRAP_bounds<<16
        jmp   error_code

ENTRY(invalid_op)
        pushl $TRAP_invalid_op<<16
        jmp   error_code

ENTRY(coprocessor_segment_overrun)
        pushl $TRAP_copro_seg<<16
        jmp   error_code

ENTRY(invalid_TSS)
        movw  $TRAP_invalid_tss,2(%esp)
        jmp   error_code

ENTRY(segment_not_present)
        movw  $TRAP_no_segment,2(%esp)
        jmp   error_code

ENTRY(stack_segment)
        movw  $TRAP_stack_error,2(%esp)
        jmp   error_code

ENTRY(general_protection)
        movw  $TRAP_gp_fault,2(%esp)
        jmp   error_code

ENTRY(alignment_check)
        movw  $TRAP_alignment_check,2(%esp)
        jmp   error_code

ENTRY(page_fault)
        movw  $TRAP_page_fault,2(%esp)
        jmp   error_code

ENTRY(machine_check)
        pushl $TRAP_machine_check<<16
        jmp   error_code

ENTRY(spurious_interrupt_bug)
        pushl $TRAP_spurious_int<<16
        jmp   error_code
ENTRY(nmi)
        # Save state but do not trash the segment registers!
        # We may otherwise be unable to reload them or copy them to ring 1.
        pushl %eax
        SAVE_ALL_NOSEGREGS(a)

        # Check for hardware problems.
        inb   $0x61,%al
        testb $0x80,%al
        jne   nmi_parity_err
        testb $0x40,%al
        jne   nmi_io_err
        movl  %eax,%ebx

        # Okay, it's almost a normal NMI tick. We can only process it if:
        #  A. We are the outermost Xen activation (in which case we have
        #     the selectors safely saved on our stack)
        #  B. DS and ES contain sane Xen values.
        # In all other cases we bail without touching DS-GS, as we have
        # interrupted an enclosing Xen activation in tricky prologue or
        # epilogue code.
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   do_watchdog_tick
        movl  %ds,%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   defer_nmi
        movl  %es,%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   defer_nmi

do_watchdog_tick:
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        pushl %ebx                      # reason
        pushl %edx                      # regs
        call  SYMBOL_NAME(do_nmi)
        addl  $8,%esp
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        GET_CURRENT(%ebx)
        jmp   restore_all_guest

defer_nmi:
        movl  $FIXMAP_apic_base,%eax
        # apic_wait_icr_idle()
1:      movl  %ss:APIC_ICR(%eax),%ebx
        testl $APIC_ICR_BUSY,%ebx
        jnz   1b
        # __send_IPI_shortcut(APIC_DEST_SELF, TRAP_deferred_nmi)
        movl  $(APIC_DM_FIXED | APIC_DEST_SELF | APIC_DEST_LOGICAL | \
                TRAP_deferred_nmi),%ss:APIC_ICR(%eax)
        jmp   restore_all_xen
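        # Deferral converts the NMI into a self-directed fixed-vector IPI
        # (TRAP_deferred_nmi): the local APIC redelivers it as an ordinary
        # interrupt later, once we are back on a path where segment state
        # can safely be touched.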
nmi_parity_err:
        # Clear and disable the parity-error line
        andb  $0xf,%al
        orb   $0x4,%al
        outb  %al,$0x61
        cmpb  $'i',%ss:SYMBOL_NAME(opt_nmi)     # nmi=ignore
        je    nmi_out
        bts   $0,%ss:SYMBOL_NAME(nmi_softirq_reason)
        bts   $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
        cmpb  $'d',%ss:SYMBOL_NAME(opt_nmi)     # nmi=dom0
        je    nmi_out
        movl  $(__HYPERVISOR_DS),%edx           # nmi=fatal
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        push  %edx
        call  SYMBOL_NAME(mem_parity_error)
        addl  $4,%esp
nmi_out:movl  %ss:XREGS_eflags(%esp),%eax
        movb  %ss:XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        GET_CURRENT(%ebx)
        jmp   test_all_events

nmi_io_err:
        # Clear and disable the I/O-error line
        andb  $0xf,%al
        orb   $0x8,%al
        outb  %al,$0x61
        cmpb  $'i',%ss:SYMBOL_NAME(opt_nmi)     # nmi=ignore
        je    nmi_out
        bts   $1,%ss:SYMBOL_NAME(nmi_softirq_reason)
        bts   $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
        cmpb  $'d',%ss:SYMBOL_NAME(opt_nmi)     # nmi=dom0
        je    nmi_out
        movl  $(__HYPERVISOR_DS),%edx           # nmi=fatal
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        push  %edx
        call  SYMBOL_NAME(io_check_error)
        addl  $4,%esp
        jmp   nmi_out
ENTRY(setup_vm86_frame)
        # Copies the entire stack frame forwards by 16 bytes.
        .macro copy_vm86_words count=18
        .if \count
        pushl ((\count-1)*4)(%esp)
        popl  ((\count-1)*4)+16(%esp)
        copy_vm86_words "(\count-1)"
        .endif
        .endm
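        # Each pushl/popl pair moves one 32-bit word up the stack by 16
        # bytes; the macro recurses from count down to 1, copying the
        # highest word first so no source word is overwritten before it
        # has been read.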
        copy_vm86_words
        addl $16,%esp
        ret

do_switch_vm86:
        # Discard the return address
        addl $4,%esp

        movl XREGS_eflags(%esp),%edx

        # GS:ESI == Ring-1 stack activation
        movl XREGS_esp(%esp),%esi
VFLT1:  movl XREGS_ss(%esp),%gs

        # ES:EDI == Ring-0 stack activation
        leal XREGS_eip(%esp),%edi

        # Restore the hypercall-number-clobbered EAX on our stack frame
VFLT2:  movl %gs:(%esi),%eax
        movl %eax,XREGS_eax(%esp)
        addl $4,%esi

        # Copy the VM86 activation from the ring-1 stack to the ring-0 stack
        movl $(XREGS_user_sizeof-XREGS_eip)/4,%ecx
VFLT3:  movl %gs:(%esi),%eax
        stosl
        addl $4,%esi
        loop VFLT3

        # Fix up EFLAGS
        andl $~X86_EFLAGS_IOPL,XREGS_eflags(%esp)
        andl $X86_EFLAGS_IOPL,%edx      # Ignore attempts to change EFLAGS.IOPL
        jnz  1f
        orl  $X86_EFLAGS_IF,%edx        # EFLAGS.IOPL=0 => no messing with EFLAGS.IF
1:      orl  $X86_EFLAGS_VM,%edx        # Force EFLAGS.VM
        orl  %edx,XREGS_eflags(%esp)

        jmp  test_all_events

.section __ex_table,"a"
        .long VFLT1,domain_crash
        .long VFLT2,domain_crash
        .long VFLT3,domain_crash
.previous
.data

ENTRY(exception_table)
        .long SYMBOL_NAME(do_divide_error)
        .long SYMBOL_NAME(do_debug)
        .long 0 # nmi
        .long SYMBOL_NAME(do_int3)
        .long SYMBOL_NAME(do_overflow)
        .long SYMBOL_NAME(do_bounds)
        .long SYMBOL_NAME(do_invalid_op)
        .long SYMBOL_NAME(math_state_restore)
        .long 0 # double fault
        .long SYMBOL_NAME(do_coprocessor_segment_overrun)
        .long SYMBOL_NAME(do_invalid_TSS)
        .long SYMBOL_NAME(do_segment_not_present)
        .long SYMBOL_NAME(do_stack_segment)
        .long SYMBOL_NAME(do_general_protection)
        .long SYMBOL_NAME(do_page_fault)
        .long SYMBOL_NAME(do_spurious_interrupt_bug)
        .long SYMBOL_NAME(do_coprocessor_error)
        .long SYMBOL_NAME(do_alignment_check)
        .long SYMBOL_NAME(do_machine_check)
        .long SYMBOL_NAME(do_simd_coprocessor_error)
ENTRY(hypercall_table)
        .long SYMBOL_NAME(do_set_trap_table)     /*  0 */
        .long SYMBOL_NAME(do_mmu_update)
        .long SYMBOL_NAME(do_set_gdt)
        .long SYMBOL_NAME(do_stack_switch)
        .long SYMBOL_NAME(do_set_callbacks)
        .long SYMBOL_NAME(do_fpu_taskswitch)     /*  5 */
        .long SYMBOL_NAME(do_sched_op)
        .long SYMBOL_NAME(do_dom0_op)
        .long SYMBOL_NAME(do_set_debugreg)
        .long SYMBOL_NAME(do_get_debugreg)
        .long SYMBOL_NAME(do_update_descriptor)  /* 10 */
        .long SYMBOL_NAME(do_set_fast_trap)
        .long SYMBOL_NAME(do_dom_mem_op)
        .long SYMBOL_NAME(do_multicall)
        .long SYMBOL_NAME(do_update_va_mapping)
        .long SYMBOL_NAME(do_set_timer_op)       /* 15 */
        .long SYMBOL_NAME(do_event_channel_op)
        .long SYMBOL_NAME(do_xen_version)
        .long SYMBOL_NAME(do_console_io)
        .long SYMBOL_NAME(do_physdev_op)
        .long SYMBOL_NAME(do_grant_table_op)     /* 20 */
        .long SYMBOL_NAME(do_vm_assist)
        .long SYMBOL_NAME(do_update_va_mapping_otherdomain)
        .long SYMBOL_NAME(do_switch_vm86)
        .long SYMBOL_NAME(do_boot_vcpu)
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long SYMBOL_NAME(do_ni_hypercall)
        .endr
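        /*
         * The .rept pads the remaining slots with do_ni_hypercall so that
         * any index produced by the andl $(NR_hypercalls-1) mask in
         * ENTRY(hypercall) lands on a valid handler; do_ni_hypercall just
         * returns -ENOSYS.
         */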