ia64/xen-unstable

view xen/arch/x86/x86_32/entry.S @ 2533:7d618b439da4

bitkeeper revision 1.1159.87.1 (4153f0b7-PpG0IgvgIzYd9Wgt3zaMQ)

Merge freefall.cl.cam.ac.uk:/auto/groups/xeno/users/cl349/BK/xeno.bk-nbsd
into freefall.cl.cam.ac.uk:/local/scratch/cl349/xeno.bk-nbsd
author cl349@freefall.cl.cam.ac.uk
date Fri Sep 24 10:02:31 2004 +0000 (2004-09-24)
parents 7ed44d755dda c627fa2b0dc3
children 4d222bf4d125
line source
1 /*
2 * Hypercall and fault low-level handling routines.
3 *
4 * Copyright (c) 2002-2004, K A Fraser
5 * Copyright (c) 1991, 1992 Linus Torvalds
6 */
8 /*
9 * The idea for callbacks to guest OSes
10 * ====================================
11 *
12 * First, we require that all callbacks (either via a supplied
13 * interrupt-descriptor-table, or via the special event or failsafe callbacks
14 * in the shared-info-structure) are to ring 1. This just makes life easier,
15 * in that it means we don't have to do messy GDT/LDT lookups to find
16 * out what the privilege level of the return code-selector is. That code
17 * would just be a hassle to write, and would need to account for running
18 * off the end of the GDT/LDT, for example. For all callbacks we check
19 * that the provided
20 * return CS is not == __HYPERVISOR_{CS,DS}. Apart from that we're safe as
21 * don't allow a guest OS to install ring-0 privileges into the GDT/LDT.
22 * It's up to the guest OS to ensure all returns via the IDT are to ring 1.
23 * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather
24 * than the correct ring) and bad things are bound to ensue -- IRET is
25 * likely to fault, and we may end up killing the domain (no harm can
26 * come to Xen, though).
27 *
28 * When doing a callback, we check if the return CS is in ring 0. If so,
29 * callback is delayed until next return to ring != 0.
30 * If return CS is in ring 1, then we create a callback frame
31 * starting at return SS/ESP. The base of the frame does an intra-privilege
32 * interrupt-return.
33 * If return CS is in ring > 1, we create a callback frame starting
34 * at SS/ESP taken from appropriate section of the current TSS. The base
35 * of the frame does an inter-privilege interrupt-return.
36 *
37 * Note that the "failsafe callback" uses a special stackframe:
38 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
39 * return_CS, return_EFLAGS[, return_ESP, return_SS] }
40 * That is, original values for DS/ES/FS/GS are placed on stack rather than
41 * in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have them
42 * saved/restored in guest OS. Furthermore, if we load them we may cause
43 * a fault if they are invalid, which is a hassle to deal with. We avoid
44 * that problem if we don't load them :-) This property allows us to use
45 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
46 * on return to ring != 0, we can simply package it up as a return via
47 * the failsafe callback, and let the guest OS sort it out (perhaps by
48 * killing an application process). Note that we also do this for any
49 * faulting IRET -- just let the guest OS handle it via the event
50 * callback.
51 *
52 * We terminate a domain in the following cases:
53 * - creating a callback stack frame (due to bad ring-1 stack).
54 * - faulting IRET on entry to failsafe callback handler.
55 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
56 * handler in good order (absolutely no faults allowed!).
57 */
59 #include <xen/config.h>
60 #include <xen/errno.h>
61 #include <xen/softirq.h>
62 #include <hypervisor-ifs/hypervisor-if.h>
/* Byte offsets into the saved-register frame built by SAVE_ALL /       */
/* error_code below (registers are pushed in the reverse of this order, */
/* so EBX sits at the lowest address).  EIP/CS/EFLAGS are pushed by the */
/* CPU on entry; OLDESP/OLDSS are pushed by the CPU only on an          */
/* inter-privilege entry (i.e. when interrupting guest, not Xen, code). */
64 EBX = 0x00
65 ECX = 0x04
66 EDX = 0x08
67 ESI = 0x0C
68 EDI = 0x10
69 EBP = 0x14
70 EAX = 0x18
71 DS = 0x1C
72 ES = 0x20
73 FS = 0x24
74 GS = 0x28
75 ORIG_EAX = 0x2C
76 EIP = 0x30
77 CS = 0x34
78 EFLAGS = 0x38
79 OLDESP = 0x3C
80 OLDSS = 0x40
82 /* Offsets in domain structure */
83 PROCESSOR = 0
84 SHARED_INFO = 4
85 EVENT_SEL = 8
86 EVENT_ADDR = 12
87 FAILSAFE_BUFFER = 16
88 FAILSAFE_SEL = 32
89 FAILSAFE_ADDR = 36
91 /* Offsets in shared_info_t */
/* NB. UPCALL_PENDING deliberately expands to nothing, so that          */
/* UPCALL_PENDING(%eax) assembles as (%eax), i.e. offset 0.             */
92 #define UPCALL_PENDING /* 0 */
93 #define UPCALL_MASK 1
95 /* Offsets in guest_trap_bounce */
96 GTB_ERROR_CODE = 0
97 GTB_CR2 = 4
98 GTB_FLAGS = 8
99 GTB_CS = 10
100 GTB_EIP = 12
/* Bit values for the GTB_FLAGS field */
101 GTBF_TRAP = 1
102 GTBF_TRAP_NOCODE = 2
103 GTBF_TRAP_CR2 = 4
/* EFLAGS bit masks */
105 CF_MASK = 0x00000001
106 IF_MASK = 0x00000200
107 NT_MASK = 0x00004000
/* Save all general-purpose and segment registers on entry from guest   */
/* context; the push order must match the EBX..GS frame offsets above.  */
/* SAVE_ALL_NOSEGREGS leaves %ds-%gs with their entry values (needed by */
/* the NMI path, which must not trash them); SAVE_ALL additionally      */
/* loads the Xen data selector into %ds-%gs and re-enables interrupts.  */
109 #define SAVE_ALL_NOSEGREGS \
110 cld; \
111 pushl %gs; \
112 pushl %fs; \
113 pushl %es; \
114 pushl %ds; \
115 pushl %eax; \
116 pushl %ebp; \
117 pushl %edi; \
118 pushl %esi; \
119 pushl %edx; \
120 pushl %ecx; \
121 pushl %ebx; \
123 #define SAVE_ALL \
124 SAVE_ALL_NOSEGREGS \
125 movl $(__HYPERVISOR_DS),%edx; \
126 movl %edx,%ds; \
127 movl %edx,%es; \
128 movl %edx,%fs; \
129 movl %edx,%gs; \
130 sti;
/* Load the current task pointer into 'reg'.  The arithmetic computes   */
/* the address of the topmost 4-byte-aligned word of the 4kB stack page */
/* containing %esp ((esp | 0xFFC) & ~3) and loads the task pointer kept */
/* there, so 'current' is recoverable from %esp alone.                  */
132 #define GET_CURRENT(reg) \
133 movl $4096-4, reg; \
134 orl %esp, reg; \
135 andl $~3,reg; \
136 movl (reg),reg;
/* Resume a non-idle task: pick up the current task pointer from the    */
/* stack page and drop straight into the event-delivery/return path.    */
138 ENTRY(continue_nonidle_task)
139 GET_CURRENT(%ebx)                  # %ebx = current task_struct
140 jmp test_all_events
142 ALIGN
143 /*
144 * HYPERVISOR_multicall(call_list, nr_calls)
145 * Execute a list of 'nr_calls' hypercalls, pointed at by 'call_list'.
146 * This is fairly easy except that:
147 * 1. We may fault reading the call list, and must patch that up; and
148 * 2. We cannot recursively call HYPERVISOR_multicall, or a malicious
149 * caller could cause our stack to blow up.
150 */
151 #define MULTICALL_ENTRY_ORDER 5
152 do_multicall:
153 popl %eax                          # %eax = our own return address
/* Recursion guard (see 2. above): if we were invoked from the dispatch */
/* 'call' inside multicall_loop, the popped return address is exactly   */
/* multicall_return_from_call.  Bounce straight back to the loop's      */
/* return point rather than recursing into a nested multicall.          */
154 cmpl $SYMBOL_NAME(multicall_return_from_call),%eax
155 je multicall_return_from_call
156 pushl %ebx
157 movl 4(%esp),%ebx /* EBX == call_list */
158 movl 8(%esp),%ecx /* ECX == nr_calls */
159 /* Ensure the entire multicall list is below HYPERVISOR_VIRT_START. */
160 movl %ecx,%eax
161 shll $MULTICALL_ENTRY_ORDER,%eax   # nr_calls * sizeof(multicall entry)
162 addl %ebx,%eax /* EAX == end of multicall list */
163 jc bad_multicall_address           # wrapped past 4GB
164 cmpl $__HYPERVISOR_VIRT_START,%eax
165 jnc bad_multicall_address
166 multicall_loop:
167 pushl %ecx                         # save remaining-iterations count
168 multicall_fault1:
169 pushl 20(%ebx) # args[4]
170 multicall_fault2:
171 pushl 16(%ebx) # args[3]
172 multicall_fault3:
173 pushl 12(%ebx) # args[2]
174 multicall_fault4:
175 pushl 8(%ebx) # args[1]
176 multicall_fault5:
177 pushl 4(%ebx) # args[0]
178 multicall_fault6:
179 movl (%ebx),%eax # op
180 andl $(NR_hypercalls-1),%eax       # mask op into table range
181 call *SYMBOL_NAME(hypercall_table)(,%eax,4)
182 multicall_return_from_call:
183 multicall_fault7:
184 movl %eax,24(%ebx) # args[5] == result
185 addl $20,%esp                      # pop the five arguments
186 popl %ecx
187 addl $(1<<MULTICALL_ENTRY_ORDER),%ebx  # advance to next list entry
188 loop multicall_loop                # --%ecx; continue while non-zero
189 popl %ebx
190 xorl %eax,%eax                     # success
191 jmp ret_from_hypercall
193 bad_multicall_address:
194 popl %ebx
195 movl $-EFAULT,%eax
196 jmp ret_from_hypercall
/* Map each faulting access above to a fixup that unwinds exactly what  */
/* has been pushed at that point.  fault7 shares fixup6 because the     */
/* five arguments are still on the stack when the result write faults.  */
198 .section __ex_table,"a"
199 .align 4
200 .long multicall_fault1, multicall_fixup1
201 .long multicall_fault2, multicall_fixup2
202 .long multicall_fault3, multicall_fixup3
203 .long multicall_fault4, multicall_fixup4
204 .long multicall_fault5, multicall_fixup5
205 .long multicall_fault6, multicall_fixup6
206 .long multicall_fault7, multicall_fixup6
207 .previous
/* Fall-through chain: each label pops one more word (the args pushed   */
/* so far plus the saved %ecx), then restores %ebx and fails -EFAULT.   */
209 .section .fixup,"ax"
210 multicall_fixup6:
211 addl $4,%esp
212 multicall_fixup5:
213 addl $4,%esp
214 multicall_fixup4:
215 addl $4,%esp
216 multicall_fixup3:
217 addl $4,%esp
218 multicall_fixup2:
219 addl $4,%esp
220 multicall_fixup1:
221 addl $4,%esp
222 popl %ebx
223 movl $-EFAULT,%eax
224 jmp ret_from_hypercall
225 .previous
227 ALIGN
/* Return to guest context.  Any fault while popping the guest segment  */
/* registers or executing the final iret is caught by the __ex_table    */
/* entries below and converted into a failsafe-callback delivery.       */
228 restore_all_guest:
229 # First, may need to restore %ds if clobbered by create_bounce_frame
230 pushl %ss
231 popl %ds
232 # Second, create a failsafe copy of DS,ES,FS,GS in case any are bad
233 leal DS(%esp),%esi
234 leal FAILSAFE_BUFFER(%ebx),%edi
# Four movsl's copy the 16-byte DS/ES/FS/GS block into the per-domain
# failsafe buffer (DF is clear: cld was executed in SAVE_ALL on entry).
235 movsl
236 movsl
237 movsl
238 movsl
239 # Finally, restore guest registers -- faults will cause failsafe
240 popl %ebx
241 popl %ecx
242 popl %edx
243 popl %esi
244 popl %edi
245 popl %ebp
246 popl %eax
247 1: popl %ds
248 2: popl %es
249 3: popl %fs
250 4: popl %gs
251 addl $4,%esp                       # skip ORIG_EAX
252 5: iret
/* Fixups 10..6 rebuild the full saved frame (re-pushing whatever had   */
/* already been popped at the faulting label, falling through to push   */
/* the rest), reload %ds/%es with Xen's selector via %ss, and deliver   */
/* the failsafe callback instead of crashing.                           */
253 .section .fixup,"ax"
254 10: subl $4,%esp
255 pushl %gs
256 9: pushl %fs
257 8: pushl %es
258 7: pushl %ds
259 6: pushl %eax
260 pushl %ebp
261 pushl %edi
262 pushl %esi
263 pushl %edx
264 pushl %ecx
265 pushl %ebx
266 pushl %ss
267 popl %ds
268 pushl %ss
269 popl %es
270 jmp failsafe_callback
271 .previous
272 .section __ex_table,"a"
273 .align 4
274 .long 1b,6b
275 .long 2b,7b
276 .long 3b,8b
277 .long 4b,9b
278 .long 5b,10b
279 .previous
281 /* No special register assumptions */
/* Deliver the domain's registered failsafe callback: point the per-CPU */
/* guest_trap_bounce at the failsafe handler, build a basic bounce      */
/* frame, then append the saved DS/ES/FS/GS values below it (see the    */
/* failsafe stackframe layout described in the header comment).         */
282 failsafe_callback:
283 GET_CURRENT(%ebx)
284 movl PROCESSOR(%ebx),%eax
285 shl $4,%eax                        # sizeof(guest_trap_bounce) == 16
286 lea guest_trap_bounce(%eax),%edx
287 movl FAILSAFE_ADDR(%ebx),%eax
288 movl %eax,GTB_EIP(%edx)
289 movl FAILSAFE_SEL(%ebx),%eax
290 movw %ax,GTB_CS(%edx)
291 call create_bounce_frame
292 subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
293 leal FAILSAFE_BUFFER(%ebx),%ebp
# After create_bounce_frame, %ds holds the guest stack segment and %esi
# the guest stack pointer, so these stores write the guest's stack.
294 movl 0(%ebp),%eax # DS
295 FAULT1: movl %eax,(%esi)
296 movl 4(%ebp),%eax # ES
297 FAULT2: movl %eax,4(%esi)
298 movl 8(%ebp),%eax # FS
299 FAULT3: movl %eax,8(%esi)
300 movl 12(%ebp),%eax # GS
301 FAULT4: movl %eax,12(%esi)
302 movl %esi,OLDESP(%esp)             # guest resumes on the new frame
303 popl %ebx
304 popl %ecx
305 popl %edx
306 popl %esi
307 popl %edi
308 popl %ebp
309 popl %eax
310 addl $20,%esp # skip DS/ES/FS/GS/ORIG_EAX
311 FAULT5: iret
314 ALIGN
315 # Simple restore -- we should never fault as we will only interrupt ring 0
316 # when sane values have been placed in all registers. The only exception is
317 # NMI, which may interrupt before good values have been placed in DS-GS.
318 # The NMI return code deals with this problem itself.
319 restore_all_xen:
320 popl %ebx
321 popl %ecx
322 popl %edx
323 popl %esi
324 popl %edi
325 popl %ebp
326 popl %eax
327 popl %ds
328 popl %es
329 popl %fs
330 popl %gs
331 addl $4,%esp                       # skip ORIG_EAX
332 iret
334 ALIGN
/* Guest hypercall entry point: %eax holds the hypercall number, which  */
/* is masked into range and dispatched through hypercall_table.         */
335 ENTRY(hypercall)
336 pushl %eax # save orig_eax
337 SAVE_ALL
338 GET_CURRENT(%ebx)
339 andl $(NR_hypercalls-1),%eax
340 call *SYMBOL_NAME(hypercall_table)(,%eax,4)
342 ret_from_hypercall:
343 movl %eax,EAX(%esp) # save the return value
/* Common exit path; expects %ebx == current task_struct.               */
345 test_all_events:
346 xorl %ecx,%ecx
347 notl %ecx                          # %ecx = ~0, tests any pending bit
348 cli # tests must not race interrupts
349 /*test_softirqs:*/
350 movl PROCESSOR(%ebx),%eax
351 shl $6,%eax # sizeof(irq_cpustat) == 64
352 test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
353 jnz process_softirqs
354 /*test_guest_events:*/
355 movl SHARED_INFO(%ebx),%eax
356 testb $0xFF,UPCALL_MASK(%eax)      # guest has events masked?
357 jnz restore_all_guest
358 testb $0xFF,UPCALL_PENDING(%eax)   # anything to deliver?
359 jz restore_all_guest
360 movb $1,UPCALL_MASK(%eax) # Upcalls are masked during delivery
361 /*process_guest_events:*/
362 movl PROCESSOR(%ebx),%edx
363 shl $4,%edx # sizeof(guest_trap_bounce) == 16
364 lea guest_trap_bounce(%edx),%edx
365 movl EVENT_ADDR(%ebx),%eax
366 movl %eax,GTB_EIP(%edx)
367 movl EVENT_SEL(%ebx),%eax
368 movw %ax,GTB_CS(%edx)
369 call create_bounce_frame
370 jmp restore_all_guest
372 ALIGN
/* Run softirqs with interrupts enabled, then re-run all event tests.   */
373 process_softirqs:
374 sti
375 call SYMBOL_NAME(do_softirq)
376 jmp test_all_events
378 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
379 /* {EIP, CS, EFLAGS, [ESP, SS]} */
380 /* %edx == guest_trap_bounce, %ebx == task_struct */
381 /* %eax,%ecx are clobbered. %ds:%esi contain new OLDSS/OLDESP. */
/* NB. all the '+4(%esp)' offsets below account for our own return      */
/* address, pushed by the 'call create_bounce_frame'.                   */
382 create_bounce_frame:
383 mov CS+4(%esp),%cl
384 test $2,%cl                        # RPL bit 1 set => ring 2 or 3
385 jz 1f /* jump if returning to an existing ring-1 activation */
386 /* obtain ss/esp from TSS -- no current ring-1 activations */
387 movl PROCESSOR(%ebx),%eax
388 /* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
389 movl %eax, %ecx
390 shll $7, %ecx
391 shll $13, %eax
392 addl %ecx,%eax                     # %eax = cpu * (2^13 + 2^7) = cpu * 8320
393 addl $init_tss + 12,%eax           # +12 = offsetof esp1 in the TSS
394 movl (%eax),%esi /* tss->esp1 */
395 FAULT6: movl 4(%eax),%ds /* tss->ss1 */
396 /* base of stack frame must contain ss/esp (inter-priv iret) */
397 subl $8,%esi
398 movl OLDESP+4(%esp),%eax
399 FAULT7: movl %eax,(%esi)
400 movl OLDSS+4(%esp),%eax
401 FAULT8: movl %eax,4(%esi)
402 jmp 2f
403 1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
404 movl OLDESP+4(%esp),%esi
405 FAULT9: movl OLDSS+4(%esp),%ds
406 2: /* Construct a stack frame: EFLAGS, CS/EIP */
407 subl $12,%esi
408 movl EIP+4(%esp),%eax
409 FAULT10:movl %eax,(%esi)
410 movl CS+4(%esp),%eax
411 FAULT11:movl %eax,4(%esi)
412 movl EFLAGS+4(%esp),%eax
413 FAULT12:movl %eax,8(%esi)
414 /* Rewrite our stack frame and return to ring 1. */
415 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
416 andl $0xfffcbeff,%eax              # clears TF(8), NT(14), RF(16), VM(17)
417 movl %eax,EFLAGS+4(%esp)
418 movl %ds,OLDSS+4(%esp)
419 movl %esi,OLDESP+4(%esp)
# %es: overrides needed: %ds now holds the guest stack selector.
420 movzwl %es:GTB_CS(%edx),%eax
421 movl %eax,CS+4(%esp)
422 movl %es:GTB_EIP(%edx),%eax
423 movl %eax,EIP+4(%esp)
424 ret
/* NB. FAULT13/FAULT14 are defined in process_guest_exception_and_events. */
427 .section __ex_table,"a"
428 .align 4
429 .long FAULT1, crash_domain_fixup3 # Fault writing to ring-1 stack
430 .long FAULT2, crash_domain_fixup3 # Fault writing to ring-1 stack
431 .long FAULT3, crash_domain_fixup3 # Fault writing to ring-1 stack
432 .long FAULT4, crash_domain_fixup3 # Fault writing to ring-1 stack
433 .long FAULT5, crash_domain_fixup1 # Fault executing failsafe iret
434 .long FAULT6, crash_domain_fixup2 # Fault loading ring-1 stack selector
435 .long FAULT7, crash_domain_fixup2 # Fault writing to ring-1 stack
436 .long FAULT8, crash_domain_fixup2 # Fault writing to ring-1 stack
437 .long FAULT9, crash_domain_fixup2 # Fault loading ring-1 stack selector
438 .long FAULT10,crash_domain_fixup2 # Fault writing to ring-1 stack
439 .long FAULT11,crash_domain_fixup2 # Fault writing to ring-1 stack
440 .long FAULT12,crash_domain_fixup2 # Fault writing to ring-1 stack
441 .long FAULT13,crash_domain_fixup3 # Fault writing to ring-1 stack
442 .long FAULT14,crash_domain_fixup3 # Fault writing to ring-1 stack
443 .previous
445 # This handler kills domains which experience unrecoverable faults.
446 .section .fixup,"ax"
# fixup1: the failsafe iret faulted after the frame was popped --
# rebuild a full saved frame before killing the domain.
447 crash_domain_fixup1:
448 subl $4,%esp
449 SAVE_ALL
450 jmp domain_crash
# fixup2: fault inside create_bounce_frame -- discard its return address.
451 crash_domain_fixup2:
452 addl $4,%esp
# fixup3: %ds may hold a guest selector -- reload it from %ss first.
453 crash_domain_fixup3:
454 pushl %ss
455 popl %ds
456 jmp domain_crash
457 .previous
459 ALIGN
/* If a trap bounce is pending for this CPU (GTB_FLAGS non-zero),       */
/* build a guest exception frame, optionally pushing the error code     */
/* and %cr2 per the GTBF_* flags, then fall back into event delivery.   */
/* Expects %ebx == current task_struct.                                 */
460 process_guest_exception_and_events:
461 movl PROCESSOR(%ebx),%eax
462 shl $4,%eax                        # sizeof(guest_trap_bounce) == 16
463 lea guest_trap_bounce(%eax),%edx
464 testb $~0,GTB_FLAGS(%edx)          # any bounce pending?
465 jz test_all_events
466 call create_bounce_frame # just the basic frame
# %ds now holds the guest stack selector; use %es: to reach Xen data.
467 mov %es:GTB_FLAGS(%edx),%cl
468 test $GTBF_TRAP_NOCODE,%cl
469 jnz 2f
470 subl $4,%esi # push error_code onto guest frame
471 movl %es:GTB_ERROR_CODE(%edx),%eax
472 FAULT13:movl %eax,(%esi)
473 test $GTBF_TRAP_CR2,%cl
474 jz 1f
475 subl $4,%esi # push %cr2 onto guest frame
476 movl %es:GTB_CR2(%edx),%eax
477 FAULT14:movl %eax,(%esi)
478 1: movl %esi,OLDESP(%esp)
479 2: push %es # unclobber %ds
480 pop %ds
481 movb $0,GTB_FLAGS(%edx)            # bounce consumed
482 jmp test_all_events
484 ALIGN
/* Interrupt exit: deliver events if we interrupted guest (ring != 0)   */
/* code, otherwise do a plain Xen frame restore.                        */
485 ENTRY(ret_from_intr)
486 GET_CURRENT(%ebx)
487 movb CS(%esp),%al
488 testb $3,%al # return to non-supervisor?
489 jne test_all_events
490 jmp restore_all_xen
492 ENTRY(divide_error)
493 pushl $0 # no error code
494 pushl $ SYMBOL_NAME(do_divide_error)
495 ALIGN
/* Common exception dispatch.  On entry the stub has pushed the error   */
/* code (or 0) and the C handler's address, so after the register       */
/* pushes below the handler address occupies the GS frame slot and the  */
/* error code the ORIG_EAX slot.  Both are fetched and replaced: the    */
/* real %gs goes into GS, and -1 into ORIG_EAX.                         */
496 error_code:
497 pushl %fs
498 pushl %es
499 pushl %ds
500 pushl %eax
501 xorl %eax,%eax
502 pushl %ebp
503 pushl %edi
504 pushl %esi
505 pushl %edx
506 decl %eax # eax = -1
507 pushl %ecx
508 pushl %ebx
509 cld
510 movl %gs,%ecx
511 movl ORIG_EAX(%esp), %esi # get the error code
512 movl GS(%esp), %edi # get the function address
513 movl %eax, ORIG_EAX(%esp)          # mark frame: no syscall (-1)
514 movl %ecx, GS(%esp)                # store the real %gs in its slot
515 movl $(__HYPERVISOR_DS),%edx
516 movl %edx,%ds
517 movl %edx,%es
518 movl %edx,%fs
519 movl %edx,%gs
520 movl %esp,%edx
521 pushl %esi # push the error code
522 pushl %edx # push the pt_regs pointer
523 GET_CURRENT(%ebx)
524 call *%edi                         # invoke do_<exception>(regs, code)
525 addl $8,%esp
526 movb CS(%esp),%al
527 testb $3,%al                       # interrupted Xen (ring 0)?
528 je restore_all_xen
529 jmp process_guest_exception_and_events
/* Exception entry stubs.  Each pushes its C handler's address and, for */
/* vectors where the CPU supplies no error code, a zero placeholder, so */
/* error_code above always sees a uniform two-word prefix.              */
531 ENTRY(coprocessor_error)
532 pushl $0
533 pushl $ SYMBOL_NAME(do_coprocessor_error)
534 jmp error_code
536 ENTRY(simd_coprocessor_error)
537 pushl $0
538 pushl $ SYMBOL_NAME(do_simd_coprocessor_error)
539 jmp error_code
541 ENTRY(device_not_available)
542 pushl $0
543 pushl $SYMBOL_NAME(math_state_restore)
544 jmp error_code
546 ENTRY(debug)
547 pushl $0
548 pushl $ SYMBOL_NAME(do_debug)
549 jmp error_code
551 ENTRY(int3)
552 pushl $0
553 pushl $ SYMBOL_NAME(do_int3)
554 jmp error_code
556 ENTRY(overflow)
557 pushl $0
558 pushl $ SYMBOL_NAME(do_overflow)
559 jmp error_code
561 ENTRY(bounds)
562 pushl $0
563 pushl $ SYMBOL_NAME(do_bounds)
564 jmp error_code
566 ENTRY(invalid_op)
567 pushl $0
568 pushl $ SYMBOL_NAME(do_invalid_op)
569 jmp error_code
571 ENTRY(coprocessor_segment_overrun)
572 pushl $0
573 pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun)
574 jmp error_code
/* The following vectors arrive with a CPU-pushed error code in place.  */
576 ENTRY(invalid_TSS)
577 pushl $ SYMBOL_NAME(do_invalid_TSS)
578 jmp error_code
580 ENTRY(segment_not_present)
581 pushl $ SYMBOL_NAME(do_segment_not_present)
582 jmp error_code
584 ENTRY(stack_segment)
585 pushl $ SYMBOL_NAME(do_stack_segment)
586 jmp error_code
588 ENTRY(general_protection)
589 pushl $ SYMBOL_NAME(do_general_protection)
590 jmp error_code
592 ENTRY(alignment_check)
593 pushl $ SYMBOL_NAME(do_alignment_check)
594 jmp error_code
596 ENTRY(page_fault)
597 pushl $ SYMBOL_NAME(do_page_fault)
598 jmp error_code
600 ENTRY(machine_check)
601 pushl $0
602 pushl $ SYMBOL_NAME(do_machine_check)
603 jmp error_code
605 ENTRY(spurious_interrupt_bug)
606 pushl $0
607 pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
608 jmp error_code
610 ENTRY(nmi)
611 # Save state but do not trash the segment registers!
612 # We may otherwise be unable to reload them or copy them to ring 1.
613 pushl %eax
614 SAVE_ALL_NOSEGREGS
616 # Check for hardware problems.
617 inb $0x61,%al                      # system control port B (NMI status)
618 testb $0x80,%al                    # bit 7: memory parity error
619 jne nmi_parity_err
620 testb $0x40,%al                    # bit 6: I/O channel check
621 jne nmi_io_err
622 movl %eax,%ebx                     # stash reason byte for do_nmi
624 # Okay, it's almost a normal NMI tick. We can only process it if:
625 # A. We are the outermost Xen activation (in which case we have
626 # the selectors safely saved on our stack)
627 # B. DS-GS all contain sane Xen values.
628 # In all other cases we bail without touching DS-GS, as we have
629 # interrupted an enclosing Xen activation in tricky prologue or
630 # epilogue code.
631 movb CS(%esp),%al
632 testb $3,%al                       # interrupted guest => case A
633 jne do_watchdog_tick
634 movl DS(%esp),%eax
635 cmpw $(__HYPERVISOR_DS),%ax
636 jne nmi_badseg
637 movl ES(%esp),%eax
638 cmpw $(__HYPERVISOR_DS),%ax
639 jne nmi_badseg
640 movl FS(%esp),%eax
641 cmpw $(__HYPERVISOR_DS),%ax
642 jne nmi_badseg
643 movl GS(%esp),%eax
644 cmpw $(__HYPERVISOR_DS),%ax
645 jne nmi_badseg
/* Safe to load Xen selectors and call the C NMI handler.               */
647 do_watchdog_tick:
648 movl $(__HYPERVISOR_DS),%edx
649 movl %edx,%ds
650 movl %edx,%es
651 movl %esp,%edx
652 pushl %ebx # reason
653 pushl %edx # regs
654 call SYMBOL_NAME(do_nmi)
655 addl $8,%esp
656 movb CS(%esp),%al
657 testb $3,%al
658 je restore_all_xen
659 GET_CURRENT(%ebx)
660 jmp restore_all_guest
/* Bail without touching DS-GS: pop only the GPRs and discard the       */
/* DS/ES/FS/GS/ORIG_EAX slots (20 bytes) untouched.                     */
662 nmi_badseg:
663 popl %ebx
664 popl %ecx
665 popl %edx
666 popl %esi
667 popl %edi
668 popl %ebp
669 popl %eax
670 addl $20,%esp
671 iret
/* Hardware NMI sources.  %ss: overrides are used throughout because    */
/* %ds may still hold a guest selector on this path.                    */
673 nmi_parity_err:
674 # Clear and disable the parity-error line
675 andb $0xf,%al
676 orb $0x4,%al                       # port 0x61 bit 2: disable parity check
677 outb %al,$0x61
678 cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
679 je nmi_badseg
680 bts $0,%ss:SYMBOL_NAME(nmi_softirq_reason)
681 bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
682 cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
683 je nmi_badseg
684 movl $(__HYPERVISOR_DS),%edx # nmi=fatal
685 movl %edx,%ds
686 movl %edx,%es
687 movl %esp,%edx
688 push %edx
689 call SYMBOL_NAME(mem_parity_error)
690 addl $4,%esp
691 jmp ret_from_intr
693 nmi_io_err:
694 # Clear and disable the I/O-error line
695 andb $0xf,%al
696 orb $0x8,%al                       # port 0x61 bit 3: disable I/O check
697 outb %al,$0x61
698 cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
699 je nmi_badseg
700 bts $1,%ss:SYMBOL_NAME(nmi_softirq_reason)
701 bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
702 cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
703 je nmi_badseg
704 movl $(__HYPERVISOR_DS),%edx # nmi=fatal
705 movl %edx,%ds
706 movl %edx,%es
707 movl %esp,%edx
708 push %edx
709 call SYMBOL_NAME(io_check_error)
710 addl $4,%esp
711 jmp ret_from_intr
713 .data
/* Hypercall dispatch table, indexed by hypercall number.  Callers mask */
/* the number with (NR_hypercalls-1) before indexing, so unused slots   */
/* must be populated -- see the .rept padding at the end.               */
714 ENTRY(hypercall_table)
715 .long SYMBOL_NAME(do_set_trap_table) /* 0 */
716 .long SYMBOL_NAME(do_mmu_update)
717 .long SYMBOL_NAME(do_set_gdt)
718 .long SYMBOL_NAME(do_stack_switch)
719 .long SYMBOL_NAME(do_set_callbacks)
720 .long SYMBOL_NAME(do_fpu_taskswitch) /* 5 */
721 .long SYMBOL_NAME(do_sched_op)
722 .long SYMBOL_NAME(do_dom0_op)
723 .long SYMBOL_NAME(do_set_debugreg)
724 .long SYMBOL_NAME(do_get_debugreg)
725 .long SYMBOL_NAME(do_update_descriptor) /* 10 */
726 .long SYMBOL_NAME(do_set_fast_trap)
727 .long SYMBOL_NAME(do_dom_mem_op)
728 .long SYMBOL_NAME(do_multicall)
729 .long SYMBOL_NAME(do_update_va_mapping)
730 .long SYMBOL_NAME(do_set_timer_op) /* 15 */
731 .long SYMBOL_NAME(do_event_channel_op)
732 .long SYMBOL_NAME(do_xen_version)
733 .long SYMBOL_NAME(do_console_io)
734 .long SYMBOL_NAME(do_physdev_op)
735 .long SYMBOL_NAME(do_grant_table_op) /* 20 */
736 .long SYMBOL_NAME(do_vm_assist)
737 .long SYMBOL_NAME(do_update_va_mapping_otherdomain)
/* Pad the remaining slots with the not-implemented stub.               */
738 .rept NR_hypercalls-((.-hypercall_table)/4)
739 .long SYMBOL_NAME(do_ni_hypercall)
740 .endr