ia64/xen-unstable

view xen/arch/i386/entry.S @ 1368:08dedb434eb5

bitkeeper revision 1.891.4.1 (40a23669Ti0XmdPlXKEjdxaFGPM05Q)

Macro mess-up caused interrupts to be off during hypercalls.

author     kaf24@scramble.cl.cam.ac.uk
date       Wed May 12 14:36:25 2004 +0000 (2004-05-12)
parents    a2abb67d5518
children   a7062958deee
/*
 *  linux/arch/i386/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * Stack layout in 'ret_from_system_call':
 *       0(%esp) - %ebx
 *       4(%esp) - %ecx
 *       8(%esp) - %edx
 *       C(%esp) - %esi
 *      10(%esp) - %edi
 *      14(%esp) - %ebp
 *      18(%esp) - %eax
 *      1C(%esp) - %ds
 *      20(%esp) - %es
 *      24(%esp) - %fs
 *      28(%esp) - %gs
 *      2C(%esp) - orig_eax
 *      30(%esp) - %eip
 *      34(%esp) - %cs
 *      38(%esp) - %eflags
 *      3C(%esp) - %oldesp
 *      40(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */

/* The idea for callbacks from monitor -> guest OS:
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find out
 * the privilege level of the return code-selector. That code would just be
 * a hassle to write, and would need to account for running off the end of
 * the GDT/LDT, for example. For all callbacks we check that the provided
 * return CS is not == __HYPERVISOR_{CS,DS}. Apart from that we're safe, as
 * we don't allow a guest OS to install ring-0 privileges into the GDT/LDT.
 * It's up to the guest OS to ensure all returns via the IDT are to ring 1.
 * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather
 * than the correct ring) and bad things are bound to ensue -- IRET is
 * likely to fault, and we may end up killing the domain (no harm can
 * come to the hypervisor itself, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *   return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, original values for DS/ES/FS/GS are placed on the stack rather
 * than in DS/ES/FS/GS themselves. Why? It saves us loading them, only to
 * have them saved/restored in the guest OS. Furthermore, if we load them we
 * may cause a fault if they are invalid, which is a hassle to deal with.
 * We avoid that problem if we don't load them :-) This property allows us
 * to use the failsafe callback as a fallback: if we ever fault on loading
 * DS/ES/FS/GS on return to ring != 0, we can simply package it up as a
 * return via the failsafe callback, and let the guest OS sort it out
 * (perhaps by killing an application process). Note that we also do this
 * for any faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - a fault while creating a callback stack frame (due to a bad ring-1
 *    stack);
 *  - a faulting IRET on entry to the failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
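For reference, the two frame shapes described in the comment above, written
out as C structs (purely illustrative -- these type names are not part of
the Xen headers; fields are listed from low to high address, i.e. in the
order IRET reads the frame back):

    /* Normal event-callback frame built on the guest's ring-1 stack.
     * ESP/SS are present only when delivery crosses a privilege level
     * (return CS in ring > 1). */
    struct bounce_frame {
        unsigned long eip;
        unsigned long cs;
        unsigned long eflags;
        unsigned long esp;   /* inter-privilege only */
        unsigned long ss;    /* inter-privilege only */
    };

    /* Failsafe-callback frame: the data selectors are parked on the
     * stack instead of being reloaded into DS/ES/FS/GS. */
    struct failsafe_frame {
        unsigned long ds, es, fs, gs;
        unsigned long eip;
        unsigned long cs;
        unsigned long eflags;
        unsigned long esp;   /* inter-privilege only */
        unsigned long ss;    /* inter-privilege only */
    };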
#include <xen/config.h>
#include <xen/errno.h>
#include <hypervisor-ifs/hypervisor-if.h>
EBX      = 0x00
ECX      = 0x04
EDX      = 0x08
ESI      = 0x0C
EDI      = 0x10
EBP      = 0x14
EAX      = 0x18
DS       = 0x1C
ES       = 0x20
FS       = 0x24
GS       = 0x28
ORIG_EAX = 0x2C
EIP      = 0x30
CS       = 0x34
EFLAGS   = 0x38
OLDESP   = 0x3C
OLDSS    = 0x40
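These offsets describe the register frame that SAVE_ALL (below) builds and
that the hardware completes with EIP/CS/EFLAGS/OLDESP/OLDSS; viewed as a C
struct, in the style of Linux's struct pt_regs (field names illustrative):

    struct pt_regs {
        unsigned long ebx;      /* 0x00 */
        unsigned long ecx;      /* 0x04 */
        unsigned long edx;      /* 0x08 */
        unsigned long esi;      /* 0x0C */
        unsigned long edi;      /* 0x10 */
        unsigned long ebp;      /* 0x14 */
        unsigned long eax;      /* 0x18 */
        unsigned long ds;       /* 0x1C */
        unsigned long es;       /* 0x20 */
        unsigned long fs;       /* 0x24 */
        unsigned long gs;       /* 0x28 */
        unsigned long orig_eax; /* 0x2C */
        unsigned long eip;      /* 0x30 -- pushed by the CPU from here on */
        unsigned long cs;       /* 0x34 */
        unsigned long eflags;   /* 0x38 */
        unsigned long oldesp;   /* 0x3C */
        unsigned long oldss;    /* 0x40 */
    };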
/* Offsets in task_struct */
PROCESSOR       =  0
HYP_EVENTS      =  2
SHARED_INFO     =  4
EVENT_SEL       =  8
EVENT_ADDR      = 12
FAILSAFE_BUFFER = 16
FAILSAFE_SEL    = 32
FAILSAFE_ADDR   = 36
/* Offsets in shared_info_t. UPCALL_PENDING expands to nothing, so that
 * UPCALL_PENDING(%eax) == (%eax), i.e. offset 0. */
#define UPCALL_PENDING /* 0 */
#define UPCALL_MASK       1
/* Offsets in guest_trap_bounce */
GTB_ERROR_CODE   =  0
GTB_CR2          =  4
GTB_FLAGS        =  8
GTB_CS           = 10
GTB_EIP          = 12
GTBF_TRAP        =  1
GTBF_TRAP_NOCODE =  2
GTBF_TRAP_CR2    =  4

CF_MASK = 0x00000001  /* EFLAGS.CF */
IF_MASK = 0x00000200  /* EFLAGS.IF */
NT_MASK = 0x00004000  /* EFLAGS.NT */
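The GTB_* offsets describe a 16-byte per-CPU record (the code below indexes
it with 'shl $4'); as a C sketch (names illustrative):

    struct guest_trap_bounce {
        unsigned long  error_code; /*  0 */
        unsigned long  cr2;        /*  4 */
        unsigned short flags;      /*  8: GTBF_TRAP{,_NOCODE,_CR2} */
        unsigned short cs;         /* 10 */
        unsigned long  eip;        /* 12 */
    };                             /* sizeof == 16 */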
#define SAVE_ALL_NOSEGREGS \
        cld; \
        pushl %gs; \
        pushl %fs; \
        pushl %es; \
        pushl %ds; \
        pushl %eax; \
        pushl %ebp; \
        pushl %edi; \
        pushl %esi; \
        pushl %edx; \
        pushl %ecx; \
        pushl %ebx;

#define SAVE_ALL \
        SAVE_ALL_NOSEGREGS \
        movl $(__HYPERVISOR_DS),%edx; \
        movl %edx,%ds; \
        movl %edx,%es; \
        movl %edx,%fs; \
        movl %edx,%gs; \
        sti;
/* The per-CPU stack is one 4KB page; a pointer to the current task_struct
 * is kept in the top word of that page. */
#define GET_CURRENT(reg)   \
        movl $4096-4, reg; \
        orl  %esp, reg;    \
        andl $~3,reg;      \
        movl (reg),reg;
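A minimal C rendering of the same trick, assuming the 4KB aligned stack
described above (this get_current is illustrative, not the Xen source):

    struct task_struct;

    static inline struct task_struct *get_current(void)
    {
        unsigned long sp;
        __asm__("movl %%esp, %0" : "=r" (sp));
        /* Round up to the last aligned word of this stack page, where
         * the task pointer is stored. */
        return *(struct task_struct **)((sp | (4096 - 4)) & ~3UL);
    }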
ENTRY(continue_nonidle_task)
        GET_CURRENT(%ebx)
        jmp test_all_events
        ALIGN
/*
 * HYPERVISOR_multicall(call_list, nr_calls)
 *   Execute a list of 'nr_calls' system calls, pointed at by 'call_list'.
 *   This is fairly easy except that:
 *   1. We may fault reading the call list, and must patch that up; and
 *   2. We cannot recursively call HYPERVISOR_multicall, or a malicious
 *      caller could cause our stack to blow up.
 */
do_multicall:
        popl  %eax
        cmpl  $SYMBOL_NAME(multicall_return_from_call),%eax
        je    multicall_return_from_call # refuse to recurse into ourselves
        pushl %ebx
        movl  4(%esp),%ebx              /* EBX == call_list */
        movl  8(%esp),%ecx              /* ECX == nr_calls  */
multicall_loop:
        pushl %ecx
multicall_fault1:
        pushl 20(%ebx)                  # push the five call arguments...
multicall_fault2:
        pushl 16(%ebx)
multicall_fault3:
        pushl 12(%ebx)
multicall_fault4:
        pushl 8(%ebx)
multicall_fault5:
        pushl 4(%ebx)
multicall_fault6:
        movl  (%ebx),%eax               # ...then dispatch on the call index
        andl  $255,%eax
        call  *SYMBOL_NAME(hypervisor_call_table)(,%eax,4)
multicall_return_from_call:
        addl  $20,%esp
        popl  %ecx
        addl  $(ARGS_PER_MULTICALL_ENTRY*4),%ebx
        loop  multicall_loop
        popl  %ebx
        xorl  %eax,%eax
        jmp   ret_from_hypervisor_call
        .section __ex_table,"a"
        .align 4
        .long multicall_fault1, multicall_fixup1
        .long multicall_fault2, multicall_fixup2
        .long multicall_fault3, multicall_fixup3
        .long multicall_fault4, multicall_fixup4
        .long multicall_fault5, multicall_fixup5
        .long multicall_fault6, multicall_fixup6
        .previous
        .section .fixup,"ax"
        # Unwind however many arguments had already been pushed when the
        # fault hit, then fail the whole multicall with -EFAULT.
multicall_fixup6:
        addl  $4,%esp
multicall_fixup5:
        addl  $4,%esp
multicall_fixup4:
        addl  $4,%esp
multicall_fixup3:
        addl  $4,%esp
multicall_fixup2:
        addl  $4,%esp
multicall_fixup1:
        addl  $4,%esp
        popl  %ebx
        movl  $-EFAULT,%eax
        jmp   ret_from_hypervisor_call
        .previous
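A sketch of the call-list record the loop above walks. The stride is fixed
by ARGS_PER_MULTICALL_ENTRY in the public headers, so take the argument
count here as illustrative:

    struct multicall_entry {
        unsigned long op;      /* index into hypervisor_call_table */
        unsigned long args[5]; /* pushed so args[0] is the handler's
                                  first (leftmost) C argument */
    };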
        ALIGN
restore_all_guest:
        # First, may need to restore %ds if clobbered by create_bounce_frame
        pushl %ss
        popl  %ds
        # Second, create a failsafe copy of DS/ES/FS/GS in case any are bad
        leal  DS(%esp),%esi
        leal  FAILSAFE_BUFFER(%ebx),%edi
        movsl
        movsl
        movsl
        movsl
        # Finally, restore guest registers -- faults will cause failsafe
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
1:      popl  %ds
2:      popl  %es
3:      popl  %fs
4:      popl  %gs
        addl  $4,%esp
5:      iret
        .section .fixup,"ax"
        # One of the pops or the IRET above faulted: rebuild the full
        # register frame (with sane %ds/%es) and enter via the failsafe
        # callback instead.
10:     subl  $4,%esp
        pushl %gs
9:      pushl %fs
8:      pushl %es
7:      pushl %ds
6:      pushl %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        pushl %ecx
        pushl %ebx
        pushl %ss
        popl  %ds
        pushl %ss
        popl  %es
        jmp   failsafe_callback
        .previous
        .section __ex_table,"a"
        .align 4
        .long 1b,6b
        .long 2b,7b
        .long 3b,8b
        .long 4b,9b
        .long 5b,10b
        .previous
/* No special register assumptions */
failsafe_callback:
        GET_CURRENT(%ebx)
        movzwl PROCESSOR(%ebx),%eax
        shl   $4,%eax
        lea   guest_trap_bounce(%eax),%edx
        movl  FAILSAFE_ADDR(%ebx),%eax
        movl  %eax,GTB_EIP(%edx)
        movl  FAILSAFE_SEL(%ebx),%eax
        movw  %ax,GTB_CS(%edx)
        call  create_bounce_frame
        subl  $16,%esi              # add DS/ES/FS/GS to failsafe stack frame
        leal  FAILSAFE_BUFFER(%ebx),%ebp
        movl  0(%ebp),%eax          # DS
FAULT1: movl  %eax,(%esi)
        movl  4(%ebp),%eax          # ES
FAULT2: movl  %eax,4(%esi)
        movl  8(%ebp),%eax          # FS
FAULT3: movl  %eax,8(%esi)
        movl  12(%ebp),%eax         # GS
FAULT4: movl  %eax,12(%esi)
        movl  %esi,OLDESP(%esp)
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        addl  $20,%esp              # skip DS/ES/FS/GS/ORIG_EAX
FAULT5: iret
        ALIGN
# Simple restore -- we should never fault as we will only interrupt ring 0
# when sane values have been placed in all registers. The only exception is
# NMI, which may interrupt before good values have been placed in DS-GS.
# The NMI return code deals with this problem itself.
restore_all_xen:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        popl %ds
        popl %es
        popl %fs
        popl %gs
        addl $4,%esp
        iret
        ALIGN
ENTRY(hypervisor_call)
        pushl %eax                  # save orig_eax
        SAVE_ALL
        GET_CURRENT(%ebx)
        andl  $255,%eax
        # NB. The saved EBX/ECX/EDX/ESI/EDI now sit at the top of the stack,
        # so they double as the (up to five) arguments of the C handler.
        call  *SYMBOL_NAME(hypervisor_call_table)(,%eax,4)

ret_from_hypervisor_call:
        movl  %eax,EAX(%esp)        # save the return value
test_all_events:
        xorl  %ecx,%ecx
        notl  %ecx
        cli                         # tests must not race interrupts
/*test_softirqs:*/
        movzwl PROCESSOR(%ebx),%eax
        shl   $6,%eax               # sizeof(irq_cpustat) == 64
        test  %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
        jnz   process_softirqs
/*test_hyp_events:*/
        testw %cx, HYP_EVENTS(%ebx)
        jnz   process_hyp_events
/*test_guest_events:*/
        movl  SHARED_INFO(%ebx),%eax
        testb $0xFF,UPCALL_MASK(%eax)
        jnz   restore_all_guest
        testb $0xFF,UPCALL_PENDING(%eax)
        jz    restore_all_guest
        movb  $1,UPCALL_MASK(%eax)  # Upcalls are masked during delivery
/*process_guest_events:*/
        movzwl PROCESSOR(%ebx),%edx
        shl   $4,%edx               # sizeof(guest_trap_bounce) == 16
        lea   guest_trap_bounce(%edx),%edx
        movl  EVENT_ADDR(%ebx),%eax
        movl  %eax,GTB_EIP(%edx)
        movl  EVENT_SEL(%ebx),%eax
        movw  %ax,GTB_CS(%edx)
        call  create_bounce_frame
        jmp   restore_all_guest
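In C-like pseudocode, the dispatch above (together with the two helpers
below, which jump back to test_all_events) amounts to the following sketch;
field names stand in for the offsets defined at the top of the file:

    for (;;) {
        cli();                        /* tests must not race interrupts */
        if (softirq_pending(cpu))   { sti(); do_softirq();    continue; }
        if (current->hyp_events)    { sti(); do_hyp_events(); continue; }
        if (shared->upcall_mask || !shared->upcall_pending)
            break;                    /* nothing deliverable right now */
        shared->upcall_mask = 1;      /* masked during delivery */
        create_bounce_frame(current->event_sel, current->event_addr);
        break;
    }
    /* fall through to restore_all_guest */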
        ALIGN
process_softirqs:
        sti
        call SYMBOL_NAME(do_softirq)
        jmp  test_all_events

        ALIGN
process_hyp_events:
        sti
        call SYMBOL_NAME(do_hyp_events)
        jmp  test_all_events
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                                        */
/* %edx == guest_trap_bounce, %ebx == task_struct                        */
/* %eax,%ecx are clobbered. %ds:%esi contain new OLDSS/OLDESP.           */
create_bounce_frame:
        mov   CS+4(%esp),%cl
        test  $2,%cl
        jz    1f /* jump if returning to an existing ring-1 activation */
        /* obtain ss/esp from TSS -- no current ring-1 activations */
        movzwl PROCESSOR(%ebx),%eax
        /* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
        /* (8320 == (1<<13) + (1<<7)) */
        movl  %eax,%ecx
        shll  $7,%ecx
        shll  $13,%eax
        addl  %ecx,%eax
        addl  $init_tss + 12,%eax
        movl  (%eax),%esi           /* tss->esp1 */
FAULT6: movl  4(%eax),%ds           /* tss->ss1 */
        /* base of stack frame must contain ss/esp (inter-priv iret) */
        subl  $8,%esi
        movl  OLDESP+4(%esp),%eax
FAULT7: movl  %eax,(%esi)
        movl  OLDSS+4(%esp),%eax
FAULT8: movl  %eax,4(%esi)
        jmp   2f
1:      /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl  OLDESP+4(%esp),%esi
FAULT9: movl  OLDSS+4(%esp),%ds
2:      /* Construct a stack frame: EFLAGS, CS/EIP */
        subl  $12,%esi
        movl  EIP+4(%esp),%eax
FAULT10:movl  %eax,(%esi)
        movl  CS+4(%esp),%eax
FAULT11:movl  %eax,4(%esi)
        movl  EFLAGS+4(%esp),%eax
FAULT12:movl  %eax,8(%esi)
        /* Rewrite our stack frame and return to ring 1.                  */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl  $0xfffcbeff,%eax      /* mask == ~(TF|NT|RF|VM) */
        movl  %eax,EFLAGS+4(%esp)
        movl  %ds,OLDSS+4(%esp)
        movl  %esi,OLDESP+4(%esp)
        movzwl %es:GTB_CS(%edx),%eax
        movl  %eax,CS+4(%esp)
        movl  %es:GTB_EIP(%edx),%eax
        movl  %eax,EIP+4(%esp)
        ret
        .section __ex_table,"a"
        .align 4
        .long FAULT1, kill_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT2, kill_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT3, kill_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT4, kill_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT5, kill_domain_fixup1 # Fault executing failsafe iret
        .long FAULT6, kill_domain_fixup2 # Fault loading ring-1 stack selector
        .long FAULT7, kill_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT8, kill_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT9, kill_domain_fixup2 # Fault loading ring-1 stack selector
        .long FAULT10,kill_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT11,kill_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT12,kill_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT13,kill_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT14,kill_domain_fixup3 # Fault writing to ring-1 stack
        .previous
# This handler kills domains which experience unrecoverable faults.
        .section .fixup,"ax"
kill_domain_fixup1:
        subl  $4,%esp
        SAVE_ALL
        jmp   kill_domain
kill_domain_fixup2:
        addl  $4,%esp
kill_domain_fixup3:
        pushl %ss
        popl  %ds
        jmp   kill_domain
        .previous
        ALIGN
process_guest_exception_and_events:
        movzwl PROCESSOR(%ebx),%eax
        shl   $4,%eax
        lea   guest_trap_bounce(%eax),%edx
        testb $~0,GTB_FLAGS(%edx)
        jz    test_all_events
        call  create_bounce_frame   # just the basic frame
        mov   %es:GTB_FLAGS(%edx),%cl
        test  $GTBF_TRAP_NOCODE,%cl
        jnz   2f
        subl  $4,%esi               # push error_code onto guest frame
        movl  %es:GTB_ERROR_CODE(%edx),%eax
FAULT13:movl  %eax,(%esi)
        test  $GTBF_TRAP_CR2,%cl
        jz    1f
        subl  $4,%esi               # push %cr2 onto guest frame
        movl  %es:GTB_CR2(%edx),%eax
FAULT14:movl  %eax,(%esi)
1:      movl  %esi,OLDESP(%esp)
2:      push  %es                   # unclobber %ds
        pop   %ds
        movb  $0,GTB_FLAGS(%edx)
        jmp   test_all_events
        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movb  CS(%esp),%al
        testb $3,%al                # return to non-supervisor?
        jne   test_all_events
        jmp   restore_all_xen
ENTRY(divide_error)
        pushl $0                    # no error code
        pushl $ SYMBOL_NAME(do_divide_error)
        ALIGN
error_code:
        # NB. The trap stubs push the handler address into the slot where
        # %gs belongs; it is swapped out for the real %gs below.
        pushl %fs
        pushl %es
        pushl %ds
        pushl %eax
        xorl  %eax,%eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        decl  %eax                  # eax = -1
        pushl %ecx
        pushl %ebx
        cld
        movl  %gs,%ecx
        movl  ORIG_EAX(%esp), %esi  # get the error code
        movl  GS(%esp), %edi        # get the function address
        movl  %eax, ORIG_EAX(%esp)
        movl  %ecx, GS(%esp)
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        movl  %edx,%fs
        movl  %edx,%gs
        movl  EFLAGS(%esp),%edx
        testl $0x200,%edx           # Is IF asserted in saved EFLAGS?
        jz    1f                    # Don't STI if it isn't.
        sti
1:      movl  %esp,%edx
        pushl %esi                  # push the error code
        pushl %edx                  # push the pt_regs pointer
        GET_CURRENT(%ebx)
        call  *%edi
        addl  $8,%esp
        movb  CS(%esp),%al
        testb $3,%al
        je    restore_all_xen
        jmp   process_guest_exception_and_events
ENTRY(coprocessor_error)
        pushl $0
        pushl $ SYMBOL_NAME(do_coprocessor_error)
        jmp   error_code

ENTRY(simd_coprocessor_error)
        pushl $0
        pushl $ SYMBOL_NAME(do_simd_coprocessor_error)
        jmp   error_code

ENTRY(device_not_available)
        pushl $0
        pushl $ SYMBOL_NAME(math_state_restore)
        jmp   error_code

ENTRY(debug)
        pushl $0
        pushl $ SYMBOL_NAME(do_debug)
        jmp   error_code

ENTRY(int3)
        pushl $0
        pushl $ SYMBOL_NAME(do_int3)
        jmp   error_code

ENTRY(overflow)
        pushl $0
        pushl $ SYMBOL_NAME(do_overflow)
        jmp   error_code

ENTRY(bounds)
        pushl $0
        pushl $ SYMBOL_NAME(do_bounds)
        jmp   error_code

ENTRY(invalid_op)
        pushl $0
        pushl $ SYMBOL_NAME(do_invalid_op)
        jmp   error_code

ENTRY(coprocessor_segment_overrun)
        pushl $0
        pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun)
        jmp   error_code

# For the following traps the CPU supplies the error code itself,
# so no dummy $0 is pushed.
ENTRY(invalid_TSS)
        pushl $ SYMBOL_NAME(do_invalid_TSS)
        jmp   error_code

ENTRY(segment_not_present)
        pushl $ SYMBOL_NAME(do_segment_not_present)
        jmp   error_code

ENTRY(stack_segment)
        pushl $ SYMBOL_NAME(do_stack_segment)
        jmp   error_code

ENTRY(general_protection)
        pushl $ SYMBOL_NAME(do_general_protection)
        jmp   error_code

ENTRY(alignment_check)
        pushl $ SYMBOL_NAME(do_alignment_check)
        jmp   error_code

ENTRY(page_fault)
        pushl $ SYMBOL_NAME(do_page_fault)
        jmp   error_code

ENTRY(machine_check)
        pushl $0
        pushl $ SYMBOL_NAME(do_machine_check)
        jmp   error_code

ENTRY(spurious_interrupt_bug)
        pushl $0
        pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
        jmp   error_code
ENTRY(nmi)
        # Save state but do not trash the segment registers!
        # We may otherwise be unable to reload them or copy them to ring 1.
        pushl %eax
        SAVE_ALL_NOSEGREGS

        # Check for hardware problems. These are always fatal so we can
        # reload DS and ES when handling them.
        inb   $0x61,%al
        testb $0x80,%al
        jne   nmi_parity_err
        testb $0x40,%al
        jne   nmi_io_err
        movl  %eax,%ebx

        # Okay, it's almost a normal NMI tick. We can only process it if:
        #  A. We are the outermost Xen activation (in which case we have
        #     the selectors safely saved on our stack)
        #  B. DS-GS all contain sane Xen values.
        # In all other cases we bail without touching DS-GS, as we have
        # interrupted an enclosing Xen activation in tricky prologue or
        # epilogue code.
        movb  CS(%esp),%al
        testb $3,%al
        jne   do_watchdog_tick
        movl  DS(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   nmi_badseg
        movl  ES(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   nmi_badseg
        movl  FS(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   nmi_badseg
        movl  GS(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   nmi_badseg
do_watchdog_tick:
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        pushl %ebx                  # reason
        pushl %edx                  # regs
        call  SYMBOL_NAME(do_nmi)
        addl  $8,%esp
        movb  CS(%esp),%al
        testb $3,%al
        je    restore_all_xen
        GET_CURRENT(%ebx)
        jmp   restore_all_guest
nmi_badseg:
        # Discard the frame without touching the segment registers.
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        addl  $20,%esp
        iret

nmi_parity_err:
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        jmp   SYMBOL_NAME(mem_parity_error)

nmi_io_err:
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        jmp   SYMBOL_NAME(io_check_error)
.data
ENTRY(hypervisor_call_table)
        .long SYMBOL_NAME(do_set_trap_table)     /*  0 */
        .long SYMBOL_NAME(do_mmu_update)
        .long SYMBOL_NAME(do_console_write)
        .long SYMBOL_NAME(do_set_gdt)
        .long SYMBOL_NAME(do_stack_switch)
        .long SYMBOL_NAME(do_set_callbacks)      /*  5 */
#ifndef NO_DEVICES_IN_XEN
        .long SYMBOL_NAME(do_net_io_op)
#else
        .long SYMBOL_NAME(do_ni_syscall)
#endif
        .long SYMBOL_NAME(do_fpu_taskswitch)
        .long SYMBOL_NAME(do_sched_op)
        .long SYMBOL_NAME(do_dom0_op)
#ifndef NO_DEVICES_IN_XEN
        .long SYMBOL_NAME(do_network_op)         /* 10 */
        .long SYMBOL_NAME(do_block_io_op)
#else
        .long SYMBOL_NAME(do_ni_syscall)         /* 10 */
        .long SYMBOL_NAME(do_ni_syscall)
#endif
        .long SYMBOL_NAME(do_set_debugreg)
        .long SYMBOL_NAME(do_get_debugreg)
        .long SYMBOL_NAME(do_update_descriptor)
        .long SYMBOL_NAME(do_set_fast_trap)      /* 15 */
        .long SYMBOL_NAME(do_dom_mem_op)
        .long SYMBOL_NAME(do_multicall)
        .long SYMBOL_NAME(do_kbd_op)
        .long SYMBOL_NAME(do_update_va_mapping)
        .long SYMBOL_NAME(do_set_timer_op)       /* 20 */
        .long SYMBOL_NAME(do_event_channel_op)
        .long SYMBOL_NAME(do_xen_version)
        .long SYMBOL_NAME(do_console_io)
        .long SYMBOL_NAME(do_physdev_op)
        .rept NR_syscalls-((.-hypervisor_call_table)/4)
        .long SYMBOL_NAME(do_ni_syscall)
        .endr
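For context, a guest reaches one of these entries by loading the table
index into %eax and up to five arguments into %ebx-%edi, then trapping
into the hypervisor_call entry point above; in Xen of this vintage the
trap instruction was int $0x82. A hedged C sketch of a one-argument stub
(hypercall1 is illustrative, not the macro from the Xen headers):

    static inline long hypercall1(int op, unsigned long arg)
    {
        long ret;
        __asm__ __volatile__ (
            "int $0x82"             /* historical Xen trap vector */
            : "=a" (ret)
            : "0" (op), "b" (arg)
            : "memory" );
        return ret;
    }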