ia64/xen-unstable: xen/arch/i386/entry.S @ 877:d1833d5b387b

bitkeeper revision 1.546.1.1 (3fa3e1b4UwJQtnD-lZcvMsbqR-XhSA)

sched hypercall unification -- tidying things up in anticipation
of suspend/resume

author   akw27@labyrinth.cl.cam.ac.uk
date     Sat Nov 01 16:39:16 2003 +0000
parents  64754ee21add
children 61c3759bc7be
/*
 * linux/arch/i386/entry.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * Stack layout in 'ret_from_system_call':
 *       0(%esp) - %ebx
 *       4(%esp) - %ecx
 *       8(%esp) - %edx
 *       C(%esp) - %esi
 *      10(%esp) - %edi
 *      14(%esp) - %ebp
 *      18(%esp) - %eax
 *      1C(%esp) - %ds
 *      20(%esp) - %es
 *      24(%esp) - %fs
 *      28(%esp) - %gs
 *      2C(%esp) - orig_eax
 *      30(%esp) - %eip
 *      34(%esp) - %cs
 *      38(%esp) - %eflags
 *      3C(%esp) - %oldesp
 *      40(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */
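/*
 * Illustrative only: the layout above corresponds to a C view like the
 * following sketch (in the style of Linux's struct pt_regs; this file
 * itself relies only on the numeric offsets defined below, and the field
 * names here are ours):
 *
 *      struct saved_regs {
 *          unsigned long ebx, ecx, edx, esi, edi, ebp, eax;
 *          unsigned long ds, es, fs, gs;   // selectors saved as 32-bit words
 *          unsigned long orig_eax;         // error code / original %eax
 *          unsigned long eip, cs, eflags;  // hardware exception frame
 *          unsigned long oldesp, oldss;    // only present on privilege change
 *      };
 */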
/* The idea for callbacks from monitor -> guest OS.
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find
 * out the privilege level of the return code-selector. That code
 * would just be a hassle to write, and would need to account for running
 * off the end of the GDT/LDT, for example. For all callbacks we check
 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from
 * that we're safe as we don't allow a guest OS to install ring-0
 * privileges into the GDT/LDT.
 * It's up to the guest OS to ensure all returns via the IDT are to ring 1.
 * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather
 * than the correct ring) and bad things are bound to ensue -- IRET is
 * likely to fault, and we may end up killing the domain (no harm can
 * come to the hypervisor itself, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * callback is delayed until next return to ring != 0.
 * If return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *   return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, original values for DS/ES/FS/GS are placed on stack rather than
 * in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have them
 * saved/restored in guest OS. Furthermore, if we load them we may cause
 * a fault if they are invalid, which is a hassle to deal with. We avoid
 * that problem if we don't load them :-) This property allows us to use
 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
 * on return to ring != 0, we can simply package it up as a return via
 * the failsafe callback, and let the guest OS sort it out (perhaps by
 * killing an application process). Note that we also do this for any
 * faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - creating a callback stack frame (due to bad ring-1 stack).
 *  - faulting IRET on entry to failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
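/*
 * Illustrative only: a C view of the failsafe-callback frame described
 * above, as the guest's failsafe handler would see it at its %esp
 * (field names are ours, not from any header):
 *
 *      struct failsafe_frame {
 *          unsigned long ds, es, fs, gs;   // original data selectors
 *          unsigned long eip, cs, eflags;  // normal exception-return frame
 *          // unsigned long esp, ss;       // only on inter-privilege return
 *      };
 */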
#include <xeno/config.h>
#include <xeno/errno.h>
#include <hypervisor-ifs/hypervisor-if.h>
#include <asm/smp.h>

EBX      = 0x00
ECX      = 0x04
EDX      = 0x08
ESI      = 0x0C
EDI      = 0x10
EBP      = 0x14
EAX      = 0x18
DS       = 0x1C
ES       = 0x20
FS       = 0x24
GS       = 0x28
ORIG_EAX = 0x2C
EIP      = 0x30
CS       = 0x34
EFLAGS   = 0x38
OLDESP   = 0x3C
OLDSS    = 0x40

/* Offsets in task_struct */
PROCESSOR       =  0
HYP_EVENTS      =  2
SHARED_INFO     =  4
EVENT_SEL       =  8
EVENT_ADDR      = 12
FAILSAFE_BUFFER = 16
FAILSAFE_SEL    = 32
FAILSAFE_ADDR   = 36

/* Offsets in shared_info_t */
EVENTS      = 0
EVENTS_MASK = 4

/* Offsets in guest_trap_bounce */
GTB_ERROR_CODE   =  0
GTB_CR2          =  4
GTB_FLAGS        =  8
GTB_CS           = 10
GTB_EIP          = 12
GTBF_TRAP        =  1
GTBF_TRAP_NOCODE =  2
GTBF_TRAP_CR2    =  4
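/*
 * Illustrative only: the offsets above imply a guest_trap_bounce layout
 * like this sketch (16 bytes per entry, matching the 'shl $4' scaling
 * used below; the real definition lives in the C headers):
 *
 *      struct guest_trap_bounce {
 *          unsigned long  error_code;  // +0
 *          unsigned long  cr2;         // +4
 *          unsigned short flags;       // +8   (GTBF_* values)
 *          unsigned short cs;          // +10
 *          unsigned long  eip;         // +12
 *      };
 */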
CF_MASK = 0x00000001
IF_MASK = 0x00000200
NT_MASK = 0x00004000

#define SAVE_ALL_NOSEGREGS \
        cld; \
        pushl %gs; \
        pushl %fs; \
        pushl %es; \
        pushl %ds; \
        pushl %eax; \
        pushl %ebp; \
        pushl %edi; \
        pushl %esi; \
        pushl %edx; \
        pushl %ecx; \
        pushl %ebx; \

#define SAVE_ALL_NOSTI \
        SAVE_ALL_NOSEGREGS \
        movl $(__HYPERVISOR_DS),%edx; \
        movl %edx,%ds; \
        movl %edx,%es; \
        movl %edx,%fs; \
        movl %edx,%gs;

#define SAVE_ALL \
        SAVE_ALL_NOSTI \
        sti;

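/*
 * GET_CURRENT (below) finds 'current' from the stack pointer: each task's
 * hypervisor stack lives in a single 4KB-aligned page, and the last
 * aligned word of that page holds the task_struct pointer. Rounding %esp
 * up to that word and dereferencing it yields the current task.
 */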
#define GET_CURRENT(reg)   \
        movl $4096-4, reg; \
        orl %esp, reg;     \
        andl $~3,reg;      \
        movl (reg),reg;

ENTRY(continue_nonidle_task)
        GET_CURRENT(%ebx)
        jmp test_all_events

        ALIGN
/*
 * HYPERVISOR_multicall(call_list, nr_calls)
 *   Execute a list of 'nr_calls' system calls, pointed at by 'call_list'.
 *   This is fairly easy except that:
 *   1. We may fault reading the call list, and must patch that up; and
 *   2. We cannot recursively call HYPERVISOR_multicall, or a malicious
 *      caller could cause our stack to blow up.
 */
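/*
 * Illustrative only: the pushes below treat each call_list entry as an
 * opcode word followed by five argument words (on that layout
 * BYTES_PER_MULTICALL_ENTRY would be 24) -- a sketch, not the header
 * definition:
 *
 *      struct multicall_entry {
 *          unsigned long op;       // index into hypervisor_call_table
 *          unsigned long args[5];  // pushed right-to-left below
 *      };
 */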
do_multicall:
        popl %eax
        cmpl $SYMBOL_NAME(multicall_return_from_call),%eax
        je multicall_return_from_call
        pushl %ebx
        movl 4(%esp),%ebx            /* EBX == call_list */
        movl 8(%esp),%ecx            /* ECX == nr_calls  */
multicall_loop:
        pushl %ecx
multicall_fault1:
        pushl 20(%ebx)
multicall_fault2:
        pushl 16(%ebx)
multicall_fault3:
        pushl 12(%ebx)
multicall_fault4:
        pushl 8(%ebx)
multicall_fault5:
        pushl 4(%ebx)
multicall_fault6:
        movl (%ebx),%eax
        andl $255,%eax
        call *SYMBOL_NAME(hypervisor_call_table)(,%eax,4)
multicall_return_from_call:
        addl $20,%esp
        popl %ecx
        addl $BYTES_PER_MULTICALL_ENTRY,%ebx
        loop multicall_loop
        popl %ebx
        xorl %eax,%eax
        jmp ret_from_hypervisor_call

.section __ex_table,"a"
        .align 4
        .long multicall_fault1, multicall_fixup1
        .long multicall_fault2, multicall_fixup2
        .long multicall_fault3, multicall_fixup3
        .long multicall_fault4, multicall_fixup4
        .long multicall_fault5, multicall_fixup5
        .long multicall_fault6, multicall_fixup6
.previous

.section .fixup,"ax"
multicall_fixup6:
        addl $4,%esp
multicall_fixup5:
        addl $4,%esp
multicall_fixup4:
        addl $4,%esp
multicall_fixup3:
        addl $4,%esp
multicall_fixup2:
        addl $4,%esp
multicall_fixup1:
        addl $4,%esp
        popl %ebx
        movl $-EFAULT,%eax
        jmp ret_from_hypervisor_call
.previous

        ALIGN
restore_all_guest:
        # First, may need to restore %ds if clobbered by create_bounce_frame
        pushl %ss
        popl %ds
        # Second, create a failsafe copy of DS,ES,FS,GS in case any are bad
        leal DS(%esp),%esi
        leal FAILSAFE_BUFFER(%ebx),%edi
        movsl
        movsl
        movsl
        movsl
        # Finally, restore guest registers -- faults will cause failsafe
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
1:      popl %ds
2:      popl %es
3:      popl %fs
4:      popl %gs
        addl $4,%esp
5:      iret
.section .fixup,"ax"
10:     subl $4,%esp
        pushl %gs
9:      pushl %fs
8:      pushl %es
7:      pushl %ds
6:      pushl %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        pushl %ecx
        pushl %ebx
        pushl %ss
        popl %ds
        pushl %ss
        popl %es
        jmp failsafe_callback
.previous
.section __ex_table,"a"
        .align 4
        .long 1b,6b
        .long 2b,7b
        .long 3b,8b
        .long 4b,9b
        .long 5b,10b
.previous

/* No special register assumptions */
failsafe_callback:
        GET_CURRENT(%ebx)
        movzwl PROCESSOR(%ebx),%eax
        shl $4,%eax
        lea guest_trap_bounce(%eax),%edx
        movl FAILSAFE_ADDR(%ebx),%eax
        movl %eax,GTB_EIP(%edx)
        movl FAILSAFE_SEL(%ebx),%eax
        movw %ax,GTB_CS(%edx)
        call create_bounce_frame
        subl $16,%esi                # add DS/ES/FS/GS to failsafe stack frame
        leal FAILSAFE_BUFFER(%ebx),%ebp
        movl 0(%ebp),%eax            # DS
FAULT1: movl %eax,(%esi)
        movl 4(%ebp),%eax            # ES
FAULT2: movl %eax,4(%esi)
        movl 8(%ebp),%eax            # FS
FAULT3: movl %eax,8(%esi)
        movl 12(%ebp),%eax           # GS
FAULT4: movl %eax,12(%esi)
        movl %esi,OLDESP(%esp)
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $20,%esp                # skip DS/ES/FS/GS/ORIG_EAX
FAULT5: iret

        ALIGN
# Simple restore -- we should never fault as we will only interrupt ring 0
# when sane values have been placed in all registers. The only exception is
# NMI, which may interrupt before good values have been placed in DS-GS.
# The NMI return code deals with this problem itself.
restore_all_xen:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        popl %ds
        popl %es
        popl %fs
        popl %gs
        addl $4,%esp
        iret

        ALIGN
ENTRY(hypervisor_call)
        pushl %eax                   # save orig_eax
        SAVE_ALL
        GET_CURRENT(%ebx)
        andl $255,%eax
        call *SYMBOL_NAME(hypervisor_call_table)(,%eax,4)

ret_from_hypervisor_call:
        movl %eax,EAX(%esp)          # save the return value

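/*
 * Illustrative only: a C sketch of the guest-event test performed below
 * (assuming bit 31 of events_mask acts as the master-enable switch, as
 * the 'shl $31' / 'btrl $31' sequence implies):
 *
 *      pending = shared->events & shared->events_mask;
 *      if ((shared->events_mask & (1u << 31)) && pending != 0)
 *          shared->events_mask &= ~(pending | (1u << 31)); // then bounce
 */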
test_all_events:
        xorl %ecx,%ecx
        notl %ecx
        cli                          # tests must not race interrupts
/*test_softirqs:*/
        movzwl PROCESSOR(%ebx),%eax
        shl $6,%eax                  # sizeof(irq_cpustat) == 64
        test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
        jnz process_softirqs
/*test_hyp_events:*/
        testw %cx, HYP_EVENTS(%ebx)
        jnz process_hyp_events
/*test_guest_events:*/
        movl SHARED_INFO(%ebx),%eax
        shl $31,%ecx                 # %ecx = EVENTS_MASTER_ENABLE_MASK
        test %ecx,EVENTS_MASK(%eax)
        jz restore_all_guest         # only notify if master switch enabled
        movl EVENTS(%eax),%ecx
        andl EVENTS_MASK(%eax),%ecx
        jz restore_all_guest         # skip if no events to deliver
        notl %ecx
        btrl $31,%ecx                # NB. We clear all events that are
        andl %ecx,EVENTS_MASK(%eax)  #     being delivered + master enable.
/*process_guest_events:*/
        movzwl PROCESSOR(%ebx),%edx
        shl $4,%edx                  # sizeof(guest_trap_bounce) == 16
        lea guest_trap_bounce(%edx),%edx
        movl EVENT_ADDR(%ebx),%eax
        movl %eax,GTB_EIP(%edx)
        movl EVENT_SEL(%ebx),%eax
        movw %ax,GTB_CS(%edx)
        call create_bounce_frame
        jmp restore_all_guest

        ALIGN
process_softirqs:
        sti
        call SYMBOL_NAME(do_softirq)
        jmp test_all_events

        ALIGN
process_hyp_events:
        sti
        call SYMBOL_NAME(do_hyp_events)
        jmp test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:  */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                              */
/* %edx == guest_trap_bounce, %ebx == task_struct              */
/* %eax,%ecx are clobbered. %ds:%esi contain new OLDSS/OLDESP. */
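/*
 * Illustrative only: the frame the stores below produce at the new
 * %ds:%esi, lowest address first:
 *
 *      %esi+0:  EIP     \
 *      %esi+4:  CS       } always written (CS/EIP from guest_trap_bounce)
 *      %esi+8:  EFLAGS  /
 *      %esi+12: ESP     \  only when entered from ring > 1
 *      %esi+16: SS      /  (inter-privilege return)
 */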
create_bounce_frame:
        mov CS+4(%esp),%cl
        test $2,%cl
        jz 1f                /* jump if returning to an existing ring-1 activation */
        /* obtain ss/esp from TSS -- no current ring-1 activations */
        movzwl PROCESSOR(%ebx),%eax
        shll $8,%eax         /* multiply by 256 */
        addl $init_tss + 12,%eax
        movl (%eax),%esi     /* tss->esp1 */
FAULT6: movl 4(%eax),%ds     /* tss->ss1 */
        /* base of stack frame must contain ss/esp (inter-priv iret) */
        subl $8,%esi
        movl OLDESP+4(%esp),%eax
FAULT7: movl %eax,(%esi)
        movl OLDSS+4(%esp),%eax
FAULT8: movl %eax,4(%esi)
        jmp 2f
1:      /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl OLDESP+4(%esp),%esi
FAULT9: movl OLDSS+4(%esp),%ds
2:      /* Construct a stack frame: EFLAGS, CS/EIP */
        subl $12,%esi
        movl EIP+4(%esp),%eax
FAULT10: movl %eax,(%esi)
        movl CS+4(%esp),%eax
FAULT11: movl %eax,4(%esi)
        movl EFLAGS+4(%esp),%eax
FAULT12: movl %eax,8(%esi)
        /* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl $0xfffcbeff,%eax
        movl %eax,EFLAGS+4(%esp)
        movl %ds,OLDSS+4(%esp)
        movl %esi,OLDESP+4(%esp)
        movzwl %es:GTB_CS(%edx),%eax
        movl %eax,CS+4(%esp)
        movl %es:GTB_EIP(%edx),%eax
        movl %eax,EIP+4(%esp)
        ret

.section __ex_table,"a"
        .align 4
        .long FAULT1,  kill_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT2,  kill_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT3,  kill_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT4,  kill_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT5,  kill_domain_fixup1 # Fault executing failsafe iret
        .long FAULT6,  kill_domain_fixup2 # Fault loading ring-1 stack selector
        .long FAULT7,  kill_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT8,  kill_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT9,  kill_domain_fixup2 # Fault loading ring-1 stack selector
        .long FAULT10, kill_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT11, kill_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT12, kill_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT13, kill_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT14, kill_domain_fixup3 # Fault writing to ring-1 stack
.previous

# This handler kills domains which experience unrecoverable faults.
.section .fixup,"ax"
kill_domain_fixup1:
        subl $4,%esp
        SAVE_ALL
        jmp kill_domain
kill_domain_fixup2:
        addl $4,%esp
kill_domain_fixup3:
        pushl %ss
        popl %ds
        jmp kill_domain
.previous

        ALIGN
process_guest_exception_and_events:
        movzwl PROCESSOR(%ebx),%eax
        shl $4,%eax
        lea guest_trap_bounce(%eax),%edx
        testb $~0,GTB_FLAGS(%edx)
        jz test_all_events
        call create_bounce_frame     # just the basic frame
        mov %es:GTB_FLAGS(%edx),%cl
        test $GTBF_TRAP_NOCODE,%cl
        jnz 2f
        subl $4,%esi                 # push error_code onto guest frame
        movl %es:GTB_ERROR_CODE(%edx),%eax
FAULT13: movl %eax,(%esi)
        test $GTBF_TRAP_CR2,%cl
        jz 1f
        subl $4,%esi                 # push %cr2 onto guest frame
        movl %es:GTB_CR2(%edx),%eax
FAULT14: movl %eax,(%esi)
1:      movl %esi,OLDESP(%esp)
2:      push %es                     # unclobber %ds
        pop %ds
        movb $0,GTB_FLAGS(%edx)
        jmp test_all_events

        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movb CS(%esp),%al
        testb $3,%al                 # return to non-supervisor?
        jne test_all_events
        jmp restore_all_xen

ENTRY(divide_error)
        pushl $0                     # no error code
        pushl $ SYMBOL_NAME(do_divide_error)
        ALIGN
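# Common fault/exception entry. On arrival the stack holds, from the top:
# the C handler address (pushed by the stub above), the error code (or the
# stub's dummy 0), then the hardware frame {EIP, CS, EFLAGS[, ESP, SS]}.
# The code below saves the general registers around those two words, then
# swaps them out: the handler-address slot becomes the saved %gs and the
# error-code slot becomes ORIG_EAX (-1), yielding the standard frame layout.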
error_code:
        pushl %fs
        pushl %es
        pushl %ds
        pushl %eax
        xorl %eax,%eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        decl %eax                    # eax = -1
        pushl %ecx
        pushl %ebx
        cld
        movl %gs,%ecx
        movl ORIG_EAX(%esp), %esi    # get the error code
        movl GS(%esp), %edi          # get the function address
        movl %eax, ORIG_EAX(%esp)
        movl %ecx, GS(%esp)
        movl %esp,%edx
        pushl %esi                   # push the error code
        pushl %edx                   # push the pt_regs pointer
        movl $(__HYPERVISOR_DS),%edx
        movl %edx,%ds
        movl %edx,%es
        GET_CURRENT(%ebx)
        call *%edi
        addl $8,%esp
        movb CS(%esp),%al
        testb $3,%al
        je restore_all_xen
        jmp process_guest_exception_and_events

ENTRY(coprocessor_error)
        pushl $0
        pushl $ SYMBOL_NAME(do_coprocessor_error)
        jmp error_code

ENTRY(simd_coprocessor_error)
        pushl $0
        pushl $ SYMBOL_NAME(do_simd_coprocessor_error)
        jmp error_code

ENTRY(device_not_available)
        pushl $0
        pushl $ SYMBOL_NAME(math_state_restore)
        jmp error_code

ENTRY(debug)
        pushl $0
        pushl $ SYMBOL_NAME(do_debug)
        jmp error_code

ENTRY(int3)
        pushl $0
        pushl $ SYMBOL_NAME(do_int3)
        jmp error_code

ENTRY(overflow)
        pushl $0
        pushl $ SYMBOL_NAME(do_overflow)
        jmp error_code

ENTRY(bounds)
        pushl $0
        pushl $ SYMBOL_NAME(do_bounds)
        jmp error_code

ENTRY(invalid_op)
        pushl $0
        pushl $ SYMBOL_NAME(do_invalid_op)
        jmp error_code

ENTRY(coprocessor_segment_overrun)
        pushl $0
        pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun)
        jmp error_code

ENTRY(invalid_TSS)
        pushl $ SYMBOL_NAME(do_invalid_TSS)
        jmp error_code

ENTRY(segment_not_present)
        pushl $ SYMBOL_NAME(do_segment_not_present)
        jmp error_code

ENTRY(stack_segment)
        pushl $ SYMBOL_NAME(do_stack_segment)
        jmp error_code

ENTRY(general_protection)
        pushl $ SYMBOL_NAME(do_general_protection)
        jmp error_code

ENTRY(alignment_check)
        pushl $ SYMBOL_NAME(do_alignment_check)
        jmp error_code

ENTRY(page_fault)
        pushl $ SYMBOL_NAME(do_page_fault)
        jmp error_code

ENTRY(machine_check)
        pushl $0
        pushl $ SYMBOL_NAME(do_machine_check)
        jmp error_code

ENTRY(spurious_interrupt_bug)
        pushl $0
        pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
        jmp error_code

ENTRY(nmi)
        # Save state but do not trash the segment registers!
        # We may otherwise be unable to reload them or copy them to ring 1.
        pushl %eax
        SAVE_ALL_NOSEGREGS

        # Check for hardware problems. These are always fatal so we can
        # reload DS and ES when handling them.
        inb $0x61,%al
        testb $0x80,%al
        jne nmi_parity_err
        testb $0x40,%al
        jne nmi_io_err
        movl %eax,%ebx

        # Okay, it's almost a normal NMI tick. We can only process it if:
        #  1. We're the outermost Xen activation (in which case we have
        #     the selectors safely saved on our stack)
        #  2. DS-GS all contain sane Xen values.
        # In all other cases we bail without touching DS-GS, as we've
        # interrupted an enclosing Xen activation in tricky prologue or
        # epilogue code.
        movb CS(%esp),%al
        testb $3,%al
        jne do_watchdog_tick
        movl DS(%esp),%eax
        cmpw $(__HYPERVISOR_DS),%ax
        jne nmi_badseg
        movl ES(%esp),%eax
        cmpw $(__HYPERVISOR_DS),%ax
        jne nmi_badseg
        movl FS(%esp),%eax
        cmpw $(__HYPERVISOR_DS),%ax
        jne nmi_badseg
        movl GS(%esp),%eax
        cmpw $(__HYPERVISOR_DS),%ax
        jne nmi_badseg

do_watchdog_tick:
        movl $(__HYPERVISOR_DS),%edx
        movl %edx,%ds
        movl %edx,%es
        movl %esp,%edx
        pushl %ebx                   # reason
        pushl %edx                   # regs
        call SYMBOL_NAME(do_nmi)
        addl $8,%esp
        movb CS(%esp),%al
        testb $3,%al
        je restore_all_xen
        GET_CURRENT(%ebx)
        jmp restore_all_guest

nmi_badseg:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $20,%esp
        iret

nmi_parity_err:
        movl $(__HYPERVISOR_DS),%edx
        movl %edx,%ds
        movl %edx,%es
        jmp SYMBOL_NAME(mem_parity_error)

nmi_io_err:
        movl $(__HYPERVISOR_DS),%edx
        movl %edx,%ds
        movl %edx,%es
        jmp SYMBOL_NAME(io_check_error)
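
# Table of hypercall handlers, indexed by the low 8 bits of %eax on entry
# to hypervisor_call (and by each multicall entry's op word); unused slots
# are padded with sys_ni_syscall up to NR_syscalls entries.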
.data
ENTRY(hypervisor_call_table)
        .long SYMBOL_NAME(do_set_trap_table)
        .long SYMBOL_NAME(do_mmu_update)
        .long SYMBOL_NAME(do_console_write)
        .long SYMBOL_NAME(do_set_gdt)
        .long SYMBOL_NAME(do_stack_switch)
        .long SYMBOL_NAME(do_set_callbacks)
        .long SYMBOL_NAME(do_net_io_op)
        .long SYMBOL_NAME(do_fpu_taskswitch)
        .long SYMBOL_NAME(do_sched_op)
        .long SYMBOL_NAME(do_dom0_op)
        .long SYMBOL_NAME(do_network_op)
        .long SYMBOL_NAME(do_block_io_op)
        .long SYMBOL_NAME(do_set_debugreg)
        .long SYMBOL_NAME(do_get_debugreg)
        .long SYMBOL_NAME(do_update_descriptor)
        .long SYMBOL_NAME(do_set_fast_trap)
        .long SYMBOL_NAME(do_dom_mem_op)
        .long SYMBOL_NAME(do_multicall)
        .long SYMBOL_NAME(do_kbd_op)
        .long SYMBOL_NAME(do_update_va_mapping)
        .rept NR_syscalls-((.-hypervisor_call_table)/4)
        .long SYMBOL_NAME(sys_ni_syscall)
        .endr