/*
 *  linux/arch/i386/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * Stack layout in 'ret_from_hypervisor_call':
 *       0(%esp) - %ebx
 *       4(%esp) - %ecx
 *       8(%esp) - %edx
 *       C(%esp) - %esi
 *      10(%esp) - %edi
 *      14(%esp) - %ebp
 *      18(%esp) - %eax
 *      1C(%esp) - %ds
 *      20(%esp) - %es
 *      24(%esp) - %fs
 *      28(%esp) - %gs
 *      2C(%esp) - orig_eax
 *      30(%esp) - %eip
 *      34(%esp) - %cs
 *      38(%esp) - %eflags
 *      3C(%esp) - %oldesp
 *      40(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */
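
/*
 * For reference, the layout above corresponds to a C view along these
 * lines (an illustrative sketch only -- the struct name is hypothetical
 * and the segment registers are saved as full 32-bit words by pushl):
 *
 *     struct frame {
 *         unsigned long ebx, ecx, edx, esi, edi, ebp, eax;
 *         unsigned long ds, es, fs, gs;
 *         unsigned long orig_eax;
 *         unsigned long eip, cs, eflags, oldesp, oldss;
 *     };
 */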

/* The idea for callbacks from monitor -> guest OS.
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find
 * out the privilege level of the return code-selector. That code
 * would just be a hassle to write, and would need to account for running
 * off the end of the GDT/LDT, for example. For all callbacks we check
 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from
 * that we're safe, as we don't allow a guest OS to install ring-0
 * privileges into the GDT/LDT.
 * It's up to the guest OS to ensure all returns via the IDT are to ring 1.
 * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather
 * than the correct ring) and bad things are bound to ensue -- IRET is
 * likely to fault, and we may end up killing the domain (no harm can
 * come to the hypervisor itself, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 *     { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *       return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, original values for DS/ES/FS/GS are placed on the stack rather
 * than in DS/ES/FS/GS themselves. Why? It saves us loading them, only to
 * have them saved/restored in the guest OS. Furthermore, if we load them we
 * may cause a fault if they are invalid, which is a hassle to deal with. We
 * avoid that problem if we don't load them :-) This property allows us to
 * use the failsafe callback as a fallback: if we ever fault on loading
 * DS/ES/FS/GS on return to ring != 0, we can simply package it up as a
 * return via the failsafe callback, and let the guest OS sort it out
 * (perhaps by killing an application process). Note that we also do this
 * for any faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - creating a callback stack frame (due to a bad ring-1 stack).
 *  - faulting IRET on entry to the failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
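
/*
 * In pseudo-C, the delivery decision described above (a sketch of the
 * logic implemented by create_bounce_frame below; ring_of() is a
 * hypothetical helper, not literal source):
 *
 *     if (ring_of(return_cs) == 0)
 *         defer until next return to ring != 0;
 *     else if (ring_of(return_cs) == 1)
 *         build frame at return SS/ESP;       -- intra-privilege iret
 *     else
 *         build frame at SS/ESP from the TSS; -- inter-privilege iret
 */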

#include <xen/config.h>
#include <xen/errno.h>
#include <hypervisor-ifs/hypervisor-if.h>

EBX      = 0x00
ECX      = 0x04
EDX      = 0x08
ESI      = 0x0C
EDI      = 0x10
EBP      = 0x14
EAX      = 0x18
DS       = 0x1C
ES       = 0x20
FS       = 0x24
GS       = 0x28
ORIG_EAX = 0x2C
EIP      = 0x30
CS       = 0x34
EFLAGS   = 0x38
OLDESP   = 0x3C
OLDSS    = 0x40

/* Offsets in task_struct */
PROCESSOR       =  0
HYP_EVENTS      =  2
SHARED_INFO     =  4
EVENT_SEL       =  8
EVENT_ADDR      = 12
FAILSAFE_BUFFER = 16
FAILSAFE_SEL    = 32
FAILSAFE_ADDR   = 36

/* Offsets in shared_info_t */
#define UPCALL_PENDING /* 0 */  /* expands to nothing -- offset 0 */
#define UPCALL_MASK       1

/* Offsets in guest_trap_bounce */
GTB_ERROR_CODE   =  0
GTB_CR2          =  4
GTB_FLAGS        =  8
GTB_CS           = 10
GTB_EIP          = 12
GTBF_TRAP        =  1
GTBF_TRAP_NOCODE =  2
GTBF_TRAP_CR2    =  4
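
/*
 * The GTB_* offsets imply a per-CPU bounce descriptor along these lines
 * (an illustrative sketch -- the field names are hypothetical; only the
 * offsets and widths come from the constants above):
 *
 *     struct guest_trap_bounce {
 *         unsigned long  error_code;  --  0
 *         unsigned long  cr2;         --  4
 *         unsigned short flags;       --  8  (GTBF_* bits)
 *         unsigned short cs;          -- 10
 *         unsigned long  eip;         -- 12
 *     };                              -- 16 bytes: hence 'shl $4' scaling
 */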

CF_MASK = 0x00000001
IF_MASK = 0x00000200
NT_MASK = 0x00004000

#define SAVE_ALL_NOSEGREGS \
        cld; \
        pushl %gs; \
        pushl %fs; \
        pushl %es; \
        pushl %ds; \
        pushl %eax; \
        pushl %ebp; \
        pushl %edi; \
        pushl %esi; \
        pushl %edx; \
        pushl %ecx; \
        pushl %ebx; \

#define SAVE_ALL \
        SAVE_ALL_NOSEGREGS \
        movl $(__HYPERVISOR_DS),%edx; \
        movl %edx,%ds; \
        movl %edx,%es; \
        movl %edx,%fs; \
        movl %edx,%gs; \
        sti;

#define GET_CURRENT(reg)   \
        movl $4096-4, reg; \
        orl  %esp, reg;    \
        andl $~3,reg;      \
        movl (reg),reg;
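
/*
 * GET_CURRENT relies on the current task pointer being stored in the last
 * word of the 4KB stack page. In C, roughly (a sketch; the helper name is
 * hypothetical):
 *
 *     struct task_struct *get_current(unsigned long esp)
 *     {
 *         -- round up to the last word of this stack page, mask alignment
 *         return *(struct task_struct **)((esp | (4096 - 4)) & ~3UL);
 *     }
 */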

ENTRY(continue_nonidle_task)
        GET_CURRENT(%ebx)
        jmp   test_all_events

        ALIGN
/*
 * HYPERVISOR_multicall(call_list, nr_calls)
 *   Execute a list of 'nr_calls' system calls, pointed at by 'call_list'.
 * This is fairly easy except that:
 *   1. We may fault reading the call list, and must patch that up; and
 *   2. We cannot recursively call HYPERVISOR_multicall, or a malicious
 *      caller could cause our stack to blow up.
 */
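
/*
 * The offsets used below imply a call-list entry along these lines (an
 * illustrative sketch -- the canonical definition lives in the
 * hypervisor-ifs headers; the loop advances by ARGS_PER_MULTICALL_ENTRY
 * words per entry):
 *
 *     typedef struct {
 *         unsigned long op;       --  0: index into hypervisor_call_table
 *         unsigned long args[6];  --  4..24: args[0..4] are pushed as
 *     } multicall_entry_t;        --  call arguments; args[5] (offset 24)
 *                                 --  receives the return value
 */
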
do_multicall:
        popl  %eax
        cmpl  $SYMBOL_NAME(multicall_return_from_call),%eax
        je    multicall_return_from_call  # refuse recursion: resume the loop
        pushl %ebx
        movl  4(%esp),%ebx  /* EBX == call_list */
        movl  8(%esp),%ecx  /* ECX == nr_calls  */
multicall_loop:
        pushl %ecx
multicall_fault1:
        pushl 20(%ebx)      # args[4]
multicall_fault2:
        pushl 16(%ebx)      # args[3]
multicall_fault3:
        pushl 12(%ebx)      # args[2]
multicall_fault4:
        pushl 8(%ebx)       # args[1]
multicall_fault5:
        pushl 4(%ebx)       # args[0]
multicall_fault6:
        movl  (%ebx),%eax   # op
        andl  $255,%eax
        call  *SYMBOL_NAME(hypervisor_call_table)(,%eax,4)
multicall_return_from_call:
multicall_fault7:
        movl  %eax,24(%ebx) # args[5] == result
        addl  $20,%esp
        popl  %ecx
        addl  $(ARGS_PER_MULTICALL_ENTRY*4),%ebx
        loop  multicall_loop
        popl  %ebx
        xorl  %eax,%eax
        jmp   ret_from_hypervisor_call

.section __ex_table,"a"
        .align 4
        .long multicall_fault1, multicall_fixup1
        .long multicall_fault2, multicall_fixup2
        .long multicall_fault3, multicall_fixup3
        .long multicall_fault4, multicall_fixup4
        .long multicall_fault5, multicall_fixup5
        .long multicall_fault6, multicall_fixup6
.previous

.section .fixup,"ax"
multicall_fixup6:
        addl  $4,%esp
multicall_fixup5:
        addl  $4,%esp
multicall_fixup4:
        addl  $4,%esp
multicall_fixup3:
        addl  $4,%esp
multicall_fixup2:
        addl  $4,%esp
multicall_fixup1:
        addl  $4,%esp
        popl  %ebx
        movl  $-EFAULT,%eax
        jmp   ret_from_hypervisor_call
.previous

        ALIGN
restore_all_guest:
        # First, may need to restore %ds if clobbered by create_bounce_frame
        pushl %ss
        popl  %ds
        # Second, create a failsafe copy of DS,ES,FS,GS in case any are bad
        leal  DS(%esp),%esi
        leal  FAILSAFE_BUFFER(%ebx),%edi
        movsl
        movsl
        movsl
        movsl
        # Finally, restore guest registers -- faults will cause failsafe
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
1:      popl  %ds
2:      popl  %es
3:      popl  %fs
4:      popl  %gs
        addl  $4,%esp
5:      iret
.section .fixup,"ax"
10:     subl  $4,%esp
        pushl %gs
9:      pushl %fs
8:      pushl %es
7:      pushl %ds
6:      pushl %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        pushl %ecx
        pushl %ebx
        pushl %ss
        popl  %ds
        pushl %ss
        popl  %es
        jmp   failsafe_callback
.previous
.section __ex_table,"a"
        .align 4
        .long 1b,6b
        .long 2b,7b
        .long 3b,8b
        .long 4b,9b
        .long 5b,10b
.previous

/* No special register assumptions */
failsafe_callback:
        GET_CURRENT(%ebx)
        movzwl PROCESSOR(%ebx),%eax
        shl   $4,%eax
        lea   guest_trap_bounce(%eax),%edx
        movl  FAILSAFE_ADDR(%ebx),%eax
        movl  %eax,GTB_EIP(%edx)
        movl  FAILSAFE_SEL(%ebx),%eax
        movw  %ax,GTB_CS(%edx)
        call  create_bounce_frame
        subl  $16,%esi        # add DS/ES/FS/GS to failsafe stack frame
        leal  FAILSAFE_BUFFER(%ebx),%ebp
        movl  0(%ebp),%eax    # DS
FAULT1: movl  %eax,(%esi)
        movl  4(%ebp),%eax    # ES
FAULT2: movl  %eax,4(%esi)
        movl  8(%ebp),%eax    # FS
FAULT3: movl  %eax,8(%esi)
        movl  12(%ebp),%eax   # GS
FAULT4: movl  %eax,12(%esi)
        movl  %esi,OLDESP(%esp)
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        addl  $20,%esp        # skip DS/ES/FS/GS/ORIG_EAX
FAULT5: iret

        ALIGN
# Simple restore -- we should never fault, as we will only interrupt ring 0
# when sane values have been placed in all registers. The only exception is
# NMI, which may interrupt before good values have been placed in DS-GS.
# The NMI return code deals with this problem itself.
restore_all_xen:
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        popl  %ds
        popl  %es
        popl  %fs
        popl  %gs
        addl  $4,%esp
        iret
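
/*
 * A guest reaches ENTRY(hypervisor_call) below via a software-interrupt
 * trap gate; because SAVE_ALL pushes %ebx last, the guest's %ebx..%edi
 * land exactly where the C handlers expect their arguments. A minimal
 * guest-side stub looks roughly like this (a sketch: the vector 0x82 and
 * the wrapper name are assumptions, not defined in this file -- see
 * hypervisor-if.h for the real interface):
 *
 *     long dummy_hypercall(long arg)  -- hypothetical wrapper
 *     {
 *         long ret;
 *         asm volatile ( "int $0x82"
 *                        : "=a" (ret)
 *                        : "0" (__HYPERVISOR_sched_op), "b" (arg)
 *                        : "memory" );
 *         return ret;
 *     }
 */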

        ALIGN
ENTRY(hypervisor_call)
        pushl %eax              # save orig_eax
        SAVE_ALL
        GET_CURRENT(%ebx)
        andl  $255,%eax
        call  *SYMBOL_NAME(hypervisor_call_table)(,%eax,4)

ret_from_hypervisor_call:
        movl  %eax,EAX(%esp)    # save the return value

test_all_events:
        xorl  %ecx,%ecx
        notl  %ecx
        cli                     # tests must not race interrupts
/*test_softirqs:*/
        movzwl PROCESSOR(%ebx),%eax
        shl   $6,%eax           # sizeof(irq_cpustat) == 64
        test  %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
        jnz   process_softirqs
/*test_hyp_events:*/
        testw %cx, HYP_EVENTS(%ebx)
        jnz   process_hyp_events
/*test_guest_events:*/
        movl  SHARED_INFO(%ebx),%eax
        testb $0xFF,UPCALL_MASK(%eax)
        jnz   restore_all_guest
        testb $0xFF,UPCALL_PENDING(%eax)
        jz    restore_all_guest
        movb  $1,UPCALL_MASK(%eax) # Upcalls are masked during delivery
/*process_guest_events:*/
        movzwl PROCESSOR(%ebx),%edx
        shl   $4,%edx           # sizeof(guest_trap_bounce) == 16
        lea   guest_trap_bounce(%edx),%edx
        movl  EVENT_ADDR(%ebx),%eax
        movl  %eax,GTB_EIP(%edx)
        movl  EVENT_SEL(%ebx),%eax
        movw  %ax,GTB_CS(%edx)
        call  create_bounce_frame
        jmp   restore_all_guest

        ALIGN
process_softirqs:
        sti
        call  SYMBOL_NAME(do_softirq)
        jmp   test_all_events

        ALIGN
process_hyp_events:
        sti
        call  SYMBOL_NAME(do_hyp_events)
        jmp   test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:      */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                                  */
/* %edx == guest_trap_bounce, %ebx == task_struct                  */
/* %eax,%ecx are clobbered. %ds:%esi contain new OLDSS/OLDESP.     */
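/*
 * The frame written onto the guest stack, as built below (addresses
 * decrease downwards; the bracketed words are written only in the
 * inter-privilege case, i.e. return CS in ring > 1):
 *
 *     [ return_SS  ]
 *     [ return_ESP ]
 *       EFLAGS
 *       CS
 *       EIP           <- %esi points here on return
 */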
create_bounce_frame:
        mov   CS+4(%esp),%cl
        test  $2,%cl
        jz    1f /* jump if returning to an existing ring-1 activation */
        /* obtain ss/esp from TSS -- no current ring-1 activations */
        movzwl PROCESSOR(%ebx),%eax
        /* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
        movl  %eax, %ecx
        shll  $7, %ecx
        shll  $13, %eax
        addl  %ecx,%eax
        addl  $init_tss + 12,%eax
        movl  (%eax),%esi   /* tss->esp1 */
FAULT6: movl  4(%eax),%ds   /* tss->ss1  */
        /* base of stack frame must contain ss/esp (inter-priv iret) */
        subl  $8,%esi
        movl  OLDESP+4(%esp),%eax
FAULT7: movl  %eax,(%esi)
        movl  OLDSS+4(%esp),%eax
FAULT8: movl  %eax,4(%esi)
        jmp   2f
1:      /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl  OLDESP+4(%esp),%esi
FAULT9: movl  OLDSS+4(%esp),%ds
2:      /* Construct a stack frame: EFLAGS, CS/EIP */
        subl  $12,%esi
        movl  EIP+4(%esp),%eax
FAULT10:movl  %eax,(%esi)
        movl  CS+4(%esp),%eax
FAULT11:movl  %eax,4(%esi)
        movl  EFLAGS+4(%esp),%eax
FAULT12:movl  %eax,8(%esi)
        /* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl  $0xfffcbeff,%eax
        movl  %eax,EFLAGS+4(%esp)
        movl  %ds,OLDSS+4(%esp)
        movl  %esi,OLDESP+4(%esp)
        movzwl %es:GTB_CS(%edx),%eax
        movl  %eax,CS+4(%esp)
        movl  %es:GTB_EIP(%edx),%eax
        movl  %eax,EIP+4(%esp)
        ret

.section __ex_table,"a"
        .align 4
        .long FAULT1, kill_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT2, kill_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT3, kill_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT4, kill_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT5, kill_domain_fixup1 # Fault executing failsafe iret
        .long FAULT6, kill_domain_fixup2 # Fault loading ring-1 stack selector
        .long FAULT7, kill_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT8, kill_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT9, kill_domain_fixup2 # Fault loading ring-1 stack selector
        .long FAULT10,kill_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT11,kill_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT12,kill_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT13,kill_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT14,kill_domain_fixup3 # Fault writing to ring-1 stack
.previous

# This handler kills domains which experience unrecoverable faults.
.section .fixup,"ax"
kill_domain_fixup1:
        subl  $4,%esp
        SAVE_ALL
        jmp   kill_domain
kill_domain_fixup2:
        addl  $4,%esp
kill_domain_fixup3:
        pushl %ss
        popl  %ds
        jmp   kill_domain
.previous

        ALIGN
process_guest_exception_and_events:
        movzwl PROCESSOR(%ebx),%eax
        shl   $4,%eax
        lea   guest_trap_bounce(%eax),%edx
        testb $~0,GTB_FLAGS(%edx)
        jz    test_all_events
        call  create_bounce_frame # just the basic frame
        mov   %es:GTB_FLAGS(%edx),%cl
        test  $GTBF_TRAP_NOCODE,%cl
        jnz   2f
        subl  $4,%esi             # push error_code onto guest frame
        movl  %es:GTB_ERROR_CODE(%edx),%eax
FAULT13:movl  %eax,(%esi)
        test  $GTBF_TRAP_CR2,%cl
        jz    1f
        subl  $4,%esi             # push %cr2 onto guest frame
        movl  %es:GTB_CR2(%edx),%eax
FAULT14:movl  %eax,(%esi)
1:      movl  %esi,OLDESP(%esp)
2:      push  %es                 # unclobber %ds
        pop   %ds
        movb  $0,GTB_FLAGS(%edx)
        jmp   test_all_events

        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movb  CS(%esp),%al
        testb $3,%al              # return to non-supervisor?
        jne   test_all_events
        jmp   restore_all_xen

ENTRY(divide_error)
        pushl $0                  # no error code
        pushl $ SYMBOL_NAME(do_divide_error)
        ALIGN
error_code:
        pushl %fs
        pushl %es
        pushl %ds
        pushl %eax
        xorl  %eax,%eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        decl  %eax                # eax = -1
        pushl %ecx
        pushl %ebx
        cld
        movl  %gs,%ecx
        movl  ORIG_EAX(%esp), %esi # get the error code
        movl  GS(%esp), %edi       # get the function address
        movl  %eax, ORIG_EAX(%esp)
        movl  %ecx, GS(%esp)
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        movl  %edx,%fs
        movl  %edx,%gs
        # We force a STI here. In most cases it is illegal to fault with
        # interrupts disabled, so no need to check EFLAGS. There is one
        # case when it /is/ valid -- on final return to guest context, we
        # CLI so we can atomically check for events to notify the guest
        # about and return, all in one go. If we fault, it is necessary to
        # STI and the worst that will happen is that our return code is no
        # longer atomic. This will do -- no one will ever notice. :-)
        sti
        movl  %esp,%edx
        pushl %esi                # push the error code
        pushl %edx                # push the pt_regs pointer
        GET_CURRENT(%ebx)
        call  *%edi
        addl  $8,%esp
        movb  CS(%esp),%al
        testb $3,%al
        je    restore_all_xen
        jmp   process_guest_exception_and_events

ENTRY(coprocessor_error)
        pushl $0
        pushl $ SYMBOL_NAME(do_coprocessor_error)
        jmp   error_code

ENTRY(simd_coprocessor_error)
        pushl $0
        pushl $ SYMBOL_NAME(do_simd_coprocessor_error)
        jmp   error_code

ENTRY(device_not_available)
        pushl $0
        pushl $ SYMBOL_NAME(math_state_restore)
        jmp   error_code

ENTRY(debug)
        pushl $0
        pushl $ SYMBOL_NAME(do_debug)
        jmp   error_code

ENTRY(int3)
        pushl $0
        pushl $ SYMBOL_NAME(do_int3)
        jmp   error_code

ENTRY(overflow)
        pushl $0
        pushl $ SYMBOL_NAME(do_overflow)
        jmp   error_code

ENTRY(bounds)
        pushl $0
        pushl $ SYMBOL_NAME(do_bounds)
        jmp   error_code

ENTRY(invalid_op)
        pushl $0
        pushl $ SYMBOL_NAME(do_invalid_op)
        jmp   error_code

ENTRY(coprocessor_segment_overrun)
        pushl $0
        pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun)
        jmp   error_code

ENTRY(invalid_TSS)
        pushl $ SYMBOL_NAME(do_invalid_TSS)
        jmp   error_code

ENTRY(segment_not_present)
        pushl $ SYMBOL_NAME(do_segment_not_present)
        jmp   error_code

ENTRY(stack_segment)
        pushl $ SYMBOL_NAME(do_stack_segment)
        jmp   error_code

ENTRY(general_protection)
        pushl $ SYMBOL_NAME(do_general_protection)
        jmp   error_code

ENTRY(alignment_check)
        pushl $ SYMBOL_NAME(do_alignment_check)
        jmp   error_code

ENTRY(page_fault)
        pushl $ SYMBOL_NAME(do_page_fault)
        jmp   error_code

ENTRY(machine_check)
        pushl $0
        pushl $ SYMBOL_NAME(do_machine_check)
        jmp   error_code

ENTRY(spurious_interrupt_bug)
        pushl $0
        pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
        jmp   error_code

ENTRY(nmi)
        # Save state but do not trash the segment registers!
        # We may otherwise be unable to reload them or copy them to ring 1.
        pushl %eax
        SAVE_ALL_NOSEGREGS

        # Check for hardware problems. These are always fatal so we can
        # reload DS and ES when handling them.
        inb   $0x61,%al
        testb $0x80,%al
        jne   nmi_parity_err
        testb $0x40,%al
        jne   nmi_io_err
        movl  %eax,%ebx

        # Okay, it's almost a normal NMI tick. We can only process it if:
        #  A. We are the outermost Xen activation (in which case we have
        #     the selectors safely saved on our stack)
        #  B. DS-GS all contain sane Xen values.
        # In all other cases we bail without touching DS-GS, as we have
        # interrupted an enclosing Xen activation in tricky prologue or
        # epilogue code.
        movb  CS(%esp),%al
        testb $3,%al
        jne   do_watchdog_tick
        movl  DS(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   nmi_badseg
        movl  ES(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   nmi_badseg
        movl  FS(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   nmi_badseg
        movl  GS(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   nmi_badseg

do_watchdog_tick:
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        pushl %ebx                # reason
        pushl %edx                # regs
        call  SYMBOL_NAME(do_nmi)
        addl  $8,%esp
        movb  CS(%esp),%al
        testb $3,%al
        je    restore_all_xen
        GET_CURRENT(%ebx)
        jmp   restore_all_guest

nmi_badseg:
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        addl  $20,%esp
        iret

nmi_parity_err:
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        jmp   SYMBOL_NAME(mem_parity_error)

nmi_io_err:
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        jmp   SYMBOL_NAME(io_check_error)

.data
ENTRY(hypervisor_call_table)
        .long SYMBOL_NAME(do_set_trap_table)     /*  0 */
        .long SYMBOL_NAME(do_mmu_update)
        .long SYMBOL_NAME(do_console_write)
        .long SYMBOL_NAME(do_set_gdt)
        .long SYMBOL_NAME(do_stack_switch)
        .long SYMBOL_NAME(do_set_callbacks)      /*  5 */
#ifndef NO_DEVICES_IN_XEN
        .long SYMBOL_NAME(do_net_io_op)
#else
        .long SYMBOL_NAME(do_ni_syscall)
#endif
        .long SYMBOL_NAME(do_fpu_taskswitch)
        .long SYMBOL_NAME(do_sched_op)
        .long SYMBOL_NAME(do_dom0_op)
#ifndef NO_DEVICES_IN_XEN
        .long SYMBOL_NAME(do_network_op)         /* 10 */
        .long SYMBOL_NAME(do_block_io_op)
#else
        .long SYMBOL_NAME(do_ni_syscall)         /* 10 */
        .long SYMBOL_NAME(do_ni_syscall)
#endif
        .long SYMBOL_NAME(do_set_debugreg)
        .long SYMBOL_NAME(do_get_debugreg)
        .long SYMBOL_NAME(do_update_descriptor)
        .long SYMBOL_NAME(do_set_fast_trap)      /* 15 */
        .long SYMBOL_NAME(do_dom_mem_op)
        .long SYMBOL_NAME(do_multicall)
        .long SYMBOL_NAME(do_kbd_op)
        .long SYMBOL_NAME(do_update_va_mapping)
        .long SYMBOL_NAME(do_set_timer_op)       /* 20 */
        .long SYMBOL_NAME(do_event_channel_op)
        .long SYMBOL_NAME(do_xen_version)
        .long SYMBOL_NAME(do_console_io)
        .long SYMBOL_NAME(do_physdev_op)
        .long SYMBOL_NAME(do_update_va_mapping_otherdomain) /* 25 */
        .rept NR_syscalls-((.-hypervisor_call_table)/4)
                .long SYMBOL_NAME(do_ni_syscall)
        .endr