ia64/xen-unstable

view xen/arch/i386/entry.S @ 722:7a9d47fea66c

bitkeeper revision 1.428 (3f677454_j81KDQLm_L7AscjYn2nYg)

Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into labyrinth.cl.cam.ac.uk:/auto/anfs/scratch/labyrinth/iap10/xeno-clone/xeno.bk
author iap10@labyrinth.cl.cam.ac.uk
date Tue Sep 16 20:36:36 2003 +0000 (2003-09-16)
parents c085fac641e2 d0cdb9994a2b
children b45bc774c22c
line source
1 /*
2 * linux/arch/i386/entry.S
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
7 /*
8 * entry.S contains the system-call and fault low-level handling routines.
9 * This also contains the timer-interrupt handler, as well as all interrupts
10 * and faults that can result in a task-switch.
11 *
12 * Stack layout in 'ret_from_system_call':
13 * 0(%esp) - %ebx
14 * 4(%esp) - %ecx
15 * 8(%esp) - %edx
16 * C(%esp) - %esi
17 * 10(%esp) - %edi
18 * 14(%esp) - %ebp
19 * 18(%esp) - %eax
20 * 1C(%esp) - %ds
21 * 20(%esp) - %es
22 * 24(%esp) - %fs
23 * 28(%esp) - %gs
24 * 2C(%esp) - orig_eax
25 * 30(%esp) - %eip
26 * 34(%esp) - %cs
27 * 38(%esp) - %eflags
28 * 3C(%esp) - %oldesp
29 * 40(%esp) - %oldss
30 *
31 * "current" is in register %ebx during any slow entries.
32 */
33 /* The idea for callbacks from monitor -> guest OS.
34 *
35 * First, we require that all callbacks (either via a supplied
36 * interrupt-descriptor-table, or via the special event or failsafe callbacks
37 * in the shared-info-structure) are to ring 1. This just makes life easier,
38 * in that it means we don't have to do messy GDT/LDT lookups to find
39 * out the privilege level of the return code-selector. That code
40 * would just be a hassle to write, and would need to account for running
41 * off the end of the GDT/LDT, for example. For all callbacks we check
42 * that the provided
43 * return CS is not == __HYPERVISOR_{CS,DS}. Apart from that we're safe as
44 * we don't allow a guest OS to install ring-0 privileges into the GDT/LDT.
45 * It's up to the guest OS to ensure all returns via the IDT are to ring 1.
46 * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather
47 * than the correct ring) and bad things are bound to ensue -- IRET is
48 * likely to fault, and we may end up killing the domain (no harm can
49 * come to the hypervisor itself, though).
50 *
51 * When doing a callback, we check if the return CS is in ring 0. If so,
52 * callback is delayed until next return to ring != 0.
53 * If return CS is in ring 1, then we create a callback frame
54 * starting at return SS/ESP. The base of the frame does an intra-privilege
55 * interrupt-return.
56 * If return CS is in ring > 1, we create a callback frame starting
57 * at SS/ESP taken from appropriate section of the current TSS. The base
58 * of the frame does an inter-privilege interrupt-return.
59 *
60 * Note that the "failsafe callback" uses a special stackframe:
61 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
62 * return_CS, return_EFLAGS[, return_ESP, return_SS] }
63 * That is, original values for DS/ES/FS/GS are placed on stack rather than
64 * in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have them
65 * saved/restored in guest OS. Furthermore, if we load them we may cause
66 * a fault if they are invalid, which is a hassle to deal with. We avoid
67 * that problem if we don't load them :-) This property allows us to use
68 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
69 * on return to ring != 0, we can simply package it up as a return via
70 * the failsafe callback, and let the guest OS sort it out (perhaps by
71 * killing an application process). Note that we also do this for any
72 * faulting IRET -- just let the guest OS handle it via the event
73 * callback.
74 *
75 * We terminate a domain in the following cases:
76 * - creating a callback stack frame (due to bad ring-1 stack).
77 * - faulting IRET on entry to failsafe callback handler.
78 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
79 * handler in good order (absolutely no faults allowed!).
80 */
82 #include <xeno/config.h>
83 #include <xeno/errno.h>
84 #include <hypervisor-ifs/hypervisor-if.h>
85 #include <asm/smp.h>
87 EBX = 0x00
88 ECX = 0x04
89 EDX = 0x08
90 ESI = 0x0C
91 EDI = 0x10
92 EBP = 0x14
93 EAX = 0x18
94 DS = 0x1C
95 ES = 0x20
96 FS = 0x24
97 GS = 0x28
98 ORIG_EAX = 0x2C
99 EIP = 0x30
100 CS = 0x34
101 EFLAGS = 0x38
102 OLDESP = 0x3C
103 OLDSS = 0x40
105 /* Offsets in task_struct */
106 PROCESSOR = 0
107 STATE = 4
108 HYP_EVENTS = 8
109 DOMAIN = 12
110 SHARED_INFO = 16
111 EVENT_SEL = 20
112 EVENT_ADDR = 24
113 FAILSAFE_SEL = 28
114 FAILSAFE_ADDR = 32
116 /* Offsets in shared_info_t */
117 EVENTS = 0
118 EVENTS_MASK = 4
120 /* Offsets in guest_trap_bounce */
121 GTB_ERROR_CODE = 0
122 GTB_CR2 = 4
123 GTB_FLAGS = 8
124 GTB_CS = 10
125 GTB_EIP = 12
126 GTBF_TRAP = 1
127 GTBF_TRAP_NOCODE = 2
128 GTBF_TRAP_CR2 = 4
130 CF_MASK = 0x00000001
131 IF_MASK = 0x00000200
132 NT_MASK = 0x00004000
134 #define SAVE_ALL_NOSTI \
135 cld; \
136 pushl %gs; \
137 pushl %fs; \
138 pushl %es; \
139 pushl %ds; \
140 pushl %eax; \
141 pushl %ebp; \
142 pushl %edi; \
143 pushl %esi; \
144 pushl %edx; \
145 pushl %ecx; \
146 pushl %ebx; \
147 movl $(__HYPERVISOR_DS),%edx; \
148 movl %edx,%ds; \
149 movl %edx,%es;
151 #define SAVE_ALL \
152 SAVE_ALL_NOSTI \
153 sti;
155 #define RESTORE_ALL \
156 popl %ebx; \
157 popl %ecx; \
158 popl %edx; \
159 popl %esi; \
160 popl %edi; \
161 popl %ebp; \
162 popl %eax; \
163 1: popl %ds; \
164 2: popl %es; \
165 3: popl %fs; \
166 4: popl %gs; \
167 addl $4,%esp; \
168 5: iret; \
169 .section .fixup,"ax"; \
170 10: subl $4,%esp; \
171 pushl %gs; \
172 9: pushl %fs; \
173 8: pushl %es; \
174 7: pushl %ds; \
175 6: pushl %eax; \
176 pushl %ebp; \
177 pushl %edi; \
178 pushl %esi; \
179 pushl %edx; \
180 pushl %ecx; \
181 pushl %ebx; \
182 pushl %ss; \
183 popl %ds; \
184 pushl %ss; \
185 popl %es; \
186 jmp failsafe_callback; \
187 .previous; \
188 .section __ex_table,"a"; \
189 .align 4; \
190 .long 1b,6b; \
191 .long 2b,7b; \
192 .long 3b,8b; \
193 .long 4b,9b; \
194 .long 5b,10b; \
195 .previous
197 #define GET_CURRENT(reg) \
198 movl $-8192, reg; \
199 andl %esp, reg
201 ENTRY(ret_from_newdomain)
202 GET_CURRENT(%ebx)
203 jmp test_all_events
205 ALIGN
206 /*
207 * HYPERVISOR_multicall(call_list, nr_calls)
208 * Execute a list of 'nr_calls' system calls, pointed at by 'call_list'.
209 * This is fairly easy except that:
210 * 1. We may fault reading the call list, and must patch that up; and
211 * 2. We cannot recursively call HYPERVISOR_multicall, or a malicious
212 * caller could cause our stack to blow up.
213 */
214 do_multicall:
215 popl %eax
216 cmpl $SYMBOL_NAME(multicall_return_from_call),%eax
217 je multicall_return_from_call
218 pushl %ebx
219 movl 4(%esp),%ebx /* EBX == call_list */
220 movl 8(%esp),%ecx /* ECX == nr_calls */
221 multicall_loop:
222 pushl %ecx
223 multicall_fault1:
224 pushl 20(%ebx)
225 multicall_fault2:
226 pushl 16(%ebx)
227 multicall_fault3:
228 pushl 12(%ebx)
229 multicall_fault4:
230 pushl 8(%ebx)
231 multicall_fault5:
232 pushl 4(%ebx)
233 multicall_fault6:
234 movl (%ebx),%eax
235 andl $255,%eax
236 call *SYMBOL_NAME(hypervisor_call_table)(,%eax,4)
237 multicall_return_from_call:
238 addl $20,%esp
239 popl %ecx
240 addl $BYTES_PER_MULTICALL_ENTRY,%ebx
241 loop multicall_loop
242 popl %ebx
243 xorl %eax,%eax
244 jmp ret_from_hypervisor_call
246 .section __ex_table,"a"
247 .align 4
248 .long multicall_fault1, multicall_fixup1
249 .long multicall_fault2, multicall_fixup2
250 .long multicall_fault3, multicall_fixup3
251 .long multicall_fault4, multicall_fixup4
252 .long multicall_fault5, multicall_fixup5
253 .long multicall_fault6, multicall_fixup6
254 .previous
256 .section .fixup,"ax"
257 multicall_fixup6:
258 addl $4,%esp
259 multicall_fixup5:
260 addl $4,%esp
261 multicall_fixup4:
262 addl $4,%esp
263 multicall_fixup3:
264 addl $4,%esp
265 multicall_fixup2:
266 addl $4,%esp
267 multicall_fixup1:
268 addl $4,%esp
269 popl %ebx
270 movl $-EFAULT,%eax
271 jmp ret_from_hypervisor_call
272 .previous
274 ALIGN
275 restore_all:
276 RESTORE_ALL
278 ALIGN
279 ENTRY(hypervisor_call)
280 pushl %eax # save orig_eax
281 SAVE_ALL
282 GET_CURRENT(%ebx)
283 andl $255,%eax
284 call *SYMBOL_NAME(hypervisor_call_table)(,%eax,4)
286 ret_from_hypervisor_call:
287 movl %eax,EAX(%esp) # save the return value
289 test_all_events:
290 xorl %ecx,%ecx
291 notl %ecx
292 cli # tests must not race interrupts
293 /*test_softirqs:*/
294 mov PROCESSOR(%ebx),%eax
295 shl $6,%eax # sizeof(irq_cpustat) == 64
296 test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
297 jnz process_softirqs
298 /*test_hyp_events:*/
299 test %ecx, HYP_EVENTS(%ebx)
300 jnz process_hyp_events
301 /*test_guest_events:*/
302 movl SHARED_INFO(%ebx),%eax
303 shl $31,%ecx # %ecx = EVENTS_MASTER_ENABLE_MASK
304 test %ecx,EVENTS_MASK(%eax)
305 jz restore_all # only notify if master switch enabled
306 movl EVENTS(%eax),%ecx
307 andl EVENTS_MASK(%eax),%ecx
308 jz restore_all # skip if no events to deliver
309 notl %ecx
310 btrl $31,%ecx # NB. We clear all events that are
311 andl %ecx,EVENTS_MASK(%eax) # being delivered + master enable.
312 /*process_guest_events:*/
313 mov PROCESSOR(%ebx),%edx
314 shl $4,%edx # sizeof(guest_trap_bounce) == 16
315 lea guest_trap_bounce(%edx),%edx
316 movl EVENT_ADDR(%ebx),%eax
317 movl %eax,GTB_EIP(%edx)
318 movl EVENT_SEL(%ebx),%eax
319 movw %ax,GTB_CS(%edx)
320 call create_bounce_frame
321 jmp restore_all
323 ALIGN
324 process_softirqs:
325 sti
326 call SYMBOL_NAME(do_softirq)
327 jmp test_all_events
329 ALIGN
330 process_hyp_events:
331 sti
332 call SYMBOL_NAME(do_hyp_events)
333 jmp test_all_events
335 /* No special register assumptions */
336 failsafe_callback:
337 # Check that we are actually returning to ring != 0 because
338 # we may fault when returning to another ring 0 activation.
339 # This can only occur when restoring FS and GS, which can be avoided
340 # by zeroing those registers and trying again. The outermost ring 0
341 # activation will do a full failsafe callback to the guest OS.
342 # Note that the outermost activation certainly has the "bad" selector
343 # value saved away, since interrupts are always disabled in ring 0
344 # until all segment registers have been saved.
345 movb CS(%esp),%al
346 test $3,%al
347 jnz 1f
348 xorl %eax,%eax
349 movl %eax,FS(%esp)
350 movl %eax,GS(%esp)
351 jmp restore_all
352 1: GET_CURRENT(%ebx)
353 mov PROCESSOR(%ebx),%eax
354 shl $4,%eax
355 lea guest_trap_bounce(%eax),%edx
356 movl FAILSAFE_ADDR(%ebx),%eax
357 movl %eax,GTB_EIP(%edx)
358 movl FAILSAFE_SEL(%ebx),%eax
359 movw %ax,GTB_CS(%edx)
360 call create_bounce_frame
361 subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
362 movl DS(%esp),%eax
363 FAULT1: movl %eax,(%esi)
364 movl ES(%esp),%eax
365 FAULT2: movl %eax,4(%esi)
366 movl FS(%esp),%eax
367 FAULT3: movl %eax,8(%esi)
368 movl GS(%esp),%eax
369 FAULT4: movl %eax,12(%esi)
370 movl %esi,OLDESP(%esp)
371 popl %ebx
372 popl %ecx
373 popl %edx
374 popl %esi
375 popl %edi
376 popl %ebp
377 popl %eax
378 addl $20,%esp # skip DS/ES/FS/GS/ORIG_EAX
379 FAULT5: iret
382 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
383 /* {EIP, CS, EFLAGS, [ESP, SS]} */
384 /* %edx == guest_trap_bounce, %ebx == task_struct */
385 /* %eax,%ecx are clobbered. %ds:%esi contain new OLDSS/OLDESP. */
386 create_bounce_frame:
387 mov CS+4(%esp),%cl
388 test $2,%cl
389 jz 1f /* jump if returning to an existing ring-1 activation */
390 /* obtain ss/esp from TSS -- no current ring-1 activations */
391 movl PROCESSOR(%ebx),%eax
392 shll $8,%eax /* multiply by 256 */
393 addl $init_tss + 12,%eax
394 movl (%eax),%esi /* tss->esp1 */
395 FAULT6: movl 4(%eax),%ds /* tss->ss1 */
396 /* base of stack frame must contain ss/esp (inter-priv iret) */
397 subl $8,%esi
398 movl OLDESP+4(%esp),%eax
399 FAULT7: movl %eax,(%esi)
400 movl OLDSS+4(%esp),%eax
401 FAULT8: movl %eax,4(%esi)
402 jmp 2f
403 1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
404 movl OLDESP+4(%esp),%esi
405 FAULT9: movl OLDSS+4(%esp),%ds
406 2: /* Construct a stack frame: EFLAGS, CS/EIP */
407 subl $12,%esi
408 movl EIP+4(%esp),%eax
409 FAULT10:movl %eax,(%esi)
410 movl CS+4(%esp),%eax
411 FAULT11:movl %eax,4(%esi)
412 movl EFLAGS+4(%esp),%eax
413 FAULT12:movl %eax,8(%esi)
414 /* Rewrite our stack frame and return to ring 1. */
415 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
416 andl $0xfffcbeff,%eax
417 movl %eax,EFLAGS+4(%esp)
418 movl %ds,OLDSS+4(%esp)
419 movl %esi,OLDESP+4(%esp)
420 movzwl %es:GTB_CS(%edx),%eax
421 movl %eax,CS+4(%esp)
422 movl %es:GTB_EIP(%edx),%eax
423 movl %eax,EIP+4(%esp)
424 ret
427 .section __ex_table,"a"
428 .align 4
429 .long FAULT1, kill_domain_fixup3 # Fault writing to ring-1 stack
430 .long FAULT2, kill_domain_fixup3 # Fault writing to ring-1 stack
431 .long FAULT3, kill_domain_fixup3 # Fault writing to ring-1 stack
432 .long FAULT4, kill_domain_fixup3 # Fault writing to ring-1 stack
433 .long FAULT5, kill_domain_fixup1 # Fault executing failsafe iret
434 .long FAULT6, kill_domain_fixup2 # Fault loading ring-1 stack selector
435 .long FAULT7, kill_domain_fixup2 # Fault writing to ring-1 stack
436 .long FAULT8, kill_domain_fixup2 # Fault writing to ring-1 stack
437 .long FAULT9, kill_domain_fixup2 # Fault loading ring-1 stack selector
438 .long FAULT10,kill_domain_fixup2 # Fault writing to ring-1 stack
439 .long FAULT11,kill_domain_fixup2 # Fault writing to ring-1 stack
440 .long FAULT12,kill_domain_fixup2 # Fault writing to ring-1 stack
441 .long FAULT13,kill_domain_fixup3 # Fault writing to ring-1 stack
442 .long FAULT14,kill_domain_fixup3 # Fault writing to ring-1 stack
443 .previous
445 # This handler kills domains which experience unrecoverable faults.
446 .section .fixup,"ax"
447 kill_domain_fixup1:
448 subl $4,%esp
449 SAVE_ALL
450 jmp kill_domain
451 kill_domain_fixup2:
452 addl $4,%esp
453 kill_domain_fixup3:
454 pushl %ss
455 popl %ds
456 jmp kill_domain
457 .previous
459 ALIGN
460 process_guest_exception_and_events:
461 mov PROCESSOR(%ebx),%eax
462 shl $4,%eax
463 lea guest_trap_bounce(%eax),%edx
464 testb $~0,GTB_FLAGS(%edx)
465 jz test_all_events
466 call create_bounce_frame # just the basic frame
467 mov %es:GTB_FLAGS(%edx),%cl
468 test $GTBF_TRAP_NOCODE,%cl
469 jnz 2f
470 subl $4,%esi # push error_code onto guest frame
471 movl %es:GTB_ERROR_CODE(%edx),%eax
472 FAULT13:movl %eax,(%esi)
473 test $GTBF_TRAP_CR2,%cl
474 jz 1f
475 subl $4,%esi # push %cr2 onto guest frame
476 movl %es:GTB_CR2(%edx),%eax
477 FAULT14:movl %eax,(%esi)
478 1: movl %esi,OLDESP(%esp)
479 2: push %es # unclobber %ds
480 pop %ds
481 movb $0,GTB_FLAGS(%edx)
482 jmp test_all_events
484 ALIGN
485 ENTRY(ret_from_intr)
486 GET_CURRENT(%ebx)
487 movb CS(%esp),%al
488 testb $3,%al # return to non-supervisor?
489 jne test_all_events
490 jmp restore_all
492 ALIGN
493 ret_from_exception:
494 movb CS(%esp),%al
495 testb $3,%al # return to non-supervisor?
496 jne process_guest_exception_and_events
497 jmp restore_all
499 ALIGN
501 ENTRY(divide_error)
502 pushl $0 # no error code
503 pushl $ SYMBOL_NAME(do_divide_error)
504 ALIGN
505 error_code:
506 pushl %fs
507 pushl %es
508 pushl %ds
509 pushl %eax
510 xorl %eax,%eax
511 pushl %ebp
512 pushl %edi
513 pushl %esi
514 pushl %edx
515 decl %eax # eax = -1
516 pushl %ecx
517 pushl %ebx
518 cld
519 movl %gs,%ecx
520 movl ORIG_EAX(%esp), %esi # get the error code
521 movl GS(%esp), %edi # get the function address
522 movl %eax, ORIG_EAX(%esp)
523 movl %ecx, GS(%esp)
524 movl %esp,%edx
525 pushl %esi # push the error code
526 pushl %edx # push the pt_regs pointer
527 movl $(__HYPERVISOR_DS),%edx
528 movl %edx,%ds
529 movl %edx,%es
530 GET_CURRENT(%ebx)
531 call *%edi
532 # NB. We reenable interrupts AFTER exception processing, as that is
533 # required by the page fault handler (needs to save %cr2)
534 sti
535 addl $8,%esp
536 jmp ret_from_exception
538 ENTRY(coprocessor_error)
539 pushl $0
540 pushl $ SYMBOL_NAME(do_coprocessor_error)
541 jmp error_code
543 ENTRY(simd_coprocessor_error)
544 pushl $0
545 pushl $ SYMBOL_NAME(do_simd_coprocessor_error)
546 jmp error_code
548 ENTRY(device_not_available)
549 pushl $0
550 pushl $SYMBOL_NAME(math_state_restore)
551 jmp error_code
553 ENTRY(debug)
554 pushl $0
555 pushl $ SYMBOL_NAME(do_debug)
556 jmp error_code
558 ENTRY(nmi)
559 pushl %eax
560 SAVE_ALL_NOSTI
561 movl %esp,%edx
562 pushl $0
563 pushl %edx
564 call SYMBOL_NAME(do_nmi)
565 addl $8,%esp
566 RESTORE_ALL
568 ENTRY(int3)
569 pushl $0
570 pushl $ SYMBOL_NAME(do_int3)
571 jmp error_code
573 ENTRY(overflow)
574 pushl $0
575 pushl $ SYMBOL_NAME(do_overflow)
576 jmp error_code
578 ENTRY(bounds)
579 pushl $0
580 pushl $ SYMBOL_NAME(do_bounds)
581 jmp error_code
583 ENTRY(invalid_op)
584 pushl $0
585 pushl $ SYMBOL_NAME(do_invalid_op)
586 jmp error_code
588 ENTRY(coprocessor_segment_overrun)
589 pushl $0
590 pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun)
591 jmp error_code
593 ENTRY(invalid_TSS)
594 pushl $ SYMBOL_NAME(do_invalid_TSS)
595 jmp error_code
597 ENTRY(segment_not_present)
598 pushl $ SYMBOL_NAME(do_segment_not_present)
599 jmp error_code
601 ENTRY(stack_segment)
602 pushl $ SYMBOL_NAME(do_stack_segment)
603 jmp error_code
605 ENTRY(general_protection)
606 pushl $ SYMBOL_NAME(do_general_protection)
607 jmp error_code
609 ENTRY(alignment_check)
610 pushl $ SYMBOL_NAME(do_alignment_check)
611 jmp error_code
613 ENTRY(page_fault)
614 pushl $ SYMBOL_NAME(do_page_fault)
615 jmp error_code
617 ENTRY(machine_check)
618 pushl $0
619 pushl $ SYMBOL_NAME(do_machine_check)
620 jmp error_code
622 ENTRY(spurious_interrupt_bug)
623 pushl $0
624 pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
625 jmp error_code
627 .data
628 ENTRY(hypervisor_call_table)
629 .long SYMBOL_NAME(do_set_trap_table)
630 .long SYMBOL_NAME(do_process_page_updates)
631 .long SYMBOL_NAME(do_console_write)
632 .long SYMBOL_NAME(do_set_gdt)
633 .long SYMBOL_NAME(do_stack_switch)
634 .long SYMBOL_NAME(do_set_callbacks)
635 .long SYMBOL_NAME(do_net_update)
636 .long SYMBOL_NAME(do_fpu_taskswitch)
637 .long SYMBOL_NAME(do_yield)
638 .long SYMBOL_NAME(kill_domain)
639 .long SYMBOL_NAME(do_dom0_op)
640 .long SYMBOL_NAME(do_network_op)
641 .long SYMBOL_NAME(do_block_io_op)
642 .long SYMBOL_NAME(do_set_debugreg)
643 .long SYMBOL_NAME(do_get_debugreg)
644 .long SYMBOL_NAME(do_update_descriptor)
645 .long SYMBOL_NAME(do_set_fast_trap)
646 .long SYMBOL_NAME(do_dom_mem_op)
647 .long SYMBOL_NAME(do_multicall)
648 .long SYMBOL_NAME(do_kbd_op)
649 .rept NR_syscalls-((.-hypervisor_call_table)/4)
650 .long SYMBOL_NAME(sys_ni_syscall)
651 .endr