ia64/xen-unstable: xen/arch/x86/x86_64/entry.S @ 6756:f752e0c873a6

changeset description: merge?
author:   cl349@firebug.cl.cam.ac.uk
date:     Mon Sep 12 12:32:20 2005 +0000
parents:  dd668f7527cb 939fd35d58da
children: 4d899a738d59 8ca0f98ba8e2
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

#define GET_GUEST_REGS(reg)                     \
        movq $~(STACK_SIZE-1),reg;              \
        andq %rsp,reg;                          \
        orq  $(STACK_SIZE-CPUINFO_sizeof),reg;

#define GET_CURRENT(reg)                        \
        movq $STACK_SIZE-8, reg;                \
        orq  %rsp, reg;                         \
        andq $~7,reg;                           \
        movq (reg),reg;
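/*
 * Both macros derive per-CPU pointers purely from %rsp, relying on each
 * CPU's stack being STACK_SIZE-aligned.  For illustration (assuming
 * STACK_SIZE = 8192; the exact value comes from the build configuration):
 *   GET_GUEST_REGS: (%rsp & ~0x1fff) | (0x2000 - CPUINFO_sizeof)
 *     -> the struct cpu_info at the top of the stack, whose first field
 *        holds the interrupted guest's cpu_user_regs.
 *   GET_CURRENT:    ((%rsp | 0x1ff8) & ~7)
 *     -> the last quadword of the stack, which stores the 'current'
 *        struct vcpu pointer; the final movq dereferences it.
 */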
        ALIGN
/* %rbx: struct vcpu */
switch_to_kernel:
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_syscall_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $0,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events

/* %rbx: struct vcpu, interrupts disabled */
restore_all_guest:
        RESTORE_ALL
        testw $TRAP_syscall,4(%rsp)
        jz    iret_exit_to_guest

        addq  $8,%rsp
        popq  %rcx                      # RIP
        popq  %r11                      # CS
        cmpw  $__GUEST_CS32,%r11w
        popq  %r11                      # RFLAGS
        popq  %rsp                      # RSP
        je    1f
        sysretq
1:      sysretl
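/*
 * Fast SYSCALL return path: a frame built by syscall_enter (TRAP_syscall
 * set in the entry-vector word) is unwound with SYSRET.  The cmpw against
 * __GUEST_CS32 selects sysretl for a guest whose saved %cs is the 32-bit
 * selector and sysretq otherwise; any other frame (e.g. one built for an
 * interrupt) takes the full iretq path below.
 */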

        ALIGN
/* No special register assumptions. */
iret_exit_to_guest:
        addq  $8,%rsp
FLT1:   iretq

.section .fixup,"ax"
FIX1:   popq  -15*8-8(%rsp)             # error_code/entry_vector
        SAVE_ALL                        # 15*8 bytes pushed
        movq  -8(%rsp),%rsi             # error_code/entry_vector
        sti                             # after stack abuse (-1024(%rsp))
        pushq $__HYPERVISOR_DS          # SS
        leaq  8(%rsp),%rax
        pushq %rax                      # RSP
        pushf                           # RFLAGS
        pushq $__HYPERVISOR_CS          # CS
        leaq  DBLFLT1(%rip),%rax
        pushq %rax                      # RIP
        pushq %rsi                      # error_code/entry_vector
        jmp   error_code
DBLFLT1:GET_CURRENT(%rbx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_failsafe_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events
.previous
.section __pre_ex_table,"a"
        .quad FLT1,FIX1
.previous
.section __ex_table,"a"
        .quad DBLFLT1,failsafe_callback
.previous
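/*
 * Roughly, the fixup chain above works as follows: if the iretq at FLT1
 * faults (e.g. the guest supplied bogus segment state), the fault is taken
 * inside Xen with interrupts disabled, and exception_with_ints_disabled
 * below redirects it to FIX1 via the __pre_ex_table entry.  FIX1 rebuilds a
 * full register frame, fabricates a Xen-mode exception frame whose return
 * address is DBLFLT1, and re-enters error_code so the original exception is
 * handled as if it occurred at DBLFLT1.  If the C handler then applies the
 * __ex_table fixup for DBLFLT1, control lands on failsafe_callback, which
 * bounces the guest to its registered failsafe handler instead of crashing
 * Xen.
 */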

        ALIGN
/* No special register assumptions. */
restore_all_xen:
        RESTORE_ALL
        addq  $8,%rsp
        iretq

/*
 * When entering SYSCALL from kernel mode:
 *  %rax                             = hypercall vector
 *  %rdi, %rsi, %rdx, %r10, %r8, %r9 = hypercall arguments
 *  %r11, %rcx                       = SYSCALL-saved %rflags and %rip
 *  NB. We must move %r10 to %rcx for C function-calling ABI.
 *
 * When entering SYSCALL from user mode:
 *  Vector directly to the registered arch.syscall_addr.
 *
 * Initial work is done by per-CPU stack trampolines. At this point %rsp
 * has been initialised to point at the correct Xen stack, and %rsp, %rflags
 * and %cs have been saved. All other registers are still to be saved onto
 * the stack, starting with %rip, and an appropriate %ss must be saved into
 * the space left by the trampoline.
 */
        ALIGN
ENTRY(syscall_enter)
        sti
        movl  $__GUEST_SS,24(%rsp)
        pushq %rcx
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        SAVE_ALL
        GET_CURRENT(%rbx)
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jz    switch_to_kernel

/*hypercall:*/
        movq  %r10,%rcx
        andq  $(NR_hypercalls-1),%rax
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        pushq %rdi; pushq %rsi; pushq %rdx; pushq %rcx; pushq %r8 ; pushq %r9
        leaq  hypercall_args_table(%rip),%r10
        movq  $6,%rcx
        sub   (%r10,%rax,1),%cl
        movq  %rsp,%rdi
        movl  $0xDEADBEEF,%eax
        rep   stosq
        popq  %r9 ; popq  %r8 ; popq  %rcx; popq  %rdx; popq  %rsi; popq  %rdi
        movq  UREGS_rax(%rsp),%rax
        andq  $(NR_hypercalls-1),%rax
        pushq %rax
        pushq UREGS_rip+8(%rsp)
#endif
        leaq  hypercall_table(%rip),%r10
        PERFC_INCR(PERFC_hypercalls, %rax)
        callq *(%r10,%rax,8)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs used by this hypercall. */
        popq  %r10         # Shadow RIP
        cmpq  %r10,UREGS_rip(%rsp)
        popq  %rcx         # Shadow hypercall index
        jne   skip_clobber /* If RIP has changed then don't clobber. */
        leaq  hypercall_args_table(%rip),%r10
        movb  (%r10,%rcx,1),%cl
        movl  $0xDEADBEEF,%r10d
        cmpb  $1,%cl; jb skip_clobber; movq %r10,UREGS_rdi(%rsp)
        cmpb  $2,%cl; jb skip_clobber; movq %r10,UREGS_rsi(%rsp)
        cmpb  $3,%cl; jb skip_clobber; movq %r10,UREGS_rdx(%rsp)
        cmpb  $4,%cl; jb skip_clobber; movq %r10,UREGS_r10(%rsp)
        cmpb  $5,%cl; jb skip_clobber; movq %r10,UREGS_r8(%rsp)
        cmpb  $6,%cl; jb skip_clobber; movq %r10,UREGS_r9(%rsp)
skip_clobber:
#endif
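/*
 * Debug-build sketch of the clobbering above: before dispatch, the six C
 * argument registers are spilled, the ones this hypercall does not take
 * (6 minus the count from hypercall_args_table) are overwritten with
 * 0xDEADBEEF via rep stosq, and shadow copies of the hypercall index and
 * guest RIP are pushed.  After the handler returns, the saved guest RIP is
 * compared against the shadow copy: if it changed, the hypercall was
 * preempted and set up to be restarted, so its argument registers must be
 * preserved; otherwise the registers it did consume are clobbered in the
 * saved frame, ensuring guests never rely on them surviving a hypercall.
 */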
        movq  %rax,UREGS_rax(%rsp)      # save the return value

/* %rbx: struct vcpu */
test_all_events:
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%rbx),%eax
        shl   $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip),%rcx
        testl $~0,(%rcx,%rax,1)
        jnz   process_softirqs
/*test_guest_events:*/
        movq  VCPU_vcpu_info(%rbx),%rax
        testb $0xFF,VCPUINFO_upcall_mask(%rax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%rax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_event_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events
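/*
 * Note the ordering above: softirqs are drained first, then the per-VCPU
 * upcall mask is checked before upcall_pending, so a guest that has masked
 * event delivery is resumed with the event left pending; create_bounce_frame
 * (called with TBF_INTERRUPT) sets the mask again before vectoring to the
 * guest's registered event callback.
 */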

#ifdef CONFIG_VMX
/*
 * At VMExit time the processor saves the guest selectors, rsp, rip,
 * and rflags. Therefore we don't save them, but simply decrement
 * the kernel stack pointer to make it consistent with the stack frame
 * at usual interruption time. Host rflags is not saved by VMX, so we
 * set it to a fixed value.
 *
 * We also need the room, especially because the orig_eax field is used
 * by do_IRQ(). Compared to cpu_user_regs, we skip pushing the following:
 *      (10) u64 gs;
 *      (9)  u64 fs;
 *      (8)  u64 ds;
 *      (7)  u64 es;
 *   <- get_stack_bottom() (= HOST_ESP)
 *      (6)  u64 ss;
 *      (5)  u64 rsp;
 *      (4)  u64 rflags;
 *      (3)  u64 cs;
 *      (2)  u64 rip;
 *      (2/1) u32 entry_vector;
 *      (1/1) u32 error_code;
 */
#define VMX_MONITOR_RFLAGS      0x202   /* IF on */
#define NR_SKIPPED_REGS         6       /* See the above explanation */
#define VMX_SAVE_ALL_NOSEGREGS                  \
        pushq $VMX_MONITOR_RFLAGS;              \
        popfq;                                  \
        subq  $(NR_SKIPPED_REGS*8), %rsp;       \
        pushq %rdi;                             \
        pushq %rsi;                             \
        pushq %rdx;                             \
        pushq %rcx;                             \
        pushq %rax;                             \
        pushq %r8;                              \
        pushq %r9;                              \
        pushq %r10;                             \
        pushq %r11;                             \
        pushq %rbx;                             \
        pushq %rbp;                             \
        pushq %r12;                             \
        pushq %r13;                             \
        pushq %r14;                             \
        pushq %r15;                             \

#define VMX_RESTORE_ALL_NOSEGREGS               \
        popq  %r15;                             \
        popq  %r14;                             \
        popq  %r13;                             \
        popq  %r12;                             \
        popq  %rbp;                             \
        popq  %rbx;                             \
        popq  %r11;                             \
        popq  %r10;                             \
        popq  %r9;                              \
        popq  %r8;                              \
        popq  %rax;                             \
        popq  %rcx;                             \
        popq  %rdx;                             \
        popq  %rsi;                             \
        popq  %rdi;                             \
        addq  $(NR_SKIPPED_REGS*8), %rsp;       \

ENTRY(vmx_asm_vmexit_handler)
        /* selectors are restored/saved by VMX */
        VMX_SAVE_ALL_NOSEGREGS
        call  vmx_vmexit_handler
        jmp   vmx_asm_do_resume

.macro vmx_asm_common launch initialized
1:
.if \initialized
/* vmx_test_all_events */
        GET_CURRENT(%rbx)
/* test_all_events: */
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%rbx),%eax
        shl   $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip), %rdx
        testl $~0,(%rdx,%rax,1)
        jnz   2f

/* vmx_restore_all_guest */
        call  vmx_intr_assist
        call  load_cr2
.endif
        /*
         * Check if we are going back to VMX-based VM
         * By this time, all the setups in the VMCS must be complete.
         */
        VMX_RESTORE_ALL_NOSEGREGS
.if \launch
        /* VMLAUNCH */
        .byte 0x0f,0x01,0xc2
        pushfq
        call  vm_launch_fail
.else
        /* VMRESUME */
        .byte 0x0f,0x01,0xc3
        pushfq
        call  vm_resume_fail
.endif
        /* Should never reach here */
        hlt

        ALIGN

.if \initialized
2:
/* vmx_process_softirqs */
        sti
        call  do_softirq
        jmp   1b
        ALIGN
.endif
.endm

ENTRY(vmx_asm_do_launch)
        vmx_asm_common 1 0

ENTRY(vmx_asm_do_resume)
        vmx_asm_common 0 1

ENTRY(vmx_asm_do_relaunch)
        vmx_asm_common 1 1
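/*
 * The two macro flags select the entry flavour: 'launch' chooses VMLAUNCH
 * (first entry on a given VMCS) over VMRESUME, and 'initialized' includes
 * the softirq drain plus vmx_intr_assist/load_cr2 before re-entering the
 * guest.  Hence vmx_asm_do_launch performs a bare first launch,
 * vmx_asm_do_resume is the common VM-exit return path, and
 * vmx_asm_do_relaunch re-launches with the full checks.  VMLAUNCH and
 * VMRESUME are emitted as raw .byte sequences, presumably because
 * contemporary assemblers lacked these mnemonics.
 */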

#endif

        ALIGN
/* %rbx: struct vcpu */
process_softirqs:
        sti
        call  do_softirq
        jmp   test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK: */
/*   { RCX, R11, [DS-GS,] [CR2,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS } */
/* %rdx: trap_bounce, %rbx: struct vcpu */
/* On return only %rbx is guaranteed non-clobbered. */
create_bounce_frame:
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jnz   1f
        /* Push new frame at registered guest-OS stack base. */
        pushq %rdx
        movq  %rbx,%rdi
        call  toggle_guest_mode
        popq  %rdx
        movq  VCPU_kernel_sp(%rbx),%rsi
        jmp   2f
1:      /* In kernel context already: push new frame at existing %rsp. */
        movq  UREGS_rsp+8(%rsp),%rsi
        andb  $0xfc,UREGS_cs+8(%rsp)    # Indicate kernel context to guest.
2:      andq  $~0xf,%rsi                # Stack frames are 16-byte aligned.
        movq  $HYPERVISOR_VIRT_START,%rax
        cmpq  %rax,%rsi
        jb    1f                        # In +ve address space? Then okay.
        movq  $HYPERVISOR_VIRT_END+60,%rax
        cmpq  %rax,%rsi
        jb    domain_crash_synchronous  # Above Xen private area? Then okay.
1:      movb  TRAPBOUNCE_flags(%rdx),%cl
        subq  $40,%rsi
        movq  UREGS_ss+8(%rsp),%rax
FLT2:   movq  %rax,32(%rsi)             # SS
        movq  UREGS_rsp+8(%rsp),%rax
FLT3:   movq  %rax,24(%rsi)             # RSP
        movq  UREGS_eflags+8(%rsp),%rax
FLT4:   movq  %rax,16(%rsi)             # RFLAGS
        movq  VCPU_vcpu_info(%rbx),%rax
        pushq VCPUINFO_upcall_mask(%rax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                       # TBF_INTERRUPT -> set upcall mask
        orb   %ch,VCPUINFO_upcall_mask(%rax)
        popq  %rax
        shlq  $32,%rax                  # Bits 32-39: saved_upcall_mask
        movw  UREGS_cs+8(%rsp),%ax      # Bits  0-15: CS
FLT5:   movq  %rax,8(%rsi)              # CS/saved_upcall_mask
        movq  UREGS_rip+8(%rsp),%rax
FLT6:   movq  %rax,(%rsi)               # RIP
        testb $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subq  $8,%rsi
        movl  TRAPBOUNCE_error_code(%rdx),%eax
FLT7:   movq  %rax,(%rsi)               # ERROR CODE
        testb $TBF_EXCEPTION_CR2,%cl
        jz    2f
        subq  $8,%rsi
        movq  TRAPBOUNCE_cr2(%rdx),%rax
FLT8:   movq  %rax,(%rsi)               # CR2
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subq  $32,%rsi
        movl  %gs,%eax
FLT9:   movq  %rax,24(%rsi)             # GS
        movl  %fs,%eax
FLT10:  movq  %rax,16(%rsi)             # FS
        movl  %es,%eax
FLT11:  movq  %rax,8(%rsi)              # ES
        movl  %ds,%eax
FLT12:  movq  %rax,(%rsi)               # DS
2:      subq  $16,%rsi
        movq  UREGS_r11+8(%rsp),%rax
FLT13:  movq  %rax,8(%rsi)              # R11
        movq  UREGS_rcx+8(%rsp),%rax
FLT14:  movq  %rax,(%rsi)               # RCX
        /* Rewrite our stack frame and return to guest-OS mode. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        movl  $TRAP_syscall,UREGS_entry_vector+8(%rsp)
        andl  $0xfffcbeff,UREGS_eflags+8(%rsp)
        movq  $__GUEST_SS,UREGS_ss+8(%rsp)
        movq  %rsi,UREGS_rsp+8(%rsp)
        movq  $__GUEST_CS,UREGS_cs+8(%rsp)
        movq  TRAPBOUNCE_eip(%rdx),%rax
        testq %rax,%rax
        jz    domain_crash_synchronous
        movq  %rax,UREGS_rip+8(%rsp)
        movb  $0,TRAPBOUNCE_flags(%rdx)
        ret
.section __ex_table,"a"
        .quad FLT2,domain_crash_synchronous,  FLT3,domain_crash_synchronous
        .quad FLT4,domain_crash_synchronous,  FLT5,domain_crash_synchronous
        .quad FLT6,domain_crash_synchronous,  FLT7,domain_crash_synchronous
        .quad FLT8,domain_crash_synchronous,  FLT9,domain_crash_synchronous
        .quad FLT10,domain_crash_synchronous, FLT11,domain_crash_synchronous
        .quad FLT12,domain_crash_synchronous, FLT13,domain_crash_synchronous
        .quad FLT14,domain_crash_synchronous
.previous
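/*
 * Every FLTn store above writes to the guest's stack and may therefore
 * fault on a bad guest-supplied %rsp; the __ex_table entries route any
 * such fault to domain_crash_synchronous, so a malicious or broken guest
 * kills only itself.  On success the saved frame is rewritten so that
 * restore_all_guest resumes the guest at TRAPBOUNCE_eip, in guest kernel
 * mode, with %rsi as its new stack pointer.
 */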

        ALIGN
/* %rbx: struct vcpu */
process_guest_exception_and_events:
        leaq  VCPU_trap_bounce(%rbx),%rdx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
        jz    test_all_events
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
/* No special register assumptions. */
ENTRY(ret_from_intr)
        GET_CURRENT(%rbx)
        testb $3,UREGS_cs(%rsp)
        jnz   test_all_events
        jmp   restore_all_xen

        ALIGN
/* No special register assumptions. */
error_code:
        SAVE_ALL
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
        jz    exception_with_ints_disabled
        sti
        movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        GET_CURRENT(%rbx)
        PERFC_INCR(PERFC_exceptions, %rax)
        callq *(%rdx,%rax,8)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        jmp   process_guest_exception_and_events

/* No special register assumptions. */
exception_with_ints_disabled:
        testb $3,UREGS_cs(%rsp)         # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        movq  %rsp,%rdi
        call  search_pre_exception_table
        testq %rax,%rax                 # no fixup code for faulting EIP?
        jz    FATAL_exception_with_ints_disabled
        movq  %rax,UREGS_rip(%rsp)
        subq  $8,UREGS_rsp(%rsp)        # add ec/ev to previous stack frame
        testb $15,UREGS_rsp(%rsp)       # return %rsp is now aligned?
        jz    1f                        # then there is a pad quadword already
        movq  %rsp,%rsi
        subq  $8,%rsp
        movq  %rsp,%rdi
        movq  $UREGS_kernel_sizeof/8,%rcx
        rep;  movsq                     # make room for ec/ev
1:      movq  UREGS_error_code(%rsp),%rax # ec/ev
        movq  %rax,UREGS_kernel_sizeof(%rsp)
        jmp   restore_all_xen           # return to fixup code
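/*
 * Sketch of the path above: an exception taken while Xen had interrupts
 * disabled is only tolerated if it hit an address listed in the
 * pre-exception table (in practice the guest-bound iretq at FLT1).  The
 * frame's return address is redirected to the registered fixup (FIX1), the
 * interrupted context's saved %rsp is lowered by one quadword, and the
 * error_code/entry_vector word is copied just above the current exception
 * frame so the fixup code finds it on its stack after restore_all_xen.  The
 * rep movsq shuffle runs only when the current frame must be shifted down
 * to make room for that extra quadword.
 */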

/* No special register assumptions. */
FATAL_exception_with_ints_disabled:
        movl  UREGS_entry_vector(%rsp),%edi
        movq  %rsp,%rsi
        call  fatal_trap
        ud2

ENTRY(divide_error)
        pushq $0
        movl  $TRAP_divide_error,4(%rsp)
        jmp   error_code

ENTRY(coprocessor_error)
        pushq $0
        movl  $TRAP_copro_error,4(%rsp)
        jmp   error_code

ENTRY(simd_coprocessor_error)
        pushq $0
        movl  $TRAP_simd_error,4(%rsp)
        jmp   error_code

ENTRY(device_not_available)
        pushq $0
        movl  $TRAP_no_device,4(%rsp)
        jmp   error_code

ENTRY(debug)
        pushq $0
        movl  $TRAP_debug,4(%rsp)
        jmp   error_code

ENTRY(int3)
        pushq $0
        movl  $TRAP_int3,4(%rsp)
        jmp   error_code

ENTRY(overflow)
        pushq $0
        movl  $TRAP_overflow,4(%rsp)
        jmp   error_code

ENTRY(bounds)
        pushq $0
        movl  $TRAP_bounds,4(%rsp)
        jmp   error_code

ENTRY(invalid_op)
        pushq $0
        movl  $TRAP_invalid_op,4(%rsp)
        jmp   error_code

ENTRY(coprocessor_segment_overrun)
        pushq $0
        movl  $TRAP_copro_seg,4(%rsp)
        jmp   error_code

ENTRY(invalid_TSS)
        movl  $TRAP_invalid_tss,4(%rsp)
        jmp   error_code

ENTRY(segment_not_present)
        movl  $TRAP_no_segment,4(%rsp)
        jmp   error_code

ENTRY(stack_segment)
        movl  $TRAP_stack_error,4(%rsp)
        jmp   error_code

ENTRY(general_protection)
        movl  $TRAP_gp_fault,4(%rsp)
        jmp   error_code

ENTRY(alignment_check)
        movl  $TRAP_alignment_check,4(%rsp)
        jmp   error_code

ENTRY(page_fault)
        movl  $TRAP_page_fault,4(%rsp)
        jmp   error_code

ENTRY(machine_check)
        pushq $0
        movl  $TRAP_machine_check,4(%rsp)
        jmp   error_code

ENTRY(spurious_interrupt_bug)
        pushq $0
        movl  $TRAP_spurious_int,4(%rsp)
        jmp   error_code

ENTRY(double_fault)
        movl  $TRAP_double_fault,4(%rsp)
        jmp   error_code

ENTRY(nmi)
        pushq $0
        SAVE_ALL
        inb   $0x61,%al
        movl  %eax,%esi                 # reason
        movq  %rsp,%rdi                 # regs
        call  do_nmi
        jmp   restore_all_xen

do_arch_sched_op:
        # Ensure we return success even if we return via schedule_tail()
        xorl  %eax,%eax
        GET_GUEST_REGS(%r10)
        movq  %rax,UREGS_rax(%r10)
        jmp   do_sched_op
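/*
 * do_arch_sched_op pre-stores a zero return value in the caller's saved
 * register frame (located via GET_GUEST_REGS) before tailing into
 * do_sched_op: if the scheduling operation switches context and eventually
 * returns to the guest through schedule_tail() rather than through the
 * normal hypercall exit path, the guest still observes a success return
 * code in %rax.
 */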

.data

ENTRY(exception_table)
        .quad do_divide_error
        .quad do_debug
        .quad 0 # nmi
        .quad do_int3
        .quad do_overflow
        .quad do_bounds
        .quad do_invalid_op
        .quad math_state_restore
        .quad do_double_fault
        .quad do_coprocessor_segment_overrun
        .quad do_invalid_TSS
        .quad do_segment_not_present
        .quad do_stack_segment
        .quad do_general_protection
        .quad do_page_fault
        .quad do_spurious_interrupt_bug
        .quad do_coprocessor_error
        .quad do_alignment_check
        .quad do_machine_check
        .quad do_simd_coprocessor_error

ENTRY(hypercall_table)
        .quad do_set_trap_table     /*  0 */
        .quad do_mmu_update
        .quad do_set_gdt
        .quad do_stack_switch
        .quad do_set_callbacks
        .quad do_fpu_taskswitch     /*  5 */
        .quad do_arch_sched_op
        .quad do_dom0_op
        .quad do_set_debugreg
        .quad do_get_debugreg
        .quad do_update_descriptor  /* 10 */
        .quad do_ni_hypercall
        .quad do_memory_op
        .quad do_multicall
        .quad do_update_va_mapping
        .quad do_set_timer_op       /* 15 */
        .quad do_event_channel_op
        .quad do_xen_version
        .quad do_console_io
        .quad do_physdev_op
        .quad do_grant_table_op     /* 20 */
        .quad do_vm_assist
        .quad do_update_va_mapping_otherdomain
        .quad do_switch_to_user
        .quad do_boot_vcpu
        .quad do_set_segment_base   /* 25 */
        .quad do_mmuext_op
        .quad do_acm_op
        .rept NR_hypercalls-((.-hypercall_table)/8)
        .quad do_ni_hypercall
        .endr

ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table */     /*  0 */
        .byte 4 /* do_mmu_update */
        .byte 2 /* do_set_gdt */
        .byte 2 /* do_stack_switch */
        .byte 3 /* do_set_callbacks */
        .byte 1 /* do_fpu_taskswitch */     /*  5 */
        .byte 2 /* do_arch_sched_op */
        .byte 1 /* do_dom0_op */
        .byte 2 /* do_set_debugreg */
        .byte 1 /* do_get_debugreg */
        .byte 2 /* do_update_descriptor */  /* 10 */
        .byte 0 /* do_ni_hypercall */
        .byte 2 /* do_memory_op */
        .byte 2 /* do_multicall */
        .byte 3 /* do_update_va_mapping */
        .byte 1 /* do_set_timer_op */       /* 15 */
        .byte 1 /* do_event_channel_op */
        .byte 2 /* do_xen_version */
        .byte 3 /* do_console_io */
        .byte 1 /* do_physdev_op */
        .byte 3 /* do_grant_table_op */     /* 20 */
        .byte 2 /* do_vm_assist */
        .byte 4 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_switch_to_user */
        .byte 2 /* do_boot_vcpu */
        .byte 2 /* do_set_segment_base */   /* 25 */
        .byte 4 /* do_mmuext_op */
        .byte 1 /* do_acm_op */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall */
        .endr