
xen/arch/x86/x86_64/entry.S @ 14445:522a1cd17b6d

[XEN] Implement faster int 0x80 handling for compat mode guests.

Using the GPF handler to spot the software interrupt and pass it back
to the guest increases the base syscall time by a factor of 2.7
compared with 32on32 using direct trap to ring 1. (0.3270->0.8680
microseconds, measured with lmbench lat_syscall).

Since the 64-bit IDT can only contain 64-bit segment selectors, we
cannot trap directly to compat-mode ring 1. However, implementing a
dedicated 64-bit ring 0 trap handler allows us to avoid much of the
GPF handler overhead, reducing the slowdown to a factor of 1.7
(0.3270->0.5497 microseconds).
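
For context: this fast path can only fire once the guest has registered a
handler for vector 0x80 in its virtual IDT (Xen caches the registration in
the vcpu's int80_bounce). A minimal sketch of the guest-side registration,
assuming the standard PV trap_info interface; entry_int80 and the include
path are hypothetical:

    #include <xen/interface/xen.h>  /* struct trap_info; path varies by OS */

    extern void entry_int80(void);  /* hypothetical guest syscall handler */

    static struct trap_info traps[] = {
        /* vector, flags (DPL in bits 0-1), cs, handler address */
        { 0x80, 3, FLAT_KERNEL_CS, (unsigned long)entry_int80 },
        { 0, 0, 0, 0 },             /* zero entry terminates the table */
    };

    void register_int80_handler(void)
    {
        HYPERVISOR_set_trap_table(traps);
    }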

Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
author Ian Campbell <ian.campbell@xensource.com>
date Tue Mar 20 14:33:15 2007 +0000
parents bca284f67702
children 96f167771979
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>
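
/*
 * GET_GUEST_REGS(reg): reg = address of the guest register frame saved at
 * the top of this CPU's stack: round %rsp down to the STACK_SIZE-aligned
 * stack base, then offset to the cpu_info block at the stack top.
 */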
#define GET_GUEST_REGS(reg)                     \
        movq $~(STACK_SIZE-1),reg;              \
        andq %rsp,reg;                          \
        orq  $(STACK_SIZE-CPUINFO_sizeof),reg;
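
/*
 * GET_CURRENT(reg): reg = the `current' vcpu pointer, stashed in the
 * topmost 8-byte slot of the per-CPU stack so that it can be recovered
 * from %rsp alone.
 */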
#define GET_CURRENT(reg)                        \
        movq $STACK_SIZE-8, reg;                \
        orq  %rsp, reg;                         \
        andq $~7,reg;                           \
        movq (reg),reg;

        ALIGN
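/*
 * SYSCALL from guest user space: build a bounce frame that re-enters the
 * guest at its registered syscall entry point (VCPU_syscall_addr).
 */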
/* %rbx: struct vcpu */
switch_to_kernel:
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_syscall_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $0,TRAPBOUNCE_flags(%rdx)
        bt    $_VGCF_syscall_disables_events,VCPU_guest_context_flags(%rbx)
        jnc   1f
        orw   $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
1:      call  create_bounce_frame
        jmp   test_all_events

/* %rbx: struct vcpu, interrupts disabled */
restore_all_guest:
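        /*
         * If this frame was created by SYSCALL entry (TRAP_syscall set in
         * the saved entry-vector word) we can return via SYSRETQ, or
         * SYSRETL for a 32-bit kernel CS; otherwise fall through to IRETQ.
         */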
        RESTORE_ALL
        testw $TRAP_syscall,4(%rsp)
        jz    iret_exit_to_guest
        addq  $8,%rsp
        popq  %rcx                    # RIP
        popq  %r11                    # CS
        cmpw  $FLAT_KERNEL_CS32,%r11w
        popq  %r11                    # RFLAGS
        popq  %rsp                    # RSP
        je    1f
        sysretq
1:      sysretl

        ALIGN
/* No special register assumptions. */
iret_exit_to_guest:
        addq  $8,%rsp
.Lft1:  iretq

.section .fixup,"ax"
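# Recovery path for a faulting iretq in iret_exit_to_guest (registered in
# __pre_ex_table below): rebuild an exception frame in Xen context and
# re-enter handle_exception; if the guest state is unusable, the __ex_table
# fixup routes us to failsafe_callback, which bounces the guest to its
# registered failsafe handler.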
.Lfx1:  popq  -15*8-8(%rsp)           # error_code/entry_vector
        SAVE_ALL                      # 15*8 bytes pushed
        movq  -8(%rsp),%rsi           # error_code/entry_vector
        sti                           # after stack abuse (-1024(%rsp))
        pushq $__HYPERVISOR_DS        # SS
        leaq  8(%rsp),%rax
        pushq %rax                    # RSP
        pushf                         # RFLAGS
        pushq $__HYPERVISOR_CS        # CS
        leaq  .Ldf1(%rip),%rax
        pushq %rax                    # RIP
        pushq %rsi                    # error_code/entry_vector
        jmp   handle_exception
.Ldf1:  GET_CURRENT(%rbx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_failsafe_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
        bt    $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%rbx)
        jnc   1f
        orw   $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
1:      call  create_bounce_frame
        jmp   test_all_events
.previous
.section __pre_ex_table,"a"
        .quad .Lft1,.Lfx1
.previous
.section __ex_table,"a"
        .quad .Ldf1,failsafe_callback
.previous

        ALIGN
/* No special register assumptions. */
restore_all_xen:
        RESTORE_ALL
        addq  $8,%rsp
        iretq

/*
 * When entering SYSCALL from kernel mode:
 *  %rax                             = hypercall vector
 *  %rdi, %rsi, %rdx, %r10, %r8, %r9 = hypercall arguments
 *  %r11, %rcx                       = SYSCALL-saved %rflags and %rip
 *  NB. We must move %r10 to %rcx for C function-calling ABI.
 *
 * When entering SYSCALL from user mode:
 *  Vector directly to the registered arch.syscall_addr.
 *
 * Initial work is done by per-CPU stack trampolines. At this point %rsp
 * has been initialised to point at the correct Xen stack, and %rsp, %rflags
 * and %cs have been saved. All other registers are still to be saved onto
 * the stack, starting with %rip, and an appropriate %ss must be saved into
 * the space left by the trampoline.
 */
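
/*
 * Illustration only (not part of this file): a 64-bit PV guest invoking
 * the two-argument xen_version hypercall through this path would, in
 * effect, execute:
 *
 *     movl  $__HYPERVISOR_xen_version,%eax
 *     movl  $XENVER_version,%edi     # cmd
 *     xorl  %esi,%esi                # arg == NULL
 *     syscall                        # %rcx/%r11 clobbered as noted above
 *
 * In practice guests go via the hypercall transfer page, which contains
 * an equivalent stub for each hypercall.
 */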
        ALIGN
ENTRY(syscall_enter)
        sti
        movl  $FLAT_KERNEL_SS,24(%rsp)
        pushq %rcx
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        SAVE_ALL
        GET_CURRENT(%rbx)
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jz    switch_to_kernel

/*hypercall:*/
        movq  %r10,%rcx
        cmpq  $NR_hypercalls,%rax
        jae   bad_hypercall
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        pushq %rdi; pushq %rsi; pushq %rdx; pushq %rcx; pushq %r8 ; pushq %r9
        leaq  hypercall_args_table(%rip),%r10
        movq  $6,%rcx
        sub   (%r10,%rax,1),%cl
        movq  %rsp,%rdi
        movl  $0xDEADBEEF,%eax
        rep   stosq
        popq  %r9 ; popq  %r8 ; popq  %rcx; popq  %rdx; popq  %rsi; popq  %rdi
        movq  UREGS_rax(%rsp),%rax
        pushq %rax
        pushq UREGS_rip+8(%rsp)
#endif
        leaq  hypercall_table(%rip),%r10
        PERFC_INCR(PERFC_hypercalls, %rax)
        callq *(%r10,%rax,8)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs used by this hypercall. */
        popq  %r10                    # Shadow RIP
        cmpq  %r10,UREGS_rip+8(%rsp)
        popq  %rcx                    # Shadow hypercall index
        jne   skip_clobber            /* If RIP has changed then don't clobber. */
        leaq  hypercall_args_table(%rip),%r10
        movb  (%r10,%rcx,1),%cl
        movl  $0xDEADBEEF,%r10d
        cmpb  $1,%cl; jb skip_clobber; movq %r10,UREGS_rdi(%rsp)
        cmpb  $2,%cl; jb skip_clobber; movq %r10,UREGS_rsi(%rsp)
        cmpb  $3,%cl; jb skip_clobber; movq %r10,UREGS_rdx(%rsp)
        cmpb  $4,%cl; jb skip_clobber; movq %r10,UREGS_r10(%rsp)
        cmpb  $5,%cl; jb skip_clobber; movq %r10,UREGS_r8(%rsp)
        cmpb  $6,%cl; jb skip_clobber; movq %r10,UREGS_r9(%rsp)
skip_clobber:
#endif
        movq  %rax,UREGS_rax(%rsp)    # save the return value

/* %rbx: struct vcpu */
test_all_events:
        cli                           # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%rbx),%eax
        shl   $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip),%rcx
        testl $~0,(%rcx,%rax,1)
        jnz   process_softirqs
        btr   $_VCPUF_nmi_pending,VCPU_flags(%rbx)
        jc    process_nmi
test_guest_events:
        movq  VCPU_vcpu_info(%rbx),%rax
        testb $0xFF,VCPUINFO_upcall_mask(%rax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%rax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_event_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
/* %rbx: struct vcpu */
process_softirqs:
        sti
        call  do_softirq
        jmp   test_all_events

        ALIGN
/* %rbx: struct vcpu */
process_nmi:
        movq  VCPU_nmi_addr(%rbx),%rax
        test  %rax,%rax
        jz    test_all_events
        bts   $_VCPUF_nmi_masked,VCPU_flags(%rbx)
        jc    1f
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events
1:      bts   $_VCPUF_nmi_pending,VCPU_flags(%rbx)
        jmp   test_guest_events

bad_hypercall:
        movq  $-ENOSYS,UREGS_rax(%rsp)
        jmp   test_all_events
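
/*
 * Fast path for `int $0x80' from a guest (see the changeset description
 * above): bounce directly to the handler the guest registered for vector
 * 0x80, avoiding the full #GP emulation path.
 */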
ENTRY(int80_direct_trap)
        pushq $0
        SAVE_ALL

        GET_CURRENT(%rbx)

        /* Check that the callback is non-null. */
        leaq  VCPU_int80_bounce(%rbx),%rdx
        cmp   $0, TRAPBOUNCE_flags(%rdx)
        jz    int80_slow_path

        movq  VCPU_domain(%rbx),%rax
        btl   $_DOMF_compat,DOMAIN_domain_flags(%rax)
        jc    compat_int80_direct_trap

        call  create_bounce_frame
        jmp   restore_all_guest

int80_slow_path:
        /*
         * Setup entry vector and error code as if this was a GPF caused by an
         * IDT entry with DPL==0.
         */
        movl  $((0x80 << 3) | 0x2),UREGS_error_code(%rsp)
        movl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
        /* A GPF wouldn't have incremented the instruction pointer. */
        sub   $2,UREGS_rip(%rsp)
        jmp   handle_exception_saved

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK:                     */
/*   { RCX, R11, [DS-GS,] [CR2,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS }   */
/* %rdx: trap_bounce, %rbx: struct vcpu                                  */
/* On return only %rbx is guaranteed non-clobbered.                      */
create_bounce_frame:
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jnz   1f
        /* Push new frame at registered guest-OS stack base. */
        pushq %rdx
        movq  %rbx,%rdi
        call  toggle_guest_mode
        popq  %rdx
        movq  VCPU_kernel_sp(%rbx),%rsi
        jmp   2f
1:      /* In kernel context already: push new frame at existing %rsp. */
        movq  UREGS_rsp+8(%rsp),%rsi
        andb  $0xfc,UREGS_cs+8(%rsp)  # Indicate kernel context to guest.
2:      andq  $~0xf,%rsi              # Stack frames are 16-byte aligned.
        movq  $HYPERVISOR_VIRT_START,%rax
        cmpq  %rax,%rsi
        jb    1f                      # In +ve address space? Then okay.
        movq  $HYPERVISOR_VIRT_END+60,%rax
        cmpq  %rax,%rsi
        jb    domain_crash_synchronous # Above Xen private area? Then okay.
1:      movb  TRAPBOUNCE_flags(%rdx),%cl
        subq  $40,%rsi
        movq  UREGS_ss+8(%rsp),%rax
.Lft2:  movq  %rax,32(%rsi)           # SS
        movq  UREGS_rsp+8(%rsp),%rax
.Lft3:  movq  %rax,24(%rsi)           # RSP
        movq  VCPU_vcpu_info(%rbx),%rax
        pushq VCPUINFO_upcall_mask(%rax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                     # TBF_INTERRUPT -> set upcall mask
        orb   %ch,VCPUINFO_upcall_mask(%rax)
        popq  %rax
        shlq  $32,%rax                # Bits 32-39: saved_upcall_mask
        movw  UREGS_cs+8(%rsp),%ax    # Bits  0-15: CS
.Lft4:  movq  %rax,8(%rsi)            # CS / saved_upcall_mask
        shrq  $32,%rax
        testb $0xFF,%al               # Bits 0-7: saved_upcall_mask
        setz  %ch                     # %ch == !saved_upcall_mask
        movq  UREGS_eflags+8(%rsp),%rax
        andq  $~X86_EFLAGS_IF,%rax
        shlb  $1,%ch                  # Bit 9 (EFLAGS.IF)
        orb   %ch,%ah                 # Fold EFLAGS.IF into %eax
.Lft5:  movq  %rax,16(%rsi)           # RFLAGS
        movq  UREGS_rip+8(%rsp),%rax
.Lft6:  movq  %rax,(%rsi)             # RIP
        testb $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subq  $8,%rsi
        movl  TRAPBOUNCE_error_code(%rdx),%eax
.Lft7:  movq  %rax,(%rsi)             # ERROR CODE
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subq  $32,%rsi
        movl  %gs,%eax
.Lft8:  movq  %rax,24(%rsi)           # GS
        movl  %fs,%eax
.Lft9:  movq  %rax,16(%rsi)           # FS
        movl  %es,%eax
.Lft10: movq  %rax,8(%rsi)            # ES
        movl  %ds,%eax
.Lft11: movq  %rax,(%rsi)             # DS
2:      subq  $16,%rsi
        movq  UREGS_r11+8(%rsp),%rax
.Lft12: movq  %rax,8(%rsi)            # R11
        movq  UREGS_rcx+8(%rsp),%rax
.Lft13: movq  %rax,(%rsi)             # RCX
        /* Rewrite our stack frame and return to guest-OS mode. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        /* Also clear AC: alignment checks shouldn't trigger in kernel mode. */
        movl  $TRAP_syscall,UREGS_entry_vector+8(%rsp)
        andl  $~(X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|\
                 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
        movq  $FLAT_KERNEL_SS,UREGS_ss+8(%rsp)
        movq  %rsi,UREGS_rsp+8(%rsp)
        movq  $FLAT_KERNEL_CS,UREGS_cs+8(%rsp)
        movq  TRAPBOUNCE_eip(%rdx),%rax
        testq %rax,%rax
        jz    domain_crash_synchronous
        movq  %rax,UREGS_rip+8(%rsp)
        movb  $0,TRAPBOUNCE_flags(%rdx)
        ret
.section __ex_table,"a"
        .quad .Lft2,domain_crash_synchronous , .Lft3,domain_crash_synchronous
        .quad .Lft4,domain_crash_synchronous , .Lft5,domain_crash_synchronous
        .quad .Lft6,domain_crash_synchronous , .Lft7,domain_crash_synchronous
        .quad .Lft8,domain_crash_synchronous , .Lft9,domain_crash_synchronous
        .quad .Lft10,domain_crash_synchronous , .Lft11,domain_crash_synchronous
        .quad .Lft12,domain_crash_synchronous , .Lft13,domain_crash_synchronous
.previous

domain_crash_synchronous_string:
        .asciz "domain_crash_sync called from entry.S\n"

ENTRY(domain_crash_synchronous)
        # Get out of the guest-save area of the stack.
        GET_GUEST_REGS(%rax)
        movq  %rax,%rsp
        # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
#ifdef CONFIG_COMPAT
        movq  CPUINFO_current_vcpu(%rax),%rax
        movq  VCPU_domain(%rax),%rax
        btl   $_DOMF_compat,DOMAIN_domain_flags(%rax)
        setnc %al
        leal  (%rax,%rax,2),%eax
        orb   %al,UREGS_cs(%rsp)
#else
        orb   $3,UREGS_cs(%rsp)
#endif
        # printk(domain_crash_synchronous_string)
        leaq  domain_crash_synchronous_string(%rip),%rdi
        xorl  %eax,%eax
        call  printk
        jmp   __domain_crash_synchronous

        ALIGN
/* No special register assumptions. */
ENTRY(ret_from_intr)
        GET_CURRENT(%rbx)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
#ifndef CONFIG_COMPAT
        jmp   test_all_events
#else
        movq  VCPU_domain(%rbx),%rax
        btl   $_DOMF_compat,DOMAIN_domain_flags(%rax)
        jnc   test_all_events
        jmp   compat_test_all_events
#endif

        ALIGN
/* No special register assumptions. */
ENTRY(handle_exception)
        SAVE_ALL
handle_exception_saved:
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
        jz    exception_with_ints_disabled
        sti
1:      movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        GET_CURRENT(%rbx)
        PERFC_INCR(PERFC_exceptions, %rax)
        callq *(%rdx,%rax,8)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        leaq  VCPU_trap_bounce(%rbx),%rdx
#ifdef CONFIG_COMPAT
        movq  VCPU_domain(%rbx),%rax
        btl   $_DOMF_compat,DOMAIN_domain_flags(%rax)
        jc    compat_post_handle_exception
#endif
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
        jz    test_all_events
        call  create_bounce_frame
        jmp   test_all_events
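
/*
 * An exception was taken inside Xen with interrupts disabled. This is
 * survivable only if a __pre_ex_table fixup exists for the faulting %rip
 * (e.g. the iretq in iret_exit_to_guest); anything else is fatal.
 */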
/* No special register assumptions. */
exception_with_ints_disabled:
        testb $3,UREGS_cs(%rsp)       # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        movq  %rsp,%rdi
        call  search_pre_exception_table
        testq %rax,%rax               # no fixup code for faulting EIP?
        jz    1b
        movq  %rax,UREGS_rip(%rsp)
        subq  $8,UREGS_rsp(%rsp)      # add ec/ev to previous stack frame
        testb $15,UREGS_rsp(%rsp)     # return %rsp is now aligned?
        jz    1f                      # then there is a pad quadword already
        movq  %rsp,%rsi
        subq  $8,%rsp
        movq  %rsp,%rdi
        movq  $UREGS_kernel_sizeof/8,%rcx
        rep;  movsq                   # make room for ec/ev
1:      movq  UREGS_error_code(%rsp),%rax # ec/ev
        movq  %rax,UREGS_kernel_sizeof(%rsp)
        jmp   restore_all_xen         # return to fixup code

/* No special register assumptions. */
FATAL_exception_with_ints_disabled:
        movl  UREGS_entry_vector(%rsp),%edi
        movq  %rsp,%rsi
        call  fatal_trap
        ud2

ENTRY(divide_error)
        pushq $0
        movl  $TRAP_divide_error,4(%rsp)
        jmp   handle_exception

ENTRY(coprocessor_error)
        pushq $0
        movl  $TRAP_copro_error,4(%rsp)
        jmp   handle_exception

ENTRY(simd_coprocessor_error)
        pushq $0
        movl  $TRAP_simd_error,4(%rsp)
        jmp   handle_exception

ENTRY(device_not_available)
        pushq $0
        movl  $TRAP_no_device,4(%rsp)
        jmp   handle_exception

ENTRY(debug)
        pushq $0
        movl  $TRAP_debug,4(%rsp)
        jmp   handle_exception

ENTRY(int3)
        pushq $0
        movl  $TRAP_int3,4(%rsp)
        jmp   handle_exception

ENTRY(overflow)
        pushq $0
        movl  $TRAP_overflow,4(%rsp)
        jmp   handle_exception

ENTRY(bounds)
        pushq $0
        movl  $TRAP_bounds,4(%rsp)
        jmp   handle_exception

ENTRY(invalid_op)
        pushq $0
        movl  $TRAP_invalid_op,4(%rsp)
        jmp   handle_exception

ENTRY(coprocessor_segment_overrun)
        pushq $0
        movl  $TRAP_copro_seg,4(%rsp)
        jmp   handle_exception

ENTRY(invalid_TSS)
        movl  $TRAP_invalid_tss,4(%rsp)
        jmp   handle_exception

ENTRY(segment_not_present)
        movl  $TRAP_no_segment,4(%rsp)
        jmp   handle_exception

ENTRY(stack_segment)
        movl  $TRAP_stack_error,4(%rsp)
        jmp   handle_exception

ENTRY(general_protection)
        movl  $TRAP_gp_fault,4(%rsp)
        jmp   handle_exception

ENTRY(alignment_check)
        movl  $TRAP_alignment_check,4(%rsp)
        jmp   handle_exception

ENTRY(page_fault)
        movl  $TRAP_page_fault,4(%rsp)
        jmp   handle_exception

ENTRY(machine_check)
        pushq $0
        movl  $TRAP_machine_check,4(%rsp)
        jmp   handle_exception

ENTRY(spurious_interrupt_bug)
        pushq $0
        movl  $TRAP_spurious_int,4(%rsp)
        jmp   handle_exception

ENTRY(double_fault)
        SAVE_ALL
        movq  %rsp,%rdi
        call  do_double_fault
        ud2

ENTRY(early_page_fault)
        SAVE_ALL
        movq  %rsp,%rdi
        call  do_early_page_fault
        jmp   restore_all_xen

ENTRY(nmi)
        pushq $0
        SAVE_ALL
        testb $3,UREGS_cs(%rsp)
        jz    nmi_in_hypervisor_mode
        /* Interrupted guest context. Copy the context to stack bottom. */
        GET_GUEST_REGS(%rbx)
        movl  $UREGS_kernel_sizeof/8,%ecx
1:      popq  %rax
        movq  %rax,(%rbx)
        addq  $8,%rbx
        loop  1b
        subq  $UREGS_kernel_sizeof,%rbx
        movq  %rbx,%rsp
nmi_in_hypervisor_mode:
        movq  %rsp,%rdi
        call  do_nmi
        jmp   ret_from_intr

.data
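
/*
 * C-level exception handlers, indexed by trap vector. The zero entries
 * (NMI, double fault) are never dispatched via this table; they have
 * dedicated entry points above.
 */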
ENTRY(exception_table)
        .quad do_divide_error
        .quad do_debug
        .quad 0 # nmi
        .quad do_int3
        .quad do_overflow
        .quad do_bounds
        .quad do_invalid_op
        .quad math_state_restore
        .quad 0 # double_fault
        .quad do_coprocessor_segment_overrun
        .quad do_invalid_TSS
        .quad do_segment_not_present
        .quad do_stack_segment
        .quad do_general_protection
        .quad do_page_fault
        .quad do_spurious_interrupt_bug
        .quad do_coprocessor_error
        .quad do_alignment_check
        .quad do_machine_check
        .quad do_simd_coprocessor_error

ENTRY(hypercall_table)
        .quad do_set_trap_table     /*  0 */
        .quad do_mmu_update
        .quad do_set_gdt
        .quad do_stack_switch
        .quad do_set_callbacks
        .quad do_fpu_taskswitch     /*  5 */
        .quad do_sched_op_compat
        .quad do_platform_op
        .quad do_set_debugreg
        .quad do_get_debugreg
        .quad do_update_descriptor  /* 10 */
        .quad do_ni_hypercall
        .quad do_memory_op
        .quad do_multicall
        .quad do_update_va_mapping
        .quad do_set_timer_op       /* 15 */
        .quad do_event_channel_op_compat
        .quad do_xen_version
        .quad do_console_io
        .quad do_physdev_op_compat
        .quad do_grant_table_op     /* 20 */
        .quad do_vm_assist
        .quad do_update_va_mapping_otherdomain
        .quad do_iret
        .quad do_vcpu_op
        .quad do_set_segment_base   /* 25 */
        .quad do_mmuext_op
        .quad do_acm_op
        .quad do_nmi_op
        .quad do_sched_op
        .quad do_callback_op        /* 30 */
        .quad do_xenoprof_op
        .quad do_event_channel_op
        .quad do_physdev_op
        .quad do_hvm_op
        .quad do_sysctl             /* 35 */
        .quad do_domctl
        .quad do_kexec_op
        .rept NR_hypercalls-((.-hypercall_table)/8)
        .quad do_ni_hypercall
        .endr
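
/*
 * Number of arguments taken by each hypercall, used by the debug-build
 * (#ifndef NDEBUG) argument-clobbering logic in the hypercall path above.
 */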
ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table */             /*  0 */
        .byte 4 /* do_mmu_update */
        .byte 2 /* do_set_gdt */
        .byte 2 /* do_stack_switch */
        .byte 3 /* do_set_callbacks */
        .byte 1 /* do_fpu_taskswitch */             /*  5 */
        .byte 2 /* do_sched_op_compat */
        .byte 1 /* do_platform_op */
        .byte 2 /* do_set_debugreg */
        .byte 1 /* do_get_debugreg */
        .byte 2 /* do_update_descriptor */          /* 10 */
        .byte 0 /* do_ni_hypercall */
        .byte 2 /* do_memory_op */
        .byte 2 /* do_multicall */
        .byte 3 /* do_update_va_mapping */
        .byte 1 /* do_set_timer_op */               /* 15 */
        .byte 1 /* do_event_channel_op_compat */
        .byte 2 /* do_xen_version */
        .byte 3 /* do_console_io */
        .byte 1 /* do_physdev_op_compat */
        .byte 3 /* do_grant_table_op */             /* 20 */
        .byte 2 /* do_vm_assist */
        .byte 4 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_iret */
        .byte 3 /* do_vcpu_op */
        .byte 2 /* do_set_segment_base */           /* 25 */
        .byte 4 /* do_mmuext_op */
        .byte 1 /* do_acm_op */
        .byte 2 /* do_nmi_op */
        .byte 2 /* do_sched_op */
        .byte 2 /* do_callback_op */                /* 30 */
        .byte 2 /* do_xenoprof_op */
        .byte 2 /* do_event_channel_op */
        .byte 2 /* do_physdev_op */
        .byte 2 /* do_hvm_op */
        .byte 1 /* do_sysctl */                     /* 35 */
        .byte 1 /* do_domctl */
        .byte 2 /* do_kexec_op */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall */
        .endr