/*
 * direct-io.hg: xen/arch/x86/x86_64/entry.S @ 15416:b35b8053012e
 * Changeset: Fix x86/64 failsafe callback handling.
 * Signed-off-by: Keir Fraser <keir@xensource.com>
 * Date: Thu Jun 21 18:02:50 2007 +0100
 */
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

#define GET_GUEST_REGS(reg)                     \
        movq $~(STACK_SIZE-1),reg;              \
        andq %rsp,reg;                          \
        orq  $(STACK_SIZE-CPUINFO_sizeof),reg;

#define GET_CURRENT(reg)                        \
        movq $STACK_SIZE-8, reg;                \
        orq  %rsp, reg;                         \
        andq $~7,reg;                           \
        movq (reg),reg;
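
/*
 * Both macros exploit the fact that each physical CPU's stack is
 * STACK_SIZE-aligned: masking %rsp recovers the stack base, and the
 * struct cpu_info at the top of the stack holds the guest register frame
 * and, in its final quadword, the 'current' vcpu pointer. A rough C
 * equivalent (an illustrative sketch only; the cpu_info layout is taken
 * on faith from the asm offsets used here):
 *
 *     struct cpu_user_regs *get_guest_regs(unsigned long rsp)
 *     {
 *         unsigned long base = rsp & ~(STACK_SIZE - 1);
 *         return (struct cpu_user_regs *)(base + STACK_SIZE - CPUINFO_sizeof);
 *     }
 *
 *     struct vcpu *get_current(unsigned long rsp)
 *     {
 *         unsigned long slot = (rsp | (STACK_SIZE - 8)) & ~7UL;
 *         return *(struct vcpu **)slot;   // top qword of this stack
 *     }
 */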
        ALIGN
/* %rbx: struct vcpu */
switch_to_kernel:
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_syscall_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  $0,TRAPBOUNCE_flags(%rdx)
        bt    $_VGCF_syscall_disables_events,VCPU_guest_context_flags(%rbx)
        jnc   1f
        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
1:      call  create_bounce_frame
        jmp   test_all_events

/* %rbx: struct vcpu, interrupts disabled */
restore_all_guest:
        ASSERT_INTERRUPTS_DISABLED
        RESTORE_ALL
        testw $TRAP_syscall,4(%rsp)
        jz    iret_exit_to_guest

        addq  $8,%rsp
        popq  %rcx                    # RIP
        popq  %r11                    # CS
        cmpw  $FLAT_KERNEL_CS32,%r11w
        popq  %r11                    # RFLAGS
        popq  %rsp                    # RSP
        je    1f
        sysretq
1:      sysretl
        ALIGN
/* No special register assumptions. */
iret_exit_to_guest:
        addq  $8,%rsp
.Lft0:  iretq

        .section .fixup,"ax"
.Lfx0:  sti
        SAVE_ALL
        movq  UREGS_error_code(%rsp),%rsi
        movq  %rsp,%rax
        andq  $~0xf,%rsp
        pushq $__HYPERVISOR_DS         # SS
        pushq %rax                     # RSP
        pushfq                         # RFLAGS
        pushq $__HYPERVISOR_CS         # CS
        leaq  .Ldf0(%rip),%rax
        pushq %rax                     # RIP
        pushq %rsi                     # error_code/entry_vector
        jmp   handle_exception
.Ldf0:  GET_CURRENT(%rbx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_failsafe_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
        bt    $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%rbx)
        jnc   1f
        orb   $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
1:      call  create_bounce_frame
        jmp   test_all_events
        .previous
        .section __pre_ex_table,"a"
        .quad .Lft0,.Lfx0
        .previous
        .section __ex_table,"a"
        .quad .Ldf0,failsafe_callback
        .previous
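
/*
 * How these two tables interact (a sketch): if the iretq at .Lft0 faults
 * (e.g. on bad guest selectors), the fault handler consults __pre_ex_table
 * and resumes at .Lfx0, which rebuilds a Xen-context exception frame whose
 * RIP is .Ldf0 and re-enters handle_exception; the C fault handler then
 * finds .Ldf0 in the ordinary __ex_table and redirects execution to
 * failsafe_callback, which bounces to the guest's registered failsafe
 * handler. The lookup each table supports is effectively (illustrative C;
 * the exact Xen helper signatures are not shown here):
 *
 *     struct ex_entry { unsigned long addr, cont; };
 *     unsigned long lookup(const struct ex_entry *t, size_t n,
 *                          unsigned long faulting_rip)
 *     {
 *         for (size_t i = 0; i < n; i++)
 *             if (t[i].addr == faulting_rip)
 *                 return t[i].cont;   // continuation to resume at
 *         return 0;                   // no fixup registered
 *     }
 */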
        ALIGN
/* No special register assumptions. */
restore_all_xen:
        RESTORE_ALL
        addq  $8,%rsp
        iretq
/*
 * When entering SYSCALL from kernel mode:
 *   %rax = hypercall vector
 *   %rdi, %rsi, %rdx, %r10, %r8, %r9 = hypercall arguments
 *   %r11, %rcx = SYSCALL-saved %rflags and %rip
 *   NB. We must move %r10 to %rcx for C function-calling ABI.
 *
 * When entering SYSCALL from user mode:
 *   Vector directly to the registered arch.syscall_addr.
 *
 * Initial work is done by per-CPU stack trampolines. At this point %rsp
 * has been initialised to point at the correct Xen stack, and %rsp, %rflags
 * and %cs have been saved. All other registers are still to be saved onto
 * the stack, starting with %rip, and an appropriate %ss must be saved into
 * the space left by the trampoline.
 */
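
/*
 * For reference, a 64-bit PV guest kernel reaches this entry point by
 * executing SYSCALL with the convention above. An illustrative guest-side
 * wrapper for a two-argument hypercall (a sketch; real guests normally
 * call through the hypercall transfer page rather than issuing SYSCALL
 * directly):
 *
 *     static inline long hypercall2(unsigned long nr,
 *                                   unsigned long a1, unsigned long a2)
 *     {
 *         long ret;
 *         asm volatile ( "syscall"
 *                        : "=a" (ret)
 *                        : "0" (nr), "D" (a1), "S" (a2)
 *                        : "memory", "rcx", "r11" );  // SYSCALL clobbers
 *         return ret;
 *     }
 */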
        ALIGN
ENTRY(syscall_enter)
        sti
        movl  $FLAT_KERNEL_SS,24(%rsp)
        pushq %rcx
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        SAVE_ALL
        GET_CURRENT(%rbx)
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jz    switch_to_kernel

/*hypercall:*/
        movq  %r10,%rcx
        cmpq  $NR_hypercalls,%rax
        jae   bad_hypercall
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        pushq %rdi; pushq %rsi; pushq %rdx; pushq %rcx; pushq %r8 ; pushq %r9
        leaq  hypercall_args_table(%rip),%r10
        movq  $6,%rcx
        sub   (%r10,%rax,1),%cl
        movq  %rsp,%rdi
        movl  $0xDEADBEEF,%eax
        rep   stosq
        popq  %r9 ; popq %r8 ; popq %rcx; popq %rdx; popq %rsi; popq %rdi
        movq  UREGS_rax(%rsp),%rax
        pushq %rax
        pushq UREGS_rip+8(%rsp)
#endif
        leaq  hypercall_table(%rip),%r10
        PERFC_INCR(PERFC_hypercalls, %rax, %rbx)
        callq *(%r10,%rax,8)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs used by this hypercall. */
        popq  %r10         # Shadow RIP
        cmpq  %r10,UREGS_rip+8(%rsp)
        popq  %rcx         # Shadow hypercall index
        jne   skip_clobber /* If RIP has changed then don't clobber. */
        leaq  hypercall_args_table(%rip),%r10
        movb  (%r10,%rcx,1),%cl
        movl  $0xDEADBEEF,%r10d
        cmpb  $1,%cl; jb skip_clobber; movq %r10,UREGS_rdi(%rsp)
        cmpb  $2,%cl; jb skip_clobber; movq %r10,UREGS_rsi(%rsp)
        cmpb  $3,%cl; jb skip_clobber; movq %r10,UREGS_rdx(%rsp)
        cmpb  $4,%cl; jb skip_clobber; movq %r10,UREGS_r10(%rsp)
        cmpb  $5,%cl; jb skip_clobber; movq %r10,UREGS_r8(%rsp)
        cmpb  $6,%cl; jb skip_clobber; movq %r10,UREGS_r9(%rsp)
skip_clobber:
#endif
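/*
 * The two NDEBUG blocks above are a debugging aid, roughly (C sketch;
 * variable names illustrative): before dispatch, poison the argument
 * registers this hypercall does NOT declare, so a handler that reads an
 * undeclared argument fails loudly; after dispatch, poison the saved
 * copies of the arguments it DID consume, unless RIP changed (a
 * continuation was set up and the arguments must be replayed):
 *
 *     unsigned int nargs = hypercall_args_table[nr];
 *     for (unsigned int i = nargs; i < 6; i++)
 *         arg[i] = 0xDEADBEEF;                  // first block
 *     ret = fn(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
 *     if (regs->rip == shadow_rip)              // second block
 *         for (unsigned int i = 0; i < nargs; i++)
 *             saved_arg[i] = 0xDEADBEEF;
 */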
        movq  %rax,UREGS_rax(%rsp)    # save the return value

/* %rbx: struct vcpu */
test_all_events:
        cli                           # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%rbx),%eax
        shl   $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip),%rcx
        testl $~0,(%rcx,%rax,1)
        jnz   process_softirqs
        testb $1,VCPU_nmi_pending(%rbx)
        jnz   process_nmi
test_guest_events:
        movq  VCPU_vcpu_info(%rbx),%rax
        testb $0xFF,VCPUINFO_upcall_mask(%rax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%rax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_event_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events
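
/*
 * The exit gauntlet above and the two helpers below are, in rough C (a
 * sketch; field names follow the asm offsets rather than the exact Xen
 * structures):
 *
 *     for ( ; ; ) {
 *         local_irq_disable();               // tests must not race
 *         if ( softirq_pending(cpu) ) { do_softirq(); continue; }
 *         if ( v->nmi_pending )       { bounce_nmi(v);  continue; }
 *         if ( !vcpu_info->upcall_mask && vcpu_info->upcall_pending ) {
 *             local_irq_enable();
 *             bounce(v, v->event_addr, TBF_INTERRUPT);
 *             continue;                      // re-run every test
 *         }
 *         break;                             // restore_all_guest
 *     }
 */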
        ALIGN
/* %rbx: struct vcpu */
process_softirqs:
        sti
        call  do_softirq
        jmp   test_all_events

        ALIGN
/* %rbx: struct vcpu */
process_nmi:
        testb $1,VCPU_nmi_masked(%rbx)
        jnz   test_guest_events
        movb  $0,VCPU_nmi_pending(%rbx)
        movq  VCPU_nmi_addr(%rbx),%rax
        test  %rax,%rax
        jz    test_guest_events
        movb  $1,VCPU_nmi_masked(%rbx)
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events

bad_hypercall:
        movq  $-ENOSYS,UREGS_rax(%rsp)
        jmp   test_all_events

ENTRY(int80_direct_trap)
        pushq $0
        SAVE_ALL

        GET_CURRENT(%rbx)

        /* Check that the callback is non-null. */
        leaq  VCPU_int80_bounce(%rbx),%rdx
        cmpb  $0,TRAPBOUNCE_flags(%rdx)
        jz    int80_slow_path

        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jnz   compat_int80_direct_trap

        call  create_bounce_frame
        jmp   test_all_events

int80_slow_path:
        /*
         * Setup entry vector and error code as if this was a GPF caused by an
         * IDT entry with DPL==0.
         */
        movl  $((0x80 << 3) | 0x2),UREGS_error_code(%rsp)
        movl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
        /* A GPF wouldn't have incremented the instruction pointer
         * (int $0x80 is a two-byte instruction). */
        subq  $2,UREGS_rip(%rsp)
        jmp   handle_exception_saved
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK:                     */
/*   { RCX, R11, [DS-GS,] [CR2,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS }   */
/* %rdx: trap_bounce, %rbx: struct vcpu                                  */
/* On return only %rbx and %rdx are guaranteed non-clobbered.            */
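/*
 * Viewed from the guest, the frame built below looks like this in memory
 * (a sketch; ascending addresses, with the optional words present only
 * when the corresponding TBF_* flag is set in TRAPBOUNCE_flags):
 *
 *     struct bounce_frame {
 *         unsigned long rcx, r11;         // new guest %rsp points here
 *         unsigned long ds, es, fs, gs;   // only if TBF_FAILSAFE
 *         unsigned long error_code;       // only if TBF_EXCEPTION_ERRCODE
 *         unsigned long rip;
 *         unsigned long cs;               // bits 32-39: saved_upcall_mask
 *         unsigned long rflags, rsp, ss;  // highest addresses
 *     };
 */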
create_bounce_frame:
        ASSERT_INTERRUPTS_ENABLED
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jnz   1f
        /* Push new frame at registered guest-OS stack base. */
        pushq %rdx
        movq  %rbx,%rdi
        call  toggle_guest_mode
        popq  %rdx
        movq  VCPU_kernel_sp(%rbx),%rsi
        jmp   2f
1:      /* In kernel context already: push new frame at existing %rsp. */
        movq  UREGS_rsp+8(%rsp),%rsi
        andb  $0xfc,UREGS_cs+8(%rsp)  # Indicate kernel context to guest.
2:      andq  $~0xf,%rsi              # Stack frames are 16-byte aligned.
        movq  $HYPERVISOR_VIRT_START,%rax
        cmpq  %rax,%rsi
        jb    1f                      # In +ve address space? Then okay.
        movq  $HYPERVISOR_VIRT_END+60,%rax
        cmpq  %rax,%rsi
        jb    domain_crash_synchronous # Above Xen private area? Then okay.
1:      movb  TRAPBOUNCE_flags(%rdx),%cl
        subq  $40,%rsi
        movq  UREGS_ss+8(%rsp),%rax
.Lft2:  movq  %rax,32(%rsi)           # SS
        movq  UREGS_rsp+8(%rsp),%rax
.Lft3:  movq  %rax,24(%rsi)           # RSP
        movq  VCPU_vcpu_info(%rbx),%rax
        pushq VCPUINFO_upcall_mask(%rax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                     # TBF_INTERRUPT -> set upcall mask
        orb   %ch,VCPUINFO_upcall_mask(%rax)
        popq  %rax
        shlq  $32,%rax                # Bits 32-39: saved_upcall_mask
        movw  UREGS_cs+8(%rsp),%ax    # Bits 0-15: CS
.Lft4:  movq  %rax,8(%rsi)            # CS / saved_upcall_mask
        shrq  $32,%rax
        testb $0xFF,%al               # Bits 0-7: saved_upcall_mask
        setz  %ch                     # %ch == !saved_upcall_mask
        movq  UREGS_eflags+8(%rsp),%rax
        andq  $~X86_EFLAGS_IF,%rax
        shlb  $1,%ch                  # Bit 9 (EFLAGS.IF)
        orb   %ch,%ah                 # Fold EFLAGS.IF into %eax
.Lft5:  movq  %rax,16(%rsi)           # RFLAGS
        movq  UREGS_rip+8(%rsp),%rax
.Lft6:  movq  %rax,(%rsi)             # RIP
        testb $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subq  $8,%rsi
        movl  TRAPBOUNCE_error_code(%rdx),%eax
.Lft7:  movq  %rax,(%rsi)             # ERROR CODE
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subq  $32,%rsi
        movl  %gs,%eax
.Lft8:  movq  %rax,24(%rsi)           # GS
        movl  %fs,%eax
.Lft9:  movq  %rax,16(%rsi)           # FS
        movl  %es,%eax
.Lft10: movq  %rax,8(%rsi)            # ES
        movl  %ds,%eax
.Lft11: movq  %rax,(%rsi)             # DS
2:      subq  $16,%rsi
        movq  UREGS_r11+8(%rsp),%rax
.Lft12: movq  %rax,8(%rsi)            # R11
        movq  UREGS_rcx+8(%rsp),%rax
.Lft13: movq  %rax,(%rsi)             # RCX
        /* Rewrite our stack frame and return to guest-OS mode. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        /* Also clear AC: alignment checks shouldn't trigger in kernel mode. */
        movl  $TRAP_syscall,UREGS_entry_vector+8(%rsp)
        andl  $~(X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|\
                 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
        movq  $FLAT_KERNEL_SS,UREGS_ss+8(%rsp)
        movq  %rsi,UREGS_rsp+8(%rsp)
        movq  $FLAT_KERNEL_CS,UREGS_cs+8(%rsp)
        movq  TRAPBOUNCE_eip(%rdx),%rax
        testq %rax,%rax
        jz    domain_crash_synchronous
        movq  %rax,UREGS_rip+8(%rsp)
        ret
        .section __ex_table,"a"
        .quad  .Lft2, domain_crash_synchronous ,  .Lft3, domain_crash_synchronous
        .quad  .Lft4, domain_crash_synchronous ,  .Lft5, domain_crash_synchronous
        .quad  .Lft6, domain_crash_synchronous ,  .Lft7, domain_crash_synchronous
        .quad  .Lft8, domain_crash_synchronous ,  .Lft9, domain_crash_synchronous
        .quad .Lft10, domain_crash_synchronous , .Lft11, domain_crash_synchronous
        .quad .Lft12, domain_crash_synchronous , .Lft13, domain_crash_synchronous
        .previous
domain_crash_synchronous_string:
        .asciz "domain_crash_sync called from entry.S\n"

ENTRY(domain_crash_synchronous)
        # Get out of the guest-save area of the stack.
        GET_GUEST_REGS(%rax)
        movq  %rax,%rsp
        # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
        movq  CPUINFO_current_vcpu(%rax),%rax
        movq  VCPU_domain(%rax),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        setz  %al
        leal  (%rax,%rax,2),%eax
        orb   %al,UREGS_cs(%rsp)
        # printk(domain_crash_synchronous_string)
        leaq  domain_crash_synchronous_string(%rip),%rdi
        xorl  %eax,%eax
        call  printk
        jmp   __domain_crash_synchronous
        ALIGN
/* No special register assumptions. */
ENTRY(ret_from_intr)
        GET_CURRENT(%rbx)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jz    test_all_events
        jmp   compat_test_all_events

        ALIGN
/* No special register assumptions. */
ENTRY(handle_exception)
        SAVE_ALL
handle_exception_saved:
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
        jz    exception_with_ints_disabled
        sti
1:      movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        GET_CURRENT(%rbx)
        PERFC_INCR(PERFC_exceptions, %rax, %rbx)
        callq *(%rdx,%rax,8)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jnz   compat_post_handle_exception
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
        jz    test_all_events
        call  create_bounce_frame
        movb  $0,TRAPBOUNCE_flags(%rdx)
        jmp   test_all_events
/* No special register assumptions. */
exception_with_ints_disabled:
        testb $3,UREGS_cs(%rsp)       # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        movq  %rsp,%rdi
        call  search_pre_exception_table
        testq %rax,%rax               # no fixup code for faulting EIP?
        jz    1b
        movq  %rax,UREGS_rip(%rsp)
        subq  $8,UREGS_rsp(%rsp)      # add ec/ev to previous stack frame
        testb $15,UREGS_rsp(%rsp)     # return %rsp is now aligned?
        jz    1f                      # then there is a pad quadword already
        movq  %rsp,%rsi
        subq  $8,%rsp
        movq  %rsp,%rdi
        movq  $UREGS_kernel_sizeof/8,%rcx
        rep;  movsq                   # make room for ec/ev
1:      movq  UREGS_error_code(%rsp),%rax # ec/ev
        movq  %rax,UREGS_kernel_sizeof(%rsp)
        jmp   restore_all_xen         # return to fixup code
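
/*
 * The frame dance above, in rough C (a sketch; 'regs' is the current frame
 * and UREGS_kernel_sizeof its size): the pre-exception continuation expects
 * the interrupted frame to have grown by one ec/ev quadword, so slide the
 * current frame down by 8 bytes first when no pad word exists:
 *
 *     regs->rip  = fixup;          // from search_pre_exception_table()
 *     regs->rsp -= 8;              // interrupted frame grows by one word
 *     if ( regs->rsp & 15 ) {      // not aligned: no pad quadword yet
 *         memmove((char *)regs - 8, regs, UREGS_kernel_sizeof);
 *         regs = (struct cpu_user_regs *)((char *)regs - 8);
 *     }
 *     *(unsigned long *)((char *)regs + UREGS_kernel_sizeof) =
 *         regs->error_code;        // append ec/ev, then restore_all_xen
 */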
/* No special register assumptions. */
FATAL_exception_with_ints_disabled:
        movl  UREGS_entry_vector(%rsp),%edi
        movq  %rsp,%rsi
        call  fatal_trap
        ud2
ENTRY(divide_error)
        pushq $0
        movl  $TRAP_divide_error,4(%rsp)
        jmp   handle_exception

ENTRY(coprocessor_error)
        pushq $0
        movl  $TRAP_copro_error,4(%rsp)
        jmp   handle_exception

ENTRY(simd_coprocessor_error)
        pushq $0
        movl  $TRAP_simd_error,4(%rsp)
        jmp   handle_exception

ENTRY(device_not_available)
        pushq $0
        movl  $TRAP_no_device,4(%rsp)
        jmp   handle_exception

ENTRY(debug)
        pushq $0
        movl  $TRAP_debug,4(%rsp)
        jmp   handle_exception

ENTRY(int3)
        pushq $0
        movl  $TRAP_int3,4(%rsp)
        jmp   handle_exception

ENTRY(overflow)
        pushq $0
        movl  $TRAP_overflow,4(%rsp)
        jmp   handle_exception

ENTRY(bounds)
        pushq $0
        movl  $TRAP_bounds,4(%rsp)
        jmp   handle_exception

ENTRY(invalid_op)
        pushq $0
        movl  $TRAP_invalid_op,4(%rsp)
        jmp   handle_exception

ENTRY(coprocessor_segment_overrun)
        pushq $0
        movl  $TRAP_copro_seg,4(%rsp)
        jmp   handle_exception

ENTRY(invalid_TSS)
        movl  $TRAP_invalid_tss,4(%rsp)
        jmp   handle_exception

ENTRY(segment_not_present)
        movl  $TRAP_no_segment,4(%rsp)
        jmp   handle_exception

ENTRY(stack_segment)
        movl  $TRAP_stack_error,4(%rsp)
        jmp   handle_exception

ENTRY(general_protection)
        movl  $TRAP_gp_fault,4(%rsp)
        jmp   handle_exception

ENTRY(alignment_check)
        movl  $TRAP_alignment_check,4(%rsp)
        jmp   handle_exception

ENTRY(page_fault)
        movl  $TRAP_page_fault,4(%rsp)
        jmp   handle_exception

ENTRY(spurious_interrupt_bug)
        pushq $0
        movl  $TRAP_spurious_int,4(%rsp)
        jmp   handle_exception

ENTRY(double_fault)
        SAVE_ALL
        movq  %rsp,%rdi
        call  do_double_fault
        ud2

ENTRY(early_page_fault)
        SAVE_ALL
        movq  %rsp,%rdi
        call  do_early_page_fault
        jmp   restore_all_xen

handle_ist_exception:
        SAVE_ALL
        testb $3,UREGS_cs(%rsp)
        jz    1f
        /* Interrupted guest context. Copy the context to stack bottom. */
        GET_GUEST_REGS(%rdi)
        movq  %rsp,%rsi
        movl  $UREGS_kernel_sizeof/8,%ecx
        movq  %rdi,%rsp
        rep   movsq
1:      movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        callq *(%rdx,%rax,8)
        jmp   ret_from_intr

ENTRY(nmi)
        pushq $0
        movl  $TRAP_nmi,4(%rsp)
        jmp   handle_ist_exception

ENTRY(machine_check)
        pushq $0
        movl  $TRAP_machine_check,4(%rsp)
        jmp   handle_ist_exception
.data

ENTRY(exception_table)
        .quad do_divide_error
        .quad do_debug
        .quad do_nmi
        .quad do_int3
        .quad do_overflow
        .quad do_bounds
        .quad do_invalid_op
        .quad math_state_restore
        .quad 0 # double_fault
        .quad do_coprocessor_segment_overrun
        .quad do_invalid_TSS
        .quad do_segment_not_present
        .quad do_stack_segment
        .quad do_general_protection
        .quad do_page_fault
        .quad do_spurious_interrupt_bug
        .quad do_coprocessor_error
        .quad do_alignment_check
        .quad do_machine_check
        .quad do_simd_coprocessor_error

ENTRY(hypercall_table)
        .quad do_set_trap_table     /*  0 */
        .quad do_mmu_update
        .quad do_set_gdt
        .quad do_stack_switch
        .quad do_set_callbacks
        .quad do_fpu_taskswitch     /*  5 */
        .quad do_sched_op_compat
        .quad do_platform_op
        .quad do_set_debugreg
        .quad do_get_debugreg
        .quad do_update_descriptor  /* 10 */
        .quad do_ni_hypercall
        .quad do_memory_op
        .quad do_multicall
        .quad do_update_va_mapping
        .quad do_set_timer_op       /* 15 */
        .quad do_event_channel_op_compat
        .quad do_xen_version
        .quad do_console_io
        .quad do_physdev_op_compat
        .quad do_grant_table_op     /* 20 */
        .quad do_vm_assist
        .quad do_update_va_mapping_otherdomain
        .quad do_iret
        .quad do_vcpu_op
        .quad do_set_segment_base   /* 25 */
        .quad do_mmuext_op
        .quad do_acm_op
        .quad do_nmi_op
        .quad do_sched_op
        .quad do_callback_op        /* 30 */
        .quad do_xenoprof_op
        .quad do_event_channel_op
        .quad do_physdev_op
        .quad do_hvm_op
        .quad do_sysctl             /* 35 */
        .quad do_domctl
        .quad do_kexec_op
        .rept NR_hypercalls-((.-hypercall_table)/8)
        .quad do_ni_hypercall
        .endr
ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table */      /*  0 */
        .byte 4 /* do_mmu_update */
        .byte 2 /* do_set_gdt */
        .byte 2 /* do_stack_switch */
        .byte 3 /* do_set_callbacks */
        .byte 1 /* do_fpu_taskswitch */      /*  5 */
        .byte 2 /* do_sched_op_compat */
        .byte 1 /* do_platform_op */
        .byte 2 /* do_set_debugreg */
        .byte 1 /* do_get_debugreg */
        .byte 2 /* do_update_descriptor */   /* 10 */
        .byte 0 /* do_ni_hypercall */
        .byte 2 /* do_memory_op */
        .byte 2 /* do_multicall */
        .byte 3 /* do_update_va_mapping */
        .byte 1 /* do_set_timer_op */        /* 15 */
        .byte 1 /* do_event_channel_op_compat */
        .byte 2 /* do_xen_version */
        .byte 3 /* do_console_io */
        .byte 1 /* do_physdev_op_compat */
        .byte 3 /* do_grant_table_op */      /* 20 */
        .byte 2 /* do_vm_assist */
        .byte 4 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_iret */
        .byte 3 /* do_vcpu_op */
        .byte 2 /* do_set_segment_base */    /* 25 */
        .byte 4 /* do_mmuext_op */
        .byte 1 /* do_acm_op */
        .byte 2 /* do_nmi_op */
        .byte 2 /* do_sched_op */
        .byte 2 /* do_callback_op */         /* 30 */
        .byte 2 /* do_xenoprof_op */
        .byte 2 /* do_event_channel_op */
        .byte 2 /* do_physdev_op */
        .byte 2 /* do_hvm_op */
        .byte 1 /* do_sysctl */              /* 35 */
        .byte 1 /* do_domctl */
        .byte 2 /* do_kexec_op */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall */
        .endr
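
/*
 * The two tables pair up by hypercall number: hypercall_table[nr] is the
 * handler and hypercall_args_table[nr] its declared argument count (only
 * consulted by the NDEBUG clobbering in syscall_enter). The dispatch that
 * syscall_enter performs is effectively (illustrative C; the typedef is an
 * assumption for the sketch):
 *
 *     typedef unsigned long (*hypercall_fn_t)(
 *         unsigned long, unsigned long, unsigned long,
 *         unsigned long, unsigned long, unsigned long);
 *
 *     if ( nr >= NR_hypercalls )
 *         regs->rax = -ENOSYS;                       // bad_hypercall
 *     else
 *         regs->rax = ((hypercall_fn_t *)hypercall_table)[nr](
 *             regs->rdi, regs->rsi, regs->rdx,
 *             regs->r10, regs->r8,  regs->r9);       // %r10 -> arg 4
 */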