xen/arch/x86/x86_64/entry.S @ 16195:4970cbf9b19e (xen-unstable)

x86: Fix xentrace of hypercalls in debug builds of Xen.

Based on a patch by Yosuke Iwamatsu <y-iwamatsu@ab.jp.nec.com>

Signed-off-by: Keir Fraser <keir@xensource.com>
author   Keir Fraser <keir@xensource.com>
date     Tue Oct 23 09:41:06 2007 +0100
parents  c918a68617c9
children aeebd173c3fa

/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>
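
/*
 * GET_GUEST_REGS(reg): the per-CPU stack is STACK_SIZE bytes and
 * STACK_SIZE-aligned, with struct cpu_info (containing the saved guest
 * registers) at its top. Masking %rsp down to the stack base and OR-ing in
 * STACK_SIZE-CPUINFO_sizeof therefore points reg at the guest register frame.
 */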
#define GET_GUEST_REGS(reg)                     \
        movq $~(STACK_SIZE-1),reg;              \
        andq %rsp,reg;                          \
        orq  $(STACK_SIZE-CPUINFO_sizeof),reg;
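
/*
 * GET_CURRENT(reg): the word at the very top of the stack (stack_top - 8)
 * holds the 'current' vcpu pointer, stored there as part of struct cpu_info.
 * Round %rsp up to that word and dereference it.
 */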
#define GET_CURRENT(reg)         \
        movq $STACK_SIZE-8, reg; \
        orq  %rsp, reg;          \
        andq $~7,reg;            \
        movq (reg),reg;

        ALIGN
/* %rbx: struct vcpu */
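/*
 * SYSCALL executed by the guest in user mode is forwarded here from
 * syscall_enter (via the TF_kernel_mode test): bounce to the guest kernel's
 * registered syscall handler, masking events first if the guest set
 * _VGCF_syscall_disables_events.
 */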
switch_to_kernel:
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_syscall_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  $0,TRAPBOUNCE_flags(%rdx)
        bt    $_VGCF_syscall_disables_events,VCPU_guest_context_flags(%rbx)
        jnc   1f
        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
1:      call  create_bounce_frame
        andl  $~X86_EFLAGS_DF,UREGS_eflags(%rsp)
        jmp   test_all_events

/* %rbx: struct vcpu, interrupts disabled */
restore_all_guest:
        ASSERT_INTERRUPTS_DISABLED
        RESTORE_ALL
        testw $TRAP_syscall,4(%rsp)
        jz    iret_exit_to_guest
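        /*
         * This frame was created by SYSCALL entry (TRAP_syscall set), so
         * return with SYSRET: RIP goes back into %rcx, RFLAGS into %r11,
         * and a 32-bit guest kernel CS selects SYSRETL over SYSRETQ.
         */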
        addq  $8,%rsp
        popq  %rcx                    # RIP
        popq  %r11                    # CS
        cmpw  $FLAT_KERNEL_CS32,%r11
        popq  %r11                    # RFLAGS
        popq  %rsp                    # RSP
        je    1f
        sysretq
1:      sysretl

        ALIGN
/* No special register assumptions. */
iret_exit_to_guest:
        addq  $8,%rsp
.Lft0:  iretq

.section .fixup,"ax"
.Lfx0:  sti
        SAVE_ALL
        movq  UREGS_error_code(%rsp),%rsi
        movq  %rsp,%rax
        andq  $~0xf,%rsp
        pushq $__HYPERVISOR_DS         # SS
        pushq %rax                     # RSP
        pushfq                         # RFLAGS
        pushq $__HYPERVISOR_CS         # CS
        leaq  .Ldf0(%rip),%rax
        pushq %rax                     # RIP
        pushq %rsi                     # error_code/entry_vector
        jmp   handle_exception
.Ldf0:  GET_CURRENT(%rbx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_failsafe_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
        bt    $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%rbx)
        jnc   1f
        orb   $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
1:      call  create_bounce_frame
        jmp   test_all_events
.previous
.section __pre_ex_table,"a"
        .quad .Lft0,.Lfx0
.previous
.section __ex_table,"a"
        .quad .Ldf0,failsafe_callback
.previous

        ALIGN
/* No special register assumptions. */
restore_all_xen:
        RESTORE_ALL
        addq  $8,%rsp
        iretq

/*
 * When entering SYSCALL from kernel mode:
 *  %rax                             = hypercall vector
 *  %rdi, %rsi, %rdx, %r10, %r8, %r9 = hypercall arguments
 *  %rcx                             = SYSCALL-saved %rip
 *  NB. We must move %r10 to %rcx for C function-calling ABI.
 *
 * When entering SYSCALL from user mode:
 *  Vector directly to the registered arch.syscall_addr.
 *
 * Initial work is done by per-CPU stack trampolines. At this point %rsp
 * has been initialised to point at the correct Xen stack, and %rsp, %rflags
 * and %cs have been saved. All other registers are still to be saved onto
 * the stack, starting with %rip, and an appropriate %ss must be saved into
 * the space left by the trampoline.
 */
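/*
 * For illustration only (not part of this file): a 64-bit PV guest kernel
 * issuing a two-argument hypercall follows the convention above, e.g.
 *
 *         movq  $__HYPERVISOR_xen_version,%rax  # hypercall number
 *         movq  $XENVER_version,%rdi            # argument 1
 *         xorq  %rsi,%rsi                       # argument 2 (unused here)
 *         syscall                               # arrives at syscall_enter
 *
 * Real guests go through the hypercall transfer page rather than issuing
 * SYSCALL directly; the register convention is the same.
 */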
        ALIGN
ENTRY(syscall_enter)
        sti
        movl  $FLAT_KERNEL_SS,24(%rsp)
        pushq %rcx
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        movq  24(%rsp),%r11 /* Re-load user RFLAGS into %r11 before SAVE_ALL */
        SAVE_ALL
        GET_CURRENT(%rbx)
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jz    switch_to_kernel

/*hypercall:*/
        movq  %r10,%rcx
        cmpq  $NR_hypercalls,%rax
        jae   bad_hypercall
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        pushq %rdi; pushq %rsi; pushq %rdx; pushq %rcx; pushq %r8 ; pushq %r9
        leaq  hypercall_args_table(%rip),%r10
        movq  $6,%rcx
        sub   (%r10,%rax,1),%cl
        movq  %rsp,%rdi
        movl  $0xDEADBEEF,%eax
        rep   stosq
        popq  %r9 ; popq  %r8 ; popq  %rcx; popq  %rdx; popq  %rsi; popq  %rdi
        movq  UREGS_rax(%rsp),%rax
        pushq %rax
        pushq UREGS_rip+8(%rsp)
#define SHADOW_BYTES 16 /* Shadow EIP + shadow hypercall # */
#else
#define SHADOW_BYTES 0  /* No on-stack shadow state */
#endif
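        /*
         * In debug builds, the shadow copies pushed above let the post-call
         * code both index hypercall_args_table and detect whether the
         * hypercall rewrote the guest RIP (e.g. for a continuation), in
         * which case the argument registers must not be clobbered.
         */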
        cmpb  $0,tb_init_done(%rip)
        je    tracing_off
        call  trace_hypercall
        /* Now restore all the registers that trace_hypercall clobbered */
        movq  UREGS_rax+SHADOW_BYTES(%rsp),%rax   /* Hypercall #  */
        movq  UREGS_rdi+SHADOW_BYTES(%rsp),%rdi   /* Arg 1        */
        movq  UREGS_rsi+SHADOW_BYTES(%rsp),%rsi   /* Arg 2        */
        movq  UREGS_rdx+SHADOW_BYTES(%rsp),%rdx   /* Arg 3        */
        movq  UREGS_r10+SHADOW_BYTES(%rsp),%rcx   /* Arg 4        */
        movq  UREGS_r8+SHADOW_BYTES(%rsp),%r8     /* Arg 5        */
        movq  UREGS_r9+SHADOW_BYTES(%rsp),%r9     /* Arg 6        */
#undef SHADOW_BYTES
tracing_off:
        leaq  hypercall_table(%rip),%r10
        PERFC_INCR(PERFC_hypercalls, %rax, %rbx)
        callq *(%r10,%rax,8)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs used by this hypercall. */
        popq  %r10         # Shadow RIP
        cmpq  %r10,UREGS_rip+8(%rsp)
        popq  %rcx         # Shadow hypercall index
        jne   skip_clobber /* If RIP has changed then don't clobber. */
        leaq  hypercall_args_table(%rip),%r10
        movb  (%r10,%rcx,1),%cl
        movl  $0xDEADBEEF,%r10d
        cmpb  $1,%cl; jb skip_clobber; movq %r10,UREGS_rdi(%rsp)
        cmpb  $2,%cl; jb skip_clobber; movq %r10,UREGS_rsi(%rsp)
        cmpb  $3,%cl; jb skip_clobber; movq %r10,UREGS_rdx(%rsp)
        cmpb  $4,%cl; jb skip_clobber; movq %r10,UREGS_r10(%rsp)
        cmpb  $5,%cl; jb skip_clobber; movq %r10,UREGS_r8(%rsp)
        cmpb  $6,%cl; jb skip_clobber; movq %r10,UREGS_r9(%rsp)
skip_clobber:
#endif
        movq  %rax,UREGS_rax(%rsp)       # save the return value

/* %rbx: struct vcpu */
test_all_events:
        cli                             # tests must not race interrupts
/*test_softirqs:*/
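        /*
         * Check this CPU's softirq_pending word: irq_stat is an array of
         * per-CPU entries, indexed here by VCPU_processor shifted by
         * IRQSTAT_shift (log2 of the entry size).
         */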
        movl  VCPU_processor(%rbx),%eax
        shl   $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip),%rcx
        testl $~0,(%rcx,%rax,1)
        jnz   process_softirqs
        testb $1,VCPU_nmi_pending(%rbx)
        jnz   process_nmi
test_guest_events:
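        /*
         * Deliver an event-channel upcall only if one is pending and the
         * guest has not masked upcalls; otherwise return to the guest.
         */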
        movq  VCPU_vcpu_info(%rbx),%rax
        testb $0xFF,VCPUINFO_upcall_mask(%rax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%rax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_event_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
/* %rbx: struct vcpu */
process_softirqs:
        sti
        call  do_softirq
        jmp   test_all_events

        ALIGN
/* %rbx: struct vcpu */
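/*
 * Deliver a pending virtual NMI to the guest, mirroring hardware semantics:
 * while nmi_masked is set, further NMIs are held pending until the guest
 * clears the mask.
 */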
process_nmi:
        testb $1,VCPU_nmi_masked(%rbx)
        jnz   test_guest_events
        movb  $0,VCPU_nmi_pending(%rbx)
        movq  VCPU_nmi_addr(%rbx),%rax
        test  %rax,%rax
        jz    test_guest_events
        movb  $1,VCPU_nmi_masked(%rbx)
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events

bad_hypercall:
        movq  $-ENOSYS,UREGS_rax(%rsp)
        jmp   test_all_events

ENTRY(int80_direct_trap)
        pushq $0
        SAVE_ALL

        GET_CURRENT(%rbx)

        /* Check that the callback is non-null. */
        leaq  VCPU_int80_bounce(%rbx),%rdx
        cmpb  $0,TRAPBOUNCE_flags(%rdx)
        jz    int80_slow_path

        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jnz   compat_int80_direct_trap

        call  create_bounce_frame
        jmp   test_all_events

int80_slow_path:
        /*
         * Setup entry vector and error code as if this was a GPF caused by an
         * IDT entry with DPL==0.
         */
        movl  $((0x80 << 3) | 0x2),UREGS_error_code(%rsp)
        movl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
        /* A GPF wouldn't have incremented the instruction pointer. */
        subq  $2,UREGS_rip(%rsp)
        jmp   handle_exception_saved

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK:                     */
/*   { RCX, R11, [DS-GS,] [CR2,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS }   */
/* %rdx: trap_bounce, %rbx: struct vcpu                                  */
/* On return only %rbx and %rdx are guaranteed non-clobbered.            */
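/*
 * The unconditional part of the frame is built first, 40 bytes below the
 * (16-byte aligned) guest stack pointer:
 *   32(%rsi) SS
 *   24(%rsi) RSP
 *   16(%rsi) RFLAGS  (guest virtual IF folded into bit 9)
 *    8(%rsi) CS      (saved_upcall_mask in bits 32-39)
 *    0(%rsi) RIP
 * An error code, the failsafe segment registers, and finally R11/RCX are
 * then pushed below this as the TRAPBOUNCE flags dictate.
 */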
create_bounce_frame:
        ASSERT_INTERRUPTS_ENABLED
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jnz   1f
        /* Push new frame at registered guest-OS stack base. */
        pushq %rdx
        movq  %rbx,%rdi
        call  toggle_guest_mode
        popq  %rdx
        movq  VCPU_kernel_sp(%rbx),%rsi
        jmp   2f
1:      /* In kernel context already: push new frame at existing %rsp. */
        movq  UREGS_rsp+8(%rsp),%rsi
        andb  $0xfc,UREGS_cs+8(%rsp)    # Indicate kernel context to guest.
2:      andq  $~0xf,%rsi                # Stack frames are 16-byte aligned.
        movq  $HYPERVISOR_VIRT_START,%rax
        cmpq  %rax,%rsi
        jb    1f                        # In +ve address space? Then okay.
        movq  $HYPERVISOR_VIRT_END+60,%rax
        cmpq  %rax,%rsi
        jb    domain_crash_synchronous  # Above Xen private area? Then okay.
1:      movb  TRAPBOUNCE_flags(%rdx),%cl
        subq  $40,%rsi
        movq  UREGS_ss+8(%rsp),%rax
.Lft2:  movq  %rax,32(%rsi)             # SS
        movq  UREGS_rsp+8(%rsp),%rax
.Lft3:  movq  %rax,24(%rsi)             # RSP
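        /*
         * Sample the guest's event mask (and, for TBF_INTERRUPT bounces,
         * set it) in one go; the sampled value is stashed in bits 32-39 of
         * the CS slot as saved_upcall_mask, and its inverse becomes the
         * virtual EFLAGS.IF seen by the guest.
         */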
        movq  VCPU_vcpu_info(%rbx),%rax
        pushq VCPUINFO_upcall_mask(%rax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                       # TBF_INTERRUPT -> set upcall mask
        orb   %ch,VCPUINFO_upcall_mask(%rax)
        popq  %rax
        shlq  $32,%rax                  # Bits 32-39: saved_upcall_mask
        movw  UREGS_cs+8(%rsp),%ax      # Bits  0-15: CS
.Lft4:  movq  %rax,8(%rsi)              # CS / saved_upcall_mask
        shrq  $32,%rax
        testb $0xFF,%al                 # Bits 0-7: saved_upcall_mask
        setz  %ch                       # %ch == !saved_upcall_mask
        movq  UREGS_eflags+8(%rsp),%rax
        andq  $~X86_EFLAGS_IF,%rax
        shlb  $1,%ch                    # Bit 9 (EFLAGS.IF)
        orb   %ch,%ah                   # Fold EFLAGS.IF into %eax
.Lft5:  movq  %rax,16(%rsi)             # RFLAGS
        movq  UREGS_rip+8(%rsp),%rax
.Lft6:  movq  %rax,(%rsi)               # RIP
        testb $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subq  $8,%rsi
        movl  TRAPBOUNCE_error_code(%rdx),%eax
.Lft7:  movq  %rax,(%rsi)               # ERROR CODE
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subq  $32,%rsi
        movl  %gs,%eax
.Lft8:  movq  %rax,24(%rsi)             # GS
        movl  %fs,%eax
.Lft9:  movq  %rax,16(%rsi)             # FS
        movl  %es,%eax
.Lft10: movq  %rax,8(%rsi)              # ES
        movl  %ds,%eax
.Lft11: movq  %rax,(%rsi)               # DS
2:      subq  $16,%rsi
        movq  UREGS_r11+8(%rsp),%rax
.Lft12: movq  %rax,8(%rsi)              # R11
        movq  UREGS_rcx+8(%rsp),%rax
.Lft13: movq  %rax,(%rsi)               # RCX
        /* Rewrite our stack frame and return to guest-OS mode. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        /* Also clear AC: alignment checks shouldn't trigger in kernel mode. */
        movl  $TRAP_syscall,UREGS_entry_vector+8(%rsp)
        andl  $~(X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|\
                 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
        movq  $FLAT_KERNEL_SS,UREGS_ss+8(%rsp)
        movq  %rsi,UREGS_rsp+8(%rsp)
        movq  $FLAT_KERNEL_CS,UREGS_cs+8(%rsp)
        movq  TRAPBOUNCE_eip(%rdx),%rax
        testq %rax,%rax
        jz    domain_crash_synchronous
        movq  %rax,UREGS_rip+8(%rsp)
        ret
.section __ex_table,"a"
        .quad  .Lft2,domain_crash_synchronous ,  .Lft3,domain_crash_synchronous
        .quad  .Lft4,domain_crash_synchronous ,  .Lft5,domain_crash_synchronous
        .quad  .Lft6,domain_crash_synchronous ,  .Lft7,domain_crash_synchronous
        .quad  .Lft8,domain_crash_synchronous ,  .Lft9,domain_crash_synchronous
        .quad .Lft10,domain_crash_synchronous , .Lft11,domain_crash_synchronous
        .quad .Lft12,domain_crash_synchronous , .Lft13,domain_crash_synchronous
.previous

domain_crash_synchronous_string:
        .asciz "domain_crash_sync called from entry.S\n"

ENTRY(domain_crash_synchronous)
        # Get out of the guest-save area of the stack.
        GET_GUEST_REGS(%rax)
        movq  %rax,%rsp
        # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
        movq  CPUINFO_current_vcpu(%rax),%rax
        movq  VCPU_domain(%rax),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        setz  %al
        leal  (%rax,%rax,2),%eax
        orb   %al,UREGS_cs(%rsp)
        # printk(domain_crash_synchronous_string)
        leaq  domain_crash_synchronous_string(%rip),%rdi
        xorl  %eax,%eax
        call  printk
        jmp   __domain_crash_synchronous

        ALIGN
/* No special register assumptions. */
ENTRY(ret_from_intr)
        GET_CURRENT(%rbx)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jz    test_all_events
        jmp   compat_test_all_events

        ALIGN
/* No special register assumptions. */
ENTRY(handle_exception)
        SAVE_ALL
handle_exception_saved:
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
        jz    exception_with_ints_disabled
        sti
1:      movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        GET_CURRENT(%rbx)
        PERFC_INCR(PERFC_exceptions, %rax, %rbx)
        callq *(%rdx,%rax,8)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jnz   compat_post_handle_exception
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
        jz    test_all_events
        call  create_bounce_frame
        movb  $0,TRAPBOUNCE_flags(%rdx)
        jmp   test_all_events

/* No special register assumptions. */
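/*
 * An exception was taken while interrupts were disabled. If it hit Xen
 * itself, look for a pre-exception fixup entry: if one exists, resume there
 * and replay the error code onto the interrupted frame; otherwise the fault
 * is fatal.
 */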
exception_with_ints_disabled:
        testb $3,UREGS_cs(%rsp)         # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        movq  %rsp,%rdi
        call  search_pre_exception_table
        testq %rax,%rax                 # no fixup code for faulting EIP?
        jz    1b
        movq  %rax,UREGS_rip(%rsp)
        subq  $8,UREGS_rsp(%rsp)        # add ec/ev to previous stack frame
        testb $15,UREGS_rsp(%rsp)       # return %rsp is now aligned?
        jz    1f                        # then there is a pad quadword already
        movq  %rsp,%rsi
        subq  $8,%rsp
        movq  %rsp,%rdi
        movq  $UREGS_kernel_sizeof/8,%rcx
        rep;  movsq                     # make room for ec/ev
1:      movq  UREGS_error_code(%rsp),%rax # ec/ev
        movq  %rax,UREGS_kernel_sizeof(%rsp)
        jmp   restore_all_xen           # return to fixup code

/* No special register assumptions. */
FATAL_exception_with_ints_disabled:
        movl  UREGS_entry_vector(%rsp),%edi
        movq  %rsp,%rsi
        call  fatal_trap
        ud2

ENTRY(divide_error)
        pushq $0
        movl  $TRAP_divide_error,4(%rsp)
        jmp   handle_exception

ENTRY(coprocessor_error)
        pushq $0
        movl  $TRAP_copro_error,4(%rsp)
        jmp   handle_exception

ENTRY(simd_coprocessor_error)
        pushq $0
        movl  $TRAP_simd_error,4(%rsp)
        jmp   handle_exception

ENTRY(device_not_available)
        pushq $0
        movl  $TRAP_no_device,4(%rsp)
        jmp   handle_exception

ENTRY(debug)
        pushq $0
        movl  $TRAP_debug,4(%rsp)
        jmp   handle_exception

ENTRY(int3)
        pushq $0
        movl  $TRAP_int3,4(%rsp)
        jmp   handle_exception

ENTRY(overflow)
        pushq $0
        movl  $TRAP_overflow,4(%rsp)
        jmp   handle_exception

ENTRY(bounds)
        pushq $0
        movl  $TRAP_bounds,4(%rsp)
        jmp   handle_exception

ENTRY(invalid_op)
        pushq $0
        movl  $TRAP_invalid_op,4(%rsp)
        jmp   handle_exception

ENTRY(coprocessor_segment_overrun)
        pushq $0
        movl  $TRAP_copro_seg,4(%rsp)
        jmp   handle_exception

ENTRY(invalid_TSS)
        movl  $TRAP_invalid_tss,4(%rsp)
        jmp   handle_exception

ENTRY(segment_not_present)
        movl  $TRAP_no_segment,4(%rsp)
        jmp   handle_exception

ENTRY(stack_segment)
        movl  $TRAP_stack_error,4(%rsp)
        jmp   handle_exception

ENTRY(general_protection)
        movl  $TRAP_gp_fault,4(%rsp)
        jmp   handle_exception

ENTRY(alignment_check)
        movl  $TRAP_alignment_check,4(%rsp)
        jmp   handle_exception

ENTRY(page_fault)
        movl  $TRAP_page_fault,4(%rsp)
        jmp   handle_exception

ENTRY(spurious_interrupt_bug)
        pushq $0
        movl  $TRAP_spurious_int,4(%rsp)
        jmp   handle_exception

ENTRY(double_fault)
        SAVE_ALL
        movq  %rsp,%rdi
        call  do_double_fault
        ud2

ENTRY(early_page_fault)
        SAVE_ALL
        movq  %rsp,%rdi
        call  do_early_page_fault
        jmp   restore_all_xen

handle_ist_exception:
        SAVE_ALL
        testb $3,UREGS_cs(%rsp)
        jz    1f
        /* Interrupted guest context. Copy the context to stack bottom. */
        GET_GUEST_REGS(%rdi)
        movq  %rsp,%rsi
        movl  $UREGS_kernel_sizeof/8,%ecx
        movq  %rdi,%rsp
        rep   movsq
1:      movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        callq *(%rdx,%rax,8)
        jmp   ret_from_intr

ENTRY(nmi)
        pushq $0
        movl  $TRAP_nmi,4(%rsp)
        jmp   handle_ist_exception

ENTRY(machine_check)
        pushq $0
        movl  $TRAP_machine_check,4(%rsp)
        jmp   handle_ist_exception

.data

ENTRY(exception_table)
        .quad do_divide_error
        .quad do_debug
        .quad do_nmi
        .quad do_int3
        .quad do_overflow
        .quad do_bounds
        .quad do_invalid_op
        .quad do_device_not_available
        .quad 0 # double_fault
        .quad do_coprocessor_segment_overrun
        .quad do_invalid_TSS
        .quad do_segment_not_present
        .quad do_stack_segment
        .quad do_general_protection
        .quad do_page_fault
        .quad do_spurious_interrupt_bug
        .quad do_coprocessor_error
        .quad do_alignment_check
        .quad do_machine_check
        .quad do_simd_coprocessor_error

ENTRY(hypercall_table)
        .quad do_set_trap_table     /*  0 */
        .quad do_mmu_update
        .quad do_set_gdt
        .quad do_stack_switch
        .quad do_set_callbacks
        .quad do_fpu_taskswitch     /*  5 */
        .quad do_sched_op_compat
        .quad do_platform_op
        .quad do_set_debugreg
        .quad do_get_debugreg
        .quad do_update_descriptor  /* 10 */
        .quad do_ni_hypercall
        .quad do_memory_op
        .quad do_multicall
        .quad do_update_va_mapping
        .quad do_set_timer_op       /* 15 */
        .quad do_event_channel_op_compat
        .quad do_xen_version
        .quad do_console_io
        .quad do_physdev_op_compat
        .quad do_grant_table_op     /* 20 */
        .quad do_vm_assist
        .quad do_update_va_mapping_otherdomain
        .quad do_iret
        .quad do_vcpu_op
        .quad do_set_segment_base   /* 25 */
        .quad do_mmuext_op
        .quad do_xsm_op
        .quad do_nmi_op
        .quad do_sched_op
        .quad do_callback_op        /* 30 */
        .quad do_xenoprof_op
        .quad do_event_channel_op
        .quad do_physdev_op
        .quad do_hvm_op
        .quad do_sysctl             /* 35 */
        .quad do_domctl
        .quad do_kexec_op
        .rept NR_hypercalls-((.-hypercall_table)/8)
        .quad do_ni_hypercall
        .endr
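        /*
         * The .rept above pads unimplemented hypercall numbers with
         * do_ni_hypercall; numbers >= NR_hypercalls never reach the table
         * (see bad_hypercall above).
         */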

ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table    */  /*  0 */
        .byte 4 /* do_mmu_update        */
        .byte 2 /* do_set_gdt           */
        .byte 2 /* do_stack_switch      */
        .byte 3 /* do_set_callbacks     */
        .byte 1 /* do_fpu_taskswitch    */  /*  5 */
        .byte 2 /* do_sched_op_compat   */
        .byte 1 /* do_platform_op       */
        .byte 2 /* do_set_debugreg      */
        .byte 1 /* do_get_debugreg      */
        .byte 2 /* do_update_descriptor */  /* 10 */
        .byte 0 /* do_ni_hypercall      */
        .byte 2 /* do_memory_op         */
        .byte 2 /* do_multicall         */
        .byte 3 /* do_update_va_mapping */
        .byte 1 /* do_set_timer_op      */  /* 15 */
        .byte 1 /* do_event_channel_op_compat */
        .byte 2 /* do_xen_version       */
        .byte 3 /* do_console_io        */
        .byte 1 /* do_physdev_op_compat */
        .byte 3 /* do_grant_table_op    */  /* 20 */
        .byte 2 /* do_vm_assist         */
        .byte 4 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_iret              */
        .byte 3 /* do_vcpu_op           */
        .byte 2 /* do_set_segment_base  */  /* 25 */
        .byte 4 /* do_mmuext_op         */
        .byte 1 /* do_xsm_op            */
        .byte 2 /* do_nmi_op            */
        .byte 2 /* do_sched_op          */
        .byte 2 /* do_callback_op       */  /* 30 */
        .byte 2 /* do_xenoprof_op       */
        .byte 2 /* do_event_channel_op  */
        .byte 2 /* do_physdev_op        */
        .byte 2 /* do_hvm_op            */
        .byte 1 /* do_sysctl            */  /* 35 */
        .byte 1 /* do_domctl            */
        .byte 2 /* do_kexec             */
        .byte 1 /* do_xsm_op            */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall      */
        .endr