ia64/xen-unstable: xen/arch/x86/x86_64/entry.S @ 17697:e48453f82d30

x86: Change a local label in asm entry stubs to really be local.
This prevents it appearing in crash traces, where it can be a bit confusing.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Author:   Keir Fraser <keir.fraser@citrix.com>
Date:     Thu May 22 10:41:49 2008 +0100
Parents:  23582bcda6e1
Children: a49673cd23d2
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

#define GET_GUEST_REGS(reg)                     \
        movq $~(STACK_SIZE-1),reg;              \
        andq %rsp,reg;                          \
        orq  $(STACK_SIZE-CPUINFO_sizeof),reg;

#define GET_CURRENT(reg)                        \
        movq $STACK_SIZE-8, reg;                \
        orq  %rsp, reg;                         \
        andq $~7,reg;                           \
        movq (reg),reg;
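/*
 * A rough C rendering of the two macros above (hedged sketch): each CPU
 * stack is STACK_SIZE-aligned, ends with a struct cpu_info holding the
 * guest register frame, and keeps the current vcpu pointer in its last
 * eight bytes, so both values can be derived from %rsp alone:
 *
 *     struct cpu_info *get_cpu_info(unsigned long rsp)
 *     {
 *         return (struct cpu_info *)
 *             ((rsp & ~(STACK_SIZE - 1)) | (STACK_SIZE - CPUINFO_sizeof));
 *     }
 *
 *     struct vcpu *get_current(unsigned long rsp)
 *     {
 *         return *(struct vcpu **)((rsp | (STACK_SIZE - 8)) & ~7UL);
 *     }
 */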

        ALIGN
/* %rbx: struct vcpu */
switch_to_kernel:
        leaq  VCPU_trap_bounce(%rbx),%rdx
        /* TB_eip = (32-bit syscall && syscall32_addr) ?
         *          syscall32_addr : syscall_addr */
        xor   %eax,%eax
        cmpw  $FLAT_USER_CS32,UREGS_cs(%rsp)
        cmoveq VCPU_syscall32_addr(%rbx),%rax
        testq %rax,%rax
        cmovzq VCPU_syscall_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        /* TB_flags = VGCF_syscall_disables_events ? TBF_INTERRUPT : 0 */
        btl   $_VGCF_syscall_disables_events,VCPU_guest_context_flags(%rbx)
        setc  %cl
        leal  (,%rcx,TBF_INTERRUPT),%ecx
        movb  %cl,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        andl  $~X86_EFLAGS_DF,UREGS_eflags(%rsp)
        jmp   test_all_events
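/*
 * Hedged C sketch of the trap-bounce setup above (field names follow the
 * asm offsets, not necessarily the real struct layout):
 *
 *     tb->eip   = (regs->cs == FLAT_USER_CS32 && v->syscall32_addr)
 *                 ? v->syscall32_addr : v->syscall_addr;
 *     tb->flags = test_bit(_VGCF_syscall_disables_events,
 *                          &v->guest_context_flags) ? TBF_INTERRUPT : 0;
 */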

/* %rbx: struct vcpu, interrupts disabled */
restore_all_guest:
        ASSERT_INTERRUPTS_DISABLED
        RESTORE_ALL
        testw $TRAP_syscall,4(%rsp)
        jz    iret_exit_to_guest

        addq  $8,%rsp
        popq  %rcx                    # RIP
        popq  %r11                    # CS
        cmpw  $FLAT_USER_CS32,%r11w
        popq  %r11                    # RFLAGS
        popq  %rsp                    # RSP
        je    1f
        sysretq
1:      sysretl
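/*
 * Return-path selection, sketched in C: SYSCALL entries return via
 * SYSRET, choosing the guest mode from the saved code selector;
 * everything else falls through to the IRET path below.
 *
 *     if ( regs->cs == FLAT_USER_CS32 )
 *         sysretl();   // back to compatibility (32-bit) mode
 *     else
 *         sysretq();   // back to 64-bit mode
 */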

        ALIGN
/* No special register assumptions. */
iret_exit_to_guest:
        addq  $8,%rsp
.Lft0:  iretq

        .section .fixup,"ax"
.Lfx0:  sti
        SAVE_ALL
        movq  UREGS_error_code(%rsp),%rsi
        movq  %rsp,%rax
        andq  $~0xf,%rsp
        pushq $__HYPERVISOR_DS        # SS
        pushq %rax                    # RSP
        pushfq                        # RFLAGS
        pushq $__HYPERVISOR_CS        # CS
        leaq  .Ldf0(%rip),%rax
        pushq %rax                    # RIP
        pushq %rsi                    # error_code/entry_vector
        jmp   handle_exception
.Ldf0:  GET_CURRENT(%rbx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_failsafe_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
        bt    $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%rbx)
        jnc   1f
        orb   $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
1:      call  create_bounce_frame
        jmp   test_all_events
        .previous
        .section __pre_ex_table,"a"
        .quad .Lft0,.Lfx0
        .previous
        .section __ex_table,"a"
        .quad .Ldf0,failsafe_callback
        .previous

        ALIGN
/* No special register assumptions. */
restore_all_xen:
        RESTORE_ALL
        addq  $8,%rsp
        iretq

/*
 * When entering SYSCALL from kernel mode:
 *  %rax = hypercall vector
 *  %rdi, %rsi, %rdx, %r10, %r8, %r9 = hypercall arguments
 *  %rcx = SYSCALL-saved %rip
 *  NB. We must move %r10 to %rcx for C function-calling ABI.
 *
 * When entering SYSCALL from user mode:
 *  Vector directly to the registered arch.syscall_addr.
 *
 * Initial work is done by per-CPU stack trampolines. At this point %rsp
 * has been initialised to point at the correct Xen stack, and %rsp, %rflags
 * and %cs have been saved. All other registers are still to be saved onto
 * the stack, starting with %rip, and an appropriate %ss must be saved into
 * the space left by the trampoline.
 */
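/*
 * For context, a 64-bit PV guest reaches this path via SYSCALL from a
 * hypercall-page stub that loads the arguments as described above. A
 * hedged, illustrative guest-side equivalent (hypothetical helper name,
 * not part of this file; real guests go through the hypercall page):
 *
 *     static inline long hypercall2(unsigned int nr,
 *                                   unsigned long a1, unsigned long a2)
 *     {
 *         long ret;
 *         register unsigned long _a1 asm("rdi") = a1;
 *         register unsigned long _a2 asm("rsi") = a2;
 *         asm volatile ( "syscall"
 *                        : "=a" (ret), "+r" (_a1), "+r" (_a2)
 *                        : "0" (nr)
 *                        : "rcx", "r11", "memory" );  // SYSCALL clobbers
 *         return ret;
 *     }
 */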
        ALIGN
ENTRY(syscall_enter)
        sti
        movl  $FLAT_KERNEL_SS,24(%rsp)
        pushq %rcx
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        movq  24(%rsp),%r11 /* Re-load user RFLAGS into %r11 before SAVE_ALL */
        SAVE_ALL
        GET_CURRENT(%rbx)
        movq  VCPU_domain(%rbx),%rcx
        testb $1,DOMAIN_is_32bit_pv(%rcx)
        jnz   compat_syscall
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jz    switch_to_kernel

/*hypercall:*/
        movq  %r10,%rcx
        cmpq  $NR_hypercalls,%rax
        jae   bad_hypercall
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        pushq %rdi; pushq %rsi; pushq %rdx; pushq %rcx; pushq %r8 ; pushq %r9
        leaq  hypercall_args_table(%rip),%r10
        movq  $6,%rcx
        sub   (%r10,%rax,1),%cl
        movq  %rsp,%rdi
        movl  $0xDEADBEEF,%eax
        rep   stosq
        popq  %r9 ; popq %r8 ; popq %rcx; popq %rdx; popq %rsi; popq %rdi
        movq  UREGS_rax(%rsp),%rax
        pushq %rax
        pushq UREGS_rip+8(%rsp)
#define SHADOW_BYTES 16 /* Shadow EIP + shadow hypercall # */
#else
#define SHADOW_BYTES 0  /* No on-stack shadow state */
#endif
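/*
 * The debug-only block above, as a hedged C sketch: every argument slot
 * the hypercall does not use is poisoned before the call (a matching
 * pass after the call, at skip_clobber below, poisons the slots it did
 * use), so guests cannot rely on argument registers being preserved:
 *
 *     uint64_t args[6] = { r9, r8, rcx, rdx, rsi, rdi };  // push order
 *     for ( i = 0; i < 6 - hypercall_args_table[nr]; i++ )
 *         args[i] = 0xDEADBEEF;
 */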
        cmpb  $0,tb_init_done(%rip)
        je    1f
        call  trace_hypercall
        /* Now restore all the registers that trace_hypercall clobbered */
        movq  UREGS_rax+SHADOW_BYTES(%rsp),%rax   /* Hypercall #  */
        movq  UREGS_rdi+SHADOW_BYTES(%rsp),%rdi   /* Arg 1        */
        movq  UREGS_rsi+SHADOW_BYTES(%rsp),%rsi   /* Arg 2        */
        movq  UREGS_rdx+SHADOW_BYTES(%rsp),%rdx   /* Arg 3        */
        movq  UREGS_r10+SHADOW_BYTES(%rsp),%rcx   /* Arg 4        */
        movq  UREGS_r8+SHADOW_BYTES(%rsp),%r8     /* Arg 5        */
        movq  UREGS_r9+SHADOW_BYTES(%rsp),%r9     /* Arg 6        */
#undef SHADOW_BYTES
1:      leaq  hypercall_table(%rip),%r10
        PERFC_INCR(PERFC_hypercalls, %rax, %rbx)
        callq *(%r10,%rax,8)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs used by this hypercall. */
        popq  %r10         # Shadow RIP
        cmpq  %r10,UREGS_rip+8(%rsp)
        popq  %rcx         # Shadow hypercall index
        jne   skip_clobber /* If RIP has changed then don't clobber. */
        leaq  hypercall_args_table(%rip),%r10
        movb  (%r10,%rcx,1),%cl
        movl  $0xDEADBEEF,%r10d
        cmpb  $1,%cl; jb skip_clobber; movq %r10,UREGS_rdi(%rsp)
        cmpb  $2,%cl; jb skip_clobber; movq %r10,UREGS_rsi(%rsp)
        cmpb  $3,%cl; jb skip_clobber; movq %r10,UREGS_rdx(%rsp)
        cmpb  $4,%cl; jb skip_clobber; movq %r10,UREGS_r10(%rsp)
        cmpb  $5,%cl; jb skip_clobber; movq %r10,UREGS_r8(%rsp)
        cmpb  $6,%cl; jb skip_clobber; movq %r10,UREGS_r9(%rsp)
skip_clobber:
#endif
        movq  %rax,UREGS_rax(%rsp)    # save the return value

/* %rbx: struct vcpu */
test_all_events:
        cli                           # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%rbx),%eax
        shl   $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip),%rcx
        testl $~0,(%rcx,%rax,1)
        jnz   process_softirqs
        testb $1,VCPU_nmi_pending(%rbx)
        jnz   process_nmi
test_guest_events:
        movq  VCPU_vcpu_info(%rbx),%rax
        testb $0xFF,VCPUINFO_upcall_mask(%rax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%rax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_event_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events
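/*
 * The exit-to-guest checks above, as a hedged C sketch (upcall fields as
 * in the Xen public vcpu_info ABI; helper names illustrative only):
 *
 *     for ( ; ; )
 *     {
 *         local_irq_disable();
 *         if ( softirq_pending(v->processor) )
 *             { local_irq_enable(); do_softirq(); continue; }
 *         if ( v->nmi_pending && !v->nmi_masked )
 *             { deliver_nmi(v); continue; }
 *         if ( !vi->evtchn_upcall_mask && vi->evtchn_upcall_pending )
 *             { local_irq_enable();
 *               bounce_to(v, v->event_addr, TBF_INTERRUPT); continue; }
 *         break;  // interrupts stay disabled: restore_all_guest
 *     }
 */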

        ALIGN
/* %rbx: struct vcpu */
process_softirqs:
        sti
        call  do_softirq
        jmp   test_all_events

        ALIGN
/* %rbx: struct vcpu */
process_nmi:
        testb $1,VCPU_nmi_masked(%rbx)
        jnz   test_guest_events
        sti
        movb  $0,VCPU_nmi_pending(%rbx)
        call  set_guest_nmi_trapbounce
        test  %eax,%eax
        jz    test_all_events
        movb  $1,VCPU_nmi_masked(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        call  create_bounce_frame
        jmp   test_all_events

bad_hypercall:
        movq  $-ENOSYS,UREGS_rax(%rsp)
        jmp   test_all_events

ENTRY(sysenter_entry)
        sti
        pushq $FLAT_USER_SS
        pushq $0
        pushfq
        .globl sysenter_eflags_saved
sysenter_eflags_saved:
        pushq $0
        pushq $0
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        SAVE_ALL
        GET_CURRENT(%rbx)
        cmpb  $0,VCPU_sysenter_disables_events(%rbx)
        movq  $0,UREGS_rip(%rsp)      /* null rip */
        movl  $3,UREGS_cs(%rsp)       /* ring 3 null cs */
        movq  VCPU_sysenter_addr(%rbx),%rax
        setne %cl
        leaq  VCPU_trap_bounce(%rbx),%rdx
        testq %rax,%rax
        leal  (,%rcx,TBF_INTERRUPT),%ecx
        jz    2f
1:      movq  VCPU_domain(%rbx),%rdi
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  %cl,TRAPBOUNCE_flags(%rdx)
        testb $1,DOMAIN_is_32bit_pv(%rdi)
        jnz   compat_sysenter
        call  create_bounce_frame
        jmp   test_all_events
2:      movl  %eax,TRAPBOUNCE_error_code(%rdx)
        movq  VCPU_gp_fault_addr(%rbx),%rax
        movb  $(TBF_EXCEPTION|TBF_EXCEPTION_ERRCODE|TBF_INTERRUPT),%cl
        movl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
        jmp   1b
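/*
 * Hedged C sketch of the sysenter dispatch above:
 *
 *     if ( v->sysenter_addr )
 *         bounce_to(v, v->sysenter_addr,
 *                   v->sysenter_disables_events ? TBF_INTERRUPT : 0);
 *     else  // no callback registered: reflect as #GP(0)
 *         inject_gp_fault(v, 0);
 */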

ENTRY(int80_direct_trap)
        pushq $0
        SAVE_ALL

        GET_CURRENT(%rbx)

        /* Check that the callback is non-null. */
        leaq  VCPU_int80_bounce(%rbx),%rdx
        cmpb  $0,TRAPBOUNCE_flags(%rdx)
        jz    int80_slow_path

        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jnz   compat_int80_direct_trap

        call  create_bounce_frame
        jmp   test_all_events

int80_slow_path:
        /*
         * Setup entry vector and error code as if this was a GPF caused by an
         * IDT entry with DPL==0.
         */
        movl  $((0x80 << 3) | 0x2),UREGS_error_code(%rsp)
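        /*
         * Error-code layout sketch: bits 15:3 carry the vector (0x80) and
         * bit 1 (IDT) marks an IDT-sourced selector, i.e. exactly what a
         * real DPL check failure on `int $0x80' would report.
         */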
        movl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
        /* A GPF wouldn't have incremented the instruction pointer. */
        subq  $2,UREGS_rip(%rsp)
        jmp   handle_exception_saved

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK:                     */
/*   { RCX, R11, [DS-GS,] [CR2,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS }   */
/* %rdx: trap_bounce, %rbx: struct vcpu                                  */
/* On return only %rbx and %rdx are guaranteed non-clobbered.            */
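/*
 * Sketch of the frame laid out below, lowest address first (the DS-GS
 * and ERRCODE words are present only when the corresponding TBF_ flags
 * are set; struct name illustrative):
 *
 *     struct bounce_frame {
 *         uint64_t rcx, r11;          // always saved for SYSCALL/SYSRET
 *         uint64_t ds, es, fs, gs;    // TBF_FAILSAFE only
 *         uint64_t error_code;        // TBF_EXCEPTION_ERRCODE only
 *         uint64_t rip;
 *         uint64_t cs;                // bits 39:32 = saved upcall mask
 *         uint64_t rflags, rsp, ss;
 *     };
 */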
create_bounce_frame:
        ASSERT_INTERRUPTS_ENABLED
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jnz   1f
        /* Push new frame at registered guest-OS stack base. */
        pushq %rdx
        movq  %rbx,%rdi
        call  toggle_guest_mode
        popq  %rdx
        movq  VCPU_kernel_sp(%rbx),%rsi
        jmp   2f
1:      /* In kernel context already: push new frame at existing %rsp. */
        movq  UREGS_rsp+8(%rsp),%rsi
        andb  $0xfc,UREGS_cs+8(%rsp)  # Indicate kernel context to guest.
2:      andq  $~0xf,%rsi              # Stack frames are 16-byte aligned.
        movq  $HYPERVISOR_VIRT_START,%rax
        cmpq  %rax,%rsi
        jb    1f                      # In +ve address space? Then okay.
        movq  $HYPERVISOR_VIRT_END+60,%rax
        cmpq  %rax,%rsi
        jb    domain_crash_synchronous # Above Xen private area? Then okay.
1:      movb  TRAPBOUNCE_flags(%rdx),%cl
        subq  $40,%rsi
        movq  UREGS_ss+8(%rsp),%rax
.Lft2:  movq  %rax,32(%rsi)           # SS
        movq  UREGS_rsp+8(%rsp),%rax
.Lft3:  movq  %rax,24(%rsi)           # RSP
        movq  VCPU_vcpu_info(%rbx),%rax
        pushq VCPUINFO_upcall_mask(%rax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                     # TBF_INTERRUPT -> set upcall mask
        orb   %ch,VCPUINFO_upcall_mask(%rax)
        popq  %rax
        shlq  $32,%rax                # Bits 32-39: saved_upcall_mask
        movw  UREGS_cs+8(%rsp),%ax    # Bits  0-15: CS
.Lft4:  movq  %rax,8(%rsi)            # CS / saved_upcall_mask
        shrq  $32,%rax
        testb $0xFF,%al               # Bits 0-7: saved_upcall_mask
        setz  %ch                     # %ch == !saved_upcall_mask
        movl  UREGS_eflags+8(%rsp),%eax
        andl  $~X86_EFLAGS_IF,%eax
        addb  %ch,%ch                 # Bit 9 (EFLAGS.IF)
        orb   %ch,%ah                 # Fold EFLAGS.IF into %eax
.Lft5:  movq  %rax,16(%rsi)           # RFLAGS
        movq  UREGS_rip+8(%rsp),%rax
.Lft6:  movq  %rax,(%rsi)             # RIP
        testb $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subq  $8,%rsi
        movl  TRAPBOUNCE_error_code(%rdx),%eax
.Lft7:  movq  %rax,(%rsi)             # ERROR CODE
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subq  $32,%rsi
        movl  %gs,%eax
.Lft8:  movq  %rax,24(%rsi)           # GS
        movl  %fs,%eax
.Lft9:  movq  %rax,16(%rsi)           # FS
        movl  %es,%eax
.Lft10: movq  %rax,8(%rsi)            # ES
        movl  %ds,%eax
.Lft11: movq  %rax,(%rsi)             # DS
2:      subq  $16,%rsi
        movq  UREGS_r11+8(%rsp),%rax
.Lft12: movq  %rax,8(%rsi)            # R11
        movq  UREGS_rcx+8(%rsp),%rax
.Lft13: movq  %rax,(%rsi)             # RCX
        /* Rewrite our stack frame and return to guest-OS mode. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        /* Also clear AC: alignment checks shouldn't trigger in kernel mode. */
        movl  $TRAP_syscall,UREGS_entry_vector+8(%rsp)
        andl  $~(X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|\
                 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
        movq  $FLAT_KERNEL_SS,UREGS_ss+8(%rsp)
        movq  %rsi,UREGS_rsp+8(%rsp)
        movq  $FLAT_KERNEL_CS,UREGS_cs+8(%rsp)
        movq  TRAPBOUNCE_eip(%rdx),%rax
        testq %rax,%rax
        jz    domain_crash_synchronous
        movq  %rax,UREGS_rip+8(%rsp)
        ret
        .section __ex_table,"a"
        .quad .Lft2,domain_crash_synchronous ,  .Lft3,domain_crash_synchronous
        .quad .Lft4,domain_crash_synchronous ,  .Lft5,domain_crash_synchronous
        .quad .Lft6,domain_crash_synchronous ,  .Lft7,domain_crash_synchronous
        .quad .Lft8,domain_crash_synchronous ,  .Lft9,domain_crash_synchronous
        .quad .Lft10,domain_crash_synchronous , .Lft11,domain_crash_synchronous
        .quad .Lft12,domain_crash_synchronous , .Lft13,domain_crash_synchronous
        .previous

domain_crash_synchronous_string:
        .asciz "domain_crash_sync called from entry.S\n"

ENTRY(domain_crash_synchronous)
        # Get out of the guest-save area of the stack.
        GET_GUEST_REGS(%rax)
        movq  %rax,%rsp
        # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
        movq  CPUINFO_current_vcpu(%rax),%rax
        movq  VCPU_domain(%rax),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        setz  %al
        leal  (%rax,%rax,2),%eax
        orb   %al,UREGS_cs(%rsp)
        # printk(domain_crash_synchronous_string)
        leaq  domain_crash_synchronous_string(%rip),%rdi
        xorl  %eax,%eax
        call  printk
        jmp   __domain_crash_synchronous

        ALIGN
/* No special register assumptions. */
ENTRY(ret_from_intr)
        GET_CURRENT(%rbx)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jz    test_all_events
        jmp   compat_test_all_events

        ALIGN
/* No special register assumptions. */
ENTRY(handle_exception)
        SAVE_ALL
handle_exception_saved:
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
        jz    exception_with_ints_disabled
        sti
1:      movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        GET_CURRENT(%rbx)
        PERFC_INCR(PERFC_exceptions, %rax, %rbx)
        callq *(%rdx,%rax,8)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jnz   compat_post_handle_exception
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
        jz    test_all_events
        call  create_bounce_frame
        movb  $0,TRAPBOUNCE_flags(%rdx)
        jmp   test_all_events

/* No special register assumptions. */
exception_with_ints_disabled:
        testb $3,UREGS_cs(%rsp)       # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        movq  %rsp,%rdi
        call  search_pre_exception_table
        testq %rax,%rax               # no fixup code for faulting EIP?
        jz    1b
        movq  %rax,UREGS_rip(%rsp)
        subq  $8,UREGS_rsp(%rsp)      # add ec/ev to previous stack frame
        testb $15,UREGS_rsp(%rsp)     # return %rsp is now aligned?
        jz    1f                      # then there is a pad quadword already
        movq  %rsp,%rsi
        subq  $8,%rsp
        movq  %rsp,%rdi
        movq  $UREGS_kernel_sizeof/8,%rcx
        rep;  movsq                   # make room for ec/ev
1:      movq  UREGS_error_code(%rsp),%rax # ec/ev
        movq  %rax,UREGS_kernel_sizeof(%rsp)
        jmp   restore_all_xen         # return to fixup code
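/*
 * Hedged C sketch of the pre-exception fixup above: the interrupted
 * frame is rewritten so that restore_all_xen resumes at the fixup
 * address, with the error code and entry vector re-pushed onto the
 * interrupted context's stack:
 *
 *     regs->rip  = search_pre_exception_table(regs);  // fixup address
 *     regs->rsp -= 8;            // room for ec/ev on the target stack
 *     // (our own frame may be shifted down 8 bytes to keep alignment)
 *     push_onto_interrupted_stack(regs->error_code);  // illustrative
 */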

/* No special register assumptions. */
FATAL_exception_with_ints_disabled:
        movl  UREGS_entry_vector(%rsp),%edi
        movq  %rsp,%rsi
        call  fatal_trap
        ud2

ENTRY(divide_error)
        pushq $0
        movl  $TRAP_divide_error,4(%rsp)
        jmp   handle_exception

ENTRY(coprocessor_error)
        pushq $0
        movl  $TRAP_copro_error,4(%rsp)
        jmp   handle_exception

ENTRY(simd_coprocessor_error)
        pushq $0
        movl  $TRAP_simd_error,4(%rsp)
        jmp   handle_exception

ENTRY(device_not_available)
        pushq $0
        movl  $TRAP_no_device,4(%rsp)
        jmp   handle_exception

ENTRY(debug)
        pushq $0
        movl  $TRAP_debug,4(%rsp)
        jmp   handle_exception

ENTRY(int3)
        pushq $0
        movl  $TRAP_int3,4(%rsp)
        jmp   handle_exception

ENTRY(overflow)
        pushq $0
        movl  $TRAP_overflow,4(%rsp)
        jmp   handle_exception

ENTRY(bounds)
        pushq $0
        movl  $TRAP_bounds,4(%rsp)
        jmp   handle_exception

ENTRY(invalid_op)
        pushq $0
        movl  $TRAP_invalid_op,4(%rsp)
        jmp   handle_exception

ENTRY(coprocessor_segment_overrun)
        pushq $0
        movl  $TRAP_copro_seg,4(%rsp)
        jmp   handle_exception

ENTRY(invalid_TSS)
        movl  $TRAP_invalid_tss,4(%rsp)
        jmp   handle_exception

ENTRY(segment_not_present)
        movl  $TRAP_no_segment,4(%rsp)
        jmp   handle_exception

ENTRY(stack_segment)
        movl  $TRAP_stack_error,4(%rsp)
        jmp   handle_exception

ENTRY(general_protection)
        movl  $TRAP_gp_fault,4(%rsp)
        jmp   handle_exception

ENTRY(alignment_check)
        movl  $TRAP_alignment_check,4(%rsp)
        jmp   handle_exception

ENTRY(page_fault)
        movl  $TRAP_page_fault,4(%rsp)
        jmp   handle_exception

ENTRY(spurious_interrupt_bug)
        pushq $0
        movl  $TRAP_spurious_int,4(%rsp)
        jmp   handle_exception

ENTRY(double_fault)
        SAVE_ALL
        movq  %rsp,%rdi
        call  do_double_fault
        ud2

ENTRY(early_page_fault)
        SAVE_ALL
        movq  %rsp,%rdi
        call  do_early_page_fault
        jmp   restore_all_xen

handle_ist_exception:
        SAVE_ALL
        testb $3,UREGS_cs(%rsp)
        jz    1f
        /* Interrupted guest context. Copy the context to stack bottom. */
        GET_GUEST_REGS(%rdi)
        movq  %rsp,%rsi
        movl  $UREGS_kernel_sizeof/8,%ecx
        movq  %rdi,%rsp
        rep   movsq
1:      movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        callq *(%rdx,%rax,8)
        jmp   ret_from_intr
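/*
 * Hedged C sketch of the copy above: IST stacks are private, so a frame
 * for an interrupted guest context is moved to the bottom of the normal
 * per-CPU stack (the guest-regs slot located by GET_GUEST_REGS) before
 * the handler runs:
 *
 *     if ( guest_mode(regs) )
 *     {
 *         memcpy(guest_cpu_user_regs(), regs, UREGS_kernel_sizeof);
 *         regs = guest_cpu_user_regs();
 *     }
 */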

ENTRY(nmi)
        pushq $0
        movl  $TRAP_nmi,4(%rsp)
        jmp   handle_ist_exception

ENTRY(machine_check)
        pushq $0
        movl  $TRAP_machine_check,4(%rsp)
        jmp   handle_ist_exception

        .data

ENTRY(exception_table)
        .quad do_divide_error
        .quad do_debug
        .quad do_nmi
        .quad do_int3
        .quad do_overflow
        .quad do_bounds
        .quad do_invalid_op
        .quad do_device_not_available
        .quad 0 # double_fault
        .quad do_coprocessor_segment_overrun
        .quad do_invalid_TSS
        .quad do_segment_not_present
        .quad do_stack_segment
        .quad do_general_protection
        .quad do_page_fault
        .quad do_spurious_interrupt_bug
        .quad do_coprocessor_error
        .quad do_alignment_check
        .quad do_machine_check
        .quad do_simd_coprocessor_error

ENTRY(hypercall_table)
        .quad do_set_trap_table     /*  0 */
        .quad do_mmu_update
        .quad do_set_gdt
        .quad do_stack_switch
        .quad do_set_callbacks
        .quad do_fpu_taskswitch     /*  5 */
        .quad do_sched_op_compat
        .quad do_platform_op
        .quad do_set_debugreg
        .quad do_get_debugreg
        .quad do_update_descriptor  /* 10 */
        .quad do_ni_hypercall
        .quad do_memory_op
        .quad do_multicall
        .quad do_update_va_mapping
        .quad do_set_timer_op       /* 15 */
        .quad do_event_channel_op_compat
        .quad do_xen_version
        .quad do_console_io
        .quad do_physdev_op_compat
        .quad do_grant_table_op     /* 20 */
        .quad do_vm_assist
        .quad do_update_va_mapping_otherdomain
        .quad do_iret
        .quad do_vcpu_op
        .quad do_set_segment_base   /* 25 */
        .quad do_mmuext_op
        .quad do_xsm_op
        .quad do_nmi_op
        .quad do_sched_op
        .quad do_callback_op        /* 30 */
        .quad do_xenoprof_op
        .quad do_event_channel_op
        .quad do_physdev_op
        .quad do_hvm_op
        .quad do_sysctl             /* 35 */
        .quad do_domctl
        .quad do_kexec_op
        .rept NR_hypercalls-((.-hypercall_table)/8)
        .quad do_ni_hypercall
        .endr

ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table    */  /*  0 */
        .byte 4 /* do_mmu_update        */
        .byte 2 /* do_set_gdt           */
        .byte 2 /* do_stack_switch      */
        .byte 3 /* do_set_callbacks     */
        .byte 1 /* do_fpu_taskswitch    */  /*  5 */
        .byte 2 /* do_sched_op_compat   */
        .byte 1 /* do_platform_op       */
        .byte 2 /* do_set_debugreg      */
        .byte 1 /* do_get_debugreg      */
        .byte 2 /* do_update_descriptor */  /* 10 */
        .byte 0 /* do_ni_hypercall      */
        .byte 2 /* do_memory_op         */
        .byte 2 /* do_multicall         */
        .byte 3 /* do_update_va_mapping */
        .byte 1 /* do_set_timer_op      */  /* 15 */
        .byte 1 /* do_event_channel_op_compat */
        .byte 2 /* do_xen_version       */
        .byte 3 /* do_console_io        */
        .byte 1 /* do_physdev_op_compat */
        .byte 3 /* do_grant_table_op    */  /* 20 */
        .byte 2 /* do_vm_assist         */
        .byte 4 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_iret              */
        .byte 3 /* do_vcpu_op           */
        .byte 2 /* do_set_segment_base  */  /* 25 */
        .byte 4 /* do_mmuext_op         */
        .byte 1 /* do_xsm_op            */
        .byte 2 /* do_nmi_op            */
        .byte 2 /* do_sched_op          */
        .byte 2 /* do_callback_op       */  /* 30 */
        .byte 2 /* do_xenoprof_op       */
        .byte 2 /* do_event_channel_op  */
        .byte 2 /* do_physdev_op        */
        .byte 2 /* do_hvm_op            */
        .byte 1 /* do_sysctl            */  /* 35 */
        .byte 1 /* do_domctl            */
        .byte 2 /* do_kexec_op          */
        .byte 1 /* do_xsm_op            */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall      */
        .endr
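/*
 * Note: the .rept/.endr padding fills both tables out to NR_hypercalls
 * entries, so the single bounds check at syscall_enter
 * (`cmpq $NR_hypercalls,%rax') is sufficient; every unimplemented slot
 * dispatches to do_ni_hypercall, which returns -ENOSYS.
 */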