ia64/xen-unstable: xen/arch/x86/x86_64/entry.S @ 16263:23582bcda6e1

x86: Clean up NMI delivery logic. Allow set_trap_table vector 2 to be
specified as not disabling event delivery, just like any other vector.

Signed-off-by: Keir Fraser <keir@xensource.com>
Author: Keir Fraser <keir@xensource.com>
Date:   Mon Oct 29 09:49:39 2007 +0000

/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

#define GET_GUEST_REGS(reg)                     \
        movq $~(STACK_SIZE-1),reg;              \
        andq %rsp,reg;                          \
        orq  $(STACK_SIZE-CPUINFO_sizeof),reg;

#define GET_CURRENT(reg)                        \
        movq $STACK_SIZE-8, reg;                \
        orq  %rsp, reg;                         \
        andq $~7,reg;                           \
        movq (reg),reg;
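
/*
 * A C sketch of the two macros above, assuming a power-of-two STACK_SIZE
 * and a current-vcpu pointer kept in the topmost 8-byte slot of the
 * per-CPU stack (names and sizes below are illustrative, not Xen's real
 * definitions).  Not built; for illustration only:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define STACK_SIZE     (8 * 4096)   /* assumption: power of two */
#define CPUINFO_sizeof 64           /* assumption: cpu_info block size */

/* GET_GUEST_REGS: mask %rsp down to the stack base, then OR in the
 * offset of the guest register save area just below the stack top. */
static uintptr_t get_guest_regs(uintptr_t rsp)
{
    return (rsp & ~(uintptr_t)(STACK_SIZE - 1))
           | (STACK_SIZE - CPUINFO_sizeof);
}

/* GET_CURRENT: OR-ing %rsp with STACK_SIZE-8 and clearing the low three
 * bits addresses the top 8-byte slot of the stack, wherever %rsp points
 * inside it; the macro then loads the vcpu pointer stored there. */
static uintptr_t get_current_slot(uintptr_t rsp)
{
    return (rsp | (STACK_SIZE - 8)) & ~(uintptr_t)7;
}

int main(void)
{
    uintptr_t rsp = 0x100000 + 0x1234;   /* some %rsp inside the stack */
    printf("guest regs at %#lx, current slot at %#lx\n",
           (unsigned long)get_guest_regs(rsp),
           (unsigned long)get_current_slot(rsp));
    return 0;
}
#endif
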
        ALIGN
/* %rbx: struct vcpu */
switch_to_kernel:
        leaq  VCPU_trap_bounce(%rbx),%rdx
        /* TB_eip = (32-bit syscall && syscall32_addr) ?
         *          syscall32_addr : syscall_addr */
        xor   %eax,%eax
        cmpw  $FLAT_USER_CS32,UREGS_cs(%rsp)
        cmoveq VCPU_syscall32_addr(%rbx),%rax
        testq %rax,%rax
        cmovzq VCPU_syscall_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        /* TB_flags = VGCF_syscall_disables_events ? TBF_INTERRUPT : 0 */
        btl   $_VGCF_syscall_disables_events,VCPU_guest_context_flags(%rbx)
        setc  %cl
        leal  (,%rcx,TBF_INTERRUPT),%ecx
        movb  %cl,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        andl  $~X86_EFLAGS_DF,UREGS_eflags(%rsp)
        jmp   test_all_events
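
/*
 * The same selection logic in C, as a minimal sketch (flag values and
 * struct layout are assumptions for illustration; only the field names
 * follow the assembly's symbols).  Not built:
 */
#if 0
#include <stdint.h>

#define VGCF_syscall_disables_events (1u << 4)  /* assumption: bit 4 */
#define TBF_INTERRUPT                8          /* assumption */

struct trap_bounce_sketch { uint64_t eip; uint8_t flags; };

static void setup_syscall_bounce(struct trap_bounce_sketch *tb,
                                 uint64_t syscall_addr,
                                 uint64_t syscall32_addr,
                                 int is_32bit_cs,
                                 unsigned int guest_context_flags)
{
    /* TB_eip = (32-bit syscall && syscall32_addr) ? syscall32_addr
     *                                             : syscall_addr   */
    tb->eip = (is_32bit_cs && syscall32_addr) ? syscall32_addr
                                              : syscall_addr;
    /* TB_flags = VGCF_syscall_disables_events ? TBF_INTERRUPT : 0  */
    tb->flags = (guest_context_flags & VGCF_syscall_disables_events)
                ? TBF_INTERRUPT : 0;
}

int main(void)
{
    struct trap_bounce_sketch tb;
    setup_syscall_bounce(&tb, 0xffffffff80000000ull, 0, 0,
                         VGCF_syscall_disables_events);
    return tb.flags != TBF_INTERRUPT;   /* exit 0 when the logic holds */
}
#endif
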
/* %rbx: struct vcpu, interrupts disabled */
restore_all_guest:
        ASSERT_INTERRUPTS_DISABLED
        RESTORE_ALL
        testw $TRAP_syscall,4(%rsp)
        jz    iret_exit_to_guest

        addq  $8,%rsp
        popq  %rcx                    # RIP
        popq  %r11                    # CS
        cmpw  $FLAT_USER_CS32,%r11
        popq  %r11                    # RFLAGS
        popq  %rsp                    # RSP
        je    1f
        sysretq
1:      sysretl
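
/*
 * The frame unwound above, viewed as a C struct: a sketch assuming the
 * standard hardware frame topped by Xen's combined error_code /
 * entry_vector slot (layout illustrative).  SS is never popped because
 * SYSRET reloads it from MSR state.  Not built:
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct exit_frame_sketch {
    uint32_t error_code;    /* 0(%rsp) after RESTORE_ALL */
    uint32_t entry_vector;  /* 4(%rsp): TRAP_syscall tested here */
    uint64_t rip;           /* popped into %rcx for SYSRET */
    uint64_t cs;            /* FLAT_USER_CS32 selects sysretl */
    uint64_t rflags;        /* reloaded via %r11 by SYSRET */
    uint64_t rsp;
    uint64_t ss;            /* left in place; SYSRET ignores it */
};

int main(void)
{
    printf("frame is %zu bytes; CS at offset %zu\n",
           sizeof(struct exit_frame_sketch),
           offsetof(struct exit_frame_sketch, cs));
    return 0;
}
#endif
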
        ALIGN
/* No special register assumptions. */
iret_exit_to_guest:
        addq  $8,%rsp
.Lft0:  iretq

.section .fixup,"ax"
.Lfx0:  sti
        SAVE_ALL
        movq  UREGS_error_code(%rsp),%rsi
        movq  %rsp,%rax
        andq  $~0xf,%rsp
        pushq $__HYPERVISOR_DS         # SS
        pushq %rax                     # RSP
        pushfq                         # RFLAGS
        pushq $__HYPERVISOR_CS         # CS
        leaq  .Ldf0(%rip),%rax
        pushq %rax                     # RIP
        pushq %rsi                     # error_code/entry_vector
        jmp   handle_exception
.Ldf0:  GET_CURRENT(%rbx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_failsafe_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
        bt    $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%rbx)
        jnc   1f
        orb   $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
1:      call  create_bounce_frame
        jmp   test_all_events
.previous
.section __pre_ex_table,"a"
        .quad .Lft0,.Lfx0
.previous
.section __ex_table,"a"
        .quad .Ldf0,failsafe_callback
.previous

        ALIGN
/* No special register assumptions. */
restore_all_xen:
        RESTORE_ALL
        addq  $8,%rsp
        iretq

/*
 * When entering SYSCALL from kernel mode:
 *  %rax                             = hypercall vector
 *  %rdi, %rsi, %rdx, %r10, %r8, %r9 = hypercall arguments
 *  %rcx                             = SYSCALL-saved %rip
 *  NB. We must move %r10 to %rcx for C function-calling ABI.
 *
 * When entering SYSCALL from user mode:
 *  Vector directly to the registered arch.syscall_addr.
 *
 * Initial work is done by per-CPU stack trampolines. At this point %rsp
 * has been initialised to point at the correct Xen stack, and %rsp, %rflags
 * and %cs have been saved. All other registers are still to be saved onto
 * the stack, starting with %rip, and an appropriate %ss must be saved into
 * the space left by the trampoline.
 */
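
/*
 * The calling convention described above, as a C sketch: the hypercall
 * number arrives in %rax and the six arguments in %rdi, %rsi, %rdx,
 * %r10, %r8, %r9.  The SysV C ABI wants argument 4 in %rcx, which
 * SYSCALL clobbers with the return %rip, hence the %r10 -> %rcx move
 * before the indirect call.  Names below are illustrative.  Not built:
 */
#if 0
#include <stdio.h>

typedef long (*hypercall_fn_t)(unsigned long, unsigned long, unsigned long,
                               unsigned long, unsigned long, unsigned long);

static long do_example_op(unsigned long a1, unsigned long a2,
                          unsigned long a3, unsigned long a4,
                          unsigned long a5, unsigned long a6)
{
    return (long)(a1 + a2 + a3 + a4 + a5 + a6);
}

#define NR_SKETCH_HYPERCALLS 1
static const hypercall_fn_t table[NR_SKETCH_HYPERCALLS] = { do_example_op };

static long dispatch(unsigned long rax, unsigned long rdi, unsigned long rsi,
                     unsigned long rdx, unsigned long r10, unsigned long r8,
                     unsigned long r9)
{
    if (rax >= NR_SKETCH_HYPERCALLS)
        return -38;                    /* -ENOSYS, as in bad_hypercall */
    /* "movq %r10,%rcx" becomes plain argument order in C. */
    return table[rax](rdi, rsi, rdx, r10, r8, r9);
}

int main(void)
{
    printf("%ld\n", dispatch(0, 1, 2, 3, 4, 5, 6));   /* prints 21 */
    return 0;
}
#endif
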
        ALIGN
ENTRY(syscall_enter)
        sti
        movl  $FLAT_KERNEL_SS,24(%rsp)
        pushq %rcx
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        movq  24(%rsp),%r11 /* Re-load user RFLAGS into %r11 before SAVE_ALL */
        SAVE_ALL
        GET_CURRENT(%rbx)
        movq  VCPU_domain(%rbx),%rcx
        testb $1,DOMAIN_is_32bit_pv(%rcx)
        jnz   compat_syscall
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jz    switch_to_kernel

/*hypercall:*/
        movq  %r10,%rcx
        cmpq  $NR_hypercalls,%rax
        jae   bad_hypercall
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        pushq %rdi; pushq %rsi; pushq %rdx; pushq %rcx; pushq %r8 ; pushq %r9
        leaq  hypercall_args_table(%rip),%r10
        movq  $6,%rcx
        sub   (%r10,%rax,1),%cl
        movq  %rsp,%rdi
        movl  $0xDEADBEEF,%eax
        rep   stosq
        popq  %r9 ; popq %r8 ; popq %rcx; popq %rdx; popq %rsi; popq %rdi
        movq  UREGS_rax(%rsp),%rax
        pushq %rax
        pushq UREGS_rip+8(%rsp)
#define SHADOW_BYTES 16 /* Shadow EIP + shadow hypercall # */
#else
#define SHADOW_BYTES 0  /* No on-stack shadow state */
#endif
        cmpb  $0,tb_init_done(%rip)
        je    tracing_off
        call  trace_hypercall
        /* Now restore all the registers that trace_hypercall clobbered */
        movq  UREGS_rax+SHADOW_BYTES(%rsp),%rax /* Hypercall # */
        movq  UREGS_rdi+SHADOW_BYTES(%rsp),%rdi /* Arg 1 */
        movq  UREGS_rsi+SHADOW_BYTES(%rsp),%rsi /* Arg 2 */
        movq  UREGS_rdx+SHADOW_BYTES(%rsp),%rdx /* Arg 3 */
        movq  UREGS_r10+SHADOW_BYTES(%rsp),%rcx /* Arg 4 */
        movq  UREGS_r8+SHADOW_BYTES(%rsp),%r8   /* Arg 5 */
        movq  UREGS_r9+SHADOW_BYTES(%rsp),%r9   /* Arg 6 */
#undef SHADOW_BYTES
tracing_off:
        leaq  hypercall_table(%rip),%r10
        PERFC_INCR(PERFC_hypercalls, %rax, %rbx)
        callq *(%r10,%rax,8)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs used by this hypercall. */
        popq  %r10         # Shadow RIP
        cmpq  %r10,UREGS_rip+8(%rsp)
        popq  %rcx         # Shadow hypercall index
        jne   skip_clobber /* If RIP has changed then don't clobber. */
        leaq  hypercall_args_table(%rip),%r10
        movb  (%r10,%rcx,1),%cl
        movl  $0xDEADBEEF,%r10d
        cmpb  $1,%cl; jb skip_clobber; movq %r10,UREGS_rdi(%rsp)
        cmpb  $2,%cl; jb skip_clobber; movq %r10,UREGS_rsi(%rsp)
        cmpb  $3,%cl; jb skip_clobber; movq %r10,UREGS_rdx(%rsp)
        cmpb  $4,%cl; jb skip_clobber; movq %r10,UREGS_r10(%rsp)
        cmpb  $5,%cl; jb skip_clobber; movq %r10,UREGS_r8(%rsp)
        cmpb  $6,%cl; jb skip_clobber; movq %r10,UREGS_r9(%rsp)
skip_clobber:
#endif
        movq  %rax,UREGS_rax(%rsp)     # save the return value
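
/*
 * A sketch of the debug-build poisoning above: argument registers the
 * hypercall does not use are filled with 0xDEADBEEF before the call
 * ("movq $6,%rcx; sub (%r10,%rax,1),%cl; rep stosq" poisons the last
 * 6 - nargs pushed slots), and the saved copies of the registers it
 * does use are re-poisoned afterwards unless the saved RIP changed.
 * Table contents illustrative.  Not built:
 */
#if 0
#include <stdio.h>

#define POISON 0xDEADBEEFul

static const unsigned char args_table[] = { 1, 4, 2 };  /* per-call counts */

static void poison_unused(unsigned long regs[6], unsigned int nr)
{
    for (unsigned int i = args_table[nr]; i < 6; i++)
        regs[i] = POISON;              /* args nargs+1 .. 6 */
}

int main(void)
{
    unsigned long regs[6] = { 11, 22, 33, 44, 55, 66 };
    poison_unused(regs, 2);            /* "hypercall" 2 takes 2 args */
    for (int i = 0; i < 6; i++)
        printf("arg%d = %#lx\n", i + 1, regs[i]);
    return 0;
}
#endif
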
/* %rbx: struct vcpu */
test_all_events:
        cli                            # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%rbx),%eax
        shl   $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip),%rcx
        testl $~0,(%rcx,%rax,1)
        jnz   process_softirqs
        testb $1,VCPU_nmi_pending(%rbx)
        jnz   process_nmi
test_guest_events:
        movq  VCPU_vcpu_info(%rbx),%rax
        testb $0xFF,VCPUINFO_upcall_mask(%rax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%rax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_event_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events
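
/*
 * The exit-check policy above in C, as a sketch: softirqs first, then a
 * pending (and unmasked) NMI, then an event upcall, which is delivered
 * only when the guest has not masked upcalls.  Field names follow the
 * assembly; types are illustrative.  Not built:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct vcpu_info_sketch { uint8_t upcall_pending, upcall_mask; };

enum exit_action { DO_SOFTIRQ, DO_NMI, DO_EVENT_UPCALL, RESTORE_GUEST };

static enum exit_action next_action(unsigned long pending_softirqs,
                                    int nmi_pending, int nmi_masked,
                                    const struct vcpu_info_sketch *vi)
{
    if (pending_softirqs)
        return DO_SOFTIRQ;             /* process, then re-test */
    if (nmi_pending && !nmi_masked)
        return DO_NMI;                 /* bounce the NMI, then re-test */
    if (!vi->upcall_mask && vi->upcall_pending)
        return DO_EVENT_UPCALL;        /* bounce to VCPU_event_addr */
    return RESTORE_GUEST;
}

int main(void)
{
    struct vcpu_info_sketch vi = { .upcall_pending = 1, .upcall_mask = 0 };
    printf("%d\n", (int)next_action(0, 0, 0, &vi));  /* DO_EVENT_UPCALL */
    return 0;
}
#endif
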
        ALIGN
/* %rbx: struct vcpu */
process_softirqs:
        sti
        call  do_softirq
        jmp   test_all_events

        ALIGN
/* %rbx: struct vcpu */
process_nmi:
        testb $1,VCPU_nmi_masked(%rbx)
        jnz   test_guest_events
        sti
        movb  $0,VCPU_nmi_pending(%rbx)
        call  set_guest_nmi_trapbounce
        test  %eax,%eax
        jz    test_all_events
        movb  $1,VCPU_nmi_masked(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        call  create_bounce_frame
        jmp   test_all_events

bad_hypercall:
        movq  $-ENOSYS,UREGS_rax(%rsp)
        jmp   test_all_events

ENTRY(sysenter_entry)
        sti
        pushq $FLAT_USER_SS
        pushq $0
        pushfq
.globl sysenter_eflags_saved
sysenter_eflags_saved:
        pushq $0
        pushq $0
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        SAVE_ALL
        GET_CURRENT(%rbx)
        cmpb  $0,VCPU_sysenter_disables_events(%rbx)
        movq  $0,UREGS_rip(%rsp)   /* null rip */
        movl  $3,UREGS_cs(%rsp)    /* ring 3 null cs */
        movq  VCPU_sysenter_addr(%rbx),%rax
        setne %cl
        leaq  VCPU_trap_bounce(%rbx),%rdx
        testq %rax,%rax
        leal  (,%rcx,TBF_INTERRUPT),%ecx
        jz    2f
1:      movq  VCPU_domain(%rbx),%rdi
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  %cl,TRAPBOUNCE_flags(%rdx)
        testb $1,DOMAIN_is_32bit_pv(%rdi)
        jnz   compat_sysenter
        call  create_bounce_frame
        jmp   test_all_events
2:      movl  %eax,TRAPBOUNCE_error_code(%rdx)
        movq  VCPU_gp_fault_addr(%rbx),%rax
        movb  $(TBF_EXCEPTION|TBF_EXCEPTION_ERRCODE|TBF_INTERRUPT),%cl
        movl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
        jmp   1b
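
/*
 * A sketch of the decision above: with a registered sysenter callback
 * the bounce goes there (optionally with TBF_INTERRUPT); with a null
 * callback it is rewritten into a #GP(0) against the guest's registered
 * fault handler (%eax is zero at label 2:, so the error code is 0).
 * TBF_* values are assumptions.  Not built:
 */
#if 0
#include <stdint.h>

#define TBF_EXCEPTION         1   /* assumption */
#define TBF_EXCEPTION_ERRCODE 2   /* assumption */
#define TBF_INTERRUPT         8   /* assumption */

struct bounce_sketch { uint64_t eip; uint32_t error_code; uint8_t flags; };

static void prepare_sysenter_bounce(struct bounce_sketch *tb,
                                    uint64_t sysenter_addr,
                                    uint64_t gp_fault_addr,
                                    int sysenter_disables_events)
{
    if (sysenter_addr) {
        tb->eip   = sysenter_addr;
        tb->flags = sysenter_disables_events ? TBF_INTERRUPT : 0;
    } else {
        tb->eip        = gp_fault_addr;   /* VCPU_gp_fault_addr */
        tb->error_code = 0;
        tb->flags      = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE
                         | TBF_INTERRUPT;
    }
}

int main(void)
{
    struct bounce_sketch tb = { 0, 0, 0 };
    prepare_sysenter_bounce(&tb, 0, 0xffff800000000000ull, 1);
    return tb.flags ==
           (TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE | TBF_INTERRUPT) ? 0 : 1;
}
#endif
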
ENTRY(int80_direct_trap)
        pushq $0
        SAVE_ALL

        GET_CURRENT(%rbx)

        /* Check that the callback is non-null. */
        leaq  VCPU_int80_bounce(%rbx),%rdx
        cmpb  $0,TRAPBOUNCE_flags(%rdx)
        jz    int80_slow_path

        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jnz   compat_int80_direct_trap

        call  create_bounce_frame
        jmp   test_all_events

int80_slow_path:
        /*
         * Setup entry vector and error code as if this was a GPF caused by an
         * IDT entry with DPL==0.
         */
        movl  $((0x80 << 3) | 0x2),UREGS_error_code(%rsp)
        movl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
        /* A GPF wouldn't have incremented the instruction pointer. */
        subq  $2,UREGS_rip(%rsp)
        jmp   handle_exception_saved
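
/*
 * The forged error code follows the hardware selector-error-code format
 * for a failed IDT access: vector index in bits 3 and up, bit 1 set to
 * mean "IDT".  For vector 0x80 that is (0x80 << 3) | 2 = 0x402.  The
 * subq $2 rewinds past the two-byte "int $0x80" itself, since a real
 * #GP faults before RIP is advanced.  A one-line check (not built):
 */
#if 0
#include <stdio.h>

int main(void)
{
    unsigned int vector = 0x80;
    unsigned int ec = (vector << 3) | 2;          /* EXT=0, IDT=1, TI=0 */
    printf("forged #GP error code: %#x\n", ec);   /* prints 0x402 */
    return 0;
}
#endif
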
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK:                     */
/*   { RCX, R11, [DS-GS,] [CR2,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS }   */
/* %rdx: trap_bounce, %rbx: struct vcpu                                  */
/* On return only %rbx and %rdx are guaranteed non-clobbered.            */
create_bounce_frame:
        ASSERT_INTERRUPTS_ENABLED
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jnz   1f
        /* Push new frame at registered guest-OS stack base. */
        pushq %rdx
        movq  %rbx,%rdi
        call  toggle_guest_mode
        popq  %rdx
        movq  VCPU_kernel_sp(%rbx),%rsi
        jmp   2f
1:      /* In kernel context already: push new frame at existing %rsp. */
        movq  UREGS_rsp+8(%rsp),%rsi
        andb  $0xfc,UREGS_cs+8(%rsp)    # Indicate kernel context to guest.
2:      andq  $~0xf,%rsi                # Stack frames are 16-byte aligned.
        movq  $HYPERVISOR_VIRT_START,%rax
        cmpq  %rax,%rsi
        jb    1f                        # In +ve address space? Then okay.
        movq  $HYPERVISOR_VIRT_END+60,%rax
        cmpq  %rax,%rsi
        jb    domain_crash_synchronous  # Above Xen private area? Then okay.
1:      movb  TRAPBOUNCE_flags(%rdx),%cl
        subq  $40,%rsi
        movq  UREGS_ss+8(%rsp),%rax
.Lft2:  movq  %rax,32(%rsi)             # SS
        movq  UREGS_rsp+8(%rsp),%rax
.Lft3:  movq  %rax,24(%rsi)             # RSP
        movq  VCPU_vcpu_info(%rbx),%rax
        pushq VCPUINFO_upcall_mask(%rax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                       # TBF_INTERRUPT -> set upcall mask
        orb   %ch,VCPUINFO_upcall_mask(%rax)
        popq  %rax
        shlq  $32,%rax                  # Bits 32-39: saved_upcall_mask
        movw  UREGS_cs+8(%rsp),%ax      # Bits  0-15: CS
.Lft4:  movq  %rax,8(%rsi)              # CS / saved_upcall_mask
        shrq  $32,%rax
        testb $0xFF,%al                 # Bits 0-7: saved_upcall_mask
        setz  %ch                       # %ch == !saved_upcall_mask
        movl  UREGS_eflags+8(%rsp),%eax
        andl  $~X86_EFLAGS_IF,%eax
        addb  %ch,%ch                   # Bit 9 (EFLAGS.IF)
        orb   %ch,%ah                   # Fold EFLAGS.IF into %eax
.Lft5:  movq  %rax,16(%rsi)             # RFLAGS
        movq  UREGS_rip+8(%rsp),%rax
.Lft6:  movq  %rax,(%rsi)               # RIP
        testb $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subq  $8,%rsi
        movl  TRAPBOUNCE_error_code(%rdx),%eax
.Lft7:  movq  %rax,(%rsi)               # ERROR CODE
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subq  $32,%rsi
        movl  %gs,%eax
.Lft8:  movq  %rax,24(%rsi)             # GS
        movl  %fs,%eax
.Lft9:  movq  %rax,16(%rsi)             # FS
        movl  %es,%eax
.Lft10: movq  %rax,8(%rsi)              # ES
        movl  %ds,%eax
.Lft11: movq  %rax,(%rsi)               # DS
2:      subq  $16,%rsi
        movq  UREGS_r11+8(%rsp),%rax
.Lft12: movq  %rax,8(%rsi)              # R11
        movq  UREGS_rcx+8(%rsp),%rax
.Lft13: movq  %rax,(%rsi)               # RCX
        /* Rewrite our stack frame and return to guest-OS mode. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        /* Also clear AC: alignment checks shouldn't trigger in kernel mode. */
        movl  $TRAP_syscall,UREGS_entry_vector+8(%rsp)
        andl  $~(X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|\
                 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
        movq  $FLAT_KERNEL_SS,UREGS_ss+8(%rsp)
        movq  %rsi,UREGS_rsp+8(%rsp)
        movq  $FLAT_KERNEL_CS,UREGS_cs+8(%rsp)
        movq  TRAPBOUNCE_eip(%rdx),%rax
        testq %rax,%rax
        jz    domain_crash_synchronous
        movq  %rax,UREGS_rip+8(%rsp)
        ret
.section __ex_table,"a"
        .quad .Lft2,domain_crash_synchronous  , .Lft3,domain_crash_synchronous
        .quad .Lft4,domain_crash_synchronous  , .Lft5,domain_crash_synchronous
        .quad .Lft6,domain_crash_synchronous  , .Lft7,domain_crash_synchronous
        .quad .Lft8,domain_crash_synchronous  , .Lft9,domain_crash_synchronous
        .quad .Lft10,domain_crash_synchronous , .Lft11,domain_crash_synchronous
        .quad .Lft12,domain_crash_synchronous , .Lft13,domain_crash_synchronous
.previous
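
/*
 * The guest-stack frame built above, viewed as C structs (illustrative;
 * the optional parts exist only when the corresponding TBF_* flags are
 * set, and the saved RFLAGS has IF replaced by !saved_upcall_mask).
 * The __ex_table entries pair every guest-stack store with
 * domain_crash_synchronous, so a bad guest stack pointer crashes the
 * domain rather than Xen.  Not built:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct bounce_frame_core {        /* always present: 40 bytes at %rsi */
    uint64_t rip;                 /*  0(%rsi) */
    uint64_t cs_and_saved_mask;   /*  8(%rsi): CS in bits 0-15,
                                   * saved_upcall_mask in bits 32-39 */
    uint64_t rflags;              /* 16(%rsi): IF = !saved_upcall_mask */
    uint64_t rsp;                 /* 24(%rsi) */
    uint64_t ss;                  /* 32(%rsi) */
};

struct bounce_frame_full {        /* ascending addresses on guest stack */
    uint64_t rcx, r11;            /* SYSCALL-clobbered regs, pushed last */
    uint64_t ds, es, fs, gs;      /* only if TBF_FAILSAFE */
    uint64_t error_code;          /* only if TBF_EXCEPTION_ERRCODE */
    struct bounce_frame_core core;
};

int main(void)
{
    printf("core frame: %zu bytes\n", sizeof(struct bounce_frame_core));
    return 0;
}
#endif
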
domain_crash_synchronous_string:
        .asciz "domain_crash_sync called from entry.S\n"

ENTRY(domain_crash_synchronous)
        # Get out of the guest-save area of the stack.
        GET_GUEST_REGS(%rax)
        movq  %rax,%rsp
        # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
        movq  CPUINFO_current_vcpu(%rax),%rax
        movq  VCPU_domain(%rax),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        setz  %al
        leal  (%rax,%rax,2),%eax
        orb   %al,UREGS_cs(%rsp)
        # printk(domain_crash_synchronous_string)
        leaq  domain_crash_synchronous_string(%rip),%rdi
        xorl  %eax,%eax
        call  printk
        jmp   __domain_crash_synchronous

        ALIGN
/* No special register assumptions. */
ENTRY(ret_from_intr)
        GET_CURRENT(%rbx)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jz    test_all_events
        jmp   compat_test_all_events

        ALIGN
/* No special register assumptions. */
ENTRY(handle_exception)
        SAVE_ALL
handle_exception_saved:
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
        jz    exception_with_ints_disabled
        sti
1:      movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        GET_CURRENT(%rbx)
        PERFC_INCR(PERFC_exceptions, %rax, %rbx)
        callq *(%rdx,%rax,8)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jnz   compat_post_handle_exception
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
        jz    test_all_events
        call  create_bounce_frame
        movb  $0,TRAPBOUNCE_flags(%rdx)
        jmp   test_all_events

/* No special register assumptions. */
exception_with_ints_disabled:
        testb $3,UREGS_cs(%rsp)           # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        movq  %rsp,%rdi
        call  search_pre_exception_table
        testq %rax,%rax                   # no fixup code for faulting EIP?
        jz    1b
        movq  %rax,UREGS_rip(%rsp)
        subq  $8,UREGS_rsp(%rsp)          # add ec/ev to previous stack frame
        testb $15,UREGS_rsp(%rsp)         # return %rsp is now aligned?
        jz    1f                          # then there is a pad quadword already
        movq  %rsp,%rsi
        subq  $8,%rsp
        movq  %rsp,%rdi
        movq  $UREGS_kernel_sizeof/8,%rcx
        rep;  movsq                       # make room for ec/ev
1:      movq  UREGS_error_code(%rsp),%rax # ec/ev
        movq  %rax,UREGS_kernel_sizeof(%rsp)
        jmp   restore_all_xen             # return to fixup code
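
/*
 * A sketch of search_pre_exception_table's contract: scan the
 * (faulting address, fixup address) pairs emitted into __pre_ex_table
 * (such as .Lft0 -> .Lfx0 above) and return the fixup, or 0 when the
 * fault is fatal.  Types illustrative.  Not built:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct ex_entry { uintptr_t addr, cont; };

static uintptr_t search_table(const struct ex_entry *t, unsigned int n,
                              uintptr_t faulting_rip)
{
    for (unsigned int i = 0; i < n; i++)
        if (t[i].addr == faulting_rip)
            return t[i].cont;   /* new RIP for the interrupted frame */
    return 0;                   /* no fixup: fatal */
}

int main(void)
{
    struct ex_entry table[] = { { 0x1000, 0x2000 } };
    printf("%#lx\n", (unsigned long)search_table(table, 1, 0x1000));
    return 0;
}
#endif
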
/* No special register assumptions. */
FATAL_exception_with_ints_disabled:
        movl  UREGS_entry_vector(%rsp),%edi
        movq  %rsp,%rsi
        call  fatal_trap
        ud2

ENTRY(divide_error)
        pushq $0
        movl  $TRAP_divide_error,4(%rsp)
        jmp   handle_exception

ENTRY(coprocessor_error)
        pushq $0
        movl  $TRAP_copro_error,4(%rsp)
        jmp   handle_exception

ENTRY(simd_coprocessor_error)
        pushq $0
        movl  $TRAP_simd_error,4(%rsp)
        jmp   handle_exception

ENTRY(device_not_available)
        pushq $0
        movl  $TRAP_no_device,4(%rsp)
        jmp   handle_exception

ENTRY(debug)
        pushq $0
        movl  $TRAP_debug,4(%rsp)
        jmp   handle_exception

ENTRY(int3)
        pushq $0
        movl  $TRAP_int3,4(%rsp)
        jmp   handle_exception

ENTRY(overflow)
        pushq $0
        movl  $TRAP_overflow,4(%rsp)
        jmp   handle_exception

ENTRY(bounds)
        pushq $0
        movl  $TRAP_bounds,4(%rsp)
        jmp   handle_exception

ENTRY(invalid_op)
        pushq $0
        movl  $TRAP_invalid_op,4(%rsp)
        jmp   handle_exception

ENTRY(coprocessor_segment_overrun)
        pushq $0
        movl  $TRAP_copro_seg,4(%rsp)
        jmp   handle_exception

ENTRY(invalid_TSS)
        movl  $TRAP_invalid_tss,4(%rsp)
        jmp   handle_exception

ENTRY(segment_not_present)
        movl  $TRAP_no_segment,4(%rsp)
        jmp   handle_exception

ENTRY(stack_segment)
        movl  $TRAP_stack_error,4(%rsp)
        jmp   handle_exception

ENTRY(general_protection)
        movl  $TRAP_gp_fault,4(%rsp)
        jmp   handle_exception

ENTRY(alignment_check)
        movl  $TRAP_alignment_check,4(%rsp)
        jmp   handle_exception

ENTRY(page_fault)
        movl  $TRAP_page_fault,4(%rsp)
        jmp   handle_exception

ENTRY(spurious_interrupt_bug)
        pushq $0
        movl  $TRAP_spurious_int,4(%rsp)
        jmp   handle_exception

ENTRY(double_fault)
        SAVE_ALL
        movq  %rsp,%rdi
        call  do_double_fault
        ud2

ENTRY(early_page_fault)
        SAVE_ALL
        movq  %rsp,%rdi
        call  do_early_page_fault
        jmp   restore_all_xen

handle_ist_exception:
        SAVE_ALL
        testb $3,UREGS_cs(%rsp)
        jz    1f
        /* Interrupted guest context. Copy the context to stack bottom. */
        GET_GUEST_REGS(%rdi)
        movq  %rsp,%rsi
        movl  $UREGS_kernel_sizeof/8,%ecx
        movq  %rdi,%rsp
        rep   movsq
1:      movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        callq *(%rdx,%rax,8)
        jmp   ret_from_intr
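
/*
 * The copy above in C: when an IST exception (NMI or MCE) interrupts
 * guest context, the UREGS_kernel_sizeof/8 saved quadwords are moved
 * from the IST stack to the fixed guest-register slot that
 * GET_GUEST_REGS points at, so the common exit path can run.  The size
 * below is an assumption for illustration.  Not built:
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define UREGS_KERNEL_SIZEOF (21 * 8)   /* assumption: 21 quadwords */

static void relocate_ist_frame(uint64_t *guest_regs,      /* GET_GUEST_REGS */
                               const uint64_t *ist_frame) /* current %rsp  */
{
    memcpy(guest_regs, ist_frame, UREGS_KERNEL_SIZEOF);   /* "rep movsq" */
}

int main(void)
{
    uint64_t src[UREGS_KERNEL_SIZEOF / 8], dst[UREGS_KERNEL_SIZEOF / 8];
    for (unsigned int i = 0; i < UREGS_KERNEL_SIZEOF / 8; i++)
        src[i] = i;
    relocate_ist_frame(dst, src);
    printf("%llu\n", (unsigned long long)dst[20]);   /* prints 20 */
    return 0;
}
#endif
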
ENTRY(nmi)
        pushq $0
        movl  $TRAP_nmi,4(%rsp)
        jmp   handle_ist_exception

ENTRY(machine_check)
        pushq $0
        movl  $TRAP_machine_check,4(%rsp)
        jmp   handle_ist_exception

.data

ENTRY(exception_table)
        .quad do_divide_error
        .quad do_debug
        .quad do_nmi
        .quad do_int3
        .quad do_overflow
        .quad do_bounds
        .quad do_invalid_op
        .quad do_device_not_available
        .quad 0 # double_fault
        .quad do_coprocessor_segment_overrun
        .quad do_invalid_TSS
        .quad do_segment_not_present
        .quad do_stack_segment
        .quad do_general_protection
        .quad do_page_fault
        .quad do_spurious_interrupt_bug
        .quad do_coprocessor_error
        .quad do_alignment_check
        .quad do_machine_check
        .quad do_simd_coprocessor_error

ENTRY(hypercall_table)
        .quad do_set_trap_table     /*  0 */
        .quad do_mmu_update
        .quad do_set_gdt
        .quad do_stack_switch
        .quad do_set_callbacks
        .quad do_fpu_taskswitch     /*  5 */
        .quad do_sched_op_compat
        .quad do_platform_op
        .quad do_set_debugreg
        .quad do_get_debugreg
        .quad do_update_descriptor  /* 10 */
        .quad do_ni_hypercall
        .quad do_memory_op
        .quad do_multicall
        .quad do_update_va_mapping
        .quad do_set_timer_op       /* 15 */
        .quad do_event_channel_op_compat
        .quad do_xen_version
        .quad do_console_io
        .quad do_physdev_op_compat
        .quad do_grant_table_op     /* 20 */
        .quad do_vm_assist
        .quad do_update_va_mapping_otherdomain
        .quad do_iret
        .quad do_vcpu_op
        .quad do_set_segment_base   /* 25 */
        .quad do_mmuext_op
        .quad do_xsm_op
        .quad do_nmi_op
        .quad do_sched_op
        .quad do_callback_op        /* 30 */
        .quad do_xenoprof_op
        .quad do_event_channel_op
        .quad do_physdev_op
        .quad do_hvm_op
        .quad do_sysctl             /* 35 */
        .quad do_domctl
        .quad do_kexec_op
        .rept NR_hypercalls-((.-hypercall_table)/8)
        .quad do_ni_hypercall
        .endr

ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table    */  /*  0 */
        .byte 4 /* do_mmu_update        */
        .byte 2 /* do_set_gdt           */
        .byte 2 /* do_stack_switch      */
        .byte 3 /* do_set_callbacks     */
        .byte 1 /* do_fpu_taskswitch    */  /*  5 */
        .byte 2 /* do_sched_op_compat   */
        .byte 1 /* do_platform_op       */
        .byte 2 /* do_set_debugreg      */
        .byte 1 /* do_get_debugreg      */
        .byte 2 /* do_update_descriptor */  /* 10 */
        .byte 0 /* do_ni_hypercall      */
        .byte 2 /* do_memory_op         */
        .byte 2 /* do_multicall         */
        .byte 3 /* do_update_va_mapping */
        .byte 1 /* do_set_timer_op      */  /* 15 */
        .byte 1 /* do_event_channel_op_compat */
        .byte 2 /* do_xen_version       */
        .byte 3 /* do_console_io        */
        .byte 1 /* do_physdev_op_compat */
        .byte 3 /* do_grant_table_op    */  /* 20 */
        .byte 2 /* do_vm_assist         */
        .byte 4 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_iret              */
        .byte 3 /* do_vcpu_op           */
        .byte 2 /* do_set_segment_base  */  /* 25 */
        .byte 4 /* do_mmuext_op         */
        .byte 1 /* do_xsm_op            */
        .byte 2 /* do_nmi_op            */
        .byte 2 /* do_sched_op          */
        .byte 2 /* do_callback_op       */  /* 30 */
        .byte 2 /* do_xenoprof_op       */
        .byte 2 /* do_event_channel_op  */
        .byte 2 /* do_physdev_op        */
        .byte 2 /* do_hvm_op            */
        .byte 1 /* do_sysctl            */  /* 35 */
        .byte 1 /* do_domctl            */
        .byte 2 /* do_kexec             */
        .byte 1 /* do_xsm_op            */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall      */
        .endr