direct-io.hg: view xen/arch/x86/x86_64/entry.S @ 5712:1a57305dd9e9

Small fix to x86_64 entry.S.

author    kaf24@firebug.cl.cam.ac.uk
date      Sun Jul 10 17:28:06 2005 +0000 (2005-07-10)
parents   9e1e2d648dce
children  25e96ea6e4c5
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

#define GET_CURRENT(reg)         \
        movq $STACK_SIZE-8, reg; \
        orq  %rsp, reg;          \
        andq $~7,reg;            \
        movq (reg),reg;
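
/*
 * GET_CURRENT relies on each CPU's hypervisor stack being STACK_SIZE-
 * aligned: OR-ing %rsp with STACK_SIZE-8 and rounding down to an 8-byte
 * boundary yields the address of the top-of-stack slot, which holds the
 * pointer to the currently-running struct vcpu.
 */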

        ALIGN
/* %rbx: struct vcpu */
switch_to_kernel:
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_syscall_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $0,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events
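
/*
 * switch_to_kernel (above) handles a SYSCALL issued while the guest was
 * in user mode: the guest's registered syscall entry point is loaded into
 * the trap bounce and create_bounce_frame builds the exception frame on
 * the guest kernel stack, so the guest kernel receives the system call.
 */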

/* %rbx: struct vcpu, interrupts disabled */
restore_all_guest:
        RESTORE_ALL
        testw $TRAP_syscall,4(%rsp)
        jz    iret_exit_to_guest
        addq  $8,%rsp
        popq  %rcx                      # RIP
        popq  %r11                      # CS
        cmpw  $__GUEST_CS32,%r11w
        popq  %r11                      # RFLAGS
        popq  %rsp                      # RSP
        je    1f
        sysretq
1:      sysretl
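
/*
 * In the SYSRET path above, the frame left after RESTORE_ALL is
 * { error_code/entry_vector, RIP, CS, RFLAGS, RSP, SS }.  TRAP_syscall in
 * the entry-vector word marks frames that may return this way; RIP and
 * RFLAGS are reloaded through %rcx and %r11 as SYSRET requires, and a
 * 32-bit guest code selector (__GUEST_CS32) selects sysretl rather than
 * sysretq.
 */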

        ALIGN
/* No special register assumptions. */
iret_exit_to_guest:
        addq  $8,%rsp
FLT1:   iretq

.section .fixup,"ax"
FIX1:   popq  -15*8-8(%rsp)             # error_code/entry_vector
        SAVE_ALL                        # 15*8 bytes pushed
        movq  -8(%rsp),%rsi             # error_code/entry_vector
        sti                             # after stack abuse (-1024(%rsp))
        pushq $__HYPERVISOR_DS          # SS
        leaq  8(%rsp),%rax
        pushq %rax                      # RSP
        pushf                           # RFLAGS
        pushq $__HYPERVISOR_CS          # CS
        leaq  DBLFLT1(%rip),%rax
        pushq %rax                      # RIP
        pushq %rsi                      # error_code/entry_vector
        jmp   error_code
DBLFLT1:GET_CURRENT(%rbx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_failsafe_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events
.previous
.section __pre_ex_table,"a"
        .quad FLT1,FIX1
.previous
.section __ex_table,"a"
        .quad DBLFLT1,failsafe_callback
.previous
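
/*
 * Recovery path for a faulting return to the guest: if FLT1 (the iretq
 * above) faults, the __pre_ex_table entry redirects execution to FIX1,
 * which rebuilds a hypervisor frame whose return address is DBLFLT1 and
 * re-enters error_code.  The fault is then handled as though it occurred
 * at DBLFLT1, whose __ex_table entry steers recovery to failsafe_callback,
 * bouncing the failure to the guest's registered failsafe handler.
 */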

        ALIGN
/* No special register assumptions. */
restore_all_xen:
        RESTORE_ALL
        addq  $8,%rsp
        iretq

/*
 * When entering SYSCALL from kernel mode:
 *  %rax                             = hypercall vector
 *  %rdi, %rsi, %rdx, %r10, %r8, %r9 = hypercall arguments
 *  %r11, %rcx                       = SYSCALL-saved %rflags and %rip
 *  NB. We must move %r10 to %rcx for the C function-calling ABI.
 *
 * When entering SYSCALL from user mode:
 *  Vector directly to the registered arch.syscall_addr.
 *
 * Initial work is done by per-CPU stack trampolines. At this point %rsp
 * has been initialised to point at the correct Xen stack, and %rsp, %rflags
 * and %cs have been saved. All other registers are still to be saved onto
 * the stack, starting with %rip, and an appropriate %ss must be saved into
 * the space left by the trampoline.
 */
        ALIGN
ENTRY(syscall_enter)
        sti
        movl  $__GUEST_SS,24(%rsp)
        pushq %rcx
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        SAVE_ALL
        GET_CURRENT(%rbx)
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jz    switch_to_kernel

/*hypercall:*/
        movq  %r10,%rcx
        andq  $(NR_hypercalls-1),%rax
        leaq  hypercall_table(%rip),%r10
        PERFC_INCR(PERFC_hypercalls, %rax)
        callq *(%r10,%rax,8)
        movq  %rax,UREGS_rax(%rsp)      # save the return value
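
/*
 * Hypercall dispatch above: the vector in %rax is masked to the table
 * size, %r10 is moved into %rcx to match the C calling convention, and
 * the handler is called through the 8-byte-per-entry hypercall_table.
 * Its return value is written back into the saved %rax so RESTORE_ALL
 * returns it to the guest.
 */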

/* %rbx: struct vcpu */
test_all_events:
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%rbx),%eax
        shl   $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip),%rcx
        testl $~0,(%rcx,%rax,1)
        jnz   process_softirqs
/*test_guest_events:*/
        movq  VCPU_vcpu_info(%rbx),%rax
        testb $0xFF,VCPUINFO_upcall_mask(%rax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%rax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_event_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events
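
/*
 * test_all_events loops until nothing is outstanding: pending softirqs
 * are run first, then, unless the guest has masked upcalls, any pending
 * event is delivered by bouncing to the guest's registered event callback
 * (TBF_INTERRUPT additionally masks further upcalls, with the old mask
 * saved in the bounce frame).  Only when no work remains does control
 * pass to restore_all_guest.
 */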

#ifdef CONFIG_VMX
/*
 * At VMExit time the processor saves the guest selectors, rsp, rip,
 * and rflags. Therefore we don't save them, but simply decrement
 * the kernel stack pointer to make it consistent with the stack frame
 * at usual interruption time. The host's rflags is not saved by VMX,
 * so we set it to a fixed value.
 *
 * We also need the room, especially because the orig_eax field is used
 * by do_IRQ(). Compared with cpu_user_regs, we skip pushing the following:
 *   (10) u64 gs;
 *   (9)  u64 fs;
 *   (8)  u64 ds;
 *   (7)  u64 es;
 *        <- get_stack_bottom() (= HOST_ESP)
 *   (6)  u64 ss;
 *   (5)  u64 rsp;
 *   (4)  u64 rflags;
 *   (3)  u64 cs;
 *   (2)  u64 rip;
 *   (2/1) u32 entry_vector;
 *   (1/1) u32 error_code;
 */
#define VMX_MONITOR_RFLAGS 0x202 /* IF on */
#define NR_SKIPPED_REGS    6     /* See the above explanation */
#define VMX_SAVE_ALL_NOSEGREGS                  \
        pushq $VMX_MONITOR_RFLAGS;              \
        popfq;                                  \
        subq  $(NR_SKIPPED_REGS*8), %rsp;       \
        pushq %rdi;                             \
        pushq %rsi;                             \
        pushq %rdx;                             \
        pushq %rcx;                             \
        pushq %rax;                             \
        pushq %r8;                              \
        pushq %r9;                              \
        pushq %r10;                             \
        pushq %r11;                             \
        pushq %rbx;                             \
        pushq %rbp;                             \
        pushq %r12;                             \
        pushq %r13;                             \
        pushq %r14;                             \
        pushq %r15;                             \

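/*
 * VMX_SAVE_ALL_NOSEGREGS first loads the fixed host rflags value, then
 * skips the NR_SKIPPED_REGS slots (ss/rsp/rflags/cs/rip and the
 * error_code/entry_vector word) and pushes the general registers in
 * cpu_user_regs order, so C code can treat the resulting stack frame as
 * a (partial) struct cpu_user_regs.
 */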

ENTRY(vmx_asm_vmexit_handler)
        /* selectors are restored/saved by VMX */
        VMX_SAVE_ALL_NOSEGREGS
        call  vmx_vmexit_handler
        jmp   vmx_asm_do_resume

ENTRY(vmx_asm_do_launch)
        popq  %r15
        popq  %r14
        popq  %r13
        popq  %r12
        popq  %rbp
        popq  %rbx
        popq  %r11
        popq  %r10
        popq  %r9
        popq  %r8
        popq  %rax
        popq  %rcx
        popq  %rdx
        popq  %rsi
        popq  %rdi
        addq  $(NR_SKIPPED_REGS*8), %rsp
        /* VMLAUNCH */
        .byte 0x0f,0x01,0xc2
        pushfq
        call  vm_launch_fail
        hlt
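
/*
 * VMLAUNCH is emitted as raw opcode bytes (0f 01 c2), presumably because
 * the assemblers of the day did not know the mnemonic.  On success the
 * CPU enters the guest, so the pushfq/vm_launch_fail/hlt sequence runs
 * only if the launch fails.
 */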

        ALIGN

ENTRY(vmx_asm_do_resume)
vmx_test_all_events:
        GET_CURRENT(%rbx)
/* test_all_events: */
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%rbx),%eax
        shl   $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip), %rdx
        testl $~0,(%rdx,%rax,1)
        jnz   vmx_process_softirqs

vmx_restore_all_guest:
        call  load_cr2
        /*
         * Check if we are going back to VMX-based VM
         * By this time, all the setups in the VMCS must be complete.
         */
        popq  %r15
        popq  %r14
        popq  %r13
        popq  %r12
        popq  %rbp
        popq  %rbx
        popq  %r11
        popq  %r10
        popq  %r9
        popq  %r8
        popq  %rax
        popq  %rcx
        popq  %rdx
        popq  %rsi
        popq  %rdi
        addq  $(NR_SKIPPED_REGS*8), %rsp
        /* VMRESUME */
        .byte 0x0f,0x01,0xc3
        pushfq
        call  vm_resume_fail
        /* Should never reach here */
        hlt

        ALIGN
vmx_process_softirqs:
        sti
        call  do_softirq
        jmp   vmx_test_all_events
#endif

        ALIGN
/* %rbx: struct vcpu */
process_softirqs:
        sti
        call  do_softirq
        jmp   test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK:                    */
/*   { RCX, R11, [DS-GS,] [CR2,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS }  */
/* %rdx: trap_bounce, %rbx: struct vcpu                                 */
/* On return only %rbx is guaranteed non-clobbered.                     */
create_bounce_frame:
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jnz   1f
        /* Push new frame at registered guest-OS stack base. */
        pushq %rdx
        movq  %rbx,%rdi
        call  toggle_guest_mode
        popq  %rdx
        movq  VCPU_kernel_sp(%rbx),%rsi
        jmp   2f
1:      /* In kernel context already: push new frame at existing %rsp. */
        movq  UREGS_rsp+8(%rsp),%rsi
        andb  $0xfc,UREGS_cs+8(%rsp)    # Indicate kernel context to guest.
2:      movq  $HYPERVISOR_VIRT_START,%rax
        cmpq  %rax,%rsi
        jb    1f                        # In +ve address space? Then okay.
        movq  $HYPERVISOR_VIRT_END+60,%rax
        cmpq  %rax,%rsi
        jb    domain_crash_synchronous  # Above Xen private area? Then okay.
1:      movb  TRAPBOUNCE_flags(%rdx),%cl
        subq  $40,%rsi
        movq  UREGS_ss+8(%rsp),%rax
FLT2:   movq  %rax,32(%rsi)             # SS
        movq  UREGS_rsp+8(%rsp),%rax
FLT3:   movq  %rax,24(%rsi)             # RSP
        movq  UREGS_eflags+8(%rsp),%rax
FLT4:   movq  %rax,16(%rsi)             # RFLAGS
        movq  VCPU_vcpu_info(%rbx),%rax
        pushq VCPUINFO_upcall_mask(%rax)
        testb $TBF_INTERRUPT,%cl
        setnz VCPUINFO_upcall_mask(%rax)# TBF_INTERRUPT -> set upcall mask
        popq  %rax
        shlq  $32,%rax                  # Bits 32-39: saved_upcall_mask
        movw  UREGS_cs+8(%rsp),%ax      # Bits  0-15: CS
FLT5:   movq  %rax,8(%rsi)              # CS/saved_upcall_mask
        movq  UREGS_rip+8(%rsp),%rax
FLT6:   movq  %rax,(%rsi)               # RIP
        testb $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subq  $8,%rsi
        movl  TRAPBOUNCE_error_code(%rdx),%eax
FLT7:   movq  %rax,(%rsi)               # ERROR CODE
        testb $TBF_EXCEPTION_CR2,%cl
        jz    2f
        subq  $8,%rsi
        movq  TRAPBOUNCE_cr2(%rdx),%rax
FLT8:   movq  %rax,(%rsi)               # CR2
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subq  $32,%rsi
        movl  %gs,%eax
FLT9:   movq  %rax,24(%rsi)             # GS
        movl  %fs,%eax
FLT10:  movq  %rax,16(%rsi)             # FS
        movl  %es,%eax
FLT11:  movq  %rax,8(%rsi)              # ES
        movl  %ds,%eax
FLT12:  movq  %rax,(%rsi)               # DS
2:      subq  $16,%rsi
        movq  UREGS_r11+8(%rsp),%rax
FLT13:  movq  %rax,8(%rsi)              # R11
        movq  UREGS_rcx+8(%rsp),%rax
FLT14:  movq  %rax,(%rsi)               # RCX
        /* Rewrite our stack frame and return to guest-OS mode. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        movl  $TRAP_syscall,UREGS_entry_vector+8(%rsp)
        andl  $0xfffcbeff,UREGS_eflags+8(%rsp)
        movq  $__GUEST_SS,UREGS_ss+8(%rsp)
        movq  %rsi,UREGS_rsp+8(%rsp)
        movq  $__GUEST_CS,UREGS_cs+8(%rsp)
        movq  TRAPBOUNCE_eip(%rdx),%rax
        testq %rax,%rax
        jz    domain_crash_synchronous
        movq  %rax,UREGS_rip+8(%rsp)
        movb  $0,TRAPBOUNCE_flags(%rdx)
        ret
.section __ex_table,"a"
        .quad FLT2,domain_crash_synchronous  , FLT3,domain_crash_synchronous
        .quad FLT4,domain_crash_synchronous  , FLT5,domain_crash_synchronous
        .quad FLT6,domain_crash_synchronous  , FLT7,domain_crash_synchronous
        .quad FLT8,domain_crash_synchronous  , FLT9,domain_crash_synchronous
        .quad FLT10,domain_crash_synchronous , FLT11,domain_crash_synchronous
        .quad FLT12,domain_crash_synchronous , FLT13,domain_crash_synchronous
        .quad FLT14,domain_crash_synchronous
.previous
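
/*
 * Every FLT2..FLT14 store above writes to the guest stack through an
 * exception-table entry, so a bad guest stack pointer faults into
 * domain_crash_synchronous (killing the offending domain) rather than
 * taking down the hypervisor.
 */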

        ALIGN
/* %rbx: struct vcpu */
process_guest_exception_and_events:
        leaq  VCPU_trap_bounce(%rbx),%rdx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
        jz    test_all_events
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
/* No special register assumptions. */
ENTRY(ret_from_intr)
        GET_CURRENT(%rbx)
        testb $3,UREGS_cs(%rsp)
        jnz   test_all_events
        jmp   restore_all_xen

        ALIGN
/* No special register assumptions. */
error_code:
        SAVE_ALL
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
        jz    exception_with_ints_disabled
        sti
        movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        GET_CURRENT(%rbx)
        PERFC_INCR(PERFC_exceptions, %rax)
        callq *(%rdx,%rax,8)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        jmp   process_guest_exception_and_events

/* No special register assumptions. */
exception_with_ints_disabled:
        testb $3,UREGS_cs(%rsp)         # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        movq  %rsp,%rdi
        call  search_pre_exception_table
        testq %rax,%rax                 # no fixup code for faulting EIP?
        jz    FATAL_exception_with_ints_disabled
        movq  %rax,UREGS_rip(%rsp)
        subq  $8,UREGS_rsp(%rsp)        # add ec/ev to previous stack frame
        testb $15,UREGS_rsp(%rsp)       # return %rsp is now aligned?
        jz    1f                        # then there is a pad quadword already
        movq  %rsp,%rsi
        subq  $8,%rsp
        movq  %rsp,%rdi
        movq  $UREGS_kernel_sizeof/8,%rcx
        rep;  movsq                     # make room for ec/ev
1:      movq  UREGS_error_code(%rsp),%rax # ec/ev
        movq  %rax,UREGS_kernel_sizeof(%rsp)
        jmp   restore_all_xen           # return to fixup code
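
/*
 * The path above handles faults taken while Xen had interrupts disabled:
 * if __pre_ex_table lists fixup code for the faulting %rip (e.g. FIX1 for
 * the guest-bound iretq at FLT1), the saved %rip is redirected there and
 * the error_code/entry_vector word is replayed onto the interrupted stack
 * frame so the fixup code can recover it; otherwise the fault is fatal.
 */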

/* No special register assumptions. */
FATAL_exception_with_ints_disabled:
        movl  UREGS_entry_vector(%rsp),%edi
        movq  %rsp,%rsi
        call  fatal_trap
        ud2
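
/*
 * Exception entry stubs follow.  Vectors that do not supply a hardware
 * error code push a dummy 0 first, and every stub then stores its trap
 * number in the upper half of that slot (offset 4), giving a uniform
 * error_code/entry_vector word before the paths merge at error_code.
 */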

ENTRY(divide_error)
        pushq $0
        movl  $TRAP_divide_error,4(%rsp)
        jmp   error_code

ENTRY(coprocessor_error)
        pushq $0
        movl  $TRAP_copro_error,4(%rsp)
        jmp   error_code

ENTRY(simd_coprocessor_error)
        pushq $0
        movl  $TRAP_simd_error,4(%rsp)
        jmp   error_code

ENTRY(device_not_available)
        pushq $0
        movl  $TRAP_no_device,4(%rsp)
        jmp   error_code

ENTRY(debug)
        pushq $0
        movl  $TRAP_debug,4(%rsp)
        jmp   error_code

ENTRY(int3)
        pushq $0
        movl  $TRAP_int3,4(%rsp)
        jmp   error_code

ENTRY(overflow)
        pushq $0
        movl  $TRAP_overflow,4(%rsp)
        jmp   error_code

ENTRY(bounds)
        pushq $0
        movl  $TRAP_bounds,4(%rsp)
        jmp   error_code

ENTRY(invalid_op)
        pushq $0
        movl  $TRAP_invalid_op,4(%rsp)
        jmp   error_code

ENTRY(coprocessor_segment_overrun)
        pushq $0
        movl  $TRAP_copro_seg,4(%rsp)
        jmp   error_code

ENTRY(invalid_TSS)
        movl  $TRAP_invalid_tss,4(%rsp)
        jmp   error_code

ENTRY(segment_not_present)
        movl  $TRAP_no_segment,4(%rsp)
        jmp   error_code

ENTRY(stack_segment)
        movl  $TRAP_stack_error,4(%rsp)
        jmp   error_code

ENTRY(general_protection)
        movl  $TRAP_gp_fault,4(%rsp)
        jmp   error_code

ENTRY(alignment_check)
        movl  $TRAP_alignment_check,4(%rsp)
        jmp   error_code

ENTRY(page_fault)
        movl  $TRAP_page_fault,4(%rsp)
        jmp   error_code

ENTRY(machine_check)
        pushq $0
        movl  $TRAP_machine_check,4(%rsp)
        jmp   error_code

ENTRY(spurious_interrupt_bug)
        pushq $0
        movl  $TRAP_spurious_int,4(%rsp)
        jmp   error_code

ENTRY(double_fault)
        movl  $TRAP_double_fault,4(%rsp)
        jmp   error_code

ENTRY(nmi)
        pushq $0
        SAVE_ALL
        inb   $0x61,%al
        movl  %eax,%esi                 # reason
        movq  %rsp,%rdi                 # regs
        call  do_nmi
        jmp   restore_all_xen
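
/*
 * The NMI path reads I/O port 0x61 (system control port B), whose
 * memory-parity and I/O-check bits give do_nmi the reason for the
 * interrupt.
 */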

do_arch_sched_op:
        # Ensure we return success even if we return via schedule_tail()
        xorl  %eax,%eax
        movq  %rax,UREGS_rax+8(%rsp)
        jmp   do_sched_op

.data

ENTRY(exception_table)
        .quad do_divide_error
        .quad do_debug
        .quad 0 # nmi
        .quad do_int3
        .quad do_overflow
        .quad do_bounds
        .quad do_invalid_op
        .quad math_state_restore
        .quad do_double_fault
        .quad do_coprocessor_segment_overrun
        .quad do_invalid_TSS
        .quad do_segment_not_present
        .quad do_stack_segment
        .quad do_general_protection
        .quad do_page_fault
        .quad do_spurious_interrupt_bug
        .quad do_coprocessor_error
        .quad do_alignment_check
        .quad do_machine_check
        .quad do_simd_coprocessor_error

ENTRY(hypercall_table)
        .quad do_set_trap_table     /*  0 */
        .quad do_mmu_update
        .quad do_set_gdt
        .quad do_stack_switch
        .quad do_set_callbacks
        .quad do_fpu_taskswitch     /*  5 */
        .quad do_arch_sched_op
        .quad do_dom0_op
        .quad do_set_debugreg
        .quad do_get_debugreg
        .quad do_update_descriptor  /* 10 */
        .quad do_ni_hypercall
        .quad do_dom_mem_op
        .quad do_multicall
        .quad do_update_va_mapping
        .quad do_set_timer_op       /* 15 */
        .quad do_event_channel_op
        .quad do_xen_version
        .quad do_console_io
        .quad do_physdev_op
        .quad do_grant_table_op     /* 20 */
        .quad do_vm_assist
        .quad do_update_va_mapping_otherdomain
        .quad do_switch_to_user
        .quad do_boot_vcpu
        .quad do_set_segment_base   /* 25 */
        .quad do_mmuext_op
        .rept NR_hypercalls-((.-hypercall_table)/8)
        .quad do_ni_hypercall
        .endr