xen/arch/x86/x86_64/compat/entry.S @ 15416:b35b8053012e (direct-io.hg)

Fix x86/64 failsafe callback handling.
Signed-off-by: Keir Fraser <keir@xensource.com>

author   kfraser@localhost.localdomain
date     Thu Jun 21 18:02:50 2007 +0100
parents  d0acb5a3e8d8
/*
 * Compatibility hypercall routines.
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <public/xen.h>
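
/*
 * GET_GUEST_REGS: point 'reg' at the guest register frame saved at the top
 * of the per-CPU hypervisor stack (stack base | (STACK_SIZE-CPUINFO_sizeof)).
 */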
#define GET_GUEST_REGS(reg)                     \
        movq $~(STACK_SIZE-1),reg;              \
        andq %rsp,reg;                          \
        orq  $(STACK_SIZE-CPUINFO_sizeof),reg;
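
/*
 * GET_CURRENT: load the current vcpu pointer, which is kept in the last
 * 8-byte slot at the top of the hypervisor stack.
 */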
#define GET_CURRENT(reg)                        \
        movq $STACK_SIZE-8, reg;                \
        orq  %rsp, reg;                         \
        andq $~7,reg;                           \
        movq (reg),reg;
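
/*
 * 32-bit (compat) guest hypercall entry. The guest passes the hypercall
 * number in %eax and up to six arguments in %ebx, %ecx, %edx, %esi, %edi
 * and %ebp; the argument shuffling below relies on that layout.
 */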
        ALIGN
ENTRY(compat_hypercall)
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        SAVE_ALL
        GET_CURRENT(%rbx)

        cmpl  $NR_hypercalls,%eax
        jae   compat_bad_hypercall
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        pushq UREGS_rbx(%rsp); pushq %rcx; pushq %rdx; pushq %rsi; pushq %rdi
        pushq UREGS_rbp+5*8(%rsp)
        leaq  compat_hypercall_args_table(%rip),%r10
        movq  $6,%rcx
        subb  (%r10,%rax,1),%cl
        movq  %rsp,%rdi
        movl  $0xDEADBEEF,%eax
        rep   stosq
        popq  %r8 ; popq  %r9 ; xchgl %r8d,%r9d /* Args 5&6: zero extend */
        popq  %rdx; popq  %rcx; xchgl %edx,%ecx /* Args 3&4: zero extend */
        popq  %rdi; popq  %rsi; xchgl %edi,%esi /* Args 1&2: zero extend */
        movl  UREGS_rax(%rsp),%eax
        pushq %rax
        pushq UREGS_rip+8(%rsp)
#else
        /* Relocate argument registers and zero-extend to 64 bits. */
        movl  %eax,%eax              /* Hypercall #  */
        xchgl %ecx,%esi              /* Arg 2, Arg 4 */
        movl  %edx,%edx              /* Arg 3        */
        movl  %edi,%r8d              /* Arg 5        */
        movl  %ebp,%r9d              /* Arg 6        */
        movl  UREGS_rbx(%rsp),%edi   /* Arg 1        */
#endif
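        /* Dispatch via the compat hypercall table; %eax was range-checked above. */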
        leaq  compat_hypercall_table(%rip),%r10
        PERFC_INCR(PERFC_hypercalls, %rax, %rbx)
        callq *(%r10,%rax,8)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs used by this hypercall. */
        popq  %r10                   # Shadow RIP
        cmpq  %r10,UREGS_rip+8(%rsp)
        popq  %rcx                   # Shadow hypercall index
        jne   compat_skip_clobber    /* If RIP has changed then don't clobber. */
        leaq  compat_hypercall_args_table(%rip),%r10
        movb  (%r10,%rcx,1),%cl
        movl  $0xDEADBEEF,%r10d
        testb %cl,%cl; jz compat_skip_clobber; movl %r10d,UREGS_rbx(%rsp)
        cmpb  $2, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rcx(%rsp)
        cmpb  $3, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rdx(%rsp)
        cmpb  $4, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rsi(%rsp)
        cmpb  $5, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rdi(%rsp)
        cmpb  $6, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rbp(%rsp)
compat_skip_clobber:
#endif
        movl  %eax,UREGS_rax(%rsp)   # save the return value
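
/*
 * Exit path back to the guest: check for pending softirqs, NMIs and event
 * channel upcalls before restoring the guest register frame.
 */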
/* %rbx: struct vcpu */
ENTRY(compat_test_all_events)
        cli                          # tests must not race interrupts
/*compat_test_softirqs:*/
        movl  VCPU_processor(%rbx),%eax
        shlq  $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip),%rcx
        testl $~0,(%rcx,%rax,1)
        jnz   compat_process_softirqs
        testb $1,VCPU_nmi_pending(%rbx)
        jnz   compat_process_nmi
compat_test_guest_events:
        movq  VCPU_vcpu_info(%rbx),%rax
        testb $0xFF,COMPAT_VCPUINFO_upcall_mask(%rax)
        jnz   compat_restore_all_guest
        testb $0xFF,COMPAT_VCPUINFO_upcall_pending(%rax)
        jz    compat_restore_all_guest
/*compat_process_guest_events:*/
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movl  VCPU_event_addr(%rbx),%eax
        movl  %eax,TRAPBOUNCE_eip(%rdx)
        movl  VCPU_event_sel(%rbx),%eax
        movw  %ax,TRAPBOUNCE_cs(%rdx)
        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  compat_create_bounce_frame
        jmp   compat_test_all_events

        ALIGN
/* %rbx: struct vcpu */
compat_process_softirqs:
        sti
        call  do_softirq
        jmp   compat_test_all_events

        ALIGN
/* %rbx: struct vcpu */
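/* Deliver a pending NMI to the guest's registered NMI callback, unless masked. */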
compat_process_nmi:
        testb $1,VCPU_nmi_masked(%rbx)
        jnz   compat_test_guest_events
        movb  $0,VCPU_nmi_pending(%rbx)
        movl  VCPU_nmi_addr(%rbx),%eax
        testl %eax,%eax
        jz    compat_test_guest_events
        movb  $1,VCPU_nmi_masked(%rbx)
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movl  %eax,TRAPBOUNCE_eip(%rdx)
        movw  $FLAT_COMPAT_KERNEL_CS,TRAPBOUNCE_cs(%rdx)
        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  compat_create_bounce_frame
        jmp   compat_test_all_events

compat_bad_hypercall:
        movl  $-ENOSYS,UREGS_rax(%rsp)
        jmp   compat_test_all_events

/* %rbx: struct vcpu, interrupts disabled */
compat_restore_all_guest:
        ASSERT_INTERRUPTS_DISABLED
        RESTORE_ALL
        addq  $8,%rsp
.Lft0:  iretq
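
/*
 * Recovery path for a faulting iretq above (paired with .Lft0 in the
 * __pre_ex_table entry below): rebuild an exception frame on the hypervisor
 * stack, re-enter handle_exception, and arrange to resume at .Ldf0.
 */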
.section .fixup,"ax"
.Lfx0:  sti
        SAVE_ALL
        movq  UREGS_error_code(%rsp),%rsi
        movq  %rsp,%rax
        andq  $~0xf,%rsp
        pushq $__HYPERVISOR_DS         # SS
        pushq %rax                     # RSP
        pushfq                         # RFLAGS
        pushq $__HYPERVISOR_CS         # CS
        leaq  .Ldf0(%rip),%rax
        pushq %rax                     # RIP
        pushq %rsi                     # error_code/entry_vector
        jmp   handle_exception
.Ldf0:  GET_CURRENT(%rbx)
        jmp   compat_test_all_events
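/*
 * Reached via the __ex_table entry for .Ldf0: deliver the guest's registered
 * failsafe callback (VCPU_failsafe_addr/VCPU_failsafe_sel) via a trap-bounce
 * frame, masking events if VGCF_failsafe_disables_events is set.
 */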
compat_failsafe_callback:
        GET_CURRENT(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movl  VCPU_failsafe_addr(%rbx),%eax
        movl  %eax,TRAPBOUNCE_eip(%rdx)
        movl  VCPU_failsafe_sel(%rbx),%eax
        movw  %ax,TRAPBOUNCE_cs(%rdx)
        movb  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
        btq   $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%rbx)
        jnc   1f
        orb   $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
1:      call  compat_create_bounce_frame
        jmp   compat_test_all_events
.previous
.section __pre_ex_table,"a"
        .quad .Lft0,.Lfx0
.previous
.section __ex_table,"a"
        .quad .Ldf0,compat_failsafe_callback
.previous

/* %rdx: trap_bounce, %rbx: struct vcpu */
ENTRY(compat_post_handle_exception)
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
        jz    compat_test_all_events
        call  compat_create_bounce_frame
        movb  $0,TRAPBOUNCE_flags(%rdx)
        jmp   compat_test_all_events
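
/* %rdx: trap_bounce, %rbx: struct vcpu (as expected by compat_create_bounce_frame) */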
ENTRY(compat_int80_direct_trap)
        call  compat_create_bounce_frame
        jmp   compat_test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
/* {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]} */
/* %rdx: trap_bounce, %rbx: struct vcpu */
/* On return only %rbx and %rdx are guaranteed non-clobbered. */
compat_create_bounce_frame:
        ASSERT_INTERRUPTS_ENABLED
        mov   %fs,%edi
        testb $2,UREGS_cs+8(%rsp)
        jz    1f
        /* Push new frame at registered guest-OS stack base. */
        movl  VCPU_kernel_sp(%rbx),%esi
.Lft1:  mov   VCPU_kernel_ss(%rbx),%fs
        subl  $2*4,%esi
        movl  UREGS_rsp+8(%rsp),%eax
.Lft2:  movl  %eax,%fs:(%rsi)
        movl  UREGS_ss+8(%rsp),%eax
.Lft3:  movl  %eax,%fs:4(%rsi)
        jmp   2f
1:      /* In kernel context already: push new frame at existing %rsp. */
        movl  UREGS_rsp+8(%rsp),%esi
.Lft4:  mov   UREGS_ss+8(%rsp),%fs
2:
        movb  TRAPBOUNCE_flags(%rdx),%cl
        subl  $3*4,%esi
        movq  VCPU_vcpu_info(%rbx),%rax
        pushq COMPAT_VCPUINFO_upcall_mask(%rax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                    # TBF_INTERRUPT -> set upcall mask
        orb   %ch,COMPAT_VCPUINFO_upcall_mask(%rax)
        popq  %rax
        shll  $16,%eax               # Bits 16-23: saved_upcall_mask
        movw  UREGS_cs+8(%rsp),%ax   # Bits 0-15: CS
.Lft5:  movl  %eax,%fs:4(%rsi)       # CS / saved_upcall_mask
        shrl  $16,%eax
        testb %al,%al                # Bits 0-7: saved_upcall_mask
        setz  %ch                    # %ch == !saved_upcall_mask
        movl  UREGS_eflags+8(%rsp),%eax
        andl  $~X86_EFLAGS_IF,%eax
        shlb  $1,%ch                 # Bit 9 (EFLAGS.IF)
        orb   %ch,%ah                # Fold EFLAGS.IF into %eax
.Lft6:  movl  %eax,%fs:2*4(%rsi)     # EFLAGS
        movl  UREGS_rip+8(%rsp),%eax
.Lft7:  movl  %eax,%fs:(%rsi)        # EIP
        testb $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subl  $4,%esi
        movl  TRAPBOUNCE_error_code(%rdx),%eax
.Lft8:  movl  %eax,%fs:(%rsi)        # ERROR CODE
1:
        testb $TBF_FAILSAFE,%cl
        jz    2f
        subl  $4*4,%esi
        movl  %gs,%eax
.Lft9:  movl  %eax,%fs:3*4(%rsi)     # GS
.Lft10: movl  %edi,%fs:2*4(%rsi)     # FS
        movl  %es,%eax
.Lft11: movl  %eax,%fs:1*4(%rsi)     # ES
        movl  %ds,%eax
.Lft12: movl  %eax,%fs:0*4(%rsi)     # DS
2:
        /* Rewrite our stack frame and return to guest-OS mode. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl  $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
                 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
        mov   %fs,UREGS_ss+8(%rsp)
        movl  %esi,UREGS_rsp+8(%rsp)
.Lft13: mov   %edi,%fs
        movzwl TRAPBOUNCE_cs(%rdx),%eax
        /* Null selectors (0-3) are not allowed. */
        testl $~3,%eax
        jz    domain_crash_synchronous
        movl  %eax,UREGS_cs+8(%rsp)
        movl  TRAPBOUNCE_eip(%rdx),%eax
        movl  %eax,UREGS_rip+8(%rsp)
        ret
.section .fixup,"ax"
.Lfx13:
        xorl  %edi,%edi
        jmp   .Lft13
.previous
.section __ex_table,"a"
        .quad .Lft1,domain_crash_synchronous  ,  .Lft2,compat_crash_page_fault
        .quad .Lft3,compat_crash_page_fault_4 ,  .Lft4,domain_crash_synchronous
        .quad .Lft5,compat_crash_page_fault_4 ,  .Lft6,compat_crash_page_fault_8
        .quad .Lft7,compat_crash_page_fault   ,  .Lft8,compat_crash_page_fault
        .quad .Lft9,compat_crash_page_fault_12,  .Lft10,compat_crash_page_fault_8
        .quad .Lft11,compat_crash_page_fault_4,  .Lft12,compat_crash_page_fault
        .quad .Lft13,.Lfx13
.previous
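
/*
 * Fault handlers for the %fs-relative guest-stack writes above: adjust %esi
 * to the guest stack address that faulted, dump its page walk, and crash
 * the domain.
 */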
compat_crash_page_fault_12:
        addl  $4,%esi
compat_crash_page_fault_8:
        addl  $4,%esi
compat_crash_page_fault_4:
        addl  $4,%esi
compat_crash_page_fault:
.Lft14: mov   %edi,%fs
        movl  %esi,%edi
        call  show_page_walk
        jmp   domain_crash_synchronous
.section .fixup,"ax"
.Lfx14:
        xorl  %edi,%edi
        jmp   .Lft14
.previous
.section __ex_table,"a"
        .quad .Lft14,.Lfx14
.previous

.section .rodata, "a", @progbits
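
/* Compat (32-on-64) hypercall dispatch table, indexed by hypercall number. */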
ENTRY(compat_hypercall_table)
        .quad compat_set_trap_table     /*  0 */
        .quad do_mmu_update
        .quad compat_set_gdt
        .quad do_stack_switch
        .quad compat_set_callbacks
        .quad do_fpu_taskswitch         /*  5 */
        .quad do_sched_op_compat
        .quad compat_platform_op
        .quad do_set_debugreg
        .quad do_get_debugreg
        .quad compat_update_descriptor  /* 10 */
        .quad compat_ni_hypercall
        .quad compat_memory_op
        .quad compat_multicall
        .quad compat_update_va_mapping
        .quad compat_set_timer_op       /* 15 */
        .quad do_event_channel_op_compat
        .quad compat_xen_version
        .quad do_console_io
        .quad compat_physdev_op_compat
        .quad compat_grant_table_op     /* 20 */
        .quad compat_vm_assist
        .quad compat_update_va_mapping_otherdomain
        .quad compat_iret
        .quad compat_vcpu_op
        .quad compat_ni_hypercall       /* 25 */
        .quad compat_mmuext_op
        .quad do_acm_op
        .quad compat_nmi_op
        .quad compat_sched_op
        .quad compat_callback_op        /* 30 */
        .quad compat_xenoprof_op
        .quad do_event_channel_op
        .quad compat_physdev_op
        .quad do_hvm_op
        .quad do_sysctl                 /* 35 */
        .quad do_domctl
        .quad compat_kexec_op
        .rept NR_hypercalls-((.-compat_hypercall_table)/8)
        .quad compat_ni_hypercall
        .endr
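
/*
 * Number of arguments each compat hypercall takes, used by the debug-build
 * argument-register clobbering in compat_hypercall above.
 */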
ENTRY(compat_hypercall_args_table)
        .byte 1 /* compat_set_trap_table    */  /*  0 */
        .byte 4 /* compat_mmu_update        */
        .byte 2 /* compat_set_gdt           */
        .byte 2 /* compat_stack_switch      */
        .byte 4 /* compat_set_callbacks     */
        .byte 1 /* compat_fpu_taskswitch    */  /*  5 */
        .byte 2 /* compat_sched_op_compat   */
        .byte 1 /* compat_platform_op       */
        .byte 2 /* compat_set_debugreg      */
        .byte 1 /* compat_get_debugreg      */
        .byte 4 /* compat_update_descriptor */  /* 10 */
        .byte 0 /* compat_ni_hypercall      */
        .byte 2 /* compat_memory_op         */
        .byte 2 /* compat_multicall         */
        .byte 4 /* compat_update_va_mapping */
        .byte 2 /* compat_set_timer_op      */  /* 15 */
        .byte 1 /* compat_event_channel_op_compat */
        .byte 2 /* compat_xen_version       */
        .byte 3 /* compat_console_io        */
        .byte 1 /* compat_physdev_op_compat */
        .byte 3 /* compat_grant_table_op    */  /* 20 */
        .byte 2 /* compat_vm_assist         */
        .byte 5 /* compat_update_va_mapping_otherdomain */
        .byte 0 /* compat_iret              */
        .byte 3 /* compat_vcpu_op           */
        .byte 0 /* compat_ni_hypercall      */  /* 25 */
        .byte 4 /* compat_mmuext_op         */
        .byte 1 /* do_acm_op                */
        .byte 2 /* compat_nmi_op            */
        .byte 2 /* compat_sched_op          */
        .byte 2 /* compat_callback_op       */  /* 30 */
        .byte 2 /* compat_xenoprof_op       */
        .byte 2 /* compat_event_channel_op  */
        .byte 2 /* compat_physdev_op        */
        .byte 2 /* do_hvm_op                */
        .byte 1 /* do_sysctl                */  /* 35 */
        .byte 1 /* do_domctl                */
        .byte 2 /* compat_kexec_op          */
        .rept NR_hypercalls-(.-compat_hypercall_args_table)
        .byte 0 /* compat_ni_hypercall      */
        .endr