ia64/xen-unstable

view xen/arch/x86/x86_64/compat/entry.S @ 18886:e34f3e314ecf

x86/32on64: adjust address when converting syscall to fault

The faulting address is at the start of the syscall instruction rather
than at the following one.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Dec 05 15:21:59 2008 +0000 (2008-12-05)
parents a49673cd23d2
children db20b819679c
line source
1 /*
2 * Compatibility hypercall routines.
3 */
5 #include <xen/config.h>
6 #include <xen/errno.h>
7 #include <xen/softirq.h>
8 #include <asm/asm_defns.h>
9 #include <asm/apicdef.h>
10 #include <asm/page.h>
11 #include <asm/desc.h>
12 #include <public/xen.h>
/*
 * GET_GUEST_REGS(reg): load 'reg' with the address of the guest register
 * frame saved at the top of the current hypervisor stack.  %rsp is rounded
 * down to the STACK_SIZE-aligned stack base, then the constant offset
 * (STACK_SIZE - CPUINFO_sizeof) is OR'd in to point at the cpu_info area
 * holding the saved guest state.
 */
14 #define GET_GUEST_REGS(reg) \
15 movq $~(STACK_SIZE-1),reg; \
16 andq %rsp,reg; \
17 orq $(STACK_SIZE-CPUINFO_sizeof),reg;
/*
 * GET_CURRENT(reg): load 'reg' with the 'current' vcpu pointer, which is
 * stored in the topmost 8-byte slot of the hypervisor stack: form the
 * address of that slot from %rsp (OR in STACK_SIZE-8, then mask to 8-byte
 * alignment) and dereference it.
 */
19 #define GET_CURRENT(reg) \
20 movq $STACK_SIZE-8, reg; \
21 orq %rsp, reg; \
22 andq $~7,reg; \
23 movq (reg),reg;
/*
 * Hypercall entry point for compat (32-bit) guests.
 * On entry %eax holds the hypercall number; the guest's 32-bit argument
 * registers map to args 1..6 as ebx/ecx/edx/esi/edi/ebp (see the register
 * relocation below).  Build a TRAP_syscall exception frame with a zero
 * error code, save all registers, and fetch 'current' into %rbx.
 */
25 ALIGN
26 ENTRY(compat_hypercall)
27 pushq $0
28 movl $TRAP_syscall,4(%rsp)
29 SAVE_ALL
30 GET_CURRENT(%rbx)
# Range-check the hypercall number before it is used as a table index.
32 cmpl $NR_hypercalls,%eax
33 jae compat_bad_hypercall
34 #ifndef NDEBUG
35 /* Deliberately corrupt parameter regs not used by this hypercall. */
/*
 * Debug build: push the six 32-bit args, then overwrite the slots for
 * args this hypercall does not take (count taken from
 * compat_hypercall_args_table) with 0xDEADBEEF, so stray reads of unused
 * args are caught.  The args are then popped back, zero-extended into the
 * 64-bit calling-convention registers.
 */
36 pushq UREGS_rbx(%rsp); pushq %rcx; pushq %rdx; pushq %rsi; pushq %rdi
37 pushq UREGS_rbp+5*8(%rsp)
38 leaq compat_hypercall_args_table(%rip),%r10
39 movq $6,%rcx
40 subb (%r10,%rax,1),%cl
41 movq %rsp,%rdi
42 movl $0xDEADBEEF,%eax
43 rep stosq
44 popq %r8 ; popq %r9 ; xchgl %r8d,%r9d /* Args 5&6: zero extend */
45 popq %rdx; popq %rcx; xchgl %edx,%ecx /* Args 3&4: zero extend */
46 popq %rdi; popq %rsi; xchgl %edi,%esi /* Args 1&2: zero extend */
47 movl UREGS_rax(%rsp),%eax
/*
 * Keep shadow copies of the hypercall number and guest EIP on the stack;
 * they are compared after the call to detect a preempted/continued
 * hypercall (changed RIP) before clobbering argument slots.
 */
48 pushq %rax
49 pushq UREGS_rip+8(%rsp)
50 #define SHADOW_BYTES 16 /* Shadow EIP + shadow hypercall # */
51 #else
52 /* Relocate argument registers and zero-extend to 64 bits. */
53 movl %eax,%eax /* Hypercall # */
54 xchgl %ecx,%esi /* Arg 2, Arg 4 */
55 movl %edx,%edx /* Arg 3 */
56 movl %edi,%r8d /* Arg 5 */
57 movl %ebp,%r9d /* Arg 6 */
58 movl UREGS_rbx(%rsp),%edi /* Arg 1 */
59 #define SHADOW_BYTES 0 /* No on-stack shadow state */
60 #endif
# If hypercall tracing is enabled, emit a trace record, then reload the
# argument registers (which trace_hypercall may clobber) from the saved
# frame, skipping over any debug shadow state (SHADOW_BYTES).
61 cmpb $0,tb_init_done(%rip)
62 je 1f
63 call trace_hypercall
64 /* Now restore all the registers that trace_hypercall clobbered */
65 movl UREGS_rax+SHADOW_BYTES(%rsp),%eax /* Hypercall # */
66 movl UREGS_rbx+SHADOW_BYTES(%rsp),%edi /* Arg 1 */
67 movl UREGS_rcx+SHADOW_BYTES(%rsp),%esi /* Arg 2 */
68 movl UREGS_rdx+SHADOW_BYTES(%rsp),%edx /* Arg 3 */
69 movl UREGS_rsi+SHADOW_BYTES(%rsp),%ecx /* Arg 4 */
70 movl UREGS_rdi+SHADOW_BYTES(%rsp),%r8d /* Arg 5 */
71 movl UREGS_rbp+SHADOW_BYTES(%rsp),%r9d /* Arg 6 */
72 #undef SHADOW_BYTES
# Dispatch: indirect call through the compat hypercall table.
73 1: leaq compat_hypercall_table(%rip),%r10
74 PERFC_INCR(PERFC_hypercalls, %rax, %rbx)
75 callq *(%r10,%rax,8)
76 #ifndef NDEBUG
77 /* Deliberately corrupt parameter regs used by this hypercall. */
# Pop the shadow RIP/hypercall#.  If the saved guest RIP changed, the
# hypercall set up a continuation, so the saved args must be preserved;
# otherwise poison exactly the argument slots this hypercall consumed.
78 popq %r10 # Shadow RIP
79 cmpq %r10,UREGS_rip+8(%rsp)
80 popq %rcx # Shadow hypercall index
81 jne compat_skip_clobber /* If RIP has changed then don't clobber. */
82 leaq compat_hypercall_args_table(%rip),%r10
83 movb (%r10,%rcx,1),%cl
84 movl $0xDEADBEEF,%r10d
85 testb %cl,%cl; jz compat_skip_clobber; movl %r10d,UREGS_rbx(%rsp)
86 cmpb $2, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rcx(%rsp)
87 cmpb $3, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rdx(%rsp)
88 cmpb $4, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rsi(%rsp)
89 cmpb $5, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rdi(%rsp)
90 cmpb $6, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rbp(%rsp)
91 compat_skip_clobber:
92 #endif
93 movl %eax,UREGS_rax(%rsp) # save the return value
95 /* %rbx: struct vcpu */
/*
 * Exit-to-guest event loop: with interrupts disabled, test (in priority
 * order) pending softirqs, machine-check events, NMIs, and finally guest
 * virtual-interrupt upcalls.  Falls through to compat_restore_all_guest
 * when nothing is pending.  Each handler loops back here, so all
 * conditions are re-tested before actually returning to the guest.
 */
96 ENTRY(compat_test_all_events)
97 cli # tests must not race interrupts
98 /*compat_test_softirqs:*/
# Check this CPU's softirq-pending word: irq_stat + (processor << IRQSTAT_shift).
99 movl VCPU_processor(%rbx),%eax
100 shlq $IRQSTAT_shift,%rax
101 leaq irq_stat(%rip),%rcx
102 testl $~0,(%rcx,%rax,1)
103 jnz compat_process_softirqs
104 testb $1,VCPU_mce_pending(%rbx)
105 jnz compat_process_mce
106 testb $1,VCPU_nmi_pending(%rbx)
107 jnz compat_process_nmi
108 compat_test_guest_events:
# Deliver an event upcall only if the guest has events unmasked and one
# is actually pending; otherwise return to the guest as-is.
109 movq VCPU_vcpu_info(%rbx),%rax
110 testb $0xFF,COMPAT_VCPUINFO_upcall_mask(%rax)
111 jnz compat_restore_all_guest
112 testb $0xFF,COMPAT_VCPUINFO_upcall_pending(%rax)
113 jz compat_restore_all_guest
114 /*compat_process_guest_events:*/
# Bounce to the guest's registered event callback (cs:eip) as an
# interrupt (TBF_INTERRUPT => upcalls masked during delivery).
115 sti
116 leaq VCPU_trap_bounce(%rbx),%rdx
117 movl VCPU_event_addr(%rbx),%eax
118 movl %eax,TRAPBOUNCE_eip(%rdx)
119 movl VCPU_event_sel(%rbx),%eax
120 movw %ax,TRAPBOUNCE_cs(%rdx)
121 movb $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
122 call compat_create_bounce_frame
123 jmp compat_test_all_events
125 ALIGN
126 /* %rbx: struct vcpu */
/* Run pending softirqs with interrupts enabled, then re-test everything. */
127 compat_process_softirqs:
128 sti
129 call do_softirq
130 jmp compat_test_all_events
132 ALIGN
133 /* %rbx: struct vcpu */
/*
 * Deliver a pending machine-check to the guest, unless an equal- or
 * higher-priority trap is already being delivered (in which case skip to
 * the guest-event tests).  On successful trapbounce setup, save the old
 * trap priority (restored by the guest's iret hypercall) and raise the
 * current priority to MCE before bouncing.
 */
134 compat_process_mce:
135 cmpw $VCPU_TRAP_MCE,VCPU_trap_priority(%rbx)
136 jae compat_test_guest_events
137 sti
138 movb $0,VCPU_mce_pending(%rbx)
139 call set_guest_machinecheck_trapbounce
# Zero return => nothing to deliver; re-run the event tests.
140 testl %eax,%eax
141 jz compat_test_all_events
142 movw VCPU_trap_priority(%rbx),%dx # safe priority for the
143 movw %dx,VCPU_old_trap_priority(%rbx) # iret hypercall
144 movw $VCPU_TRAP_MCE,VCPU_trap_priority(%rbx)
145 jmp compat_process_trap
147 ALIGN
148 /* %rbx: struct vcpu */
/*
 * Deliver a pending NMI to the guest; mirrors compat_process_mce but at
 * NMI priority.  Falls through into compat_process_trap, which performs
 * the actual bounce-frame creation for both paths.
 */
149 compat_process_nmi:
150 cmpw $VCPU_TRAP_NMI,VCPU_trap_priority(%rbx)
151 jae compat_test_guest_events
152 sti
153 movb $0,VCPU_nmi_pending(%rbx)
154 call set_guest_nmi_trapbounce
# Zero return => nothing to deliver; re-run the event tests.
155 testl %eax,%eax
156 jz compat_test_all_events
157 movw VCPU_trap_priority(%rbx),%dx # safe priority for the
158 movw %dx,VCPU_old_trap_priority(%rbx) # iret hypercall
159 movw $VCPU_TRAP_NMI,VCPU_trap_priority(%rbx)
160 /* FALLTHROUGH */
/* Common tail: build the guest exception frame and re-test events. */
161 compat_process_trap:
162 leaq VCPU_trap_bounce(%rbx),%rdx
163 call compat_create_bounce_frame
164 jmp compat_test_all_events
/* Out-of-range hypercall number: fail the call with -ENOSYS. */
166 compat_bad_hypercall:
167 movl $-ENOSYS,UREGS_rax(%rsp)
168 jmp compat_test_all_events
170 /* %rbx: struct vcpu, interrupts disabled */
/*
 * Return to the compat guest: restore all registers, discard the error
 * code slot, and iretq back to guest context.  The iretq itself can fault
 * on bad guest state; .Lft0 is registered in __pre_ex_table so such a
 * fault is redirected to the .Lfx0 fixup below.
 */
171 compat_restore_all_guest:
172 ASSERT_INTERRUPTS_DISABLED
173 RESTORE_ALL
174 addq $8,%rsp
175 .Lft0: iretq
177 .section .fixup,"ax"
/*
 * iretq faulted: re-save state and synthesize a hypervisor exception
 * frame (SS/RSP/RFLAGS/CS/RIP/error_code) on a 16-byte-aligned stack,
 * with the return address pointing at .Ldf0, then hand off to the normal
 * exception path.  If handle_exception returns through .Ldf0, the fault
 * was fixed up; re-run the exit-to-guest tests.
 */
178 .Lfx0: sti
179 SAVE_ALL
180 movq UREGS_error_code(%rsp),%rsi
181 movq %rsp,%rax
182 andq $~0xf,%rsp
183 pushq $__HYPERVISOR_DS # SS
184 pushq %rax # RSP
185 pushfq # RFLAGS
186 pushq $__HYPERVISOR_CS # CS
187 leaq .Ldf0(%rip),%rax
188 pushq %rax # RIP
189 pushq %rsi # error_code/entry_vector
190 jmp handle_exception
191 .Ldf0: GET_CURRENT(%rbx)
192 jmp compat_test_all_events
/*
 * The exception raised at .Ldf0 could not be handled normally (see the
 * __ex_table entry below): bounce the guest's registered failsafe
 * callback instead, optionally with events disabled if the guest asked
 * for that via _VGCF_failsafe_disables_events.
 */
193 compat_failsafe_callback:
194 GET_CURRENT(%rbx)
195 leaq VCPU_trap_bounce(%rbx),%rdx
196 movl VCPU_failsafe_addr(%rbx),%eax
197 movl %eax,TRAPBOUNCE_eip(%rdx)
198 movl VCPU_failsafe_sel(%rbx),%eax
199 movw %ax,TRAPBOUNCE_cs(%rdx)
200 movb $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
201 btq $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%rbx)
202 jnc 1f
203 orb $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
204 1: call compat_create_bounce_frame
205 jmp compat_test_all_events
206 .previous
207 .section __pre_ex_table,"a"
208 .quad .Lft0,.Lfx0
209 .previous
210 .section __ex_table,"a"
211 .quad .Ldf0,compat_failsafe_callback
212 .previous
214 /* %rdx: trap_bounce, %rbx: struct vcpu */
/*
 * Tail of exception handling for compat guests: if the handler requested
 * a bounce (TBF_EXCEPTION set), build the guest exception frame and clear
 * the bounce flags, then run the exit-to-guest event tests.
 */
215 ENTRY(compat_post_handle_exception)
216 testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
217 jz compat_test_all_events
218 call compat_create_bounce_frame
219 movb $0,TRAPBOUNCE_flags(%rdx)
220 jmp compat_test_all_events
/*
 * SYSCALL from a compat guest: bounce to the guest's registered 32-bit
 * syscall handler (syscall32_sel:syscall32_addr), with TBF_INTERRUPT set
 * iff the guest asked for events to be disabled on entry.  If no valid
 * (non-null) handler selector is registered, convert the syscall into a
 * #GP fault delivered to the guest instead.
 */
222 ENTRY(compat_syscall)
223 cmpb $0,VCPU_syscall32_disables_events(%rbx)
224 movzwl VCPU_syscall32_sel(%rbx),%esi
225 movq VCPU_syscall32_addr(%rbx),%rax
226 setne %cl
227 leaq VCPU_trap_bounce(%rbx),%rdx
# Selector values 0-3 are null; fall through to the #GP path if so.
228 testl $~3,%esi
# %ecx = TBF_INTERRUPT if disables_events, else 0 (scaled-index trick).
229 leal (,%rcx,TBF_INTERRUPT),%ecx
230 jz 2f
231 1: movq %rax,TRAPBOUNCE_eip(%rdx)
232 movw %si,TRAPBOUNCE_cs(%rdx)
233 movb %cl,TRAPBOUNCE_flags(%rdx)
234 call compat_create_bounce_frame
235 jmp compat_test_all_events
/*
 * No handler registered: deliver #GP (error code 0) to the guest.  The
 * saved RIP points past the 2-byte syscall instruction; rewind it so the
 * fault is reported at the start of the syscall instruction itself
 * (see the changeset description above).
 */
236 2: movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
237 subl $2,UREGS_rip(%rsp)
238 movq VCPU_gp_fault_addr(%rbx),%rax
239 movzwl VCPU_gp_fault_sel(%rbx),%esi
240 movb $(TBF_EXCEPTION|TBF_EXCEPTION_ERRCODE|TBF_INTERRUPT),%cl
241 movl $0,TRAPBOUNCE_error_code(%rdx)
242 jmp 1b
/*
 * SYSENTER from a compat guest.  %rdx points at a trap_bounce whose flags
 * and eip the caller has already set up; here only the code selector is
 * chosen: the registered sysenter selector normally, but the #GP handler
 * selector if the entry vector already indicates #GP or if the sysenter
 * selector is null (0-3).  SS is forced to the flat compat user selector.
 */
244 ENTRY(compat_sysenter)
245 cmpl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
246 movzwl VCPU_sysenter_sel(%rbx),%eax
247 movzwl VCPU_gp_fault_sel(%rbx),%ecx
248 cmovel %ecx,%eax
249 testl $~3,%eax
250 movl $FLAT_COMPAT_USER_SS,UREGS_ss(%rsp)
251 cmovzl %ecx,%eax
252 movw %ax,TRAPBOUNCE_cs(%rdx)
253 call compat_create_bounce_frame
254 jmp compat_test_all_events
/* int80 path: trap_bounce already fully prepared by the caller; just
 * build the guest frame and run the exit-to-guest tests. */
256 ENTRY(compat_int80_direct_trap)
257 call compat_create_bounce_frame
258 jmp compat_test_all_events
260 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
261 /* {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]} */
262 /* %rdx: trap_bounce, %rbx: struct vcpu */
263 /* On return only %rbx and %rdx are guaranteed non-clobbered. */
/*
 * All guest-stack writes go through %fs (loaded with the guest stack
 * segment); the hypervisor's original %fs is kept in %edi and restored
 * at .Lft13.  Every guest access is labelled .LftN and registered in
 * __ex_table below, so a fault diverts to the matching crash handler.
 * %esi tracks the guest stack pointer as the frame is pushed downwards.
 */
264 compat_create_bounce_frame:
265 ASSERT_INTERRUPTS_ENABLED
266 mov %fs,%edi
# Interrupted guest user (ring != 0 of the guest) vs kernel context:
# user context starts a fresh frame at the registered kernel stack and
# first pushes the interrupted SS:ESP; kernel context pushes in place.
267 testb $2,UREGS_cs+8(%rsp)
268 jz 1f
269 /* Push new frame at registered guest-OS stack base. */
270 movl VCPU_kernel_sp(%rbx),%esi
271 .Lft1: mov VCPU_kernel_ss(%rbx),%fs
272 subl $2*4,%esi
273 movl UREGS_rsp+8(%rsp),%eax
274 .Lft2: movl %eax,%fs:(%rsi)
275 movl UREGS_ss+8(%rsp),%eax
276 .Lft3: movl %eax,%fs:4(%rsi)
277 jmp 2f
278 1: /* In kernel context already: push new frame at existing %rsp. */
279 movl UREGS_rsp+8(%rsp),%esi
280 .Lft4: mov UREGS_ss+8(%rsp),%fs
281 2:
282 movb TRAPBOUNCE_flags(%rdx),%cl
283 subl $3*4,%esi
# Sample the guest's upcall mask, and for TBF_INTERRUPT bounces set it
# (mask events during delivery).  The pre-bounce mask value is folded
# into bits 16-23 of the saved CS slot (saved_upcall_mask).
284 movq VCPU_vcpu_info(%rbx),%rax
285 pushq COMPAT_VCPUINFO_upcall_mask(%rax)
286 testb $TBF_INTERRUPT,%cl
287 setnz %ch # TBF_INTERRUPT -> set upcall mask
288 orb %ch,COMPAT_VCPUINFO_upcall_mask(%rax)
289 popq %rax
290 shll $16,%eax # Bits 16-23: saved_upcall_mask
291 movw UREGS_cs+8(%rsp),%ax # Bits 0-15: CS
292 .Lft5: movl %eax,%fs:4(%rsi) # CS / saved_upcall_mask
# Reflect the saved upcall mask into the pushed EFLAGS.IF bit, so the
# guest frame's IF matches its virtual interrupt state.
293 shrl $16,%eax
294 testb %al,%al # Bits 0-7: saved_upcall_mask
295 setz %ch # %ch == !saved_upcall_mask
296 movl UREGS_eflags+8(%rsp),%eax
297 andl $~X86_EFLAGS_IF,%eax
298 addb %ch,%ch # Bit 9 (EFLAGS.IF)
299 orb %ch,%ah # Fold EFLAGS.IF into %eax
300 .Lft6: movl %eax,%fs:2*4(%rsi) # EFLAGS
301 movl UREGS_rip+8(%rsp),%eax
302 .Lft7: movl %eax,%fs:(%rsi) # EIP
# Optional error-code slot below the basic frame.
303 testb $TBF_EXCEPTION_ERRCODE,%cl
304 jz 1f
305 subl $4,%esi
306 movl TRAPBOUNCE_error_code(%rdx),%eax
307 .Lft8: movl %eax,%fs:(%rsi) # ERROR CODE
308 1:
# Failsafe bounces additionally push the four data segment registers
# (the interrupted %fs value is the copy held in %edi).
309 testb $TBF_FAILSAFE,%cl
310 jz 2f
311 subl $4*4,%esi
312 movl %gs,%eax
313 .Lft9: movl %eax,%fs:3*4(%rsi) # GS
314 .Lft10: movl %edi,%fs:2*4(%rsi) # FS
315 movl %es,%eax
316 .Lft11: movl %eax,%fs:1*4(%rsi) # ES
317 movl %ds,%eax
318 .Lft12: movl %eax,%fs:0*4(%rsi) # DS
319 2:
320 /* Rewrite our stack frame and return to guest-OS mode. */
321 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
322 andl $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
323 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
# Point the saved frame at the guest handler: new SS:ESP is the segment
# used for the pushes plus the final %esi, new CS:EIP comes from the
# trap_bounce.  A null handler selector crashes the domain.
324 mov %fs,UREGS_ss+8(%rsp)
325 movl %esi,UREGS_rsp+8(%rsp)
326 .Lft13: mov %edi,%fs
327 movzwl TRAPBOUNCE_cs(%rdx),%eax
328 /* Null selectors (0-3) are not allowed. */
329 testl $~3,%eax
330 jz domain_crash_synchronous
331 movl %eax,UREGS_cs+8(%rsp)
332 movl TRAPBOUNCE_eip(%rdx),%eax
333 movl %eax,UREGS_rip+8(%rsp)
334 ret
335 .section .fixup,"ax"
/* Restoring the saved %fs faulted: fall back to a null %fs. */
336 .Lfx13:
337 xorl %edi,%edi
338 jmp .Lft13
339 .previous
340 .section __ex_table,"a"
341 .quad .Lft1,domain_crash_synchronous , .Lft2,compat_crash_page_fault
342 .quad .Lft3,compat_crash_page_fault_4 , .Lft4,domain_crash_synchronous
343 .quad .Lft5,compat_crash_page_fault_4 , .Lft6,compat_crash_page_fault_8
344 .quad .Lft7,compat_crash_page_fault , .Lft8,compat_crash_page_fault
345 .quad .Lft9,compat_crash_page_fault_12, .Lft10,compat_crash_page_fault_8
346 .quad .Lft11,compat_crash_page_fault_4 , .Lft12,compat_crash_page_fault
347 .quad .Lft13,.Lfx13
348 .previous
/*
 * Crash handlers for faults taken while writing the guest bounce frame.
 * The _12/_8/_4 entry points adjust %esi by the offset of the faulting
 * write (falling through to add 12/8/4 bytes respectively) so that %esi
 * holds the faulting guest address; then restore %fs, dump a page walk
 * for diagnosis, and crash the domain.
 */
350 compat_crash_page_fault_12:
351 addl $4,%esi
352 compat_crash_page_fault_8:
353 addl $4,%esi
354 compat_crash_page_fault_4:
355 addl $4,%esi
356 compat_crash_page_fault:
357 .Lft14: mov %edi,%fs
358 movl %esi,%edi
359 call show_page_walk
360 jmp domain_crash_synchronous
361 .section .fixup,"ax"
/* Restoring the saved %fs faulted: fall back to a null %fs. */
362 .Lfx14:
363 xorl %edi,%edi
364 jmp .Lft14
365 .previous
366 .section __ex_table,"a"
367 .quad .Lft14,.Lfx14
368 .previous
370 .section .rodata, "a", @progbits
/*
 * Dispatch table for compat-guest hypercalls, indexed by hypercall
 * number (one .quad handler pointer per entry; see the callq in
 * compat_hypercall).  Unimplemented slots point at compat_ni_hypercall,
 * and .rept pads the table out to __HYPERVISOR_arch_0 and then to
 * NR_hypercalls entries.  Must be kept in sync, entry for entry, with
 * compat_hypercall_args_table below.
 */
372 ENTRY(compat_hypercall_table)
373 .quad compat_set_trap_table /* 0 */
374 .quad do_mmu_update
375 .quad compat_set_gdt
376 .quad do_stack_switch
377 .quad compat_set_callbacks
378 .quad do_fpu_taskswitch /* 5 */
379 .quad do_sched_op_compat
380 .quad compat_platform_op
381 .quad do_set_debugreg
382 .quad do_get_debugreg
383 .quad compat_update_descriptor /* 10 */
384 .quad compat_ni_hypercall
385 .quad compat_memory_op
386 .quad compat_multicall
387 .quad compat_update_va_mapping
388 .quad compat_set_timer_op /* 15 */
389 .quad do_event_channel_op_compat
390 .quad compat_xen_version
391 .quad do_console_io
392 .quad compat_physdev_op_compat
393 .quad compat_grant_table_op /* 20 */
394 .quad compat_vm_assist
395 .quad compat_update_va_mapping_otherdomain
396 .quad compat_iret
397 .quad compat_vcpu_op
398 .quad compat_ni_hypercall /* 25 */
399 .quad compat_mmuext_op
400 .quad do_xsm_op
401 .quad compat_nmi_op
402 .quad compat_sched_op
403 .quad compat_callback_op /* 30 */
404 .quad compat_xenoprof_op
405 .quad do_event_channel_op
406 .quad compat_physdev_op
407 .quad do_hvm_op
408 .quad do_sysctl /* 35 */
409 .quad do_domctl
410 .quad compat_kexec_op
411 .rept __HYPERVISOR_arch_0-((.-compat_hypercall_table)/8)
412 .quad compat_ni_hypercall
413 .endr
414 .quad do_mca /* 48 */
415 .rept NR_hypercalls-((.-compat_hypercall_table)/8)
416 .quad compat_ni_hypercall
417 .endr
/*
 * Argument-count table, indexed by hypercall number: one byte giving how
 * many of the six argument registers each hypercall consumes.  Used only
 * in debug (!NDEBUG) builds by compat_hypercall to poison unused/used
 * argument slots.  Entries are 1 byte, so .rept padding uses the raw
 * offset (.-table) directly.  Must stay in sync with
 * compat_hypercall_table above.
 */
419 ENTRY(compat_hypercall_args_table)
420 .byte 1 /* compat_set_trap_table */ /* 0 */
421 .byte 4 /* compat_mmu_update */
422 .byte 2 /* compat_set_gdt */
423 .byte 2 /* compat_stack_switch */
424 .byte 4 /* compat_set_callbacks */
425 .byte 1 /* compat_fpu_taskswitch */ /* 5 */
426 .byte 2 /* compat_sched_op_compat */
427 .byte 1 /* compat_platform_op */
428 .byte 2 /* compat_set_debugreg */
429 .byte 1 /* compat_get_debugreg */
430 .byte 4 /* compat_update_descriptor */ /* 10 */
431 .byte 0 /* compat_ni_hypercall */
432 .byte 2 /* compat_memory_op */
433 .byte 2 /* compat_multicall */
434 .byte 4 /* compat_update_va_mapping */
435 .byte 2 /* compat_set_timer_op */ /* 15 */
436 .byte 1 /* compat_event_channel_op_compat */
437 .byte 2 /* compat_xen_version */
438 .byte 3 /* compat_console_io */
439 .byte 1 /* compat_physdev_op_compat */
440 .byte 3 /* compat_grant_table_op */ /* 20 */
441 .byte 2 /* compat_vm_assist */
442 .byte 5 /* compat_update_va_mapping_otherdomain */
443 .byte 0 /* compat_iret */
444 .byte 3 /* compat_vcpu_op */
445 .byte 0 /* compat_ni_hypercall */ /* 25 */
446 .byte 4 /* compat_mmuext_op */
447 .byte 1 /* do_xsm_op */
448 .byte 2 /* compat_nmi_op */
449 .byte 2 /* compat_sched_op */
450 .byte 2 /* compat_callback_op */ /* 30 */
451 .byte 2 /* compat_xenoprof_op */
452 .byte 2 /* compat_event_channel_op */
453 .byte 2 /* compat_physdev_op */
454 .byte 2 /* do_hvm_op */
455 .byte 1 /* do_sysctl */ /* 35 */
456 .byte 1 /* do_domctl */
457 .byte 2 /* compat_kexec_op */
458 .rept __HYPERVISOR_arch_0-(.-compat_hypercall_args_table)
459 .byte 0 /* compat_ni_hypercall */
460 .endr
461 .byte 1 /* do_mca */
462 .rept NR_hypercalls-(.-compat_hypercall_args_table)
463 .byte 0 /* compat_ni_hypercall */
464 .endr