xen/arch/x86/x86_64/compat/entry.S @ 17697:e48453f82d30

x86: Change a local label in asm entry stubs to really be local.
This prevents it appearing in crash traces, where it can be a bit confusing.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Author: Keir Fraser <keir.fraser@citrix.com>
Date:   Thu May 22 10:41:49 2008 +0100
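
Background on the change itself: on ELF targets, GNU as treats any symbol
beginning with ".L" as assembler-local, so it is never emitted into the
object file's symbol table. A fault at such an address is therefore
attributed to the nearest preceding global symbol in a crash trace, rather
than to a stray one-off label. A minimal illustration (hypothetical labels,
not from this file):

        fault_here:   iretq    /* plain label: shows up in traces by name  */
        .Lfault_here: iretq    /* .L-local: traces show <caller>+offset    */
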
/*
 * Compatibility hypercall routines.
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <public/xen.h>

#define GET_GUEST_REGS(reg)                     \
        movq $~(STACK_SIZE-1),reg;              \
        andq %rsp,reg;                          \
        orq  $(STACK_SIZE-CPUINFO_sizeof),reg;

#define GET_CURRENT(reg)                        \
        movq $STACK_SIZE-8, reg;                \
        orq  %rsp, reg;                         \
        andq $~7,reg;                           \
        movq (reg),reg;
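
/*
 * Both macros rely on each CPU stack being a STACK_SIZE-aligned,
 * power-of-two-sized block.  GET_GUEST_REGS masks %rsp down to the stack
 * base and ORs in STACK_SIZE-CPUINFO_sizeof (equivalent to addition, since
 * the base is aligned), yielding the guest register frame at the stack top.
 * GET_CURRENT fetches the struct vcpu pointer stored in the last 8 bytes
 * of the stack.  Roughly, as a C sketch for exposition only:
 *
 *   regs    = (rsp & ~(STACK_SIZE-1)) + STACK_SIZE - CPUINFO_sizeof;
 *   current = *(struct vcpu **)((rsp | (STACK_SIZE-8)) & ~7);
 */
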
        ALIGN
ENTRY(compat_hypercall)
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        SAVE_ALL
        GET_CURRENT(%rbx)

        cmpl  $NR_hypercalls,%eax
        jae   compat_bad_hypercall
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        pushq UREGS_rbx(%rsp); pushq %rcx; pushq %rdx; pushq %rsi; pushq %rdi
        pushq UREGS_rbp+5*8(%rsp)
        leaq  compat_hypercall_args_table(%rip),%r10
        movq  $6,%rcx
        subb  (%r10,%rax,1),%cl
        movq  %rsp,%rdi
        movl  $0xDEADBEEF,%eax
        rep   stosq
        popq  %r8 ; popq  %r9 ; xchgl %r8d,%r9d /* Args 5&6: zero extend */
        popq  %rdx; popq  %rcx; xchgl %edx,%ecx /* Args 3&4: zero extend */
        popq  %rdi; popq  %rsi; xchgl %edi,%esi /* Args 1&2: zero extend */
        movl  UREGS_rax(%rsp),%eax
        pushq %rax
        pushq UREGS_rip+8(%rsp)
#define SHADOW_BYTES 16 /* Shadow EIP + shadow hypercall # */
#else
        /* Relocate argument registers and zero-extend to 64 bits. */
        movl  %eax,%eax              /* Hypercall #  */
        xchgl %ecx,%esi              /* Arg 2, Arg 4 */
        movl  %edx,%edx              /* Arg 3        */
        movl  %edi,%r8d              /* Arg 5        */
        movl  %ebp,%r9d              /* Arg 6        */
        movl  UREGS_rbx(%rsp),%edi   /* Arg 1        */
#define SHADOW_BYTES 0  /* No on-stack shadow state */
#endif
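/*
 * Debug builds push all six (zero-extended) guest arguments, overwrite the
 * slots for arguments this hypercall does not take with 0xDEADBEEF (the
 * per-hypercall argument counts come from compat_hypercall_args_table
 * below), then pop them back into the SysV AMD64 argument registers.  They
 * also push shadow copies of the guest EIP and hypercall number -- the
 * SHADOW_BYTES of extra stack state -- so the post-call check can tell
 * whether the hypercall was preempted and restarted.  Release builds only
 * need the zero extension, since 32-bit guests pass arguments in 32-bit
 * registers while the 64-bit handlers take them in 64-bit ones.
 */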
        cmpb  $0,tb_init_done(%rip)
        je    1f
        call  trace_hypercall
        /* Now restore all the registers that trace_hypercall clobbered */
        movl  UREGS_rax+SHADOW_BYTES(%rsp),%eax   /* Hypercall #  */
        movl  UREGS_rbx+SHADOW_BYTES(%rsp),%edi   /* Arg 1        */
        movl  UREGS_rcx+SHADOW_BYTES(%rsp),%esi   /* Arg 2        */
        movl  UREGS_rdx+SHADOW_BYTES(%rsp),%edx   /* Arg 3        */
        movl  UREGS_rsi+SHADOW_BYTES(%rsp),%ecx   /* Arg 4        */
        movl  UREGS_rdi+SHADOW_BYTES(%rsp),%r8d   /* Arg 5        */
        movl  UREGS_rbp+SHADOW_BYTES(%rsp),%r9d   /* Arg 6        */
#undef SHADOW_BYTES
1:      leaq  compat_hypercall_table(%rip),%r10
        PERFC_INCR(PERFC_hypercalls, %rax, %rbx)
        callq *(%r10,%rax,8)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs used by this hypercall. */
        popq  %r10         # Shadow RIP
        cmpq  %r10,UREGS_rip+8(%rsp)
        popq  %rcx         # Shadow hypercall index
        jne   compat_skip_clobber /* If RIP has changed then don't clobber. */
        leaq  compat_hypercall_args_table(%rip),%r10
        movb  (%r10,%rcx,1),%cl
        movl  $0xDEADBEEF,%r10d
        testb %cl,%cl; jz compat_skip_clobber; movl %r10d,UREGS_rbx(%rsp)
        cmpb  $2, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rcx(%rsp)
        cmpb  $3, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rdx(%rsp)
        cmpb  $4, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rsi(%rsp)
        cmpb  $5, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rdi(%rsp)
        cmpb  $6, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rbp(%rsp)
compat_skip_clobber:
#endif
        movl  %eax,UREGS_rax(%rsp)       # save the return value
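
/*
 * Every path back to a 32-on-64 guest funnels through
 * compat_test_all_events: with interrupts disabled it checks, in priority
 * order, pending softirqs, pending NMIs, and pending (unmasked)
 * event-channel upcalls, bouncing to the guest's registered handlers as
 * needed, and loops until nothing is pending before finally returning to
 * the guest via compat_restore_all_guest.
 */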
/* %rbx: struct vcpu */
ENTRY(compat_test_all_events)
        cli                             # tests must not race interrupts
/*compat_test_softirqs:*/
        movl  VCPU_processor(%rbx),%eax
        shlq  $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip),%rcx
        testl $~0,(%rcx,%rax,1)
        jnz   compat_process_softirqs
        testb $1,VCPU_nmi_pending(%rbx)
        jnz   compat_process_nmi
compat_test_guest_events:
        movq  VCPU_vcpu_info(%rbx),%rax
        testb $0xFF,COMPAT_VCPUINFO_upcall_mask(%rax)
        jnz   compat_restore_all_guest
        testb $0xFF,COMPAT_VCPUINFO_upcall_pending(%rax)
        jz    compat_restore_all_guest
/*compat_process_guest_events:*/
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movl  VCPU_event_addr(%rbx),%eax
        movl  %eax,TRAPBOUNCE_eip(%rdx)
        movl  VCPU_event_sel(%rbx),%eax
        movw  %ax,TRAPBOUNCE_cs(%rdx)
        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  compat_create_bounce_frame
        jmp   compat_test_all_events
        ALIGN
/* %rbx: struct vcpu */
compat_process_softirqs:
        sti
        call  do_softirq
        jmp   compat_test_all_events

        ALIGN
/* %rbx: struct vcpu */
compat_process_nmi:
        testb $1,VCPU_nmi_masked(%rbx)
        jnz   compat_test_guest_events
        sti
        movb  $0,VCPU_nmi_pending(%rbx)
        call  set_guest_nmi_trapbounce
        testl %eax,%eax
        jz    compat_test_all_events
        movb  $1,VCPU_nmi_masked(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        call  compat_create_bounce_frame
        jmp   compat_test_all_events

compat_bad_hypercall:
        movl  $-ENOSYS,UREGS_rax(%rsp)
        jmp   compat_test_all_events

/* %rbx: struct vcpu, interrupts disabled */
compat_restore_all_guest:
        ASSERT_INTERRUPTS_DISABLED
        RESTORE_ALL
        addq  $8,%rsp
.Lft0:  iretq

.section .fixup,"ax"
.Lfx0:  sti
        SAVE_ALL
        movq  UREGS_error_code(%rsp),%rsi
        movq  %rsp,%rax
        andq  $~0xf,%rsp
        pushq $__HYPERVISOR_DS         # SS
        pushq %rax                     # RSP
        pushfq                         # RFLAGS
        pushq $__HYPERVISOR_CS         # CS
        leaq  .Ldf0(%rip),%rax
        pushq %rax                     # RIP
        pushq %rsi                     # error_code/entry_vector
        jmp   handle_exception
.Ldf0:  GET_CURRENT(%rbx)
        jmp   compat_test_all_events
compat_failsafe_callback:
        GET_CURRENT(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movl  VCPU_failsafe_addr(%rbx),%eax
        movl  %eax,TRAPBOUNCE_eip(%rdx)
        movl  VCPU_failsafe_sel(%rbx),%eax
        movw  %ax,TRAPBOUNCE_cs(%rdx)
        movb  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
        btq   $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%rbx)
        jnc   1f
        orb   $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
1:      call  compat_create_bounce_frame
        jmp   compat_test_all_events
.previous
.section __pre_ex_table,"a"
        .quad .Lft0,.Lfx0
.previous
.section __ex_table,"a"
        .quad .Ldf0,compat_failsafe_callback
.previous
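
/*
 * The two tables above tie the recovery machinery together.  A fault on
 * the .Lft0 iretq into the guest is redirected via __pre_ex_table to the
 * .Lfx0 fixup, which rebuilds an exception frame on the hypervisor stack
 * (with .Ldf0 as the apparent faulting RIP) and re-enters handle_exception
 * as if the fault had occurred inside Xen.  The __ex_table entry then maps
 * .Ldf0 to compat_failsafe_callback, so a guest-induced failure to return
 * (e.g. stale segment selectors) is bounced to the guest's registered
 * failsafe handler rather than crashing the hypervisor.
 */
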
/* %rdx: trap_bounce, %rbx: struct vcpu */
ENTRY(compat_post_handle_exception)
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
        jz    compat_test_all_events
        call  compat_create_bounce_frame
        movb  $0,TRAPBOUNCE_flags(%rdx)
        jmp   compat_test_all_events

ENTRY(compat_syscall)
        cmpb  $0,VCPU_syscall32_disables_events(%rbx)
        movzwl VCPU_syscall32_sel(%rbx),%esi
        movq  VCPU_syscall32_addr(%rbx),%rax
        setne %cl
        leaq  VCPU_trap_bounce(%rbx),%rdx
        testl $~3,%esi
        leal  (,%rcx,TBF_INTERRUPT),%ecx
        jz    2f
1:      movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  %si,TRAPBOUNCE_cs(%rdx)
        movb  %cl,TRAPBOUNCE_flags(%rdx)
        call  compat_create_bounce_frame
        jmp   compat_test_all_events
2:      movl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
        movq  VCPU_gp_fault_addr(%rbx),%rax
        movzwl VCPU_gp_fault_sel(%rbx),%esi
        movb  $(TBF_EXCEPTION|TBF_EXCEPTION_ERRCODE|TBF_INTERRUPT),%cl
        movl  $0,TRAPBOUNCE_error_code(%rdx)
        jmp   1b

ENTRY(compat_sysenter)
        cmpl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
        movzwl VCPU_sysenter_sel(%rbx),%eax
        movzwl VCPU_gp_fault_sel(%rbx),%ecx
        cmovel %ecx,%eax
        testl $~3,%eax
        movl  $FLAT_COMPAT_USER_SS,UREGS_ss(%rsp)
        cmovzl %ecx,%eax
        movw  %ax,TRAPBOUNCE_cs(%rdx)
        call  compat_create_bounce_frame
        jmp   compat_test_all_events

ENTRY(compat_int80_direct_trap)
        call  compat_create_bounce_frame
        jmp   compat_test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
/*   {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]}                             */
/* %rdx: trap_bounce, %rbx: struct vcpu                                  */
/* On return only %rbx and %rdx are guaranteed non-clobbered.            */
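/*
 * Shape of the frame written below, from highest guest-stack address down
 * (4 bytes per slot; a sketch for orientation, not a structure the source
 * defines):
 *
 *   ss, esp          -- only when entered from guest user context
 *   eflags           -- EFLAGS.IF derived from the saved upcall mask
 *   cs               -- low 16 bits; bits 16-23 carry saved_upcall_mask
 *   eip
 *   error code       -- only if TBF_EXCEPTION_ERRCODE
 *   gs, fs, es, ds   -- only for TBF_FAILSAFE bounces
 *
 * %esi tracks the frame as it grows downwards.
 */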
compat_create_bounce_frame:
        ASSERT_INTERRUPTS_ENABLED
        mov   %fs,%edi
        testb $2,UREGS_cs+8(%rsp)
        jz    1f
        /* Push new frame at registered guest-OS stack base. */
        movl  VCPU_kernel_sp(%rbx),%esi
.Lft1:  mov   VCPU_kernel_ss(%rbx),%fs
        subl  $2*4,%esi
        movl  UREGS_rsp+8(%rsp),%eax
.Lft2:  movl  %eax,%fs:(%rsi)
        movl  UREGS_ss+8(%rsp),%eax
.Lft3:  movl  %eax,%fs:4(%rsi)
        jmp   2f
1:      /* In kernel context already: push new frame at existing %rsp. */
        movl  UREGS_rsp+8(%rsp),%esi
.Lft4:  mov   UREGS_ss+8(%rsp),%fs
2:
        movb  TRAPBOUNCE_flags(%rdx),%cl
        subl  $3*4,%esi
        movq  VCPU_vcpu_info(%rbx),%rax
        pushq COMPAT_VCPUINFO_upcall_mask(%rax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                       # TBF_INTERRUPT -> set upcall mask
        orb   %ch,COMPAT_VCPUINFO_upcall_mask(%rax)
        popq  %rax
        shll  $16,%eax                  # Bits 16-23: saved_upcall_mask
        movw  UREGS_cs+8(%rsp),%ax      # Bits  0-15: CS
.Lft5:  movl  %eax,%fs:4(%rsi)          # CS / saved_upcall_mask
        shrl  $16,%eax
        testb %al,%al                   # Bits 0-7: saved_upcall_mask
        setz  %ch                       # %ch == !saved_upcall_mask
        movl  UREGS_eflags+8(%rsp),%eax
        andl  $~X86_EFLAGS_IF,%eax
        addb  %ch,%ch                   # Bit 9 (EFLAGS.IF)
        orb   %ch,%ah                   # Fold EFLAGS.IF into %eax
.Lft6:  movl  %eax,%fs:2*4(%rsi)        # EFLAGS
        movl  UREGS_rip+8(%rsp),%eax
.Lft7:  movl  %eax,%fs:(%rsi)           # EIP
        testb $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subl  $4,%esi
        movl  TRAPBOUNCE_error_code(%rdx),%eax
.Lft8:  movl  %eax,%fs:(%rsi)           # ERROR CODE
1:
        testb $TBF_FAILSAFE,%cl
        jz    2f
        subl  $4*4,%esi
        movl  %gs,%eax
.Lft9:  movl  %eax,%fs:3*4(%rsi)        # GS
.Lft10: movl  %edi,%fs:2*4(%rsi)        # FS
        movl  %es,%eax
.Lft11: movl  %eax,%fs:1*4(%rsi)        # ES
        movl  %ds,%eax
.Lft12: movl  %eax,%fs:0*4(%rsi)        # DS
2:
        /* Rewrite our stack frame and return to guest-OS mode. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl  $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
                 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
        mov   %fs,UREGS_ss+8(%rsp)
        movl  %esi,UREGS_rsp+8(%rsp)
.Lft13: mov   %edi,%fs
        movzwl TRAPBOUNCE_cs(%rdx),%eax
        /* Null selectors (0-3) are not allowed. */
        testl $~3,%eax
        jz    domain_crash_synchronous
        movl  %eax,UREGS_cs+8(%rsp)
        movl  TRAPBOUNCE_eip(%rdx),%eax
        movl  %eax,UREGS_rip+8(%rsp)
        ret
.section .fixup,"ax"
.Lfx13:
        xorl  %edi,%edi
        jmp   .Lft13
.previous
.section __ex_table,"a"
        .quad .Lft1,domain_crash_synchronous  , .Lft2,compat_crash_page_fault
        .quad .Lft3,compat_crash_page_fault_4 , .Lft4,domain_crash_synchronous
        .quad .Lft5,compat_crash_page_fault_4 , .Lft6,compat_crash_page_fault_8
        .quad .Lft7,compat_crash_page_fault   , .Lft8,compat_crash_page_fault
        .quad .Lft9,compat_crash_page_fault_12, .Lft10,compat_crash_page_fault_8
        .quad .Lft11,compat_crash_page_fault_4, .Lft12,compat_crash_page_fault
        .quad .Lft13,.Lfx13
.previous
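
/*
 * The entry points below fall through one another, each adding 4 to %esi,
 * so that whichever frame word faulted above, %esi ends up pointing at the
 * guest stack address that could not be written.  compat_crash_page_fault
 * then restores the guest %fs, dumps a page walk for that address, and
 * crashes the domain.
 */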
compat_crash_page_fault_12:
        addl  $4,%esi
compat_crash_page_fault_8:
        addl  $4,%esi
compat_crash_page_fault_4:
        addl  $4,%esi
compat_crash_page_fault:
.Lft14: mov   %edi,%fs
        movl  %esi,%edi
        call  show_page_walk
        jmp   domain_crash_synchronous
.section .fixup,"ax"
.Lfx14:
        xorl  %edi,%edi
        jmp   .Lft14
.previous
.section __ex_table,"a"
        .quad .Lft14,.Lfx14
.previous
.section .rodata, "a", @progbits

ENTRY(compat_hypercall_table)
        .quad compat_set_trap_table     /*  0 */
        .quad do_mmu_update
        .quad compat_set_gdt
        .quad do_stack_switch
        .quad compat_set_callbacks
        .quad do_fpu_taskswitch         /*  5 */
        .quad do_sched_op_compat
        .quad compat_platform_op
        .quad do_set_debugreg
        .quad do_get_debugreg
        .quad compat_update_descriptor  /* 10 */
        .quad compat_ni_hypercall
        .quad compat_memory_op
        .quad compat_multicall
        .quad compat_update_va_mapping
        .quad compat_set_timer_op       /* 15 */
        .quad do_event_channel_op_compat
        .quad compat_xen_version
        .quad do_console_io
        .quad compat_physdev_op_compat
        .quad compat_grant_table_op     /* 20 */
        .quad compat_vm_assist
        .quad compat_update_va_mapping_otherdomain
        .quad compat_iret
        .quad compat_vcpu_op
        .quad compat_ni_hypercall       /* 25 */
        .quad compat_mmuext_op
        .quad do_xsm_op
        .quad compat_nmi_op
        .quad compat_sched_op
        .quad compat_callback_op        /* 30 */
        .quad compat_xenoprof_op
        .quad do_event_channel_op
        .quad compat_physdev_op
        .quad do_hvm_op
        .quad do_sysctl                 /* 35 */
        .quad do_domctl
        .quad compat_kexec_op
        .rept NR_hypercalls-((.-compat_hypercall_table)/8)
        .quad compat_ni_hypercall
        .endr
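
/*
 * One byte per hypercall: the number of argument registers that handler
 * consumes.  Only debug builds read this table -- it drives the 0xDEADBEEF
 * clobbering of unused argument slots in compat_hypercall above.  Like the
 * function table, it is padded out to NR_hypercalls entries.
 */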
ENTRY(compat_hypercall_args_table)
        .byte 1 /* compat_set_trap_table    */  /*  0 */
        .byte 4 /* compat_mmu_update        */
        .byte 2 /* compat_set_gdt           */
        .byte 2 /* compat_stack_switch      */
        .byte 4 /* compat_set_callbacks     */
        .byte 1 /* compat_fpu_taskswitch    */  /*  5 */
        .byte 2 /* compat_sched_op_compat   */
        .byte 1 /* compat_platform_op       */
        .byte 2 /* compat_set_debugreg      */
        .byte 1 /* compat_get_debugreg      */
        .byte 4 /* compat_update_descriptor */  /* 10 */
        .byte 0 /* compat_ni_hypercall      */
        .byte 2 /* compat_memory_op         */
        .byte 2 /* compat_multicall         */
        .byte 4 /* compat_update_va_mapping */
        .byte 2 /* compat_set_timer_op      */  /* 15 */
        .byte 1 /* compat_event_channel_op_compat */
        .byte 2 /* compat_xen_version       */
        .byte 3 /* compat_console_io        */
        .byte 1 /* compat_physdev_op_compat */
        .byte 3 /* compat_grant_table_op    */  /* 20 */
        .byte 2 /* compat_vm_assist         */
        .byte 5 /* compat_update_va_mapping_otherdomain */
        .byte 0 /* compat_iret              */
        .byte 3 /* compat_vcpu_op           */
        .byte 0 /* compat_ni_hypercall      */  /* 25 */
        .byte 4 /* compat_mmuext_op         */
        .byte 1 /* do_xsm_op                */
        .byte 2 /* compat_nmi_op            */
        .byte 2 /* compat_sched_op          */
        .byte 2 /* compat_callback_op       */  /* 30 */
        .byte 2 /* compat_xenoprof_op       */
        .byte 2 /* compat_event_channel_op  */
        .byte 2 /* compat_physdev_op        */
        .byte 2 /* do_hvm_op                */
        .byte 1 /* do_sysctl                */  /* 35 */
        .byte 1 /* do_domctl                */
        .byte 2 /* compat_kexec_op          */
        .rept NR_hypercalls-(.-compat_hypercall_args_table)
        .byte 0 /* compat_ni_hypercall      */
        .endr