ia64/xen-unstable: xen/arch/x86/x86_64/compat/entry.S @ 16263:23582bcda6e1

x86: Clean up NMI delivery logic. Allow set_trap_table vector 2 to be
specified as not disabling event delivery, just like any other vector.

Signed-off-by: Keir Fraser <keir@xensource.com>
Author:   Keir Fraser <keir@xensource.com>
Date:     Mon Oct 29 09:49:39 2007 +0000
Parents:  aeebd173c3fa
Children: e48453f82d30
/*
 * Compatibility hypercall routines.
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <public/xen.h>
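
/*
 * Compute a pointer to the guest register frame. Per-CPU stacks are
 * STACK_SIZE-aligned, so masking %rsp yields the stack base; the frame
 * lives in the struct cpu_info at the top of the stack, CPUINFO_sizeof
 * bytes below the stack's end.
 */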
#define GET_GUEST_REGS(reg)                     \
        movq $~(STACK_SIZE-1),reg;              \
        andq %rsp,reg;                          \
        orq  $(STACK_SIZE-CPUINFO_sizeof),reg;
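
/*
 * Load the current vcpu pointer, which is stashed in the final 8-byte
 * slot of the per-CPU stack: round %rsp up to that slot and
 * dereference it.
 */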
#define GET_CURRENT(reg)                        \
        movq $STACK_SIZE-8, reg;                \
        orq  %rsp, reg;                         \
        andq $~7,reg;                           \
        movq (reg),reg;
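
/*
 * Hypercall entry for 32-bit (compat) guests. The guest passes the
 * hypercall number in %eax and up to six arguments in %ebx, %ecx,
 * %edx, %esi, %edi and %ebp.
 */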
        ALIGN
ENTRY(compat_hypercall)
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        SAVE_ALL
        GET_CURRENT(%rbx)

        cmpl  $NR_hypercalls,%eax
        jae   compat_bad_hypercall
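        /*
         * Debug builds poison the argument registers the invoked
         * hypercall does not use (per compat_hypercall_args_table)
         * with 0xDEADBEEF, and stash shadow copies of the hypercall
         * number and return RIP so the post-call clobbering below can
         * check that the frame was not retargeted.
         */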
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        pushq UREGS_rbx(%rsp); pushq %rcx; pushq %rdx; pushq %rsi; pushq %rdi
        pushq UREGS_rbp+5*8(%rsp)
        leaq  compat_hypercall_args_table(%rip),%r10
        movq  $6,%rcx
        subb  (%r10,%rax,1),%cl
        movq  %rsp,%rdi
        movl  $0xDEADBEEF,%eax
        rep   stosq
        popq  %r8 ; popq  %r9 ; xchgl %r8d,%r9d /* Args 5&6: zero extend */
        popq  %rdx; popq  %rcx; xchgl %edx,%ecx /* Args 3&4: zero extend */
        popq  %rdi; popq  %rsi; xchgl %edi,%esi /* Args 1&2: zero extend */
        movl  UREGS_rax(%rsp),%eax
        pushq %rax
        pushq UREGS_rip+8(%rsp)
#define SHADOW_BYTES 16 /* Shadow EIP + shadow hypercall # */
#else
        /* Relocate argument registers and zero-extend to 64 bits. */
        movl  %eax,%eax              /* Hypercall #  */
        xchgl %ecx,%esi              /* Arg 2, Arg 4 */
        movl  %edx,%edx              /* Arg 3        */
        movl  %edi,%r8d              /* Arg 5        */
        movl  %ebp,%r9d              /* Arg 6        */
        movl  UREGS_rbx(%rsp),%edi   /* Arg 1        */
#define SHADOW_BYTES 0 /* No on-stack shadow state */
#endif
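        /*
         * If hypercall tracing is active, trace_hypercall clobbers the
         * argument registers; reload them from the saved frame, offset
         * by SHADOW_BYTES to step over any debug shadow words pushed
         * above.
         */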
        cmpb  $0,tb_init_done(%rip)
        je    compat_tracing_off
        call  trace_hypercall
        /* Now restore all the registers that trace_hypercall clobbered */
        movl  UREGS_rax+SHADOW_BYTES(%rsp),%eax   /* Hypercall #  */
        movl  UREGS_rbx+SHADOW_BYTES(%rsp),%edi   /* Arg 1        */
        movl  UREGS_rcx+SHADOW_BYTES(%rsp),%esi   /* Arg 2        */
        movl  UREGS_rdx+SHADOW_BYTES(%rsp),%edx   /* Arg 3        */
        movl  UREGS_rsi+SHADOW_BYTES(%rsp),%ecx   /* Arg 4        */
        movl  UREGS_rdi+SHADOW_BYTES(%rsp),%r8d   /* Arg 5        */
        movl  UREGS_rbp+SHADOW_BYTES(%rsp),%r9d   /* Arg 6        */
#undef SHADOW_BYTES
compat_tracing_off:
        leaq  compat_hypercall_table(%rip),%r10
        PERFC_INCR(PERFC_hypercalls, %rax, %rbx)
        callq *(%r10,%rax,8)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs used by this hypercall. */
        popq  %r10         # Shadow RIP
        cmpq  %r10,UREGS_rip+8(%rsp)
        popq  %rcx         # Shadow hypercall index
        jne   compat_skip_clobber /* If RIP has changed then don't clobber. */
        leaq  compat_hypercall_args_table(%rip),%r10
        movb  (%r10,%rcx,1),%cl
        movl  $0xDEADBEEF,%r10d
        testb %cl,%cl; jz compat_skip_clobber; movl %r10d,UREGS_rbx(%rsp)
        cmpb  $2, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rcx(%rsp)
        cmpb  $3, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rdx(%rsp)
        cmpb  $4, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rsi(%rsp)
        cmpb  $5, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rdi(%rsp)
        cmpb  $6, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rbp(%rsp)
compat_skip_clobber:
#endif
        movl  %eax,UREGS_rax(%rsp)       # save the return value
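
/*
 * Fall through: the exit path below loops until no softirqs, pending
 * NMIs or guest event upcalls remain, then restores guest state.
 */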
/* %rbx: struct vcpu */
ENTRY(compat_test_all_events)
        cli                              # tests must not race interrupts
/*compat_test_softirqs:*/
        movl  VCPU_processor(%rbx),%eax
        shlq  $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip),%rcx
        testl $~0,(%rcx,%rax,1)
        jnz   compat_process_softirqs
        testb $1,VCPU_nmi_pending(%rbx)
        jnz   compat_process_nmi
compat_test_guest_events:
        movq  VCPU_vcpu_info(%rbx),%rax
        testb $0xFF,COMPAT_VCPUINFO_upcall_mask(%rax)
        jnz   compat_restore_all_guest
        testb $0xFF,COMPAT_VCPUINFO_upcall_pending(%rax)
        jz    compat_restore_all_guest
/*compat_process_guest_events:*/
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movl  VCPU_event_addr(%rbx),%eax
        movl  %eax,TRAPBOUNCE_eip(%rdx)
        movl  VCPU_event_sel(%rbx),%eax
        movw  %ax,TRAPBOUNCE_cs(%rdx)
        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  compat_create_bounce_frame
        jmp   compat_test_all_events

        ALIGN
/* %rbx: struct vcpu */
compat_process_softirqs:
        sti
        call  do_softirq
        jmp   compat_test_all_events
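
/*
 * Pending NMIs are delivered one at a time: nmi_pending is cleared
 * before the bounce and nmi_masked stays set while the guest handles
 * the NMI. If set_guest_nmi_trapbounce reports that no handler can be
 * invoked (returns zero), the NMI is dropped.
 */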
        ALIGN
/* %rbx: struct vcpu */
compat_process_nmi:
        testb $1,VCPU_nmi_masked(%rbx)
        jnz   compat_test_guest_events
        sti
        movb  $0,VCPU_nmi_pending(%rbx)
        call  set_guest_nmi_trapbounce
        testl %eax,%eax
        jz    compat_test_all_events
        movb  $1,VCPU_nmi_masked(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        call  compat_create_bounce_frame
        jmp   compat_test_all_events

compat_bad_hypercall:
        movl  $-ENOSYS,UREGS_rax(%rsp)
        jmp   compat_test_all_events

/* %rbx: struct vcpu, interrupts disabled */
compat_restore_all_guest:
        ASSERT_INTERRUPTS_DISABLED
        RESTORE_ALL
        addq  $8,%rsp
.Lft0:  iretq
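
/*
 * If the iretq above faults on bad guest state, the pre-exception
 * fixup below (.Lfx0) rebuilds an exception frame whose return address
 * is .Ldf0 and enters handle_exception; the __ex_table entry for .Ldf0
 * then steers recovery into compat_failsafe_callback, which bounces
 * the fault to the guest's registered failsafe handler.
 */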
.section .fixup,"ax"
.Lfx0:  sti
        SAVE_ALL
        movq  UREGS_error_code(%rsp),%rsi
        movq  %rsp,%rax
        andq  $~0xf,%rsp
        pushq $__HYPERVISOR_DS         # SS
        pushq %rax                     # RSP
        pushfq                         # RFLAGS
        pushq $__HYPERVISOR_CS         # CS
        leaq  .Ldf0(%rip),%rax
        pushq %rax                     # RIP
        pushq %rsi                     # error_code/entry_vector
        jmp   handle_exception
.Ldf0:  GET_CURRENT(%rbx)
        jmp   compat_test_all_events
compat_failsafe_callback:
        GET_CURRENT(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movl  VCPU_failsafe_addr(%rbx),%eax
        movl  %eax,TRAPBOUNCE_eip(%rdx)
        movl  VCPU_failsafe_sel(%rbx),%eax
        movw  %ax,TRAPBOUNCE_cs(%rdx)
        movb  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
        btq   $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%rbx)
        jnc   1f
        orb   $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
1:      call  compat_create_bounce_frame
        jmp   compat_test_all_events
.previous
.section __pre_ex_table,"a"
        .quad .Lft0,.Lfx0
.previous
.section __ex_table,"a"
        .quad .Ldf0,compat_failsafe_callback
.previous

/* %rdx: trap_bounce, %rbx: struct vcpu */
ENTRY(compat_post_handle_exception)
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
        jz    compat_test_all_events
        call  compat_create_bounce_frame
        movb  $0,TRAPBOUNCE_flags(%rdx)
        jmp   compat_test_all_events
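
/*
 * 32-bit SYSCALL entry. If the guest has registered a 32-bit syscall
 * callback (non-null selector), bounce to it; otherwise (the 2: path)
 * reflect a #GP fault, with a zero error code, to the guest's GP fault
 * handler. The leal turns the setne result into TBF_INTERRUPT or 0,
 * matching whether the callback disables event delivery.
 */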
ENTRY(compat_syscall)
        cmpb  $0,VCPU_syscall32_disables_events(%rbx)
        movzwl VCPU_syscall32_sel(%rbx),%esi
        movq  VCPU_syscall32_addr(%rbx),%rax
        setne %cl
        leaq  VCPU_trap_bounce(%rbx),%rdx
        testl $~3,%esi
        leal  (,%rcx,TBF_INTERRUPT),%ecx
        jz    2f
1:      movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  %si,TRAPBOUNCE_cs(%rdx)
        movb  %cl,TRAPBOUNCE_flags(%rdx)
        call  compat_create_bounce_frame
        jmp   compat_test_all_events
2:      movl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
        movq  VCPU_gp_fault_addr(%rbx),%rax
        movzwl VCPU_gp_fault_sel(%rbx),%esi
        movb  $(TBF_EXCEPTION|TBF_EXCEPTION_ERRCODE|TBF_INTERRUPT),%cl
        movl  $0,TRAPBOUNCE_error_code(%rdx)
        jmp   1b
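
/*
 * SYSENTER from a compat guest: fall back to the guest's GP fault
 * selector either when this entry was already flagged as a #GP or when
 * the registered sysenter selector is null.
 */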
ENTRY(compat_sysenter)
        cmpl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
        movzwl VCPU_sysenter_sel(%rbx),%eax
        movzwl VCPU_gp_fault_sel(%rbx),%ecx
        cmovel %ecx,%eax
        testl $~3,%eax
        movl  $FLAT_COMPAT_USER_SS,UREGS_ss(%rsp)
        cmovzl %ecx,%eax
        movw  %ax,TRAPBOUNCE_cs(%rdx)
        call  compat_create_bounce_frame
        jmp   compat_test_all_events

ENTRY(compat_int80_direct_trap)
        call  compat_create_bounce_frame
        jmp   compat_test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
/*   {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]}                  */
/* %rdx: trap_bounce, %rbx: struct vcpu                       */
/* On return only %rbx and %rdx are guaranteed non-clobbered. */
compat_create_bounce_frame:
        ASSERT_INTERRUPTS_ENABLED
        mov   %fs,%edi
        testb $2,UREGS_cs+8(%rsp)
        jz    1f
        /* Push new frame at registered guest-OS stack base. */
        movl  VCPU_kernel_sp(%rbx),%esi
.Lft1:  mov   VCPU_kernel_ss(%rbx),%fs
        subl  $2*4,%esi
        movl  UREGS_rsp+8(%rsp),%eax
.Lft2:  movl  %eax,%fs:(%rsi)
        movl  UREGS_ss+8(%rsp),%eax
.Lft3:  movl  %eax,%fs:4(%rsi)
        jmp   2f
1:      /* In kernel context already: push new frame at existing %rsp. */
        movl  UREGS_rsp+8(%rsp),%esi
.Lft4:  mov   UREGS_ss+8(%rsp),%fs
2:
        movb  TRAPBOUNCE_flags(%rdx),%cl
        subl  $3*4,%esi
        movq  VCPU_vcpu_info(%rbx),%rax
        pushq COMPAT_VCPUINFO_upcall_mask(%rax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                        # TBF_INTERRUPT -> set upcall mask
        orb   %ch,COMPAT_VCPUINFO_upcall_mask(%rax)
        popq  %rax
        shll  $16,%eax                   # Bits 16-23: saved_upcall_mask
        movw  UREGS_cs+8(%rsp),%ax       # Bits  0-15: CS
.Lft5:  movl  %eax,%fs:4(%rsi)           # CS / saved_upcall_mask
        shrl  $16,%eax
        testb %al,%al                    # Bits 0-7: saved_upcall_mask
        setz  %ch                        # %ch == !saved_upcall_mask
        movl  UREGS_eflags+8(%rsp),%eax
        andl  $~X86_EFLAGS_IF,%eax
        addb  %ch,%ch                    # Bit 9 (EFLAGS.IF)
        orb   %ch,%ah                    # Fold EFLAGS.IF into %eax
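        /*
         * The guest-visible IF is the inverse of saved_upcall_mask:
         * %ch is 0 or 1, doubled to 0 or 2, and OR-ed into %ah, which
         * sets bit 9 (EFLAGS.IF) of the frame's EFLAGS word.
         */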
.Lft6:  movl  %eax,%fs:2*4(%rsi)         # EFLAGS
        movl  UREGS_rip+8(%rsp),%eax
.Lft7:  movl  %eax,%fs:(%rsi)            # EIP
        testb $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subl  $4,%esi
        movl  TRAPBOUNCE_error_code(%rdx),%eax
.Lft8:  movl  %eax,%fs:(%rsi)            # ERROR CODE
1:
        testb $TBF_FAILSAFE,%cl
        jz    2f
        subl  $4*4,%esi
        movl  %gs,%eax
.Lft9:  movl  %eax,%fs:3*4(%rsi)         # GS
.Lft10: movl  %edi,%fs:2*4(%rsi)         # FS
        movl  %es,%eax
.Lft11: movl  %eax,%fs:1*4(%rsi)         # ES
        movl  %ds,%eax
.Lft12: movl  %eax,%fs:0*4(%rsi)         # DS
2:
        /* Rewrite our stack frame and return to guest-OS mode. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl  $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
                 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
        mov   %fs,UREGS_ss+8(%rsp)
        movl  %esi,UREGS_rsp+8(%rsp)
.Lft13: mov   %edi,%fs
        movzwl TRAPBOUNCE_cs(%rdx),%eax
        /* Null selectors (0-3) are not allowed. */
        testl $~3,%eax
        jz    domain_crash_synchronous
        movl  %eax,UREGS_cs+8(%rsp)
        movl  TRAPBOUNCE_eip(%rdx),%eax
        movl  %eax,UREGS_rip+8(%rsp)
        ret
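        /*
         * The saved frame now targets the guest handler; the caller
         * returns through compat_test_all_events, whose RESTORE_ALL /
         * iretq path enters the guest at the rewritten CS:EIP with the
         * rewritten SS:ESP.
         */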
.section .fixup,"ax"
.Lfx13:
        xorl  %edi,%edi
        jmp   .Lft13
.previous
.section __ex_table,"a"
        .quad .Lft1,domain_crash_synchronous  , .Lft2,compat_crash_page_fault
        .quad .Lft3,compat_crash_page_fault_4 , .Lft4,domain_crash_synchronous
        .quad .Lft5,compat_crash_page_fault_4 , .Lft6,compat_crash_page_fault_8
        .quad .Lft7,compat_crash_page_fault   , .Lft8,compat_crash_page_fault
        .quad .Lft9,compat_crash_page_fault_12, .Lft10,compat_crash_page_fault_8
        .quad .Lft11,compat_crash_page_fault_4, .Lft12,compat_crash_page_fault
        .quad .Lft13,.Lfx13
.previous
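
/*
 * A faulting guest-stack write lands here via the __ex_table entries
 * above. The staggered entry points add the store's offset from %esi
 * back on, so show_page_walk reports the exact guest address that
 * faulted before the domain is crashed.
 */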
compat_crash_page_fault_12:
        addl  $4,%esi
compat_crash_page_fault_8:
        addl  $4,%esi
compat_crash_page_fault_4:
        addl  $4,%esi
compat_crash_page_fault:
.Lft14: mov   %edi,%fs
        movl  %esi,%edi
        call  show_page_walk
        jmp   domain_crash_synchronous
.section .fixup,"ax"
.Lfx14:
        xorl  %edi,%edi
        jmp   .Lft14
.previous
.section __ex_table,"a"
        .quad .Lft14,.Lfx14
.previous

.section .rodata, "a", @progbits

ENTRY(compat_hypercall_table)
        .quad compat_set_trap_table     /*  0 */
        .quad do_mmu_update
        .quad compat_set_gdt
        .quad do_stack_switch
        .quad compat_set_callbacks
        .quad do_fpu_taskswitch         /*  5 */
        .quad do_sched_op_compat
        .quad compat_platform_op
        .quad do_set_debugreg
        .quad do_get_debugreg
        .quad compat_update_descriptor  /* 10 */
        .quad compat_ni_hypercall
        .quad compat_memory_op
        .quad compat_multicall
        .quad compat_update_va_mapping
        .quad compat_set_timer_op       /* 15 */
        .quad do_event_channel_op_compat
        .quad compat_xen_version
        .quad do_console_io
        .quad compat_physdev_op_compat
        .quad compat_grant_table_op     /* 20 */
        .quad compat_vm_assist
        .quad compat_update_va_mapping_otherdomain
        .quad compat_iret
        .quad compat_vcpu_op
        .quad compat_ni_hypercall       /* 25 */
        .quad compat_mmuext_op
        .quad do_xsm_op
        .quad compat_nmi_op
        .quad compat_sched_op
        .quad compat_callback_op        /* 30 */
        .quad compat_xenoprof_op
        .quad do_event_channel_op
        .quad compat_physdev_op
        .quad do_hvm_op
        .quad do_sysctl                 /* 35 */
        .quad do_domctl
        .quad compat_kexec_op
        .rept NR_hypercalls-((.-compat_hypercall_table)/8)
        .quad compat_ni_hypercall
        .endr
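
/*
 * The .rept pads the table to NR_hypercalls entries so that every
 * in-range hypercall number dispatches somewhere; unimplemented
 * numbers land in compat_ni_hypercall, which fails with -ENOSYS.
 */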
ENTRY(compat_hypercall_args_table)
        .byte 1 /* compat_set_trap_table */             /*  0 */
        .byte 4 /* compat_mmu_update */
        .byte 2 /* compat_set_gdt */
        .byte 2 /* compat_stack_switch */
        .byte 4 /* compat_set_callbacks */
        .byte 1 /* compat_fpu_taskswitch */             /*  5 */
        .byte 2 /* compat_sched_op_compat */
        .byte 1 /* compat_platform_op */
        .byte 2 /* compat_set_debugreg */
        .byte 1 /* compat_get_debugreg */
        .byte 4 /* compat_update_descriptor */          /* 10 */
        .byte 0 /* compat_ni_hypercall */
        .byte 2 /* compat_memory_op */
        .byte 2 /* compat_multicall */
        .byte 4 /* compat_update_va_mapping */
        .byte 2 /* compat_set_timer_op */               /* 15 */
        .byte 1 /* compat_event_channel_op_compat */
        .byte 2 /* compat_xen_version */
        .byte 3 /* compat_console_io */
        .byte 1 /* compat_physdev_op_compat */
        .byte 3 /* compat_grant_table_op */             /* 20 */
        .byte 2 /* compat_vm_assist */
        .byte 5 /* compat_update_va_mapping_otherdomain */
        .byte 0 /* compat_iret */
        .byte 3 /* compat_vcpu_op */
        .byte 0 /* compat_ni_hypercall */               /* 25 */
        .byte 4 /* compat_mmuext_op */
        .byte 1 /* do_xsm_op */
        .byte 2 /* compat_nmi_op */
        .byte 2 /* compat_sched_op */
        .byte 2 /* compat_callback_op */                /* 30 */
        .byte 2 /* compat_xenoprof_op */
        .byte 2 /* compat_event_channel_op */
        .byte 2 /* compat_physdev_op */
        .byte 2 /* do_hvm_op */
        .byte 1 /* do_sysctl */                         /* 35 */
        .byte 1 /* do_domctl */
        .byte 2 /* compat_kexec_op */
        .rept NR_hypercalls-(.-compat_hypercall_args_table)
        .byte 0 /* compat_ni_hypercall */
        .endr
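
/*
 * Per-hypercall argument counts, indexed by hypercall number. These
 * are consumed only by the debug-build clobbering logic in
 * compat_hypercall above, which poisons unused and used argument
 * registers before and after the call respectively.
 */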