ia64/xen-unstable

view xen/arch/x86/x86_64/compat/entry.S @ 16185:42d8dadb5864

x86: Allow NMI callback CS to be specified via set_trap_table()
hypercall.
Based on a patch by Jan Beulich.
Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Mon Oct 22 13:04:32 2007 +0100 (2007-10-22)
parents a330276d8c90
children 4970cbf9b19e
line source
1 /*
2 * Compatibility hypercall routines.
3 */
5 #include <xen/config.h>
6 #include <xen/errno.h>
7 #include <xen/softirq.h>
8 #include <asm/asm_defns.h>
9 #include <asm/apicdef.h>
10 #include <asm/page.h>
11 #include <asm/desc.h>
12 #include <public/xen.h>
/*
 * GET_GUEST_REGS(reg): leave in 'reg' a pointer to the guest register
 * frame saved at the top of the current hypervisor stack: round %rsp
 * down to the STACK_SIZE-aligned stack base, then offset up to
 * STACK_SIZE - CPUINFO_sizeof (just below the per-stack cpu_info area).
 */
14 #define GET_GUEST_REGS(reg) \
15 movq $~(STACK_SIZE-1),reg; \
16 andq %rsp,reg; \
17 orq $(STACK_SIZE-CPUINFO_sizeof),reg;
/*
 * GET_CURRENT(reg): load the 'current' vcpu pointer.  Form the address
 * of the last 8-byte slot of this CPU's stack (stack base | STACK_SIZE-8,
 * re-aligned to 8 bytes) and dereference it to fetch the struct vcpu *.
 */
19 #define GET_CURRENT(reg) \
20 movq $STACK_SIZE-8, reg; \
21 orq %rsp, reg; \
22 andq $~7,reg; \
23 movq (reg),reg;
25 ALIGN
/*
 * Hypercall entry point for 32-bit PV (compat) guests.
 * On entry: %eax = hypercall number; guest arguments arrive in
 * %ebx,%ecx,%edx,%esi,%edi,%ebp and are relocated below into the 64-bit
 * C calling convention (%rdi,%rsi,%rdx,%rcx,%r8,%r9), zero-extended.
 */
26 ENTRY(compat_hypercall)
27 pushq $0                               # dummy error code
28 movl $TRAP_syscall,4(%rsp)             # entry vector
29 SAVE_ALL
30 GET_CURRENT(%rbx)                      # %rbx = current vcpu
32 cmpl $NR_hypercalls,%eax
33 jae compat_bad_hypercall
34 #ifndef NDEBUG
35 /* Deliberately corrupt parameter regs not used by this hypercall. */
36 pushq UREGS_rbx(%rsp); pushq %rcx; pushq %rdx; pushq %rsi; pushq %rdi
37 pushq UREGS_rbp+5*8(%rsp)
38 leaq compat_hypercall_args_table(%rip),%r10
39 movq $6,%rcx
40 subb (%r10,%rax,1),%cl                 # %cl = 6 - #args this hypercall uses
41 movq %rsp,%rdi
42 movl $0xDEADBEEF,%eax
43 rep stosq                              # poison the unused argument slots
44 popq %r8 ; popq %r9 ; xchgl %r8d,%r9d /* Args 5&6: zero extend */
45 popq %rdx; popq %rcx; xchgl %edx,%ecx /* Args 3&4: zero extend */
46 popq %rdi; popq %rsi; xchgl %edi,%esi /* Args 1&2: zero extend */
47 movl UREGS_rax(%rsp),%eax
48 pushq %rax                             # shadow hypercall index (checked after call)
49 pushq UREGS_rip+8(%rsp)                # shadow RIP (checked after call)
50 #else
51 /* Relocate argument registers and zero-extend to 64 bits. */
52 movl %eax,%eax /* Hypercall # */
53 xchgl %ecx,%esi /* Arg 2, Arg 4 */
54 movl %edx,%edx /* Arg 3 */
55 movl %edi,%r8d /* Arg 5 */
56 movl %ebp,%r9d /* Arg 6 */
57 movl UREGS_rbx(%rsp),%edi /* Arg 1 */
58 #endif
59 cmpb $0,tb_init_done(%rip)             # hypercall tracing enabled?
60 je compat_tracing_off
61 call trace_hypercall
62 /* Now restore all the registers that trace_hypercall clobbered */
63 movl UREGS_rax(%rsp),%eax /* Hypercall # */
64 movl UREGS_rbx(%rsp),%edi /* Arg 1 */
65 movl UREGS_rcx(%rsp),%esi /* Arg 2 */
66 movl UREGS_rdx(%rsp),%edx /* Arg 3 */
67 movl UREGS_rsi(%rsp),%ecx /* Arg 4 */
68 movl UREGS_rdi(%rsp),%r8d /* Arg 5 */
69 movl UREGS_rbp(%rsp),%r9d /* Arg 6 */
70 compat_tracing_off:
71 leaq compat_hypercall_table(%rip),%r10
72 PERFC_INCR(PERFC_hypercalls, %rax, %rbx)
73 callq *(%r10,%rax,8)                   # dispatch to the compat handler
74 #ifndef NDEBUG
75 /* Deliberately corrupt parameter regs used by this hypercall. */
76 popq %r10 # Shadow RIP
77 cmpq %r10,UREGS_rip+8(%rsp)
78 popq %rcx # Shadow hypercall index
79 jne compat_skip_clobber /* If RIP has changed then don't clobber. */
80 leaq compat_hypercall_args_table(%rip),%r10
81 movb (%r10,%rcx,1),%cl                 # %cl = #args this hypercall uses
82 movl $0xDEADBEEF,%r10d
83 testb %cl,%cl; jz compat_skip_clobber; movl %r10d,UREGS_rbx(%rsp)
84 cmpb $2, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rcx(%rsp)
85 cmpb $3, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rdx(%rsp)
86 cmpb $4, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rsi(%rsp)
87 cmpb $5, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rdi(%rsp)
88 cmpb $6, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rbp(%rsp)
89 compat_skip_clobber:
90 #endif
91 movl %eax,UREGS_rax(%rsp) # save the return value
93 /* %rbx: struct vcpu */
/*
 * Common exit path back to a compat guest.  Loop until no softirq,
 * no pending NMI callback and no deliverable event remain, then fall
 * through (via the j(n)z branches) to compat_restore_all_guest.
 */
94 ENTRY(compat_test_all_events)
95 cli # tests must not race interrupts
96 /*compat_test_softirqs:*/
97 movl VCPU_processor(%rbx),%eax
98 shlq $IRQSTAT_shift,%rax               # offset of this CPU's irq_stat entry
99 leaq irq_stat(%rip),%rcx
100 testl $~0,(%rcx,%rax,1)               # any softirq pending on this CPU?
101 jnz compat_process_softirqs
102 testb $1,VCPU_nmi_pending(%rbx)
103 jnz compat_process_nmi
104 compat_test_guest_events:
105 movq VCPU_vcpu_info(%rbx),%rax
106 testb $0xFF,COMPAT_VCPUINFO_upcall_mask(%rax)   # guest has events masked?
107 jnz compat_restore_all_guest
108 testb $0xFF,COMPAT_VCPUINFO_upcall_pending(%rax) # any event pending?
109 jz compat_restore_all_guest
110 /*compat_process_guest_events:*/
111 sti
112 leaq VCPU_trap_bounce(%rbx),%rdx
113 movl VCPU_event_addr(%rbx),%eax
114 movl %eax,TRAPBOUNCE_eip(%rdx)        # guest's event-callback EIP
115 movl VCPU_event_sel(%rbx),%eax
116 movw %ax,TRAPBOUNCE_cs(%rdx)          # guest's event-callback CS
117 movb $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
118 call compat_create_bounce_frame
119 jmp compat_test_all_events            # re-test everything before exit
121 ALIGN
122 /* %rbx: struct vcpu */
/* Run pending softirqs with interrupts enabled, then re-run all tests. */
123 compat_process_softirqs:
124 sti
125 call do_softirq
126 jmp compat_test_all_events
128 ALIGN
129 /* %rbx: struct vcpu */
/*
 * Deliver a pending NMI callback to the compat guest.  The callback
 * CS:EIP comes from VCPU_nmi_cs / VCPU_nmi_addr, registered by the
 * guest via set_trap_table() (see the changeset description above).
 * BUG FIX: the stale "movw $FLAT_COMPAT_KERNEL_CS,TRAPBOUNCE_cs(%rdx)"
 * that followed the EIP store has been removed -- it overwrote the CS
 * just loaded from VCPU_nmi_cs, defeating the purpose of this patch.
 */
130 compat_process_nmi:
131 testb $1,VCPU_nmi_masked(%rbx)        # NMI delivery masked by guest?
132 jnz compat_test_guest_events
133 movb $0,VCPU_nmi_pending(%rbx)        # consume the pending NMI
134 movzwl VCPU_nmi_cs(%rbx),%eax         # guest-registered callback CS
135 movl VCPU_nmi_addr(%rbx),%ecx         # guest-registered callback EIP
136 testl %eax,%eax
137 jz compat_test_guest_events           # no callback registered: drop it
138 movb $1,VCPU_nmi_masked(%rbx)         # mask further NMIs until unmasked
139 sti
140 leaq VCPU_trap_bounce(%rbx),%rdx
141 movw %ax,TRAPBOUNCE_cs(%rdx)          # use the registered CS ...
142 movl %ecx,TRAPBOUNCE_eip(%rdx)        # ... and EIP
144 movb $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
145 call compat_create_bounce_frame
146 jmp compat_test_all_events
/* Out-of-range hypercall number: return -ENOSYS in the guest's %eax. */
148 compat_bad_hypercall:
149 movl $-ENOSYS,UREGS_rax(%rsp)
150 jmp compat_test_all_events
152 /* %rbx: struct vcpu, interrupts disabled */
/*
 * Final return to the compat guest: restore all registers and iretq.
 * The iretq can fault on bad guest state; the __pre_ex_table entry
 * below routes such a fault to .Lfx0.
 */
153 compat_restore_all_guest:
154 ASSERT_INTERRUPTS_DISABLED
155 RESTORE_ALL
156 addq $8,%rsp                          # discard error-code/entry-vector slot
157 .Lft0: iretq
159 .section .fixup,"ax"
/*
 * Fault on the iretq above: rebuild a hypervisor exception frame whose
 * return address is .Ldf0 and hand it to handle_exception.  The new
 * frame is built 16-byte aligned below the faulting frame.
 */
160 .Lfx0: sti
161 SAVE_ALL
162 movq UREGS_error_code(%rsp),%rsi
163 movq %rsp,%rax
164 andq $~0xf,%rsp
165 pushq $__HYPERVISOR_DS # SS
166 pushq %rax # RSP
167 pushfq # RFLAGS
168 pushq $__HYPERVISOR_CS # CS
169 leaq .Ldf0(%rip),%rax
170 pushq %rax # RIP
171 pushq %rsi # error_code/entry_vector
172 jmp handle_exception
173 .Ldf0: GET_CURRENT(%rbx)
174 jmp compat_test_all_events
/*
 * Reached via the __ex_table entry for .Ldf0 below: deliver the guest's
 * registered failsafe callback, optionally with events disabled.
 */
175 compat_failsafe_callback:
176 GET_CURRENT(%rbx)
177 leaq VCPU_trap_bounce(%rbx),%rdx
178 movl VCPU_failsafe_addr(%rbx),%eax
179 movl %eax,TRAPBOUNCE_eip(%rdx)
180 movl VCPU_failsafe_sel(%rbx),%eax
181 movw %ax,TRAPBOUNCE_cs(%rdx)
182 movb $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
183 btq $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%rbx)
184 jnc 1f
185 orb $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
186 1: call compat_create_bounce_frame
187 jmp compat_test_all_events
188 .previous
189 .section __pre_ex_table,"a"
190 .quad .Lft0,.Lfx0
191 .previous
192 .section __ex_table,"a"
193 .quad .Ldf0,compat_failsafe_callback
194 .previous
196 /* %rdx: trap_bounce, %rbx: struct vcpu */
/*
 * Tail of exception handling for compat guests: if the C handler latched
 * a TBF_EXCEPTION bounce, build the guest frame and clear the latch.
 */
197 ENTRY(compat_post_handle_exception)
198 testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
199 jz compat_test_all_events
200 call compat_create_bounce_frame
201 movb $0,TRAPBOUNCE_flags(%rdx)        # bounce delivered; reset flags
202 jmp compat_test_all_events
/*
 * int80 direct-trap path: deliver the bounce and re-test events.
 * NOTE(review): %rdx (trap_bounce) and %rbx (vcpu) are presumably set up
 * by the caller before entry -- confirm against the int80 entry code.
 */
204 ENTRY(compat_int80_direct_trap)
205 call compat_create_bounce_frame
206 jmp compat_test_all_events
208 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
209 /* {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]} */
210 /* %rdx: trap_bounce, %rbx: struct vcpu */
211 /* On return only %rbx and %rdx are guaranteed non-clobbered. */
/*
 * Register usage: %esi walks the guest stack pointer being built; %edi
 * saves the hypervisor %fs; %cl caches TRAPBOUNCE_flags; all guest-stack
 * writes go through %fs and carry __ex_table fixups (table at the end)
 * routing faults to compat_crash_page_fault*.
 */
212 compat_create_bounce_frame:
213 ASSERT_INTERRUPTS_ENABLED
214 mov %fs,%edi                          # save hypervisor %fs (restored at .Lft13)
215 testb $2,UREGS_cs+8(%rsp)             # clear => already in kernel context (see 1:)
216 jz 1f
217 /* Push new frame at registered guest-OS stack base. */
218 movl VCPU_kernel_sp(%rbx),%esi
219 .Lft1: mov VCPU_kernel_ss(%rbx),%fs
220 subl $2*4,%esi                        # room for ESP, SS
221 movl UREGS_rsp+8(%rsp),%eax
222 .Lft2: movl %eax,%fs:(%rsi)
223 movl UREGS_ss+8(%rsp),%eax
224 .Lft3: movl %eax,%fs:4(%rsi)
225 jmp 2f
226 1: /* In kernel context already: push new frame at existing %rsp. */
227 movl UREGS_rsp+8(%rsp),%esi
228 .Lft4: mov UREGS_ss+8(%rsp),%fs
229 2:
230 movb TRAPBOUNCE_flags(%rdx),%cl       # %cl = TBF_* flags for this bounce
231 subl $3*4,%esi                        # room for EIP, CS, EFLAGS
232 movq VCPU_vcpu_info(%rbx),%rax
233 pushq COMPAT_VCPUINFO_upcall_mask(%rax)
234 testb $TBF_INTERRUPT,%cl
235 setnz %ch # TBF_INTERRUPT -> set upcall mask
236 orb %ch,COMPAT_VCPUINFO_upcall_mask(%rax)
237 popq %rax
238 shll $16,%eax # Bits 16-23: saved_upcall_mask
239 movw UREGS_cs+8(%rsp),%ax # Bits 0-15: CS
240 .Lft5: movl %eax,%fs:4(%rsi) # CS / saved_upcall_mask
241 shrl $16,%eax
242 testb %al,%al # Bits 0-7: saved_upcall_mask
243 setz %ch # %ch == !saved_upcall_mask
244 movl UREGS_eflags+8(%rsp),%eax
245 andl $~X86_EFLAGS_IF,%eax
246 shlb $1,%ch # Bit 9 (EFLAGS.IF)
247 orb %ch,%ah # Fold EFLAGS.IF into %eax
248 .Lft6: movl %eax,%fs:2*4(%rsi) # EFLAGS
249 movl UREGS_rip+8(%rsp),%eax
250 .Lft7: movl %eax,%fs:(%rsi) # EIP
251 testb $TBF_EXCEPTION_ERRCODE,%cl
252 jz 1f
253 subl $4,%esi
254 movl TRAPBOUNCE_error_code(%rdx),%eax
255 .Lft8: movl %eax,%fs:(%rsi) # ERROR CODE
256 1:
257 testb $TBF_FAILSAFE,%cl
258 jz 2f
259 subl $4*4,%esi                        # failsafe frame also saves DS/ES/FS/GS
260 movl %gs,%eax
261 .Lft9: movl %eax,%fs:3*4(%rsi) # GS
262 .Lft10: movl %edi,%fs:2*4(%rsi) # FS
263 movl %es,%eax
264 .Lft11: movl %eax,%fs:1*4(%rsi) # ES
265 movl %ds,%eax
266 .Lft12: movl %eax,%fs:0*4(%rsi) # DS
267 2:
268 /* Rewrite our stack frame and return to guest-OS mode. */
269 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
270 andl $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
271 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
272 mov %fs,UREGS_ss+8(%rsp)              # guest resumes on the stack we built
273 movl %esi,UREGS_rsp+8(%rsp)
274 .Lft13: mov %edi,%fs                  # restore hypervisor %fs
275 movzwl TRAPBOUNCE_cs(%rdx),%eax
276 /* Null selectors (0-3) are not allowed. */
277 testl $~3,%eax
278 jz domain_crash_synchronous
279 movl %eax,UREGS_cs+8(%rsp)
280 movl TRAPBOUNCE_eip(%rdx),%eax
281 movl %eax,UREGS_rip+8(%rsp)
282 ret
283 .section .fixup,"ax"
/* %fs reload at .Lft13 faulted: retry with a null selector. */
284 .Lfx13:
285 xorl %edi,%edi
286 jmp .Lft13
287 .previous
288 .section __ex_table,"a"
289 .quad .Lft1,domain_crash_synchronous , .Lft2,compat_crash_page_fault
290 .quad .Lft3,compat_crash_page_fault_4 , .Lft4,domain_crash_synchronous
291 .quad .Lft5,compat_crash_page_fault_4 , .Lft6,compat_crash_page_fault_8
292 .quad .Lft7,compat_crash_page_fault , .Lft8,compat_crash_page_fault
293 .quad .Lft9,compat_crash_page_fault_12, .Lft10,compat_crash_page_fault_8
294 .quad .Lft11,compat_crash_page_fault_4 , .Lft12,compat_crash_page_fault
295 .quad .Lft13,.Lfx13
296 .previous
/*
 * Fault handlers for the guest-stack writes in compat_create_bounce_frame.
 * The entry point encodes the faulting write's offset from %esi; the addl
 * cascade accumulates it so %esi ends up as the faulting guest address,
 * which is dumped via show_page_walk before crashing the domain.
 */
298 compat_crash_page_fault_12:
299 addl $4,%esi
300 compat_crash_page_fault_8:
301 addl $4,%esi
302 compat_crash_page_fault_4:
303 addl $4,%esi
304 compat_crash_page_fault:
305 .Lft14: mov %edi,%fs                  # restore hypervisor %fs
306 movl %esi,%edi                        # argument: faulting address
307 call show_page_walk
308 jmp domain_crash_synchronous
309 .section .fixup,"ax"
/* %fs reload at .Lft14 faulted: retry with a null selector. */
310 .Lfx14:
311 xorl %edi,%edi
312 jmp .Lft14
313 .previous
314 .section __ex_table,"a"
315 .quad .Lft14,.Lfx14
316 .previous
318 .section .rodata, "a", @progbits
/*
 * Hypercall dispatch table for compat guests, indexed by hypercall
 * number (consumed by "callq *(%r10,%rax,8)" in compat_hypercall).
 * Unimplemented slots are padded with compat_ni_hypercall by the .rept.
 */
320 ENTRY(compat_hypercall_table)
321 .quad compat_set_trap_table /* 0 */
322 .quad do_mmu_update
323 .quad compat_set_gdt
324 .quad do_stack_switch
325 .quad compat_set_callbacks
326 .quad do_fpu_taskswitch /* 5 */
327 .quad do_sched_op_compat
328 .quad compat_platform_op
329 .quad do_set_debugreg
330 .quad do_get_debugreg
331 .quad compat_update_descriptor /* 10 */
332 .quad compat_ni_hypercall
333 .quad compat_memory_op
334 .quad compat_multicall
335 .quad compat_update_va_mapping
336 .quad compat_set_timer_op /* 15 */
337 .quad do_event_channel_op_compat
338 .quad compat_xen_version
339 .quad do_console_io
340 .quad compat_physdev_op_compat
341 .quad compat_grant_table_op /* 20 */
342 .quad compat_vm_assist
343 .quad compat_update_va_mapping_otherdomain
344 .quad compat_iret
345 .quad compat_vcpu_op
346 .quad compat_ni_hypercall /* 25 */
347 .quad compat_mmuext_op
348 .quad do_xsm_op
349 .quad compat_nmi_op
350 .quad compat_sched_op
351 .quad compat_callback_op /* 30 */
352 .quad compat_xenoprof_op
353 .quad do_event_channel_op
354 .quad compat_physdev_op
355 .quad do_hvm_op
356 .quad do_sysctl /* 35 */
357 .quad do_domctl
358 .quad compat_kexec_op
359 .rept NR_hypercalls-((.-compat_hypercall_table)/8)
360 .quad compat_ni_hypercall
361 .endr
/*
 * Argument count for each hypercall, indexed by hypercall number.
 * Consumed by the !NDEBUG register-poisoning logic in compat_hypercall
 * (before and after dispatch).  Padded slots take 0 arguments.
 */
363 ENTRY(compat_hypercall_args_table)
364 .byte 1 /* compat_set_trap_table */ /* 0 */
365 .byte 4 /* compat_mmu_update */
366 .byte 2 /* compat_set_gdt */
367 .byte 2 /* compat_stack_switch */
368 .byte 4 /* compat_set_callbacks */
369 .byte 1 /* compat_fpu_taskswitch */ /* 5 */
370 .byte 2 /* compat_sched_op_compat */
371 .byte 1 /* compat_platform_op */
372 .byte 2 /* compat_set_debugreg */
373 .byte 1 /* compat_get_debugreg */
374 .byte 4 /* compat_update_descriptor */ /* 10 */
375 .byte 0 /* compat_ni_hypercall */
376 .byte 2 /* compat_memory_op */
377 .byte 2 /* compat_multicall */
378 .byte 4 /* compat_update_va_mapping */
379 .byte 2 /* compat_set_timer_op */ /* 15 */
380 .byte 1 /* compat_event_channel_op_compat */
381 .byte 2 /* compat_xen_version */
382 .byte 3 /* compat_console_io */
383 .byte 1 /* compat_physdev_op_compat */
384 .byte 3 /* compat_grant_table_op */ /* 20 */
385 .byte 2 /* compat_vm_assist */
386 .byte 5 /* compat_update_va_mapping_otherdomain */
387 .byte 0 /* compat_iret */
388 .byte 3 /* compat_vcpu_op */
389 .byte 0 /* compat_ni_hypercall */ /* 25 */
390 .byte 4 /* compat_mmuext_op */
391 .byte 1 /* do_xsm_op */
392 .byte 2 /* compat_nmi_op */
393 .byte 2 /* compat_sched_op */
394 .byte 2 /* compat_callback_op */ /* 30 */
395 .byte 2 /* compat_xenoprof_op */
396 .byte 2 /* compat_event_channel_op */
397 .byte 2 /* compat_physdev_op */
398 .byte 2 /* do_hvm_op */
399 .byte 1 /* do_sysctl */ /* 35 */
400 .byte 1 /* do_domctl */
401 .byte 2 /* compat_kexec_op */
402 .rept NR_hypercalls-(.-compat_hypercall_args_table)
403 .byte 0 /* compat_ni_hypercall */
404 .endr