xen/arch/x86/x86_32/entry.S (xen-unstable, changeset 9563:9bee4875a848)

Rename sched_op->sched_op_compat and sched_op_new->sched_op
after Christian's interface cleanup.

Signed-off-by: Keir Fraser <keir@xensource.com>
Author: kaf24@firebug.cl.cam.ac.uk
Date:   Sat Apr 01 11:08:50 2006 +0100
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2002-2004, K A Fraser
 * Copyright (c) 1991, 1992 Linus Torvalds
 *
 * Calling back to a guest OS:
 * ===========================
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find out
 * what the privilege level of the return code-selector is. That code
 * would just be a hassle to write, and would need to account for running
 * off the end of the GDT/LDT, for example. For all callbacks we check
 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from
 * that we're safe, as we don't allow a guest OS to install ring-0 privileges
 * into the GDT/LDT. It's up to the guest OS to ensure all returns via the
 * IDT are to ring 1. If not, we load incorrect SS/ESP values from the TSS
 * (for ring 1 rather than the correct ring) and bad things are bound to
 * ensue -- IRET is likely to fault, and we may end up killing the domain
 * (no harm can come to Xen, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *   return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, original values for DS/ES/FS/GS are placed on the stack rather
 * than in DS/ES/FS/GS themselves. Why? It saves us loading them, only to
 * have them saved/restored in the guest OS. Furthermore, if we load them
 * we may cause a fault if they are invalid, which is a hassle to deal with.
 * We avoid that problem if we don't load them :-) This property allows us
 * to use the failsafe callback as a fallback: if we ever fault on loading
 * DS/ES/FS/GS on return to ring != 0, we can simply package it up as a
 * return via the failsafe callback, and let the guest OS sort it out
 * (perhaps by killing an application process). Note that we also do this
 * for any faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - creating a callback stack frame (due to a bad ring-1 stack).
 *  - faulting IRET on entry to the failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
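
/*
 * For illustration, the failsafe frame described above written as a C
 * struct -- a sketch only, not a definition from Xen's public headers,
 * and the field names are invented:
 *
 *     struct failsafe_frame {
 *         uint32_t ds, es, fs, gs;   // saved data selectors
 *         uint32_t eip;
 *         uint32_t cs;               // saved_upcall_mask in bits 16-23
 *         uint32_t eflags;
 *         uint32_t esp, ss;          // inter-privilege returns only
 *     };
 *
 * create_bounce_frame below builds this shape, growing downwards from
 * the guest stack pointer.
 */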

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

#define GET_GUEST_REGS(reg)                     \
        movl $~(STACK_SIZE-1),reg;              \
        andl %esp,reg;                          \
        orl  $(STACK_SIZE-CPUINFO_sizeof),reg;
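
/*
 * How GET_GUEST_REGS works (a sketch of the arithmetic, assuming the
 * per-CPU stack is STACK_SIZE-aligned as the mask implies): clearing the
 * low bits of %esp with ~(STACK_SIZE-1) yields the base of the current
 * stack, and OR-ing in (STACK_SIZE - CPUINFO_sizeof) then points at the
 * cpu_info block at the top of the stack, where the guest's saved
 * cpu_user_regs live. This works from any %esp within the stack.
 */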

#define GET_CURRENT(reg)         \
        movl $STACK_SIZE-4, reg; \
        orl  %esp, reg;          \
        andl $~3,reg;            \
        movl (reg),reg;
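
/*
 * GET_CURRENT relies on the same alignment: OR-ing %esp with
 * (STACK_SIZE-4) and masking the low two bits rounds up to the last
 * aligned word of the current stack, which by this file's convention
 * holds the pointer to the running struct vcpu; the final movl loads it.
 */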

        ALIGN
restore_all_guest:
        testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
        jnz   restore_all_vm86
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        testl $2,UREGS_cs(%esp)
        jnz   1f
        call  restore_ring0_guest
        jmp   restore_all_vm86
1:
#endif
FLT1:   mov  UREGS_ds(%esp),%ds
FLT2:   mov  UREGS_es(%esp),%es
FLT3:   mov  UREGS_fs(%esp),%fs
FLT4:   mov  UREGS_gs(%esp),%gs
restore_all_vm86:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
FLT5:   iret
.section .fixup,"ax"
FIX5:   subl  $28,%esp
        pushl 28(%esp)                 # error_code/entry_vector
        movl  %eax,UREGS_eax+4(%esp)
        movl  %ebp,UREGS_ebp+4(%esp)
        movl  %edi,UREGS_edi+4(%esp)
        movl  %esi,UREGS_esi+4(%esp)
        movl  %edx,UREGS_edx+4(%esp)
        movl  %ecx,UREGS_ecx+4(%esp)
        movl  %ebx,UREGS_ebx+4(%esp)
FIX1:   SET_XEN_SEGMENTS(a)
        movl  %eax,%fs
        movl  %eax,%gs
        sti
        popl  %esi
        pushfl                         # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                     # CS
        movl  $DBLFLT1,%eax
        pushl %eax                     # EIP
        pushl %esi                     # error_code/entry_vector
        jmp   error_code
DBLFLT1:GET_CURRENT(%ebx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%ebx)
        leal  VCPU_trap_bounce(%ebx),%edx
        movl  VCPU_failsafe_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  VCPU_failsafe_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
        call  create_bounce_frame
        xorl  %eax,%eax
        movl  %eax,UREGS_ds(%esp)
        movl  %eax,UREGS_es(%esp)
        movl  %eax,UREGS_fs(%esp)
        movl  %eax,UREGS_gs(%esp)
        jmp   test_all_events
.previous
.section __pre_ex_table,"a"
        .long FLT1,FIX1
        .long FLT2,FIX1
        .long FLT3,FIX1
        .long FLT4,FIX1
        .long FLT5,FIX5
.previous
.section __ex_table,"a"
        .long DBLFLT1,failsafe_callback
.previous
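
/*
 * How the fixup machinery above fits together (roughly): if restoring a
 * guest segment register (FLT1-4) or the final iret (FLT5) faults, the
 * __pre_ex_table entries steer execution to FIX1/FIX5, which rebuild a
 * register frame and re-enter the normal error_code path with DBLFLT1 as
 * the return address. The __ex_table entry then routes any fault raised
 * against DBLFLT1 into failsafe_callback, which bounces the failure to
 * the guest's registered failsafe handler with zeroed data selectors --
 * the scheme described in the header comment.
 */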

        ALIGN
restore_all_xen:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
        iret

        ALIGN
ENTRY(hypercall)
        subl $4,%esp
        FIXUP_RING0_GUEST_STACK
        SAVE_ALL(b)
        sti
        GET_CURRENT(%ebx)
        andl $(NR_hypercalls-1),%eax
        PERFC_INCR(PERFC_hypercalls, %eax)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        pushl %eax
        pushl UREGS_eip+4(%esp)
        pushl 28(%esp)                 # EBP
        pushl 28(%esp)                 # EDI
        pushl 28(%esp)                 # ESI
        pushl 28(%esp)                 # EDX
        pushl 28(%esp)                 # ECX
        pushl 28(%esp)                 # EBX
        movzb hypercall_args_table(,%eax,1),%ecx
        leal  (%esp,%ecx,4),%edi
        subl  $6,%ecx
        negl  %ecx
        movl  %eax,%esi
        movl  $0xDEADBEEF,%eax
        rep   stosl
        movl  %esi,%eax
#endif
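/*
 * A note on the debug-build clobbering above: the six argument registers
 * are re-pushed as shadow copies (the indirect call below consumes the
 * shadows, not the saved frame), %ecx is loaded with this hypercall's
 * argument count from hypercall_args_table, and the rep stosl fills the
 * (6 - nargs) unused shadow slots with 0xDEADBEEF. Together with the
 * post-call clobbering below, this stops guests from relying on
 * parameter registers surviving a hypercall.
 */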
        call *hypercall_table(,%eax,4)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs used by this hypercall. */
        addl $24,%esp                  # Shadow parameters
        popl %ecx                      # Shadow EIP
        cmpl %ecx,UREGS_eip(%esp)
        popl %ecx                      # Shadow hypercall index
        jne  skip_clobber              # If EIP has changed then don't clobber
        movzb hypercall_args_table(,%ecx,1),%ecx
        movl %esp,%edi
        movl %eax,%esi
        movl $0xDEADBEEF,%eax
        rep  stosl
        movl %esi,%eax
skip_clobber:
#endif
        movl %eax,UREGS_eax(%esp)      # save the return value

test_all_events:
        xorl %ecx,%ecx
        notl %ecx
        cli                            # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%ebx),%eax
        shl   $IRQSTAT_shift,%eax
        test  %ecx,irq_stat(%eax,1)
        jnz   process_softirqs
        btr   $_VCPUF_nmi_pending,VCPU_flags(%ebx)
        jc    process_nmi
test_guest_events:
        movl  VCPU_vcpu_info(%ebx),%eax
        testb $0xFF,VCPUINFO_upcall_mask(%eax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%eax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leal  VCPU_trap_bounce(%ebx),%edx
        movl  VCPU_event_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  VCPU_event_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call  create_bounce_frame
        jmp   test_all_events
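
/*
 * In C-like pseudocode (bounce_to() is a made-up helper; the rest are
 * the names used above), the guest-event test is roughly:
 *
 *     if ( !vcpu_info->upcall_mask && vcpu_info->upcall_pending )
 *         bounce_to(v->event_addr, v->event_sel);   // TBF_INTERRUPT
 *
 * Delivery is suppressed while the guest has events masked, and
 * TBF_INTERRUPT makes create_bounce_frame set the mask on entry so the
 * callback runs with further events disabled.
 */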

        ALIGN
process_softirqs:
        sti
        call do_softirq
        jmp  test_all_events

        ALIGN
process_nmi:
        movl VCPU_nmi_addr(%ebx),%eax
        test %eax,%eax
        jz   test_all_events
        bts  $_VCPUF_nmi_masked,VCPU_flags(%ebx)
        jc   1f
        sti
        leal VCPU_trap_bounce(%ebx),%edx
        movl %eax,TRAPBOUNCE_eip(%edx)
        movw $FLAT_KERNEL_CS,TRAPBOUNCE_cs(%edx)
        movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call create_bounce_frame
        jmp  test_all_events
1:      bts  $_VCPUF_nmi_pending,VCPU_flags(%ebx)
        jmp  test_guest_events
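
/*
 * The two flag bits implement a simple NMI protocol: _VCPUF_nmi_pending
 * marks an NMI awaiting delivery, and _VCPUF_nmi_masked is set (by the
 * bts above) while one is being delivered. If the vcpu is already
 * masked, the pending bit is re-set and delivery is retried on a later
 * pass rather than nesting NMIs inside the guest's handler.
 */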

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                                        */
/* %edx == trap_bounce, %ebx == struct vcpu                              */
/* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp.     */
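/*
 * Layout of the frame built below, from the lowest address upwards on
 * the guest stack (bracketed entries present only when the relevant
 * TBF_ flag is set or an inter-privilege return is needed):
 *
 *   [DS, ES, FS, GS]   <- TBF_FAILSAFE only
 *   [error_code]       <- TBF_EXCEPTION_ERRCODE only
 *   EIP
 *   CS                 <- saved_upcall_mask folded into bits 16-23
 *   EFLAGS
 *   [ESP, SS]          <- only when entering from ring != 1 (or VM86)
 */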
create_bounce_frame:
        movl  UREGS_eflags+4(%esp),%ecx
        movb  UREGS_cs+4(%esp),%cl
        testl $(2|X86_EFLAGS_VM),%ecx
        jz    ring1 /* jump if returning to an existing ring-1 activation */
        movl  VCPU_kernel_sp(%ebx),%esi
FLT6:   mov   VCPU_kernel_ss(%ebx),%gs
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz    nvm86_1
        subl  $16,%esi                 /* push ES/DS/FS/GS (VM86 stack frame) */
        movl  UREGS_es+4(%esp),%eax
FLT7:   movl  %eax,%gs:(%esi)
        movl  UREGS_ds+4(%esp),%eax
FLT8:   movl  %eax,%gs:4(%esi)
        movl  UREGS_fs+4(%esp),%eax
FLT9:   movl  %eax,%gs:8(%esi)
        movl  UREGS_gs+4(%esp),%eax
FLT10:  movl  %eax,%gs:12(%esi)
nvm86_1:subl  $8,%esi                  /* push SS/ESP (inter-priv iret) */
        movl  UREGS_esp+4(%esp),%eax
FLT11:  movl  %eax,%gs:(%esi)
        movl  UREGS_ss+4(%esp),%eax
FLT12:  movl  %eax,%gs:4(%esi)
        jmp   1f
ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl  UREGS_esp+4(%esp),%esi
FLT13:  mov   UREGS_ss+4(%esp),%gs
1:      /* Construct a stack frame: EFLAGS, CS/EIP */
        movb  TRAPBOUNCE_flags(%edx),%cl
        subl  $12,%esi
        movl  UREGS_eip+4(%esp),%eax
FLT14:  movl  %eax,%gs:(%esi)
        movl  VCPU_vcpu_info(%ebx),%eax
        pushl VCPUINFO_upcall_mask(%eax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                      # TBF_INTERRUPT -> set upcall mask
        orb   %ch,VCPUINFO_upcall_mask(%eax)
        popl  %eax
        shll  $16,%eax                 # Bits 16-23: saved_upcall_mask
        movw  UREGS_cs+4(%esp),%ax     # Bits  0-15: CS
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        testw $2,%ax
        jnz   FLT15
        and   $~3,%ax                  # RPL 1 -> RPL 0
#endif
FLT15:  movl  %eax,%gs:4(%esi)
        test  $0x00FF0000,%eax         # Bits 16-23: saved_upcall_mask
        setz  %ch                      # %ch == !saved_upcall_mask
        movl  UREGS_eflags+4(%esp),%eax
        andl  $~X86_EFLAGS_IF,%eax
        shlb  $1,%ch                   # Bit 9 (EFLAGS.IF)
        orb   %ch,%ah                  # Fold EFLAGS.IF into %eax
FLT16:  movl  %eax,%gs:8(%esi)
        test  $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subl  $4,%esi                  # push error_code onto guest frame
        movl  TRAPBOUNCE_error_code(%edx),%eax
FLT17:  movl  %eax,%gs:(%esi)
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subl  $16,%esi                 # add DS/ES/FS/GS to failsafe stack frame
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz    nvm86_2
        xorl  %eax,%eax                # VM86: we write zero selector values
FLT18:  movl  %eax,%gs:(%esi)
FLT19:  movl  %eax,%gs:4(%esi)
FLT20:  movl  %eax,%gs:8(%esi)
FLT21:  movl  %eax,%gs:12(%esi)
        jmp   2f
nvm86_2:movl  UREGS_ds+4(%esp),%eax    # non-VM86: write real selector values
FLT22:  movl  %eax,%gs:(%esi)
        movl  UREGS_es+4(%esp),%eax
FLT23:  movl  %eax,%gs:4(%esi)
        movl  UREGS_fs+4(%esp),%eax
FLT24:  movl  %eax,%gs:8(%esi)
        movl  UREGS_gs+4(%esp),%eax
FLT25:  movl  %eax,%gs:12(%esi)
2:      testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz    nvm86_3
        xorl  %eax,%eax                /* zero DS-GS, just as a real CPU would */
        movl  %eax,UREGS_ds+4(%esp)
        movl  %eax,UREGS_es+4(%esp)
        movl  %eax,UREGS_fs+4(%esp)
        movl  %eax,UREGS_gs+4(%esp)
nvm86_3:/* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl  $0xfffcbeff,UREGS_eflags+4(%esp)
        mov   %gs,UREGS_ss+4(%esp)
        movl  %esi,UREGS_esp+4(%esp)
        movzwl TRAPBOUNCE_cs(%edx),%eax
        movl  %eax,UREGS_cs+4(%esp)
        movl  TRAPBOUNCE_eip(%edx),%eax
        test  %eax,%eax
        jz    domain_crash_synchronous
        movl  %eax,UREGS_eip+4(%esp)
        movb  $0,TRAPBOUNCE_flags(%edx)
        ret
.section __ex_table,"a"
        .long FLT6,domain_crash_synchronous ,  FLT7,domain_crash_synchronous
        .long FLT8,domain_crash_synchronous ,  FLT9,domain_crash_synchronous
        .long FLT10,domain_crash_synchronous , FLT11,domain_crash_synchronous
        .long FLT12,domain_crash_synchronous , FLT13,domain_crash_synchronous
        .long FLT14,domain_crash_synchronous , FLT15,domain_crash_synchronous
        .long FLT16,domain_crash_synchronous , FLT17,domain_crash_synchronous
        .long FLT18,domain_crash_synchronous , FLT19,domain_crash_synchronous
        .long FLT20,domain_crash_synchronous , FLT21,domain_crash_synchronous
        .long FLT22,domain_crash_synchronous , FLT23,domain_crash_synchronous
        .long FLT24,domain_crash_synchronous , FLT25,domain_crash_synchronous
.previous

domain_crash_synchronous_string:
        .asciz "domain_crash_sync called from entry.S (%lx)\n"

domain_crash_synchronous:
        pushl $domain_crash_synchronous_string
        call  printf
        jmp   __domain_crash_synchronous

        ALIGN
process_guest_exception_and_events:
        leal  VCPU_trap_bounce(%ebx),%edx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
        jz    test_all_events
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   test_all_events
        jmp   restore_all_xen

ENTRY(divide_error)
        pushl $TRAP_divide_error<<16
        ALIGN
error_code:
        FIXUP_RING0_GUEST_STACK
        SAVE_ALL_NOSEGREGS(a)
        SET_XEN_SEGMENTS(a)
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
        jz    exception_with_ints_disabled
        sti                             # re-enable interrupts
        xorl  %eax,%eax
        movw  UREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
        pushl %edx                      # push the cpu_user_regs pointer
        GET_CURRENT(%ebx)
        PERFC_INCR(PERFC_exceptions, %eax)
        call  *exception_table(,%eax,4)
        addl  $4,%esp
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        jmp   process_guest_exception_and_events

exception_with_ints_disabled:
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax   # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        pushl %esp
        call  search_pre_exception_table
        addl  $4,%esp
        testl %eax,%eax                 # no fixup code for faulting EIP?
        jz    FATAL_exception_with_ints_disabled
        movl  %eax,UREGS_eip(%esp)
        movl  %esp,%esi
        subl  $4,%esp
        movl  %esp,%edi
        movl  $UREGS_kernel_sizeof/4,%ecx
        rep;  movsl                     # make room for error_code/entry_vector
        movl  UREGS_error_code(%esp),%eax # error_code/entry_vector
        movl  %eax,UREGS_kernel_sizeof(%esp)
        jmp   restore_all_xen           # return to fixup code
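
/*
 * A note on the frame shuffle above (as best we can tell from the
 * offsets): the saved register frame is copied down one word and the
 * error_code/entry_vector word re-stored just above the copy, so that
 * after restore_all_xen's iret the fixup handler (e.g. FIX5) finds
 * error_code/entry_vector at the top of its stack, which is what the
 * pre-exception fixup code expects.
 */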

FATAL_exception_with_ints_disabled:
        xorl  %esi,%esi
        movw  UREGS_entry_vector(%esp),%si
        movl  %esp,%edx
        pushl %edx                      # push the cpu_user_regs pointer
        pushl %esi                      # push the trapnr (entry vector)
        call  fatal_trap
        ud2

ENTRY(coprocessor_error)
        pushl $TRAP_copro_error<<16
        jmp   error_code

ENTRY(simd_coprocessor_error)
        pushl $TRAP_simd_error<<16
        jmp   error_code

ENTRY(device_not_available)
        pushl $TRAP_no_device<<16
        jmp   error_code

ENTRY(debug)
        pushl $TRAP_debug<<16
        jmp   error_code

ENTRY(int3)
        pushl $TRAP_int3<<16
        jmp   error_code

ENTRY(overflow)
        pushl $TRAP_overflow<<16
        jmp   error_code

ENTRY(bounds)
        pushl $TRAP_bounds<<16
        jmp   error_code

ENTRY(invalid_op)
        pushl $TRAP_invalid_op<<16
        jmp   error_code

ENTRY(coprocessor_segment_overrun)
        pushl $TRAP_copro_seg<<16
        jmp   error_code

ENTRY(invalid_TSS)
        movw  $TRAP_invalid_tss,2(%esp)
        jmp   error_code

ENTRY(segment_not_present)
        movw  $TRAP_no_segment,2(%esp)
        jmp   error_code

ENTRY(stack_segment)
        movw  $TRAP_stack_error,2(%esp)
        jmp   error_code

ENTRY(general_protection)
        movw  $TRAP_gp_fault,2(%esp)
        jmp   error_code

ENTRY(alignment_check)
        movw  $TRAP_alignment_check,2(%esp)
        jmp   error_code

ENTRY(page_fault)
        movw  $TRAP_page_fault,2(%esp)
        jmp   error_code

ENTRY(machine_check)
        pushl $TRAP_machine_check<<16
        jmp   error_code

ENTRY(spurious_interrupt_bug)
        pushl $TRAP_spurious_int<<16
        jmp   error_code
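
/*
 * Two encodings are in play above: traps for which the CPU pushes no
 * error code push a whole $vector<<16 word themselves (leaving zero in
 * the low 16 bits), while traps that arrive with a hardware error code
 * write the vector into the upper half of that word via movw to 2(%esp).
 * Either way the frame ends up with error_code in bits 0-15 and
 * entry_vector in bits 16-31.
 */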

ENTRY(nmi)
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        # NMI entry protocol is incompatible with guest kernel in ring 0.
        iret
#else
        # Save state but do not trash the segment registers!
        # We may otherwise be unable to reload them or copy them to ring 1.
        pushl %eax
        SAVE_ALL_NOSEGREGS(a)

        # We can only process the NMI if:
        #  A. We are the outermost Xen activation (in which case we have
        #     the selectors safely saved on our stack)
        #  B. DS and ES contain sane Xen values.
        # In all other cases we bail without touching DS-GS, as we have
        # interrupted an enclosing Xen activation in tricky prologue or
        # epilogue code.
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   continue_nmi
        movl  %ds,%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   defer_nmi
        movl  %es,%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   defer_nmi

continue_nmi:
        SET_XEN_SEGMENTS(d)
        movl  %esp,%edx
        pushl %edx
        call  do_nmi
        addl  $4,%esp
        jmp   ret_from_intr

defer_nmi:
        movl  $FIXMAP_apic_base,%eax
        # apic_wait_icr_idle()
1:      movl  %ss:APIC_ICR(%eax),%ebx
        testl $APIC_ICR_BUSY,%ebx
        jnz   1b
        # __send_IPI_shortcut(APIC_DEST_SELF, TRAP_deferred_nmi)
        movl  $(APIC_DM_FIXED | APIC_DEST_SELF | APIC_DEST_LOGICAL | \
                TRAP_deferred_nmi),%ss:APIC_ICR(%eax)
        jmp   restore_all_xen
#endif /* !CONFIG_X86_SUPERVISOR_MODE_KERNEL */
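
/*
 * Why defer_nmi above uses a self-IPI: if the NMI interrupted Xen at a
 * point where DS/ES may not hold Xen selectors, it cannot safely be
 * handled in place, so the local APIC is asked to re-raise it at this
 * CPU as an ordinary fixed interrupt with vector TRAP_deferred_nmi.
 * That vector is presumably delivered once interrupts are re-enabled in
 * a safe context, at which point the NMI work runs through the normal
 * interrupt path.
 */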

ENTRY(setup_vm86_frame)
        # Copies the entire stack frame forwards by 16 bytes.
        .macro copy_vm86_words count=18
        .if \count
        pushl ((\count-1)*4)(%esp)
        popl  ((\count-1)*4)+16(%esp)
        copy_vm86_words "(\count-1)"
        .endif
        .endm
        copy_vm86_words
        addl $16,%esp
        ret
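
/*
 * The recursive .macro above unrolls at assembly time into 18 push/pop
 * pairs, one per frame word, copying from the highest word downwards.
 * The order matters: source and destination overlap (the destination is
 * 16 bytes above the source), so each word must be moved before its slot
 * is overwritten. The trailing addl $16,%esp then switches to the
 * relocated copy; the 16 bytes freed match the four extra slots
 * (ES/DS/FS/GS) that a VM86 frame carries.
 */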

do_arch_sched_op_compat:
        # Ensure we return success even if we return via schedule_tail()
        xorl %eax,%eax
        GET_GUEST_REGS(%ecx)
        movl %eax,UREGS_eax(%ecx)
        jmp  do_sched_op_compat

do_arch_sched_op:
        # Ensure we return success even if we return via schedule_tail()
        xorl %eax,%eax
        GET_GUEST_REGS(%ecx)
        movl %eax,UREGS_eax(%ecx)
        jmp  do_sched_op
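
/*
 * The pre-stored zero matters because a blocking scheduler operation may
 * never return through the hypercall dispatcher: if the vcpu gets
 * descheduled, it resumes via schedule_tail() straight into the guest
 * resume path, so UREGS_eax must already hold the success value the
 * guest will observe.
 */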

.data

ENTRY(exception_table)
        .long do_divide_error
        .long do_debug
        .long 0 # nmi
        .long do_int3
        .long do_overflow
        .long do_bounds
        .long do_invalid_op
        .long math_state_restore
        .long 0 # double fault
        .long do_coprocessor_segment_overrun
        .long do_invalid_TSS
        .long do_segment_not_present
        .long do_stack_segment
        .long do_general_protection
        .long do_page_fault
        .long do_spurious_interrupt_bug
        .long do_coprocessor_error
        .long do_alignment_check
        .long do_machine_check
        .long do_simd_coprocessor_error

ENTRY(hypercall_table)
        .long do_set_trap_table     /*  0 */
        .long do_mmu_update
        .long do_set_gdt
        .long do_stack_switch
        .long do_set_callbacks
        .long do_fpu_taskswitch     /*  5 */
        .long do_arch_sched_op_compat
        .long do_dom0_op
        .long do_set_debugreg
        .long do_get_debugreg
        .long do_update_descriptor  /* 10 */
        .long do_ni_hypercall
        .long do_memory_op
        .long do_multicall
        .long do_update_va_mapping
        .long do_set_timer_op       /* 15 */
        .long do_event_channel_op
        .long do_xen_version
        .long do_console_io
        .long do_physdev_op
        .long do_grant_table_op     /* 20 */
        .long do_vm_assist
        .long do_update_va_mapping_otherdomain
        .long do_iret
        .long do_vcpu_op
        .long do_ni_hypercall       /* 25 */
        .long do_mmuext_op
        .long do_acm_op
        .long do_nmi_op
        .long do_arch_sched_op
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long do_ni_hypercall
        .endr
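
/*
 * The .rept padding fills every remaining slot up to NR_hypercalls with
 * do_ni_hypercall. Combined with the 'andl $(NR_hypercalls-1),%eax' mask
 * at the hypercall entry point, any index a guest supplies lands on a
 * valid table entry, so unimplemented hypercall numbers fail cleanly.
 */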

ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table */    /*  0 */
        .byte 4 /* do_mmu_update */
        .byte 2 /* do_set_gdt */
        .byte 2 /* do_stack_switch */
        .byte 4 /* do_set_callbacks */
        .byte 1 /* do_fpu_taskswitch */    /*  5 */
        .byte 2 /* do_arch_sched_op_compat */
        .byte 1 /* do_dom0_op */
        .byte 2 /* do_set_debugreg */
        .byte 1 /* do_get_debugreg */
        .byte 4 /* do_update_descriptor */ /* 10 */
        .byte 0 /* do_ni_hypercall */
        .byte 2 /* do_memory_op */
        .byte 2 /* do_multicall */
        .byte 4 /* do_update_va_mapping */
        .byte 2 /* do_set_timer_op */      /* 15 */
        .byte 1 /* do_event_channel_op */
        .byte 2 /* do_xen_version */
        .byte 3 /* do_console_io */
        .byte 1 /* do_physdev_op */
        .byte 3 /* do_grant_table_op */    /* 20 */
        .byte 2 /* do_vm_assist */
        .byte 5 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_iret */
        .byte 3 /* do_vcpu_op */
        .byte 0 /* do_ni_hypercall */      /* 25 */
        .byte 4 /* do_mmuext_op */
        .byte 1 /* do_acm_op */
        .byte 2 /* do_nmi_op */
        .byte 2 /* do_arch_sched_op */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall */
        .endr