ia64/xen-unstable

view xen/arch/x86/x86_32/entry.S @ 19835:edfdeb150f27

Fix buildsystem to detect udev > version 124

udev removed the udevinfo symlink from versions higher than 123 and
xen's build-system could not detect if udev is in place and has the
required version.

Signed-off-by: Marc-A. Dahlhaus <mad@wol.de>

author   Keir Fraser <keir.fraser@citrix.com>
date     Thu Jun 25 13:02:37 2009 +0100
parents  db20b819679c
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2002-2004, K A Fraser
 * Copyright (c) 1991, 1992 Linus Torvalds
 *
 * Calling back to a guest OS:
 * ===========================
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find out
 * the privilege level of the return code-selector. That code would just be
 * a hassle to write, and would need to account for running off the end of
 * the GDT/LDT, for example. For all callbacks we check that the provided
 * return CS is not == __HYPERVISOR_{CS,DS}. Apart from that we're safe, as
 * we don't allow a guest OS to install ring-0 privileges into the GDT/LDT.
 * It's up to the guest OS to ensure all returns via the IDT are to ring 1.
 * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather
 * than the correct ring) and bad things are bound to ensue -- IRET is
 * likely to fault, and we may end up killing the domain (no harm can
 * come to Xen, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *   return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, original values for DS/ES/FS/GS are placed on the stack rather
 * than in DS/ES/FS/GS themselves. Why? It saves us loading them, only to
 * have them saved/restored in the guest OS. Furthermore, if we load them we
 * may cause a fault if they are invalid, which is a hassle to deal with.
 * We avoid that problem if we don't load them :-) This property allows us
 * to use the failsafe callback as a fallback: if we ever fault on loading
 * DS/ES/FS/GS on return to ring != 0, we can simply package it up as a
 * return via the failsafe callback, and let the guest OS sort it out
 * (perhaps by killing an application process). Note that we also do this
 * for any faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - faulting while creating a callback stack frame (due to a bad ring-1
 *    stack).
 *  - faulting IRET on entry to the failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
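
/*
 * For illustration only: the failsafe frame described above as it sits in
 * guest memory, lowest address first. Field names here are ours, not taken
 * from this file, and the layout is a sketch of the description above:
 *
 *     struct failsafe_frame {
 *         uint32_t ds, es, fs, gs;   -- original data segment selectors
 *         uint32_t eip;
 *         uint32_t cs;
 *         uint32_t eflags;
 *         uint32_t esp, ss;          -- present only for inter-privilege returns
 *     };
 */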

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

#define GET_GUEST_REGS(reg)                     \
        movl $~(STACK_SIZE-1),reg;              \
        andl %esp,reg;                          \
        orl  $(STACK_SIZE-CPUINFO_sizeof),reg;

#define GET_CURRENT(reg)                        \
        movl $STACK_SIZE-4, reg;                \
        orl  %esp, reg;                         \
        andl $~3,reg;                           \
        movl (reg),reg;
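
/*
 * Roughly, in C (a sketch; it assumes the per-CPU stack is STACK_SIZE bytes
 * and STACK_SIZE-aligned, that the guest cpu_user_regs sit CPUINFO_sizeof
 * bytes below the stack top, and that the topmost word of the stack holds
 * the current vcpu pointer):
 *
 *     regs    = (esp & ~(STACK_SIZE - 1)) | (STACK_SIZE - CPUINFO_sizeof);
 *     current = *(struct vcpu **)((esp | (STACK_SIZE - 4)) & ~3);
 */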

        ALIGN
restore_all_guest:
        ASSERT_INTERRUPTS_DISABLED
        testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        leal  4(%esp),%esp
        jnz   .Lrestore_iret_guest
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        testb $2,UREGS_cs-UREGS_eip(%esp)
        jnz   .Lrestore_sregs_guest
        call  restore_ring0_guest
        jmp   .Lrestore_iret_guest
#endif
.Lrestore_sregs_guest:
.Lft1:  mov   UREGS_ds-UREGS_eip(%esp),%ds
.Lft2:  mov   UREGS_es-UREGS_eip(%esp),%es
.Lft3:  mov   UREGS_fs-UREGS_eip(%esp),%fs
.Lft4:  mov   UREGS_gs-UREGS_eip(%esp),%gs
.Lrestore_iret_guest:
.Lft5:  iret
.section .fixup,"ax"
.Lfx1:  sti
        SAVE_ALL_GPRS
        mov   UREGS_error_code(%esp),%esi
        pushfl                          # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                      # CS
        movl  $.Ldf1,%eax
        pushl %eax                      # EIP
        pushl %esi                      # error_code/entry_vector
        jmp   handle_exception
.Ldf1:  GET_CURRENT(%ebx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%ebx)
        leal  VCPU_trap_bounce(%ebx),%edx
        movl  VCPU_failsafe_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  VCPU_failsafe_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movb  $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
        bt    $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%ebx)
        jnc   1f
        orb   $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
1:      call  create_bounce_frame
        xorl  %eax,%eax
        movl  %eax,UREGS_ds(%esp)
        movl  %eax,UREGS_es(%esp)
        movl  %eax,UREGS_fs(%esp)
        movl  %eax,UREGS_gs(%esp)
        jmp   test_all_events
.previous
.section __pre_ex_table,"a"
        .long .Lft1,.Lfx1
        .long .Lft2,.Lfx1
        .long .Lft3,.Lfx1
        .long .Lft4,.Lfx1
        .long .Lft5,.Lfx1
.previous
.section __ex_table,"a"
        .long .Ldf1,failsafe_callback
.previous

        ALIGN
restore_all_xen:
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        addl  $4,%esp
        iret

        ALIGN
ENTRY(hypercall)
        subl  $4,%esp
        FIXUP_RING0_GUEST_STACK
        SAVE_ALL(1f,1f)
1:      sti
        GET_CURRENT(%ebx)
        cmpl  $NR_hypercalls,%eax
        jae   bad_hypercall
        PERFC_INCR(PERFC_hypercalls, %eax, %ebx)
#ifndef NDEBUG
        /* Create shadow parameters and corrupt those not used by this call. */
        pushl %eax
        pushl UREGS_eip+4(%esp)
        pushl 28(%esp) # EBP
        pushl 28(%esp) # EDI
        pushl 28(%esp) # ESI
        pushl 28(%esp) # EDX
        pushl 28(%esp) # ECX
        pushl 28(%esp) # EBX
        movzb hypercall_args_table(,%eax,1),%ecx
        leal  (%esp,%ecx,4),%edi
        subl  $6,%ecx
        negl  %ecx
        movl  %eax,%esi
        movl  $0xDEADBEEF,%eax
        rep   stosl
        movl  %esi,%eax
#define SHADOW_BYTES 32 /* 6 shadow parameters + EIP + hypercall # */
#else
        /*
         * We need shadow parameters even on non-debug builds. We depend on the
         * original versions not being clobbered (needed to create a hypercall
         * continuation). But that isn't guaranteed by the function-call ABI.
         */
        pushl 20(%esp) # EBP
        pushl 20(%esp) # EDI
        pushl 20(%esp) # ESI
        pushl 20(%esp) # EDX
        pushl 20(%esp) # ECX
        pushl 20(%esp) # EBX
#define SHADOW_BYTES 24 /* 6 shadow parameters */
#endif
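        /*
         * Sketch of the stack at this point (byte offsets from %esp); the six
         * low words are the shadow copies that the C handler sees as its
         * stack-based arguments:
         *
         *      0: EBX   4: ECX   8: EDX  12: ESI  16: EDI  20: EBP
         *     24: guest EIP, 28: hypercall number   (debug builds only)
         *     ...followed by the saved struct cpu_user_regs.
         */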
        cmpb  $0,tb_init_done
        je    1f
        call  trace_hypercall
        /* Now restore all the registers that trace_hypercall clobbered */
        movl  UREGS_eax+SHADOW_BYTES(%esp),%eax /* Hypercall # */
#undef SHADOW_BYTES
1:      call  *hypercall_table(,%eax,4)
        addl  $24,%esp                  # Discard the shadow parameters
#ifndef NDEBUG
        /* Deliberately corrupt real parameter regs used by this hypercall. */
        popl  %ecx                      # Shadow EIP
        cmpl  %ecx,UREGS_eip+4(%esp)
        popl  %ecx                      # Shadow hypercall index
        jne   skip_clobber              # If EIP has changed then don't clobber
        movzb hypercall_args_table(,%ecx,1),%ecx
        movl  %esp,%edi
        movl  %eax,%esi
        movl  $0xDEADBEEF,%eax
        rep   stosl
        movl  %esi,%eax
skip_clobber:
#endif
        movl  %eax,UREGS_eax(%esp)      # save the return value

test_all_events:
        xorl  %ecx,%ecx
        notl  %ecx
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%ebx),%eax
        shl   $IRQSTAT_shift,%eax
        test  %ecx,irq_stat(%eax,1)
        jnz   process_softirqs
        testb $1,VCPU_mce_pending(%ebx)
        jnz   process_mce
        testb $1,VCPU_nmi_pending(%ebx)
        jnz   process_nmi
test_guest_events:
        movl  VCPU_vcpu_info(%ebx),%eax
        testb $0xFF,VCPUINFO_upcall_mask(%eax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%eax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leal  VCPU_trap_bounce(%ebx),%edx
        movl  VCPU_event_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  VCPU_event_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
process_softirqs:
        sti
        call  do_softirq
        jmp   test_all_events

        ALIGN
/* %ebx: struct vcpu */
process_mce:
        cmpw  $VCPU_TRAP_MCE,VCPU_trap_priority(%ebx)
        jae   test_guest_events
        sti
        movb  $0,VCPU_mce_pending(%ebx)
        call  set_guest_machinecheck_trapbounce
        test  %eax,%eax
        jz    test_all_events
        movw  VCPU_trap_priority(%ebx),%dx      # safe priority for the
        movw  %dx,VCPU_old_trap_priority(%ebx)  # iret hypercall
        movw  $VCPU_TRAP_MCE,VCPU_trap_priority(%ebx)
        jmp   process_trap

        ALIGN
/* %ebx: struct vcpu */
process_nmi:
        cmpw  $VCPU_TRAP_NMI,VCPU_trap_priority(%ebx)
        jae   test_guest_events
        sti
        movb  $0,VCPU_nmi_pending(%ebx)
        call  set_guest_nmi_trapbounce
        test  %eax,%eax
        jz    test_all_events
        movw  VCPU_trap_priority(%ebx),%dx      # safe priority for the
        movw  %dx,VCPU_old_trap_priority(%ebx)  # iret hypercall
        movw  $VCPU_TRAP_NMI,VCPU_trap_priority(%ebx)
        /* FALLTHROUGH */
process_trap:
        leal  VCPU_trap_bounce(%ebx),%edx
        call  create_bounce_frame
        jmp   test_all_events

bad_hypercall:
        movl  $-ENOSYS,UREGS_eax(%esp)
        jmp   test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:        */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                                    */
/* %edx == trap_bounce, %ebx == struct vcpu                          */
/* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp. */
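/* A sketch of the resulting guest-stack frame, lowest address first;    */
/* bracketed entries are optional:                                       */
/*   [DS, ES, FS, GS]   -- TBF_FAILSAFE bounces only                     */
/*   [error_code]       -- only if TBF_EXCEPTION_ERRCODE is set          */
/*   EIP, CS, EFLAGS    -- CS bits 16-23 hold saved_upcall_mask          */
/*   [ESP, SS]          -- unless returning to an existing ring-1 stack  */
/*   [ES, DS, FS, GS]   -- VM86 frames only (hardware iret layout)       */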
create_bounce_frame:
        ASSERT_INTERRUPTS_ENABLED
        movl  UREGS_eflags+4(%esp),%ecx
        movb  UREGS_cs+4(%esp),%cl
        testl $(2|X86_EFLAGS_VM),%ecx
        jz    ring1 /* jump if returning to an existing ring-1 activation */
        movl  VCPU_kernel_sp(%ebx),%esi
.Lft6:  mov   VCPU_kernel_ss(%ebx),%gs
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz    .Lnvm86_1
        subl  $16,%esi                  /* push ES/DS/FS/GS (VM86 stack frame) */
        movl  UREGS_es+4(%esp),%eax
.Lft7:  movl  %eax,%gs:(%esi)
        movl  UREGS_ds+4(%esp),%eax
.Lft8:  movl  %eax,%gs:4(%esi)
        movl  UREGS_fs+4(%esp),%eax
.Lft9:  movl  %eax,%gs:8(%esi)
        movl  UREGS_gs+4(%esp),%eax
.Lft10: movl  %eax,%gs:12(%esi)
.Lnvm86_1:
        subl  $8,%esi                   /* push SS/ESP (inter-priv iret) */
        movl  UREGS_esp+4(%esp),%eax
.Lft11: movl  %eax,%gs:(%esi)
        movl  UREGS_ss+4(%esp),%eax
.Lft12: movl  %eax,%gs:4(%esi)
        jmp   1f
ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl  UREGS_esp+4(%esp),%esi
.Lft13: mov   UREGS_ss+4(%esp),%gs
1:      /* Construct a stack frame: EFLAGS, CS/EIP */
        movb  TRAPBOUNCE_flags(%edx),%cl
        subl  $12,%esi
        movl  UREGS_eip+4(%esp),%eax
.Lft14: movl  %eax,%gs:(%esi)
        movl  VCPU_vcpu_info(%ebx),%eax
        pushl VCPUINFO_upcall_mask(%eax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                       # TBF_INTERRUPT -> set upcall mask
        orb   %ch,VCPUINFO_upcall_mask(%eax)
        popl  %eax
        shll  $16,%eax                  # Bits 16-23: saved_upcall_mask
        movw  UREGS_cs+4(%esp),%ax      # Bits  0-15: CS
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        testw $2,%ax
        jnz   .Lft15
        and   $~3,%ax                   # RPL 1 -> RPL 0
#endif
.Lft15: movl  %eax,%gs:4(%esi)
        test  $0x00FF0000,%eax          # Bits 16-23: saved_upcall_mask
        setz  %ch                       # %ch == !saved_upcall_mask
        movl  UREGS_eflags+4(%esp),%eax
        andl  $~X86_EFLAGS_IF,%eax
        shlb  $1,%ch                    # Bit 9 (EFLAGS.IF)
        orb   %ch,%ah                   # Fold EFLAGS.IF into %eax
.Lft16: movl  %eax,%gs:8(%esi)
        test  $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subl  $4,%esi                   # push error_code onto guest frame
        movl  TRAPBOUNCE_error_code(%edx),%eax
.Lft17: movl  %eax,%gs:(%esi)
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subl  $16,%esi                  # add DS/ES/FS/GS to failsafe stack frame
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz    .Lnvm86_2
        xorl  %eax,%eax                 # VM86: we write zero selector values
.Lft18: movl  %eax,%gs:(%esi)
.Lft19: movl  %eax,%gs:4(%esi)
.Lft20: movl  %eax,%gs:8(%esi)
.Lft21: movl  %eax,%gs:12(%esi)
        jmp   2f
.Lnvm86_2:
        movl  UREGS_ds+4(%esp),%eax     # non-VM86: write real selector values
.Lft22: movl  %eax,%gs:(%esi)
        movl  UREGS_es+4(%esp),%eax
.Lft23: movl  %eax,%gs:4(%esi)
        movl  UREGS_fs+4(%esp),%eax
.Lft24: movl  %eax,%gs:8(%esi)
        movl  UREGS_gs+4(%esp),%eax
.Lft25: movl  %eax,%gs:12(%esi)
2:      testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz    .Lnvm86_3
        xorl  %eax,%eax                 /* zero DS-GS, just as a real CPU would */
        movl  %eax,UREGS_ds+4(%esp)
        movl  %eax,UREGS_es+4(%esp)
        movl  %eax,UREGS_fs+4(%esp)
        movl  %eax,UREGS_gs+4(%esp)
.Lnvm86_3:
        /* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl  $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
                 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+4(%esp)
        mov   %gs,UREGS_ss+4(%esp)
        movl  %esi,UREGS_esp+4(%esp)
        movzwl TRAPBOUNCE_cs(%edx),%eax
        /* Null selectors (0-3) are not allowed. */
        testl $~3,%eax
        jz    domain_crash_synchronous
        movl  %eax,UREGS_cs+4(%esp)
        movl  TRAPBOUNCE_eip(%edx),%eax
        movl  %eax,UREGS_eip+4(%esp)
        ret
.section __ex_table,"a"
        .long .Lft6,domain_crash_synchronous ,  .Lft7,domain_crash_synchronous
        .long .Lft8,domain_crash_synchronous ,  .Lft9,domain_crash_synchronous
        .long .Lft10,domain_crash_synchronous , .Lft11,domain_crash_synchronous
        .long .Lft12,domain_crash_synchronous , .Lft13,domain_crash_synchronous
        .long .Lft14,domain_crash_synchronous , .Lft15,domain_crash_synchronous
        .long .Lft16,domain_crash_synchronous , .Lft17,domain_crash_synchronous
        .long .Lft18,domain_crash_synchronous , .Lft19,domain_crash_synchronous
        .long .Lft20,domain_crash_synchronous , .Lft21,domain_crash_synchronous
        .long .Lft22,domain_crash_synchronous , .Lft23,domain_crash_synchronous
        .long .Lft24,domain_crash_synchronous , .Lft25,domain_crash_synchronous
.previous

domain_crash_synchronous_string:
        .asciz "domain_crash_sync called from entry.S (%lx)\n"

domain_crash_synchronous:
        pushl $domain_crash_synchronous_string
        call  printk
        jmp   __domain_crash_synchronous

        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   test_all_events
        jmp   restore_all_xen

ENTRY(divide_error)
        pushl $TRAP_divide_error<<16
        ALIGN
handle_exception:
        FIXUP_RING0_GUEST_STACK
        SAVE_ALL(1f,2f)
        .text 1
        /* Exception within Xen: make sure we have valid %ds,%es. */
1:      mov   %ecx,%ds
        mov   %ecx,%es
        jmp   2f
        .previous
2:      testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
        jz    exception_with_ints_disabled
        sti                             # re-enable interrupts
1:      xorl  %eax,%eax
        movw  UREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
        pushl %edx                      # push the cpu_user_regs pointer
        GET_CURRENT(%ebx)
        PERFC_INCR(PERFC_exceptions, %eax, %ebx)
        call  *exception_table(,%eax,4)
        addl  $4,%esp
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        leal  VCPU_trap_bounce(%ebx),%edx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
        jz    test_all_events
        call  create_bounce_frame
        movb  $0,TRAPBOUNCE_flags(%edx)
        jmp   test_all_events

exception_with_ints_disabled:
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax   # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        pushl %esp
        call  search_pre_exception_table
        addl  $4,%esp
        testl %eax,%eax                 # no fixup code for faulting EIP?
        jz    1b
        movl  %eax,UREGS_eip(%esp)
        movl  %esp,%esi
        subl  $4,%esp
        movl  %esp,%edi
        movl  $UREGS_kernel_sizeof/4,%ecx
        rep;  movsl                     # make room for error_code/entry_vector
        movl  UREGS_error_code(%esp),%eax # error_code/entry_vector
        movl  %eax,UREGS_kernel_sizeof(%esp)
        jmp   restore_all_xen           # return to fixup code

FATAL_exception_with_ints_disabled:
        xorl  %esi,%esi
        movw  UREGS_entry_vector(%esp),%si
        movl  %esp,%edx
        pushl %edx                      # push the cpu_user_regs pointer
        pushl %esi                      # push the trapnr (entry vector)
        call  fatal_trap
        ud2

ENTRY(coprocessor_error)
        pushl $TRAP_copro_error<<16
        jmp   handle_exception

ENTRY(simd_coprocessor_error)
        pushl $TRAP_simd_error<<16
        jmp   handle_exception

ENTRY(device_not_available)
        pushl $TRAP_no_device<<16
        jmp   handle_exception

ENTRY(debug)
        pushl $TRAP_debug<<16
        jmp   handle_exception

ENTRY(int3)
        pushl $TRAP_int3<<16
        jmp   handle_exception

ENTRY(overflow)
        pushl $TRAP_overflow<<16
        jmp   handle_exception

ENTRY(bounds)
        pushl $TRAP_bounds<<16
        jmp   handle_exception

ENTRY(invalid_op)
        pushl $TRAP_invalid_op<<16
        jmp   handle_exception

ENTRY(coprocessor_segment_overrun)
        pushl $TRAP_copro_seg<<16
        jmp   handle_exception

ENTRY(invalid_TSS)
        movw  $TRAP_invalid_tss,2(%esp)
        jmp   handle_exception

ENTRY(segment_not_present)
        movw  $TRAP_no_segment,2(%esp)
        jmp   handle_exception

ENTRY(stack_segment)
        movw  $TRAP_stack_error,2(%esp)
        jmp   handle_exception

ENTRY(general_protection)
        movw  $TRAP_gp_fault,2(%esp)
        jmp   handle_exception

ENTRY(alignment_check)
        movw  $TRAP_alignment_check,2(%esp)
        jmp   handle_exception

ENTRY(page_fault)
        movw  $TRAP_page_fault,2(%esp)
        jmp   handle_exception

ENTRY(spurious_interrupt_bug)
        pushl $TRAP_spurious_int<<16
        jmp   handle_exception

ENTRY(early_page_fault)
        SAVE_ALL(1f,1f)
1:      movl  %esp,%eax
        pushl %eax
        call  do_early_page_fault
        addl  $4,%esp
        jmp   restore_all_xen

handle_nmi_mce:
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        # NMI/MCE entry protocol is incompatible with guest kernel in ring 0.
        addl  $4,%esp
        iret
#else
        # Save state but do not trash the segment registers!
        SAVE_ALL(.Lnmi_mce_xen,.Lnmi_mce_common)
.Lnmi_mce_common:
        xorl  %eax,%eax
        movw  UREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
        pushl %edx
        call  *exception_table(,%eax,4)
        addl  $4,%esp
        /*
         * NB. We may return to Xen context with polluted %ds/%es. But in such
         * cases we have put guest DS/ES on the guest stack frame, which will
         * be detected by SAVE_ALL(), or we have rolled back restore_guest.
         */
        jmp   ret_from_intr
.Lnmi_mce_xen:
        /* Check the outer (guest) context for %ds/%es state validity. */
        GET_GUEST_REGS(%ebx)
        testl $X86_EFLAGS_VM,%ss:UREGS_eflags(%ebx)
        mov   %ds,%eax
        mov   %es,%edx
        jnz   .Lnmi_mce_vm86
        /* We may have interrupted Xen while messing with %ds/%es... */
        cmpw  %ax,%cx
        mov   %ecx,%ds                  /* Ensure %ds is valid */
        cmove UREGS_ds(%ebx),%eax       /* Grab guest DS if it wasn't in %ds */
        cmpw  %dx,%cx
        movl  %eax,UREGS_ds(%ebx)       /* Ensure guest frame contains guest DS */
        cmove UREGS_es(%ebx),%edx       /* Grab guest ES if it wasn't in %es */
        mov   %ecx,%es                  /* Ensure %es is valid */
        movl  $.Lrestore_sregs_guest,%ecx
        movl  %edx,UREGS_es(%ebx)       /* Ensure guest frame contains guest ES */
        cmpl  %ecx,UREGS_eip(%esp)
        jbe   .Lnmi_mce_common
        cmpl  $.Lrestore_iret_guest,UREGS_eip(%esp)
        ja    .Lnmi_mce_common
        /* Roll outer context restore_guest back to restoring %ds/%es. */
        movl  %ecx,UREGS_eip(%esp)
        jmp   .Lnmi_mce_common
.Lnmi_mce_vm86:
        /* vm86 is easy: the CPU saved %ds/%es so we can safely stomp them. */
        mov   %ecx,%ds
        mov   %ecx,%es
        jmp   .Lnmi_mce_common
#endif /* !CONFIG_X86_SUPERVISOR_MODE_KERNEL */

ENTRY(nmi)
        pushl $TRAP_nmi<<16
        jmp   handle_nmi_mce

ENTRY(machine_check)
        pushl $TRAP_machine_check<<16
        jmp   handle_nmi_mce

ENTRY(setup_vm86_frame)
        mov   %ecx,%ds
        mov   %ecx,%es
        # Copies the entire stack frame forwards by 16 bytes.
        .macro copy_vm86_words count=18
        .if \count
        pushl ((\count-1)*4)(%esp)
        popl  ((\count-1)*4)+16(%esp)
        copy_vm86_words "(\count-1)"
        .endif
        .endm
        copy_vm86_words
        addl  $16,%esp
        ret
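
/*
 * copy_vm86_words above is, roughly, the following C: it moves the 18-word
 * frame up by 16 bytes (4 words), highest word first so that the overlapping
 * source and destination ranges don't clobber each other (a sketch only):
 *
 *     uint32_t *frame = current_esp;   -- word at %esp is frame[0]
 *     for ( int i = 17; i >= 0; i-- )
 *         frame[i + 4] = frame[i];
 */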

.data

ENTRY(exception_table)
        .long do_divide_error
        .long do_debug
        .long do_nmi
        .long do_int3
        .long do_overflow
        .long do_bounds
        .long do_invalid_op
        .long do_device_not_available
        .long 0 # double fault
        .long do_coprocessor_segment_overrun
        .long do_invalid_TSS
        .long do_segment_not_present
        .long do_stack_segment
        .long do_general_protection
        .long do_page_fault
        .long do_spurious_interrupt_bug
        .long do_coprocessor_error
        .long do_alignment_check
        .long do_machine_check
        .long do_simd_coprocessor_error

ENTRY(hypercall_table)
        .long do_set_trap_table     /*  0 */
        .long do_mmu_update
        .long do_set_gdt
        .long do_stack_switch
        .long do_set_callbacks
        .long do_fpu_taskswitch     /*  5 */
        .long do_sched_op_compat
        .long do_platform_op
        .long do_set_debugreg
        .long do_get_debugreg
        .long do_update_descriptor  /* 10 */
        .long do_ni_hypercall
        .long do_memory_op
        .long do_multicall
        .long do_update_va_mapping
        .long do_set_timer_op       /* 15 */
        .long do_event_channel_op_compat
        .long do_xen_version
        .long do_console_io
        .long do_physdev_op_compat
        .long do_grant_table_op     /* 20 */
        .long do_vm_assist
        .long do_update_va_mapping_otherdomain
        .long do_iret
        .long do_vcpu_op
        .long do_ni_hypercall       /* 25 */
        .long do_mmuext_op
        .long do_xsm_op
        .long do_nmi_op
        .long do_sched_op
        .long do_callback_op        /* 30 */
        .long do_xenoprof_op
        .long do_event_channel_op
        .long do_physdev_op
        .long do_hvm_op
        .long do_sysctl             /* 35 */
        .long do_domctl
        .long do_kexec_op
        .long do_tmem_op
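        /*
         * Pad the unused slots up to __HYPERVISOR_arch_0 with do_ni_hypercall,
         * place do_mca in the first architecture-specific slot (48), then pad
         * the remainder of the table out to NR_hypercalls entries. The args
         * table below is padded the same way.
         */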
        .rept __HYPERVISOR_arch_0-((.-hypercall_table)/4)
        .long do_ni_hypercall
        .endr
        .long do_mca                /* 48 */
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long do_ni_hypercall
        .endr

ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table    */  /*  0 */
        .byte 4 /* do_mmu_update        */
        .byte 2 /* do_set_gdt           */
        .byte 2 /* do_stack_switch      */
        .byte 4 /* do_set_callbacks     */
        .byte 1 /* do_fpu_taskswitch    */  /*  5 */
        .byte 2 /* do_sched_op_compat   */
        .byte 1 /* do_platform_op       */
        .byte 2 /* do_set_debugreg      */
        .byte 1 /* do_get_debugreg      */
        .byte 4 /* do_update_descriptor */  /* 10 */
        .byte 0 /* do_ni_hypercall      */
        .byte 2 /* do_memory_op         */
        .byte 2 /* do_multicall         */
        .byte 4 /* do_update_va_mapping */
        .byte 2 /* do_set_timer_op      */  /* 15 */
        .byte 1 /* do_event_channel_op_compat */
        .byte 2 /* do_xen_version       */
        .byte 3 /* do_console_io        */
        .byte 1 /* do_physdev_op_compat */
        .byte 3 /* do_grant_table_op    */  /* 20 */
        .byte 2 /* do_vm_assist         */
        .byte 5 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_iret              */
        .byte 3 /* do_vcpu_op           */
        .byte 0 /* do_ni_hypercall      */  /* 25 */
        .byte 4 /* do_mmuext_op         */
        .byte 1 /* do_xsm_op            */
        .byte 2 /* do_nmi_op            */
        .byte 2 /* do_sched_op          */
        .byte 2 /* do_callback_op       */  /* 30 */
        .byte 2 /* do_xenoprof_op       */
        .byte 2 /* do_event_channel_op  */
        .byte 2 /* do_physdev_op        */
        .byte 2 /* do_hvm_op            */
        .byte 1 /* do_sysctl            */  /* 35 */
        .byte 1 /* do_domctl            */
        .byte 2 /* do_kexec_op          */
        .byte 1 /* do_tmem_op           */
        .rept __HYPERVISOR_arch_0-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall      */
        .endr
        .byte 1 /* do_mca               */  /* 48 */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall      */
        .endr