view xen/arch/x86/x86_32/entry.S @ 15412:acb7aa72fac7 (direct-io.hg)

i386: remove NMI deferral by instead making sure selector registers
are always stored/restored correctly despite the potential for an NMI
(and also MCE, with a subsequent patch) to kick in.

The idea is to always check values read from %ds and %es against
__HYPERVISOR_DS, and only store into the current frame (all normal
handlers) or the outer-most one (NMI and MCE) if the value read is
different. That way, any NMI or MCE occurring during frame setup will
store selectors not saved so far on behalf of the interrupted handler,
with that interrupted handler either having managed to read the guest
selector (in which case it can store it regardless of whether NMI/MCE
kicked in between the read and the store) or finding __HYPERVISOR_DS
already in the register, in which case it'll know not to store (as the
nested handler would have done the store).
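
Roughly, the per-selector save logic this results in looks like the
following (a sketch only, not the literal SAVE_ALL macro text; the
UREGS_ds(%esp) addressing is illustrative):

    mov  %ds,%ecx                  # read the live selector
    cmpw $__HYPERVISOR_DS,%cx      # already Xen's selector?
    je   1f                        #  -> a nested NMI/MCE saved it for us
    movl %ecx,UREGS_ds(%esp)       # otherwise save the guest value now
1:  movl $__HYPERVISOR_DS,%ecx
    mov  %ecx,%ds                  # and switch to Xen's data selector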

For the restore portion, this relies on there being exactly one such
code sequence: by moving the selector restores past all other restores
(including all stack pointer adjustments), the NMI/MCE handlers can
safely detect whether any selector has already been restored (by
range-checking EIP) and move EIP back to the beginning of the selector
restore sequence, without having to touch the stack pointer or any
other GPR.
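
On the restore side, the rollback done by the NMI (and later MCE)
handler then amounts to no more than an EIP range check on the
interrupted frame (a sketch; the real code is in the .Lnmi_xen path of
entry.S below):

    movl $.Lrestore_sregs_guest,%ecx
    cmpl %ecx,UREGS_eip(%esp)           # interrupted before the selector
    jbe  .Lnmi_common                   #  restores began? nothing to undo
    cmpl $.Lrestore_iret_guest,UREGS_eip(%esp)
    ja   .Lnmi_common                   # already past them? nothing to undo
    movl %ecx,UREGS_eip(%esp)           # otherwise replay them from the start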

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Jun 21 12:13:06 2007 +0100 (2007-06-21)
parents 33e22185002a
children 3cf5052ba5e5
line source
1 /*
2 * Hypercall and fault low-level handling routines.
3 *
4 * Copyright (c) 2002-2004, K A Fraser
5 * Copyright (c) 1991, 1992 Linus Torvalds
6 *
7 * Calling back to a guest OS:
8 * ===========================
9 *
10 * First, we require that all callbacks (either via a supplied
11 * interrupt-descriptor-table, or via the special event or failsafe callbacks
12 * in the shared-info-structure) are to ring 1. This just makes life easier,
13 * in that it means we don't have to do messy GDT/LDT lookups to find
14 * out what the privilege-level of the return code-selector is. That code
15 * would just be a hassle to write, and would need to account for running
16 * off the end of the GDT/LDT, for example. For all callbacks we check
17 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that
18 * we're safe, as we don't allow a guest OS to install ring-0 privileges into the
19 * GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are to
20 * ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1
21 * rather than the correct ring) and bad things are bound to ensue -- IRET is
22 * likely to fault, and we may end up killing the domain (no harm can
23 * come to Xen, though).
24 *
25 * When doing a callback, we check if the return CS is in ring 0. If so,
26 * the callback is delayed until the next return to ring != 0.
27 * If return CS is in ring 1, then we create a callback frame
28 * starting at return SS/ESP. The base of the frame does an intra-privilege
29 * interrupt-return.
30 * If return CS is in ring > 1, we create a callback frame starting
31 * at SS/ESP taken from appropriate section of the current TSS. The base
32 * of the frame does an inter-privilege interrupt-return.
33 *
34 * Note that the "failsafe callback" uses a special stackframe:
35 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
36 * return_CS, return_EFLAGS[, return_ESP, return_SS] }
37 * That is, original values for DS/ES/FS/GS are placed on stack rather than
38 * in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have them
39 * saved/restored by the guest OS. Furthermore, if we load them we may cause
40 * a fault if they are invalid, which is a hassle to deal with. We avoid
41 * that problem if we don't load them :-) This property allows us to use
42 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
43 * on return to ring != 0, we can simply package it up as a return via
44 * the failsafe callback, and let the guest OS sort it out (perhaps by
45 * killing an application process). Note that we also do this for any
46 * faulting IRET -- just let the guest OS handle it via the event
47 * callback.
48 *
49 * We terminate a domain in the following cases:
50 * - a fault while creating a callback stack frame (due to a bad ring-1 stack).
51 * - a faulting IRET on entry to the failsafe callback handler.
52 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
53 * handler in good order (absolutely no faults allowed!).
54 */
56 #include <xen/config.h>
57 #include <xen/errno.h>
58 #include <xen/softirq.h>
59 #include <asm/asm_defns.h>
60 #include <asm/apicdef.h>
61 #include <asm/page.h>
62 #include <public/xen.h>
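# GET_GUEST_REGS(reg): form a pointer to the guest cpu_user_regs frame,
# which sits at the top of the per-CPU stack (inside struct cpu_info),
# by rounding %esp down to the stack base and adding the fixed offset.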
64 #define GET_GUEST_REGS(reg) \
65 movl $~(STACK_SIZE-1),reg; \
66 andl %esp,reg; \
67 orl $(STACK_SIZE-CPUINFO_sizeof),reg;
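# GET_CURRENT(reg): fetch the 'current' struct vcpu pointer, which is
# kept in the topmost word of the per-CPU stack.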
69 #define GET_CURRENT(reg) \
70 movl $STACK_SIZE-4, reg; \
71 orl %esp, reg; \
72 andl $~3,reg; \
73 movl (reg),reg;
75 ALIGN
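# Return to guest context. The GPRs are popped first; the guest segment
# registers are reloaded last, after every stack adjustment, so that a
# nested NMI/MCE can tell from EIP how far the restore got and roll EIP
# back to .Lrestore_sregs_guest (see the changeset description above).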
76 restore_all_guest:
77 ASSERT_INTERRUPTS_DISABLED
78 testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
79 popl %ebx
80 popl %ecx
81 popl %edx
82 popl %esi
83 popl %edi
84 popl %ebp
85 popl %eax
86 leal 4(%esp),%esp
87 jnz .Lrestore_iret_guest
88 #ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
89 testb $2,UREGS_cs-UREGS_eip(%esp)
90 jnz .Lrestore_sregs_guest
91 call restore_ring0_guest
92 jmp .Lrestore_iret_guest
93 #endif
94 .Lrestore_sregs_guest:
95 .Lft1: mov UREGS_ds-UREGS_eip(%esp),%ds
96 .Lft2: mov UREGS_es-UREGS_eip(%esp),%es
97 .Lft3: mov UREGS_fs-UREGS_eip(%esp),%fs
98 .Lft4: mov UREGS_gs-UREGS_eip(%esp),%gs
99 .Lrestore_iret_guest:
100 .Lft5: iret
101 .section .fixup,"ax"
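# Fixup for faults in .Lft1-.Lft5 above (reloading guest segment
# registers, or the final IRET): rebuild the full register frame from
# the still-intact GPRs and re-enter handle_exception with the original
# error_code/entry_vector; .Ldf1 and failsafe_callback below then let
# the failure be reported to the guest via its failsafe callback.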
102 .Lfx1: subl $28,%esp
103 pushl 28(%esp) # error_code/entry_vector
104 movl %eax,UREGS_eax+4(%esp)
105 movl %ebp,UREGS_ebp+4(%esp)
106 movl %edi,UREGS_edi+4(%esp)
107 movl %esi,UREGS_esi+4(%esp)
108 movl %edx,UREGS_edx+4(%esp)
109 movl %ecx,UREGS_ecx+4(%esp)
110 movl %ebx,UREGS_ebx+4(%esp)
111 sti
112 popl %esi
113 pushfl # EFLAGS
114 movl $__HYPERVISOR_CS,%eax
115 pushl %eax # CS
116 movl $.Ldf1,%eax
117 pushl %eax # EIP
118 pushl %esi # error_code/entry_vector
119 jmp handle_exception
120 .Ldf1: GET_CURRENT(%ebx)
121 jmp test_all_events
122 failsafe_callback:
123 GET_CURRENT(%ebx)
124 leal VCPU_trap_bounce(%ebx),%edx
125 movl VCPU_failsafe_addr(%ebx),%eax
126 movl %eax,TRAPBOUNCE_eip(%edx)
127 movl VCPU_failsafe_sel(%ebx),%eax
128 movw %ax,TRAPBOUNCE_cs(%edx)
129 movb $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
130 bt $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%ebx)
131 jnc 1f
132 orb $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
133 1: call create_bounce_frame
134 xorl %eax,%eax
135 movl %eax,UREGS_ds(%esp)
136 movl %eax,UREGS_es(%esp)
137 movl %eax,UREGS_fs(%esp)
138 movl %eax,UREGS_gs(%esp)
139 jmp test_all_events
140 .previous
141 .section __pre_ex_table,"a"
142 .long .Lft1,.Lfx1
143 .long .Lft2,.Lfx1
144 .long .Lft3,.Lfx1
145 .long .Lft4,.Lfx1
146 .long .Lft5,.Lfx1
147 .previous
148 .section __ex_table,"a"
149 .long .Ldf1,failsafe_callback
150 .previous
152 ALIGN
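# Return to an interrupted Xen context: restore the GPRs, discard the
# error_code/entry_vector slot, and IRET. Segment registers are left
# untouched here.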
153 restore_all_xen:
154 popl %ebx
155 popl %ecx
156 popl %edx
157 popl %esi
158 popl %edi
159 popl %ebp
160 popl %eax
161 addl $4,%esp
162 iret
164 ALIGN
165 ENTRY(hypercall)
166 subl $4,%esp
167 FIXUP_RING0_GUEST_STACK
168 SAVE_ALL(1f,1f)
169 1: sti
170 GET_CURRENT(%ebx)
171 cmpl $NR_hypercalls,%eax
172 jae bad_hypercall
173 PERFC_INCR(PERFC_hypercalls, %eax, %ebx)
174 #ifndef NDEBUG
175 /* Create shadow parameters and corrupt those not used by this call. */
176 pushl %eax
177 pushl UREGS_eip+4(%esp)
178 pushl 28(%esp) # EBP
179 pushl 28(%esp) # EDI
180 pushl 28(%esp) # ESI
181 pushl 28(%esp) # EDX
182 pushl 28(%esp) # ECX
183 pushl 28(%esp) # EBX
184 movzb hypercall_args_table(,%eax,1),%ecx
185 leal (%esp,%ecx,4),%edi
186 subl $6,%ecx
187 negl %ecx
188 movl %eax,%esi
189 movl $0xDEADBEEF,%eax
190 rep stosl
191 movl %esi,%eax
192 #else
193 /*
194 * We need shadow parameters even on non-debug builds. We depend on the
195 * original versions not being clobbered (needed to create a hypercall
196 * continuation). But that isn't guaranteed by the function-call ABI.
197 */
198 pushl 20(%esp) # EBP
199 pushl 20(%esp) # EDI
200 pushl 20(%esp) # ESI
201 pushl 20(%esp) # EDX
202 pushl 20(%esp) # ECX
203 pushl 20(%esp) # EBX
204 #endif
205 call *hypercall_table(,%eax,4)
206 addl $24,%esp # Discard the shadow parameters
207 #ifndef NDEBUG
208 /* Deliberately corrupt real parameter regs used by this hypercall. */
209 popl %ecx # Shadow EIP
210 cmpl %ecx,UREGS_eip+4(%esp)
211 popl %ecx # Shadow hypercall index
212 jne skip_clobber # If EIP has changed then don't clobber
213 movzb hypercall_args_table(,%ecx,1),%ecx
214 movl %esp,%edi
215 movl %eax,%esi
216 movl $0xDEADBEEF,%eax
217 rep stosl
218 movl %esi,%eax
219 skip_clobber:
220 #endif
221 movl %eax,UREGS_eax(%esp) # save the return value
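# Common exit path to guest context: with interrupts disabled, check for
# pending softirqs, virtual NMIs and event-channel upcalls. Softirqs are
# handled in Xen; NMIs and upcalls are delivered to the guest via
# create_bounce_frame before we fall through to restore_all_guest.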
223 test_all_events:
224 xorl %ecx,%ecx
225 notl %ecx
226 cli # tests must not race interrupts
227 /*test_softirqs:*/
228 movl VCPU_processor(%ebx),%eax
229 shl $IRQSTAT_shift,%eax
230 test %ecx,irq_stat(%eax,1)
231 jnz process_softirqs
232 testb $1,VCPU_nmi_pending(%ebx)
233 jnz process_nmi
234 test_guest_events:
235 movl VCPU_vcpu_info(%ebx),%eax
236 testb $0xFF,VCPUINFO_upcall_mask(%eax)
237 jnz restore_all_guest
238 testb $0xFF,VCPUINFO_upcall_pending(%eax)
239 jz restore_all_guest
240 /*process_guest_events:*/
241 sti
242 leal VCPU_trap_bounce(%ebx),%edx
243 movl VCPU_event_addr(%ebx),%eax
244 movl %eax,TRAPBOUNCE_eip(%edx)
245 movl VCPU_event_sel(%ebx),%eax
246 movw %ax,TRAPBOUNCE_cs(%edx)
247 movb $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
248 call create_bounce_frame
249 jmp test_all_events
251 ALIGN
252 process_softirqs:
253 sti
254 call do_softirq
255 jmp test_all_events
257 ALIGN
258 process_nmi:
259 testb $1,VCPU_nmi_masked(%ebx)
260 jnz test_guest_events
261 movb $0,VCPU_nmi_pending(%ebx)
262 movl VCPU_nmi_addr(%ebx),%eax
263 test %eax,%eax
264 jz test_guest_events
265 movb $1,VCPU_nmi_masked(%ebx)
266 sti
267 leal VCPU_trap_bounce(%ebx),%edx
268 movl %eax,TRAPBOUNCE_eip(%edx)
269 movw $FLAT_KERNEL_CS,TRAPBOUNCE_cs(%edx)
270 movb $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
271 call create_bounce_frame
272 jmp test_all_events
274 bad_hypercall:
275 movl $-ENOSYS,UREGS_eax(%esp)
276 jmp test_all_events
278 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
279 /* {EIP, CS, EFLAGS, [ESP, SS]} */
280 /* %edx == trap_bounce, %ebx == struct vcpu */
281 /* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp. */
282 create_bounce_frame:
283 ASSERT_INTERRUPTS_ENABLED
284 movl UREGS_eflags+4(%esp),%ecx
285 movb UREGS_cs+4(%esp),%cl
286 testl $(2|X86_EFLAGS_VM),%ecx
287 jz ring1 /* jump if returning to an existing ring-1 activation */
288 movl VCPU_kernel_sp(%ebx),%esi
289 .Lft6: mov VCPU_kernel_ss(%ebx),%gs
290 testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
291 jz .Lnvm86_1
292 subl $16,%esi /* push ES/DS/FS/GS (VM86 stack frame) */
293 movl UREGS_es+4(%esp),%eax
294 .Lft7: movl %eax,%gs:(%esi)
295 movl UREGS_ds+4(%esp),%eax
296 .Lft8: movl %eax,%gs:4(%esi)
297 movl UREGS_fs+4(%esp),%eax
298 .Lft9: movl %eax,%gs:8(%esi)
299 movl UREGS_gs+4(%esp),%eax
300 .Lft10: movl %eax,%gs:12(%esi)
301 .Lnvm86_1:
302 subl $8,%esi /* push SS/ESP (inter-priv iret) */
303 movl UREGS_esp+4(%esp),%eax
304 .Lft11: movl %eax,%gs:(%esi)
305 movl UREGS_ss+4(%esp),%eax
306 .Lft12: movl %eax,%gs:4(%esi)
307 jmp 1f
308 ring1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
309 movl UREGS_esp+4(%esp),%esi
310 .Lft13: mov UREGS_ss+4(%esp),%gs
311 1: /* Construct a stack frame: EFLAGS, CS/EIP */
312 movb TRAPBOUNCE_flags(%edx),%cl
313 subl $12,%esi
314 movl UREGS_eip+4(%esp),%eax
315 .Lft14: movl %eax,%gs:(%esi)
316 movl VCPU_vcpu_info(%ebx),%eax
317 pushl VCPUINFO_upcall_mask(%eax)
318 testb $TBF_INTERRUPT,%cl
319 setnz %ch # TBF_INTERRUPT -> set upcall mask
320 orb %ch,VCPUINFO_upcall_mask(%eax)
321 popl %eax
322 shll $16,%eax # Bits 16-23: saved_upcall_mask
323 movw UREGS_cs+4(%esp),%ax # Bits 0-15: CS
324 #ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
325 testw $2,%ax
326 jnz .Lft15
327 and $~3,%ax # RPL 1 -> RPL 0
328 #endif
329 .Lft15: movl %eax,%gs:4(%esi)
330 test $0x00FF0000,%eax # Bits 16-23: saved_upcall_mask
331 setz %ch # %ch == !saved_upcall_mask
332 movl UREGS_eflags+4(%esp),%eax
333 andl $~X86_EFLAGS_IF,%eax
334 shlb $1,%ch # Bit 9 (EFLAGS.IF)
335 orb %ch,%ah # Fold EFLAGS.IF into %eax
336 .Lft16: movl %eax,%gs:8(%esi)
337 test $TBF_EXCEPTION_ERRCODE,%cl
338 jz 1f
339 subl $4,%esi # push error_code onto guest frame
340 movl TRAPBOUNCE_error_code(%edx),%eax
341 .Lft17: movl %eax,%gs:(%esi)
342 1: testb $TBF_FAILSAFE,%cl
343 jz 2f
344 subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
345 testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
346 jz .Lnvm86_2
347 xorl %eax,%eax # VM86: we write zero selector values
348 .Lft18: movl %eax,%gs:(%esi)
349 .Lft19: movl %eax,%gs:4(%esi)
350 .Lft20: movl %eax,%gs:8(%esi)
351 .Lft21: movl %eax,%gs:12(%esi)
352 jmp 2f
353 .Lnvm86_2:
354 movl UREGS_ds+4(%esp),%eax # non-VM86: write real selector values
355 .Lft22: movl %eax,%gs:(%esi)
356 movl UREGS_es+4(%esp),%eax
357 .Lft23: movl %eax,%gs:4(%esi)
358 movl UREGS_fs+4(%esp),%eax
359 .Lft24: movl %eax,%gs:8(%esi)
360 movl UREGS_gs+4(%esp),%eax
361 .Lft25: movl %eax,%gs:12(%esi)
362 2: testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
363 jz .Lnvm86_3
364 xorl %eax,%eax /* zero DS-GS, just as a real CPU would */
365 movl %eax,UREGS_ds+4(%esp)
366 movl %eax,UREGS_es+4(%esp)
367 movl %eax,UREGS_fs+4(%esp)
368 movl %eax,UREGS_gs+4(%esp)
369 .Lnvm86_3:
370 /* Rewrite our stack frame and return to ring 1. */
371 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
372 andl $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
373 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+4(%esp)
374 mov %gs,UREGS_ss+4(%esp)
375 movl %esi,UREGS_esp+4(%esp)
376 movzwl TRAPBOUNCE_cs(%edx),%eax
377 /* Null selectors (0-3) are not allowed. */
378 testl $~3,%eax
379 jz domain_crash_synchronous
380 movl %eax,UREGS_cs+4(%esp)
381 movl TRAPBOUNCE_eip(%edx),%eax
382 movl %eax,UREGS_eip+4(%esp)
383 ret
384 .section __ex_table,"a"
385 .long .Lft6,domain_crash_synchronous , .Lft7,domain_crash_synchronous
386 .long .Lft8,domain_crash_synchronous , .Lft9,domain_crash_synchronous
387 .long .Lft10,domain_crash_synchronous , .Lft11,domain_crash_synchronous
388 .long .Lft12,domain_crash_synchronous , .Lft13,domain_crash_synchronous
389 .long .Lft14,domain_crash_synchronous , .Lft15,domain_crash_synchronous
390 .long .Lft16,domain_crash_synchronous , .Lft17,domain_crash_synchronous
391 .long .Lft18,domain_crash_synchronous , .Lft19,domain_crash_synchronous
392 .long .Lft20,domain_crash_synchronous , .Lft21,domain_crash_synchronous
393 .long .Lft22,domain_crash_synchronous , .Lft23,domain_crash_synchronous
394 .long .Lft24,domain_crash_synchronous , .Lft25,domain_crash_synchronous
395 .previous
397 domain_crash_synchronous_string:
398 .asciz "domain_crash_sync called from entry.S (%lx)\n"
400 domain_crash_synchronous:
401 pushl $domain_crash_synchronous_string
402 call printk
403 jmp __domain_crash_synchronous
405 ALIGN
406 ENTRY(ret_from_intr)
407 GET_CURRENT(%ebx)
408 movl UREGS_eflags(%esp),%eax
409 movb UREGS_cs(%esp),%al
410 testl $(3|X86_EFLAGS_VM),%eax
411 jnz test_all_events
412 jmp restore_all_xen
414 ENTRY(divide_error)
415 pushl $TRAP_divide_error<<16
416 ALIGN
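# Common exception dispatch: save the register frame, make sure %ds/%es
# hold Xen's data selector if the fault came from Xen context, then call
# the C handler from exception_table; afterwards either return to Xen or
# bounce the exception to the guest.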
417 handle_exception:
418 FIXUP_RING0_GUEST_STACK
419 SAVE_ALL(1f,2f)
420 .text 1
421 /* Exception within Xen: make sure we have valid %ds,%es. */
422 1: mov %ecx,%ds
423 mov %ecx,%es
424 jmp 2f
425 .previous
426 2: testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
427 jz exception_with_ints_disabled
428 sti # re-enable interrupts
429 1: xorl %eax,%eax
430 movw UREGS_entry_vector(%esp),%ax
431 movl %esp,%edx
432 pushl %edx # push the cpu_user_regs pointer
433 GET_CURRENT(%ebx)
434 PERFC_INCR(PERFC_exceptions, %eax, %ebx)
435 call *exception_table(,%eax,4)
436 addl $4,%esp
437 movl UREGS_eflags(%esp),%eax
438 movb UREGS_cs(%esp),%al
439 testl $(3|X86_EFLAGS_VM),%eax
440 jz restore_all_xen
441 leal VCPU_trap_bounce(%ebx),%edx
442 testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
443 jz test_all_events
444 call create_bounce_frame
445 movb $0,TRAPBOUNCE_flags(%edx)
446 jmp test_all_events
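# The exception hit code with interrupts disabled. If the interrupted
# context was outside Xen this is fatal; otherwise look the faulting EIP
# up in __pre_ex_table and, if a fixup exists, shift the saved frame down
# one word (preserving error_code/entry_vector for the fixup code) and
# "return" to the fixup address via restore_all_xen.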
448 exception_with_ints_disabled:
449 movl UREGS_eflags(%esp),%eax
450 movb UREGS_cs(%esp),%al
451 testl $(3|X86_EFLAGS_VM),%eax # interrupts disabled outside Xen?
452 jnz FATAL_exception_with_ints_disabled
453 pushl %esp
454 call search_pre_exception_table
455 addl $4,%esp
456 testl %eax,%eax # no fixup code for faulting EIP?
457 jz 1b
458 movl %eax,UREGS_eip(%esp)
459 movl %esp,%esi
460 subl $4,%esp
461 movl %esp,%edi
462 movl $UREGS_kernel_sizeof/4,%ecx
463 rep; movsl # make room for error_code/entry_vector
464 movl UREGS_error_code(%esp),%eax # error_code/entry_vector
465 movl %eax,UREGS_kernel_sizeof(%esp)
466 jmp restore_all_xen # return to fixup code
468 FATAL_exception_with_ints_disabled:
469 xorl %esi,%esi
470 movw UREGS_entry_vector(%esp),%si
471 movl %esp,%edx
472 pushl %edx # push the cpu_user_regs pointer
473 pushl %esi # push the trapnr (entry vector)
474 call fatal_trap
475 ud2
477 ENTRY(coprocessor_error)
478 pushl $TRAP_copro_error<<16
479 jmp handle_exception
481 ENTRY(simd_coprocessor_error)
482 pushl $TRAP_simd_error<<16
483 jmp handle_exception
485 ENTRY(device_not_available)
486 pushl $TRAP_no_device<<16
487 jmp handle_exception
489 ENTRY(debug)
490 pushl $TRAP_debug<<16
491 jmp handle_exception
493 ENTRY(int3)
494 pushl $TRAP_int3<<16
495 jmp handle_exception
497 ENTRY(overflow)
498 pushl $TRAP_overflow<<16
499 jmp handle_exception
501 ENTRY(bounds)
502 pushl $TRAP_bounds<<16
503 jmp handle_exception
505 ENTRY(invalid_op)
506 pushl $TRAP_invalid_op<<16
507 jmp handle_exception
509 ENTRY(coprocessor_segment_overrun)
510 pushl $TRAP_copro_seg<<16
511 jmp handle_exception
513 ENTRY(invalid_TSS)
514 movw $TRAP_invalid_tss,2(%esp)
515 jmp handle_exception
517 ENTRY(segment_not_present)
518 movw $TRAP_no_segment,2(%esp)
519 jmp handle_exception
521 ENTRY(stack_segment)
522 movw $TRAP_stack_error,2(%esp)
523 jmp handle_exception
525 ENTRY(general_protection)
526 movw $TRAP_gp_fault,2(%esp)
527 jmp handle_exception
529 ENTRY(alignment_check)
530 movw $TRAP_alignment_check,2(%esp)
531 jmp handle_exception
533 ENTRY(page_fault)
534 movw $TRAP_page_fault,2(%esp)
535 jmp handle_exception
537 ENTRY(machine_check)
538 pushl $TRAP_machine_check<<16
539 jmp handle_exception
541 ENTRY(spurious_interrupt_bug)
542 pushl $TRAP_spurious_int<<16
543 jmp handle_exception
545 ENTRY(early_page_fault)
546 SAVE_ALL(1f,1f)
547 1: movl %esp,%eax
548 pushl %eax
549 call do_early_page_fault
550 addl $4,%esp
551 jmp restore_all_xen
553 ENTRY(nmi)
554 #ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
555 # NMI entry protocol is incompatible with guest kernel in ring 0.
556 iret
557 #else
558 # Save state but do not trash the segment registers!
559 pushl $TRAP_nmi<<16
560 SAVE_ALL(.Lnmi_xen,.Lnmi_common)
561 .Lnmi_common:
562 movl %esp,%eax
563 pushl %eax
564 call do_nmi
565 addl $4,%esp
566 /*
567 * NB. We may return to Xen context with polluted %ds/%es. But in such
568 * cases we have put guest DS/ES on the guest stack frame, which will
569 * be detected by SAVE_ALL(), or we have rolled back restore_guest.
570 */
571 jmp ret_from_intr
572 .Lnmi_xen:
573 /* Check the outer (guest) context for %ds/%es state validity. */
574 GET_GUEST_REGS(%ebx)
575 testl $X86_EFLAGS_VM,%ss:UREGS_eflags(%ebx)
576 mov %ds,%eax
577 mov %es,%edx
578 jnz .Lnmi_vm86
579 /* We may have interrupted Xen while messing with %ds/%es... */
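/* %ecx still holds Xen's data selector here (left there by SAVE_ALL's */
/* Xen-context path). If a live selector already equals it, the */
/* interrupted code saved the guest value before the NMI hit, so take it */
/* from the outer frame (cmove); otherwise the live register still holds */
/* the guest value and we store it into the frame ourselves. */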
580 cmpw %ax,%cx
581 mov %ecx,%ds /* Ensure %ds is valid */
582 cmove UREGS_ds(%ebx),%eax /* Grab guest DS if it wasn't in %ds */
583 cmpw %dx,%cx
584 movl %eax,UREGS_ds(%ebx) /* Ensure guest frame contains guest DS */
585 cmove UREGS_es(%ebx),%edx /* Grab guest ES if it wasn't in %es */
586 mov %ecx,%es /* Ensure %es is valid */
587 movl $.Lrestore_sregs_guest,%ecx
588 movl %edx,UREGS_es(%ebx) /* Ensure guest frame contains guest ES */
589 cmpl %ecx,UREGS_eip(%esp)
590 jbe .Lnmi_common
591 cmpl $.Lrestore_iret_guest,UREGS_eip(%esp)
592 ja .Lnmi_common
593 /* Roll outer context restore_guest back to restoring %ds/%es. */
594 movl %ecx,UREGS_eip(%esp)
595 jmp .Lnmi_common
596 .Lnmi_vm86:
597 /* vm86 is easy: the CPU saved %ds/%es so we can safely stomp them. */
598 mov %ecx,%ds
599 mov %ecx,%es
600 jmp .Lnmi_common
601 #endif /* !CONFIG_X86_SUPERVISOR_MODE_KERNEL */
603 ENTRY(setup_vm86_frame)
604 mov %ecx,%ds
605 mov %ecx,%es
606 # Copies the entire stack frame forwards by 16 bytes.
607 .macro copy_vm86_words count=18
608 .if \count
609 pushl ((\count-1)*4)(%esp)
610 popl ((\count-1)*4)+16(%esp)
611 copy_vm86_words "(\count-1)"
612 .endif
613 .endm
614 copy_vm86_words
615 addl $16,%esp
616 ret
618 .data
620 ENTRY(exception_table)
621 .long do_divide_error
622 .long do_debug
623 .long 0 # nmi
624 .long do_int3
625 .long do_overflow
626 .long do_bounds
627 .long do_invalid_op
628 .long math_state_restore
629 .long 0 # double fault
630 .long do_coprocessor_segment_overrun
631 .long do_invalid_TSS
632 .long do_segment_not_present
633 .long do_stack_segment
634 .long do_general_protection
635 .long do_page_fault
636 .long do_spurious_interrupt_bug
637 .long do_coprocessor_error
638 .long do_alignment_check
639 .long do_machine_check
640 .long do_simd_coprocessor_error
642 ENTRY(hypercall_table)
643 .long do_set_trap_table /* 0 */
644 .long do_mmu_update
645 .long do_set_gdt
646 .long do_stack_switch
647 .long do_set_callbacks
648 .long do_fpu_taskswitch /* 5 */
649 .long do_sched_op_compat
650 .long do_platform_op
651 .long do_set_debugreg
652 .long do_get_debugreg
653 .long do_update_descriptor /* 10 */
654 .long do_ni_hypercall
655 .long do_memory_op
656 .long do_multicall
657 .long do_update_va_mapping
658 .long do_set_timer_op /* 15 */
659 .long do_event_channel_op_compat
660 .long do_xen_version
661 .long do_console_io
662 .long do_physdev_op_compat
663 .long do_grant_table_op /* 20 */
664 .long do_vm_assist
665 .long do_update_va_mapping_otherdomain
666 .long do_iret
667 .long do_vcpu_op
668 .long do_ni_hypercall /* 25 */
669 .long do_mmuext_op
670 .long do_acm_op
671 .long do_nmi_op
672 .long do_sched_op
673 .long do_callback_op /* 30 */
674 .long do_xenoprof_op
675 .long do_event_channel_op
676 .long do_physdev_op
677 .long do_hvm_op
678 .long do_sysctl /* 35 */
679 .long do_domctl
680 .long do_kexec_op
681 .rept NR_hypercalls-((.-hypercall_table)/4)
682 .long do_ni_hypercall
683 .endr
685 ENTRY(hypercall_args_table)
686 .byte 1 /* do_set_trap_table */ /* 0 */
687 .byte 4 /* do_mmu_update */
688 .byte 2 /* do_set_gdt */
689 .byte 2 /* do_stack_switch */
690 .byte 4 /* do_set_callbacks */
691 .byte 1 /* do_fpu_taskswitch */ /* 5 */
692 .byte 2 /* do_sched_op_compat */
693 .byte 1 /* do_platform_op */
694 .byte 2 /* do_set_debugreg */
695 .byte 1 /* do_get_debugreg */
696 .byte 4 /* do_update_descriptor */ /* 10 */
697 .byte 0 /* do_ni_hypercall */
698 .byte 2 /* do_memory_op */
699 .byte 2 /* do_multicall */
700 .byte 4 /* do_update_va_mapping */
701 .byte 2 /* do_set_timer_op */ /* 15 */
702 .byte 1 /* do_event_channel_op_compat */
703 .byte 2 /* do_xen_version */
704 .byte 3 /* do_console_io */
705 .byte 1 /* do_physdev_op_compat */
706 .byte 3 /* do_grant_table_op */ /* 20 */
707 .byte 2 /* do_vm_assist */
708 .byte 5 /* do_update_va_mapping_otherdomain */
709 .byte 0 /* do_iret */
710 .byte 3 /* do_vcpu_op */
711 .byte 0 /* do_ni_hypercall */ /* 25 */
712 .byte 4 /* do_mmuext_op */
713 .byte 1 /* do_acm_op */
714 .byte 2 /* do_nmi_op */
715 .byte 2 /* do_sched_op */
716 .byte 2 /* do_callback_op */ /* 30 */
717 .byte 2 /* do_xenoprof_op */
718 .byte 2 /* do_event_channel_op */
719 .byte 2 /* do_physdev_op */
720 .byte 2 /* do_hvm_op */
721 .byte 1 /* do_sysctl */ /* 35 */
722 .byte 1 /* do_domctl */
723 .byte 2 /* do_kexec_op */
724 .rept NR_hypercalls-(.-hypercall_args_table)
725 .byte 0 /* do_ni_hypercall */
726 .endr