linux-2.6-xen-sparse/arch/xen/i386/kernel/entry.S @ 7540:b5903c9aeda5 (direct-io.hg)

Fix floating-point corruption (a nasty race in fp task-switch exception handling).
Signed-off-by: Keir Fraser <keir@xensource.com>

author    kaf24@firebug.cl.cam.ac.uk
date      Sun Oct 30 10:45:49 2005 +0100
parents   06d84bf87159
children  5823dbfbb4cd
/*
 * linux/arch/i386/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'ret_from_system_call':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - orig_eax
 *	28(%esp) - %eip
 *	2C(%esp) - %cs
 *	30(%esp) - %eflags
 *	34(%esp) - %oldesp
 *	38(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */

#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
#include "irq_vectors.h"
#include <asm-xen/xen-public/xen.h>

#define nr_syscalls ((syscall_table_size)/4)

EBX        = 0x00
ECX        = 0x04
EDX        = 0x08
ESI        = 0x0C
EDI        = 0x10
EBP        = 0x14
EAX        = 0x18
DS         = 0x1C
ES         = 0x20
ORIG_EAX   = 0x24
EIP        = 0x28
CS         = 0x2C
EVENT_MASK = 0x2E
EFLAGS     = 0x30
OLDESP     = 0x34
OLDSS      = 0x38
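
/*
 * Note: these offsets match the stack layout table at the top of the file.
 * EVENT_MASK (CS + 2) sits in the otherwise unused upper half of the saved
 * %cs slot; it records the event (upcall) mask that was in force in the
 * interrupted context and is tested on the return paths below.
 */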

CF_MASK = 0x00000001
TF_MASK = 0x00000100
IF_MASK = 0x00000200
DF_MASK = 0x00000400
NT_MASK = 0x00004000
VM_MASK = 0x00020000

/* Offsets into shared_info_t. */
#define evtchn_upcall_pending /* 0 */
#define evtchn_upcall_mask    1

#define sizeof_vcpu_shift 3

#ifdef CONFIG_SMP
#define preempt_disable(reg) incl TI_preempt_count(reg)
#define preempt_enable(reg)  decl TI_preempt_count(reg)
#define XEN_GET_VCPU_INFO(reg) preempt_disable(%ebp) ; \
        movl TI_cpu(%ebp),reg ; \
        shl $sizeof_vcpu_shift,reg ; \
        addl HYPERVISOR_shared_info,reg
#define XEN_PUT_VCPU_INFO(reg) preempt_enable(%ebp)
#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
#else
#define XEN_GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
#define XEN_PUT_VCPU_INFO(reg)
#define XEN_PUT_VCPU_INFO_fixup
#endif
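
/*
 * XEN_GET_VCPU_INFO leaves a pointer to the current CPU's vcpu info area
 * within the shared info page in 'reg': on SMP it is HYPERVISOR_shared_info
 * plus (TI_cpu << sizeof_vcpu_shift), with preemption disabled so the CPU
 * number stays valid until the matching XEN_PUT_VCPU_INFO; on UP it is
 * simply the shared info pointer (vcpu 0 lives at offset 0).
 */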

#define XEN_LOCKED_BLOCK_EVENTS(reg)   movb $1,evtchn_upcall_mask(reg)
#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
#define XEN_BLOCK_EVENTS(reg)   XEN_GET_VCPU_INFO(reg) ; \
                                XEN_LOCKED_BLOCK_EVENTS(reg) ; \
                                XEN_PUT_VCPU_INFO(reg)
#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
                                XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
                                XEN_PUT_VCPU_INFO(reg)
#define XEN_TEST_PENDING(reg)   testb $0xFF,evtchn_upcall_pending(reg)
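
/*
 * Blocking/unblocking events by writing evtchn_upcall_mask is the Xen
 * guest's replacement for cli/sti, and XEN_TEST_PENDING checks whether an
 * upcall arrived while delivery was masked.
 */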

#ifdef CONFIG_PREEMPT
#define preempt_stop    GET_THREAD_INFO(%ebp) ; \
                        XEN_BLOCK_EVENTS(%esi)
#else
#define preempt_stop
#define resume_kernel   restore_nocheck
#endif

#define SAVE_ALL \
        cld; \
        pushl %es; \
        pushl %ds; \
        pushl %eax; \
        pushl %ebp; \
        pushl %edi; \
        pushl %esi; \
        pushl %edx; \
        pushl %ecx; \
        pushl %ebx; \
        movl $(__USER_DS), %edx; \
        movl %edx, %ds; \
        movl %edx, %es;
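
/*
 * SAVE_ALL pushes %es, %ds and the general registers in the order that
 * produces the pt_regs layout listed at the top of the file (EBX at 0(%esp)
 * up to ES at 0x20), then reloads %ds/%es with __USER_DS.
 */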

#define RESTORE_INT_REGS \
        popl %ebx; \
        popl %ecx; \
        popl %edx; \
        popl %esi; \
        popl %edi; \
        popl %ebp; \
        popl %eax

#define RESTORE_REGS \
        RESTORE_INT_REGS; \
1:      popl %ds; \
2:      popl %es; \
.section .fixup,"ax"; \
3:      movl $0,(%esp); \
        jmp 1b; \
4:      movl $0,(%esp); \
        jmp 2b; \
.previous; \
.section __ex_table,"a"; \
        .align 4; \
        .long 1b,3b; \
        .long 2b,4b; \
.previous

#define RESTORE_ALL \
        RESTORE_REGS \
        addl $4, %esp; \
1:      iret; \
.section .fixup,"ax"; \
2:      pushl $0; \
        pushl $do_iret_error; \
        jmp error_code; \
.previous; \
.section __ex_table,"a"; \
        .align 4; \
        .long 1b,2b; \
.previous
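
/*
 * The addl $4, %esp in RESTORE_ALL discards the orig_eax slot.  The iret
 * itself is covered by an __ex_table entry so that a fault while returning
 * (for example a bad segment in the restored frame) is reported through
 * do_iret_error via error_code.
 */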

ENTRY(ret_from_fork)
        pushl %eax
        call schedule_tail
        GET_THREAD_INFO(%ebp)
        popl %eax
        jmp syscall_exit

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

# userspace resumption stub bypassing syscall exit tracing
        ALIGN
ret_from_exception:
        preempt_stop
ret_from_intr:
        GET_THREAD_INFO(%ebp)
        movl EFLAGS(%esp), %eax         # mix EFLAGS and CS
        movb CS(%esp), %al
        testl $(VM_MASK | 2), %eax
        jz resume_kernel
ENTRY(resume_userspace)
        XEN_BLOCK_EVENTS(%esi)          # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        movl TI_flags(%ebp), %ecx
        andl $_TIF_WORK_MASK, %ecx      # is there any work to be done on
                                        # int/exception return?
        jne work_pending
        jmp restore_all

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
        XEN_BLOCK_EVENTS(%esi)
        cmpl $0,TI_preempt_count(%ebp)  # non-zero preempt_count ?
        jnz restore_nocheck
need_resched:
        movl TI_flags(%ebp), %ecx       # need_resched set ?
        testb $_TIF_NEED_RESCHED, %cl
        jz restore_all
        testb $0xFF,EVENT_MASK(%esp)    # interrupts off (exception path) ?
        jnz restore_all
        call preempt_schedule_irq
        jmp need_resched
#endif

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */

# sysenter call handler stub
ENTRY(sysenter_entry)
        movl TSS_sysenter_esp0(%esp),%esp
sysenter_past_esp:
        sti
        pushl $(__USER_DS)
        pushl %ebp
        pushfl
        pushl $(__USER_CS)
        pushl $SYSENTER_RETURN

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
        cmpl $__PAGE_OFFSET-3,%ebp
        jae syscall_fault
1:      movl (%ebp),%ebp
.section __ex_table,"a"
        .align 4
        .long 1b,syscall_fault
.previous
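
/*
 * The sixth syscall argument is fetched from the user %ebp.  The cmpl
 * rejects pointers within 3 bytes of __PAGE_OFFSET so the 4-byte load can
 * never read kernel memory, and the __ex_table entry above redirects a
 * faulting load to syscall_fault.
 */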

        pushl %eax
        SAVE_ALL
        GET_THREAD_INFO(%ebp)

        /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
        testw $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),TI_flags(%ebp)
        jnz syscall_trace_entry
        cmpl $(nr_syscalls), %eax
        jae syscall_badsys
        call *sys_call_table(,%eax,4)
        movl %eax,EAX(%esp)
        cli
        movl TI_flags(%ebp), %ecx
        testw $_TIF_ALLWORK_MASK, %cx
        jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
        movl EIP(%esp), %edx
        movl OLDESP(%esp), %ecx
        xorl %ebp,%ebp
        sti
        sysexit
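
/*
 * sysexit resumes user execution at %edx with the user stack taken from
 * %ecx, which is why EIP and OLDESP are loaded into those registers above
 * before interrupts are re-enabled.
 */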

# system call handler stub
ENTRY(system_call)
        pushl %eax                      # save orig_eax
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
        # system call tracing in operation
        /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
        testw $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),TI_flags(%ebp)
        jnz syscall_trace_entry
        cmpl $(nr_syscalls), %eax
        jae syscall_badsys
syscall_call:
        call *sys_call_table(,%eax,4)
        movl %eax,EAX(%esp)             # store the return value
syscall_exit:
        XEN_BLOCK_EVENTS(%esi)          # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        movl TI_flags(%ebp), %ecx
        testw $_TIF_ALLWORK_MASK, %cx   # current->work
        jne syscall_exit_work

restore_all:
#if 0 /* XEN */
        movl EFLAGS(%esp), %eax         # mix EFLAGS, SS and CS
        # Warning: OLDSS(%esp) contains the wrong/random values if we
        # are returning to the kernel.
        # See comments in process.c:copy_thread() for details.
        movb OLDSS(%esp), %ah
        movb CS(%esp), %al
        andl $(VM_MASK | (4 << 8) | 3), %eax
        cmpl $((4 << 8) | 3), %eax
        je ldt_ss                       # returning to user-space with LDT SS
#endif /* XEN */
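
/*
 * Before the final iret we decide whether event delivery needs to be turned
 * back on: %al ends up as (current upcall mask) & ~(mask saved in the frame)
 * & 1, which is non-zero only when the context being restored had events
 * enabled while delivery is currently blocked.  In that case we take the
 * restore_all_enable_events path, which unblocks events (and processes any
 * that are already pending) inside the critical region before returning.
 */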
restore_nocheck:
        testl $VM_MASK, EFLAGS(%esp)
        jnz resume_vm86
        movb EVENT_MASK(%esp), %al
        notb %al                        # %al == ~saved_mask
        XEN_GET_VCPU_INFO(%esi)
        andb evtchn_upcall_mask(%esi),%al
        andb $1,%al                     # %al == mask & ~saved_mask
        jnz restore_all_enable_events   # != 0 => reenable event delivery
        XEN_PUT_VCPU_INFO(%esi)
        RESTORE_REGS
        addl $4, %esp
1:      iret
.section .fixup,"ax"
iret_exc:
        pushl $0                        # no error code
        pushl $do_iret_error
        jmp error_code
.previous
.section __ex_table,"a"
        .align 4
        .long 1b,iret_exc
.previous

resume_vm86:
        XEN_UNBLOCK_EVENTS(%esi)
        RESTORE_REGS
        movl %eax,(%esp)
        movl $__HYPERVISOR_switch_vm86,%eax
        int $0x82
        ud2
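
/*
 * Returning to vm86 mode is handled by hypercall rather than by iret:
 * after RESTORE_REGS the live %eax is parked in the orig_eax slot so the
 * frame handed over is complete, %eax is loaded with the
 * __HYPERVISOR_switch_vm86 hypercall number, and int $0x82 (the hypercall
 * trap) transfers to Xen.  The ud2 catches an unexpected return.
 */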

#if 0 /* XEN */
ldt_ss:
        larl OLDSS(%esp), %eax
        jnz restore_nocheck
        testl $0x00400000, %eax         # returning to 32bit stack?
        jnz restore_nocheck             # all right, normal return
        /* If returning to userspace with 16bit stack,
         * try to fix the higher word of ESP, as the CPU
         * won't restore it.
         * This is an "official" bug of all the x86-compatible
         * CPUs, which we can try to work around to make
         * dosemu and wine happy. */
        subl $8, %esp                   # reserve space for switch16 pointer
        cli
        movl %esp, %eax
        /* Set up the 16bit stack frame with switch32 pointer on top,
         * and a switch16 pointer on top of the current frame. */
        call setup_x86_bogus_stack
        RESTORE_REGS
        lss 20+4(%esp), %esp            # switch to 16bit stack
1:      iret
.section __ex_table,"a"
        .align 4
        .long 1b,iret_exc
.previous
#endif /* XEN */

# perform work that needs to be done immediately before resumption
        ALIGN
work_pending:
        testb $_TIF_NEED_RESCHED, %cl
        jz work_notifysig
work_resched:
        call schedule
        XEN_BLOCK_EVENTS(%esi)          # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        movl TI_flags(%ebp), %ecx
        andl $_TIF_WORK_MASK, %ecx      # is there any work to be done other
                                        # than syscall tracing?
        jz restore_all
        testb $_TIF_NEED_RESCHED, %cl
        jnz work_resched

work_notifysig:                         # deal with pending signals and
                                        # notify-resume requests
        testl $VM_MASK, EFLAGS(%esp)
        movl %esp, %eax
        jne work_notifysig_v86          # returning to kernel-space or
                                        # vm86-space
        xorl %edx, %edx
        call do_notify_resume
        jmp restore_all

        ALIGN
work_notifysig_v86:
        pushl %ecx                      # save ti_flags for do_notify_resume
        call save_v86_state             # %eax contains pt_regs pointer
        popl %ecx
        movl %eax, %esp
        xorl %edx, %edx
        call do_notify_resume
        jmp restore_all

# perform syscall entry tracing
        ALIGN
syscall_trace_entry:
        movl $-ENOSYS,EAX(%esp)
        movl %esp, %eax
        xorl %edx,%edx
        call do_syscall_trace
        movl ORIG_EAX(%esp), %eax
        cmpl $(nr_syscalls), %eax
        jnae syscall_call
        jmp syscall_exit
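
/*
 * -ENOSYS is stored in EAX(%esp) before entry tracing so that, if the
 * tracer leaves an out-of-range syscall number, the exit path returns
 * -ENOSYS.  The number is re-read from ORIG_EAX afterwards because the
 * tracer is allowed to change it, and it is re-validated before dispatch.
 */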

# perform syscall exit tracing
        ALIGN
syscall_exit_work:
        testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
        jz work_pending
        XEN_UNBLOCK_EVENTS(%esi)        # could let do_syscall_trace() call
                                        # schedule() instead
        movl %esp, %eax
        movl $1, %edx
        call do_syscall_trace
        jmp resume_userspace

        ALIGN
syscall_fault:
        pushl %eax                      # save orig_eax
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
        movl $-EFAULT,EAX(%esp)
        jmp resume_userspace

        ALIGN
syscall_badsys:
        movl $-ENOSYS,EAX(%esp)
        jmp resume_userspace

#if 0 /* XEN */
#define FIXUP_ESPFIX_STACK \
        movl %esp, %eax; \
        /* switch to 32bit stack using the pointer on top of 16bit stack */ \
        lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
        /* copy data from 16bit stack to 32bit stack */ \
        call fixup_x86_bogus_stack; \
        /* put ESP to the proper location */ \
        movl %eax, %esp;
#define UNWIND_ESPFIX_STACK \
        pushl %eax; \
        movl %ss, %eax; \
        /* see if on 16bit stack */ \
        cmpw $__ESPFIX_SS, %ax; \
        jne 28f; \
        movl $__KERNEL_DS, %edx; \
        movl %edx, %ds; \
        movl %edx, %es; \
        /* switch to 32bit stack */ \
        FIXUP_ESPFIX_STACK \
28:     popl %eax;

/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
.data
ENTRY(interrupt)
.text

vector=0
ENTRY(irq_entries_start)
.rept NR_IRQS
        ALIGN
1:      pushl $vector-256
        jmp common_interrupt
.data
        .long 1b
.text
vector=vector+1
.endr
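
/*
 * Each stub pushes $vector-256, i.e. a negative value in the orig_eax slot,
 * so interrupt entries can be told apart from system calls (whose orig_eax
 * is the non-negative syscall number) and the vector can be recovered by
 * do_IRQ.
 */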

        ALIGN
common_interrupt:
        SAVE_ALL
        movl %esp,%eax
        call do_IRQ
        jmp ret_from_intr

#define BUILD_INTERRUPT(name, nr) \
ENTRY(name) \
        pushl $nr-256; \
        SAVE_ALL \
        movl %esp,%eax; \
        call smp_/**/name; \
        jmp ret_from_intr;

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"
#endif /* XEN */

ENTRY(divide_error)
        pushl $0                        # no error code
        pushl $do_divide_error
        ALIGN
error_code:
        pushl %ds
        pushl %eax
        xorl %eax, %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        decl %eax                       # eax = -1
        pushl %ecx
        pushl %ebx
        cld
        pushl %es
        # UNWIND_ESPFIX_STACK
        popl %ecx
        movl ES(%esp), %edi             # get the function address
        movl ORIG_EAX(%esp), %edx       # get the error code
        movl %eax, ORIG_EAX(%esp)
        movl %ecx, ES(%esp)
        movl $(__USER_DS), %ecx
        movl %ecx, %ds
        movl %ecx, %es
        movl %esp,%eax                  # pt_regs pointer
        call *%edi
        jmp ret_from_exception
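
/*
 * error_code builds a pt_regs frame in place: after the pushes above, the
 * handler address pushed by the trap stub occupies the ES slot and the
 * error code occupies the orig_eax slot.  Both are fetched into %edi/%edx,
 * the real saved %es and a -1 marker ("not a syscall") are written back,
 * and the handler is called with %eax = pt_regs and %edx = error code, per
 * the fastcall convention.
 */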

# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
ENTRY(hypervisor_callback)
        pushl %eax
        SAVE_ALL
        movl EIP(%esp),%eax
        cmpl $scrit,%eax
        jb 11f
        cmpl $ecrit,%eax
        jb critical_region_fixup
11:     push %esp
        call evtchn_do_upcall
        add $4,%esp
        jmp ret_from_intr
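
# If the interrupted EIP lies in [scrit, ecrit) the upcall hit the critical
# region below, so the two stack frames are merged first
# (critical_region_fixup); otherwise evtchn_do_upcall is called with the
# pt_regs pointer and we leave through ret_from_intr as for any interrupt.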

        ALIGN
restore_all_enable_events:
        XEN_LOCKED_UNBLOCK_EVENTS(%esi)
scrit:  /**** START OF CRITICAL REGION ****/
        XEN_TEST_PENDING(%esi)
        jnz 14f                         # process more events if necessary...
        XEN_PUT_VCPU_INFO(%esi)
        RESTORE_ALL
14:     XEN_LOCKED_BLOCK_EVENTS(%esi)
        XEN_PUT_VCPU_INFO(%esi)
        jmp 11b
ecrit:  /**** END OF CRITICAL REGION ****/
# [How we do the fixup]. We want to merge the current stack frame with the
# just-interrupted frame. How we do this depends on where in the critical
# region the interrupted handler was executing, and so how many saved
# registers are in each frame. We do this quickly using the lookup table
# 'critical_fixup_table'. For each byte offset in the critical region, it
# provides the number of bytes which have already been popped from the
# interrupted stack frame.
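# For example: if the upcall arrives while EIP is at the 'pop %edx' of the
# interrupted RESTORE sequence, the table below yields 0x08 -- %ebx and %ecx
# had already been reloaded, and SAVE_ALL re-saved their (old-frame) values
# at the bottom of the new frame.  Those 8 bytes are copied to just below
# the remainder of the old frame at %esp+0x34, %esp is moved to the merged
# frame, and we re-enter the upcall path at 11:.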
critical_region_fixup:
        addl $critical_fixup_table-scrit,%eax
        movzbl (%eax),%eax              # %eax contains num bytes popped
        cmpb $0xff,%al                  # 0xff => vcpu_info critical region
        jne 15f
        GET_THREAD_INFO(%ebp)
        XEN_PUT_VCPU_INFO(%esi)         # abort vcpu_info critical region
        xorl %eax,%eax
15:     mov %esp,%esi
        add %eax,%esi                   # %esi points at end of src region
        mov %esp,%edi
        add $0x34,%edi                  # %edi points at end of dst region
        mov %eax,%ecx
        shr $2,%ecx                     # convert bytes to words (loop count)
        je 17f                          # skip loop if nothing to copy
16:     subl $4,%esi                    # pre-decrementing copy loop
        subl $4,%edi
        movl (%esi),%eax
        movl %eax,(%edi)
        loop 16b
17:     movl %edi,%esp                  # final %edi is top of merged stack
        jmp 11b

critical_fixup_table:
        .byte 0xff,0xff,0xff            # testb $0xff,(%esi) = XEN_TEST_PENDING
        .byte 0xff,0xff                 # jnz 14f
        XEN_PUT_VCPU_INFO_fixup
        .byte 0x00                      # pop %ebx
        .byte 0x04                      # pop %ecx
        .byte 0x08                      # pop %edx
        .byte 0x0c                      # pop %esi
        .byte 0x10                      # pop %edi
        .byte 0x14                      # pop %ebp
        .byte 0x18                      # pop %eax
        .byte 0x1c                      # pop %ds
        .byte 0x20                      # pop %es
        .byte 0x24,0x24,0x24            # add $4,%esp
        .byte 0x28                      # iret
        .byte 0xff,0xff,0xff,0xff       # movb $1,1(%esi)
        XEN_PUT_VCPU_INFO_fixup
        .byte 0x00,0x00                 # jmp 11b
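
# The table holds one byte per instruction byte of the scrit..ecrit region
# (XEN_PUT_VCPU_INFO_fixup supplies the extra entries for the SMP-only
# preempt_enable), so it must be updated whenever the critical region's
# instruction encoding changes.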

# Hypervisor uses this for application faults while it executes.
ENTRY(failsafe_callback)
1:      popl %ds
2:      popl %es
3:      popl %fs
4:      popl %gs
        subl $4,%esp
        SAVE_ALL
        jmp ret_from_exception
.section .fixup,"ax"; \
6:      movl $0,(%esp); \
        jmp 1b; \
7:      movl $0,(%esp); \
        jmp 2b; \
8:      movl $0,(%esp); \
        jmp 3b; \
9:      movl $0,(%esp); \
        jmp 4b; \
.previous; \
.section __ex_table,"a"; \
        .align 4; \
        .long 1b,6b; \
        .long 2b,7b; \
        .long 3b,8b; \
        .long 4b,9b; \
.previous
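
# Xen enters here when it cannot safely complete a return to the guest (for
# example a bad segment selector saved in the frame).  Each pop above is
# covered by a fixup that replaces a faulting selector with 0 and retries
# the pop; a normal frame is then built and we leave via ret_from_exception.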

ENTRY(coprocessor_error)
        pushl $0
        pushl $do_coprocessor_error
        jmp error_code

ENTRY(simd_coprocessor_error)
        pushl $0
        pushl $do_simd_coprocessor_error
        jmp error_code

ENTRY(device_not_available)
        pushl $-1                       # mark this as an int
        SAVE_ALL
        #preempt_stop /* This is already an interrupt gate on Xen. */
        call math_state_restore
        jmp ret_from_exception

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label) \
        cmpw $__KERNEL_CS,4(%esp); \
        jne ok; \
label: \
        movl TSS_sysenter_esp0+offset(%esp),%esp; \
        pushfl; \
        pushl $__KERNEL_CS; \
        pushl $sysenter_past_esp
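
/*
 * FIX_STACK only acts when the interrupted CS is __KERNEL_CS: it switches
 * %esp to the real kernel stack taken from the TSS slot and rebuilds the
 * three-word eflags/cs/eip frame by hand, with the return eip pointing at
 * sysenter_past_esp, i.e. past the instruction that would normally have
 * loaded the kernel stack.
 */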

ENTRY(debug)
        cmpl $sysenter_entry,(%esp)
        jne debug_stack_correct
        FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
        pushl $-1                       # mark this as an int
        SAVE_ALL
        xorl %edx,%edx                  # error code 0
        movl %esp,%eax                  # pt_regs pointer
        call do_debug
        jmp ret_from_exception

#if 0 /* XEN */
/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
        pushl %eax
        movl %ss, %eax
        cmpw $__ESPFIX_SS, %ax
        popl %eax
        je nmi_16bit_stack
        cmpl $sysenter_entry,(%esp)
        je nmi_stack_fixup
        pushl %eax
        movl %esp,%eax
        /* Do not access memory above the end of our stack page,
         * it might not exist.
         */
        andl $(THREAD_SIZE-1),%eax
        cmpl $(THREAD_SIZE-20),%eax
        popl %eax
        jae nmi_stack_correct
        cmpl $sysenter_entry,12(%esp)
        je nmi_debug_stack_check
nmi_stack_correct:
        pushl %eax
        SAVE_ALL
        xorl %edx,%edx                  # zero error code
        movl %esp,%eax                  # pt_regs pointer
        call do_nmi
        jmp restore_all

nmi_stack_fixup:
        FIX_STACK(12,nmi_stack_correct, 1)
        jmp nmi_stack_correct
nmi_debug_stack_check:
        cmpw $__KERNEL_CS,16(%esp)
        jne nmi_stack_correct
        cmpl $debug - 1,(%esp)
        jle nmi_stack_correct
        cmpl $debug_esp_fix_insn,(%esp)
        jle nmi_debug_stack_fixup
nmi_debug_stack_fixup:
        FIX_STACK(24,nmi_stack_correct, 1)
        jmp nmi_stack_correct

nmi_16bit_stack:
        /* create the pointer to lss back */
        pushl %ss
        pushl %esp
        movzwl %sp, %esp
        addw $4, (%esp)
        /* copy the iret frame of 12 bytes */
        .rept 3
        pushl 16(%esp)
        .endr
        pushl %eax
        SAVE_ALL
        FIXUP_ESPFIX_STACK              # %eax == %esp
        xorl %edx,%edx                  # zero error code
        call do_nmi
        RESTORE_REGS
        lss 12+4(%esp), %esp            # back to 16bit stack
1:      iret
.section __ex_table,"a"
        .align 4
        .long 1b,iret_exc
.previous
#endif /* XEN */

ENTRY(int3)
        pushl $-1                       # mark this as an int
        SAVE_ALL
        xorl %edx,%edx                  # zero error code
        movl %esp,%eax                  # pt_regs pointer
        call do_int3
        jmp ret_from_exception

ENTRY(overflow)
        pushl $0
        pushl $do_overflow
        jmp error_code

ENTRY(bounds)
        pushl $0
        pushl $do_bounds
        jmp error_code

ENTRY(invalid_op)
        pushl $0
        pushl $do_invalid_op
        jmp error_code

ENTRY(coprocessor_segment_overrun)
        pushl $0
        pushl $do_coprocessor_segment_overrun
        jmp error_code

ENTRY(invalid_TSS)
        pushl $do_invalid_TSS
        jmp error_code

ENTRY(segment_not_present)
        pushl $do_segment_not_present
        jmp error_code

ENTRY(stack_segment)
        pushl $do_stack_segment
        jmp error_code

ENTRY(general_protection)
        pushl $do_general_protection
        jmp error_code

ENTRY(alignment_check)
        pushl $do_alignment_check
        jmp error_code

# This handler is special, because it gets an extra value on its stack,
# which is the linear faulting address.
# fastcall register usage: %eax = pt_regs, %edx = error code,
# %ecx = fault address
ENTRY(page_fault)
        pushl %ds
        pushl %eax
        xorl %eax, %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        decl %eax                       /* eax = -1 */
        pushl %ecx
        pushl %ebx
        cld
        pushl %es
        # UNWIND_ESPFIX_STACK
        popl %edi
        movl ES(%esp), %ecx             /* get the faulting address */
        movl ORIG_EAX(%esp), %edx       /* get the error code */
        movl %eax, ORIG_EAX(%esp)
        movl %edi, ES(%esp)
        movl $(__KERNEL_DS),%eax
        movl %eax, %ds
        movl %eax, %es
        movl %esp,%eax                  /* pt_regs pointer */
        call do_page_fault
        jmp ret_from_exception

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
        pushl $0
        pushl machine_check_vector
        jmp error_code
#endif

ENTRY(fixup_4gb_segment)
        pushl $do_fixup_4gb_segment
        jmp error_code

#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)