direct-io.hg

view linux-2.6-xen-sparse/arch/xen/i386/kernel/traps.c @ 8678:f1bfe32828a1

Checkin missing hunks from previous commit

Signed-off-by: Ian Campbell <Ian.Campbell@XenSource.com>
author Ian.Campbell@xensource.com
date Fri Jan 27 14:24:50 2006 +0000 (2006-01-27)
parents 8bcfcfc13e21
children a0a59beb6596
/*
 *  linux/arch/i386/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/kprobes.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>

#include <asm/smp.h>
#include <asm/arch_hooks.h>
#include <asm/kdebug.h>

#include <linux/irq.h>
#include <linux/module.h>

#include "mach_traps.h"

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq = 0;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void fixup_4gb_segment(void);
asmlinkage void machine_check(void);

static int kstack_depth_to_print = 24;
struct notifier_block *i386die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);

int register_die_notifier(struct notifier_block *nb)
{
        int err = 0;
        unsigned long flags;

        spin_lock_irqsave(&die_notifier_lock, flags);
        err = notifier_chain_register(&i386die_chain, nb);
        spin_unlock_irqrestore(&die_notifier_lock, flags);
        return err;
}
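
/*
 * Example (a sketch, not part of this file): a client hooks the die
 * chain with an ordinary notifier_block.  The names my_die_handler and
 * my_die_nb are hypothetical; the chain passes a struct die_args (see
 * <asm/kdebug.h> of this era) as the data pointer.
 *
 *      static int my_die_handler(struct notifier_block *self,
 *                                unsigned long val, void *data)
 *      {
 *              struct die_args *args = data;
 *              if (val == DIE_OOPS)
 *                      printk("oops at eip %08lx\n", args->regs->eip);
 *              return NOTIFY_DONE;
 *      }
 *      static struct notifier_block my_die_nb = {
 *              .notifier_call = my_die_handler,
 *      };
 *      register_die_notifier(&my_die_nb);
 */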

static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
        return  p > (void *)tinfo &&
                p < (void *)tinfo + THREAD_SIZE - 3;
}

static inline unsigned long print_context_stack(struct thread_info *tinfo,
                                unsigned long *stack, unsigned long ebp)
{
        unsigned long addr;

#ifdef CONFIG_FRAME_POINTER
        while (valid_stack_ptr(tinfo, (void *)ebp)) {
                addr = *(unsigned long *)(ebp + 4);
                printk(" [<%08lx>] ", addr);
                print_symbol("%s", addr);
                printk("\n");
                ebp = *(unsigned long *)ebp;
        }
#else
        while (valid_stack_ptr(tinfo, stack)) {
                addr = *stack++;
                if (__kernel_text_address(addr)) {
                        printk(" [<%08lx>]", addr);
                        print_symbol(" %s", addr);
                        printk("\n");
                }
        }
#endif
        return ebp;
}
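
/*
 * For reference (standard i386 calling convention, not spelled out in
 * this file): with frame pointers enabled each stack frame looks like
 *
 *      [ebp + 4]  return address      (printed via print_symbol above)
 *      [ebp + 0]  caller's saved ebp  (link to the next frame)
 *
 * which is why the walker reads *(ebp + 4) for the symbol and then
 * follows *ebp up the chain until the pointer leaves the thread stack.
 */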

void show_trace(struct task_struct *task, unsigned long * stack)
{
        unsigned long ebp;

        if (!task)
                task = current;

        if (task == current) {
                /* Grab ebp right from our regs */
                asm ("movl %%ebp, %0" : "=r" (ebp) : );
        } else {
                /* ebp is the last reg pushed by switch_to */
                ebp = *(unsigned long *) task->thread.esp;
        }

        while (1) {
                struct thread_info *context;
                context = (struct thread_info *)
                        ((unsigned long)stack & (~(THREAD_SIZE - 1)));
                ebp = print_context_stack(context, stack, ebp);
                stack = (unsigned long*)context->previous_esp;
                if (!stack)
                        break;
                printk(" =======================\n");
        }
}

void show_stack(struct task_struct *task, unsigned long *esp)
{
        unsigned long *stack;
        int i;

        if (esp == NULL) {
                if (task)
                        esp = (unsigned long*)task->thread.esp;
                else
                        esp = (unsigned long *)&esp;
        }

        stack = esp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 8) == 0))
                        printk("\n       ");
                printk("%08lx ", *stack++);
        }
        printk("\nCall Trace:\n");
        show_trace(task, esp);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long stack;

        show_trace(current, &stack);
}

EXPORT_SYMBOL(dump_stack);
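
/*
 * Example (a sketch): because dump_stack() is exported, any kernel code
 * or module can print its own call chain while debugging, e.g.
 *
 *      if (suspicious_condition) {
 *              printk(KERN_DEBUG "unexpected state\n");
 *              dump_stack();
 *      }
 *
 * (suspicious_condition is a placeholder for whatever predicate is
 * under investigation.)
 */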

void show_registers(struct pt_regs *regs)
{
        int i;
        int in_kernel = 1;
        unsigned long esp;
        unsigned short ss;

        esp = (unsigned long) (&regs->esp);
        ss = __KERNEL_DS;
        if (regs->xcs & 2) {
                in_kernel = 0;
                esp = regs->esp;
                ss = regs->xss & 0xffff;
        }
        print_modules();
        printk("CPU:    %d\nEIP:    %04x:[<%08lx>]    %s VLI\nEFLAGS: %08lx"
                        "   (%s) \n",
                smp_processor_id(), 0xffff & regs->xcs, regs->eip,
                print_tainted(), regs->eflags, system_utsname.release);
        print_symbol("EIP is at %s\n", regs->eip);
        printk("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
                regs->eax, regs->ebx, regs->ecx, regs->edx);
        printk("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
                regs->esi, regs->edi, regs->ebp, esp);
        printk("ds: %04x   es: %04x   ss: %04x\n",
                regs->xds & 0xffff, regs->xes & 0xffff, ss);
        printk("Process %s (pid: %d, threadinfo=%p task=%p)",
                current->comm, current->pid, current_thread_info(), current);
        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                u8 *eip;

                printk("\nStack: ");
                show_stack(NULL, (unsigned long*)esp);

                printk("Code: ");

                eip = (u8 *)regs->eip - 43;
                for (i = 0; i < 64; i++, eip++) {
                        unsigned char c;

                        if (eip < (u8 *)PAGE_OFFSET || __get_user(c, eip)) {
                                printk(" Bad EIP value.");
                                break;
                        }
                        if (eip == (u8 *)regs->eip)
                                printk("<%02x> ", c);
                        else
                                printk("%02x ", c);
                }
        }
        printk("\n");
}

static void handle_BUG(struct pt_regs *regs)
{
        unsigned short ud2;
        unsigned short line;
        char *file;
        char c;
        unsigned long eip;

        if (regs->xcs & 2)
                goto no_bug;            /* Not in kernel */

        eip = regs->eip;

        if (eip < PAGE_OFFSET)
                goto no_bug;
        if (__get_user(ud2, (unsigned short *)eip))
                goto no_bug;
        if (ud2 != 0x0b0f)
                goto no_bug;
        if (__get_user(line, (unsigned short *)(eip + 2)))
                goto bug;
        if (__get_user(file, (char **)(eip + 4)) ||
                (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
                file = "<bad filename>";

        printk("------------[ cut here ]------------\n");
        printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);

no_bug:
        return;

        /* Here we know it was a BUG but file-n-line is unavailable */
bug:
        printk("Kernel BUG\n");
}
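
/*
 * For context (a sketch of the era's i386 BUG() from <asm/bug.h>,
 * quoted from memory rather than from this file): BUG() emits a ud2
 * opcode (bytes 0x0f 0x0b, read above as the little-endian word
 * 0x0b0f) followed inline by the line number as a 16-bit word and the
 * file name as a 32-bit pointer, which is exactly the layout
 * handle_BUG() parses at eip, eip + 2 and eip + 4:
 *
 *      #define BUG()                                           \
 *              __asm__ __volatile__("ud2\n"                    \
 *                                   "\t.word %c0\n"            \
 *                                   "\t.long %c1\n"            \
 *                                   : : "i" (__LINE__), "i" (__FILE__))
 */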

void die(const char * str, struct pt_regs * regs, long err)
{
        static struct {
                spinlock_t lock;
                u32 lock_owner;
                int lock_owner_depth;
        } die = {
                .lock =                 SPIN_LOCK_UNLOCKED,
                .lock_owner =           -1,
                .lock_owner_depth =     0
        };
        static int die_counter;

        if (die.lock_owner != _smp_processor_id()) {
                console_verbose();
                spin_lock_irq(&die.lock);
                die.lock_owner = smp_processor_id();
                die.lock_owner_depth = 0;
                bust_spinlocks(1);
        }

        if (++die.lock_owner_depth < 3) {
                int nl = 0;
                handle_BUG(regs);
                printk(KERN_ALERT "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
                printk("PREEMPT ");
                nl = 1;
#endif
#ifdef CONFIG_SMP
                printk("SMP ");
                nl = 1;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
                printk("DEBUG_PAGEALLOC");
                nl = 1;
#endif
                if (nl)
                        printk("\n");
                notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
                show_registers(regs);
        } else
                printk(KERN_ERR "Recursive die() failure, output suppressed\n");

        bust_spinlocks(0);
        die.lock_owner = -1;
        spin_unlock_irq(&die.lock);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops) {
                printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
                ssleep(5);
                panic("Fatal exception");
        }
        do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
        if (!(regs->eflags & VM_MASK) && !(2 & regs->xcs))
                die(str, regs, err);
}

static void do_trap(int trapnr, int signr, char *str, int vm86,
                           struct pt_regs * regs, long error_code, siginfo_t *info)
{
        if (regs->eflags & VM_MASK) {
                if (vm86)
                        goto vm86_trap;
                goto trap_signal;
        }

        if (!(regs->xcs & 2))
                goto kernel_trap;

        trap_signal: {
                struct task_struct *tsk = current;
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

        kernel_trap: {
                if (!fixup_exception(regs))
                        die(str, regs, error_code);
                return;
        }

        vm86_trap: {
                int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
                if (ret) goto trap_signal;
                return;
        }
}

#define DO_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}

DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
#ifndef CONFIG_KPROBES
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
#endif
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
#ifdef CONFIG_X86_MCE
DO_ERROR(18, SIGBUS, "machine check", machine_check)
#endif
DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
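
/*
 * For illustration (hand-expanded from the macro above, not extra code
 * in the file): DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
 * generates
 *
 *      fastcall void do_invalid_TSS(struct pt_regs * regs, long error_code)
 *      {
 *              if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code,
 *                                              10, SIGSEGV) == NOTIFY_STOP)
 *                      return;
 *              do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL);
 *      }
 *
 * i.e. one thin handler per vector that consults the die chain and
 * then funnels into do_trap().
 */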

fastcall void do_general_protection(struct pt_regs * regs, long error_code)
{
        /*
         * If we trapped on an LDT access then ensure that the default_ldt is
         * loaded, if nothing else. We load default_ldt lazily because LDT
         * switching costs time and many applications don't need it.
         */
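        /*
         * Architectural aside (not from this file): in a selector error
         * code, bit 0 flags an externally caused event, bit 1 an IDT
         * reference and bit 2 an LDT (vs. GDT) reference, so
         * (error_code & 6) == 4 means the faulting selector was in the LDT.
         */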
        if (unlikely((error_code & 6) == 4)) {
                unsigned long ldt;
                __asm__ __volatile__ ("sldt %0" : "=r" (ldt));
                if (ldt == 0) {
                        xen_set_ldt((unsigned long)&default_ldt[0], 5);
                        return;
                }
        }

        if (regs->eflags & VM_MASK)
                goto gp_in_vm86;

        if (!(regs->xcs & 2))
                goto gp_in_kernel;

        current->thread.error_code = error_code;
        current->thread.trap_no = 13;
        force_sig(SIGSEGV, current);
        return;

gp_in_vm86:
        local_irq_enable();
        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
        return;

gp_in_kernel:
        if (!fixup_exception(regs)) {
                if (notify_die(DIE_GPF, "general protection fault", regs,
                                error_code, 13, SIGSEGV) == NOTIFY_STOP)
                        return;
                die("general protection fault", regs, error_code);
        }
}

static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
        printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
        printk("You probably have a hardware problem with your RAM chips\n");

        /* Clear and disable the memory parity error line. */
        clear_mem_error(reason);
}

static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
        printk("NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line, wait for a few seconds */
        clear_io_check_error(reason);
}

static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
        /* Might actually be able to figure out what the guilty party
         * is. */
        if( MCA_bus ) {
                mca_handle_nmi();
                return;
        }
#endif
        printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                reason, smp_processor_id());
        printk("Dazed and confused, but trying to continue\n");
        printk("Do you have a strange power saving mode enabled?\n");
}

static DEFINE_SPINLOCK(nmi_print_lock);

void die_nmi (struct pt_regs *regs, const char *msg)
{
        spin_lock(&nmi_print_lock);
        /*
         * We are in trouble anyway, let's at least try
         * to get a message out.
         */
        bust_spinlocks(1);
        printk(msg);
        printk(" on CPU%d, eip %08lx, registers:\n",
                smp_processor_id(), regs->eip);
        show_registers(regs);
        printk("console shuts up ...\n");
        console_silent();
        spin_unlock(&nmi_print_lock);
        bust_spinlocks(0);
        do_exit(SIGSEGV);
}

static void default_do_nmi(struct pt_regs * regs)
{
        unsigned char reason = 0;

        /* Only the BSP gets external NMIs from the system. */
        if (!smp_processor_id())
                reason = get_nmi_reason();

        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
                                                        == NOTIFY_STOP)
                        return;
#ifdef CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog) {
                        nmi_watchdog_tick(regs);
                        return;
                }
#endif
                unknown_nmi_error(reason, regs);
                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
                return;
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
        /*
         * Reassert NMI in case it became active meanwhile
         * as it's edge-triggered.
         */
        reassert_nmi();
}

static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
        return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;

fastcall void do_nmi(struct pt_regs * regs, long error_code)
{
        int cpu;

        nmi_enter();

        cpu = smp_processor_id();

#ifdef CONFIG_HOTPLUG_CPU
        if (!cpu_online(cpu)) {
                nmi_exit();
                return;
        }
#endif

        ++nmi_count(cpu);

        if (!nmi_callback(regs, cpu))
                default_do_nmi(regs);

        nmi_exit();
}

void set_nmi_callback(nmi_callback_t callback)
{
        nmi_callback = callback;
}

void unset_nmi_callback(void)
{
        nmi_callback = dummy_nmi_callback;
}
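
/*
 * Example (a sketch; profiler-style users hook NMIs this way, and the
 * names below are hypothetical): returning nonzero from the callback
 * tells do_nmi() the event was consumed, so default_do_nmi() is
 * skipped.
 *
 *      static int my_nmi_handler(struct pt_regs *regs, int cpu)
 *      {
 *              my_record_sample(regs->eip);
 *              return 1;       // handled; skip the default path
 *      }
 *
 *      set_nmi_callback(my_nmi_handler);
 *      ...
 *      unset_nmi_callback();
 */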

#ifdef CONFIG_KPROBES
fastcall void do_int3(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return;
        /* This is an interrupt gate, because kprobes wants interrupts
           disabled.  Normal trap handlers don't. */
        restore_interrupts(regs);
        do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
}
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
fastcall void do_debug(struct pt_regs * regs, long error_code)
{
        unsigned int condition;
        struct task_struct *tsk = current;

        condition = HYPERVISOR_get_debugreg(6);

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                                        SIGTRAP) == NOTIFY_STOP)
                return;
        /* It's safe to allow irq's after DR6 has been saved */
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg[7])
                        goto clear_dr7;
        }

        if (regs->eflags & VM_MASK)
                goto debug_vm86;

        /* Save debug status register where ptrace can see it */
        tsk->thread.debugreg[6] = condition;

        /*
         * Single-stepping through TF: make sure we ignore any events in
         * kernel space (but re-enable TF when returning to user mode).
         */
        if (condition & DR_STEP) {
                /*
                 * We already checked v86 mode above, so we can
                 * check for kernel mode by just checking the CPL
                 * of CS.
                 */
                if ((regs->xcs & 2) == 0)
                        goto clear_TF_reenable;
        }

        /* Ok, finally something we can handle */
        send_sigtrap(tsk, regs, error_code);

        /* Disable additional traps. They'll be re-enabled when
         * the signal is delivered.
         */
clear_dr7:
        HYPERVISOR_set_debugreg(7, 0);
        return;

debug_vm86:
        handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
        return;

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
        regs->eflags &= ~TF_MASK;
        return;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short cwd, swd;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status.  0x3f is the exception bits in these regs, 0x200 is the
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit.  We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception
         */
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
                case 0x000:
                default:
                        break;
                case 0x001: /* Invalid Op */
                case 0x041: /* Stack Fault */
                case 0x241: /* Stack Fault | Direction */
                        info.si_code = FPE_FLTINV;
                        /* Should we clear the SF or let user space do it ???? */
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}

fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
        ignore_fpu_irq = 1;
        math_error((void __user *)regs->eip);
}

static void simd_math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short mxcsr;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register.  Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
                case 0x000:
                default:
                        break;
                case 0x001: /* Invalid Op */
                        info.si_code = FPE_FLTINV;
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}

fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
                                          long error_code)
{
        if (cpu_has_xmm) {
                /* Handle SIMD FPU exceptions on PIII+ processors. */
                ignore_fpu_irq = 1;
                simd_math_error((void __user *)regs->eip);
        } else {
                /*
                 * Handle strange cache flush from user space exception
                 * in all other cases.  This is undocumented behaviour.
                 */
                if (regs->eflags & VM_MASK) {
                        handle_vm86_fault((struct kernel_vm86_regs *)regs,
                                          error_code);
                        return;
                }
                die_if_kernel("cache flush denied", regs, error_code);
                current->thread.trap_no = 19;
                current->thread.error_code = error_code;
                force_sig(SIGSEGV, current);
        }
}

#ifndef CONFIG_XEN
fastcall void setup_x86_bogus_stack(unsigned char * stk)
{
        unsigned long *switch16_ptr, *switch32_ptr;
        struct pt_regs *regs;
        unsigned long stack_top, stack_bot;
        unsigned short iret_frame16_off;
        int cpu = smp_processor_id();
        /* reserve the space on 32bit stack for the magic switch16 pointer */
        memmove(stk, stk + 8, sizeof(struct pt_regs));
        switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
        regs = (struct pt_regs *)stk;
        /* now the switch32 on 16bit stack */
        stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
        stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
        switch32_ptr = (unsigned long *)(stack_top - 8);
        iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
        /* copy iret frame on 16bit stack */
        memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
        /* fill in the switch pointers */
        switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
        switch16_ptr[1] = __ESPFIX_SS;
        switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
                8 - CPU_16BIT_STACK_SIZE;
        switch32_ptr[1] = __KERNEL_DS;
}

fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
{
        unsigned long *switch32_ptr;
        unsigned char *stack16, *stack32;
        unsigned long stack_top, stack_bot;
        int len;
        int cpu = smp_processor_id();
        stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
        stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
        switch32_ptr = (unsigned long *)(stack_top - 8);
        /* copy the data from 16bit stack to 32bit stack */
        len = CPU_16BIT_STACK_SIZE - 8 - sp;
        stack16 = (unsigned char *)(stack_bot + sp);
        stack32 = (unsigned char *)
                (switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
        memcpy(stack32, stack16, len);
        return stack32;
}
#endif
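
/*
 * Background (editorial note, not from the file): the two helpers above
 * implement the i386 "espfix" workaround.  Returning via iret to a
 * 16-bit stack segment leaks the high 16 bits of the kernel esp, so
 * the iret frame is staged on a small per-cpu 16-bit stack instead.
 * The block is compiled out under CONFIG_XEN, where the hypervisor
 * handles the return to guest context.
 */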

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(struct pt_regs regs)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        /* NB. 'clts' is done for us by Xen during virtual trap. */
        if (!tsk_used_math(tsk))
                init_fpu(tsk);
        restore_fpu(tsk);
        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
}

#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
        printk("math-emulation not enabled and no coprocessor found.\n");
        printk("killing %s.\n",current->comm);
        force_sig(SIGFPE,current);
        schedule();
}

#endif /* CONFIG_MATH_EMULATION */

#ifdef CONFIG_X86_F00F_BUG
void __init trap_init_f00f_bug(void)
{
        __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

        /*
         * Update the IDT descriptor and reload the IDT so that
         * it uses the read-only mapped virtual address.
         */
        idt_descr.address = fix_to_virt(FIX_F00F_IDT);
        __asm__ __volatile__("lidt %0" : : "m" (idt_descr));
}
#endif

/*
 * NB. All these are "trap gates" (i.e. events_mask isn't set) except
 * for those that specify <dpl>|4 in the second field.
 */
static trap_info_t trap_table[] = {
        {  0, 0,   __KERNEL_CS, (unsigned long)divide_error },
        {  1, 0|4, __KERNEL_CS, (unsigned long)debug },
        {  3, 3|4, __KERNEL_CS, (unsigned long)int3 },
        {  4, 3,   __KERNEL_CS, (unsigned long)overflow },
        {  5, 3,   __KERNEL_CS, (unsigned long)bounds },
        {  6, 0,   __KERNEL_CS, (unsigned long)invalid_op },
        {  7, 0|4, __KERNEL_CS, (unsigned long)device_not_available },
        {  9, 0,   __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
        { 10, 0,   __KERNEL_CS, (unsigned long)invalid_TSS },
        { 11, 0,   __KERNEL_CS, (unsigned long)segment_not_present },
        { 12, 0,   __KERNEL_CS, (unsigned long)stack_segment },
        { 13, 0,   __KERNEL_CS, (unsigned long)general_protection },
        { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault },
        { 15, 0,   __KERNEL_CS, (unsigned long)fixup_4gb_segment },
        { 16, 0,   __KERNEL_CS, (unsigned long)coprocessor_error },
        { 17, 0,   __KERNEL_CS, (unsigned long)alignment_check },
#ifdef CONFIG_X86_MCE
        { 18, 0,   __KERNEL_CS, (unsigned long)machine_check },
#endif
        { 19, 0,   __KERNEL_CS, (unsigned long)simd_coprocessor_error },
        { SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)system_call },
        {  0, 0,   0, 0 }
};
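
/*
 * For reference (a sketch from memory of the Xen public headers, not
 * from this file), each entry above is a trap_info_t roughly like:
 *
 *      typedef struct trap_info {
 *              uint8_t       vector;   // exception vector number
 *              uint8_t       flags;    // bits 0-1: privilege level;
 *                                      // bit 2: mask events on entry
 *              uint16_t      cs;       // code selector for the handler
 *              unsigned long address;  // handler entry point
 *      } trap_info_t;
 *
 * so "3|4" installs int3 as callable from ring 3 with virtual event
 * delivery masked on entry, matching the comment above the table.
 */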

void __init trap_init(void)
{
        HYPERVISOR_set_trap_table(trap_table);

        /*
         * default LDT is a single-entry callgate to lcall7 for iBCS
         * and a callgate to lcall27 for Solaris/x86 binaries
         */
        make_lowmem_mmu_page_readonly(&default_ldt[0]);

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();
}

void smp_trap_init(trap_info_t *trap_ctxt)
{
        trap_info_t *t = trap_table;
        int i;

        for (i = 0; i < 256; i++) {
                trap_ctxt[i].vector = i;
                trap_ctxt[i].cs = FLAT_KERNEL_CS;
        }

        for (t = trap_table; t->address; t++) {
                trap_ctxt[t->vector].flags = t->flags;
                trap_ctxt[t->vector].cs = t->cs;
                trap_ctxt[t->vector].address = t->address;
        }
}

static int __init kstack_setup(char *s)
{
        kstack_depth_to_print = simple_strtoul(s, NULL, 0);
        return 0;
}
__setup("kstack=", kstack_setup);
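
/*
 * Usage note (editorial): kstack_depth_to_print can be raised at boot
 * via the kernel command line, e.g. "kstack=48"; since the parser is
 * simple_strtoul(s, NULL, 0), a hex form such as "kstack=0x30" works
 * as well.
 */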