ia64/xen-unstable

view linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/traps.c @ 5657:cb6b221bef55

Recent header file cleanup (cset 2b6c1a8098078f7e53de7cf72227fddf01f0b2b6)
broke x86_64. The cleanup removed a padding field in trap_info_t, so the
trap table set the address for each trap handler to zero.

Signed-off-by: Chris Wright <chrisw@osdl.org>
author kaf24@firebug.cl.cam.ac.uk
date Mon Jul 04 08:18:42 2005 +0000 (2005-07-04)
parents 25ceeee71ab6
children 9b1866006aea 8bd2e8933277
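The mechanism of the breakage is worth spelling out. Below is a standalone sketch — the struct and field names are hypothetical, not the real trap_info_t — of how dropping a struct member silently re-binds positional initializers like the trap_table entries in this file: a literal that used to fill the padding slot now fills the address field, and the value meant for the address becomes an excess initializer that gcc merely warns about and discards.

/*
 * Standalone illustration only -- hypothetical layouts, not the real
 * trap_info_t definition.
 */
#include <stdio.h>

struct trap_info_old {		/* layout before the header cleanup */
	unsigned char  vector;
	unsigned char  flags;
	unsigned short cs;
	unsigned int   pad;	/* the padding field that was removed */
	unsigned long  address;
};

struct trap_info_new {		/* layout after the cleanup, pad gone */
	unsigned char  vector;
	unsigned char  flags;
	unsigned short cs;
	unsigned long  address;
};

int main(void)
{
	/* Initializer written for the old layout: the fourth 0 fills .pad. */
	struct trap_info_old old = { 14, 0, 0x10, 0, 0xfeedUL };
	/* Same positional initializer against the new layout: the 0 now
	 * lands in .address, and 0xfeedUL is an "excess element" that gcc
	 * warns about and drops -- every handler address becomes zero. */
	struct trap_info_new new = { 14, 0, 0x10, 0, 0xfeedUL };

	printf("old.address=%#lx new.address=%#lx\n", old.address, new.address);
	return 0;
}

Compiled with gcc -Wall this should produce an "excess elements in struct initializer" warning and print new.address=0, which is the failure the changeset describes.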
line source
/*
 *  linux/arch/x86-64/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *  Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  $Id: traps.c,v 1.36 2002/03/24 11:09:10 ak Exp $
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/kdebug.h>
#include <asm/processor.h>

#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
#include <asm/proto.h>
#include <asm/nmi.h>

#include <linux/irq.h>
extern struct gate_struct idt_table[256];

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void call_debug(void);

struct notifier_block *die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);

int register_die_notifier(struct notifier_block *nb)
{
	int err = 0;
	unsigned long flags;
	spin_lock_irqsave(&die_notifier_lock, flags);
	err = notifier_chain_register(&die_chain, nb);
	spin_unlock_irqrestore(&die_notifier_lock, flags);
	return err;
}

static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();
}

static int kstack_depth_to_print = 10;
#ifdef CONFIG_KALLSYMS
#include <linux/kallsyms.h>
int printk_address(unsigned long address)
{
	unsigned long offset = 0, symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[128];

	symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);
	if (!symname)
		return printk("[<%016lx>]", address);
	if (!modname)
		modname = delim = "";
	return printk("<%016lx>{%s%s%s%s%+ld}",
		      address, delim, modname, delim, symname, offset);
}
#else
int printk_address(unsigned long address)
{
	return printk("[<%016lx>]", address);
}
#endif

unsigned long *in_exception_stack(int cpu, unsigned long stack)
{
	int k;
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		struct tss_struct *tss = &per_cpu(init_tss, cpu);
		unsigned long end = tss->ist[k] + EXCEPTION_STKSZ;

		if (stack >= tss->ist[k] && stack <= end)
			return (unsigned long *)end;
	}
	return NULL;
}
/*
 * x86-64 can have up to three kernel stacks:
 *	process stack
 *	interrupt stack
 *	severe exception (double fault, nmi, stack fault) hardware stack
 * Check and process them in order.
 */
void show_trace(unsigned long *stack)
{
	unsigned long addr;
	unsigned long *irqstack, *irqstack_end, *estack_end;
	const int cpu = safe_smp_processor_id();
	int i;

	printk("\nCall Trace:");
	i = 0;

	estack_end = in_exception_stack(cpu, (unsigned long)stack);
	if (estack_end) {
		while (stack < estack_end) {
			addr = *stack++;
			if (__kernel_text_address(addr)) {
				i += printk_address(addr);
				i += printk(" ");
				if (i > 50) {
					printk("\n");
					i = 0;
				}
			}
		}
		i += printk(" <EOE> ");
		i += 7;
		stack = (unsigned long *) estack_end[-2];
	}

	irqstack_end = (unsigned long *) (cpu_pda[cpu].irqstackptr);
	irqstack = (unsigned long *) (cpu_pda[cpu].irqstackptr - IRQSTACKSIZE + 64);

	if (stack >= irqstack && stack < irqstack_end) {
		printk("<IRQ> ");
		while (stack < irqstack_end) {
			addr = *stack++;
			/*
			 * If the address is either in the text segment of the
			 * kernel, or in the region which contains vmalloc'ed
			 * memory, it *may* be the address of a calling
			 * routine; if so, print it so that someone tracing
			 * down the cause of the crash will be able to figure
			 * out the call path that was taken.
			 */
			if (__kernel_text_address(addr)) {
				i += printk_address(addr);
				i += printk(" ");
				if (i > 50) {
					printk("\n ");
					i = 0;
				}
			}
		}
		stack = (unsigned long *) (irqstack_end[-1]);
		printk(" <EOI> ");
		i += 7;
	}

	while (((long) stack & (THREAD_SIZE-1)) != 0) {
		addr = *stack++;
		if (__kernel_text_address(addr)) {
			i += printk_address(addr);
			i += printk(" ");
			if (i > 50) {
				printk("\n ");
				i = 0;
			}
		}
	}
	printk("\n");
}
void show_stack(struct task_struct *tsk, unsigned long *rsp)
{
	unsigned long *stack;
	int i;
	const int cpu = safe_smp_processor_id();
	unsigned long *irqstack_end = (unsigned long *) (cpu_pda[cpu].irqstackptr);
	unsigned long *irqstack = (unsigned long *) (cpu_pda[cpu].irqstackptr - IRQSTACKSIZE);

	// debugging aid: "show_stack(NULL, NULL);" prints the
	// back trace for this cpu.

	if (rsp == NULL) {
		if (tsk)
			rsp = (unsigned long *)tsk->thread.rsp;
		else
			rsp = (unsigned long *)&rsp;
	}

	stack = rsp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irqstack && stack <= irqstack_end) {
			if (stack == irqstack_end) {
				stack = (unsigned long *) (irqstack_end[-1]);
				printk(" <EOI> ");
			}
		} else {
			if (((long) stack & (THREAD_SIZE-1)) == 0)
				break;
		}
		if (i && ((i % 4) == 0))
			printk("\n ");
		printk("%016lx ", *stack++);
	}
	show_trace((unsigned long *)rsp);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long dummy;
	show_trace(&dummy);
}

EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = (regs->cs & 3) == 0;
	unsigned long rsp;
	const int cpu = safe_smp_processor_id();
	struct task_struct *cur = cpu_pda[cpu].pcurrent;

	rsp = regs->rsp;

	printk("CPU %d ", cpu);
	__show_regs(regs);
	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
	       cur->comm, cur->pid, cur->thread_info, cur);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {
		printk("Stack: ");
		show_stack(NULL, (unsigned long *)rsp);

		printk("\nCode: ");
		if (regs->rip < PAGE_OFFSET)
			goto bad;

		for (i = 0; i < 20; i++) {
			unsigned char c;
			if (__get_user(c, &((unsigned char *)regs->rip)[i])) {
bad:
				printk(" Bad RIP value.");
				break;
			}
			printk("%02x ", c);
		}
	}
	printk("\n");
}
void handle_BUG(struct pt_regs *regs)
{
	struct bug_frame f;
	char tmp;

	if (regs->cs & 3)
		return;
	if (__copy_from_user(&f, (struct bug_frame *) regs->rip,
			     sizeof(struct bug_frame)))
		return;
	if ((unsigned long)f.filename < __PAGE_OFFSET ||
	    f.ud2[0] != 0x0f || f.ud2[1] != 0x0b)
		return;
	if (__get_user(tmp, f.filename))
		f.filename = "unmapped filename";
	printk("----------- [cut here ] --------- [please bite here ] ---------\n");
	printk(KERN_ALERT "Kernel BUG at %.50s:%d\n", f.filename, f.line);
}

void out_of_line_bug(void)
{
	BUG();
}

static DEFINE_SPINLOCK(die_lock);
static int die_owner = -1;

void oops_begin(void)
{
	int cpu = safe_smp_processor_id();
	/* racy, but better than risking deadlock. */
	local_irq_disable();
	if (!spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			spin_lock(&die_lock);
	}
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
}

void oops_end(void)
{
	die_owner = -1;
	bust_spinlocks(0);
	spin_unlock(&die_lock);
	local_irq_enable();	/* make sure back scroll still works */
	if (panic_on_oops)
		panic("Oops");
}
void __die(const char *str, struct pt_regs *regs, long err)
{
	static int die_counter;
	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
	show_registers(regs);
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->rip);
	printk(" RSP <%016lx>\n", regs->rsp);
}

void die(const char *str, struct pt_regs *regs, long err)
{
	oops_begin();
	handle_BUG(regs);
	__die(str, regs, err);
	oops_end();
	do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (!(regs->eflags & VM_MASK) && (regs->cs == __KERNEL_CS))
		die(str, regs, err);
}
#ifdef CONFIG_X86_LOCAL_APIC
void die_nmi(char *str, struct pt_regs *regs)
{
	oops_begin();
	/*
	 * We are in trouble anyway, let's at least try
	 * to get a message out.
	 */
	printk(str, safe_smp_processor_id());
	show_registers(regs);
	if (panic_on_timeout || panic_on_oops)
		panic("nmi watchdog");
	printk("console shuts up ...\n");
	oops_end();
	do_exit(SIGSEGV);
}
#endif
static void do_trap(int trapnr, int signr, char *str,
		    struct pt_regs *regs, long error_code, siginfo_t *info)
{
	conditional_sti(regs);

#ifdef CONFIG_CHECKING
	{
		unsigned long gs;
		struct x8664_pda *pda = cpu_pda + safe_smp_processor_id();
		rdmsrl(MSR_GS_BASE, gs);
		if (gs != (unsigned long)pda) {
			wrmsrl(MSR_GS_BASE, pda);
			printk("%s: wrong gs %lx expected %p rip %lx\n", str, gs, pda,
			       regs->rip);
		}
	}
#endif

	if ((regs->cs & 3) != 0) {
		struct task_struct *tsk = current;

		if (exception_trace && unhandled_signal(tsk, signr))
			printk(KERN_INFO
			       "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
			       tsk->comm, tsk->pid, str,
			       regs->rip, regs->rsp, error_code);

		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

	/* kernel trap */
	{
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->rip);
		if (fixup) {
			regs->rip = fixup->fixup;
		} else
			die(str, regs, error_code);
		return;
	}
}
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
	    == NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
	    == NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, regs, error_code, &info); \
}

DO_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->rip)
DO_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid operand", invalid_op, ILL_ILLOPN, regs->rip)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)

#define DO_ERROR_STACK(trapnr, signr, str, name) \
asmlinkage void *do_##name(struct pt_regs *regs, long error_code) \
{ \
	struct pt_regs *pr = ((struct pt_regs *)(current->thread.rsp0))-1; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
	    == NOTIFY_STOP) \
		return regs; \
	if (regs->cs & 3) { \
		memcpy(pr, regs, sizeof(struct pt_regs)); \
		regs = pr; \
	} \
	do_trap(trapnr, signr, str, regs, error_code, NULL); \
	return regs; \
}

DO_ERROR_STACK(12, SIGBUS,  "stack segment", stack_segment)
DO_ERROR_STACK( 8, SIGSEGV, "double fault", double_fault)
asmlinkage void do_general_protection(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);

#ifdef CONFIG_CHECKING
	{
		unsigned long gs;
		struct x8664_pda *pda = cpu_pda + safe_smp_processor_id();
		rdmsrl(MSR_GS_BASE, gs);
		if (gs != (unsigned long)pda) {
			wrmsrl(MSR_GS_BASE, pda);
			oops_in_progress++;
			printk("general protection handler: wrong gs %lx expected %p\n", gs, pda);
			oops_in_progress--;
		}
	}
#endif

	if ((regs->cs & 3) != 0) {
		struct task_struct *tsk = current;

		if (exception_trace && unhandled_signal(tsk, SIGSEGV))
			printk(KERN_INFO
			       "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
			       tsk->comm, tsk->pid,
			       regs->rip, regs->rsp, error_code);

		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = 13;
		force_sig(SIGSEGV, tsk);
		return;
	}

	/* kernel gp */
	{
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->rip);
		if (fixup) {
			regs->rip = fixup->fixup;
			return;
		}
		if (notify_die(DIE_GPF, "general protection fault", regs,
			       error_code, 13, SIGSEGV) == NOTIFY_STOP)
			return;
		die("general protection fault", regs, error_code);
	}
}
static void mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
	printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
	printk("You probably have a hardware problem with your RAM chips\n");

	/* Clear and disable the memory parity error line. */
	reason = (reason & 0xf) | 4;
	outb(reason, 0x61);
}

static void io_check_error(unsigned char reason, struct pt_regs *regs)
{
	printk("NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	mdelay(2000);
	reason &= ~8;
	outb(reason, 0x61);
}

static void unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
	printk("Dazed and confused, but trying to continue\n");
	printk("Do you have a strange power saving mode enabled?\n");
}
asmlinkage void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;

	/* Only the BSP gets external NMIs from the system. */
	if (!smp_processor_id())
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
		    == NOTIFY_STOP)
			return;
#ifdef CONFIG_X86_LOCAL_APIC
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog > 0) {
			nmi_watchdog_tick(regs, reason);
			return;
		}
#endif
		unknown_nmi_error(reason, regs);
		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */

	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);

	/*
	 * Reassert NMI in case it became active meanwhile
	 * as it's edge-triggered.
	 */
	outb(0x8f, 0x70);
	inb(0x71);		/* dummy */
	outb(0x0f, 0x70);
	inb(0x71);		/* dummy */
}
asmlinkage void do_int3(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
		return;
	}
	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
	return;
}

/* runs on IST stack. */
asmlinkage void *do_debug(struct pt_regs *regs, unsigned long error_code)
{
	struct pt_regs *pr;
	unsigned long condition;
	struct task_struct *tsk = current;
	siginfo_t info;

	pr = (struct pt_regs *)(current->thread.rsp0)-1;
	if (regs->cs & 3) {
		memcpy(pr, regs, sizeof(struct pt_regs));
		regs = pr;
	}

#ifdef CONFIG_CHECKING
	{
		/* RED-PEN interaction with debugger - could destroy gs */
		unsigned long gs;
		struct x8664_pda *pda = cpu_pda + safe_smp_processor_id();
		rdmsrl(MSR_GS_BASE, gs);
		if (gs != (unsigned long)pda) {
			wrmsrl(MSR_GS_BASE, pda);
			printk("debug handler: wrong gs %lx expected %p\n", gs, pda);
		}
	}
#endif

	asm("movq %%db6,%0" : "=r" (condition));

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
		       SIGTRAP) == NOTIFY_STOP) {
		return regs;
	}
	conditional_sti(regs);

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7) {
			goto clear_dr7;
		}
	}

	tsk->thread.debugreg6 = condition;

	/* Mask out spurious TF errors due to lazy TF clearing */
	if ((condition & DR_STEP) &&
	    (notify_die(DIE_DEBUGSTEP, "debugstep", regs, condition,
			1, SIGTRAP) != NOTIFY_STOP)) {
		/*
		 * The TF error should be masked out only if the current
		 * process is not traced and if the TRAP flag has been set
		 * previously by a tracing process (condition detected by
		 * the PT_DTRACE flag); remember that the i386 TRAP flag
		 * can be modified by the process itself in user mode,
		 * allowing programs to debug themselves without the ptrace()
		 * interface.
		 */
		if ((regs->cs & 3) == 0)
			goto clear_TF_reenable;
		if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE)
			goto clear_TF;
	}

	/* Ok, finally something we can handle */
	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	if ((regs->cs & 3) == 0)
		goto clear_dr7;

	info.si_addr = (void __user *)regs->rip;
	force_sig_info(SIGTRAP, &info, tsk);
clear_dr7:
	asm volatile("movq %0,%%db7"::"r"(0UL));
	notify_die(DIE_DEBUG, "debug", regs, condition, 1, SIGTRAP);
	return regs;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);

clear_TF:
	/* RED-PEN could cause spurious errors */
	if (notify_die(DIE_DEBUG, "debug2", regs, condition, 1, SIGTRAP)
	    != NOTIFY_STOP)
		regs->eflags &= ~TF_MASK;
	return regs;
}
static int kernel_math_error(struct pt_regs *regs, char *str)
{
	const struct exception_table_entry *fixup;
	fixup = search_exception_tables(regs->rip);
	if (fixup) {
		regs->rip = fixup->fixup;
		return 1;
	}
	notify_die(DIE_GPF, str, regs, 0, 16, SIGFPE);
#if 0
	/* This should be a die, but warn only for now */
	die(str, regs, 0);
#else
	printk(KERN_DEBUG "%s: %s at ", current->comm, str);
	printk_address(regs->rip);
	printk("\n");
#endif
	return 0;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
asmlinkage void do_coprocessor_error(struct pt_regs *regs)
{
	void __user *rip = (void __user *)(regs->rip);
	struct task_struct *task;
	siginfo_t info;
	unsigned short cwd, swd;

	conditional_sti(regs);
	if ((regs->cs & 3) == 0 &&
	    kernel_math_error(regs, "kernel x87 math error"))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = rip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
	case 0x041: /* Stack Fault */
	case 0x241: /* Stack Fault | Direction */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}

asmlinkage void bad_intr(void)
{
	printk("bad interrupt");
}
asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
{
	void __user *rip = (void __user *)(regs->rip);
	struct task_struct *task;
	siginfo_t info;
	unsigned short mxcsr;

	conditional_sti(regs);
	if ((regs->cs & 3) == 0 &&
	    kernel_math_error(regs, "simd math error"))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = rip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}

asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs)
{
}

#if 0
asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}
#endif
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
	struct task_struct *me = current;

	/* clts(); */ /* 'clts' is done for us by Xen during virtual trap. */

	if (!used_math())
		init_fpu(me);
	restore_fpu_checking(&me->thread.i387.fxsave);
	me->thread_info->status |= TS_USEDFPU;
}

void do_call_debug(struct pt_regs *regs)
{
	notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
}
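/*
 * Virtual trap table registered with Xen via HYPERVISOR_set_trap_table()
 * in trap_init() below.  Each entry is { vector, DPL, CS selector, handler
 * address }; the table is terminated by an all-zero entry.
 */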
static trap_info_t trap_table[] = {
	{  0, 0, (__KERNEL_CS|0x3), (unsigned long)divide_error                },
	{  1, 0, (__KERNEL_CS|0x3), (unsigned long)debug                       },
	{  3, 3, (__KERNEL_CS|0x3), (unsigned long)int3                        },
	{  4, 3, (__KERNEL_CS|0x3), (unsigned long)overflow                    },
	{  5, 3, (__KERNEL_CS|0x3), (unsigned long)bounds                      },
	{  6, 0, (__KERNEL_CS|0x3), (unsigned long)invalid_op                  },
	{  7, 0, (__KERNEL_CS|0x3), (unsigned long)device_not_available        },
	{  9, 0, (__KERNEL_CS|0x3), (unsigned long)coprocessor_segment_overrun },
	{ 10, 0, (__KERNEL_CS|0x3), (unsigned long)invalid_TSS                 },
	{ 11, 0, (__KERNEL_CS|0x3), (unsigned long)segment_not_present         },
	{ 12, 0, (__KERNEL_CS|0x3), (unsigned long)stack_segment               },
	{ 13, 0, (__KERNEL_CS|0x3), (unsigned long)general_protection          },
	{ 14, 0, (__KERNEL_CS|0x3), (unsigned long)page_fault                  },
	{ 15, 0, (__KERNEL_CS|0x3), (unsigned long)spurious_interrupt_bug      },
	{ 16, 0, (__KERNEL_CS|0x3), (unsigned long)coprocessor_error           },
	{ 17, 0, (__KERNEL_CS|0x3), (unsigned long)alignment_check             },
#ifdef CONFIG_X86_MCE
	{ 18, 0, (__KERNEL_CS|0x3), (unsigned long)machine_check               },
#endif
	{ 19, 0, (__KERNEL_CS|0x3), (unsigned long)simd_coprocessor_error      },
	{ SYSCALL_VECTOR, 3, (__KERNEL_CS|0x3), (unsigned long)system_call     },
#ifdef CONFIG_IA32_EMULATION
	{ IA32_SYSCALL_VECTOR, 3, (__KERNEL_CS|0x3), (unsigned long)ia32_syscall },
#endif
	{ 0, 0, 0, 0 }
};
void __init trap_init(void)
{
	int ret;

	ret = HYPERVISOR_set_trap_table(trap_table);

	if (ret)
		printk("HYPERVISOR_set_trap_table failed: error %d\n", ret);

#ifdef CONFIG_IA32_EMULATION
	set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif

	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();
}
/* Actual parsing is done early in setup.c. */
static int __init oops_dummy(char *s)
{
	panic_on_oops = 1;
	return -1;
}
__setup("oops=", oops_dummy);

static int __init kstack_setup(char *s)
{
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("kstack=", kstack_setup);