ia64/linux-2.6.18-xen.hg

view kernel/softlockup.c @ 912:dd42cdb0ab89

[IA64] Build blktap2 driver by default in ia64 builds.

add CONFIG_XEN_BLKDEV_TAP2=y to buildconfigs/linux-defconfig_xen_ia64.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 12:09:16 2009 +0900 (2009-06-29)
parents 61ed8662b69c
/*
 * Detect Soft Lockups
 *
 * started by Ingo Molnar, Copyright (C) 2005, 2006 Red Hat, Inc.
 *
 * this code detects soft lockups: incidents where the kernel does not
 * reschedule on a CPU for 10 seconds or more.
 */
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/notifier.h>
#include <linux/module.h>
static DEFINE_SPINLOCK(print_lock);

static DEFINE_PER_CPU(unsigned long, touch_timestamp);
static DEFINE_PER_CPU(unsigned long, print_timestamp);
static DEFINE_PER_CPU(struct task_struct *, watchdog_task);

static int did_panic = 0;
static int
softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
	did_panic = 1;

	return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
	.notifier_call = softlock_panic,
};
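/*
 * Why hook the panic notifier at all: after a panic the machine stops
 * scheduling, so without the did_panic gate in softlockup_tick() below
 * the detector would pile spurious "soft lockup" reports on top of the
 * real panic output.
 */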
void touch_softlockup_watchdog(void)
{
	__raw_get_cpu_var(touch_timestamp) = jiffies;
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
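/*
 * Example (not part of this file): code that legitimately keeps a CPU
 * busy without rescheduling can call the exported helper above to
 * suppress false positives. A minimal sketch, assuming a hypothetical
 * device_ready() polling helper in a driver:
 *
 *	while (!device_ready(dev)) {		(hypothetical helper)
 *		cpu_relax();
 *		touch_softlockup_watchdog();	(reset this CPU's stamp)
 *	}
 */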
unsigned long softlockup_get_next_event(void)
{
	int this_cpu = smp_processor_id();
	unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);

	if (per_cpu(print_timestamp, this_cpu) == touch_timestamp ||
	    did_panic ||
	    !per_cpu(watchdog_task, this_cpu))
		return MAX_JIFFY_OFFSET;

	return max_t(long, 0, touch_timestamp + HZ - jiffies);
}
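/*
 * softlockup_get_next_event() is an addition carried in this Xen tree
 * (mainline 2.6.18 does not have it): it reports how many jiffies may
 * safely pass before softlockup_tick() next needs to run, so code that
 * stops the periodic tick on an idle VCPU can bound its sleep. A
 * minimal sketch, assuming a hypothetical program_oneshot_timer()
 * helper in the tick-stopping path:
 *
 *	unsigned long delta = min(softlockup_get_next_event(),
 *				  next_timer_interrupt() - jiffies);
 *	program_oneshot_timer(delta);	(hypothetical)
 */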
/*
 * This callback runs from the timer interrupt, and checks
 * whether the watchdog thread has hung or not:
 */
void softlockup_tick(void)
{
	int this_cpu = smp_processor_id();
	unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);

	/* prevent double reports: */
	if (per_cpu(print_timestamp, this_cpu) == touch_timestamp ||
	    did_panic ||
	    !per_cpu(watchdog_task, this_cpu))
		return;

	/* do not print during early bootup: */
	if (unlikely(system_state != SYSTEM_RUNNING)) {
		touch_softlockup_watchdog();
		return;
	}

	/* Wake up the high-prio watchdog task every second: */
	if (time_after(jiffies, touch_timestamp + HZ))
		wake_up_process(per_cpu(watchdog_task, this_cpu));

	/* Warn about unreasonable 10+ seconds delays: */
	if (time_after(jiffies, touch_timestamp + 10*HZ)) {
		per_cpu(print_timestamp, this_cpu) = touch_timestamp;

		spin_lock(&print_lock);
		printk(KERN_ERR "BUG: soft lockup detected on CPU#%d!\n",
		       this_cpu);
		dump_stack();
		spin_unlock(&print_lock);
	}
}
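/*
 * Caller context, for reference: in this kernel generation the per-CPU
 * timer tick reaches softlockup_tick() via run_local_timers() in
 * kernel/timer.c, roughly:
 *
 *	void run_local_timers(void)
 *	{
 *		raise_softirq(TIMER_SOFTIRQ);
 *		softlockup_tick();
 *	}
 */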
/*
 * The watchdog thread - runs every second and touches the timestamp.
 */
static int watchdog(void *__bind_cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	sched_setscheduler(current, SCHED_FIFO, &param);
	current->flags |= PF_NOFREEZE;

	/*
	 * Run briefly once per second to reset the softlockup timestamp.
	 * If this gets delayed for more than 10 seconds then the
	 * debug-printout triggers in softlockup_tick().
	 */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		touch_softlockup_watchdog();
		schedule();
	}

	return 0;
}
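/*
 * Note the ordering in the loop above: the thread marks itself
 * TASK_INTERRUPTIBLE *before* touching the timestamp, so a
 * wake_up_process() from softlockup_tick() that races with the touch
 * simply leaves the task runnable instead of being lost - the standard
 * kthread sleep pattern. SCHED_FIFO at the top realtime priority
 * ensures the thread runs as soon as it is woken, so a missed touch
 * really does mean the CPU never rescheduled.
 */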
/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
		BUG_ON(per_cpu(watchdog_task, hotcpu));
		p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("watchdog for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		per_cpu(touch_timestamp, hotcpu) = jiffies;
		per_cpu(watchdog_task, hotcpu) = p;
		kthread_bind(p, hotcpu);
		break;
	case CPU_ONLINE:
		wake_up_process(per_cpu(watchdog_task, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
		if (!per_cpu(watchdog_task, hotcpu))
			break;
		/* Unbind so it can run. Fall thru. */
		kthread_bind(per_cpu(watchdog_task, hotcpu),
			     any_online_cpu(cpu_online_map));
	case CPU_DEAD:
		p = per_cpu(watchdog_task, hotcpu);
		per_cpu(watchdog_task, hotcpu) = NULL;
		kthread_stop(p);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}
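/*
 * Example (not part of this file): these notifier cases fire when CPUs
 * are hotplugged, e.g. through the sysfs online attribute:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online	-> CPU_DEAD
 *	echo 1 > /sys/devices/system/cpu/cpu1/online	-> CPU_UP_PREPARE,
 *							   then CPU_ONLINE
 *
 * CPU_UP_PREPARE creates the thread bound to the incoming CPU but does
 * not start it; CPU_ONLINE wakes it once the CPU can actually run it.
 */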
static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

__init void spawn_softlockup_task(void)
{
	void *cpu = (void *)(long)smp_processor_id();

	cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
}
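/*
 * Boot-time hookup, for reference: in this kernel generation
 * spawn_softlockup_task() is called early in boot from
 * do_pre_smp_initcalls() in init/main.c (skipped when the
 * "nosoftlockup" boot parameter is set). It seeds the boot CPU's
 * watchdog by replaying CPU_UP_PREPARE and CPU_ONLINE by hand before
 * any secondary CPUs are brought up.
 */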