ia64/xen-unstable: patches/linux-2.6.12/i386-cpu-hotplug-updated-for-mm.patch @ 6516:455dff354413

When a guest shuts itself down, make sure that its domain gets destroyed.

The device model needs to run "xm destroy" before it exits on
guest shutdown, poweroff, or "halt -p".

Signed-off-by: Edwin Zhai <edwin.zhai@intel.com>
Signed-off-by: Arun Sharma <arun.sharma@intel.com>
author adsharma@los-vmm.sc.intel.com
date Tue Aug 09 11:12:44 2005 -0800 (2005-08-09)
parents 56a63f9f378f
children 8799d14bef77 9312a3e8a6f8 dfaf788ab18c
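The device-model change the message describes is not part of this file. As a rough userspace sketch of that behaviour (the domid plumbing and helper name are invented for illustration; this is not the actual qemu-dm code):

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical helper: tear the domain down via the xm tool before
     * the device model exits, so a powered-off guest does not linger. */
    static void destroy_domain_on_exit(int domid)
    {
        char cmd[64];

        snprintf(cmd, sizeof(cmd), "xm destroy %d", domid);
        if (system(cmd) != 0)
            fprintf(stderr, "xm destroy failed for domain %d\n", domid);
    }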
diff -Naur linux-2.6.12.orig/arch/i386/Kconfig linux-2.6.12/arch/i386/Kconfig
--- linux-2.6.12.orig/arch/i386/Kconfig 2005-07-08 12:33:40.000000000 -0400
+++ linux-2.6.12/arch/i386/Kconfig 2005-07-08 12:34:10.000000000 -0400
@@ -1226,6 +1226,15 @@
This support is also available as a module. If compiled as a
module, it will be called scx200.

+config HOTPLUG_CPU
+ bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
+ depends on SMP && HOTPLUG && EXPERIMENTAL
+ ---help---
+ Say Y here to experiment with turning CPUs off and on. CPUs
+ can be controlled through /sys/devices/system/cpu.
+
+ Say N.
+
source "drivers/pcmcia/Kconfig"

source "drivers/pci/hotplug/Kconfig"
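The help text above names the sysfs control files. As a runnable illustration of that interface (assumes a hotplug-capable kernel, root privileges, and that cpu1 exists; the boot CPU cannot be taken down by this patch):

    #include <stdio.h>

    /* Toggle a CPU through the interface named in the Kconfig help text:
     * writing "0" to /sys/devices/system/cpu/cpuN/online takes the CPU
     * down, writing "1" brings it back. */
    static int set_cpu_online(int cpu, int online)
    {
        char path[64];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%d/online", cpu);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fprintf(f, "%d", online ? 1 : 0);
        fclose(f);
        return 0;
    }

    int main(void)
    {
        if (set_cpu_online(1, 0) == 0)  /* offline cpu1... */
            set_cpu_online(1, 1);       /* ...and bring it back */
        return 0;
    }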
diff -Naur linux-2.6.12.orig/arch/i386/kernel/apic.c linux-2.6.12/arch/i386/kernel/apic.c
--- linux-2.6.12.orig/arch/i386/kernel/apic.c 2005-07-08 12:33:40.000000000 -0400
+++ linux-2.6.12/arch/i386/kernel/apic.c 2005-07-08 12:34:10.000000000 -0400
@@ -26,6 +26,7 @@
#include <linux/mc146818rtc.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
+#include <linux/cpu.h>

#include <asm/atomic.h>
#include <asm/smp.h>
@@ -1048,7 +1049,7 @@
setup_APIC_timer(calibration_result);
}

-void __init disable_APIC_timer(void)
+void __devinit disable_APIC_timer(void)
{
if (using_apic_timer) {
unsigned long v;
diff -Naur linux-2.6.12.orig/arch/i386/kernel/io_apic.c linux-2.6.12/arch/i386/kernel/io_apic.c
--- linux-2.6.12.orig/arch/i386/kernel/io_apic.c 2005-07-08 12:33:40.000000000 -0400
+++ linux-2.6.12/arch/i386/kernel/io_apic.c 2005-07-08 12:34:10.000000000 -0400
@@ -576,9 +576,11 @@
try_to_freeze(PF_FREEZE);
if (time_after(jiffies,
prev_balance_time+balanced_irq_interval)) {
+ preempt_disable();
do_irq_balance();
prev_balance_time = jiffies;
time_remaining = balanced_irq_interval;
+ preempt_enable();
}
}
return 0;
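This hunk pins the IRQ-balancing thread while it scans: under the stop-machine hotplug scheme, a preempt-disabled section also holds off CPU removal, so the set of online CPUs cannot change in the middle of do_irq_balance(). A stubbed sketch of the pattern (the primitives are no-ops here so it compiles outside the kernel):

    /* preempt_disable()/preempt_enable() bracket work that must see a
     * stable set of online CPUs; stubbed as no-ops for illustration. */
    #define preempt_disable() do { } while (0)
    #define preempt_enable()  do { } while (0)

    static void do_irq_balance(void) { /* walks the online CPUs */ }

    static void balance_once(void)
    {
        preempt_disable();
        do_irq_balance();
        preempt_enable();
    }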
diff -Naur linux-2.6.12.orig/arch/i386/kernel/irq.c linux-2.6.12/arch/i386/kernel/irq.c
--- linux-2.6.12.orig/arch/i386/kernel/irq.c 2005-07-08 12:33:40.000000000 -0400
+++ linux-2.6.12/arch/i386/kernel/irq.c 2005-07-08 12:36:06.000000000 -0400
@@ -15,6 +15,9 @@
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>

DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_maxaligned_in_smp;
EXPORT_PER_CPU_SYMBOL(irq_stat);
@@ -210,9 +213,8 @@

if (i == 0) {
seq_printf(p, " ");
- for (j=0; j<NR_CPUS; j++)
- if (cpu_online(j))
- seq_printf(p, "CPU%d ",j);
+ for_each_cpu(j)
+ seq_printf(p, "CPU%d ",j);
seq_putc(p, '\n');
}

@@ -225,9 +227,8 @@
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
#else
- for (j = 0; j < NR_CPUS; j++)
- if (cpu_online(j))
- seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+ for_each_cpu(j)
+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
seq_printf(p, " %14s", irq_desc[i].handler->typename);
seq_printf(p, " %s", action->name);
@@ -240,16 +241,13 @@
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "NMI: ");
- for (j = 0; j < NR_CPUS; j++)
- if (cpu_online(j))
- seq_printf(p, "%10u ", nmi_count(j));
+ for_each_cpu(j)
+ seq_printf(p, "%10u ", nmi_count(j));
seq_putc(p, '\n');
#ifdef CONFIG_X86_LOCAL_APIC
seq_printf(p, "LOC: ");
- for (j = 0; j < NR_CPUS; j++)
- if (cpu_online(j))
- seq_printf(p, "%10u ",
- per_cpu(irq_stat,j).apic_timer_irqs);
+ for_each_cpu(j)
+ seq_printf(p, "%10u ", per_cpu(irq_stat,j).apic_timer_irqs);
seq_putc(p, '\n');
#endif
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
@@ -259,3 +257,45 @@
}
return 0;
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+#include <mach_apic.h>
+
+void fixup_irqs(cpumask_t map)
+{
+ unsigned int irq;
+ static int warned;
+
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ cpumask_t mask;
+ if (irq == 2)
+ continue;
+
+ cpus_and(mask, irq_affinity[irq], map);
+ if (any_online_cpu(mask) == NR_CPUS) {
+ printk("Breaking affinity for irq %i\n", irq);
+ mask = map;
+ }
+ if (irq_desc[irq].handler->set_affinity)
+ irq_desc[irq].handler->set_affinity(irq, mask);
+ else if (irq_desc[irq].action && !(warned++))
+ printk("Cannot set affinity for irq %i\n", irq);
+ }
+
+#if 0
+ barrier();
+ /* Ingo Molnar says: "after the IO-APIC masks have been redirected
+ [note the nop - the interrupt-enable boundary on x86 is two
+ instructions from sti] - to flush out pending hardirqs and
+ IPIs. After this point nothing is supposed to reach this CPU." */
+ __asm__ __volatile__("sti; nop; cli");
+ barrier();
+#else
+ /* That doesn't seem sufficient. Give it 1ms. */
+ local_irq_enable();
+ mdelay(1);
+ local_irq_disable();
+#endif
+}
+#endif
+
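fixup_irqs() rewrites each IRQ's affinity mask so nothing stays targeted at a vanishing CPU, falling back to the whole surviving map when the old affinity would leave no online CPU. The same per-IRQ mask is visible from userspace; a small runnable reader (IRQ 0 is an arbitrary example, and the program needs procfs):

    #include <stdio.h>

    /* Print one IRQ's affinity mask; /proc/irq/<n>/smp_affinity is the
     * userspace view of the mask fixup_irqs() rewrites in-kernel. */
    int main(void)
    {
        char buf[64];
        FILE *f = fopen("/proc/irq/0/smp_affinity", "r");

        if (!f)
            return 1;
        if (fgets(buf, sizeof(buf), f))
            printf("IRQ 0 affinity mask: %s", buf);
        fclose(f);
        return 0;
    }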
diff -Naur linux-2.6.12.orig/arch/i386/kernel/msr.c linux-2.6.12/arch/i386/kernel/msr.c
--- linux-2.6.12.orig/arch/i386/kernel/msr.c 2005-07-08 12:33:40.000000000 -0400
+++ linux-2.6.12/arch/i386/kernel/msr.c 2005-07-08 12:34:10.000000000 -0400
@@ -260,7 +260,7 @@
.open = msr_open,
};

-static int msr_class_simple_device_add(int i)
+static int __devinit msr_class_simple_device_add(int i)
{
int err = 0;
struct class_device *class_err;
diff -Naur linux-2.6.12.orig/arch/i386/kernel/process.c linux-2.6.12/arch/i386/kernel/process.c
--- linux-2.6.12.orig/arch/i386/kernel/process.c 2005-07-08 12:33:40.000000000 -0400
+++ linux-2.6.12/arch/i386/kernel/process.c 2005-07-08 12:36:43.000000000 -0400
@@ -13,6 +13,7 @@

#include <stdarg.h>

+#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
@@ -54,6 +55,9 @@
#include <linux/irq.h>
#include <linux/err.h>

+#include <asm/tlbflush.h>
+#include <asm/cpu.h>
+
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

static int hlt_counter;
@@ -138,6 +142,34 @@
}
}

+#ifdef CONFIG_HOTPLUG_CPU
+#include <asm/nmi.h>
+/* We don't actually take CPU down, just spin without interrupts. */
+static inline void play_dead(void)
+{
+ /* Ack it */
+ __get_cpu_var(cpu_state) = CPU_DEAD;
+
+ /* We shouldn't have to disable interrupts while dead, but
+ * some interrupts just don't seem to go away, and this makes
+ * it "work" for testing purposes. */
+ /* Death loop */
+ while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
+ cpu_relax();
+
+ local_irq_disable();
+ __flush_tlb_all();
+ cpu_set(smp_processor_id(), cpu_online_map);
+ enable_APIC_timer();
+ local_irq_enable();
+}
+#else
+static inline void play_dead(void)
+{
+ BUG();
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
@@ -160,6 +192,9 @@
if (!idle)
idle = default_idle;

+ if (cpu_is_offline(cpu))
+ play_dead();
+
__get_cpu_var(irq_stat).idle_timestamp = jiffies;
idle();
}
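play_dead() above fakes death rather than physically stopping the CPU: the idle task acknowledges the offline request by setting cpu_state to CPU_DEAD, then spins until cpu_state becomes CPU_UP_PREPARE, at which point it re-registers itself as online. A userspace analogy of that two-sided handshake (pthreads stand in for CPUs; all names are invented; compile with -pthread):

    #include <pthread.h>
    #include <sched.h>
    #include <stdatomic.h>
    #include <stdio.h>

    enum { UP, DEAD, UP_PREPARE };          /* stand-ins for the CPU_* states */
    static atomic_int state = UP;

    /* The "dying CPU": ack the request, then spin until asked back up. */
    static void *fake_play_dead(void *arg)
    {
        (void)arg;
        atomic_store(&state, DEAD);         /* like cpu_state = CPU_DEAD */
        while (atomic_load(&state) != UP_PREPARE)
            sched_yield();                  /* stands in for cpu_relax() */
        printf("worker: back online\n");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, fake_play_dead, NULL);
        while (atomic_load(&state) != DEAD) /* like __cpu_die() waiting */
            sched_yield();
        atomic_store(&state, UP_PREPARE);   /* like cpu_enable() */
        pthread_join(t, NULL);
        return 0;
    }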
diff -Naur linux-2.6.12.orig/arch/i386/kernel/smpboot.c linux-2.6.12/arch/i386/kernel/smpboot.c
--- linux-2.6.12.orig/arch/i386/kernel/smpboot.c 2005-07-08 12:33:40.000000000 -0400
+++ linux-2.6.12/arch/i386/kernel/smpboot.c 2005-07-08 12:34:10.000000000 -0400
@@ -44,6 +44,9 @@
#include <linux/smp_lock.h>
#include <linux/irq.h>
#include <linux/bootmem.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/percpu.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
@@ -90,6 +93,9 @@

static void map_cpu_to_logical_apicid(void);

+/* State of each CPU. */
+DEFINE_PER_CPU(int, cpu_state) = { 0 };
+
/*
* Currently trivial. Write the real->protected mode
* bootstrap into the page concerned. The caller
@@ -1107,6 +1113,9 @@
who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
+ smp_commenced_mask = cpumask_of_cpu(0);
+ cpu_callin_map = cpumask_of_cpu(0);
+ mb();
smp_boot_cpus(max_cpus);
}

@@ -1116,20 +1125,99 @@
cpu_set(smp_processor_id(), cpu_callout_map);
}

-int __devinit __cpu_up(unsigned int cpu)
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* must be called with the cpucontrol mutex held */
+static int __devinit cpu_enable(unsigned int cpu)
{
- /* This only works at boot for x86. See "rewrite" above. */
- if (cpu_isset(cpu, smp_commenced_mask)) {
- local_irq_enable();
- return -ENOSYS;
+ /* get the target out of its holding state */
+ per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+ wmb();
+
+ /* wait for the processor to ack it. timeout? */
+ while (!cpu_online(cpu))
+ cpu_relax();
+
+ fixup_irqs(cpu_online_map);
+ /* counter the disable in fixup_irqs() */
+ local_irq_enable();
+ return 0;
+}
+
+int __cpu_disable(void)
+{
+ cpumask_t map = cpu_online_map;
+ int cpu = smp_processor_id();
+
+ /*
+ * Perhaps use cpufreq to drop frequency, but that could go
+ * into generic code.
+ *
+ * We won't take down the boot processor on i386 due to some
+ * interrupts only being able to be serviced by the BSP.
+ * Especially so if we're not using an IOAPIC -zwane
+ */
+ if (cpu == 0)
+ return -EBUSY;
+
+ /* We enable the timer again on the exit path of the death loop */
+ disable_APIC_timer();
+ /* Allow any queued timer interrupts to get serviced */
+ local_irq_enable();
+ mdelay(1);
+ local_irq_disable();
+
+ cpu_clear(cpu, map);
+ fixup_irqs(map);
+ /* It's now safe to remove this processor from the online map */
+ cpu_clear(cpu, cpu_online_map);
+ return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+ /* We don't do anything here: idle task is faking death itself. */
+ unsigned int i;
+
+ for (i = 0; i < 10; i++) {
+ /* They ack this in play_dead by setting CPU_DEAD */
+ if (per_cpu(cpu_state, cpu) == CPU_DEAD)
+ return;
+ current->state = TASK_UNINTERRUPTIBLE;
+ schedule_timeout(HZ/10);
}
+ printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+}
+#else /* ... !CONFIG_HOTPLUG_CPU */
+int __cpu_disable(void)
+{
+ return -ENOSYS;
+}

+void __cpu_die(unsigned int cpu)
+{
+ /* We said "no" in __cpu_disable */
+ BUG();
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+int __devinit __cpu_up(unsigned int cpu)
+{
/* In case one didn't come up */
if (!cpu_isset(cpu, cpu_callin_map)) {
+ printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
local_irq_enable();
return -EIO;
}

+#ifdef CONFIG_HOTPLUG_CPU
+ /* Already up, and in cpu_quiescent now? */
+ if (cpu_isset(cpu, smp_commenced_mask)) {
+ cpu_enable(cpu);
+ return 0;
+ }
+#endif
+
local_irq_enable();
/* Unleash the CPU! */
cpu_set(cpu, smp_commenced_mask);
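Note the shape of __cpu_die() above: it does not block indefinitely on the dying CPU but polls cpu_state up to ten times, sleeping HZ/10 between tries, and merely warns if the CPU never reports CPU_DEAD. The same bounded-poll pattern in plain C (the 100 ms step mirrors HZ/10 at HZ=100, an assumption):

    #include <stdio.h>
    #include <time.h>

    /* Poll a flag the way __cpu_die() polls cpu_state: ten tries with a
     * sleep in between, then give up with a diagnostic instead of hanging. */
    static int wait_for_dead(volatile const int *dead)
    {
        struct timespec ts = { 0, 100 * 1000 * 1000 };  /* ~HZ/10 at HZ=100 */
        int i;

        for (i = 0; i < 10; i++) {
            if (*dead)
                return 0;
            nanosleep(&ts, NULL);
        }
        fprintf(stderr, "CPU didn't die...\n");
        return -1;
    }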
diff -Naur linux-2.6.12.orig/arch/i386/kernel/smp.c linux-2.6.12/arch/i386/kernel/smp.c
--- linux-2.6.12.orig/arch/i386/kernel/smp.c 2005-07-08 12:33:40.000000000 -0400
+++ linux-2.6.12/arch/i386/kernel/smp.c 2005-07-08 12:34:10.000000000 -0400
@@ -19,6 +19,7 @@
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
+#include <linux/cpu.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
@@ -163,7 +164,7 @@
unsigned long flags;

local_irq_save(flags);
-
+ WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
/*
* Wait for idle.
*/
@@ -345,21 +346,21 @@
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
unsigned long va)
{
- cpumask_t tmp;
/*
* A couple of (to be removed) sanity checks:
*
- * - we do not send IPIs to not-yet booted CPUs.
* - current CPU must not be in mask
* - mask must exist :)
*/
BUG_ON(cpus_empty(cpumask));
-
- cpus_and(tmp, cpumask, cpu_online_map);
- BUG_ON(!cpus_equal(cpumask, tmp));
BUG_ON(cpu_isset(smp_processor_id(), cpumask));
BUG_ON(!mm);

+ /* If a CPU which we ran on has gone down, OK. */
+ cpus_and(cpumask, cpumask, cpu_online_map);
+ if (cpus_empty(cpumask))
+ return;
+
/*
* i'm not happy about this global shared spinlock in the
* MM hot path, but we'll see how contended it is.
@@ -474,6 +475,7 @@
*/
void smp_send_reschedule(int cpu)
{
+ WARN_ON(cpu_is_offline(cpu));
send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}

@@ -514,10 +516,16 @@
*/
{
struct call_data_struct data;
- int cpus = num_online_cpus()-1;
+ int cpus;

- if (!cpus)
+ /* Holding any lock stops cpus from going down. */
+ spin_lock(&call_lock);
+ cpus = num_online_cpus()-1;
+
+ if (!cpus) {
+ spin_unlock(&call_lock);
return 0;
+ }

/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
@@ -529,7 +537,6 @@
if (wait)
atomic_set(&data.finished, 0);

- spin_lock(&call_lock);
call_data = &data;
mb();

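The smp_call_function() hunk fixes a check-then-act race: the online-CPU count is now taken after acquiring call_lock, so no CPU can go down between counting the IPI targets and broadcasting to them (per the comment, holding any lock keeps CPUs from going down under the stop-machine scheme). A stubbed sketch of the reordered flow, with a pthread mutex standing in for call_lock:

    #include <pthread.h>

    static pthread_mutex_t call_lock = PTHREAD_MUTEX_INITIALIZER;
    static int online_cpus = 4;            /* stand-in for num_online_cpus() */

    static void broadcast_ipi(void) { /* stand-in for sending the IPIs */ }

    static int smp_call_function_sketch(void)
    {
        int cpus;

        pthread_mutex_lock(&call_lock);    /* "hotplug" blocked from here on */
        cpus = online_cpus - 1;            /* count excludes the caller */
        if (!cpus) {
            pthread_mutex_unlock(&call_lock);
            return 0;
        }
        broadcast_ipi();
        pthread_mutex_unlock(&call_lock);
        return 0;
    }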
diff -Naur linux-2.6.12.orig/arch/i386/kernel/traps.c linux-2.6.12/arch/i386/kernel/traps.c
--- linux-2.6.12.orig/arch/i386/kernel/traps.c 2005-07-08 12:33:40.000000000 -0400
+++ linux-2.6.12/arch/i386/kernel/traps.c 2005-07-08 12:34:10.000000000 -0400
@@ -624,6 +624,14 @@
nmi_enter();

cpu = smp_processor_id();
+
+#ifdef CONFIG_HOTPLUG_CPU
+ if (!cpu_online(cpu)) {
+ nmi_exit();
+ return;
+ }
+#endif
+
++nmi_count(cpu);

if (!nmi_callback(regs, cpu))
diff -Naur linux-2.6.12.orig/arch/ppc64/kernel/pSeries_smp.c linux-2.6.12/arch/ppc64/kernel/pSeries_smp.c
--- linux-2.6.12.orig/arch/ppc64/kernel/pSeries_smp.c 2005-07-08 12:33:42.000000000 -0400
+++ linux-2.6.12/arch/ppc64/kernel/pSeries_smp.c 2005-07-08 12:34:10.000000000 -0400
@@ -92,10 +92,13 @@

int pSeries_cpu_disable(void)
{
+ int cpu = smp_processor_id();
+
+ cpu_clear(cpu, cpu_online_map);
systemcfg->processorCount--;

/*fix boot_cpuid here*/
- if (smp_processor_id() == boot_cpuid)
+ if (cpu == boot_cpuid)
boot_cpuid = any_online_cpu(cpu_online_map);

/* FIXME: abstract this to not be platform specific later on */
diff -Naur linux-2.6.12.orig/arch/s390/kernel/smp.c linux-2.6.12/arch/s390/kernel/smp.c
--- linux-2.6.12.orig/arch/s390/kernel/smp.c 2005-07-08 12:33:42.000000000 -0400
+++ linux-2.6.12/arch/s390/kernel/smp.c 2005-07-08 12:34:10.000000000 -0400
@@ -679,12 +679,14 @@
{
unsigned long flags;
ec_creg_mask_parms cr_parms;
+ int cpu = smp_processor_id();

spin_lock_irqsave(&smp_reserve_lock, flags);
- if (smp_cpu_reserved[smp_processor_id()] != 0) {
+ if (smp_cpu_reserved[cpu] != 0) {
spin_unlock_irqrestore(&smp_reserve_lock, flags);
return -EBUSY;
}
+ cpu_clear(cpu, cpu_online_map);

#ifdef CONFIG_PFAULT
/* Disable pfault pseudo page faults on this cpu. */
diff -Naur linux-2.6.12.orig/include/asm-i386/cpu.h linux-2.6.12/include/asm-i386/cpu.h
--- linux-2.6.12.orig/include/asm-i386/cpu.h 2005-07-08 12:33:58.000000000 -0400
+++ linux-2.6.12/include/asm-i386/cpu.h 2005-07-08 12:34:10.000000000 -0400
@@ -5,6 +5,7 @@
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
+#include <linux/percpu.h>

#include <asm/node.h>

@@ -16,4 +17,5 @@
extern void arch_unregister_cpu(int);
#endif

+DECLARE_PER_CPU(int, cpu_state);
#endif /* _ASM_I386_CPU_H_ */
diff -Naur linux-2.6.12.orig/include/asm-i386/irq.h linux-2.6.12/include/asm-i386/irq.h
--- linux-2.6.12.orig/include/asm-i386/irq.h 2005-07-08 12:33:58.000000000 -0400
+++ linux-2.6.12/include/asm-i386/irq.h 2005-07-08 12:34:10.000000000 -0400
@@ -38,4 +38,8 @@
extern int irqbalance_disable(char *str);
#endif

+#ifdef CONFIG_HOTPLUG_CPU
+extern void fixup_irqs(cpumask_t map);
+#endif
+
#endif /* _ASM_IRQ_H */
diff -Naur linux-2.6.12.orig/include/asm-i386/smp.h linux-2.6.12/include/asm-i386/smp.h
--- linux-2.6.12.orig/include/asm-i386/smp.h 2005-07-08 12:33:58.000000000 -0400
+++ linux-2.6.12/include/asm-i386/smp.h 2005-07-08 12:34:10.000000000 -0400
@@ -83,6 +83,9 @@
}

#endif
+
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
#endif /* !__ASSEMBLY__ */

#define NO_PROC_ID 0xFF /* No processor magic marker */
diff -Naur linux-2.6.12.orig/kernel/cpu.c linux-2.6.12/kernel/cpu.c
--- linux-2.6.12.orig/kernel/cpu.c 2005-07-08 12:33:26.000000000 -0400
+++ linux-2.6.12/kernel/cpu.c 2005-07-08 12:34:10.000000000 -0400
@@ -63,19 +63,15 @@
{
int err;

- /* Take offline: makes arch_cpu_down somewhat easier. */
- cpu_clear(smp_processor_id(), cpu_online_map);
-
/* Ensure this CPU doesn't handle any more interrupts. */
err = __cpu_disable();
if (err < 0)
- cpu_set(smp_processor_id(), cpu_online_map);
- else
- /* Force idle task to run as soon as we yield: it should
- immediately notice cpu is offline and die quickly. */
- sched_idle_next();
+ return err;

- return err;
+ /* Force idle task to run as soon as we yield: it should
+ immediately notice cpu is offline and die quickly. */
+ sched_idle_next();
+ return 0;
}

int cpu_down(unsigned int cpu)
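With the arch __cpu_disable() now responsible for clearing cpu_online_map at the moment interrupts are really cut off (see the i386, ppc64, and s390 hunks above), the generic take_cpu_down() no longer speculatively clears and restores the map. A stubbed sketch of the resulting flow (stand-in names, so it compiles on its own):

    /* Stand-ins for the kernel functions, for illustration only. */
    static int __cpu_disable_stub(void) { return 0; }  /* arch clears the map */
    static void sched_idle_next_stub(void) { }         /* idle task then dies */

    static int take_cpu_down_sketch(void)
    {
        int err = __cpu_disable_stub();    /* stop taking interrupts, go offline */

        if (err < 0)
            return err;                    /* map was never touched on failure */
        sched_idle_next_stub();            /* idle notices offline, plays dead */
        return 0;
    }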