patches/linux-2.6.11/i386-cpu-hotplug-updated-for-mm.patch

From: Zwane Mwaikambo <zwane@linuxpower.ca>

Find attached the i386 cpu hotplug patch updated for Ingo's latest round of
goodies. In order to avoid dumping cpu hotplug code into kernel/irq/* I
dropped the cpu_online check in do_IRQ() by modifying fixup_irqs(). The
difference is that on cpu offline, fixup_irqs() is called before we clear
the cpu from cpu_online_map, followed by a long delay, to ensure that we
never have any queued external interrupts on the APICs (see the sketch
after the change list below). Due to my usual test victims being in boxes
a continent away this hasn't been tested, but I'll cover bug reports
(nudge, Nathan! ;)
1) Add CONFIG_HOTPLUG_CPU.
2) Disable local APIC timer on dead cpus.
3) Disable preempt around irq balancing to prevent CPUs going down.
4) Print irq stats for all possible cpus.
5) Debugging check for interrupts on offline cpus.
6) Hacky fixup_irqs() to redirect irqs when cpus go off/online.
7) play_dead() for offline cpus to spin inside.
8) Handle offline cpus set in flush_tlb_others().
9) Grab lock earlier in smp_call_function() to prevent CPUs going down.
10) Implement __cpu_disable() and __cpu_die().
11) Enable local interrupts in cpu_enable() after fixup_irqs().
12) Don't fiddle with NMI on dead cpu, but leave intact on other cpus.
13) Program IRQ affinity whilst cpu is still in cpu_online_map on offline.
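
In code terms, the ordering that items 10 and 13 (and the paragraph above)
describe boils down to the following condensed paraphrase of the
__cpu_disable() this patch adds -- a sketch for orientation only, not the
literal hunk, which appears in the smpboot.c section below:

        int __cpu_disable(void)
        {
                cpumask_t map = cpu_online_map;
                int cpu = smp_processor_id();

                if (cpu == 0)           /* the BSP must stay online */
                        return -EBUSY;

                disable_APIC_timer();   /* play_dead() re-enables it */
                local_irq_enable();     /* drain queued timer interrupts... */
                mdelay(1);              /* ...over the "long delay" */
                local_irq_disable();

                cpu_clear(cpu, map);
                fixup_irqs(map);        /* retarget irqs while still online */
                cpu_clear(cpu, cpu_online_map); /* only now leave the map */
                return 0;
        }
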
Signed-off-by: Zwane Mwaikambo <zwane@linuxpower.ca>
DESC
ppc64: fix hotplug cpu
EDESC
From: Zwane Mwaikambo <zwane@fsmlabs.com>

I seem to have broken this when I moved the clearing of the dying cpu to
arch specific code.

Signed-off-by: Zwane Mwaikambo <zwane@fsmlabs.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/arch/i386/Kconfig               |    9 ++
 25-akpm/arch/i386/kernel/apic.c         |    3
 25-akpm/arch/i386/kernel/io_apic.c      |    2
 25-akpm/arch/i386/kernel/irq.c          |   66 +++++++++++++++++----
 25-akpm/arch/i386/kernel/msr.c          |    2
 25-akpm/arch/i386/kernel/process.c      |   35 +++++++++++
 25-akpm/arch/i386/kernel/smp.c          |   25 +++++---
 25-akpm/arch/i386/kernel/smpboot.c      |   98 ++++++++++++++++++++++++++++--
 25-akpm/arch/i386/kernel/traps.c        |    8 ++
 25-akpm/arch/ia64/kernel/smpboot.c      |    3
 25-akpm/arch/ppc64/kernel/pSeries_smp.c |    5 +
 25-akpm/arch/s390/kernel/smp.c          |    4 -
 25-akpm/include/asm-i386/cpu.h          |    2
 25-akpm/include/asm-i386/irq.h          |    4 +
 25-akpm/include/asm-i386/smp.h          |    3
 25-akpm/kernel/cpu.c                    |   14 +---
 arch/ppc64/kernel/smp.c                 |    0
 17 files changed, 242 insertions(+), 41 deletions(-)

diff -puN arch/i386/Kconfig~i386-cpu-hotplug-updated-for-mm arch/i386/Kconfig
--- 25/arch/i386/Kconfig~i386-cpu-hotplug-updated-for-mm 2005-02-23 02:20:06.000000000 -0800
+++ 25-akpm/arch/i386/Kconfig 2005-02-23 02:20:06.000000000 -0800
@@ -1205,6 +1205,15 @@ config SCx200
           This support is also available as a module. If compiled as a
           module, it will be called scx200.

+config HOTPLUG_CPU
+        bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
+        depends on SMP && HOTPLUG && EXPERIMENTAL
+        ---help---
+          Say Y here to experiment with turning CPUs off and on. CPUs
+          can be controlled through /sys/devices/system/cpu.
+
+          Say N.
+
 source "drivers/pcmcia/Kconfig"

 source "drivers/pci/hotplug/Kconfig"
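
As a concrete illustration of the sysfs control path the help text above
mentions, here is a minimal userspace sketch (assumptions: CONFIG_HOTPLUG_CPU=y,
root privileges, and a second processor exposed as cpu1; writing 0 or 1 to
the per-cpu "online" file is the standard control interface):

        /* toggle_cpu.c: take cpu1 offline, then bring it back online. */
        #include <stdio.h>

        static int set_online(int cpu, int online)
        {
                char path[64];
                FILE *f;

                snprintf(path, sizeof(path),
                         "/sys/devices/system/cpu/cpu%d/online", cpu);
                f = fopen(path, "w");
                if (!f)
                        return -1;              /* no such cpu, or not root */
                fprintf(f, "%d\n", online);
                return fclose(f);               /* 0 on success */
        }

        int main(void)
        {
                if (set_online(1, 0) == 0)
                        printf("cpu1 offlined\n");
                if (set_online(1, 1) == 0)
                        printf("cpu1 back online\n");
                return 0;
        }
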
diff -puN arch/i386/kernel/apic.c~i386-cpu-hotplug-updated-for-mm arch/i386/kernel/apic.c
--- 25/arch/i386/kernel/apic.c~i386-cpu-hotplug-updated-for-mm 2005-02-23 02:20:06.000000000 -0800
+++ 25-akpm/arch/i386/kernel/apic.c 2005-02-23 02:20:06.000000000 -0800
@@ -26,6 +26,7 @@
 #include <linux/mc146818rtc.h>
 #include <linux/kernel_stat.h>
 #include <linux/sysdev.h>
+#include <linux/cpu.h>

 #include <asm/atomic.h>
 #include <asm/smp.h>
@@ -1048,7 +1049,7 @@ void __init setup_secondary_APIC_clock(v
         setup_APIC_timer(calibration_result);
 }

-void __init disable_APIC_timer(void)
+void __devinit disable_APIC_timer(void)
 {
         if (using_apic_timer) {
                 unsigned long v;
diff -puN arch/i386/kernel/io_apic.c~i386-cpu-hotplug-updated-for-mm arch/i386/kernel/io_apic.c
--- 25/arch/i386/kernel/io_apic.c~i386-cpu-hotplug-updated-for-mm 2005-02-23 02:20:06.000000000 -0800
+++ 25-akpm/arch/i386/kernel/io_apic.c 2005-02-23 02:20:06.000000000 -0800
@@ -576,9 +576,11 @@ static int balanced_irq(void *unused)
                 try_to_freeze(PF_FREEZE);
                 if (time_after(jiffies,
                                 prev_balance_time+balanced_irq_interval)) {
+                        preempt_disable();
                         do_irq_balance();
                         prev_balance_time = jiffies;
                         time_remaining = balanced_irq_interval;
+                        preempt_enable();
                 }
         }
         return 0;
diff -puN arch/i386/kernel/irq.c~i386-cpu-hotplug-updated-for-mm arch/i386/kernel/irq.c
--- 25/arch/i386/kernel/irq.c~i386-cpu-hotplug-updated-for-mm 2005-02-23 02:20:06.000000000 -0800
+++ 25-akpm/arch/i386/kernel/irq.c 2005-02-23 02:20:06.000000000 -0800
@@ -15,6 +15,9 @@
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>

 #ifndef CONFIG_X86_LOCAL_APIC
 /*
@@ -209,9 +212,8 @@ int show_interrupts(struct seq_file *p,

         if (i == 0) {
                 seq_printf(p, " ");
-                for (j=0; j<NR_CPUS; j++)
-                        if (cpu_online(j))
-                                seq_printf(p, "CPU%d ",j);
+                for_each_cpu(j)
+                        seq_printf(p, "CPU%d ",j);
                 seq_putc(p, '\n');
         }

@@ -224,9 +226,8 @@ int show_interrupts(struct seq_file *p,
 #ifndef CONFIG_SMP
         seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-        for (j = 0; j < NR_CPUS; j++)
-                if (cpu_online(j))
-                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+        for_each_cpu(j)
+                seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
         seq_printf(p, " %14s", irq_desc[i].handler->typename);
         seq_printf(p, " %s", action->name);
@@ -239,16 +240,13 @@ skip:
                 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
         } else if (i == NR_IRQS) {
                 seq_printf(p, "NMI: ");
-                for (j = 0; j < NR_CPUS; j++)
-                        if (cpu_online(j))
-                                seq_printf(p, "%10u ", nmi_count(j));
+                for_each_cpu(j)
+                        seq_printf(p, "%10u ", nmi_count(j));
                 seq_putc(p, '\n');
 #ifdef CONFIG_X86_LOCAL_APIC
                 seq_printf(p, "LOC: ");
-                for (j = 0; j < NR_CPUS; j++)
-                        if (cpu_online(j))
-                                seq_printf(p, "%10u ",
-                                        irq_stat[j].apic_timer_irqs);
+                for_each_cpu(j)
+                        seq_printf(p, "%10u ", irq_stat[j].apic_timer_irqs);
                 seq_putc(p, '\n');
 #endif
                 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
@@ -258,3 +256,45 @@ skip:
         }
         return 0;
 }
+
+#ifdef CONFIG_HOTPLUG_CPU
+#include <mach_apic.h>
+
+void fixup_irqs(cpumask_t map)
+{
+        unsigned int irq;
+        static int warned;
+
+        for (irq = 0; irq < NR_IRQS; irq++) {
+                cpumask_t mask;
+                if (irq == 2)
+                        continue;
+
+                cpus_and(mask, irq_affinity[irq], map);
+                if (any_online_cpu(mask) == NR_CPUS) {
+                        printk("Breaking affinity for irq %i\n", irq);
+                        mask = map;
+                }
+                if (irq_desc[irq].handler->set_affinity)
+                        irq_desc[irq].handler->set_affinity(irq, mask);
+                else if (irq_desc[irq].action && !(warned++))
+                        printk("Cannot set affinity for irq %i\n", irq);
+        }
+
+#if 0
+        barrier();
+        /* Ingo Molnar says: "after the IO-APIC masks have been redirected
+           [note the nop - the interrupt-enable boundary on x86 is two
+           instructions from sti] - to flush out pending hardirqs and
+           IPIs. After this point nothing is supposed to reach this CPU." */
+        __asm__ __volatile__("sti; nop; cli");
+        barrier();
+#else
+        /* That doesn't seem sufficient. Give it 1ms. */
+        local_irq_enable();
+        mdelay(1);
+        local_irq_disable();
+#endif
+}
+#endif
+
diff -puN arch/i386/kernel/msr.c~i386-cpu-hotplug-updated-for-mm arch/i386/kernel/msr.c
--- 25/arch/i386/kernel/msr.c~i386-cpu-hotplug-updated-for-mm 2005-02-23 02:20:06.000000000 -0800
+++ 25-akpm/arch/i386/kernel/msr.c 2005-02-23 02:20:06.000000000 -0800
@@ -260,7 +260,7 @@ static struct file_operations msr_fops =
         .open = msr_open,
 };

-static int msr_class_simple_device_add(int i)
+static int __devinit msr_class_simple_device_add(int i)
 {
         int err = 0;
         struct class_device *class_err;
diff -puN arch/i386/kernel/process.c~i386-cpu-hotplug-updated-for-mm arch/i386/kernel/process.c
--- 25/arch/i386/kernel/process.c~i386-cpu-hotplug-updated-for-mm 2005-02-23 02:20:06.000000000 -0800
+++ 25-akpm/arch/i386/kernel/process.c 2005-02-23 02:20:06.000000000 -0800
@@ -13,6 +13,7 @@

 #include <stdarg.h>

+#include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
@@ -55,6 +56,9 @@
 #include <linux/irq.h>
 #include <linux/err.h>

+#include <asm/tlbflush.h>
+#include <asm/cpu.h>
+
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

 int hlt_counter;
@@ -139,6 +143,34 @@ static void poll_idle (void)
         }
 }

+#ifdef CONFIG_HOTPLUG_CPU
+#include <asm/nmi.h>
+/* We don't actually take CPU down, just spin without interrupts. */
+static inline void play_dead(void)
+{
+        /* Ack it */
+        __get_cpu_var(cpu_state) = CPU_DEAD;
+
+        /* We shouldn't have to disable interrupts while dead, but
+         * some interrupts just don't seem to go away, and this makes
+         * it "work" for testing purposes. */
+        /* Death loop */
+        while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
+                cpu_relax();
+
+        local_irq_disable();
+        __flush_tlb_all();
+        cpu_set(smp_processor_id(), cpu_online_map);
+        enable_APIC_timer();
+        local_irq_enable();
+}
+#else
+static inline void play_dead(void)
+{
+        BUG();
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
 /*
  * The idle thread. There's no useful work to be
  * done, so just try to conserve power and have a
@@ -162,6 +194,9 @@ void cpu_idle (void)
                         if (!idle)
                                 idle = default_idle;

+                        if (cpu_is_offline(cpu))
+                                play_dead();
+
                         irq_stat[cpu].idle_timestamp = jiffies;
                         idle();
                 }
diff -puN arch/i386/kernel/smpboot.c~i386-cpu-hotplug-updated-for-mm arch/i386/kernel/smpboot.c
--- 25/arch/i386/kernel/smpboot.c~i386-cpu-hotplug-updated-for-mm 2005-02-23 02:20:06.000000000 -0800
+++ 25-akpm/arch/i386/kernel/smpboot.c 2005-02-23 02:20:06.000000000 -0800
@@ -44,6 +44,9 @@
 #include <linux/smp_lock.h>
 #include <linux/irq.h>
 #include <linux/bootmem.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/percpu.h>

 #include <linux/delay.h>
 #include <linux/mc146818rtc.h>
@@ -89,6 +92,9 @@ extern unsigned char trampoline_end [];
 static unsigned char *trampoline_base;
 static int trampoline_exec;

+/* State of each CPU. */
+DEFINE_PER_CPU(int, cpu_state) = { 0 };
+
 /*
  * Currently trivial. Write the real->protected mode
  * bootstrap into the page concerned. The caller
@@ -1095,6 +1101,9 @@ static void __init smp_boot_cpus(unsigne
    who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
+        smp_commenced_mask = cpumask_of_cpu(0);
+        cpu_callin_map = cpumask_of_cpu(0);
+        mb();
         smp_boot_cpus(max_cpus);
 }

@@ -1104,20 +1113,99 @@ void __devinit smp_prepare_boot_cpu(void
         cpu_set(smp_processor_id(), cpu_callout_map);
 }

-int __devinit __cpu_up(unsigned int cpu)
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* must be called with the cpucontrol mutex held */
+static int __devinit cpu_enable(unsigned int cpu)
 {
-        /* This only works at boot for x86. See "rewrite" above. */
-        if (cpu_isset(cpu, smp_commenced_mask)) {
-                local_irq_enable();
-                return -ENOSYS;
+        /* get the target out of its holding state */
+        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+        wmb();
+
+        /* wait for the processor to ack it. timeout? */
+        while (!cpu_online(cpu))
+                cpu_relax();
+
+        fixup_irqs(cpu_online_map);
+        /* counter the disable in fixup_irqs() */
+        local_irq_enable();
+        return 0;
+}
+
+int __cpu_disable(void)
+{
+        cpumask_t map = cpu_online_map;
+        int cpu = smp_processor_id();
+
+        /*
+         * Perhaps use cpufreq to drop frequency, but that could go
+         * into generic code.
+         *
+         * We won't take down the boot processor on i386 due to some
+         * interrupts only being able to be serviced by the BSP.
+         * Especially so if we're not using an IOAPIC   -zwane
+         */
+        if (cpu == 0)
+                return -EBUSY;
+
+        /* We enable the timer again on the exit path of the death loop */
+        disable_APIC_timer();
+        /* Allow any queued timer interrupts to get serviced */
+        local_irq_enable();
+        mdelay(1);
+        local_irq_disable();
+
+        cpu_clear(cpu, map);
+        fixup_irqs(map);
+        /* It's now safe to remove this processor from the online map */
+        cpu_clear(cpu, cpu_online_map);
+        return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+        /* We don't do anything here: idle task is faking death itself. */
+        unsigned int i;
+
+        for (i = 0; i < 10; i++) {
+                /* They ack this in play_dead by setting CPU_DEAD */
+                if (per_cpu(cpu_state, cpu) == CPU_DEAD)
+                        return;
+                current->state = TASK_UNINTERRUPTIBLE;
+                schedule_timeout(HZ/10);
         }
+        printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+}
+#else /* ... !CONFIG_HOTPLUG_CPU */
+int __cpu_disable(void)
+{
+        return -ENOSYS;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+        /* We said "no" in __cpu_disable */
+        BUG();
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+int __devinit __cpu_up(unsigned int cpu)
+{
         /* In case one didn't come up */
         if (!cpu_isset(cpu, cpu_callin_map)) {
+                printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
                 local_irq_enable();
                 return -EIO;
         }

+#ifdef CONFIG_HOTPLUG_CPU
+        /* Already up, and in cpu_quiescent now? */
+        if (cpu_isset(cpu, smp_commenced_mask)) {
+                cpu_enable(cpu);
+                return 0;
+        }
+#endif
+
         local_irq_enable();
         /* Unleash the CPU! */
         cpu_set(cpu, smp_commenced_mask);
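
The cpu_state handshake between __cpu_die() above and play_dead() in the
process.c hunk is a plain polling protocol. The toy userspace model below
(hypothetical code, not from the patch: threads stand in for CPUs, and the
_SIM names are invented to avoid suggesting kernel API) walks the same three
steps -- the dying side acks with CPU_DEAD, spins until released with
CPU_UP_PREPARE, and the controller polls for the ack:

        /* handshake_sim.c: model of play_dead()/__cpu_die()/cpu_enable().
         * Build with: cc -pthread handshake_sim.c */
        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdio.h>
        #include <unistd.h>

        enum { RUNNING_SIM, UP_PREPARE_SIM, DEAD_SIM };
        static atomic_int cpu_state_sim = RUNNING_SIM;

        static void *fake_cpu(void *unused)
        {
                cpu_state_sim = DEAD_SIM;       /* play_dead(): "Ack it" */
                while (cpu_state_sim != UP_PREPARE_SIM)
                        ;                       /* death loop; cpu_relax() stand-in */
                printf("fake cpu: released, back online\n");
                return NULL;
        }

        int main(void)
        {
                pthread_t t;

                pthread_create(&t, NULL, fake_cpu, NULL);
                while (cpu_state_sim != DEAD_SIM)
                        ;                       /* __cpu_die(): wait for the ack */
                printf("controller: cpu acked CPU_DEAD\n");
                sleep(1);
                cpu_state_sim = UP_PREPARE_SIM; /* cpu_enable(): release it */
                pthread_join(t, NULL);
                return 0;
        }
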
diff -puN arch/i386/kernel/smp.c~i386-cpu-hotplug-updated-for-mm arch/i386/kernel/smp.c
--- 25/arch/i386/kernel/smp.c~i386-cpu-hotplug-updated-for-mm 2005-02-23 02:20:06.000000000 -0800
+++ 25-akpm/arch/i386/kernel/smp.c 2005-02-23 02:20:06.000000000 -0800
@@ -19,6 +19,7 @@
 #include <linux/mc146818rtc.h>
 #include <linux/cache.h>
 #include <linux/interrupt.h>
+#include <linux/cpu.h>

 #include <asm/mtrr.h>
 #include <asm/tlbflush.h>
@@ -163,7 +164,7 @@ void send_IPI_mask_bitmask(cpumask_t cpu
         unsigned long flags;

         local_irq_save(flags);
-
+        WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
         /*
          * Wait for idle.
          */
@@ -345,21 +346,21 @@ out:
 static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                                                 unsigned long va)
 {
-        cpumask_t tmp;
         /*
          * A couple of (to be removed) sanity checks:
          *
-         * - we do not send IPIs to not-yet booted CPUs.
         * - current CPU must not be in mask
         * - mask must exist :)
         */
         BUG_ON(cpus_empty(cpumask));
-
-        cpus_and(tmp, cpumask, cpu_online_map);
-        BUG_ON(!cpus_equal(cpumask, tmp));
         BUG_ON(cpu_isset(smp_processor_id(), cpumask));
         BUG_ON(!mm);

+        /* If a CPU which we ran on has gone down, OK. */
+        cpus_and(cpumask, cpumask, cpu_online_map);
+        if (cpus_empty(cpumask))
+                return;
+
         /*
          * i'm not happy about this global shared spinlock in the
          * MM hot path, but we'll see how contended it is.
@@ -484,6 +485,7 @@ void smp_send_nmi_allbutself(void)
  */
 void smp_send_reschedule(int cpu)
 {
+        WARN_ON(cpu_is_offline(cpu));
         send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
 }

@@ -524,10 +526,16 @@ int smp_call_function (void (*func) (voi
  */
 {
         struct call_data_struct data;
-        int cpus = num_online_cpus()-1;
+        int cpus;

-        if (!cpus)
+        /* Holding any lock stops cpus from going down. */
+        spin_lock(&call_lock);
+        cpus = num_online_cpus()-1;
+
+        if (!cpus) {
+                spin_unlock(&call_lock);
                 return 0;
+        }

         /* Can deadlock when called with interrupts disabled */
         WARN_ON(irqs_disabled());
@@ -539,7 +547,6 @@ int smp_call_function (void (*func) (voi
         if (wait)
                 atomic_set(&data.finished, 0);

-        spin_lock(&call_lock);
         call_data = &data;
         mb();
diff -puN arch/i386/kernel/traps.c~i386-cpu-hotplug-updated-for-mm arch/i386/kernel/traps.c
--- 25/arch/i386/kernel/traps.c~i386-cpu-hotplug-updated-for-mm 2005-02-23 02:20:06.000000000 -0800
+++ 25-akpm/arch/i386/kernel/traps.c 2005-02-23 02:20:06.000000000 -0800
@@ -669,6 +669,14 @@ fastcall void do_nmi(struct pt_regs * re
         nmi_enter();

         cpu = smp_processor_id();
+
+#ifdef CONFIG_HOTPLUG_CPU
+        if (!cpu_online(cpu)) {
+                nmi_exit();
+                return;
+        }
+#endif
+
         ++nmi_count(cpu);

         if (!nmi_callback(regs, cpu))
diff -puN arch/ia64/kernel/smpboot.c~i386-cpu-hotplug-updated-for-mm arch/ia64/kernel/smpboot.c
--- 25/arch/ia64/kernel/smpboot.c~i386-cpu-hotplug-updated-for-mm 2005-02-23 02:20:06.000000000 -0800
+++ 25-akpm/arch/ia64/kernel/smpboot.c 2005-02-23 02:20:06.000000000 -0800
@@ -590,9 +590,10 @@ int __cpu_disable(void)
         if (cpu == 0)
                 return -EBUSY;

+        cpu_clear(cpu, cpu_online_map);
         fixup_irqs();
         local_flush_tlb_all();
-        printk ("Disabled cpu %u\n", smp_processor_id());
+        printk("Disabled cpu %u\n", cpu);
         return 0;
 }
diff -puN arch/ppc64/kernel/smp.c~i386-cpu-hotplug-updated-for-mm arch/ppc64/kernel/smp.c
diff -puN arch/s390/kernel/smp.c~i386-cpu-hotplug-updated-for-mm arch/s390/kernel/smp.c
--- 25/arch/s390/kernel/smp.c~i386-cpu-hotplug-updated-for-mm 2005-02-23 02:20:06.000000000 -0800
+++ 25-akpm/arch/s390/kernel/smp.c 2005-02-23 02:20:06.000000000 -0800
@@ -679,12 +679,14 @@ __cpu_disable(void)
 {
         unsigned long flags;
         ec_creg_mask_parms cr_parms;
+        int cpu = smp_processor_id();

         spin_lock_irqsave(&smp_reserve_lock, flags);
-        if (smp_cpu_reserved[smp_processor_id()] != 0) {
+        if (smp_cpu_reserved[cpu] != 0) {
                 spin_unlock_irqrestore(&smp_reserve_lock, flags);
                 return -EBUSY;
         }
+        cpu_clear(cpu, cpu_online_map);

 #ifdef CONFIG_PFAULT
         /* Disable pfault pseudo page faults on this cpu. */
diff -puN include/asm-i386/cpu.h~i386-cpu-hotplug-updated-for-mm include/asm-i386/cpu.h
--- 25/include/asm-i386/cpu.h~i386-cpu-hotplug-updated-for-mm 2005-02-23 02:20:06.000000000 -0800
+++ 25-akpm/include/asm-i386/cpu.h 2005-02-23 02:20:06.000000000 -0800
@@ -5,6 +5,7 @@
 #include <linux/cpu.h>
 #include <linux/topology.h>
 #include <linux/nodemask.h>
+#include <linux/percpu.h>

 #include <asm/node.h>

@@ -17,4 +18,5 @@ extern int arch_register_cpu(int num);
 extern void arch_unregister_cpu(int);
 #endif

+DECLARE_PER_CPU(int, cpu_state);
 #endif /* _ASM_I386_CPU_H_ */
diff -puN include/asm-i386/irq.h~i386-cpu-hotplug-updated-for-mm include/asm-i386/irq.h
--- 25/include/asm-i386/irq.h~i386-cpu-hotplug-updated-for-mm 2005-02-23 02:20:06.000000000 -0800
+++ 25-akpm/include/asm-i386/irq.h 2005-02-23 02:20:06.000000000 -0800
@@ -38,4 +38,8 @@ extern void release_vm86_irqs(struct tas
 extern int irqbalance_disable(char *str);
 #endif

+#ifdef CONFIG_HOTPLUG_CPU
+extern void fixup_irqs(cpumask_t map);
+#endif
+
 #endif /* _ASM_IRQ_H */
diff -puN include/asm-i386/smp.h~i386-cpu-hotplug-updated-for-mm include/asm-i386/smp.h
--- 25/include/asm-i386/smp.h~i386-cpu-hotplug-updated-for-mm 2005-02-23 02:20:06.000000000 -0800
+++ 25-akpm/include/asm-i386/smp.h 2005-02-23 02:20:06.000000000 -0800
@@ -85,6 +85,9 @@ static __inline int logical_smp_processo
 }

 #endif
+
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
 #endif /* !__ASSEMBLY__ */

 #define NO_PROC_ID 0xFF /* No processor magic marker */
diff -puN kernel/cpu.c~i386-cpu-hotplug-updated-for-mm kernel/cpu.c
--- 25/kernel/cpu.c~i386-cpu-hotplug-updated-for-mm 2005-02-23 02:20:06.000000000 -0800
+++ 25-akpm/kernel/cpu.c 2005-02-23 02:20:06.000000000 -0800
@@ -63,19 +63,15 @@ static int take_cpu_down(void *unused)
 {
         int err;

-        /* Take offline: makes arch_cpu_down somewhat easier. */
-        cpu_clear(smp_processor_id(), cpu_online_map);
-
         /* Ensure this CPU doesn't handle any more interrupts. */
         err = __cpu_disable();
         if (err < 0)
-                cpu_set(smp_processor_id(), cpu_online_map);
-        else
-                /* Force idle task to run as soon as we yield: it should
-                   immediately notice cpu is offline and die quickly. */
-                sched_idle_next();
+                return err;

-        return err;
+        /* Force idle task to run as soon as we yield: it should
+           immediately notice cpu is offline and die quickly. */
+        sched_idle_next();
+        return 0;
 }
 int cpu_down(unsigned int cpu)
diff -puN arch/ppc64/kernel/pSeries_smp.c~i386-cpu-hotplug-updated-for-mm arch/ppc64/kernel/pSeries_smp.c
--- 25/arch/ppc64/kernel/pSeries_smp.c~i386-cpu-hotplug-updated-for-mm 2005-02-23 02:20:08.000000000 -0800
+++ 25-akpm/arch/ppc64/kernel/pSeries_smp.c 2005-02-23 02:20:08.000000000 -0800
@@ -86,10 +86,13 @@ static int query_cpu_stopped(unsigned in

 int pSeries_cpu_disable(void)
 {
+        int cpu = smp_processor_id();
+
+        cpu_clear(cpu, cpu_online_map);
         systemcfg->processorCount--;

         /*fix boot_cpuid here*/
-        if (smp_processor_id() == boot_cpuid)
+        if (cpu == boot_cpuid)
                 boot_cpuid = any_online_cpu(cpu_online_map);

         /* FIXME: abstract this to not be platform specific later on */
_