ia64/xen-unstable

linux-2.6.9-xen-sparse/arch/xen/i386/kernel/smp.c @ 3001:7d1e3f96a1b8

bitkeeper revision 1.1159.1.428 (419a4e23aURf9dK_BkNINC85DT2RDw)

Remove debug printks.

author   cl349@freefall.cl.cam.ac.uk
date     Tue Nov 16 18:59:47 2004 +0000 (2004-11-16)
parents  7457699e5eb5

/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#if 0
#include <mach_apic.h>
#endif
#include <asm-xen/evtchn.h>

#define xxprint(msg) HYPERVISOR_console_io(CONSOLEIO_write, strlen(msg), msg)
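
/*
 * xxprint() writes straight to the Xen console via the console_io
 * hypercall, bypassing printk; it is used further below in the places
 * where the native local-APIC handling has been stubbed out.
 */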

/*
 *	Some notes on x86 processor bugs affecting SMP operation:
 *
 *	Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 *	The Linux implications for SMP are handled as follows:
 *
 *	Pentium III / [Xeon]
 *		None of the E1AP-E3AP errata are visible to the user.
 *
 *	E1AP.	see PII A1AP
 *	E2AP.	see PII A2AP
 *	E3AP.	see PII A3AP
 *
 *	Pentium II / [Xeon]
 *		None of the A1AP-A3AP errata are visible to the user.
 *
 *	A1AP.	see PPro 1AP
 *	A2AP.	see PPro 2AP
 *	A3AP.	see PPro 7AP
 *
 *	Pentium Pro
 *		None of 1AP-9AP errata are visible to the normal user,
 *	except occasional delivery of 'spurious interrupt' as trap #15.
 *	This is very rare and a non-problem.
 *
 *	1AP.	Linux maps APIC as non-cacheable
 *	2AP.	worked around in hardware
 *	3AP.	fixed in C0 and above steppings microcode update.
 *		Linux does not use excessive STARTUP_IPIs.
 *	4AP.	worked around in hardware
 *	5AP.	symmetric IO mode (normal Linux operation) not affected.
 *		'noapic' mode has vector 0xf filled out properly.
 *	6AP.	'noapic' mode might be affected - fixed in later steppings
 *	7AP.	We do not assume writes to the LVT deasserting IRQs
 *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
 *	9AP.	We do not use mixed mode
 *
 *	Pentium
 *		There is a marginal case where REP MOVS on 100MHz SMP
 *	machines with B stepping processors can fail. XXX should provide
 *	an L1cache=Writethrough or L1cache=off option.
 *
 *		B stepping CPUs may hang. There are hardware work arounds
 *	for this. We warn about it in case your board doesn't have the work
 *	arounds. Basically that's so I can tell anyone with a B stepping
 *	CPU and SMP problems "tough".
 *
 *	Specific items [From Pentium Processor Specification Update]
 *
 *	1AP.	Linux doesn't use remote read
 *	2AP.	Linux doesn't trust APIC errors
 *	3AP.	We work around this
 *	4AP.	Linux never generates 3 interrupts of the same priority
 *		to cause a lost local interrupt.
 *	5AP.	Remote read is never used
 *	6AP.	not affected - worked around in hardware
 *	7AP.	not affected - worked around in hardware
 *	8AP.	worked around in hardware - we get explicit CS errors if not
 *	9AP.	only 'noapic' mode affected. Might generate spurious
 *		interrupts, we log only the first one and count the
 *		rest silently.
 *	10AP.	not affected - worked around in hardware
 *	11AP.	Linux reads the APIC between writes to avoid this, as per
 *		the documentation. Make sure you preserve this as it affects
 *		the C stepping chips too.
 *	12AP.	not affected - worked around in hardware
 *	13AP.	not affected - worked around in hardware
 *	14AP.	we always deassert INIT during bootup
 *	15AP.	not affected - worked around in hardware
 *	16AP.	not affected - worked around in hardware
 *	17AP.	not affected - worked around in hardware
 *	18AP.	not affected - worked around in hardware
 *	19AP.	not affected - worked around in BIOS
 *
 *	If this sounds worrying, believe me these bugs are either ___RARE___,
 *	or are signal timing bugs worked around in hardware and there's
 *	about nothing of note with C stepping upwards.
 */

DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };

/*
 * the following functions deal with sending IPIs between CPUs.
 *
 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
 */

static inline int __prepare_ICR (unsigned int shortcut, int vector)
{
        return APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL;
}

static inline int __prepare_ICR2 (unsigned int mask)
{
        return SET_APIC_DEST_FIELD(mask);
}

DECLARE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);
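
/*
 * Under Xen the guest does not program a local APIC to send IPIs: each IPI
 * vector is bound to a per-CPU event channel (ipi_to_evtchn above), and an
 * IPI is "sent" by notifying that channel through the hypervisor.
 */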

static inline void __send_IPI_one(unsigned int cpu, int vector)
{
        unsigned int evtchn;

        evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
        // printk("send_IPI_mask_bitmask cpu %d vector %d evtchn %d\n", cpu, vector, evtchn);
        if (evtchn) {
#if 0
                shared_info_t *s = HYPERVISOR_shared_info;
                while (synch_test_bit(evtchn, &s->evtchn_pending[0]) ||
                       synch_test_bit(evtchn, &s->evtchn_mask[0]))
                        ;
#endif
                notify_via_evtchn(evtchn);
        } else
                printk("send_IPI to unbound port %d/%d",
                       cpu, vector);
}
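
/*
 * The APIC destination shortcuts (SELF/ALLBUT) are emulated here by walking
 * the online CPU map and notifying each target's event channel in turn.
 */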

void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
        int cpu;

        switch (shortcut) {
        case APIC_DEST_SELF:
                __send_IPI_one(smp_processor_id(), vector);
                break;
        case APIC_DEST_ALLBUT:
                for (cpu = 0; cpu < NR_CPUS; ++cpu) {
                        if (cpu == smp_processor_id())
                                continue;
                        if (cpu_isset(cpu, cpu_online_map)) {
                                __send_IPI_one(cpu, vector);
                        }
                }
                break;
        default:
                printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
                       vector);
                break;
        }
}

void fastcall send_IPI_self(int vector)
{
        __send_IPI_shortcut(APIC_DEST_SELF, vector);
}

/*
 * This is only used on smaller machines.
 */
void send_IPI_mask_bitmask(cpumask_t mask, int vector)
{
        unsigned long flags;
        unsigned int cpu;

        local_irq_save(flags);

        for (cpu = 0; cpu < NR_CPUS; ++cpu) {
                if (cpu_isset(cpu, mask)) {
                        __send_IPI_one(cpu, vector);
                }
        }

        local_irq_restore(flags);
}
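
/*
 * With event-channel delivery there is no ICR to serialise on, so the
 * "sequence" variant can simply reuse the bitmask implementation.
 */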

inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
{
        send_IPI_mask_bitmask(mask, vector);
}

#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 */

static cpumask_t flush_cpumask;
static struct mm_struct * flush_mm;
static unsigned long flush_va;
static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
#define FLUSH_ALL	0xffffffff
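
/*
 * tlbstate_lock serialises the sender side: only one remote TLB flush is in
 * flight at a time, with its parameters published through the flush_*
 * globals above and the target CPUs tracked in flush_cpumask.
 */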

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us..
 */
static inline void leave_mm (unsigned long cpu)
{
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                BUG();
        cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
}

/*
 *
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu_tlbstate to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu_tlbstate[].active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu_tlbstate[].active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */

irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
                                     struct pt_regs *regs)
{
        unsigned long cpu;

        cpu = get_cpu();

        if (!cpu_isset(cpu, flush_cpumask))
                goto out;
        /*
         * This was a BUG() but until someone can quote me the
         * line from the intel manual that guarantees an IPI to
         * multiple CPUs is retried _only_ on the erroring CPUs
         * it's staying as a return
         *
         * BUG();
         */

        if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
                if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
                        if (flush_va == FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(flush_va);
                } else
                        leave_mm(cpu);
        }
        smp_mb__before_clear_bit();
        cpu_clear(cpu, flush_cpumask);
        smp_mb__after_clear_bit();
out:
        put_cpu_no_resched();

        return IRQ_HANDLED;
}

static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                             unsigned long va)
{
        cpumask_t tmp;
        /*
         * A couple of (to be removed) sanity checks:
         *
         * - we do not send IPIs to not-yet booted CPUs.
         * - current CPU must not be in mask
         * - mask must exist :)
         */
        BUG_ON(cpus_empty(cpumask));

        cpus_and(tmp, cpumask, cpu_online_map);
        BUG_ON(!cpus_equal(cpumask, tmp));
        BUG_ON(cpu_isset(smp_processor_id(), cpumask));
        BUG_ON(!mm);

        /*
         * I'm not happy about this global shared spinlock in the
         * MM hot path, but we'll see how contended it is.
         * Temporarily this turns IRQs off, so that lockups are
         * detected by the NMI watchdog.
         */
        spin_lock(&tlbstate_lock);

        flush_mm = mm;
        flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
        atomic_set_mask(cpumask, &flush_cpumask);
#else
        {
                int k;
                unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
                unsigned long *cpu_mask = (unsigned long *)&cpumask;
                for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
                        atomic_set_mask(cpu_mask[k], &flush_mask[k]);
        }
#endif
        /*
         * We have to send the IPI only to
         * CPUs affected.
         */
        send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

        while (!cpus_empty(flush_cpumask))
                /* nothing. lockup detection does not belong here */
                mb();

        flush_mm = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}
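
/*
 * The flush_tlb_* entry points below all follow the same pattern: take a
 * copy of the mm's cpu_vm_mask with the local CPU removed, do the local
 * flush directly, and hand the remaining mask to flush_tlb_others().
 */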

void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        local_flush_tlb();
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
        preempt_enable();
}

void flush_tlb_mm (struct mm_struct * mm)
{
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        if (current->active_mm == mm) {
                if (current->mm)
                        local_flush_tlb();
                else
                        leave_mm(smp_processor_id());
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

        preempt_enable();
}

void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(va);
                else
                        leave_mm(smp_processor_id());
        }

        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, va);

        preempt_enable();
}

static void do_flush_tlb_all(void* info)
{
        unsigned long cpu = smp_processor_id();

        __flush_tlb_all();
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
                leave_mm(cpu);
}

void flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
        send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
};

static struct call_data_struct * call_data;
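
/*
 * call_lock ensures that only one smp_call_function() invocation is in
 * flight at a time, so a single call_data pointer suffices; the structure
 * it points to lives on the caller's stack for the duration of the call.
 */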

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */

int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
                       int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
{
        struct call_data_struct data;
        int cpus = num_online_cpus()-1;

        if (!cpus)
                return 0;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock(&call_lock);
        call_data = &data;
        mb();

        /* Send a message to all other CPUs and wait for them to respond */
        send_IPI_allbutself(CALL_FUNCTION_VECTOR);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                barrier();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        barrier();
        spin_unlock(&call_lock);

        return 0;
}
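
/*
 * Illustrative usage only (the callback name is made up): from process
 * context with interrupts enabled, something like
 *
 *	static void do_local_work(void *info) { ... }
 *	smp_call_function(do_local_work, NULL, 1, 1);
 *
 * runs do_local_work() on every other online CPU and waits for completion.
 */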

static void stop_this_cpu (void * dummy)
{
        /*
         * Remove this CPU:
         */
        cpu_clear(smp_processor_id(), cpu_online_map);
        local_irq_disable();
#if 1
        xxprint("stop_this_cpu disable_local_APIC\n");
#else
        disable_local_APIC();
#endif
        if (cpu_data[smp_processor_id()].hlt_works_ok)
                for (;;) __asm__("hlt");
        for (;;);
}

/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 1, 0);

        local_irq_disable();
#if 1
        xxprint("smp_send_stop disable_local_APIC\n");
#else
        disable_local_APIC();
#endif
        local_irq_enable();
}

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id,
                                     struct pt_regs *regs)
{
        return IRQ_HANDLED;
}

#include <linux/kallsyms.h>
irqreturn_t smp_call_function_interrupt(int irq, void *dev_id,
                                        struct pt_regs *regs)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        mb();
        atomic_inc(&call_data->started);
        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        irq_enter();
        (*func)(info);
        irq_exit();

        if (wait) {
                mb();
                atomic_inc(&call_data->finished);
        }

        return IRQ_HANDLED;
}