ia64/xen-unstable

view xen/arch/x86/smpboot.c @ 4605:9e4daf2e49d2

bitkeeper revision 1.1347 (4266ee31uFDz7ZMst_Cy-vYbXJ2EZw)


__vmxon() is called twice for the boot cpu, because identify_cpu() is called
twice. This patch ensures that identify_cpu() and hence __vmxon() is called
exactly once for the boot CPU on SMP.

Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Arun Sharma <arun.sharma@intel.com>
Signed-off-by: ian@xensource.com
author iap10@tetris.cl.cam.ac.uk
date Thu Apr 21 00:05:05 2005 +0000 (2005-04-21)
parents d56c3246d889
children 67c40314aa6e
line source
1 /*
2 * x86 SMP booting functions
3 *
4 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
5 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
6 *
7 * Much of the core SMP work is based on previous work by Thomas Radke, to
8 * whom a great many thanks are extended.
9 *
10 * Thanks to Intel for making available several different Pentium,
11 * Pentium Pro and Pentium-II/Xeon MP machines.
12 * Original development of Linux SMP code supported by Caldera.
13 *
14 * This code is released under the GNU General Public License version 2 or
15 * later.
16 *
17 * Fixes
18 * Felix Koop : NR_CPUS used properly
19 * Jose Renau : Handle single CPU case.
20 * Alan Cox : By repeated request 8) - Total BogoMIP report.
21 * Greg Wright : Fix for kernel stacks panic.
22 * Erich Boleyn : MP v1.4 and additional changes.
23 * Matthias Sattler : Changes for 2.1 kernel map.
24 * Michel Lespinasse : Changes for 2.1 kernel map.
25 * Michael Chastain : Change trampoline.S to gnu as.
26 * Alan Cox : Dumb bug: 'B' step PPro's are fine
27 * Ingo Molnar : Added APIC timers, based on code
28 * from Jose Renau
29 * Ingo Molnar : various cleanups and rewrites
30 * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug.
31 * Maciej W. Rozycki : Bits for genuine 82489DX APICs
32 * Martin J. Bligh : Added support for multi-quad systems
33 */
35 #include <xen/config.h>
36 #include <xen/init.h>
37 #include <xen/irq.h>
38 #include <xen/mm.h>
39 #include <xen/slab.h>
40 #include <asm/flushtlb.h>
41 #include <asm/mc146818rtc.h>
42 #include <asm/smpboot.h>
43 #include <xen/smp.h>
44 #include <asm/msr.h>
45 #include <asm/system.h>
46 #include <asm/mpspec.h>
47 #include <asm/io_apic.h>
48 #include <xen/sched.h>
49 #include <xen/delay.h>
50 #include <xen/lib.h>
52 #ifdef CONFIG_SMP
54 /* Configured maximum number of CPUs to activate. We name the parameter
55 "maxcpus" rather than max_cpus to be compatible with Linux */
56 static int max_cpus = -1;
57 integer_param("maxcpus", max_cpus);
59 /* Total count of live CPUs */
60 int smp_num_cpus = 1;
62 /* Number of hyperthreads per core */
63 int ht_per_core = 1;
65 /* Bitmask of currently online CPUs */
66 unsigned long cpu_online_map;
/*
 * Handshake bitmasks: the BP sets a CPU's bit in cpu_callout_map to let it
 * proceed (see do_boot_cpu/smp_callin); each AP sets its own bit in
 * cpu_callin_map once it has finished initialising.
 */
68 static volatile unsigned long cpu_callin_map;
69 static volatile unsigned long cpu_callout_map;
71 /* Per CPU bogomips and other parameters */
72 struct cpuinfo_x86 cpu_data[NR_CPUS];
74 /* Set when the idlers are all forked */
75 int smp_threads_ready;
77 /*
78 * Trampoline 80x86 program as an array.
79 */
81 extern unsigned char trampoline_data [];
82 extern unsigned char trampoline_end [];
/* Virtual address of the low-memory page the trampoline is copied into. */
83 static unsigned char *trampoline_base;
85 /*
86 * Currently trivial. Write the real->protected mode
87 * bootstrap into the page concerned. The caller
88 * has made sure it's suitably aligned.
89 */
91 static unsigned long __init setup_trampoline(void)
92 {
93 memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
/* Return the physical address: APs begin executing here in real mode. */
94 return virt_to_phys(trampoline_base);
95 }
97 /*
98 * We are called very early to get the low memory for the
99 * SMP bootup trampoline page.
100 */
101 void __init smp_alloc_memory(void)
102 {
103 /*
104 * Has to be in very low memory so we can execute
105 * real-mode AP code.
106 */
/* 0x90000 is below the 1MB real-mode limit; no allocation is performed. */
107 trampoline_base = __va(0x90000);
108 }
110 /*
111 * The bootstrap kernel entry code has set these up. Save them for
112 * a given CPU
113 */
115 void __init smp_store_cpu_info(int id)
116 {
117 cpu_data[id] = boot_cpu_data;
/*
 * identify_cpu() has already run once for the boot CPU (id 0); skipping it
 * here guarantees side-effectful probing such as __vmxon() happens exactly
 * once per CPU (see this changeset's description).
 */
118 if (id != 0)
119 identify_cpu(&cpu_data[id]);
120 }
122 /*
123 * Architecture specific routine called by the kernel just before init is
124 * fired off. This allows the BP to have everything in order [we hope].
125 * At the end of this all the APs will hit the system scheduling and off
126 * we go. Each AP will load the system gdt's and jump through the kernel
127 * init into idle(). At this point the scheduler will one day take over
128 * and give them jobs to do. smp_callin is a standard routine
129 * we use to track CPUs as they power up.
130 */
132 static atomic_t smp_commenced = ATOMIC_INIT(0);
/* Release the APs spinning on smp_commenced in start_secondary(). */
134 void __init smp_commence(void)
135 {
136 /*
137 * Lets the callins below out of their loop.
138 */
139 Dprintk("Setting commenced=1, go go go\n");
/* Make all prior initialisation visible before the flag flips. */
141 wmb();
142 atomic_set(&smp_commenced,1);
143 }
145 /*
146 * TSC synchronization.
147 *
148 * We first check whether all CPUs have their TSCs synchronized,
149 * then we print a warning if not, and always resync.
150 */
152 static atomic_t tsc_start_flag = ATOMIC_INIT(0);
153 static atomic_t tsc_count_start = ATOMIC_INIT(0);
154 static atomic_t tsc_count_stop = ATOMIC_INIT(0);
/* Per-CPU TSC samples gathered during the synchronization loops below. */
155 static unsigned long long tsc_values[NR_CPUS];
157 #define NR_LOOPS 5
/*
 * Approximate 64-bit / 32-bit division, expanded to 32-bit divisions and
 * 64-bit multiplication.  Used at boot time only, so the truncation error
 * in the partial terms (result may be off by a few units for some inputs)
 * is acceptable.
 *
 * result == a / b
 *        == (a1 + a2*(2^32)) / b
 *        == a1/b + a2*(2^32/b)
 *        == a1/b + a2*((2^32-1)/b) + a2/b + (a2*((2^32-1) % b))/b
 *                   ^---- (this multiplication can overflow on 32-bit)
 *
 * The halves of 'a' are extracted with shifts/masks rather than the old
 * ((unsigned int *)&a)[0]/[1] type punning: the cast violated strict
 * aliasing and silently assumed a little-endian layout.  The shift form
 * yields identical values on the x86 targets this file supports.
 */
static unsigned long long div64 (unsigned long long a, unsigned long b0)
{
    unsigned int a1, a2;
    unsigned long long res;

    a1 = (unsigned int)(a & 0xffffffffU);   /* low 32 bits of a */
    a2 = (unsigned int)(a >> 32);           /* high 32 bits of a */

    res = a1/b0 +
        (unsigned long long)a2 * (unsigned long long)(0xffffffff/b0) +
        a2 / b0 +
        (a2 * (0xffffffff % b0)) / b0;

    return res;
}
187 static void __init synchronize_tsc_bp (void)
188 {
189 int i;
190 unsigned long long t0;
191 unsigned long long sum, avg;
192 long long delta;
193 int buggy = 0;
195 printk("checking TSC synchronization across CPUs: ");
/* Release the APs spinning in synchronize_tsc_ap() on tsc_start_flag. */
197 atomic_set(&tsc_start_flag, 1);
198 wmb();
200 /*
201 * We loop a few times to get a primed instruction cache,
202 * then the last pass is more or less synchronized and
203 * the BP and APs set their cycle counters to zero all at
204 * once. This reduces the chance of having random offsets
205 * between the processors, and guarantees that the maximum
206 * delay between the cycle counters is never bigger than
207 * the latency of information-passing (cachelines) between
208 * two CPUs.
209 */
210 for (i = 0; i < NR_LOOPS; i++) {
211 /*
212 * all APs synchronize but they loop on '== num_cpus'
213 */
214 while (atomic_read(&tsc_count_start) != smp_num_cpus-1) mb();
215 atomic_set(&tsc_count_stop, 0);
216 wmb();
217 /*
218 * this lets the APs save their current TSC:
219 */
220 atomic_inc(&tsc_count_start);
222 rdtscll(tsc_values[smp_processor_id()]);
223 /*
224 * We clear the TSC in the last loop:
225 */
226 if (i == NR_LOOPS-1)
227 write_tsc(0, 0);
229 /*
230 * Wait for all APs to leave the synchronization point:
231 */
232 while (atomic_read(&tsc_count_stop) != smp_num_cpus-1) mb();
233 atomic_set(&tsc_count_start, 0);
234 wmb();
235 atomic_inc(&tsc_count_stop);
236 }
/* Mean of the sampled TSCs, used as the reference for skew reporting. */
238 sum = 0;
239 for (i = 0; i < smp_num_cpus; i++) {
240 t0 = tsc_values[i];
241 sum += t0;
242 }
243 avg = div64(sum, smp_num_cpus);
245 sum = 0;
246 for (i = 0; i < smp_num_cpus; i++) {
247 delta = tsc_values[i] - avg;
248 if (delta < 0)
249 delta = -delta;
250 /*
251 * We report bigger than 2 microseconds clock differences.
252 */
253 if (delta > 2*ticks_per_usec) {
254 long realdelta;
255 if (!buggy) {
256 buggy = 1;
257 printk("\n");
258 }
259 realdelta = div64(delta, ticks_per_usec);
260 if (tsc_values[i] < avg)
261 realdelta = -realdelta;
263 printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs TSC skew! FIXED.\n",
264 i, realdelta);
265 }
267 sum += delta;
268 }
269 if (!buggy)
270 printk("passed.\n");
271 }
273 static void __init synchronize_tsc_ap (void)
274 {
275 int i;
277 /*
278 * smp_num_cpus is not necessarily known at the time
279 * this gets called, so we first wait for the BP to
280 * finish SMP initialization:
281 */
282 while (!atomic_read(&tsc_start_flag)) mb();
/* AP side of the lock-step loop in synchronize_tsc_bp(): APs wait for the
   full '== smp_num_cpus' count, the BP for 'smp_num_cpus-1'. */
284 for (i = 0; i < NR_LOOPS; i++) {
285 atomic_inc(&tsc_count_start);
286 while (atomic_read(&tsc_count_start) != smp_num_cpus) mb();
288 rdtscll(tsc_values[smp_processor_id()]);
289 if (i == NR_LOOPS-1)
290 write_tsc(0, 0);
292 atomic_inc(&tsc_count_stop);
293 while (atomic_read(&tsc_count_stop) != smp_num_cpus) mb();
294 }
295 }
296 #undef NR_LOOPS
298 static atomic_t init_deasserted;
/* AP bring-up path: runs on each secondary CPU, called from start_secondary().
   Registers the CPU as online, sets up its local APIC, stores CPU data and
   hands control back to the BP via cpu_callin_map. */
300 void __init smp_callin(void)
301 {
302 int cpuid, phys_id, i;
304 /*
305 * If waken up by an INIT in an 82489DX configuration
306 * we may get here before an INIT-deassert IPI reaches
307 * our local APIC. We have to wait for the IPI or we'll
308 * lock up on an APIC access.
309 */
310 while (!atomic_read(&init_deasserted));
312 /*
313 * (This works even if the APIC is not enabled.)
314 */
315 phys_id = GET_APIC_ID(apic_read(APIC_ID));
316 cpuid = smp_processor_id();
317 if (test_and_set_bit(cpuid, &cpu_online_map)) {
318 printk("huh, phys CPU#%d, CPU#%d already present??\n",
319 phys_id, cpuid);
320 BUG();
321 }
322 Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
324 /*
325 * STARTUP IPIs are fragile beasts as they might sometimes
326 * trigger some glue motherboard logic. Complete APIC bus
327 * silence for 1 second, this overestimates the time the
328 * boot CPU is spending to send the up to 2 STARTUP IPIs
329 * by a factor of two. This should be enough.
330 */
/* Wait up to 200 * 10ms = 2s for the BP to set our cpu_callout_map bit. */
332 for ( i = 0; i < 200; i++ )
333 {
334 if ( test_bit(cpuid, &cpu_callout_map) ) break;
335 mdelay(10);
336 }
338 if (!test_bit(cpuid, &cpu_callout_map)) {
339 printk("BUG: CPU%d started up but did not get a callout!\n",
340 cpuid);
341 BUG();
342 }
344 /*
345 * the boot CPU has finished the init stage and is spinning
346 * on callin_map until we finish. We are free to set up this
347 * CPU, first the APIC. (this is probably redundant on most
348 * boards)
349 */
351 Dprintk("CALLIN, before setup_local_APIC().\n");
353 setup_local_APIC();
355 __sti();
357 Dprintk("Stack at about %p\n",&cpuid);
359 /*
360 * Save our processor parameters
361 */
362 smp_store_cpu_info(cpuid);
364 if (nmi_watchdog == NMI_LOCAL_APIC)
365 setup_apic_nmi_watchdog();
367 /*
368 * Allow the master to continue.
369 */
370 set_bit(cpuid, &cpu_callin_map);
372 /*
373 * Synchronize the TSC with the BP
374 */
375 synchronize_tsc_ap();
376 }
/* Number of APs successfully kicked so far; incremented in do_boot_cpu(). */
378 static int cpucount;
380 #ifdef __i386__
/*
 * Give this CPU a private copy of the (fully initialised) boot IDT and
 * point its IDTR at the copy via lidt.
 */
381 static void construct_percpu_idt(unsigned int cpu)
382 {
383 unsigned char idt_load[10];
/* NOTE(review): xmalloc_array() result is not checked — NULL would crash
   in the memcpy below. */
385 idt_tables[cpu] = xmalloc_array(idt_entry_t, IDT_ENTRIES);
386 memcpy(idt_tables[cpu], idt_table, IDT_ENTRIES*sizeof(idt_entry_t));
/* idt_load is the lidt operand: 16-bit limit followed by the base address. */
388 *(unsigned short *)(&idt_load[0]) = (IDT_ENTRIES*sizeof(idt_entry_t))-1;
389 *(unsigned long *)(&idt_load[2]) = (unsigned long)idt_tables[cpu];
390 __asm__ __volatile__ ( "lidt %0" : "=m" (idt_load) );
391 }
392 #endif
394 /*
395 * Activate a secondary processor.
396 */
397 void __init start_secondary(void)
398 {
/* cpucount was incremented by the BP in do_boot_cpu() before waking us,
   so it identifies this CPU. */
399 unsigned int cpu = cpucount;
401 extern void percpu_traps_init(void);
402 extern void cpu_init(void);
404 set_current(idle_task[cpu]);
406 percpu_traps_init();
408 cpu_init();
410 smp_callin();
/* Spin until the BP calls smp_commence(). */
412 while (!atomic_read(&smp_commenced))
413 cpu_relax();
415 #ifdef __i386__
416 /*
417 * At this point, boot CPU has fully initialised the IDT. It is
418 * now safe to make ourselves a private copy.
419 */
420 construct_percpu_idt(cpu);
421 #endif
423 local_flush_tlb();
/* Never returns; this CPU becomes an idle loop for the scheduler. */
425 startup_cpu_idle_loop();
427 BUG();
428 }
/* Real-mode startup stack descriptor consumed by the trampoline code. */
430 extern struct {
431 unsigned long esp, ss;
432 } stack_start;
/* All four maps below hold -1 in unassigned slots (see init_cpu_to_apicid). */
434 /* which physical APIC ID maps to which logical CPU number */
435 volatile int physical_apicid_2_cpu[MAX_APICID];
436 /* which logical CPU number maps to which physical APIC ID */
437 volatile int cpu_2_physical_apicid[NR_CPUS];
439 /* which logical APIC ID maps to which logical CPU number */
440 volatile int logical_apicid_2_cpu[MAX_APICID];
441 /* which logical CPU number maps to which logical APIC ID */
442 volatile int cpu_2_logical_apicid[NR_CPUS];
444 static inline void init_cpu_to_apicid(void)
445 /* Initialize all maps between cpu number and apicids */
446 {
447 int apicid, cpu;
/* -1 marks an unassigned slot in every direction of the mapping. */
449 for (apicid = 0; apicid < MAX_APICID; apicid++) {
450 physical_apicid_2_cpu[apicid] = -1;
451 logical_apicid_2_cpu[apicid] = -1;
452 }
453 for (cpu = 0; cpu < NR_CPUS; cpu++) {
454 cpu_2_physical_apicid[cpu] = -1;
455 cpu_2_logical_apicid[cpu] = -1;
456 }
457 }
459 static inline void map_cpu_to_boot_apicid(int cpu, int apicid)
460 /*
461 * set up a mapping between cpu and apicid. Uses logical apicids for multiquad,
462 * else physical apic ids
463 */
464 {
/* Note: only the physical-ID maps are updated here. */
465 physical_apicid_2_cpu[apicid] = cpu;
466 cpu_2_physical_apicid[cpu] = apicid;
467 }
469 static inline void unmap_cpu_to_boot_apicid(int cpu, int apicid)
470 /*
471 * undo a mapping between cpu and apicid. Uses logical apicids for multiquad,
472 * else physical apic ids
473 */
474 {
/* Reset to the -1 "unassigned" sentinel used by init_cpu_to_apicid(). */
475 physical_apicid_2_cpu[apicid] = -1;
476 cpu_2_physical_apicid[cpu] = -1;
477 }
479 #if APIC_DEBUG
/*
 * Debug helper: use remote-read IPIs to dump another CPU's APIC ID,
 * version and SPIV registers when it fails to respond to boot-up.
 */
480 static inline void inquire_remote_apic(int apicid)
481 {
482 int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
483 char *names[] = { "ID", "VERSION", "SPIV" };
484 int timeout, status;
486 printk("Inquiring remote APIC #%d...\n", apicid);
488 for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
489 printk("... APIC #%d %s: ", apicid, names[i]);
491 /*
492 * Wait for idle.
493 */
494 apic_wait_icr_idle();
496 apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
497 apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);
/* Poll for the remote-read result: up to 1000 * 100us = 100ms. */
499 timeout = 0;
500 do {
501 udelay(100);
502 status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
503 } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
505 switch (status) {
506 case APIC_ICR_RR_VALID:
507 status = apic_read(APIC_RRR);
508 printk("%08x\n", status);
509 break;
510 default:
511 printk("failed\n");
512 }
513 }
514 }
515 #endif
/*
 * Wake one AP using the INIT(assert) / INIT(deassert) / STARTUP sequence:
 * up to two STARTUP IPIs are sent for integrated APICs, none for external
 * 82489DX-style ones.  Returns 0 on apparent success, else the OR of the
 * send-busy and APIC-ESR accept-error status.
 */
518 static int wakeup_secondary_via_INIT(int phys_apicid, unsigned long start_eip)
519 {
520 unsigned long send_status = 0, accept_status = 0;
521 int maxlvt, timeout, num_starts, j;
523 Dprintk("Asserting INIT.\n");
525 /*
526 * Turn INIT on target chip
527 */
528 apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
530 /*
531 * Send IPI
532 */
533 apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
534 | APIC_DM_INIT);
536 Dprintk("Waiting for send to finish...\n")
537 timeout = 0;
538 do {
539 Dprintk("+");
540 udelay(100);
541 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
542 } while (send_status && (timeout++ < 1000));
544 mdelay(10);
546 Dprintk("Deasserting INIT.\n");
548 /* Target chip */
549 apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
551 /* Send IPI */
552 apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
554 Dprintk("Waiting for send to finish...\n");
555 timeout = 0;
556 do {
557 Dprintk("+");
558 udelay(100);
559 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
560 } while (send_status && (timeout++ < 1000));
/* APs blocked in smp_callin() may now safely touch their APIC. */
562 atomic_set(&init_deasserted, 1);
564 /*
565 * Should we send STARTUP IPIs ?
566 *
567 * Determine this based on the APIC version.
568 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
569 */
570 if (APIC_INTEGRATED(apic_version[phys_apicid]))
571 num_starts = 2;
572 else
573 num_starts = 0;
575 /*
576 * Run STARTUP IPI loop.
577 */
578 Dprintk("#startup loops: %d.\n", num_starts);
580 maxlvt = get_maxlvt();
582 for (j = 1; j <= num_starts; j++) {
583 Dprintk("Sending STARTUP #%d.\n",j);
585 apic_read_around(APIC_SPIV);
586 apic_write(APIC_ESR, 0);
587 apic_read(APIC_ESR);
588 Dprintk("After apic_write.\n");
590 /*
591 * STARTUP IPI
592 */
594 /* Target chip */
595 apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
597 /* Boot on the stack */
598 /* Kick the second */
/* The STARTUP vector is the 4KB page number of start_eip (hence >> 12). */
599 apic_write_around(APIC_ICR, APIC_DM_STARTUP
600 | (start_eip >> 12));
602 /*
603 * Give the other CPU some time to accept the IPI.
604 */
605 udelay(300);
607 Dprintk("Startup point 1.\n");
609 Dprintk("Waiting for send to finish...\n");
610 timeout = 0;
611 do {
612 Dprintk("+");
613 udelay(100);
614 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
615 } while (send_status && (timeout++ < 1000));
617 /*
618 * Give the other CPU some time to accept the IPI.
619 */
620 udelay(200);
621 /*
622 * Due to the Pentium erratum 3AP.
623 */
624 if (maxlvt > 3) {
625 apic_read_around(APIC_SPIV);
626 apic_write(APIC_ESR, 0);
627 }
628 accept_status = (apic_read(APIC_ESR) & 0xEF);
629 if (send_status || accept_status)
630 break;
631 }
632 Dprintk("After Startup.\n");
634 if (send_status)
635 printk("APIC never delivered???\n");
636 if (accept_status)
637 printk("APIC delivery error (%lx).\n", accept_status);
639 return (send_status | accept_status);
640 }
642 extern unsigned long cpu_initialized;
/*
 * Boot one AP: create its idle domain, set up its stack and the real-mode
 * trampoline, fire the INIT/STARTUP sequence, then wait for the AP to
 * call in.  On failure all bookkeeping is rolled back.
 */
644 static void __init do_boot_cpu (int apicid)
645 /*
646 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
647 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
648 */
649 {
650 struct domain *idle;
651 struct exec_domain *ed;
652 unsigned long boot_error = 0;
653 int timeout, cpu;
654 unsigned long start_eip;
655 void *stack;
657 cpu = ++cpucount;
659 if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
660 panic("failed 'createdomain' for CPU %d", cpu);
662 ed = idle->exec_domain[0];
664 set_bit(DF_IDLETASK, &idle->d_flags);
666 ed->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
668 map_cpu_to_boot_apicid(cpu, apicid);
670 idle_task[cpu] = ed;
672 /* start_eip had better be page-aligned! */
673 start_eip = setup_trampoline();
675 /* So we see what's up. */
676 printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
678 stack = (void *)alloc_xenheap_pages(STACK_ORDER);
679 #if defined(__i386__)
680 stack_start.esp = __pa(stack) + STACK_SIZE - STACK_RESERVED;
681 #elif defined(__x86_64__)
682 stack_start.esp = (unsigned long)stack + STACK_SIZE - STACK_RESERVED;
683 #endif
685 /* Debug build: detect stack overflow by setting up a guard page. */
686 memguard_guard_stack(stack);
688 /*
689 * This grunge runs the startup process for
690 * the targeted processor.
691 */
693 atomic_set(&init_deasserted, 0);
695 Dprintk("Setting warm reset code and vector.\n");
/* CMOS shutdown status byte 0xA requests a warm reset; the BIOS then jumps
   via the reset vector (40:67) written below — presumably TRAMPOLINE_HIGH/LOW
   map there; confirm against smpboot.h. */
697 CMOS_WRITE(0xa, 0xf);
698 local_flush_tlb();
699 Dprintk("1.\n");
700 *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
701 Dprintk("2.\n");
702 *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
703 Dprintk("3.\n");
705 /*
706 * Be paranoid about clearing APIC errors.
707 */
708 if ( APIC_INTEGRATED(apic_version[apicid]) )
709 {
710 apic_read_around(APIC_SPIV);
711 apic_write(APIC_ESR, 0);
712 apic_read(APIC_ESR);
713 }
715 /*
716 * Status is now clean
717 */
718 boot_error = 0;
720 /*
721 * Starting actual IPI sequence...
722 */
724 boot_error = wakeup_secondary_via_INIT(apicid, start_eip);
726 if (!boot_error) {
727 /*
728 * allow APs to start initializing.
729 */
730 Dprintk("Before Callout %d.\n", cpu);
731 set_bit(cpu, &cpu_callout_map);
732 Dprintk("After Callout %d.\n", cpu);
734 /*
735 * Wait 5s total for a response
736 */
737 for (timeout = 0; timeout < 50000; timeout++) {
738 if (test_bit(cpu, &cpu_callin_map))
739 break; /* It has booted */
740 udelay(100);
741 }
743 if (test_bit(cpu, &cpu_callin_map)) {
744 /* number CPUs logically, starting from 1 (BSP is 0) */
745 printk("CPU%d has booted.\n", cpu);
746 } else {
747 boot_error= 1;
748 if (*((volatile unsigned int *)phys_to_virt(start_eip))
749 == 0xA5A5A5A5)
750 /* trampoline started but...? */
751 printk("Stuck ??\n");
752 else
753 /* trampoline code not run */
754 printk("Not responding.\n");
755 #if APIC_DEBUG
756 inquire_remote_apic(apicid);
757 #endif
758 }
759 }
760 if (boot_error) {
761 /* Try to put things back the way they were before ... */
762 unmap_cpu_to_boot_apicid(cpu, apicid);
763 clear_bit(cpu, &cpu_callout_map); /* was set here (do_boot_cpu()) */
764 clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
765 clear_bit(cpu, &cpu_online_map); /* was set in smp_callin() */
766 cpucount--;
767 }
768 }
771 /*
772 * Cycle through the processors sending APIC IPIs to boot each.
773 */
775 static int boot_cpu_logical_apicid;
776 /* Where the IO area was mapped on multiquad, always 0 otherwise */
777 void *xquad_portio = NULL;
/*
 * BP entry point for SMP bring-up: register the boot CPU, sanity-check the
 * MP configuration and local APIC, then walk the present map booting each
 * AP via do_boot_cpu().  Falls back to uniprocessor operation on any of
 * the early bail-out paths.
 */
779 void __init smp_boot_cpus(void)
780 {
781 int apicid, bit;
783 /* Initialize the logical to physical CPU number mapping */
784 init_cpu_to_apicid();
786 /*
787 * Setup boot CPU information
788 */
789 smp_store_cpu_info(0); /* Final full version of the data */
790 printk("CPU%d booted\n", 0);
792 /*
793 * We have the boot CPU online for sure.
794 */
795 set_bit(0, &cpu_online_map);
796 boot_cpu_logical_apicid = logical_smp_processor_id();
797 map_cpu_to_boot_apicid(0, boot_cpu_apicid);
799 /*
800 * If we couldn't find an SMP configuration at boot time,
801 * get out of here now!
802 */
803 if (!smp_found_config) {
804 printk("SMP motherboard not detected.\n");
805 io_apic_irqs = 0;
806 cpu_online_map = phys_cpu_present_map = 1;
807 smp_num_cpus = 1;
808 if (APIC_init_uniprocessor())
809 printk("Local APIC not detected."
810 " Using dummy APIC emulation.\n");
811 goto smp_done;
812 }
814 /*
815 * Should not be necessary because the MP table should list the boot
816 * CPU too, but we do it for the sake of robustness anyway.
817 */
818 if (!test_bit(boot_cpu_physical_apicid, &phys_cpu_present_map)) {
819 printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
820 boot_cpu_physical_apicid);
821 phys_cpu_present_map |= (1 << hard_smp_processor_id());
822 }
824 /*
825 * If we couldn't find a local APIC, then get out of here now!
826 */
827 if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
828 !test_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability)) {
829 printk("BIOS bug, local APIC #%d not detected!...\n",
830 boot_cpu_physical_apicid);
831 printk("... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
832 io_apic_irqs = 0;
833 cpu_online_map = phys_cpu_present_map = 1;
834 smp_num_cpus = 1;
835 goto smp_done;
836 }
838 verify_local_APIC();
840 /*
841 * If SMP should be disabled, then really disable it!
842 */
843 if (!max_cpus) {
844 smp_found_config = 0;
845 printk("SMP mode deactivated, forcing use of dummy APIC emulation.\n");
846 io_apic_irqs = 0;
847 cpu_online_map = phys_cpu_present_map = 1;
848 smp_num_cpus = 1;
849 goto smp_done;
850 }
852 connect_bsp_APIC();
853 setup_local_APIC();
855 if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid)
856 BUG();
858 /*
859 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
860 *
861 * In clustered apic mode, phys_cpu_present_map is a constructed thus:
862 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
863 * clustered apic ID.
864 */
865 Dprintk("CPU present map: %lx\n", phys_cpu_present_map);
867 for (bit = 0; bit < NR_CPUS; bit++) {
868 apicid = cpu_present_to_apicid(bit);
869 /*
870 * Don't even attempt to start the boot CPU!
871 */
872 if (apicid == boot_cpu_apicid)
873 continue;
875 /*
876 * Don't start hyperthreads if option noht requested.
877 */
878 if (opt_noht && (apicid & (ht_per_core - 1)))
879 continue;
881 if (!(phys_cpu_present_map & (1 << bit)))
882 continue;
883 if ((max_cpus >= 0) && (max_cpus <= cpucount+1))
884 continue;
886 do_boot_cpu(apicid);
888 /*
889 * Make sure we unmap all failed CPUs
890 */
891 if ((boot_apicid_to_cpu(apicid) == -1) &&
892 (phys_cpu_present_map & (1 << bit)))
893 printk("CPU #%d not responding - cannot use it.\n",
894 apicid);
895 }
897 /*
898 * Cleanup possible dangling ends...
899 */
900 /*
901 * Install writable page 0 entry to set BIOS data area.
902 */
903 local_flush_tlb();
905 /*
906 * Paranoid: Set warm reset code and vector here back
907 * to default values.
908 */
909 CMOS_WRITE(0, 0xf);
911 *((volatile long *) phys_to_virt(0x467)) = 0;
913 if (!cpucount) {
914 printk("Error: only one processor found.\n");
915 } else {
916 printk("Total of %d processors activated.\n", cpucount+1);
917 }
918 smp_num_cpus = cpucount + 1;
920 Dprintk("Boot done.\n");
922 /*
923 * Here we can be sure that there is an IO-APIC in the system. Let's
924 * go and set it up:
925 */
926 if ( nr_ioapics ) setup_IO_APIC();
928 /* Set up all local APIC timers in the system. */
929 setup_APIC_clocks();
931 /* Synchronize the TSC with the AP(s). */
932 if ( cpucount ) synchronize_tsc_bp();
934 smp_done:
935 ;
936 }
938 #endif /* CONFIG_SMP */
940 /*
941 * Local variables:
942 * mode: C
943 * c-set-style: "BSD"
944 * c-basic-offset: 4
945 * tab-width: 4
946 * indent-tabs-mode: nil
947 * End:
948 */