ia64/xen-unstable: xen/arch/x86/smpboot.c @ 18806:ed8524f4a044

x86: Re-initialise HPET on resume from S3

Signed-off-by: Guanqun Lu <guanqun.lu@intel.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Nov 18 15:55:14 2008 +0000 (2008-11-18)
parents 8e18dd41c6c7
children 4d5203f95498
1 /*
2 * x86 SMP booting functions
3 *
4 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
5 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
6 *
7 * Much of the core SMP work is based on previous work by Thomas Radke, to
8 * whom a great many thanks are extended.
9 *
10 * Thanks to Intel for making available several different Pentium,
11 * Pentium Pro and Pentium-II/Xeon MP machines.
12 * Original development of Linux SMP code supported by Caldera.
13 *
14 * This code is released under the GNU General Public License version 2 or
15 * later.
16 *
17 * Fixes
18 * Felix Koop : NR_CPUS used properly
19 * Jose Renau : Handle single CPU case.
20 * Alan Cox : By repeated request 8) - Total BogoMIPS report.
21 * Greg Wright : Fix for kernel stacks panic.
22 * Erich Boleyn : MP v1.4 and additional changes.
23 * Matthias Sattler : Changes for 2.1 kernel map.
24 * Michel Lespinasse : Changes for 2.1 kernel map.
25 * Michael Chastain : Change trampoline.S to gnu as.
26 * Alan Cox : Dumb bug: 'B' step PPro's are fine
27 * Ingo Molnar : Added APIC timers, based on code
28 * from Jose Renau
29 * Ingo Molnar : various cleanups and rewrites
30 * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug.
31 * Maciej W. Rozycki : Bits for genuine 82489DX APICs
32 * Martin J. Bligh : Added support for multi-quad systems
33 * Dave Jones : Report invalid combinations of Athlon CPUs.
34 * Rusty Russell : Hacked into shape for new "hotplug" boot process. */
36 #include <xen/config.h>
37 #include <xen/init.h>
38 #include <xen/kernel.h>
39 #include <xen/mm.h>
40 #include <xen/domain.h>
41 #include <xen/sched.h>
42 #include <xen/irq.h>
43 #include <xen/delay.h>
44 #include <xen/softirq.h>
45 #include <xen/serial.h>
46 #include <xen/numa.h>
47 #include <asm/current.h>
48 #include <asm/mc146818rtc.h>
49 #include <asm/desc.h>
50 #include <asm/div64.h>
51 #include <asm/flushtlb.h>
52 #include <asm/msr.h>
53 #include <asm/mtrr.h>
54 #include <mach_apic.h>
55 #include <mach_wakecpu.h>
56 #include <smpboot_hooks.h>
57 #include <xen/stop_machine.h>
58 #include <acpi/cpufreq/processor_perf.h>
60 #define set_kernel_exec(x, y) (0)
61 #define setup_trampoline() (bootsym_phys(trampoline_realmode_entry))
63 /* Set if we find a B stepping CPU */
64 static int __devinitdata smp_b_stepping;
66 /* Number of siblings per CPU package */
67 int smp_num_siblings = 1;
68 #ifdef CONFIG_X86_HT
69 EXPORT_SYMBOL(smp_num_siblings);
70 #endif
72 /* Package ID of each logical CPU */
73 int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
75 /* Core ID of each logical CPU */
76 int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
78 /* representing HT siblings of each logical CPU */
79 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
80 EXPORT_SYMBOL(cpu_sibling_map);
82 /* representing HT and core siblings of each logical CPU */
83 cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
84 EXPORT_SYMBOL(cpu_core_map);
86 /* bitmap of online cpus */
87 cpumask_t cpu_online_map __read_mostly;
88 EXPORT_SYMBOL(cpu_online_map);
90 cpumask_t cpu_callin_map;
91 cpumask_t cpu_callout_map;
92 EXPORT_SYMBOL(cpu_callout_map);
93 cpumask_t cpu_possible_map;
94 EXPORT_SYMBOL(cpu_possible_map);
95 static cpumask_t smp_commenced_mask;
97 /* The TSC's upper 32 bits can't be written on earlier CPUs (before Prescott), so
98 * there is no way to resync one AP against the BP. TBD: for Prescott and above,
99 * we should use IA64's algorithm.
100 */
101 static int __devinitdata tsc_sync_disabled;
103 /* Per CPU bogomips and other parameters */
104 struct cpuinfo_x86 cpu_data[NR_CPUS];
105 EXPORT_SYMBOL(cpu_data);
107 u32 x86_cpu_to_apicid[NR_CPUS] __read_mostly =
108 { [0 ... NR_CPUS-1] = -1U };
109 EXPORT_SYMBOL(x86_cpu_to_apicid);
111 static void map_cpu_to_logical_apicid(void);
112 /* State of each CPU. */
113 DEFINE_PER_CPU(int, cpu_state) = { 0 };
115 static void *stack_base[NR_CPUS];
116 static DEFINE_SPINLOCK(cpu_add_remove_lock);
118 /*
119 * The bootstrap kernel entry code has set these up. Save them for
120 * a given CPU
121 */
123 static void __devinit smp_store_cpu_info(int id)
124 {
125 struct cpuinfo_x86 *c = cpu_data + id;
127 *c = boot_cpu_data;
128 if (id!=0)
129 identify_cpu(c);
130 /*
131 * Mask B, Pentium, but not Pentium MMX
132 */
133 if (c->x86_vendor == X86_VENDOR_INTEL &&
134 c->x86 == 5 &&
135 c->x86_mask >= 1 && c->x86_mask <= 4 &&
136 c->x86_model <= 3)
137 /*
138 * Remember we have B step Pentia with bugs
139 */
140 smp_b_stepping = 1;
142 /*
143 * Certain Athlons might work (for various values of 'work') in SMP
144 * but they are not certified as MP capable.
145 */
146 if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
148 /* Athlon 660/661 is valid. */
149 if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1)))
150 goto valid_k7;
152 /* Duron 670 is valid */
153 if ((c->x86_model==7) && (c->x86_mask==0))
154 goto valid_k7;
156 /*
157 * Athlon 662, Duron 671, and Athlons above model 7 have the capability bit.
158 * It's worth noting that the A5 stepping (662) of some Athlon XPs
159 * has the MP bit set.
160 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
161 */
162 if (((c->x86_model==6) && (c->x86_mask>=2)) ||
163 ((c->x86_model==7) && (c->x86_mask>=1)) ||
164 (c->x86_model> 7))
165 if (cpu_has_mp)
166 goto valid_k7;
168 /* If we get here, it's not a certified SMP capable AMD system. */
169 add_taint(TAINT_UNSAFE_SMP);
170 }
172 valid_k7:
173 ;
174 }
176 /*
177 * TSC synchronization.
178 *
179 * We first check whether all CPUs have their TSC's synchronized,
180 * then we print a warning if not, and always resync.
181 */
183 static atomic_t tsc_start_flag = ATOMIC_INIT(0);
184 static atomic_t tsc_count_start = ATOMIC_INIT(0);
185 static atomic_t tsc_count_stop = ATOMIC_INIT(0);
186 static unsigned long long tsc_values[NR_CPUS];
188 #define NR_LOOPS 5
190 static void __init synchronize_tsc_bp (void)
191 {
192 int i;
193 unsigned long long t0;
194 unsigned long long sum, avg;
195 long long delta;
196 unsigned int one_usec;
197 int buggy = 0;
199 printk("checking TSC synchronization across %u CPUs: ", num_booting_cpus());
201 /* convert from kcyc/sec to cyc/usec */
202 one_usec = cpu_khz / 1000;
204 atomic_set(&tsc_start_flag, 1);
205 wmb();
207 /*
208 * We loop a few times to get a primed instruction cache,
209 * then the last pass is more or less synchronized and
210 * the BP and APs set their cycle counters to zero all at
211 * once. This reduces the chance of having random offsets
212 * between the processors, and guarantees that the maximum
213 * delay between the cycle counters is never bigger than
214 * the latency of information-passing (cachelines) between
215 * two CPUs.
216 */
217 for (i = 0; i < NR_LOOPS; i++) {
218 /*
219 * all APs synchronize but they loop on '== num_cpus'
220 */
221 while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
222 mb();
223 atomic_set(&tsc_count_stop, 0);
224 wmb();
225 /*
226 * this lets the APs save their current TSC:
227 */
228 atomic_inc(&tsc_count_start);
230 rdtscll(tsc_values[smp_processor_id()]);
231 /*
232 * We clear the TSC in the last loop:
233 */
234 if (i == NR_LOOPS-1)
235 write_tsc(0, 0);
237 /*
238 * Wait for all APs to leave the synchronization point:
239 */
240 while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
241 mb();
242 atomic_set(&tsc_count_start, 0);
243 wmb();
244 atomic_inc(&tsc_count_stop);
245 }
247 sum = 0;
248 for (i = 0; i < NR_CPUS; i++) {
249 if (cpu_isset(i, cpu_callout_map)) {
250 t0 = tsc_values[i];
251 sum += t0;
252 }
253 }
254 avg = sum;
255 do_div(avg, num_booting_cpus());
257 sum = 0;
258 for (i = 0; i < NR_CPUS; i++) {
259 if (!cpu_isset(i, cpu_callout_map))
260 continue;
261 delta = tsc_values[i] - avg;
262 if (delta < 0)
263 delta = -delta;
264 /*
265 * We report clock differences bigger than 2 microseconds.
266 */
267 if (delta > 2*one_usec) {
268 long realdelta;
269 if (!buggy) {
270 buggy = 1;
271 printk("\n");
272 }
273 realdelta = delta;
274 do_div(realdelta, one_usec);
275 if (tsc_values[i] < avg)
276 realdelta = -realdelta;
278 printk("CPU#%d had %ld usecs TSC skew, fixed it up.\n", i, realdelta);
279 }
281 sum += delta;
282 }
283 if (!buggy)
284 printk("passed.\n");
285 }
287 static void __init synchronize_tsc_ap (void)
288 {
289 int i;
291 /*
292 * Not every cpu is online at the time
293 * this gets called, so we first wait for the BP to
294 * finish SMP initialization:
295 */
296 while (!atomic_read(&tsc_start_flag)) mb();
298 for (i = 0; i < NR_LOOPS; i++) {
299 atomic_inc(&tsc_count_start);
300 while (atomic_read(&tsc_count_start) != num_booting_cpus())
301 mb();
303 rdtscll(tsc_values[smp_processor_id()]);
304 if (i == NR_LOOPS-1)
305 write_tsc(0, 0);
307 atomic_inc(&tsc_count_stop);
308 while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
309 }
310 }
311 #undef NR_LOOPS
313 extern void calibrate_delay(void);
315 static atomic_t init_deasserted;
317 void __devinit smp_callin(void)
318 {
319 int cpuid, phys_id, i;
321 /*
322 * If woken up by an INIT in an 82489DX configuration
323 * we may get here before an INIT-deassert IPI reaches
324 * our local APIC. We have to wait for the IPI or we'll
325 * lock up on an APIC access.
326 */
327 wait_for_init_deassert(&init_deasserted);
329 if ( x2apic_enabled )
330 enable_x2apic();
332 /*
333 * (This works even if the APIC is not enabled.)
334 */
335 phys_id = get_apic_id();
336 cpuid = smp_processor_id();
337 if (cpu_isset(cpuid, cpu_callin_map)) {
338 printk("huh, phys CPU#%d, CPU#%d already present??\n",
339 phys_id, cpuid);
340 BUG();
341 }
342 Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
344 /*
345 * STARTUP IPIs are fragile beasts as they might sometimes
346 * trigger some glue motherboard logic. Complete APIC bus
347 * silence for 1 second; this overestimates the time the
348 * boot CPU spends sending the (up to 2) STARTUP IPIs
349 * by a factor of two. This should be enough.
350 */
352 /*
353 * Waiting 2s total for startup
354 */
355 for (i = 0; i < 200; i++) {
356 /*
357 * Has the boot CPU finished its STARTUP sequence?
358 */
359 if (cpu_isset(cpuid, cpu_callout_map))
360 break;
361 rep_nop();
362 mdelay(10);
363 }
365 if (!cpu_isset(cpuid, cpu_callout_map)) {
366 printk("BUG: CPU%d started up but did not get a callout!\n",
367 cpuid);
368 BUG();
369 }
371 /*
372 * the boot CPU has finished the init stage and is spinning
373 * on callin_map until we finish. We are free to set up this
374 * CPU, first the APIC. (this is probably redundant on most
375 * boards)
376 */
378 Dprintk("CALLIN, before setup_local_APIC().\n");
379 smp_callin_clear_local_apic();
380 setup_local_APIC();
381 map_cpu_to_logical_apicid();
383 #if 0
384 /*
385 * Get our bogomips.
386 */
387 calibrate_delay();
388 Dprintk("Stack at about %p\n",&cpuid);
389 #endif
391 /*
392 * Save our processor parameters
393 */
394 smp_store_cpu_info(cpuid);
396 disable_APIC_timer();
398 /*
399 * Allow the master to continue.
400 */
401 cpu_set(cpuid, cpu_callin_map);
403 /*
404 * Synchronize the TSC with the BP
405 */
406 if (cpu_has_tsc && cpu_khz && !tsc_sync_disabled) {
407 synchronize_tsc_ap();
408 /* No sync for same reason as above */
409 calibrate_tsc_ap();
410 }
411 }
413 static int cpucount, booting_cpu;
415 /* representing cpus for which sibling maps can be computed */
416 static cpumask_t cpu_sibling_setup_map;
418 static inline void
419 set_cpu_sibling_map(int cpu)
420 {
421 int i;
422 struct cpuinfo_x86 *c = cpu_data;
424 cpu_set(cpu, cpu_sibling_setup_map);
426 if (smp_num_siblings > 1) {
427 for_each_cpu_mask(i, cpu_sibling_setup_map) {
428 if (phys_proc_id[cpu] == phys_proc_id[i] &&
429 cpu_core_id[cpu] == cpu_core_id[i]) {
430 cpu_set(i, cpu_sibling_map[cpu]);
431 cpu_set(cpu, cpu_sibling_map[i]);
432 cpu_set(i, cpu_core_map[cpu]);
433 cpu_set(cpu, cpu_core_map[i]);
434 }
435 }
436 } else {
437 cpu_set(cpu, cpu_sibling_map[cpu]);
438 }
440 if (current_cpu_data.x86_max_cores == 1) {
441 cpu_core_map[cpu] = cpu_sibling_map[cpu];
442 c[cpu].booted_cores = 1;
443 return;
444 }
446 for_each_cpu_mask(i, cpu_sibling_setup_map) {
447 if (phys_proc_id[cpu] == phys_proc_id[i]) {
448 cpu_set(i, cpu_core_map[cpu]);
449 cpu_set(cpu, cpu_core_map[i]);
450 /*
451 * Does this new cpu bring up a new core?
452 */
453 if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
454 /*
455 * for each core in package, increment
456 * the booted_cores for this new cpu
457 */
458 if (first_cpu(cpu_sibling_map[i]) == i)
459 c[cpu].booted_cores++;
460 /*
461 * increment the core count for all
462 * the other cpus in this package
463 */
464 if (i != cpu)
465 c[i].booted_cores++;
466 } else if (i != cpu && !c[cpu].booted_cores)
467 c[cpu].booted_cores = c[i].booted_cores;
468 }
469 }
470 }
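471 /* Build a 10-byte lidt operand (16-bit limit plus base of idt_tables[cpu]) and load it. */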
472 static void construct_percpu_idt(unsigned int cpu)
473 {
474 unsigned char idt_load[10];
476 *(unsigned short *)(&idt_load[0]) = (IDT_ENTRIES*sizeof(idt_entry_t))-1;
477 *(unsigned long *)(&idt_load[2]) = (unsigned long)idt_tables[cpu];
478 __asm__ __volatile__ ( "lidt %0" : "=m" (idt_load) );
479 }
481 /*
482 * Activate a secondary processor.
483 */
484 void __devinit start_secondary(void *unused)
485 {
486 /*
487 * Don't put anything before smp_callin(); SMP
488 * booting is so fragile that we want to limit the
489 * things done here to the bare minimum.
490 */
491 unsigned int cpu = booting_cpu;
493 set_processor_id(cpu);
494 set_current(idle_vcpu[cpu]);
495 this_cpu(curr_vcpu) = idle_vcpu[cpu];
496 if ( cpu_has_efer )
497 rdmsrl(MSR_EFER, this_cpu(efer));
498 asm volatile ( "mov %%cr4,%0" : "=r" (this_cpu(cr4)) );
500 percpu_traps_init();
502 cpu_init();
503 /*preempt_disable();*/
504 smp_callin();
505 while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
506 rep_nop();
508 /*
509 * At this point, boot CPU has fully initialised the IDT. It is
510 * now safe to make ourselves a private copy.
511 */
512 construct_percpu_idt(cpu);
514 setup_secondary_APIC_clock();
515 enable_APIC_timer();
516 /*
517 * low-memory mappings have been cleared, flush them from
518 * the local TLBs too.
519 */
520 flush_tlb_local();
522 /* This must be done before setting cpu_online_map */
523 set_cpu_sibling_map(raw_smp_processor_id());
524 wmb();
526 cpu_set(smp_processor_id(), cpu_online_map);
527 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
529 init_percpu_time();
531 /* We can take interrupts now: we're officially "up". */
532 local_irq_enable();
534 wmb();
535 startup_cpu_idle_loop();
536 }
538 extern struct {
539 void * esp;
540 unsigned short ss;
541 } stack_start;
543 u32 cpu_2_logical_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
545 static void map_cpu_to_logical_apicid(void)
546 {
547 int cpu = smp_processor_id();
548 int apicid = logical_smp_processor_id();
550 cpu_2_logical_apicid[cpu] = apicid;
551 }
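552 /* Invalidate the logical APIC ID recorded for a CPU that failed to boot or is going offline. */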
553 static void unmap_cpu_to_logical_apicid(int cpu)
554 {
555 cpu_2_logical_apicid[cpu] = BAD_APICID;
556 }
558 #if APIC_DEBUG
559 static inline void __inquire_remote_apic(int apicid)
560 {
561 int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
562 char *names[] = { "ID", "VERSION", "SPIV" };
563 int timeout, status;
565 printk("Inquiring remote APIC #%d...\n", apicid);
567 for (i = 0; i < ARRAY_SIZE(regs); i++) {
568 printk("... APIC #%d %s: ", apicid, names[i]);
570 /*
571 * Wait for idle.
572 */
573 apic_wait_icr_idle();
575 apic_icr_write(APIC_DM_REMRD | regs[i], apicid);
577 timeout = 0;
578 do {
579 udelay(100);
580 status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
581 } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
583 switch (status) {
584 case APIC_ICR_RR_VALID:
585 status = apic_read(APIC_RRR);
586 printk("%08x\n", status);
587 break;
588 default:
589 printk("failed\n");
590 }
591 }
592 }
593 #endif
595 #ifdef WAKE_SECONDARY_VIA_NMI
597 static int logical_apicid_to_cpu(int logical_apicid)
598 {
599 int i;
601 for ( i = 0; i < sizeof(cpu_2_logical_apicid); i++ )
602 if ( cpu_2_logical_apicid[i] == logical_apicid )
603 break;
605 if ( i == sizeof(cpu_2_logical_apicid) )
606 i = -1; /* not found */
608 return i;
609 }
611 /*
612 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
613 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
614 * won't ... remember to clear down the APIC, etc later.
615 */
616 static int __devinit
617 wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
618 {
619 unsigned long send_status = 0, accept_status = 0;
620 int timeout, maxlvt;
621 int dest_cpu;
622 u32 dest;
624 dest_cpu = logical_apicid_to_cpu(logical_apicid);
625 BUG_ON(dest_cpu == -1);
627 dest = cpu_physical_id(dest_cpu);
629 /* Boot on the stack */
630 apic_icr_write(APIC_DM_NMI | APIC_DEST_PHYSICAL, dest_cpu);
632 Dprintk("Waiting for send to finish...\n");
633 timeout = 0;
634 do {
635 Dprintk("+");
636 udelay(100);
637 if ( !x2apic_enabled )
638 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
639 else
640 send_status = 0; /* We go out of the loop directly. */
641 } while (send_status && (timeout++ < 1000));
643 /*
644 * Give the other CPU some time to accept the IPI.
645 */
646 udelay(200);
647 /*
648 * Due to the Pentium erratum 3AP.
649 */
650 maxlvt = get_maxlvt();
651 if (maxlvt > 3) {
652 apic_read_around(APIC_SPIV);
653 apic_write(APIC_ESR, 0);
654 }
655 accept_status = (apic_read(APIC_ESR) & 0xEF);
656 Dprintk("NMI sent.\n");
658 if (send_status)
659 printk("APIC never delivered???\n");
660 if (accept_status)
661 printk("APIC delivery error (%lx).\n", accept_status);
663 return (send_status | accept_status);
664 }
665 #endif /* WAKE_SECONDARY_VIA_NMI */
667 #ifdef WAKE_SECONDARY_VIA_INIT
668 static int __devinit
669 wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
670 {
671 unsigned long send_status = 0, accept_status = 0;
672 int maxlvt, timeout, num_starts, j;
674 /*
675 * Be paranoid about clearing APIC errors.
676 */
677 if (APIC_INTEGRATED(apic_version[phys_apicid])) {
678 apic_read_around(APIC_SPIV);
679 apic_write(APIC_ESR, 0);
680 apic_read(APIC_ESR);
681 }
683 Dprintk("Asserting INIT.\n");
685 /*
686 * Turn INIT on target chip via IPI
687 */
688 apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
689 phys_apicid);
691 Dprintk("Waiting for send to finish...\n");
692 timeout = 0;
693 do {
694 Dprintk("+");
695 udelay(100);
696 if ( !x2apic_enabled )
697 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
698 else
699 send_status = 0; /* We go out of the loop directly. */
700 } while (send_status && (timeout++ < 1000));
702 mdelay(10);
704 Dprintk("Deasserting INIT.\n");
706 apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
708 Dprintk("Waiting for send to finish...\n");
709 timeout = 0;
710 do {
711 Dprintk("+");
712 udelay(100);
713 if ( !x2apic_enabled )
714 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
715 else
716 send_status = 0; /* We go out of the loop directly. */
717 } while (send_status && (timeout++ < 1000));
719 atomic_set(&init_deasserted, 1);
721 /*
722 * Should we send STARTUP IPIs ?
723 *
724 * Determine this based on the APIC version.
725 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
726 */
727 if (APIC_INTEGRATED(apic_version[phys_apicid]))
728 num_starts = 2;
729 else
730 num_starts = 0;
732 /*
733 * Run STARTUP IPI loop.
734 */
735 Dprintk("#startup loops: %d.\n", num_starts);
737 maxlvt = get_maxlvt();
739 for (j = 1; j <= num_starts; j++) {
740 Dprintk("Sending STARTUP #%d.\n",j);
741 apic_read_around(APIC_SPIV);
742 apic_write(APIC_ESR, 0);
743 apic_read(APIC_ESR);
744 Dprintk("After apic_write.\n");
746 /*
747 * STARTUP IPI
748 * Boot on the stack
749 */
750 apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12), phys_apicid);
752 /*
753 * Give the other CPU some time to accept the IPI.
754 */
755 udelay(300);
757 Dprintk("Startup point 1.\n");
759 Dprintk("Waiting for send to finish...\n");
760 timeout = 0;
761 do {
762 Dprintk("+");
763 udelay(100);
764 if ( !x2apic_enabled )
765 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
766 else
767 send_status = 0; /* We go out of the loop directly. */
768 } while (send_status && (timeout++ < 1000));
770 /*
771 * Give the other CPU some time to accept the IPI.
772 */
773 udelay(200);
774 /*
775 * Due to the Pentium erratum 3AP.
776 */
777 if (maxlvt > 3) {
778 apic_read_around(APIC_SPIV);
779 apic_write(APIC_ESR, 0);
780 }
781 accept_status = (apic_read(APIC_ESR) & 0xEF);
782 if (send_status || accept_status)
783 break;
784 }
785 Dprintk("After Startup.\n");
787 if (send_status)
788 printk("APIC never delivered???\n");
789 if (accept_status)
790 printk("APIC delivery error (%lx).\n", accept_status);
792 return (send_status | accept_status);
793 }
794 #endif /* WAKE_SECONDARY_VIA_INIT */
796 extern cpumask_t cpu_initialized;
797 static inline int alloc_cpu_id(void)
798 {
799 cpumask_t tmp_map;
800 int cpu;
801 cpus_complement(tmp_map, cpu_present_map);
802 cpu = first_cpu(tmp_map);
803 if (cpu >= NR_CPUS)
804 return -ENODEV;
805 return cpu;
806 }
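807 /* Allocate a CPU's idle stack on first boot; reuse the same pages if the CPU is brought up again. */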
808 static void *prepare_idle_stack(unsigned int cpu)
809 {
810 if (!stack_base[cpu])
811 stack_base[cpu] = alloc_xenheap_pages(STACK_ORDER);
813 return stack_base[cpu];
814 }
816 static int __devinit do_boot_cpu(int apicid, int cpu)
817 /*
818 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
819 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
820 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
821 */
822 {
823 unsigned long boot_error;
824 unsigned int order;
825 int timeout;
826 unsigned long start_eip;
827 unsigned short nmi_high = 0, nmi_low = 0;
828 struct vcpu *v;
829 struct desc_struct *gdt;
830 #ifdef __x86_64__
831 struct page_info *page;
832 #endif
834 /*
835 * Save current MTRR state in case it was changed since early boot
836 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
837 */
838 mtrr_save_state();
840 ++cpucount;
842 booting_cpu = cpu;
844 v = alloc_idle_vcpu(cpu);
845 BUG_ON(v == NULL);
847 /* start_eip had better be page-aligned! */
848 start_eip = setup_trampoline();
850 /* So we see what's up */
851 printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
853 stack_start.esp = prepare_idle_stack(cpu);
855 /* Debug build: detect stack overflow by setting up a guard page. */
856 memguard_guard_stack(stack_start.esp);
858 gdt = per_cpu(gdt_table, cpu);
859 if (gdt == boot_cpu_gdt_table) {
860 order = get_order_from_pages(NR_RESERVED_GDT_PAGES);
861 #ifdef __x86_64__
862 #ifdef CONFIG_COMPAT
863 page = alloc_domheap_pages(NULL, order,
864 MEMF_node(cpu_to_node(cpu)));
865 per_cpu(compat_gdt_table, cpu) = gdt = page_to_virt(page);
866 memcpy(gdt, boot_cpu_compat_gdt_table,
867 NR_RESERVED_GDT_PAGES * PAGE_SIZE);
868 gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
869 #endif
870 page = alloc_domheap_pages(NULL, order,
871 MEMF_node(cpu_to_node(cpu)));
872 per_cpu(gdt_table, cpu) = gdt = page_to_virt(page);
873 #else
874 per_cpu(gdt_table, cpu) = gdt = alloc_xenheap_pages(order);
875 #endif
876 memcpy(gdt, boot_cpu_gdt_table,
877 NR_RESERVED_GDT_PAGES * PAGE_SIZE);
878 BUILD_BUG_ON(NR_CPUS > 0x10000);
879 gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
880 }
882 #ifdef __i386__
883 if (!per_cpu(doublefault_tss, cpu)) {
884 per_cpu(doublefault_tss, cpu) = alloc_xenheap_page();
885 memset(per_cpu(doublefault_tss, cpu), 0, PAGE_SIZE);
886 }
887 #endif
889 if (!idt_tables[cpu]) {
890 idt_tables[cpu] = xmalloc_array(idt_entry_t, IDT_ENTRIES);
891 memcpy(idt_tables[cpu], idt_table,
892 IDT_ENTRIES*sizeof(idt_entry_t));
893 }
895 /*
896 * This grunge runs the startup process for
897 * the targeted processor.
898 */
900 atomic_set(&init_deasserted, 0);
902 Dprintk("Setting warm reset code and vector.\n");
904 store_NMI_vector(&nmi_high, &nmi_low);
906 smpboot_setup_warm_reset_vector(start_eip);
908 /*
909 * Starting actual IPI sequence...
910 */
911 boot_error = wakeup_secondary_cpu(apicid, start_eip);
913 if (!boot_error) {
914 /*
915 * allow APs to start initializing.
916 */
917 Dprintk("Before Callout %d.\n", cpu);
918 cpu_set(cpu, cpu_callout_map);
919 Dprintk("After Callout %d.\n", cpu);
921 /*
922 * Wait 5s total for a response
923 */
924 for (timeout = 0; timeout < 50000; timeout++) {
925 if (cpu_isset(cpu, cpu_callin_map))
926 break; /* It has booted */
927 udelay(100);
928 }
930 if (cpu_isset(cpu, cpu_callin_map)) {
931 /* number CPUs logically, starting from 1 (BSP is 0) */
932 Dprintk("OK.\n");
933 printk("CPU%d: ", cpu);
934 print_cpu_info(&cpu_data[cpu]);
935 Dprintk("CPU has booted.\n");
936 } else {
937 boot_error = 1;
938 mb();
939 if (bootsym(trampoline_cpu_started) == 0xA5)
940 /* trampoline started but...? */
941 printk("Stuck ??\n");
942 else
943 /* trampoline code not run */
944 printk("Not responding.\n");
945 inquire_remote_apic(apicid);
946 }
947 }
949 if (boot_error) {
950 /* Try to put things back the way they were before ... */
951 unmap_cpu_to_logical_apicid(cpu);
952 cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
953 cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
954 cpucount--;
955 } else {
956 x86_cpu_to_apicid[cpu] = apicid;
957 cpu_set(cpu, cpu_present_map);
958 }
960 /* mark "stuck" area as not stuck */
961 bootsym(trampoline_cpu_started) = 0;
962 mb();
964 return boot_error;
965 }
967 #ifdef CONFIG_HOTPLUG_CPU
968 static void idle_task_exit(void)
969 {
970 /* Give up lazy state borrowed by this idle vcpu */
971 __sync_lazy_execstate();
972 }
974 void cpu_exit_clear(void)
975 {
976 int cpu = raw_smp_processor_id();
978 idle_task_exit();
980 cpucount --;
981 cpu_uninit();
983 cpu_clear(cpu, cpu_callout_map);
984 cpu_clear(cpu, cpu_callin_map);
986 cpu_clear(cpu, smp_commenced_mask);
987 unmap_cpu_to_logical_apicid(cpu);
988 }
990 static int __cpuinit __smp_prepare_cpu(int cpu)
991 {
992 int apicid, ret;
994 apicid = x86_cpu_to_apicid[cpu];
995 if (apicid == BAD_APICID) {
996 ret = -ENODEV;
997 goto exit;
998 }
1000 tsc_sync_disabled = 1;
1002 do_boot_cpu(apicid, cpu);
1004 tsc_sync_disabled = 0;
1006 ret = 0;
1007 exit:
1008 return ret;
1009 }
1010 #endif
1012 /*
1013 * Cycle through the processors sending APIC IPIs to boot each.
1014 */
1016 /* Where the IO area was mapped on multiquad, always 0 otherwise */
1017 void *xquad_portio;
1018 #ifdef CONFIG_X86_NUMAQ
1019 EXPORT_SYMBOL(xquad_portio);
1020 #endif
1022 static void __init smp_boot_cpus(unsigned int max_cpus)
1023 {
1024 int apicid, cpu, bit, kicked;
1025 #ifdef BOGOMIPS
1026 unsigned long bogosum = 0;
1027 #endif
1029 /*
1030 * Setup boot CPU information
1031 */
1032 smp_store_cpu_info(0); /* Final full version of the data */
1033 printk("CPU%d: ", 0);
1034 print_cpu_info(&cpu_data[0]);
1036 boot_cpu_physical_apicid = get_apic_id();
1037 x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
1039 stack_base[0] = stack_start.esp;
1041 /*current_thread_info()->cpu = 0;*/
1042 /*smp_tune_scheduling();*/
1044 set_cpu_sibling_map(0);
1046 /*
1047 * If we couldn't find an SMP configuration at boot time,
1048 * get out of here now!
1049 */
1050 if (!smp_found_config && !acpi_lapic) {
1051 printk(KERN_NOTICE "SMP motherboard not detected.\n");
1052 init_uniprocessor:
1053 phys_cpu_present_map = physid_mask_of_physid(0);
1054 if (APIC_init_uniprocessor())
1055 printk(KERN_NOTICE "Local APIC not detected."
1056 " Using dummy APIC emulation.\n");
1057 map_cpu_to_logical_apicid();
1058 cpu_set(0, cpu_sibling_map[0]);
1059 cpu_set(0, cpu_core_map[0]);
1060 return;
1061 }
1063 /*
1064 * Should not be necessary because the MP table should list the boot
1065 * CPU too, but we do it for the sake of robustness anyway.
1066 * Makes no sense to do this check in clustered apic mode, so skip it
1067 */
1068 if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
1069 printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
1070 boot_cpu_physical_apicid);
1071 physid_set(hard_smp_processor_id(), phys_cpu_present_map);
1072 }
1074 /*
1075 * If we couldn't find a local APIC, then get out of here now!
1076 */
1077 if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
1078 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
1079 boot_cpu_physical_apicid);
1080 goto init_uniprocessor;
1081 }
1083 verify_local_APIC();
1085 /*
1086 * If SMP should be disabled, then really disable it!
1087 */
1088 if (!max_cpus)
1089 goto init_uniprocessor;
1091 connect_bsp_APIC();
1092 setup_local_APIC();
1093 map_cpu_to_logical_apicid();
1096 setup_portio_remap();
1098 /*
1099 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
1101 * In clustered apic mode, phys_cpu_present_map is constructed thus:
1102 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
1103 * clustered apic ID.
1104 */
1105 Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));
1107 kicked = 1;
1108 for (bit = 0; kicked < NR_CPUS && bit < NR_CPUS; bit++) {
1109 apicid = cpu_present_to_apicid(bit);
1110 /*
1111 * Don't even attempt to start the boot CPU!
1112 */
1113 if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
1114 continue;
1116 if (!check_apicid_present(apicid))
1117 continue;
1118 if (max_cpus <= cpucount+1)
1119 continue;
1121 if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
1122 printk("CPU #%d not responding - cannot use it.\n",
1123 apicid);
1124 else
1125 ++kicked;
1126 }
1128 /*
1129 * Cleanup possible dangling ends...
1130 */
1131 smpboot_restore_warm_reset_vector();
1133 #ifdef BOGOMIPS
1134 /*
1135 * Allow the user to impress friends.
1136 */
1137 Dprintk("Before bogomips.\n");
1138 for (cpu = 0; cpu < NR_CPUS; cpu++)
1139 if (cpu_isset(cpu, cpu_callout_map))
1140 bogosum += cpu_data[cpu].loops_per_jiffy;
1141 printk(KERN_INFO
1142 "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
1143 cpucount+1,
1144 bogosum/(500000/HZ),
1145 (bogosum/(5000/HZ))%100);
1146 #else
1147 printk("Total of %d processors activated.\n", cpucount+1);
1148 #endif
1150 Dprintk("Before bogocount - setting activated=1.\n");
1152 if (smp_b_stepping)
1153 printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");
1155 /*
1156 * Don't taint if we are running an SMP kernel on a single non-MP-
1157 * approved Athlon.
1158 */
1159 if (tainted & TAINT_UNSAFE_SMP) {
1160 if (cpucount)
1161 printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
1162 else
1163 tainted &= ~TAINT_UNSAFE_SMP;
1164 }
1166 Dprintk("Boot done.\n");
1168 /*
1169 * construct cpu_sibling_map[], so that we can tell sibling CPUs
1170 * efficiently.
1171 */
1172 for (cpu = 0; cpu < NR_CPUS; cpu++) {
1173 cpus_clear(cpu_sibling_map[cpu]);
1174 cpus_clear(cpu_core_map[cpu]);
1175 }
1177 cpu_set(0, cpu_sibling_map[0]);
1178 cpu_set(0, cpu_core_map[0]);
1180 if (nmi_watchdog == NMI_LOCAL_APIC)
1181 check_nmi_watchdog();
1183 smpboot_setup_io_apic();
1185 setup_boot_APIC_clock();
1187 /*
1188 * Synchronize the TSC with the AP
1189 */
1190 if (cpu_has_tsc && cpucount && cpu_khz)
1191 synchronize_tsc_bp();
1192 calibrate_tsc_bp();
1193 }
1195 /* These are wrappers to interface to the new boot process. Someone
1196 who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
1197 void __init smp_prepare_cpus(unsigned int max_cpus)
1198 {
1199 smp_commenced_mask = cpumask_of_cpu(0);
1200 cpu_callin_map = cpumask_of_cpu(0);
1201 mb();
1202 smp_boot_cpus(max_cpus);
1203 }
1205 void __devinit smp_prepare_boot_cpu(void)
1206 {
1207 cpu_set(smp_processor_id(), cpu_online_map);
1208 cpu_set(smp_processor_id(), cpu_callout_map);
1209 cpu_set(smp_processor_id(), cpu_present_map);
1210 cpu_set(smp_processor_id(), cpu_possible_map);
1211 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
1212 }
1214 #ifdef CONFIG_HOTPLUG_CPU
1215 static void
1216 remove_siblinginfo(int cpu)
1217 {
1218 int sibling;
1219 struct cpuinfo_x86 *c = cpu_data;
1221 for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
1222 cpu_clear(cpu, cpu_core_map[sibling]);
1223 /*
1224 * last thread sibling in this cpu core going down
1225 */
1226 if (cpus_weight(cpu_sibling_map[cpu]) == 1)
1227 c[sibling].booted_cores--;
1228 }
1230 for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
1231 cpu_clear(cpu, cpu_sibling_map[sibling]);
1232 cpus_clear(cpu_sibling_map[cpu]);
1233 cpus_clear(cpu_core_map[cpu]);
1234 phys_proc_id[cpu] = BAD_APICID;
1235 cpu_core_id[cpu] = BAD_APICID;
1236 cpu_clear(cpu, cpu_sibling_setup_map);
1237 }
1239 extern void fixup_irqs(cpumask_t map);
1240 int __cpu_disable(void)
1241 {
1242 cpumask_t map = cpu_online_map;
1243 int cpu = smp_processor_id();
1245 /*
1246 * Perhaps use cpufreq to drop frequency, but that could go
1247 * into generic code.
1249 * We won't take down the boot processor on i386 due to some
1250 * interrupts only being able to be serviced by the BSP.
1251 * Especially so if we're not using an IOAPIC -zwane
1252 */
1253 if (cpu == 0)
1254 return -EBUSY;
1256 local_irq_disable();
1257 clear_local_APIC();
1258 /* Allow any queued timer interrupts to get serviced */
1259 local_irq_enable();
1260 mdelay(1);
1261 local_irq_disable();
1263 cpufreq_del_cpu(cpu);
1265 time_suspend();
1267 remove_siblinginfo(cpu);
1269 cpu_clear(cpu, map);
1270 fixup_irqs(map);
1271 /* It's now safe to remove this processor from the online map */
1272 cpu_clear(cpu, cpu_online_map);
1274 cpu_disable_scheduler();
1276 return 0;
1277 }
1279 void __cpu_die(unsigned int cpu)
1280 {
1281 /* We don't do anything here: idle task is faking death itself. */
1282 unsigned int i;
1284 for (i = 0; i < 10; i++) {
1285 /* They ack this in play_dead by setting CPU_DEAD */
1286 if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
1287 printk ("CPU %d is now offline\n", cpu);
1288 return;
1289 }
1290 mdelay(100);
1291 mb();
1292 process_pending_timers();
1293 }
1294 printk(KERN_ERR "CPU %u didn't die...\n", cpu);
1295 }
1297 static int take_cpu_down(void *unused)
1298 {
1299 return __cpu_disable();
1300 }
1302 int cpu_down(unsigned int cpu)
1303 {
1304 int err = 0;
1306 spin_lock(&cpu_add_remove_lock);
1307 if (num_online_cpus() == 1) {
1308 err = -EBUSY;
1309 goto out;
1310 }
1312 /* Can not offline BSP */
1313 if (cpu == 0) {
1314 err = -EINVAL;
1315 goto out;
1316 }
1318 if (!cpu_online(cpu)) {
1319 err = -EINVAL;
1320 goto out;
1321 }
1323 printk("Prepare to bring CPU%d down...\n", cpu);
1325 err = stop_machine_run(take_cpu_down, NULL, cpu);
1326 if ( err < 0 )
1327 goto out;
1329 __cpu_die(cpu);
1331 if (cpu_online(cpu)) {
1332 printk("Bad state (DEAD, but in online map) on CPU%d\n", cpu);
1333 err = -EBUSY;
1334 }
1335 out:
1336 spin_unlock(&cpu_add_remove_lock);
1337 return err;
1338 }
1340 int cpu_up(unsigned int cpu)
1341 {
1342 int err = 0;
1344 spin_lock(&cpu_add_remove_lock);
1345 if (cpu_online(cpu)) {
1346 printk("Bring up an online cpu. Bogus!\n");
1347 err = -EBUSY;
1348 goto out;
1349 }
1351 err = __cpu_up(cpu);
1352 if (err < 0)
1353 goto out;
1355 out:
1356 spin_unlock(&cpu_add_remove_lock);
1357 return err;
1358 }
1360 /* From kernel/power/main.c */
1361 /* This is protected by pm_sem semaphore */
1362 static cpumask_t frozen_cpus;
1364 void disable_nonboot_cpus(void)
1365 {
1366 int cpu, error;
1368 error = 0;
1369 cpus_clear(frozen_cpus);
1370 printk("Freezing cpus ...\n");
1371 for_each_online_cpu(cpu) {
1372 if (cpu == 0)
1373 continue;
1374 error = cpu_down(cpu);
1375 if (!error) {
1376 cpu_set(cpu, frozen_cpus);
1377 printk("CPU%d is down\n", cpu);
1378 continue;
1379 }
1380 printk("Error taking cpu %d down: %d\n", cpu, error);
1381 }
1382 BUG_ON(raw_smp_processor_id() != 0);
1383 if (error)
1384 panic("cpus not sleeping");
1385 }
1387 void enable_nonboot_cpus(void)
1388 {
1389 int cpu, error;
1391 printk("Thawing cpus ...\n");
1392 for_each_cpu_mask(cpu, frozen_cpus) {
1393 error = cpu_up(cpu);
1394 if (!error) {
1395 printk("CPU%d is up\n", cpu);
1396 continue;
1397 }
1398 printk("Error taking cpu %d up: %d\n", cpu, error);
1399 panic("Not enough cpus");
1400 }
1401 cpus_clear(frozen_cpus);
1403 /*
1404 * Cleanup possible dangling ends after sleep...
1405 */
1406 smpboot_restore_warm_reset_vector();
1407 }
1408 #else /* ... !CONFIG_HOTPLUG_CPU */
1409 int __cpu_disable(void)
1410 {
1411 return -ENOSYS;
1412 }
1414 void __cpu_die(unsigned int cpu)
1415 {
1416 /* We said "no" in __cpu_disable */
1417 BUG();
1418 }
1419 #endif /* CONFIG_HOTPLUG_CPU */
1421 int __devinit __cpu_up(unsigned int cpu)
1422 {
1423 #ifdef CONFIG_HOTPLUG_CPU
1424 int ret=0;
1426 /*
1427 * We do warm boot only on cpus that had booted earlier;
1428 * otherwise cold boot is all handled from smp_boot_cpus().
1429 * cpu_callin_map is set during the AP kickstart process. It's reset
1430 * when a cpu is taken offline in cpu_exit_clear().
1431 */
1432 if (!cpu_isset(cpu, cpu_callin_map))
1433 ret = __smp_prepare_cpu(cpu);
1435 if (ret)
1436 return -EIO;
1437 #endif
1439 /* In case one didn't come up */
1440 if (!cpu_isset(cpu, cpu_callin_map)) {
1441 printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
1442 local_irq_enable();
1443 return -EIO;
1444 }
1446 local_irq_enable();
1447 /*per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;*/
1448 /* Unleash the CPU! */
1449 cpu_set(cpu, smp_commenced_mask);
1450 while (!cpu_isset(cpu, cpu_online_map)) {
1451 mb();
1452 process_pending_timers();
1453 }
1455 cpufreq_add_cpu(cpu);
1456 return 0;
1457 }
1460 void __init smp_cpus_done(unsigned int max_cpus)
1461 {
1462 #ifdef CONFIG_X86_IO_APIC
1463 setup_ioapic_dest();
1464 #endif
1465 #ifndef CONFIG_HOTPLUG_CPU
1466 /*
1467 * Disable executability of the SMP trampoline:
1468 */
1469 set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
1470 #endif
1471 }
1473 void __init smp_intr_init(void)
1474 {
1475 int irq, seridx;
1477 /*
1478 * IRQ0 must be given a fixed assignment and initialized,
1479 * because it's used before the IO-APIC is set up.
1480 */
1481 irq_vector[0] = FIRST_HIPRIORITY_VECTOR;
1482 vector_irq[FIRST_HIPRIORITY_VECTOR] = 0;
1484 /*
1485 * Also ensure serial interrupts are high priority. We do not
1486 * want them to be blocked by unacknowledged guest-bound interrupts.
1487 */
1488 for (seridx = 0; seridx < 2; seridx++) {
1489 if ((irq = serial_irq(seridx)) < 0)
1490 continue;
1491 irq_vector[irq] = FIRST_HIPRIORITY_VECTOR + seridx + 1;
1492 vector_irq[FIRST_HIPRIORITY_VECTOR + seridx + 1] = irq;
1493 }
1495 /* IPI for event checking. */
1496 set_intr_gate(EVENT_CHECK_VECTOR, event_check_interrupt);
1498 /* IPI for invalidation */
1499 set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
1501 /* IPI for generic function call */
1502 set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
1503 }