direct-io.hg

view xen/arch/x86/mpparse.c @ 15516:fa9fa5f98c91

Simplify Unisys ES7000 code in light of the fact we do not support
legacy boxes with very non-standard APIC setup.
From: Raj Subrahmanian <raj.subrahmanian@unisys.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Tue Jul 10 10:07:00 2007 +0100 (2007-07-10)
parents f07cf18343f1
children d7e3224b661a
1 /*
2 * Intel Multiprocessor Specification 1.1 and 1.4
3 * compliant MP-table parsing routines.
4 *
5 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
6 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
7 *
8 * Fixes
9 * Erich Boleyn : MP v1.4 and additional changes.
10 * Alan Cox : Added EBDA scanning
11 * Ingo Molnar : various cleanups and rewrites
12 * Maciej W. Rozycki: Bits for default MP configurations
13 * Paul Diefenbaugh: Added full ACPI support
14 */
16 #include <xen/config.h>
17 #include <xen/types.h>
18 #include <xen/irq.h>
19 #include <xen/init.h>
20 #include <xen/acpi.h>
21 #include <xen/delay.h>
22 #include <xen/sched.h>
24 #include <asm/mc146818rtc.h>
25 #include <asm/bitops.h>
26 #include <asm/smp.h>
27 #include <asm/acpi.h>
28 #include <asm/mtrr.h>
29 #include <asm/mpspec.h>
30 #include <asm/io_apic.h>
32 #include <mach_apic.h>
33 #include <mach_mpparse.h>
34 #include <bios_ebda.h>
36 /* Have we found an MP table */
37 int smp_found_config;
38 unsigned int __initdata maxcpus = NR_CPUS;
40 /*
41 * Various Linux-internal data structures created from the
42 * MP-table.
43 */
44 int apic_version [MAX_APICS];
45 int mp_bus_id_to_type [MAX_MP_BUSSES];
46 int mp_bus_id_to_node [MAX_MP_BUSSES];
47 int mp_bus_id_to_local [MAX_MP_BUSSES];
48 int quad_local_to_mp_bus_id [NR_CPUS/4][4];
49 int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
50 static int mp_current_pci_id;
52 /* I/O APIC entries */
53 struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
55 /* # of MP IRQ source entries */
56 struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
58 /* MP IRQ source entries */
59 int mp_irq_entries;
61 int nr_ioapics;
63 int pic_mode;
64 unsigned long mp_lapic_addr;
66 unsigned int def_to_bigsmp = 0;
68 /* Processor that is doing the boot up */
69 unsigned int boot_cpu_physical_apicid = -1U;
70 /* Internal processor count */
71 static unsigned int __devinitdata num_processors;
73 /* Bitmask of physically existing CPUs */
74 physid_mask_t phys_cpu_present_map;
76 u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
78 /*
79 * Intel MP BIOS table parsing routines:
80 */
83 /*
84 * Checksum an MP configuration block.
85 */
87 static int __init mpf_checksum(unsigned char *mp, int len)
88 {
89 int sum = 0;
91 while (len--)
92 sum += *mp++;
94 return sum & 0xFF;
95 }
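/*
 * Illustrative sketch, not part of the original file: the MP spec requires
 * each structure (floating pointer, configuration table, OEM table) to sum
 * to zero modulo 256 including its checksum byte, so a non-zero return from
 * mpf_checksum() above means the structure is corrupt.  A producer would
 * choose the checksum byte like this (helper name is hypothetical):
 */
static unsigned char mp_make_checksum(unsigned char *mp, int len)
{
    unsigned char sum = 0;

    while (len--)
        sum += *mp++;

    return -sum; /* the byte value that makes the total 0 mod 256 */
}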
97 /*
98 * Have to match translation table entries to main table entries by counter
99 * hence the mpc_record variable .... can't see a less disgusting way of
100 * doing this ....
101 */
103 static int mpc_record;
104 static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata;
106 static void __devinit MP_processor_info (struct mpc_config_processor *m)
107 {
108 int ver, apicid;
109 physid_mask_t phys_cpu;
111 if (!(m->mpc_cpuflag & CPU_ENABLED))
112 return;
114 apicid = mpc_apic_id(m, translation_table[mpc_record]);
116 if (m->mpc_featureflag&(1<<0))
117 Dprintk(" Floating point unit present.\n");
118 if (m->mpc_featureflag&(1<<7))
119 Dprintk(" Machine Exception supported.\n");
120 if (m->mpc_featureflag&(1<<8))
121 Dprintk(" 64 bit compare & exchange supported.\n");
122 if (m->mpc_featureflag&(1<<9))
123 Dprintk(" Internal APIC present.\n");
124 if (m->mpc_featureflag&(1<<11))
125 Dprintk(" SEP present.\n");
126 if (m->mpc_featureflag&(1<<12))
127 Dprintk(" MTRR present.\n");
128 if (m->mpc_featureflag&(1<<13))
129 Dprintk(" PGE present.\n");
130 if (m->mpc_featureflag&(1<<14))
131 Dprintk(" MCA present.\n");
132 if (m->mpc_featureflag&(1<<15))
133 Dprintk(" CMOV present.\n");
134 if (m->mpc_featureflag&(1<<16))
135 Dprintk(" PAT present.\n");
136 if (m->mpc_featureflag&(1<<17))
137 Dprintk(" PSE present.\n");
138 if (m->mpc_featureflag&(1<<18))
139 Dprintk(" PSN present.\n");
140 if (m->mpc_featureflag&(1<<19))
141 Dprintk(" Cache Line Flush Instruction present.\n");
142 /* 20 Reserved */
143 if (m->mpc_featureflag&(1<<21))
144 Dprintk(" Debug Trace and EMON Store present.\n");
145 if (m->mpc_featureflag&(1<<22))
146 Dprintk(" ACPI Thermal Throttle Registers present.\n");
147 if (m->mpc_featureflag&(1<<23))
148 Dprintk(" MMX present.\n");
149 if (m->mpc_featureflag&(1<<24))
150 Dprintk(" FXSR present.\n");
151 if (m->mpc_featureflag&(1<<25))
152 Dprintk(" XMM present.\n");
153 if (m->mpc_featureflag&(1<<26))
154 Dprintk(" Willamette New Instructions present.\n");
155 if (m->mpc_featureflag&(1<<27))
156 Dprintk(" Self Snoop present.\n");
157 if (m->mpc_featureflag&(1<<28))
158 Dprintk(" HT present.\n");
159 if (m->mpc_featureflag&(1<<29))
160 Dprintk(" Thermal Monitor present.\n");
161 /* 30, 31 Reserved */
164 if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
165 Dprintk(" Bootup CPU\n");
166 boot_cpu_physical_apicid = m->mpc_apicid;
167 }
169 ver = m->mpc_apicver;
171 /*
172 * Validate version
173 */
174 if (ver == 0x0) {
175 printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
176 "fixing up to 0x10. (tell your hw vendor)\n",
177 m->mpc_apicid);
178 ver = 0x10;
179 }
180 apic_version[m->mpc_apicid] = ver;
182 phys_cpu = apicid_to_cpu_present(apicid);
183 physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
185 if (num_processors >= NR_CPUS) {
186 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
187 " Processor ignored.\n", NR_CPUS);
188 return;
189 }
191 if (num_processors >= maxcpus) {
192 printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
193 " Processor ignored.\n", maxcpus);
194 return;
195 }
197 cpu_set(num_processors, cpu_possible_map);
198 num_processors++;
200 if (num_processors > 8) {
201 /*
202 * No need for processor or APIC checks: physical delivery
203 * (bigsmp) mode should always work.
204 */
205 def_to_bigsmp = 1;
206 }
207 bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
208 }
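/*
 * Illustrative sketch, not part of the original file: the mpc_featureflag
 * word decoded bit by bit above carries the CPUID leaf-1 EDX feature bits
 * reported for that processor, so any capability can be tested with a plain
 * mask.  The macro names below are hypothetical; the bit positions are the
 * ones already used in MP_processor_info().
 */
#define MP_FEAT_FPU  (1u << 0)   /* floating point unit */
#define MP_FEAT_APIC (1u << 9)   /* internal (local) APIC */
#define MP_FEAT_HT   (1u << 28)  /* hyper-threading */

static int mp_cpu_has(struct mpc_config_processor *m, unsigned int mask)
{
    return (m->mpc_featureflag & mask) != 0;
}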
210 static void __init MP_bus_info (struct mpc_config_bus *m)
211 {
212 char str[7];
214 memcpy(str, m->mpc_bustype, 6);
215 str[6] = 0;
217 mpc_oem_bus_info(m, str, translation_table[mpc_record]);
219 #if 0 /* size of mpc_busid (8 bits) makes this check unnecessary */
220 if (m->mpc_busid >= MAX_MP_BUSSES) {
221 printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
222 " is too large, max. supported is %d\n",
223 m->mpc_busid, str, MAX_MP_BUSSES - 1);
224 return;
225 }
226 #endif
228 if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
229 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
230 } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
231 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
232 } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
233 mpc_oem_pci_bus(m, translation_table[mpc_record]);
234 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
235 mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
236 mp_current_pci_id++;
237 } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
238 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
239 } else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) {
240 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98;
241 } else {
242 printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
243 }
244 }
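/*
 * Illustrative sketch, not part of the original file: mpc_bustype is a
 * fixed six-character, blank-padded field rather than a NUL-terminated
 * string, which is why MP_bus_info() copies it into a seven-byte buffer and
 * matches it with strncmp() on the meaningful prefix only, e.g.
 * mp_bustype_is(m, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1).  Helper name is
 * hypothetical.
 */
static int mp_bustype_is(struct mpc_config_bus *m, const char *name, int n)
{
    char str[7];

    memcpy(str, m->mpc_bustype, 6);
    str[6] = 0;

    return strncmp(str, name, n) == 0;
}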
246 static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
247 {
248 if (!(m->mpc_flags & MPC_APIC_USABLE))
249 return;
251 printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n",
252 m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
253 if (nr_ioapics >= MAX_IO_APICS) {
254 printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
255 MAX_IO_APICS, nr_ioapics);
256 panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
257 }
258 if (!m->mpc_apicaddr) {
259 printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
260 " found in MP table, skipping!\n");
261 return;
262 }
263 mp_ioapics[nr_ioapics] = *m;
264 nr_ioapics++;
265 }
267 static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
268 {
269 mp_irqs [mp_irq_entries] = *m;
270 Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
271 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
272 m->mpc_irqtype, m->mpc_irqflag & 3,
273 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
274 m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
275 if (++mp_irq_entries == MAX_IRQ_SOURCES)
276 panic("Max # of irq sources exceeded!!\n");
277 }
279 static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
280 {
281 Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
282 " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
283 m->mpc_irqtype, m->mpc_irqflag & 3,
284 (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
285 m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
286 /*
287 * Well it seems all SMP boards in existence
288 * use ExtINT/LVT1 == LINT0 and
289 * NMI/LVT2 == LINT1 - the following check
290 * will show us if this assumption is false.
291 * Until then we do not have to add baggage.
292 */
293 if ((m->mpc_irqtype == mp_ExtINT) &&
294 (m->mpc_destapiclint != 0))
295 BUG();
296 if ((m->mpc_irqtype == mp_NMI) &&
297 (m->mpc_destapiclint != 1))
298 BUG();
299 }
301 #ifdef CONFIG_X86_NUMAQ
302 static void __init MP_translation_info (struct mpc_config_translation *m)
303 {
304 printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);
306 if (mpc_record >= MAX_MPC_ENTRY)
307 printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
308 else
309 translation_table[mpc_record] = m; /* stash this for later */
310 if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
311 node_set_online(m->trans_quad);
312 }
314 /*
315 * Read/parse the MPC oem tables
316 */
318 static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
319 unsigned short oemsize)
320 {
321 int count = sizeof (*oemtable); /* the header size */
322 unsigned char *oemptr = ((unsigned char *)oemtable)+count;
324 mpc_record = 0;
325 printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
326 if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
327 {
328 printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
329 oemtable->oem_signature[0],
330 oemtable->oem_signature[1],
331 oemtable->oem_signature[2],
332 oemtable->oem_signature[3]);
333 return;
334 }
335 if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
336 {
337 printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
338 return;
339 }
340 while (count < oemtable->oem_length) {
341 switch (*oemptr) {
342 case MP_TRANSLATION:
343 {
344 struct mpc_config_translation *m=
345 (struct mpc_config_translation *)oemptr;
346 MP_translation_info(m);
347 oemptr += sizeof(*m);
348 count += sizeof(*m);
349 ++mpc_record;
350 break;
351 }
352 default:
353 {
354 printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
355 return;
356 }
357 }
358 }
359 }
361 static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
362 char *productid)
363 {
364 if (strncmp(oem, "IBM NUMA", 8))
365 printk("Warning! May not be a NUMA-Q system!\n");
366 if (mpc->mpc_oemptr)
367 smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
368 mpc->mpc_oemsize);
369 }
370 #endif /* CONFIG_X86_NUMAQ */
372 /*
373 * Read/parse the MPC
374 */
376 static int __init smp_read_mpc(struct mp_config_table *mpc)
377 {
378 char str[16];
379 char oem[10];
380 int count=sizeof(*mpc);
381 unsigned char *mpt=((unsigned char *)mpc)+count;
383 if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
384 printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
385 *(u32 *)mpc->mpc_signature);
386 return 0;
387 }
388 if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
389 printk(KERN_ERR "SMP mptable: checksum error!\n");
390 return 0;
391 }
392 if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
393 printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
394 mpc->mpc_spec);
395 return 0;
396 }
397 if (!mpc->mpc_lapic) {
398 printk(KERN_ERR "SMP mptable: null local APIC address!\n");
399 return 0;
400 }
401 memcpy(oem,mpc->mpc_oem,8);
402 oem[8]=0;
403 printk(KERN_INFO "OEM ID: %s ",oem);
405 memcpy(str,mpc->mpc_productid,12);
406 str[12]=0;
407 printk("Product ID: %s ",str);
409 mps_oem_check(mpc, oem, str);
411 printk("APIC at: 0x%X\n",mpc->mpc_lapic);
413 /*
414 * Save the local APIC address (it might be non-default) -- but only
415 * if we're not using ACPI.
416 */
417 if (!acpi_lapic)
418 mp_lapic_addr = mpc->mpc_lapic;
420 /*
421 * Now process the configuration blocks.
422 */
423 mpc_record = 0;
424 while (count < mpc->mpc_length) {
425 switch(*mpt) {
426 case MP_PROCESSOR:
427 {
428 struct mpc_config_processor *m=
429 (struct mpc_config_processor *)mpt;
430 /* ACPI may have already provided this data */
431 if (!acpi_lapic)
432 MP_processor_info(m);
433 mpt += sizeof(*m);
434 count += sizeof(*m);
435 break;
436 }
437 case MP_BUS:
438 {
439 struct mpc_config_bus *m=
440 (struct mpc_config_bus *)mpt;
441 MP_bus_info(m);
442 mpt += sizeof(*m);
443 count += sizeof(*m);
444 break;
445 }
446 case MP_IOAPIC:
447 {
448 struct mpc_config_ioapic *m=
449 (struct mpc_config_ioapic *)mpt;
450 MP_ioapic_info(m);
451 mpt+=sizeof(*m);
452 count+=sizeof(*m);
453 break;
454 }
455 case MP_INTSRC:
456 {
457 struct mpc_config_intsrc *m=
458 (struct mpc_config_intsrc *)mpt;
460 MP_intsrc_info(m);
461 mpt+=sizeof(*m);
462 count+=sizeof(*m);
463 break;
464 }
465 case MP_LINTSRC:
466 {
467 struct mpc_config_lintsrc *m=
468 (struct mpc_config_lintsrc *)mpt;
469 MP_lintsrc_info(m);
470 mpt+=sizeof(*m);
471 count+=sizeof(*m);
472 break;
473 }
474 default:
475 {
476 count = mpc->mpc_length;
477 break;
478 }
479 }
480 ++mpc_record;
481 }
482 clustered_apic_check();
483 if (!num_processors)
484 printk(KERN_ERR "SMP mptable: no processors registered!\n");
485 return num_processors;
486 }
488 static int __init ELCR_trigger(unsigned int irq)
489 {
490 unsigned int port;
492 port = 0x4d0 + (irq >> 3);
493 return (inb(port) >> (irq & 7)) & 1;
494 }
496 static void __init construct_default_ioirq_mptable(int mpc_default_type)
497 {
498 struct mpc_config_intsrc intsrc;
499 int i;
500 int ELCR_fallback = 0;
502 intsrc.mpc_type = MP_INTSRC;
503 intsrc.mpc_irqflag = 0; /* conforming */
504 intsrc.mpc_srcbus = 0;
505 intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
507 intsrc.mpc_irqtype = mp_INT;
509 /*
510 * If true, we have an ISA/PCI system with no IRQ entries
511 * in the MP table. To prevent the PCI interrupts from being set up
512 * incorrectly, we try to use the ELCR. The sanity check to see if
513 * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
514 * never be level sensitive, so we simply see if the ELCR agrees.
515 * If it does, we assume it's valid.
516 */
517 if (mpc_default_type == 5) {
518 printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
520 if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
521 printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
522 else {
523 printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
524 ELCR_fallback = 1;
525 }
526 }
528 for (i = 0; i < 16; i++) {
529 switch (mpc_default_type) {
530 case 2:
531 if (i == 0 || i == 13)
532 continue; /* IRQ0 & IRQ13 not connected */
533 /* fall through */
534 default:
535 if (i == 2)
536 continue; /* IRQ2 is never connected */
537 }
539 if (ELCR_fallback) {
540 /*
541 * If the ELCR indicates a level-sensitive interrupt, we
542 * copy that information over to the MP table in the
543 * irqflag field (level sensitive, active high polarity).
544 */
545 if (ELCR_trigger(i))
546 intsrc.mpc_irqflag = 13;
547 else
548 intsrc.mpc_irqflag = 0;
549 }
551 intsrc.mpc_srcbusirq = i;
552 intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
553 MP_intsrc_info(&intsrc);
554 }
556 intsrc.mpc_irqtype = mp_ExtINT;
557 intsrc.mpc_srcbusirq = 0;
558 intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
559 MP_intsrc_info(&intsrc);
560 }
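/*
 * Illustrative sketch, not part of the original file: mpc_irqflag packs the
 * MP-spec polarity in bits 0-1 and the trigger mode in bits 2-3, which is
 * why "level sensitive, active high" above is written as the constant 13
 * (binary 1101 = trigger 3 << 2 | polarity 1), and why MP_intsrc_info()
 * prints (mpc_irqflag >> 2) & 3 as the trigger.  Helper name is hypothetical.
 */
static void mp_decode_irqflag(unsigned int irqflag,
                              unsigned int *polarity, unsigned int *trigger)
{
    *polarity = irqflag & 3;        /* 0 = conforming, 1 = high, 3 = low */
    *trigger  = (irqflag >> 2) & 3; /* 0 = conforming, 1 = edge, 3 = level */
}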
562 static inline void __init construct_default_ISA_mptable(int mpc_default_type)
563 {
564 struct mpc_config_processor processor;
565 struct mpc_config_bus bus;
566 struct mpc_config_ioapic ioapic;
567 struct mpc_config_lintsrc lintsrc;
568 int linttypes[2] = { mp_ExtINT, mp_NMI };
569 int i;
571 /*
572 * local APIC has default address
573 */
574 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
576 /*
577 * 2 CPUs, numbered 0 & 1.
578 */
579 processor.mpc_type = MP_PROCESSOR;
580 /* Either an integrated APIC or a discrete 82489DX. */
581 processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
582 processor.mpc_cpuflag = CPU_ENABLED;
583 processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
584 (boot_cpu_data.x86_model << 4) |
585 boot_cpu_data.x86_mask;
586 processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
587 processor.mpc_reserved[0] = 0;
588 processor.mpc_reserved[1] = 0;
589 for (i = 0; i < 2; i++) {
590 processor.mpc_apicid = i;
591 MP_processor_info(&processor);
592 }
594 bus.mpc_type = MP_BUS;
595 bus.mpc_busid = 0;
596 switch (mpc_default_type) {
597 default:
598 printk("???\n");
599 printk(KERN_ERR "Unknown standard configuration %d\n",
600 mpc_default_type);
601 /* fall through */
602 case 1:
603 case 5:
604 memcpy(bus.mpc_bustype, "ISA   ", 6);
605 break;
606 case 2:
607 case 6:
608 case 3:
609 memcpy(bus.mpc_bustype, "EISA  ", 6);
610 break;
611 case 4:
612 case 7:
613 memcpy(bus.mpc_bustype, "MCA   ", 6);
614 }
615 MP_bus_info(&bus);
616 if (mpc_default_type > 4) {
617 bus.mpc_busid = 1;
618 memcpy(bus.mpc_bustype, "PCI   ", 6);
619 MP_bus_info(&bus);
620 }
622 ioapic.mpc_type = MP_IOAPIC;
623 ioapic.mpc_apicid = 2;
624 ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
625 ioapic.mpc_flags = MPC_APIC_USABLE;
626 ioapic.mpc_apicaddr = 0xFEC00000;
627 MP_ioapic_info(&ioapic);
629 /*
630 * We set up most of the low 16 IO-APIC pins according to MPS rules.
631 */
632 construct_default_ioirq_mptable(mpc_default_type);
634 lintsrc.mpc_type = MP_LINTSRC;
635 lintsrc.mpc_irqflag = 0; /* conforming */
636 lintsrc.mpc_srcbusid = 0;
637 lintsrc.mpc_srcbusirq = 0;
638 lintsrc.mpc_destapic = MP_APIC_ALL;
639 for (i = 0; i < 2; i++) {
640 lintsrc.mpc_irqtype = linttypes[i];
641 lintsrc.mpc_destapiclint = i;
642 MP_lintsrc_info(&lintsrc);
643 }
644 }
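/*
 * Illustrative sketch, not part of the original file: mpc_cpufeature is
 * filled in above (and again in mp_register_lapic() below) as the CPUID
 * signature packed as family << 8 | model << 4 | stepping, so the fields
 * can be recovered like this.  Helper name is hypothetical.
 */
static void mp_decode_cpufeature(unsigned int cpufeature, unsigned int *family,
                                 unsigned int *model, unsigned int *stepping)
{
    *family   = cpufeature >> 8;         /* boot_cpu_data.x86 */
    *model    = (cpufeature >> 4) & 0xf; /* boot_cpu_data.x86_model */
    *stepping = cpufeature & 0xf;        /* boot_cpu_data.x86_mask */
}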
646 static struct intel_mp_floating *mpf_found;
648 /*
649 * Scan the memory blocks for an SMP configuration block.
650 */
651 void __init get_smp_config (void)
652 {
653 struct intel_mp_floating *mpf = mpf_found;
655 /*
656 * ACPI supports both logical (e.g. Hyper-Threading) and physical
657 * processors, where MPS only supports physical.
658 */
659 if (acpi_lapic && acpi_ioapic) {
660 printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
661 return;
662 }
663 else if (acpi_lapic)
664 printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
666 printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
667 if (mpf->mpf_feature2 & (1<<7)) {
668 printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
669 pic_mode = 1;
670 } else {
671 printk(KERN_INFO " Virtual Wire compatibility mode.\n");
672 pic_mode = 0;
673 }
675 /*
676 * Now see if we need to read further.
677 */
678 if (mpf->mpf_feature1 != 0) {
680 printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
681 construct_default_ISA_mptable(mpf->mpf_feature1);
683 } else if (mpf->mpf_physptr) {
685 /*
686 * Read the physical hardware table. Anything here will
687 * override the defaults.
688 */
689 if (!smp_read_mpc((void *)(unsigned long)mpf->mpf_physptr)) {
690 smp_found_config = 0;
691 printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
692 printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
693 return;
694 }
695 /*
696 * If there are no explicit MP IRQ entries, then we are
697 * broken. We set up most of the low 16 IO-APIC pins to
698 * ISA defaults and hope it will work.
699 */
700 if (!mp_irq_entries) {
701 struct mpc_config_bus bus;
703 printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
705 bus.mpc_type = MP_BUS;
706 bus.mpc_busid = 0;
707 memcpy(bus.mpc_bustype, "ISA   ", 6);
708 MP_bus_info(&bus);
710 construct_default_ioirq_mptable(0);
711 }
713 } else
714 BUG();
716 printk(KERN_INFO "Processors: %d\n", num_processors);
717 /*
718 * Only use the first configuration found.
719 */
720 }
722 static int __init smp_scan_config (unsigned long base, unsigned long length)
723 {
724 unsigned int *bp = maddr_to_virt(base);
725 struct intel_mp_floating *mpf;
727 Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
728 if (sizeof(*mpf) != 16)
729 printk("Error: MPF size\n");
731 while (length > 0) {
732 mpf = (struct intel_mp_floating *)bp;
733 if ((*bp == SMP_MAGIC_IDENT) &&
734 (mpf->mpf_length == 1) &&
735 !mpf_checksum((unsigned char *)bp, 16) &&
736 ((mpf->mpf_specification == 1)
737 || (mpf->mpf_specification == 4)) ) {
739 smp_found_config = 1;
740 printk(KERN_INFO "found SMP MP-table at %08lx\n",
741 virt_to_maddr(mpf));
742 #if 0
743 reserve_bootmem(virt_to_maddr(mpf), PAGE_SIZE);
744 if (mpf->mpf_physptr) {
745 /*
746 * We cannot access the MPC table to compute its
747 * size yet, as only a few megabytes from the
748 * bottom of memory are mapped at this point.
749 * The PC-9800 places its MPC table at the very end
750 * of physical memory, so simply reserving PAGE_SIZE
751 * from mpf->mpf_physptr would trigger a BUG()
752 * in reserve_bootmem.
753 */
754 unsigned long size = PAGE_SIZE;
755 unsigned long end = max_low_pfn * PAGE_SIZE;
756 if (mpf->mpf_physptr + size > end)
757 size = end - mpf->mpf_physptr;
758 reserve_bootmem(mpf->mpf_physptr, size);
759 }
760 #endif
761 mpf_found = mpf;
762 return 1;
763 }
764 bp += 4;
765 length -= 16;
766 }
767 return 0;
768 }
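/*
 * Illustrative sketch, not part of the original file: the scan above steps
 * through memory in 16-byte strides (bp += 4 on an unsigned int pointer,
 * length -= 16) comparing one 32-bit word against SMP_MAGIC_IDENT, which is
 * simply the ASCII signature "_MP_" as it lies in memory on little-endian
 * x86.  Helper name is hypothetical.
 */
static int mp_signature_matches(unsigned int word)
{
    unsigned int magic = '_' | ('M' << 8) | ('P' << 16) | ('_' << 24);

    return word == magic;
}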
770 void __init find_smp_config (void)
771 {
772 unsigned int address;
774 /*
775 * FIXME: Linux assumes you have 640K of base ram..
776 * this continues the error...
777 *
778 * 1) Scan the bottom 1K for a signature
779 * 2) Scan the top 1K of base RAM
780 * 3) Scan the 64K of bios
781 */
782 if (smp_scan_config(0x0,0x400) ||
783 smp_scan_config(639*0x400,0x400) ||
784 smp_scan_config(0xF0000,0x10000))
785 return;
786 /*
787 * If it is an SMP machine we should know now, unless the
788 * configuration is in an EISA/MCA bus machine with an
789 * extended bios data area.
790 *
791 * there is a real-mode segmented pointer pointing to the
792 * 4K EBDA area at 0x40E, calculate and scan it here.
793 *
794 * NOTE! There are Linux loaders that will corrupt the EBDA
795 * area, and as such this kind of SMP config may be less
796 * trustworthy, simply because the SMP table may have been
797 * stomped on during early boot. These loaders are buggy and
798 * should be fixed.
799 *
800 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
801 */
803 address = get_bios_ebda();
804 if (address)
805 smp_scan_config(address, 0x400);
806 }
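/*
 * Illustrative sketch, not part of the original file: get_bios_ebda() is
 * conventionally the real-mode segment word stored at physical address
 * 0x40E in the BIOS data area, shifted left by four bits to give the linear
 * address of the EBDA that is scanned above.  The helper only illustrates
 * that arithmetic; its name is hypothetical.
 */
static unsigned long ebda_segment_to_linear(unsigned short ebda_segment)
{
    return (unsigned long)ebda_segment << 4; /* segment:0 -> linear address */
}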
808 /* --------------------------------------------------------------------------
809 ACPI-based MP Configuration
810 -------------------------------------------------------------------------- */
812 #ifdef CONFIG_ACPI
814 void __init mp_register_lapic_address (
815 u64 address)
816 {
817 mp_lapic_addr = (unsigned long) address;
819 set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
821 if (boot_cpu_physical_apicid == -1U)
822 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
824 Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
825 }
828 void __devinit mp_register_lapic (
829 u8 id,
830 u8 enabled)
831 {
832 struct mpc_config_processor processor;
833 int boot_cpu = 0;
835 if (MAX_APICS - id <= 0) {
836 printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
837 id, MAX_APICS);
838 return;
839 }
841 if (id == boot_cpu_physical_apicid)
842 boot_cpu = 1;
844 processor.mpc_type = MP_PROCESSOR;
845 processor.mpc_apicid = id;
846 processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
847 processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
848 processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
849 processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
850 (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
851 processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
852 processor.mpc_reserved[0] = 0;
853 processor.mpc_reserved[1] = 0;
855 MP_processor_info(&processor);
856 }
858 #ifdef CONFIG_X86_IO_APIC
860 #define MP_ISA_BUS 0
861 #define MP_MAX_IOAPIC_PIN 127
863 static struct mp_ioapic_routing {
864 int apic_id;
865 int gsi_base;
866 int gsi_end;
867 u32 pin_programmed[4];
868 } mp_ioapic_routing[MAX_IO_APICS];
871 static int mp_find_ioapic (
872 int gsi)
873 {
874 int i = 0;
876 /* Find the IOAPIC that manages this GSI. */
877 for (i = 0; i < nr_ioapics; i++) {
878 if ((gsi >= mp_ioapic_routing[i].gsi_base)
879 && (gsi <= mp_ioapic_routing[i].gsi_end))
880 return i;
881 }
883 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
885 return -1;
886 }
889 void __init mp_register_ioapic (
890 u8 id,
891 u32 address,
892 u32 gsi_base)
893 {
894 int idx = 0;
895 int tmpid;
897 if (nr_ioapics >= MAX_IO_APICS) {
898 printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
899 "(found %d)\n", MAX_IO_APICS, nr_ioapics);
900 panic("Recompile kernel with bigger MAX_IO_APICS!\n");
901 }
902 if (!address) {
903 printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
904 " found in MADT table, skipping!\n");
905 return;
906 }
908 idx = nr_ioapics++;
910 mp_ioapics[idx].mpc_type = MP_IOAPIC;
911 mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
912 mp_ioapics[idx].mpc_apicaddr = address;
914 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
915 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
916 && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
917 tmpid = io_apic_get_unique_id(idx, id);
918 else
919 tmpid = id;
920 if (tmpid == -1) {
921 nr_ioapics--;
922 return;
923 }
924 mp_ioapics[idx].mpc_apicid = tmpid;
925 mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
927 /*
928 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
929 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
930 */
931 mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
932 mp_ioapic_routing[idx].gsi_base = gsi_base;
933 mp_ioapic_routing[idx].gsi_end = gsi_base +
934 io_apic_get_redir_entries(idx);
936 printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
937 "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
938 mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
939 mp_ioapic_routing[idx].gsi_base,
940 mp_ioapic_routing[idx].gsi_end);
942 return;
943 }
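/*
 * Illustrative sketch, not part of the original file: the gsi_base/gsi_end
 * window recorded above is inclusive at both ends, as the range test in
 * mp_find_ioapic() shows, so an IOAPIC at gsi_base 0 whose highest
 * redirection-entry index is 23 answers for GSIs 0 through 23.  Helper name
 * is hypothetical.
 */
static int mp_gsi_in_window(int gsi, struct mp_ioapic_routing *r)
{
    return gsi >= r->gsi_base && gsi <= r->gsi_end;
}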
946 void __init mp_override_legacy_irq (
947 u8 bus_irq,
948 u8 polarity,
949 u8 trigger,
950 u32 gsi)
951 {
952 struct mpc_config_intsrc intsrc;
953 int ioapic = -1;
954 int pin = -1;
956 /*
957 * Convert 'gsi' to 'ioapic.pin'.
958 */
959 ioapic = mp_find_ioapic(gsi);
960 if (ioapic < 0)
961 return;
962 pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
964 /*
965 * TBD: This check is for faulty timer entries, where the override
966 * erroneously sets the trigger to level, resulting in a HUGE
967 * increase of timer interrupts!
968 */
969 if ((bus_irq == 0) && (trigger == 3))
970 trigger = 1;
972 intsrc.mpc_type = MP_INTSRC;
973 intsrc.mpc_irqtype = mp_INT;
974 intsrc.mpc_irqflag = (trigger << 2) | polarity;
975 intsrc.mpc_srcbus = MP_ISA_BUS;
976 intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
977 intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
978 intsrc.mpc_dstirq = pin; /* INTIN# */
980 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
981 intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
982 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
983 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
985 mp_irqs[mp_irq_entries] = intsrc;
986 if (++mp_irq_entries == MAX_IRQ_SOURCES)
987 panic("Max # of irq sources exceeded!\n");
989 return;
990 }
992 void __init mp_config_acpi_legacy_irqs (void)
993 {
994 struct mpc_config_intsrc intsrc;
995 int i = 0;
996 int ioapic = -1;
998 /*
999 * Fabricate the legacy ISA bus (bus #31).
1000 */
1001 mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
1002 Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
1004 /*
1005 * Locate the IOAPIC that manages the ISA IRQs (0-15).
1006 */
1007 ioapic = mp_find_ioapic(0);
1008 if (ioapic < 0)
1009 return;
1011 intsrc.mpc_type = MP_INTSRC;
1012 intsrc.mpc_irqflag = 0; /* Conforming */
1013 intsrc.mpc_srcbus = MP_ISA_BUS;
1014 intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
1016 /*
1017 * Use the default configuration for IRQs 0-15, unless it is
1018 * overridden by (MADT) interrupt source override entries.
1019 */
1020 for (i = 0; i < 16; i++) {
1021 int idx;
1023 for (idx = 0; idx < mp_irq_entries; idx++) {
1024 struct mpc_config_intsrc *irq = mp_irqs + idx;
1026 /* Do we already have a mapping for this ISA IRQ? */
1027 if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
1028 break;
1030 /* Do we already have a mapping for this IOAPIC pin */
1031 if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
1032 (irq->mpc_dstirq == i))
1033 break;
1034 }
1036 if (idx != mp_irq_entries) {
1037 printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
1038 continue; /* IRQ already used */
1039 }
1041 intsrc.mpc_irqtype = mp_INT;
1042 intsrc.mpc_srcbusirq = i; /* Identity mapped */
1043 intsrc.mpc_dstirq = i;
1045 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
1046 "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
1047 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
1048 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
1049 intsrc.mpc_dstirq);
1051 mp_irqs[mp_irq_entries] = intsrc;
1052 if (++mp_irq_entries == MAX_IRQ_SOURCES)
1053 panic("Max # of irq sources exceeded!\n");
1054 }
1055 }
1057 #define MAX_GSI_NUM 4096
1059 int mp_register_gsi (u32 gsi, int triggering, int polarity)
1060 {
1061 int ioapic = -1;
1062 int ioapic_pin = 0;
1063 int idx, bit = 0;
1064 static int pci_irq = 16;
1065 /*
1066 * Mapping between Global System Interrupts, which
1067 * represent all possible interrupts, and IRQs
1068 * assigned to actual devices.
1069 */
1070 static int gsi_to_irq[MAX_GSI_NUM];
1072 #ifdef CONFIG_ACPI_BUS
1073 /* Don't set up the ACPI SCI because it's already set up */
1074 if (acpi_fadt.sci_int == gsi)
1075 return gsi;
1076 #endif
1078 ioapic = mp_find_ioapic(gsi);
1079 if (ioapic < 0) {
1080 printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
1081 return gsi;
1082 }
1084 ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
1086 if (ioapic_renumber_irq)
1087 gsi = ioapic_renumber_irq(ioapic, gsi);
1089 /*
1090 * Avoid pin reprogramming. PRTs typically include entries
1091 * with redundant pin->gsi mappings (but unique PCI devices);
1092 * we only program the IOAPIC on the first.
1093 */
1094 bit = ioapic_pin % 32;
1095 idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
1096 if (idx > 3) {
1097 printk(KERN_ERR "Invalid reference to IOAPIC pin "
1098 "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
1099 ioapic_pin);
1100 return gsi;
1101 }
1102 if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
1103 Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
1104 mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
1105 return gsi_to_irq[gsi];
1106 }
1108 mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
1110 if (triggering == ACPI_LEVEL_SENSITIVE) {
1111 /*
1112 * For PCI devices assign IRQs in order, avoiding gaps
1113 * due to unused I/O APIC pins.
1114 */
1115 int irq = gsi;
1116 if (gsi < MAX_GSI_NUM) {
1117 /*
1118 * Retain the VIA chipset work-around (gsi > 15), but
1119 * avoid a problem where the 8254 timer (IRQ0) is setup
1120 * via an override (so it's not on pin 0 of the ioapic),
1121 * and at the same time, the pin 0 interrupt is a PCI
1122 * type. The gsi > 15 test could cause these two pins
1123 * to be shared as IRQ0, and they are not shareable.
1124 * So test for this condition, and if necessary, avoid
1125 * the pin collision.
1126 */
1127 if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
1128 gsi = pci_irq++;
1129 #ifdef CONFIG_ACPI_BUS
1130 /*
1131 * Don't assign IRQ used by ACPI SCI
1132 */
1133 if (gsi == acpi_fadt.sci_int)
1134 gsi = pci_irq++;
1135 #endif
1136 gsi_to_irq[irq] = gsi;
1137 } else {
1138 printk(KERN_ERR "GSI %u is too high\n", gsi);
1139 return gsi;
1140 }
1141 }
1143 io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
1144 triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
1145 polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
1146 return gsi;
1147 }
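/*
 * Illustrative sketch, not part of the original file: the pin_programmed
 * bookkeeping above treats the four u32 words as a single bitmap over the
 * IOAPIC's pins, indexed as word = pin / 32 and bit = pin % 32, so a pin
 * whose routing has already been set is skipped when a later PRT entry maps
 * to it again.  Helper name is hypothetical.
 */
static int mp_test_and_set_pin(struct mp_ioapic_routing *r, int pin)
{
    int idx = pin / 32;
    int bit = pin % 32;
    int was_set = (r->pin_programmed[idx] >> bit) & 1;

    r->pin_programmed[idx] |= 1u << bit;

    return was_set; /* non-zero: this pin was already programmed */
}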
1149 #endif /* CONFIG_X86_IO_APIC */
1150 #endif /* CONFIG_ACPI */