ia64/xen-unstable

view xen/arch/x86/acpi/boot.c @ 17577:ccbbe6fe5827

Fix the address calculation of acpi enable reg according to ACPI spec.

Signed-off-by: Wei Gang <gang.wei@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon May 05 10:16:58 2008 +0100 (2008-05-05)
parents bb2301b33760
children 29dc52031954
line source
1 /*
2 * boot.c - Architecture-Specific Low-Level ACPI Boot Support
3 *
4 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
5 * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
6 *
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */
26 #include <xen/config.h>
27 #include <xen/errno.h>
28 #include <xen/init.h>
29 #include <xen/acpi.h>
30 #include <xen/irq.h>
31 #include <xen/dmi.h>
32 #include <asm/fixmap.h>
33 #include <asm/page.h>
34 #include <asm/apic.h>
35 #include <asm/io_apic.h>
36 #include <asm/apic.h>
37 #include <asm/io.h>
38 #include <asm/mpspec.h>
39 #include <asm/processor.h>
40 #include <mach_apic.h>
41 #include <mach_mpparse.h>
int sbf_port; /* Simple Boot Flag CMOS port index, filled in by acpi_parse_sbf() */
#define CONFIG_ACPI_PCI
/*
 * Sanity-check a MADT subtable: non-NULL, fully contained within the
 * table ("end" is the first byte past the table), and carrying exactly
 * the length its entry type requires.  Every macro argument is
 * parenthesized so arbitrary expressions can be passed safely.
 */
#define BAD_MADT_ENTRY(entry, end) ( \
    (!(entry)) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \
    ((acpi_table_entry_header *)(entry))->length != sizeof(*(entry)))

#define PREFIX "ACPI: "
#ifdef CONFIG_ACPI_PCI
int acpi_noirq __initdata;        /* skip ACPI IRQ initialization */
int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */
#else
int acpi_noirq __initdata = 1;
int acpi_pci_disabled __initdata = 1;
#endif
int acpi_ht __initdata = 1;       /* enable HT */

int acpi_lapic;  /* set to 1 once MADT LAPIC entries have been parsed */
int acpi_ioapic; /* set to 1 once MADT IO-APIC entries have been parsed */
int acpi_strict;
EXPORT_SYMBOL(acpi_strict);

/* SCI override state, recorded while parsing MADT INT_SRC_OVR entries */
u8 acpi_sci_flags __initdata;
int acpi_sci_override_gsi __initdata;
int acpi_skip_timer_override __initdata;

#ifdef CONFIG_X86_LOCAL_APIC
/* LAPIC physical base; may be overridden by MADT / LAPIC_ADDR_OVR entries. */
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
#endif

/* SMI command port and enable/disable magic values, copied from the FADT. */
u32 acpi_smi_cmd;
u8 acpi_enable_value, acpi_disable_value;

#ifndef __HAVE_ARCH_CMPXCHG
#warning ACPI uses CMPXCHG, i486 and later hardware
#endif

#define MAX_MADT_ENTRIES 256
/* ACPI processor id -> local APIC id mapping; 0xff means "not present". */
u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
    {[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
EXPORT_SYMBOL(x86_acpiid_to_apicid);

/* --------------------------------------------------------------------------
   Boot-time Configuration
   -------------------------------------------------------------------------- */

/*
 * The default interrupt routing model is PIC (8259).  This gets
 * overridden if IOAPICs are enumerated (below).
 */
enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
96 /*
97 * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
98 * to map the target physical address. The problem is that set_fixmap()
99 * provides a single page, and it is possible that the page is not
100 * sufficient.
101 * By using this area, we can map up to MAX_IO_APICS pages temporarily,
102 * i.e. until the next __va_range() call.
103 *
104 * Important Safety Note: The fixed I/O APIC page numbers are *subtracted*
105 * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and
106 * count idx down while incrementing the phys address.
107 */
/*
 * Map an ACPI table at physical address "phys" of "size" bytes and return
 * a virtual pointer to it.  Tables below 1MB are reached through the
 * permanent 1:1 mapping; higher tables are mapped through the FIX_ACPI
 * fixmap slots (see the Safety Note above: slot indices count DOWN while
 * the physical address counts up).  Returns NULL if the table needs more
 * pages than the fixmap window provides.
 */
char *__acpi_map_table(unsigned long phys, unsigned long size)
{
    unsigned long base, offset, mapped_size;
    int idx;

    /* XEN: RAM holes above 1MB are not permanently mapped. */
    if (phys + size < 1 * 1024 * 1024)
        return __va(phys);

    /* Map the first page; "offset" is the table's offset within it. */
    offset = phys & (PAGE_SIZE - 1);
    mapped_size = PAGE_SIZE - offset;
    set_fixmap(FIX_ACPI_END, phys);
    base = fix_to_virt(FIX_ACPI_END);

    /*
     * Most cases can be covered by the below: keep mapping successive
     * pages into descending fixmap slots until "size" bytes are covered
     * or the slots run out.
     */
    idx = FIX_ACPI_END;
    while (mapped_size < size) {
        if (--idx < FIX_ACPI_BEGIN)
            return NULL;    /* cannot handle this */
        phys += PAGE_SIZE;
        set_fixmap(idx, phys);
        mapped_size += PAGE_SIZE;
    }

    return ((char *) base + offset);
}
137 #ifdef CONFIG_X86_LOCAL_APIC
138 static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
139 {
140 struct acpi_table_madt *madt = NULL;
142 if (!phys_addr || !size)
143 return -EINVAL;
145 madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
146 if (!madt) {
147 printk(KERN_WARNING PREFIX "Unable to map MADT\n");
148 return -ENODEV;
149 }
151 if (madt->address) {
152 acpi_lapic_addr = (u64) madt->address;
154 printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
155 madt->address);
156 }
158 acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
160 return 0;
161 }
163 static int __init
164 acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
165 {
166 struct acpi_table_lapic *processor = NULL;
168 processor = (struct acpi_table_lapic *)header;
170 if (BAD_MADT_ENTRY(processor, end))
171 return -EINVAL;
173 acpi_table_print_madt_entry(header);
175 /* Record local apic id only when enabled */
176 if (processor->flags.enabled)
177 x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
179 /*
180 * We need to register disabled CPU as well to permit
181 * counting disabled CPUs. This allows us to size
182 * cpus_possible_map more accurately, to permit
183 * to not preallocating memory for all NR_CPUS
184 * when we use CPU hotplug.
185 */
186 mp_register_lapic(processor->id, /* APIC ID */
187 processor->flags.enabled); /* Enabled? */
189 return 0;
190 }
192 static int __init
193 acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
194 const unsigned long end)
195 {
196 struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
198 lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr *)header;
200 if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
201 return -EINVAL;
203 acpi_lapic_addr = lapic_addr_ovr->address;
205 return 0;
206 }
208 static int __init
209 acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
210 {
211 struct acpi_table_lapic_nmi *lapic_nmi = NULL;
213 lapic_nmi = (struct acpi_table_lapic_nmi *)header;
215 if (BAD_MADT_ENTRY(lapic_nmi, end))
216 return -EINVAL;
218 acpi_table_print_madt_entry(header);
220 if (lapic_nmi->lint != 1)
221 printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
223 return 0;
224 }
226 #endif /*CONFIG_X86_LOCAL_APIC */
228 #if defined(CONFIG_X86_IO_APIC) /*&& defined(CONFIG_ACPI_INTERPRETER)*/
230 static int __init
231 acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
232 {
233 struct acpi_table_ioapic *ioapic = NULL;
235 ioapic = (struct acpi_table_ioapic *)header;
237 if (BAD_MADT_ENTRY(ioapic, end))
238 return -EINVAL;
240 acpi_table_print_madt_entry(header);
242 mp_register_ioapic(ioapic->id,
243 ioapic->address, ioapic->global_irq_base);
245 return 0;
246 }
248 static int __init
249 acpi_parse_int_src_ovr(acpi_table_entry_header * header,
250 const unsigned long end)
251 {
252 struct acpi_table_int_src_ovr *intsrc = NULL;
254 intsrc = (struct acpi_table_int_src_ovr *)header;
256 if (BAD_MADT_ENTRY(intsrc, end))
257 return -EINVAL;
259 acpi_table_print_madt_entry(header);
261 if (acpi_skip_timer_override &&
262 intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
263 printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
264 return 0;
265 }
267 mp_override_legacy_irq(intsrc->bus_irq,
268 intsrc->flags.polarity,
269 intsrc->flags.trigger, intsrc->global_irq);
271 return 0;
272 }
274 static int __init
275 acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
276 {
277 struct acpi_table_nmi_src *nmi_src = NULL;
279 nmi_src = (struct acpi_table_nmi_src *)header;
281 if (BAD_MADT_ENTRY(nmi_src, end))
282 return -EINVAL;
284 acpi_table_print_madt_entry(header);
286 /* TBD: Support nimsrc entries? */
288 return 0;
289 }
291 #endif /* CONFIG_X86_IO_APIC */
293 static unsigned long __init
294 acpi_scan_rsdp(unsigned long start, unsigned long length)
295 {
296 unsigned long offset = 0;
297 unsigned long sig_len = sizeof("RSD PTR ") - 1;
299 /*
300 * Scan all 16-byte boundaries of the physical memory region for the
301 * RSDP signature.
302 */
303 for (offset = 0; offset < length; offset += 16) {
304 if (strncmp((char *)(start + offset), "RSD PTR ", sig_len))
305 continue;
306 return (start + offset);
307 }
309 return 0;
310 }
312 static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size)
313 {
314 struct acpi_table_sbf *sb;
316 if (!phys_addr || !size)
317 return -EINVAL;
319 sb = (struct acpi_table_sbf *)__acpi_map_table(phys_addr, size);
320 if (!sb) {
321 printk(KERN_WARNING PREFIX "Unable to map SBF\n");
322 return -ENODEV;
323 }
325 sbf_port = sb->sbf_cmos; /* Save CMOS port */
327 return 0;
328 }
330 #ifdef CONFIG_HPET_TIMER
/*
 * Handler for the HPET table: validate that the timer block is
 * memory-mapped and record its base address.
 */
static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
{
    struct acpi_table_hpet *hpet_tbl;

    if (!phys || !size)
        return -EINVAL;

    hpet_tbl = (struct acpi_table_hpet *)__acpi_map_table(phys, size);
    if (!hpet_tbl) {
        printk(KERN_WARNING PREFIX "Unable to map HPET\n");
        return -ENODEV;
    }

    /* The HPET register block must live in memory space, not I/O space. */
    if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
        printk(KERN_WARNING PREFIX "HPET timers must be located in "
               "memory.\n");
        return -1;
    }

#if 0/*def CONFIG_X86_64*/
    vxtime.hpet_address = hpet_tbl->address.address;

    printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
           hpet_tbl->id, vxtime.hpet_address);
#else /* X86 */
    {
        extern unsigned long hpet_address;

        hpet_address = hpet_tbl->address.address;
        printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
               hpet_tbl->id, hpet_address);
    }
#endif /* X86 */

    return 0;
}
368 #else
369 #define acpi_parse_hpet NULL
370 #endif
372 #ifdef CONFIG_X86_PM_TIMER
373 extern u32 pmtmr_ioport;
374 #endif
376 #ifdef CONFIG_ACPI_SLEEP
/*
 * Copy one FADT register block into acpi_sinfo.
 *
 * Prefers the extended (x...) GAS descriptor when the FADT revision
 * provides one; if the resulting address is still zero, falls back to
 * the legacy 32-bit port field and synthesizes a SYSTEM_IO descriptor
 * from it (bit_width is the register byte length scaled to bits).
 * Token-pasting macro: dst/src/len name acpi_sinfo and FADT fields;
 * expects a local "fadt" pointer in the caller's scope.
 */
#define acpi_fadt_copy_address(dst, src, len) do { \
    if (fadt->header.revision >= FADT2_REVISION_ID) \
        acpi_sinfo.dst##_blk = fadt->x##src##_block; \
    if (!acpi_sinfo.dst##_blk.address) { \
        acpi_sinfo.dst##_blk.address = fadt->src##_block; \
        acpi_sinfo.dst##_blk.space_id = ACPI_ADR_SPACE_SYSTEM_IO; \
        acpi_sinfo.dst##_blk.bit_width = fadt->len##_length << 3; \
        acpi_sinfo.dst##_blk.bit_offset = 0; \
        acpi_sinfo.dst##_blk.access_width = 0; \
    } \
} while (0)
/* Get pm1x_cnt and pm1x_evt information for ACPI sleep */
static void __init
acpi_fadt_parse_sleep_info(struct acpi_table_fadt *fadt)
{
    struct acpi_table_rsdp *rsdp;
    unsigned long rsdp_phys;
    struct acpi_table_facs *facs = NULL;
    uint64_t facs_pa;

    /* The RSDP revision decides (below) which wakeup vector form is usable. */
    rsdp_phys = acpi_find_rsdp();
    if (!rsdp_phys || acpi_disabled)
        goto bad;
    rsdp = __va(rsdp_phys);

    /* PM1a/PM1b control and event register blocks (macro defined above). */
    acpi_fadt_copy_address(pm1a_cnt, pm1a_control, pm1_control);
    acpi_fadt_copy_address(pm1b_cnt, pm1b_control, pm1_control);
    acpi_fadt_copy_address(pm1a_evt, pm1a_event, pm1_event);
    acpi_fadt_copy_address(pm1b_evt, pm1b_event, pm1_event);

    printk(KERN_INFO PREFIX
           "ACPI SLEEP INFO: pm1x_cnt[%"PRIx64",%"PRIx64"], "
           "pm1x_evt[%"PRIx64",%"PRIx64"]\n",
           acpi_sinfo.pm1a_cnt_blk.address,
           acpi_sinfo.pm1b_cnt_blk.address,
           acpi_sinfo.pm1a_evt_blk.address,
           acpi_sinfo.pm1b_evt_blk.address);

    /* Now FACS... prefer the 64-bit Xfacs pointer on FADT rev >= 2. */
    if (fadt->header.revision >= FADT2_REVISION_ID)
        facs_pa = fadt->Xfacs;
    else
        facs_pa = (uint64_t)fadt->facs;

    facs = (struct acpi_table_facs *)
        __acpi_map_table(facs_pa, sizeof(struct acpi_table_facs));
    if (!facs)
        goto bad;

    if (strncmp(facs->signature, "FACS", 4)) {
        printk(KERN_ERR PREFIX "Invalid FACS signature %.4s\n",
               facs->signature);
        goto bad;
    }

    /* 24 bytes covers the fields up to and including the 32-bit vector. */
    if (facs->length < 24) {
        printk(KERN_ERR PREFIX "Invalid FACS table length: 0x%x",
               facs->length);
        goto bad;
    }

    if (facs->length < 64)
        printk(KERN_WARNING PREFIX
               "FACS is shorter than ACPI spec allow: 0x%x",
               facs->length);

    /*
     * Use the 64-bit X firmware waking vector only when both the RSDP
     * (rev >= 2) and the FACS (length >= 32) provide it.
     */
    if ((rsdp->revision < 2) || (facs->length < 32)) {
        acpi_sinfo.wakeup_vector = facs_pa +
            offsetof(struct acpi_table_facs,
                     firmware_waking_vector);
        acpi_sinfo.vector_width = 32;
    } else {
        acpi_sinfo.wakeup_vector = facs_pa +
            offsetof(struct acpi_table_facs,
                     xfirmware_waking_vector);
        acpi_sinfo.vector_width = 64;
    }

    printk(KERN_INFO PREFIX
           " wakeup_vec[%"PRIx64"], vec_size[%x]\n",
           acpi_sinfo.wakeup_vector, acpi_sinfo.vector_width);
    return;
bad:
    /* Leave no partial sleep state behind on any failure. */
    memset(&acpi_sinfo, 0, sizeof(acpi_sinfo));
}
463 #endif
/*
 * Cache the FADT and derive the PM1a/PM1b *enable* register addresses.
 */
static void __init
acpi_fadt_parse_reg(struct acpi_table_fadt *fadt)
{
    unsigned int len;

    /* Copy whichever is shorter: the firmware's FADT or our structure. */
    len = min_t(unsigned int, fadt->header.length, sizeof(*fadt));
    memcpy(&acpi_gbl_FADT, fadt, len);

    if (len > offsetof(struct acpi_table_fadt, xpm1b_event_block)) {
        memcpy(&acpi_gbl_xpm1a_enable, &fadt->xpm1a_event_block,
               sizeof(acpi_gbl_xpm1a_enable));
        memcpy(&acpi_gbl_xpm1b_enable, &fadt->xpm1b_event_block,
               sizeof(acpi_gbl_xpm1b_enable));

        /*
         * Per the ACPI spec the PM1 event block is split evenly into
         * status (first half) and enable (second half) registers, so
         * the enable registers sit at +pm1_event_length/2 — the very
         * fix this changeset made.
         */
        acpi_gbl_xpm1a_enable.address +=
            acpi_gbl_FADT.pm1_event_length / 2;
        if ( acpi_gbl_xpm1b_enable.address )
            acpi_gbl_xpm1b_enable.address +=
                acpi_gbl_FADT.pm1_event_length / 2;
    }
}
/*
 * Handler for the FADT: record SCI / PM-timer / SMI command details and
 * kick off register and sleep-state parsing.  Always returns 0 so that
 * table parsing continues even if the FADT cannot be mapped.
 */
static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
{
    struct acpi_table_fadt *fadt = NULL;

    fadt = (struct acpi_table_fadt *)__acpi_map_table(phys, size);
    if (!fadt) {
        printk(KERN_WARNING PREFIX "Unable to map FADT\n");
        return 0;
    }

#ifdef CONFIG_ACPI_INTERPRETER
    /* initialize sci_int early for INT_SRC_OVR MADT parsing */
    acpi_fadt.sci_int = fadt->sci_int;

    /* initialize rev and apic_phys_dest_mode for x86_64 genapic */
    acpi_fadt.revision = fadt->revision;
    acpi_fadt.force_apic_physical_destination_mode =
        fadt->force_apic_physical_destination_mode;
#endif

#ifdef CONFIG_X86_PM_TIMER
    /* detect the location of the ACPI PM Timer */
    if (fadt->header.revision >= FADT2_REVISION_ID) {
        /* FADT rev. 2 */
        if (fadt->xpm_timer_block.space_id ==
            ACPI_ADR_SPACE_SYSTEM_IO)
            pmtmr_ioport = fadt->xpm_timer_block.address;
        /*
         * "X" fields are optional extensions to the original V1.0
         * fields, so we must selectively expand V1.0 fields if the
         * corresponding X field is zero.
         */
        if (!pmtmr_ioport)
            pmtmr_ioport = fadt->pm_timer_block;
    } else {
        /* FADT rev. 1 */
        pmtmr_ioport = fadt->pm_timer_block;
    }
    if (pmtmr_ioport)
        printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
               pmtmr_ioport);
#endif

    /* SMI command port plus the values written there to enable/disable ACPI. */
    acpi_smi_cmd = fadt->smi_command;
    acpi_enable_value = fadt->acpi_enable;
    acpi_disable_value = fadt->acpi_disable;

    acpi_fadt_parse_reg(fadt);

#ifdef CONFIG_ACPI_SLEEP
    acpi_fadt_parse_sleep_info(fadt);
#endif

    return 0;
}
543 unsigned long __init acpi_find_rsdp(void)
544 {
545 unsigned long rsdp_phys = 0;
547 #if 0
548 if (efi_enabled) {
549 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
550 return efi.acpi20;
551 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
552 return efi.acpi;
553 }
554 #endif
555 /*
556 * Scan memory looking for the RSDP signature. First search EBDA (low
557 * memory) paragraphs and then search upper memory (E0000-FFFFF).
558 */
559 rsdp_phys = acpi_scan_rsdp(0, 0x400);
560 if (!rsdp_phys)
561 rsdp_phys = acpi_scan_rsdp(0xE0000, 0x20000);
563 return rsdp_phys;
564 }
566 #ifdef CONFIG_X86_LOCAL_APIC
/*
 * Parse LAPIC entries in MADT
 * returns 0 on success, < 0 on error
 */
static int __init acpi_parse_madt_lapic_entries(void)
{
    int count;

    if (!cpu_has_apic)
        return -ENODEV;

    /*
     * Note that the LAPIC address is obtained from the MADT (32-bit value)
     * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
     * The override must therefore be parsed before the address is used.
     */

    count =
        acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR,
                              acpi_parse_lapic_addr_ovr, 0);
    if (count < 0) {
        printk(KERN_ERR PREFIX
               "Error parsing LAPIC address override entry\n");
        return count;
    }

    mp_register_lapic_address(acpi_lapic_addr);

    /* Zero LAPIC entries is as fatal as a parse error: no CPUs found. */
    count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic,
                                  MAX_APICS);
    if (!count) {
        printk(KERN_ERR PREFIX "No LAPIC entries present\n");
        /* TBD: Cleanup to allow fallback to MPS */
        return -ENODEV;
    } else if (count < 0) {
        printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
        /* TBD: Cleanup to allow fallback to MPS */
        return count;
    }

    count =
        acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
    if (count < 0) {
        printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
        /* TBD: Cleanup to allow fallback to MPS */
        return count;
    }
    return 0;
}
615 #endif /* CONFIG_X86_LOCAL_APIC */
617 #if defined(CONFIG_X86_IO_APIC) /*&& defined(CONFIG_ACPI_INTERPRETER)*/
/*
 * Parse IOAPIC related entries in MADT
 * returns 0 on success, < 0 on error
 */
static int __init acpi_parse_madt_ioapic_entries(void)
{
    int count;

    /*
     * ACPI interpreter is required to complete interrupt setup,
     * so if it is off, don't enumerate the io-apics with ACPI.
     * If MPS is present, it will handle them,
     * otherwise the system will stay in PIC mode
     */
    if (acpi_disabled || acpi_noirq) {
        return -ENODEV;
    }

    if (!cpu_has_apic)
        return -ENODEV;

    /*
     * if "noapic" boot option, don't look for IO-APICs
     */
    if (skip_ioapic_setup) {
        printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
               "due to 'noapic' option.\n");
        return -ENODEV;
    }

    count =
        acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic,
                              MAX_IO_APICS);
    if (!count) {
        printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
        return -ENODEV;
    } else if (count < 0) {
        printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
        return count;
    }

    /* Overrides must be applied before the legacy identity mappings below. */
    count =
        acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr,
                              NR_IRQ_VECTORS);
    if (count < 0) {
        printk(KERN_ERR PREFIX
               "Error parsing interrupt source overrides entry\n");
        /* TBD: Cleanup to allow fallback to MPS */
        return count;
    }

#ifdef CONFIG_ACPI_INTERPRETER
    /*
     * If BIOS did not supply an INT_SRC_OVR for the SCI
     * pretend we got one so we can set the SCI flags.
     */
    if (!acpi_sci_override_gsi)
        acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
#endif

    /* Fill in identity legacy mapings where no override */
    mp_config_acpi_legacy_irqs();

    count =
        acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src,
                              NR_IRQ_VECTORS);
    if (count < 0) {
        printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
        /* TBD: Cleanup to allow fallback to MPS */
        return count;
    }

    return 0;
}
692 #else
/* No IO-APIC support compiled in: always report failure to the caller. */
static inline int acpi_parse_madt_ioapic_entries(void)
{
    return -1;
}
697 #endif /* !(CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER) */
/*
 * Drive MADT processing: parse LAPIC entries first, then IO-APIC
 * entries, flipping acpi_lapic/acpi_ioapic/acpi_irq_model as each
 * stage succeeds.  An -EINVAL from either stage disables ACPI.
 */
static void __init acpi_process_madt(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
    int count, error;

    count = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
    if (count >= 1) {

        /*
         * Parse MADT LAPIC entries
         */
        error = acpi_parse_madt_lapic_entries();
        if (!error) {
            acpi_lapic = 1;
            generic_bigsmp_probe();

            /*
             * Parse MADT IO-APIC entries
             */
            error = acpi_parse_madt_ioapic_entries();
            if (!error) {
                acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
                acpi_irq_balance_set(NULL);
                acpi_ioapic = 1;

                smp_found_config = 1;
                clustered_apic_check();
            }
        }
        if (error == -EINVAL) {
            /*
             * Dell Precision Workstation 410, 610 come here.
             */
            printk(KERN_ERR PREFIX
                   "Invalid BIOS MADT, disabling ACPI\n");
            disable_acpi();
        }
    }
#endif
    return;
}
742 extern int acpi_force;
744 #ifdef __i386__
746 static int __init disable_acpi_irq(struct dmi_system_id *d)
747 {
748 if (!acpi_force) {
749 printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
750 d->ident);
751 acpi_noirq_set();
752 }
753 return 0;
754 }
756 static int __init disable_acpi_pci(struct dmi_system_id *d)
757 {
758 if (!acpi_force) {
759 printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
760 d->ident);
761 /*acpi_disable_pci();*/
762 }
763 return 0;
764 }
766 static int __init dmi_disable_acpi(struct dmi_system_id *d)
767 {
768 if (!acpi_force) {
769 printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
770 disable_acpi();
771 } else {
772 printk(KERN_NOTICE
773 "Warning: DMI blacklist says broken, but acpi forced\n");
774 }
775 return 0;
776 }
778 /*
779 * Limit ACPI to CPU enumeration for HT
780 */
781 static int __init force_acpi_ht(struct dmi_system_id *d)
782 {
783 if (!acpi_force) {
784 printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
785 d->ident);
786 disable_acpi();
787 acpi_ht = 1;
788 } else {
789 printk(KERN_NOTICE
790 "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
791 }
792 return 0;
793 }
/*
 * If your system is blacklisted here, but you find that acpi=force
 * works for you, please contact acpi-devel@sourceforge.net
 *
 * Terminated by an empty sentinel entry, as dmi_check_system() requires.
 */
static struct dmi_system_id __initdata acpi_dmi_table[] = {
    /*
     * Boxes that need ACPI disabled
     */
    {
        .callback = dmi_disable_acpi,
        .ident = "IBM Thinkpad",
        .matches = {
            DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
            DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
        },
    },

    /*
     * Boxes that need acpi=ht
     */
    {
        .callback = force_acpi_ht,
        .ident = "FSC Primergy T850",
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
            DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
        },
    },
    {
        .callback = force_acpi_ht,
        .ident = "DELL GX240",
        .matches = {
            DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
            DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
        },
    },
    {
        .callback = force_acpi_ht,
        .ident = "HP VISUALIZE NT Workstation",
        .matches = {
            DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
            DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
        },
    },
    {
        .callback = force_acpi_ht,
        .ident = "Compaq Workstation W8000",
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
            DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
        },
    },
    {
        .callback = force_acpi_ht,
        .ident = "ASUS P4B266",
        .matches = {
            DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
            DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
        },
    },
    {
        .callback = force_acpi_ht,
        .ident = "ASUS P2B-DS",
        .matches = {
            DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
            DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
        },
    },
    {
        .callback = force_acpi_ht,
        .ident = "ASUS CUR-DLS",
        .matches = {
            DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
            DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
        },
    },
    {
        .callback = force_acpi_ht,
        .ident = "ABIT i440BX-W83977",
        .matches = {
            DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
            DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
        },
    },
    {
        .callback = force_acpi_ht,
        .ident = "IBM Bladecenter",
        .matches = {
            DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
            DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
        },
    },
    {
        .callback = force_acpi_ht,
        .ident = "IBM eServer xSeries 360",
        .matches = {
            DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
            DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
        },
    },
    {
        .callback = force_acpi_ht,
        .ident = "IBM eserver xSeries 330",
        .matches = {
            DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
            DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
        },
    },
    {
        .callback = force_acpi_ht,
        .ident = "IBM eserver xSeries 440",
        .matches = {
            DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
            DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
        },
    },

    /*
     * Boxes that need ACPI PCI IRQ routing disabled
     */
    {
        .callback = disable_acpi_irq,
        .ident = "ASUS A7V",
        .matches = {
            DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
            DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
            /* newer BIOS, Revision 1011, does work */
            DMI_MATCH(DMI_BIOS_VERSION,
                      "ASUS A7V ACPI BIOS Revision 1007"),
        },
    },

    /*
     * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
     */
    { /* _BBN 0 bug */
        .callback = disable_acpi_pci,
        .ident = "ASUS PR-DLS",
        .matches = {
            DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
            DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
            DMI_MATCH(DMI_BIOS_VERSION,
                      "ASUS PR-DLS ACPI BIOS Revision 1010"),
            DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
        },
    },
    {
        .callback = disable_acpi_pci,
        .ident = "Acer TravelMate 36x Laptop",
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
            DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
        },
    },
    {}
};
952 #endif /* __i386__ */
954 /*
955 * acpi_boot_table_init() and acpi_boot_init()
956 * called from setup_arch(), always.
957 * 1. checksums all tables
958 * 2. enumerates lapics
959 * 3. enumerates io-apics
960 *
961 * acpi_table_init() is separate to allow reading SRAT without
962 * other side effects.
963 *
964 * side effects of acpi_boot_init:
965 * acpi_lapic = 1 if LAPIC found
966 * acpi_ioapic = 1 if IOAPIC found
967 * if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
968 * if acpi_blacklisted() acpi_disabled = 1;
969 * acpi_irq_model=...
970 * ...
971 *
972 * return value: (currently ignored)
973 * 0: success
974 * !0: failure
975 */
977 int __init acpi_boot_table_init(void)
978 {
979 int error;
981 #ifdef __i386__
982 dmi_check_system(acpi_dmi_table);
983 #endif
985 /*
986 * If acpi_disabled, bail out
987 * One exception: acpi=ht continues far enough to enumerate LAPICs
988 */
989 if (acpi_disabled && !acpi_ht)
990 return 1;
992 /*
993 * Initialize the ACPI boot-time table parser.
994 */
995 error = acpi_table_init();
996 if (error) {
997 disable_acpi();
998 return error;
999 }
1001 acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
1003 /*
1004 * blacklist may disable ACPI entirely
1005 */
1006 error = acpi_blacklisted();
1007 if (error) {
1008 extern int acpi_force;
1010 if (acpi_force) {
1011 printk(KERN_WARNING PREFIX "acpi=force override\n");
1012 } else {
1013 printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
1014 disable_acpi();
1015 return error;
1019 return 0;
1022 int __init acpi_boot_init(void)
1024 /*
1025 * If acpi_disabled, bail out
1026 * One exception: acpi=ht continues far enough to enumerate LAPICs
1027 */
1028 if (acpi_disabled && !acpi_ht)
1029 return 1;
1031 acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
1033 /*
1034 * set sci_int and PM timer address
1035 */
1036 acpi_table_parse(ACPI_FADT, acpi_parse_fadt);
1038 /*
1039 * Process the Multiple APIC Description Table (MADT), if present
1040 */
1041 acpi_process_madt();
1043 acpi_table_parse(ACPI_HPET, acpi_parse_hpet);
1045 acpi_dmar_init();
1047 return 0;
1050 unsigned int acpi_get_processor_id(unsigned int cpu)
1052 unsigned int acpiid, apicid;
1054 if ((apicid = x86_cpu_to_apicid[cpu]) == 0xff)
1055 return 0xff;
1057 for (acpiid = 0; acpiid < ARRAY_SIZE(x86_acpiid_to_apicid); acpiid++)
1058 if (x86_acpiid_to_apicid[acpiid] == apicid)
1059 return acpiid;
1061 return 0xff;