ia64/linux-2.6.18-xen.hg

view arch/x86_64/kernel/setup-xen.c @ 526:f128849f9e78

linux/x86: set up machine_e820 in copy_e820_map() for it to be usable
earlier (and also for being placed more logically).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Apr 22 14:41:51 2008 +0100 (2008-04-22)
parents 17a1a39df565
children 6d9e4b02bba4
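A note on the change above: populating machine_e820 moves out of setup_arch() into copy_e820_map() (in the e820 code, not shown in this file). A minimal sketch of the dom0-only logic being moved, assuming the standard XENMEM_machine_memory_map interface; the exact placement and error handling here are illustrative, not the actual diff:

	struct xen_memory_map memmap;	/* sketch only, not the real patch */

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, machine_e820.map);
	if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
		BUG();
	machine_e820.nr_map = memmap.nr_entries;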
/*
 *  linux/arch/x86-64/kernel/setup.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Nov 2001 Dave Jones <davej@suse.de>
 *  Forked from i386 setup code.
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>

#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <video/edid.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/bootsetup.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/dmi.h>
#ifdef CONFIG_XEN
#include <linux/percpu.h>
#include <xen/interface/physdev.h>
#include "setup_arch_pre.h"
#include <asm/hypervisor.h>
#include <xen/interface/nmi.h>
#include <xen/features.h>
#include <xen/firmware.h>
#include <xen/xencons.h>
#define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((x) << PAGE_SHIFT)
#include <asm/mach-xen/setup_arch_post.h>
#include <xen/interface/memory.h>

#ifdef CONFIG_XEN
#include <xen/interface/kexec.h>
#endif

extern unsigned long start_pfn;
extern struct edid_info edid_info;

shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
EXPORT_SYMBOL(HYPERVISOR_shared_info);

extern char hypercall_page[PAGE_SIZE];
EXPORT_SYMBOL(hypercall_page);

static int xen_panic_event(struct notifier_block *, unsigned long, void *);
static struct notifier_block xen_panic_block = {
	xen_panic_event, NULL, 0 /* try to go last */
};
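
/*
 * The p2m table: maps this kernel's pseudo-physical frame numbers to the
 * underlying machine frame numbers, plus the frame lists that describe the
 * table itself for save/restore.
 */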
unsigned long *phys_to_machine_mapping;
unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];

EXPORT_SYMBOL(phys_to_machine_mapping);
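
/* Per-CPU buffers for batching hypercalls via the Xen multicall interface. */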
DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
DEFINE_PER_CPU(int, nr_multicall_ents);

/* Raw start-of-day parameters from the hypervisor. */
start_info_t *xen_start_info;
EXPORT_SYMBOL(xen_start_info);
#endif

/*
 * Machine setup..
 */

struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned long mmu_cr4_features;

int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
#ifdef CONFIG_ACPI
extern int __initdata acpi_ht;
extern acpi_interrupt_flags acpi_sci_flags;
int __initdata acpi_force = 0;
#endif

int acpi_numa __initdata;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

/*
 * Early DMI memory
 */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);
struct e820map e820;
#ifdef CONFIG_XEN
struct e820map machine_e820;
#endif

extern int root_mountflags;

char command_line[COMMAND_LINE_SIZE];

struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define STANDARD_IO_RESOURCES \
	(sizeof standard_io_resources / sizeof standard_io_resources[0])

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

struct resource data_resource = {
	.name = "Kernel data",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
struct resource code_resource = {
	.name = "Kernel code",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};

#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)

static struct resource system_rom_resource = {
	.name = "System ROM",
	.start = 0xf0000,
	.end = 0xfffff,
	.flags = IORESOURCE_ROM,
};

static struct resource extension_rom_resource = {
	.name = "Extension ROM",
	.start = 0xe0000,
	.end = 0xeffff,
	.flags = IORESOURCE_ROM,
};

static struct resource adapter_rom_resources[] = {
	{ .name = "Adapter ROM", .start = 0xc8000, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM }
};

#define ADAPTER_ROM_RESOURCES \
	(sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])

static struct resource video_rom_resource = {
	.name = "Video ROM",
	.start = 0xc0000,
	.end = 0xc7fff,
	.flags = IORESOURCE_ROM,
};

static struct resource video_ram_resource = {
	.name = "Video RAM area",
	.start = 0xa0000,
	.end = 0xbffff,
	.flags = IORESOURCE_RAM,
};

#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
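
/* A ROM image is valid when all of its bytes sum to zero (mod 256). */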
static int __init romchecksum(unsigned char *rom, unsigned long length)
{
	unsigned char *p, sum = 0;

	for (p = rom; p < rom + length; p++)
		sum += *p;
	return sum == 0;
}
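
/*
 * Scan the legacy video/system/extension/adapter ROM areas and register
 * resources for the images that carry a valid signature and checksum.
 */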
static void __init probe_roms(void)
{
	unsigned long start, length, upper;
	unsigned char *rom;
	int i;

#ifdef CONFIG_XEN
	/* Nothing to do if not running in dom0. */
	if (!is_initial_xendomain())
		return;
#endif

	/* video rom */
	upper = adapter_rom_resources[0].start;
	for (start = video_rom_resource.start; start < upper; start += 2048) {
		rom = isa_bus_to_virt(start);
		if (!romsignature(rom))
			continue;

		video_rom_resource.start = start;

		/* 0 < length <= 0x7f * 512, historically */
		length = rom[2] * 512;

		/* if checksum okay, trust length byte */
		if (length && romchecksum(rom, length))
			video_rom_resource.end = start + length - 1;

		request_resource(&iomem_resource, &video_rom_resource);
		break;
	}

	start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
	if (start < upper)
		start = upper;

	/* system rom */
	request_resource(&iomem_resource, &system_rom_resource);
	upper = system_rom_resource.start;

	/* check for extension rom (ignore length byte!) */
	rom = isa_bus_to_virt(extension_rom_resource.start);
	if (romsignature(rom)) {
		length = extension_rom_resource.end - extension_rom_resource.start + 1;
		if (romchecksum(rom, length)) {
			request_resource(&iomem_resource, &extension_rom_resource);
			upper = extension_rom_resource.start;
		}
	}

	/* check for adapter roms on 2k boundaries */
	for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
		rom = isa_bus_to_virt(start);
		if (!romsignature(rom))
			continue;

		/* 0 < length <= 0x7f * 512, historically */
		length = rom[2] * 512;

		/* but accept any length that fits if checksum okay */
		if (!length || start + length > upper || !romchecksum(rom, length))
			continue;

		adapter_rom_resources[i].start = start;
		adapter_rom_resources[i].end = start + length - 1;
		request_resource(&iomem_resource, &adapter_rom_resources[i]);

		start = adapter_rom_resources[i++].end & ~2047UL;
	}
}

/* Check for full argument with no trailing characters */
static int fullarg(char *p, char *arg)
{
	int l = strlen(arg);
	return !memcmp(p, arg, l) && (p[l] == 0 || isspace(p[l]));
}
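
/*
 * Scan the boot command line once, handle options that must take effect
 * this early (memory layout, ACPI, APIC, crash kernel), and copy the line
 * into command_line for the regular option parsing later.
 */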
static __init void parse_cmdline_early (char ** cmdline_p)
{
	char c = ' ', *to = command_line, *from = COMMAND_LINE;
	int len = 0;
	int userdef = 0;

	for (;;) {
		if (c != ' ')
			goto next_char;

#ifdef CONFIG_SMP
		/*
		 * If the BIOS enumerates physical processors before logical,
		 * maxcpus=N at enumeration-time can be used to disable HT.
		 */
		else if (!memcmp(from, "maxcpus=", 8)) {
			extern unsigned int maxcpus;

			maxcpus = simple_strtoul(from + 8, NULL, 0);
		}
#endif
#ifdef CONFIG_ACPI
		/* "acpi=off" disables both ACPI table parsing and interpreter init */
		if (fullarg(from,"acpi=off"))
			disable_acpi();

		if (fullarg(from, "acpi=force")) {
			/* add later when we do DMI horrors: */
			acpi_force = 1;
			acpi_disabled = 0;
		}

		/* acpi=ht just means: do ACPI MADT parsing
		   at bootup, but don't enable the full ACPI interpreter */
		if (fullarg(from, "acpi=ht")) {
			if (!acpi_force)
				disable_acpi();
			acpi_ht = 1;
		}
		else if (fullarg(from, "pci=noacpi"))
			acpi_disable_pci();
		else if (fullarg(from, "acpi=noirq"))
			acpi_noirq_set();

		else if (fullarg(from, "acpi_sci=edge"))
			acpi_sci_flags.trigger = 1;
		else if (fullarg(from, "acpi_sci=level"))
			acpi_sci_flags.trigger = 3;
		else if (fullarg(from, "acpi_sci=high"))
			acpi_sci_flags.polarity = 1;
		else if (fullarg(from, "acpi_sci=low"))
			acpi_sci_flags.polarity = 3;

		/* acpi=strict disables out-of-spec workarounds */
		else if (fullarg(from, "acpi=strict")) {
			acpi_strict = 1;
		}
#ifdef CONFIG_X86_IO_APIC
		else if (fullarg(from, "acpi_skip_timer_override"))
			acpi_skip_timer_override = 1;
#endif
#endif

#ifndef CONFIG_XEN
		if (fullarg(from, "nolapic") || fullarg(from, "disableapic")) {
			clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
			disable_apic = 1;
		}

		if (fullarg(from, "noapic"))
			skip_ioapic_setup = 1;

		if (fullarg(from,"apic")) {
			skip_ioapic_setup = 0;
			ioapic_force = 1;
		}
#endif

		if (!memcmp(from, "mem=", 4))
			parse_memopt(from+4, &from);

		if (!memcmp(from, "memmap=", 7)) {
			/* exactmap option is for user-defined memory */
			if (!memcmp(from+7, "exactmap", 8)) {
#ifdef CONFIG_CRASH_DUMP
				/* If we are doing a crash dump, we
				 * still need to know the real memory
				 * size before the original memory map is
				 * reset.
				 */
				saved_max_pfn = e820_end_of_ram();
#endif
				from += 8+7;
				end_pfn_map = 0;
				e820.nr_map = 0;
				userdef = 1;
			}
			else {
				parse_memmapopt(from+7, &from);
				userdef = 1;
			}
		}

#ifdef CONFIG_NUMA
		if (!memcmp(from, "numa=", 5))
			numa_setup(from+5);
#endif

		if (!memcmp(from,"iommu=",6)) {
			iommu_setup(from+6);
		}

		if (fullarg(from,"oops=panic"))
			panic_on_oops = 1;

		if (!memcmp(from, "noexec=", 7))
			nonx_setup(from + 7);

#ifdef CONFIG_KEXEC
		/* crashkernel=size@addr specifies the location to reserve for
		 * a crash kernel.  By reserving this memory we guarantee
		 * that linux never sets it up as a DMA target.
		 * Useful for holding code to do something appropriate
		 * after a kernel panic.
		 */
		else if (!memcmp(from, "crashkernel=", 12)) {
#ifndef CONFIG_XEN
			unsigned long size, base;
			size = memparse(from+12, &from);
			if (*from == '@') {
				base = memparse(from+1, &from);
				/* FIXME: Do I want a sanity check
				 * to validate the memory range?
				 */
				crashk_res.start = base;
				crashk_res.end   = base + size - 1;
			}
#else
			printk("Ignoring crashkernel command line, "
			       "parameter will be supplied by xen\n");
#endif
		}
#endif

#ifdef CONFIG_PROC_VMCORE
		/* elfcorehdr= specifies the location of elf core header
		 * stored by the crashed kernel. This option will be passed
		 * by kexec loader to the capture kernel.
		 */
		else if(!memcmp(from, "elfcorehdr=", 11))
			elfcorehdr_addr = memparse(from+11, &from);
#endif

#if defined(CONFIG_HOTPLUG_CPU) && !defined(CONFIG_XEN)
		else if (!memcmp(from, "additional_cpus=", 16))
			setup_additional_cpus(from+16);
#endif

	next_char:
		c = *(from++);
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*(to++) = c;
	}
	if (userdef) {
		printk(KERN_INFO "user-defined physical RAM map:\n");
		e820_print_map("user");
	}
	*to = '\0';
	*cmdline_p = command_line;
}

#ifndef CONFIG_NUMA
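/*
 * Flat (non-NUMA) bootmem setup: place the bootmem bitmap in a free e820
 * area and register all usable RAM with the bootmem allocator.
 */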
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n",bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
#ifdef CONFIG_XEN
	e820_bootmem_free(NODE_DATA(0), 0, xen_start_info->nr_pages<<PAGE_SHIFT);
#else
	e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
#endif
	reserve_bootmem(bootmap, bootmap_size);
}
#endif

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
#ifndef CONFIG_XEN
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
	edd.mbr_signature_nr = EDD_MBR_SIG_NR;
	edd.edd_info_nr = EDD_NR;
}
#endif
#else
static inline void copy_edd(void)
{
}
#endif

#ifndef CONFIG_XEN
#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;

static void discover_ebda(void)
{
	/*
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E
	 */
	ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
	ebda_addr <<= 4;

	ebda_size = *(unsigned short *)(unsigned long)ebda_addr;

	/* Round EBDA up to pages */
	if (ebda_size == 0)
		ebda_size = 1;
	ebda_size <<= 10;
	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
	if (ebda_size > 64*1024)
		ebda_size = 64*1024;
}
#else
#define discover_ebda() ((void)0)
#endif
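
/*
 * setup_arch() is called from start_kernel() to do boot-time,
 * architecture-specific setup: memory map, bootmem, paging, resources,
 * and (under Xen) the phys-to-machine tables.
 */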
void __init setup_arch(char **cmdline_p)
{
#ifdef CONFIG_XEN
	/* Register a call for panic conditions. */
	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);

	ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
	screen_info = SCREEN_INFO;

	if (is_initial_xendomain()) {
		const struct dom0_vga_console_info *info =
			(void *)((char *)xen_start_info +
				 xen_start_info->console.dom0.info_off);

		dom0_init_screen_info(info,
				      xen_start_info->console.dom0.info_size);
		xen_start_info->console.domU.mfn = 0;
		xen_start_info->console.domU.evtchn = 0;
	} else
		screen_info.orig_video_isVGA = 0;

	copy_edid();

	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_writable_pagetables));

	ARCH_SETUP
#else
	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
	screen_info = SCREEN_INFO;
	edid_info = EDID_INFO;
#endif	/* !CONFIG_XEN */
	saved_video_mode = SAVED_VIDEO_MODE;
	bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();
	copy_edd();

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;

	parse_cmdline_early(cmdline_p);

	early_identify_cpu(&boot_cpu_data);

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;	/* for pfn_valid */

	check_efer();

	discover_ebda();

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

	if (is_initial_xendomain())
		dmi_scan_machine();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif

#ifdef CONFIG_XEN
	/*
	 * Reserve kernel, physmap, start info, initial page tables, and
	 * direct mapping.
	 */
	reserve_bootmem_generic(__pa_symbol(&_text),
				(table_end << PAGE_SHIFT) - __pa_symbol(&_text));
#else
	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT);

	/* reserve kernel */
	reserve_bootmem_generic(__pa_symbol(&_text),
				__pa_symbol(&_end) - __pa_symbol(&_text));

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size);

#ifdef CONFIG_SMP
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif
#endif

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif
#ifdef CONFIG_XEN
#ifdef CONFIG_BLK_DEV_INITRD
	if (xen_start_info->mod_start) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			/*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
			initrd_start = INITRD_START + PAGE_OFFSET;
			initrd_end = initrd_start+INITRD_SIZE;
			initrd_below_start_ok = 1;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				(unsigned long)(INITRD_START + INITRD_SIZE),
				(unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
#else	/* CONFIG_XEN */
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start+INITRD_SIZE;
		}
		else {
			printk(KERN_ERR "initrd extends beyond end of memory "
				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				(unsigned long)(INITRD_START + INITRD_SIZE),
				(unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
#endif	/* !CONFIG_XEN */
#ifdef CONFIG_KEXEC
#ifdef CONFIG_XEN
	xen_machine_kexec_setup_resources();
#else
	if (crashk_res.start != crashk_res.end) {
		reserve_bootmem_generic(crashk_res.start,
			crashk_res.end - crashk_res.start + 1);
	}
#endif
#endif

	paging_init();
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#endif
#ifdef CONFIG_XEN
	{
		int i, j, k, fpp;
		unsigned long p2m_pages;

		p2m_pages = end_pfn;
		if (xen_start_info->nr_pages > end_pfn) {
			/*
			 * the end_pfn was shrunk (probably by mem= or highmem=
			 * kernel parameter); shrink reservation with the HV
			 */
			struct xen_memory_reservation reservation = {
				.address_bits = 0,
				.extent_order = 0,
				.domid = DOMID_SELF
			};
			unsigned int difference;
			int ret;

			difference = xen_start_info->nr_pages - end_pfn;

			set_xen_guest_handle(reservation.extent_start,
				((unsigned long *)xen_start_info->mfn_list) + end_pfn);
			reservation.nr_extents = difference;
			ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
						   &reservation);
			BUG_ON(ret != difference);
		}
		else if (end_pfn > xen_start_info->nr_pages)
			p2m_pages = xen_start_info->nr_pages;

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Make sure we have a large enough P->M table. */
			phys_to_machine_mapping = alloc_bootmem_pages(
				end_pfn * sizeof(unsigned long));
			memset(phys_to_machine_mapping, ~0,
			       end_pfn * sizeof(unsigned long));
			memcpy(phys_to_machine_mapping,
			       (unsigned long *)xen_start_info->mfn_list,
			       p2m_pages * sizeof(unsigned long));
			free_bootmem(
				__pa(xen_start_info->mfn_list),
				PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
						sizeof(unsigned long))));

			/*
			 * Initialise the list of the frames that specify the
			 * list of frames that make up the p2m table. Used by
			 * save/restore.
			 */
			pfn_to_mfn_frame_list_list = alloc_bootmem_pages(PAGE_SIZE);

			fpp = PAGE_SIZE/sizeof(unsigned long);
			for (i = 0, j = 0, k = -1; i < end_pfn; i += fpp, j++) {
				if ((j % fpp) == 0) {
					k++;
					BUG_ON(k >= fpp);
					pfn_to_mfn_frame_list[k] =
						alloc_bootmem_pages(PAGE_SIZE);
					pfn_to_mfn_frame_list_list[k] =
						virt_to_mfn(pfn_to_mfn_frame_list[k]);
					j = 0;
				}
				pfn_to_mfn_frame_list[k][j] =
					virt_to_mfn(&phys_to_machine_mapping[i]);
			}
			HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
			HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
				virt_to_mfn(pfn_to_mfn_frame_list_list);
		}

		/* Mark all ISA DMA channels in-use - using them wouldn't work. */
		for (i = 0; i < MAX_DMA_CHANNELS; ++i)
			if (i != 4 && request_dma(i, "xen") != 0)
				BUG();
	}

	if (!is_initial_xendomain()) {
		acpi_disabled = 1;
#ifdef CONFIG_ACPI
		acpi_ht = 0;
#endif
	}
#endif

#ifndef CONFIG_XEN
	check_ioapic();
#endif

	zap_low_mappings(0);

	/*
	 * set this early, so we don't allocate cpu0
	 * if the MADT list doesn't list the BSP first
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();

	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	init_cpu_to_node();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
#ifndef CONFIG_XEN
	init_apic_mappings();
#endif
#endif
#if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
	prefill_possible_map();
#endif

	/*
	 * Request address space for all standard RAM and ROM resources
	 * and also for regions reported as reserved by the e820.
	 */
	probe_roms();
#ifdef CONFIG_XEN
	if (is_initial_xendomain())
		e820_reserve_resources(machine_e820.map, machine_e820.nr_map);
#else
	e820_reserve_resources(e820.map, e820.nr_map);
#endif

	request_resource(&iomem_resource, &video_ram_resource);

	{
		unsigned i;
		/* request I/O space for devices used on all i[345]86 PCs */
		for (i = 0; i < STANDARD_IO_RESOURCES; i++)
			request_resource(&ioport_resource, &standard_io_resources[i]);
	}

#ifdef CONFIG_XEN
	if (is_initial_xendomain())
		e820_setup_gap(machine_e820.map, machine_e820.nr_map);
#else
	e820_setup_gap(e820.map, e820.nr_map);
#endif

#ifdef CONFIG_XEN
	{
		struct physdev_set_iopl set_iopl;

		set_iopl.iopl = 1;
		WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl));

		if (is_initial_xendomain()) {
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
			conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
			conswitchp = &dummy_con;
#endif
#endif
		} else {
#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
			conswitchp = &dummy_con;
#endif
		}
	}
#else	/* CONFIG_XEN */

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

#endif /* !CONFIG_XEN */
}

#ifdef CONFIG_XEN
static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	HYPERVISOR_shutdown(SHUTDOWN_crash);
	/* we're never actually going to get here... */
	return NOTIFY_DONE;
}
#endif /* CONFIG_XEN */
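
/* Read the CPU brand string from the extended CPUID leaves into
   c->x86_model_id. */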
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
	return 1;
}
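
/* Read L1/L2 cache and TLB geometry from the AMD-style extended CPUID
   leaves (0x80000005-0x80000008) and report it. */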
static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, eax, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size=(ecx>>24)+(edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		ecx = cpuid_ecx(0x80000006);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		       c->x86_cache_size, ecx & 0xFF);
	}

	if (n >= 0x80000007)
		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
	if (n >= 0x80000008) {
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}

#ifdef CONFIG_NUMA
static int nearby_node(int apicid)
{
	int i;
	for (i = apicid - 1; i >= 0; i--) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node = 0;
	unsigned apicid = hard_smp_processor_id();
#endif
	unsigned ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
	/* Convert the APIC ID into the socket ID */
	c->phys_proc_id = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		     In that case try picking one from a nearby CPU
		   - The APIC IDs differ from the HyperTransport node IDs
		     which the K8 northbridge parsing fills in.
		     Assume they are all increased by a constant offset,
		     but in the same order as the HT nodeids.
		     If that doesn't result in a usable node fall back to the
		     path for the previous case. */
		int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}

static void __init init_amd(struct cpuinfo_x86 *c)
{
	unsigned level;

#ifdef CONFIG_SMP
	unsigned long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K8_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K8_HWCR, value);
	}
#endif

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);

	level = get_model_name(c);
	if (!level) {
		switch (c->x86) {
		case 15:
			/* Should distinguish Models here, but this is only
			   a fallback anyways. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}
	display_cacheinfo(c);

	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
	if (c->x86_power & (1<<8))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008)
		amd_detect_cmp(c);

	/* Fix cpuid4 emulation for more */
	num_cache_leaves = 3;
}

static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
			((1 << core_bits) - 1);
	}
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
	}

#endif
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, t;

	if (c->cpuid_level < 4)
		return 1;

	cpuid_count(4, 0, &eax, &t, &t, &t);

	if (eax & 0x1f)
		return ((eax >> 26) + 1);
	else
		return 1;
}

static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	if (acpi_numa > 0)
		printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned n;

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
	}

	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		/* CPUID workaround for Intel 0F34 CPU */
		if (c->x86_vendor == X86_VENDOR_INTEL &&
		    c->x86 == 0xF && c->x86_model == 0x3 &&
		    c->x86_mask == 0x4)
			c->x86_phys_bits = 36;
	}

	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
	set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;

	if (!strcmp(v, "AuthenticAMD"))
		c->x86_vendor = X86_VENDOR_AMD;
	else if (!strcmp(v, "GenuineIntel"))
		c->x86_vendor = X86_VENDOR_INTEL;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}

struct cpu_model_info {
	int vendor;
	int family;
	char *model_names[16];
};

/* Do some early cpuid on the boot CPU to get the parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0';	/* Unset */
	c->x86_model_id[0] = '\0';	/* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

#ifdef CONFIG_SMP
	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;
	u32 xlvl;

	early_identify_cpu(c);

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	select_idle_routine(c);
	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0 ; i < NCAPINTS ; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	if (c == &boot_cpu_data)
		mtrr_bp_init();
	else
		mtrr_ap_init();
#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}

/*
 * Get CPU information for use by the procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned.  Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static char *x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
		"constant_tsc", NULL, NULL,
		"up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
		"tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
	static char *x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",	/* frequency id control */
		"vid",	/* voltage id control */
		"ttp",	/* thermal trip */
		"tm",
		"stc",
		NULL,
		/* nothing */	/* constant_tsc - moved to flags */
	};

#ifdef CONFIG_SMP
	if (!cpu_online(c-cpu_data))
		return 0;
#endif

	seq_printf(m,"processor\t: %u\n"
		   "vendor_id\t: %s\n"
		   "cpu family\t: %d\n"
		   "model\t\t: %d\n"
		   "model name\t: %s\n",
		   (unsigned)(c-cpu_data),
		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		   c->x86,
		   (int)c->x86_model,
		   c->x86_model_id[0] ? c->x86_model_id : "unknown");

	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");

	if (cpu_has(c,X86_FEATURE_TSC)) {
		unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
		if (!freq)
			freq = cpu_khz;
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			   freq / 1000, (freq % 1000));
	}

	/* Cache size */
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

#ifdef CONFIG_SMP
	if (smp_num_siblings * c->x86_max_cores > 1) {
		int cpu = c - cpu_data;
		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
		seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
	}
#endif

	seq_printf(m,
		   "fpu\t\t: yes\n"
		   "fpu_exception\t: yes\n"
		   "cpuid level\t: %d\n"
		   "wp\t\t: yes\n"
		   "flags\t\t:",
		   c->cpuid_level);

	{
		int i;
		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
			if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
				seq_printf(m, " %s", x86_cap_flags[i]);
	}

	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);

	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);

	seq_printf(m, "power management:");
	{
		unsigned i;
		for (i = 0; i < 32; i++)
			if (c->x86_power & (1 << i)) {
				if (i < ARRAY_SIZE(x86_power_flags) &&
				    x86_power_flags[i])
					seq_printf(m, "%s%s",
						   x86_power_flags[i][0]?" ":"",
						   x86_power_flags[i]);
				else
					seq_printf(m, " [%d]", i);
			}
	}

	seq_printf(m, "\n\n");

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}
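
/* seq_file operations that back /proc/cpuinfo. */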
struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};

#if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
#include <linux/platform_device.h>
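/* Register the PC speaker platform device; under Xen only dom0 gets one. */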
static __init int add_pcspkr(void)
{
	struct platform_device *pd;
	int ret;

	if (!is_initial_xendomain())
		return 0;

	pd = platform_device_alloc("pcspkr", -1);
	if (!pd)
		return -ENOMEM;

	ret = platform_device_add(pd);
	if (ret)
		platform_device_put(pd);

	return ret;
}
device_initcall(add_pcspkr);
#endif