ia64/xen-unstable

view linux-2.6-xen-sparse/arch/x86_64/kernel/setup-xen.c @ 10133:dec7f819293c

Fix x86/64 -xenU build. probe_roms() is only defined if building a privileged
or !xen build.

Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
author Ian.Campbell@xensource.com
date Mon May 22 15:03:46 2006 +0100 (2006-05-22)
parents 8fa46042348c
children dbcb8acc8f59
1 /*
2 * linux/arch/x86-64/kernel/setup.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 *
6 * Nov 2001 Dave Jones <davej@suse.de>
7 * Forked from i386 setup code.
8 *
9 * $Id$
10 */
12 /*
13 * This file handles the architecture-dependent parts of initialization
14 */
16 #include <linux/errno.h>
17 #include <linux/sched.h>
18 #include <linux/kernel.h>
19 #include <linux/mm.h>
20 #include <linux/stddef.h>
21 #include <linux/unistd.h>
22 #include <linux/ptrace.h>
23 #include <linux/slab.h>
24 #include <linux/user.h>
25 #include <linux/a.out.h>
26 #include <linux/tty.h>
27 #include <linux/ioport.h>
28 #include <linux/delay.h>
29 #include <linux/config.h>
30 #include <linux/init.h>
31 #include <linux/initrd.h>
32 #include <linux/highmem.h>
33 #include <linux/bootmem.h>
34 #include <linux/module.h>
35 #include <asm/processor.h>
36 #include <linux/console.h>
37 #include <linux/seq_file.h>
38 #include <linux/crash_dump.h>
39 #include <linux/root_dev.h>
40 #include <linux/pci.h>
41 #include <linux/acpi.h>
42 #include <linux/kallsyms.h>
43 #include <linux/edd.h>
44 #include <linux/mmzone.h>
45 #include <linux/kexec.h>
46 #include <linux/cpufreq.h>
47 #include <linux/dmi.h>
48 #include <linux/dma-mapping.h>
50 #include <asm/mtrr.h>
51 #include <asm/uaccess.h>
52 #include <asm/system.h>
53 #include <asm/io.h>
54 #include <asm/smp.h>
55 #include <asm/msr.h>
56 #include <asm/desc.h>
57 #include <video/edid.h>
58 #include <asm/e820.h>
59 #include <asm/dma.h>
60 #include <asm/mpspec.h>
61 #include <asm/mmu_context.h>
62 #include <asm/bootsetup.h>
63 #include <asm/proto.h>
64 #include <asm/setup.h>
65 #include <asm/mach_apic.h>
66 #include <asm/numa.h>
67 #include <asm/swiotlb.h>
68 #include <asm/sections.h>
69 #include <asm/gart-mapping.h>
70 #ifdef CONFIG_XEN
71 #include <linux/percpu.h>
72 #include <xen/interface/physdev.h>
73 #include "setup_arch_pre.h"
74 #include <asm/hypervisor.h>
75 #include <xen/interface/nmi.h>
76 #include <xen/features.h>
77 #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
78 #define PFN_PHYS(x) ((x) << PAGE_SHIFT)
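/* A quick worked example of the two helpers above, assuming 4 KiB pages:
 * PFN_UP(0x1001) == 2 (round up to the next frame), PFN_PHYS(2) == 0x2000. */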
79 #include <asm/mach-xen/setup_arch_post.h>
80 #include <xen/interface/memory.h>
82 extern unsigned long start_pfn;
83 extern struct edid_info edid_info;
85 shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
86 EXPORT_SYMBOL(HYPERVISOR_shared_info);
88 extern char hypercall_page[PAGE_SIZE];
89 EXPORT_SYMBOL(hypercall_page);
91 /* Allows setting of maximum possible memory size */
92 unsigned long xen_override_max_pfn;
94 static int xen_panic_event(struct notifier_block *, unsigned long, void *);
95 static struct notifier_block xen_panic_block = {
96 xen_panic_event, NULL, 0 /* try to go last */
97 };
99 unsigned long *phys_to_machine_mapping;
100 unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];
102 EXPORT_SYMBOL(phys_to_machine_mapping);
104 DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
105 DEFINE_PER_CPU(int, nr_multicall_ents);
107 /* Raw start-of-day parameters from the hypervisor. */
108 start_info_t *xen_start_info;
109 EXPORT_SYMBOL(xen_start_info);
110 #endif
112 /*
113 * Machine setup..
114 */
116 struct cpuinfo_x86 boot_cpu_data __read_mostly;
118 unsigned long mmu_cr4_features;
120 int acpi_disabled;
121 EXPORT_SYMBOL(acpi_disabled);
122 #ifdef CONFIG_ACPI
123 extern int __initdata acpi_ht;
124 extern acpi_interrupt_flags acpi_sci_flags;
125 int __initdata acpi_force = 0;
126 #endif
128 int acpi_numa __initdata;
130 /* Boot loader ID as an integer, for the benefit of proc_dointvec */
131 int bootloader_type;
133 unsigned long saved_video_mode;
135 /*
136 * Setup options
137 */
138 struct screen_info screen_info;
139 struct sys_desc_table_struct {
140 unsigned short length;
141 unsigned char table[0];
142 };
144 struct edid_info edid_info;
145 struct e820map e820;
147 extern int root_mountflags;
149 char command_line[COMMAND_LINE_SIZE];
151 struct resource standard_io_resources[] = {
152 { .name = "dma1", .start = 0x00, .end = 0x1f,
153 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
154 { .name = "pic1", .start = 0x20, .end = 0x21,
155 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
156 { .name = "timer0", .start = 0x40, .end = 0x43,
157 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
158 { .name = "timer1", .start = 0x50, .end = 0x53,
159 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
160 { .name = "keyboard", .start = 0x60, .end = 0x6f,
161 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
162 { .name = "dma page reg", .start = 0x80, .end = 0x8f,
163 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
164 { .name = "pic2", .start = 0xa0, .end = 0xa1,
165 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
166 { .name = "dma2", .start = 0xc0, .end = 0xdf,
167 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
168 { .name = "fpu", .start = 0xf0, .end = 0xff,
169 .flags = IORESOURCE_BUSY | IORESOURCE_IO }
170 };
172 #define STANDARD_IO_RESOURCES \
173 (sizeof standard_io_resources / sizeof standard_io_resources[0])
175 #define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
177 struct resource data_resource = {
178 .name = "Kernel data",
179 .start = 0,
180 .end = 0,
181 .flags = IORESOURCE_RAM,
182 };
183 struct resource code_resource = {
184 .name = "Kernel code",
185 .start = 0,
186 .end = 0,
187 .flags = IORESOURCE_RAM,
188 };
190 #define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
192 #if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
193 static struct resource system_rom_resource = {
194 .name = "System ROM",
195 .start = 0xf0000,
196 .end = 0xfffff,
197 .flags = IORESOURCE_ROM,
198 };
200 static struct resource extension_rom_resource = {
201 .name = "Extension ROM",
202 .start = 0xe0000,
203 .end = 0xeffff,
204 .flags = IORESOURCE_ROM,
205 };
207 static struct resource adapter_rom_resources[] = {
208 { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
209 .flags = IORESOURCE_ROM },
210 { .name = "Adapter ROM", .start = 0, .end = 0,
211 .flags = IORESOURCE_ROM },
212 { .name = "Adapter ROM", .start = 0, .end = 0,
213 .flags = IORESOURCE_ROM },
214 { .name = "Adapter ROM", .start = 0, .end = 0,
215 .flags = IORESOURCE_ROM },
216 { .name = "Adapter ROM", .start = 0, .end = 0,
217 .flags = IORESOURCE_ROM },
218 { .name = "Adapter ROM", .start = 0, .end = 0,
219 .flags = IORESOURCE_ROM }
220 };
221 #endif
223 #define ADAPTER_ROM_RESOURCES \
224 (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
226 #if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
227 static struct resource video_rom_resource = {
228 .name = "Video ROM",
229 .start = 0xc0000,
230 .end = 0xc7fff,
231 .flags = IORESOURCE_ROM,
232 };
233 #endif
235 static struct resource video_ram_resource = {
236 .name = "Video RAM area",
237 .start = 0xa0000,
238 .end = 0xbffff,
239 .flags = IORESOURCE_RAM,
240 };
242 #if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
243 #define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
245 static int __init romchecksum(unsigned char *rom, unsigned long length)
246 {
247 unsigned char *p, sum = 0;
249 for (p = rom; p < rom + length; p++)
250 sum += *p;
251 return sum == 0;
252 }
254 static void __init probe_roms(void)
255 {
256 unsigned long start, length, upper;
257 unsigned char *rom;
258 int i;
260 /* video rom */
261 upper = adapter_rom_resources[0].start;
262 for (start = video_rom_resource.start; start < upper; start += 2048) {
263 rom = isa_bus_to_virt(start);
264 if (!romsignature(rom))
265 continue;
267 video_rom_resource.start = start;
269 /* 0 < length <= 0x7f * 512, historically */
270 length = rom[2] * 512;
272 /* if checksum okay, trust length byte */
273 if (length && romchecksum(rom, length))
274 video_rom_resource.end = start + length - 1;
276 request_resource(&iomem_resource, &video_rom_resource);
277 break;
278 }
280 start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
281 if (start < upper)
282 start = upper;
284 /* system rom */
285 request_resource(&iomem_resource, &system_rom_resource);
286 upper = system_rom_resource.start;
288 /* check for extension rom (ignore length byte!) */
289 rom = isa_bus_to_virt(extension_rom_resource.start);
290 if (romsignature(rom)) {
291 length = extension_rom_resource.end - extension_rom_resource.start + 1;
292 if (romchecksum(rom, length)) {
293 request_resource(&iomem_resource, &extension_rom_resource);
294 upper = extension_rom_resource.start;
295 }
296 }
298 /* check for adapter roms on 2k boundaries */
299 for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
300 rom = isa_bus_to_virt(start);
301 if (!romsignature(rom))
302 continue;
304 /* 0 < length <= 0x7f * 512, historically */
305 length = rom[2] * 512;
307 /* but accept any length that fits if checksum okay */
308 if (!length || start + length > upper || !romchecksum(rom, length))
309 continue;
311 adapter_rom_resources[i].start = start;
312 adapter_rom_resources[i].end = start + length - 1;
313 request_resource(&iomem_resource, &adapter_rom_resources[i]);
315 start = adapter_rom_resources[i++].end & ~2047UL;
316 }
317 }
318 #endif
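/*
 * A minimal sketch (illustrative, not part of the original file) of the
 * validation rules probe_roms() applies above: an expansion ROM starts with
 * the little-endian signature 0xaa55, byte 2 holds its length in 512-byte
 * units, and the byte-wise sum of the whole image must wrap to zero before
 * the length byte is trusted.
 */
static int rom_image_ok(const unsigned char *rom, unsigned long *length)
{
	unsigned char sum = 0;
	unsigned long len, i;

	if (rom[0] != 0x55 || rom[1] != 0xaa)	/* the romsignature() test */
		return 0;
	len = rom[2] * 512;			/* 0 < len <= 0x7f * 512 */
	if (!len)
		return 0;
	for (i = 0; i < len; i++)		/* the romchecksum() test */
		sum += rom[i];
	if (sum)
		return 0;
	*length = len;
	return 1;
}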
320 static __init void parse_cmdline_early (char ** cmdline_p)
321 {
322 char c = ' ', *to = command_line, *from = COMMAND_LINE;
323 int len = 0;
324 int userdef = 0;
326 for (;;) {
327 if (c != ' ')
328 goto next_char;
330 #ifdef CONFIG_SMP
331 /*
332 * If the BIOS enumerates physical processors before logical,
333 * maxcpus=N at enumeration-time can be used to disable HT.
334 */
335 else if (!memcmp(from, "maxcpus=", 8)) {
336 extern unsigned int maxcpus;
338 maxcpus = simple_strtoul(from + 8, NULL, 0);
339 }
340 #endif
341 #ifdef CONFIG_ACPI
342 /* "acpi=off" disables both ACPI table parsing and interpreter init */
343 if (!memcmp(from, "acpi=off", 8))
344 disable_acpi();
346 if (!memcmp(from, "acpi=force", 10)) {
347 /* add later when we do DMI horrors: */
348 acpi_force = 1;
349 acpi_disabled = 0;
350 }
352 /* acpi=ht just means: do ACPI MADT parsing
353 at bootup, but don't enable the full ACPI interpreter */
354 if (!memcmp(from, "acpi=ht", 7)) {
355 if (!acpi_force)
356 disable_acpi();
357 acpi_ht = 1;
358 }
359 else if (!memcmp(from, "pci=noacpi", 10))
360 acpi_disable_pci();
361 else if (!memcmp(from, "acpi=noirq", 10))
362 acpi_noirq_set();
364 else if (!memcmp(from, "acpi_sci=edge", 13))
365 acpi_sci_flags.trigger = 1;
366 else if (!memcmp(from, "acpi_sci=level", 14))
367 acpi_sci_flags.trigger = 3;
368 else if (!memcmp(from, "acpi_sci=high", 13))
369 acpi_sci_flags.polarity = 1;
370 else if (!memcmp(from, "acpi_sci=low", 12))
371 acpi_sci_flags.polarity = 3;
373 /* acpi=strict disables out-of-spec workarounds */
374 else if (!memcmp(from, "acpi=strict", 11)) {
375 acpi_strict = 1;
376 }
377 #ifdef CONFIG_X86_IO_APIC
378 else if (!memcmp(from, "acpi_skip_timer_override", 24))
379 acpi_skip_timer_override = 1;
380 #endif
381 #endif
383 #ifndef CONFIG_XEN
384 if (!memcmp(from, "nolapic", 7) ||
385 !memcmp(from, "disableapic", 11))
386 disable_apic = 1;
388 /* Don't confuse with noapictimer */
389 if (!memcmp(from, "noapic", 6) &&
390 (from[6] == ' ' || from[6] == 0))
391 skip_ioapic_setup = 1;
393 /* Make sure to not confuse with apic= */
394 if (!memcmp(from, "apic", 4) &&
395 (from[4] == ' ' || from[4] == 0)) {
396 skip_ioapic_setup = 0;
397 ioapic_force = 1;
398 }
399 #endif
401 if (!memcmp(from, "mem=", 4))
402 parse_memopt(from+4, &from);
404 if (!memcmp(from, "memmap=", 7)) {
405 /* exactmap option is for user-defined memory */
406 if (!memcmp(from+7, "exactmap", 8)) {
407 #ifdef CONFIG_CRASH_DUMP
408 /* If we are doing a crash dump, we
409 * still need to know the real mem
410 * size before original memory map is
411 * reset.
412 */
413 saved_max_pfn = e820_end_of_ram();
414 #endif
415 from += 8+7;
416 end_pfn_map = 0;
417 e820.nr_map = 0;
418 userdef = 1;
419 }
420 else {
421 parse_memmapopt(from+7, &from);
422 userdef = 1;
423 }
424 }
426 #ifdef CONFIG_NUMA
427 if (!memcmp(from, "numa=", 5))
428 numa_setup(from+5);
429 #endif
431 if (!memcmp(from,"iommu=",6)) {
432 iommu_setup(from+6);
433 }
435 if (!memcmp(from,"oops=panic", 10))
436 panic_on_oops = 1;
438 if (!memcmp(from, "noexec=", 7))
439 nonx_setup(from + 7);
441 #ifdef CONFIG_KEXEC
442 /* crashkernel=size@addr specifies the location to reserve for
443 * a crash kernel. By reserving this memory we guarantee
444 * that Linux never sets it up as a DMA target.
445 * Useful for holding code to do something appropriate
446 * after a kernel panic.
447 */
448 else if (!memcmp(from, "crashkernel=", 12)) {
449 unsigned long size, base;
450 size = memparse(from+12, &from);
451 if (*from == '@') {
452 base = memparse(from+1, &from);
453 /* FIXME: Do I want a sanity check
454 * to validate the memory range?
455 */
456 crashk_res.start = base;
457 crashk_res.end = base + size - 1;
458 }
459 }
460 #endif
462 #ifdef CONFIG_PROC_VMCORE
463 /* elfcorehdr= specifies the location of elf core header
464 * stored by the crashed kernel. This option will be passed
465 * by kexec loader to the capture kernel.
466 */
467 else if(!memcmp(from, "elfcorehdr=", 11))
468 elfcorehdr_addr = memparse(from+11, &from);
469 #endif
471 #if defined(CONFIG_HOTPLUG_CPU) && !defined(CONFIG_XEN)
472 else if (!memcmp(from, "additional_cpus=", 16))
473 setup_additional_cpus(from+16);
474 #endif
476 next_char:
477 c = *(from++);
478 if (!c)
479 break;
480 if (COMMAND_LINE_SIZE <= ++len)
481 break;
482 *(to++) = c;
483 }
484 if (userdef) {
485 printk(KERN_INFO "user-defined physical RAM map:\n");
486 e820_print_map("user");
487 }
488 *to = '\0';
489 *cmdline_p = command_line;
490 }
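/*
 * A minimal sketch, not the kernel's actual memparse(), of the size syntax
 * accepted by mem=, memmap= and crashkernel= above: a number with an
 * optional K/M/G suffix scaling it by 2^10, 2^20 or 2^30.
 */
static unsigned long parse_size_sketch(const char *s, char **retptr)
{
	unsigned long val = simple_strtoul(s, retptr, 0);

	switch (**retptr) {
	case 'G': case 'g':
		val <<= 10;	/* fall through */
	case 'M': case 'm':
		val <<= 10;	/* fall through */
	case 'K': case 'k':
		val <<= 10;
		(*retptr)++;
	}
	return val;
}
/* For "crashkernel=64M@16M" this yields 0x4000000 and leaves *retptr at
   "@16M", from which the base address is parsed the same way. */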
492 #ifndef CONFIG_NUMA
493 static void __init
494 contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
495 {
496 unsigned long bootmap_size, bootmap;
498 bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
499 bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
500 if (bootmap == -1L)
501 panic("Cannot find bootmem map of size %ld\n",bootmap_size);
502 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
503 #ifdef CONFIG_XEN
504 e820_bootmem_free(NODE_DATA(0), 0, xen_start_info->nr_pages<<PAGE_SHIFT);
505 #else
506 e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
507 #endif
508 reserve_bootmem(bootmap, bootmap_size);
509 }
510 #endif
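/*
 * A worked sketch of the sizing done by bootmem_bootmap_pages() above: the
 * bootmem bitmap keeps one bit per page frame, so it needs about end_pfn / 8
 * bytes, rounded up to whole pages.
 */
static unsigned long bootmap_pages_sketch(unsigned long end_pfn)
{
	unsigned long bytes = (end_pfn + 7) / 8;	/* one bit per pfn */

	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;	/* round up to pages */
}
/* e.g. end_pfn = 0x40000 (1 GiB of 4 KiB pages) -> 32 KiB bitmap -> 8 pages. */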
512 /* Use inline assembly to define this, because the NOPs are defined
513 as inline assembly strings in the include files and we cannot
514 easily get them into C strings. */
515 asm("\t.data\nk8nops: "
516 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
517 K8_NOP7 K8_NOP8);
519 extern unsigned char k8nops[];
520 static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
521 NULL,
522 k8nops,
523 k8nops + 1,
524 k8nops + 1 + 2,
525 k8nops + 1 + 2 + 3,
526 k8nops + 1 + 2 + 3 + 4,
527 k8nops + 1 + 2 + 3 + 4 + 5,
528 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
529 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
530 };
532 extern char __vsyscall_0;
534 /* Replace instructions with better alternatives for this CPU type.
536 This runs before SMP is initialized to avoid SMP problems with
537 self-modifying code. This implies that asymmetric systems where
538 APs have fewer capabilities than the boot processor are not handled.
539 In this case boot with "noreplacement". */
540 void apply_alternatives(void *start, void *end)
541 {
542 struct alt_instr *a;
543 int diff, i, k;
544 for (a = start; (void *)a < end; a++) {
545 u8 *instr;
547 if (!boot_cpu_has(a->cpuid))
548 continue;
550 BUG_ON(a->replacementlen > a->instrlen);
551 instr = a->instr;
552 /* vsyscall code is not mapped yet. resolve it manually. */
553 if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END)
554 instr -= VSYSCALL_START - (unsigned long)&__vsyscall_0;
555 __inline_memcpy(instr, a->replacement, a->replacementlen);
556 diff = a->instrlen - a->replacementlen;
558 /* Pad the rest with nops */
559 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
560 k = diff;
561 if (k > ASM_NOP_MAX)
562 k = ASM_NOP_MAX;
563 __inline_memcpy(instr + i, k8_nops[k], k);
564 }
565 }
566 }
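/*
 * A worked example of the padding loop above: if an alternative shrinks a
 * 19-byte instruction sequence to 5 bytes, diff = 14, and the hole is filled
 * largest-first from the k8_nops table: one 8-byte NOP (ASM_NOP_MAX) followed
 * by one 6-byte NOP.
 */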
568 static int no_replacement __initdata = 0;
570 void __init alternative_instructions(void)
571 {
572 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
573 if (no_replacement)
574 return;
575 apply_alternatives(__alt_instructions, __alt_instructions_end);
576 }
578 static int __init noreplacement_setup(char *s)
579 {
580 no_replacement = 1;
581 return 0;
582 }
584 __setup("noreplacement", noreplacement_setup);
586 #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
587 struct edd edd;
588 #ifdef CONFIG_EDD_MODULE
589 EXPORT_SYMBOL(edd);
590 #endif
591 /**
592 * copy_edd() - Copy the BIOS EDD information
593 * from boot_params into a safe place.
594 *
595 */
596 static inline void copy_edd(void)
597 {
598 memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
599 memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
600 edd.mbr_signature_nr = EDD_MBR_SIG_NR;
601 edd.edd_info_nr = EDD_NR;
602 }
603 #else
604 static inline void copy_edd(void)
605 {
606 }
607 #endif
609 #ifndef CONFIG_XEN
610 #define EBDA_ADDR_POINTER 0x40E
611 static void __init reserve_ebda_region(void)
612 {
613 unsigned int addr;
614 /**
615 * there is a real-mode segmented pointer pointing to the
616 * 4K EBDA area at 0x40E
617 */
618 addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);
619 addr <<= 4;
620 if (addr)
621 reserve_bootmem_generic(addr, PAGE_SIZE);
622 }
623 #endif
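/*
 * A worked example for reserve_ebda_region() above: the 16-bit value at
 * physical 0x40E is a real-mode segment, so a typical reading of 0x9fc0
 * becomes physical address 0x9fc0 << 4 = 0x9fc00, and the page at
 * 0x9fc00..0x9ffff is then kept out of bootmem.
 */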
625 void __init setup_arch(char **cmdline_p)
626 {
627 unsigned long kernel_end;
629 #ifdef CONFIG_XEN
630 struct e820entry *machine_e820;
631 struct xen_memory_map memmap;
633 /* Register a call for panic conditions. */
634 notifier_chain_register(&panic_notifier_list, &xen_panic_block);
636 ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
637 kernel_end = 0; /* dummy */
638 screen_info = SCREEN_INFO;
640 if (xen_start_info->flags & SIF_INITDOMAIN) {
641 /* This is drawn from a dump from vgacon:startup in
642 * standard Linux. */
643 screen_info.orig_video_mode = 3;
644 screen_info.orig_video_isVGA = 1;
645 screen_info.orig_video_lines = 25;
646 screen_info.orig_video_cols = 80;
647 screen_info.orig_video_ega_bx = 3;
648 screen_info.orig_video_points = 16;
649 } else
650 screen_info.orig_video_isVGA = 0;
652 edid_info = EDID_INFO;
653 saved_video_mode = SAVED_VIDEO_MODE;
654 bootloader_type = LOADER_TYPE;
656 #ifdef CONFIG_BLK_DEV_RAM
657 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
658 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
659 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
662 #endif
664 setup_xen_features();
666 if (xen_feature(XENFEAT_auto_translated_physmap) &&
667 xen_start_info->shared_info < xen_start_info->nr_pages) {
668 HYPERVISOR_shared_info =
669 (shared_info_t *)__va(xen_start_info->shared_info);
670 memset(empty_zero_page, 0, sizeof(empty_zero_page));
671 }
673 HYPERVISOR_vm_assist(VMASST_CMD_enable,
674 VMASST_TYPE_writable_pagetables);
676 ARCH_SETUP
677 #else
678 ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
679 screen_info = SCREEN_INFO;
680 edid_info = EDID_INFO;
681 saved_video_mode = SAVED_VIDEO_MODE;
682 bootloader_type = LOADER_TYPE;
684 #ifdef CONFIG_BLK_DEV_RAM
685 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
686 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
687 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
688 #endif
689 #endif /* !CONFIG_XEN */
690 setup_memory_region();
691 copy_edd();
693 if (!MOUNT_ROOT_RDONLY)
694 root_mountflags &= ~MS_RDONLY;
695 init_mm.start_code = (unsigned long) &_text;
696 init_mm.end_code = (unsigned long) &_etext;
697 init_mm.end_data = (unsigned long) &_edata;
698 init_mm.brk = (unsigned long) &_end;
700 #ifndef CONFIG_XEN
701 code_resource.start = virt_to_phys(&_text);
702 code_resource.end = virt_to_phys(&_etext)-1;
703 data_resource.start = virt_to_phys(&_etext);
704 data_resource.end = virt_to_phys(&_edata)-1;
705 #endif
707 parse_cmdline_early(cmdline_p);
709 early_identify_cpu(&boot_cpu_data);
711 /*
712 * partially used pages are not usable - thus
713 * we are rounding upwards:
714 */
715 end_pfn = e820_end_of_ram();
717 check_efer();
719 init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
721 #ifdef CONFIG_ACPI_NUMA
722 /*
723 * Parse SRAT to discover nodes.
724 */
725 acpi_numa_init();
726 #endif
728 #ifdef CONFIG_NUMA
729 numa_initmem_init(0, end_pfn);
730 #else
731 contig_initmem_init(0, end_pfn);
732 #endif
734 /* Reserve direct mapping */
735 reserve_bootmem_generic(table_start << PAGE_SHIFT,
736 (table_end - table_start) << PAGE_SHIFT);
738 /* reserve kernel */
739 kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
740 reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);
742 #ifdef CONFIG_XEN
743 /* reserve physmap, start info and initial page tables */
744 reserve_bootmem(kernel_end, table_start<<PAGE_SHIFT);
745 #else
746 /*
747 * reserve physical page 0 - it's a special BIOS page on many boxes,
748 * enabling clean reboots, SMP operation, laptop functions.
749 */
750 reserve_bootmem_generic(0, PAGE_SIZE);
752 /* reserve ebda region */
753 reserve_ebda_region();
754 #endif
756 #ifdef CONFIG_SMP
757 /*
758 * But first pinch a few for the stack/trampoline stuff
759 * FIXME: Don't need the extra page at 4K, but need to fix
760 * trampoline before removing it. (see the GDT stuff)
761 */
762 reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
764 /* Reserve SMP trampoline */
765 reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
766 #endif
768 #ifdef CONFIG_ACPI_SLEEP
769 /*
770 * Reserve low memory region for sleep support.
771 */
772 acpi_reserve_bootmem();
773 #endif
774 #ifdef CONFIG_XEN
775 #ifdef CONFIG_BLK_DEV_INITRD
776 if (xen_start_info->mod_start) {
777 if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
778 /*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
779 initrd_start = INITRD_START + PAGE_OFFSET;
780 initrd_end = initrd_start+INITRD_SIZE;
781 initrd_below_start_ok = 1;
782 } else {
783 printk(KERN_ERR "initrd extends beyond end of memory "
784 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
785 (unsigned long)(INITRD_START + INITRD_SIZE),
786 (unsigned long)(end_pfn << PAGE_SHIFT));
787 initrd_start = 0;
788 }
789 }
790 #endif
791 #else /* CONFIG_XEN */
792 #ifdef CONFIG_BLK_DEV_INITRD
793 if (LOADER_TYPE && INITRD_START) {
794 if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
795 reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
796 initrd_start =
797 INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
798 initrd_end = initrd_start+INITRD_SIZE;
799 }
800 else {
801 printk(KERN_ERR "initrd extends beyond end of memory "
802 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
803 (unsigned long)(INITRD_START + INITRD_SIZE),
804 (unsigned long)(end_pfn << PAGE_SHIFT));
805 initrd_start = 0;
806 }
807 }
808 #endif
809 #endif /* !CONFIG_XEN */
810 #ifdef CONFIG_KEXEC
811 if (crashk_res.start != crashk_res.end) {
812 reserve_bootmem(crashk_res.start,
813 crashk_res.end - crashk_res.start + 1);
814 }
815 #endif
817 paging_init();
818 #ifdef CONFIG_X86_LOCAL_APIC
819 /*
820 * Find and reserve possible boot-time SMP configuration:
821 */
822 find_smp_config();
823 #endif
824 #ifdef CONFIG_XEN
825 {
826 int i, j, k, fpp;
827 unsigned long va;
829 /* 'Initial mapping' of initrd must be destroyed. */
830 for (va = xen_start_info->mod_start;
831 va < (xen_start_info->mod_start+xen_start_info->mod_len);
832 va += PAGE_SIZE) {
833 HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
834 }
836 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
837 /* Make sure we have a large enough P->M table. */
838 phys_to_machine_mapping = alloc_bootmem(
839 end_pfn * sizeof(unsigned long));
840 memset(phys_to_machine_mapping, ~0,
841 end_pfn * sizeof(unsigned long));
842 memcpy(phys_to_machine_mapping,
843 (unsigned long *)xen_start_info->mfn_list,
844 xen_start_info->nr_pages * sizeof(unsigned long));
845 free_bootmem(
846 __pa(xen_start_info->mfn_list),
847 PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
848 sizeof(unsigned long))));
850 /* Destroy the 'initial mapping' of the old p2m table. */
851 for (va = xen_start_info->mfn_list;
852 va < (xen_start_info->mfn_list +
853 (xen_start_info->nr_pages*sizeof(unsigned long)));
854 va += PAGE_SIZE) {
855 HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
856 }
858 /*
859 * Initialise the list of the frames that specify the
860 * list of frames that make up the p2m table. Used by
861 * save/restore.
862 */
863 pfn_to_mfn_frame_list_list = alloc_bootmem(PAGE_SIZE);
864 HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
865 virt_to_mfn(pfn_to_mfn_frame_list_list);
867 fpp = PAGE_SIZE/sizeof(unsigned long);
868 for (i=0, j=0, k=-1; i< end_pfn; i+=fpp, j++) {
869 if ((j % fpp) == 0) {
870 k++;
871 BUG_ON(k>=fpp);
872 pfn_to_mfn_frame_list[k] =
873 alloc_bootmem(PAGE_SIZE);
874 pfn_to_mfn_frame_list_list[k] =
875 virt_to_mfn(pfn_to_mfn_frame_list[k]);
876 j=0;
877 }
878 pfn_to_mfn_frame_list[k][j] =
879 virt_to_mfn(&phys_to_machine_mapping[i]);
880 }
881 HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
882 }
884 }
886 if ( ! (xen_start_info->flags & SIF_INITDOMAIN))
887 {
888 acpi_disabled = 1;
889 #ifdef CONFIG_ACPI
890 acpi_ht = 0;
891 #endif
892 }
893 #endif
895 #ifndef CONFIG_XEN
896 check_ioapic();
897 #endif
899 zap_low_mappings(0);
901 #ifdef CONFIG_ACPI
902 /*
903 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
904 * Call this early for SRAT node setup.
905 */
906 acpi_boot_table_init();
908 /*
909 * Read APIC and some other early information from ACPI tables.
910 */
911 acpi_boot_init();
912 #endif
914 init_cpu_to_node();
916 #ifdef CONFIG_X86_LOCAL_APIC
917 /*
918 * get boot-time SMP configuration:
919 */
920 if (smp_found_config)
921 get_smp_config();
922 #ifndef CONFIG_XEN
923 init_apic_mappings();
924 #endif
925 #endif
926 #if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
927 prefill_possible_map();
928 #endif
930 /*
931 * Request address space for all standard RAM and ROM resources
932 * and also for regions reported as reserved by the e820.
933 */
934 #if defined(CONFIG_XEN_PRIVILEGED_GUEST)
935 probe_roms();
936 if (xen_start_info->flags & SIF_INITDOMAIN) {
937 machine_e820 = alloc_bootmem_low_pages(PAGE_SIZE);
939 memmap.nr_entries = E820MAX;
940 set_xen_guest_handle(memmap.buffer, machine_e820);
942 BUG_ON(HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap));
944 e820_reserve_resources(machine_e820, memmap.nr_entries);
945 }
946 #elif !defined(CONFIG_XEN)
947 probe_roms();
948 e820_reserve_resources(e820.map, e820.nr_map);
949 #endif
951 request_resource(&iomem_resource, &video_ram_resource);
953 {
954 unsigned i;
955 /* request I/O space for devices used on all i[345]86 PCs */
956 for (i = 0; i < STANDARD_IO_RESOURCES; i++)
957 request_resource(&ioport_resource, &standard_io_resources[i]);
958 }
960 #if defined(CONFIG_XEN_PRIVILEGED_GUEST)
961 if (xen_start_info->flags & SIF_INITDOMAIN) {
962 e820_setup_gap(machine_e820, memmap.nr_entries);
963 free_bootmem(__pa(machine_e820), PAGE_SIZE);
964 }
965 #elif !defined(CONFIG_XEN)
966 e820_setup_gap(e820.map, e820.nr_map);
967 #endif
969 #ifdef CONFIG_GART_IOMMU
970 iommu_hole_init();
971 #endif
973 #ifdef CONFIG_XEN
974 {
975 struct physdev_set_iopl set_iopl;
977 set_iopl.iopl = 1;
978 HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
980 if (xen_start_info->flags & SIF_INITDOMAIN) {
981 if (!(xen_start_info->flags & SIF_PRIVILEGED))
982 panic("Xen granted us console access "
983 "but not privileged status");
985 #ifdef CONFIG_VT
986 #if defined(CONFIG_VGA_CONSOLE)
987 conswitchp = &vga_con;
988 #elif defined(CONFIG_DUMMY_CONSOLE)
989 conswitchp = &dummy_con;
990 #endif
991 #endif
992 } else {
993 extern int console_use_vt;
994 console_use_vt = 0;
995 }
996 }
997 #else /* CONFIG_XEN */
999 #ifdef CONFIG_VT
1000 #if defined(CONFIG_VGA_CONSOLE)
1001 conswitchp = &vga_con;
1002 #elif defined(CONFIG_DUMMY_CONSOLE)
1003 conswitchp = &dummy_con;
1004 #endif
1005 #endif
1007 #endif /* !CONFIG_XEN */
1008 }
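/*
 * A minimal lookup sketch (hypothetical helper, CONFIG_XEN only) for the
 * p2m bookkeeping built in setup_arch() above, where fpp = PAGE_SIZE /
 * sizeof(unsigned long) = 512 entries fit in one page: save/restore walks
 * pfn_to_mfn_frame_list_list and pfn_to_mfn_frame_list[] to find the p2m
 * pages, while the kernel itself reads the flat array directly.
 */
#ifdef CONFIG_XEN
static unsigned long p2m_lookup_sketch(unsigned long pfn)
{
	int fpp = PAGE_SIZE / sizeof(unsigned long);
	int k = (pfn / fpp) / fpp;	/* which pfn_to_mfn_frame_list page */
	int j = (pfn / fpp) % fpp;	/* slot within that page */

	/* pfn_to_mfn_frame_list[k][j] is the MFN of the p2m page covering pfn */
	(void)k; (void)j;
	return phys_to_machine_mapping[pfn];	/* the pfn's machine frame */
}
#endif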
1010 #ifdef CONFIG_XEN
1011 static int
1012 xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
1013 {
1014 HYPERVISOR_shutdown(SHUTDOWN_crash);
1015 /* we're never actually going to get here... */
1016 return NOTIFY_DONE;
1017 }
1018 #endif /* CONFIG_XEN */
1021 static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
1022 {
1023 unsigned int *v;
1025 if (c->extended_cpuid_level < 0x80000004)
1026 return 0;
1028 v = (unsigned int *) c->x86_model_id;
1029 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
1030 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
1031 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
1032 c->x86_model_id[48] = 0;
1033 return 1;
1034 }
1037 static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
1038 {
1039 unsigned int n, dummy, eax, ebx, ecx, edx;
1041 n = c->extended_cpuid_level;
1043 if (n >= 0x80000005) {
1044 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
1045 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
1046 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
1047 c->x86_cache_size=(ecx>>24)+(edx>>24);
1048 /* On K8 L1 TLB is inclusive, so don't count it */
1049 c->x86_tlbsize = 0;
1050 }
1052 if (n >= 0x80000006) {
1053 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
1054 ecx = cpuid_ecx(0x80000006);
1055 c->x86_cache_size = ecx >> 16;
1056 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
1058 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
1059 c->x86_cache_size, ecx & 0xFF);
1060 }
1062 if (n >= 0x80000007)
1063 cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
1064 if (n >= 0x80000008) {
1065 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
1066 c->x86_virt_bits = (eax >> 8) & 0xff;
1067 c->x86_phys_bits = eax & 0xff;
1068 }
1069 }
1071 #ifdef CONFIG_NUMA
1072 static int nearby_node(int apicid)
1073 {
1074 int i;
1075 for (i = apicid - 1; i >= 0; i--) {
1076 int node = apicid_to_node[i];
1077 if (node != NUMA_NO_NODE && node_online(node))
1078 return node;
1079 }
1080 for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
1081 int node = apicid_to_node[i];
1082 if (node != NUMA_NO_NODE && node_online(node))
1083 return node;
1084 }
1085 return first_node(node_online_map); /* Shouldn't happen */
1086 }
1087 #endif
1089 /*
1090 * On an AMD dual-core setup, the lower bits of the APIC ID distinguish the cores.
1091 * Assumes number of cores is a power of two.
1092 */
1093 static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
1094 {
1095 #ifdef CONFIG_SMP
1096 int cpu = smp_processor_id();
1097 unsigned bits;
1098 #ifdef CONFIG_NUMA
1099 int node = 0;
1100 unsigned apicid = phys_proc_id[cpu];
1101 #endif
1103 bits = 0;
1104 while ((1 << bits) < c->x86_max_cores)
1105 bits++;
1107 /* Low order bits define the core id (index of core in socket) */
1108 cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
1109 /* Convert the APIC ID into the socket ID */
1110 phys_proc_id[cpu] >>= bits;
1112 #ifdef CONFIG_NUMA
1113 node = phys_proc_id[cpu];
1114 if (apicid_to_node[apicid] != NUMA_NO_NODE)
1115 node = apicid_to_node[apicid];
1116 if (!node_online(node)) {
1117 /* Two possibilities here:
1118 - The CPU is missing memory and no node was created.
1119 In that case try picking one from a nearby CPU
1120 - The APIC IDs differ from the HyperTransport node IDs
1121 which the K8 northbridge parsing fills in.
1122 Assume they are all increased by a constant offset,
1123 but in the same order as the HT nodeids.
1124 If that doesn't result in a usable node fall back to the
1125 path for the previous case. */
1126 int ht_nodeid = apicid - (phys_proc_id[0] << bits);
1127 if (ht_nodeid >= 0 &&
1128 apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
1129 node = apicid_to_node[ht_nodeid];
1130 /* Pick a nearby node */
1131 if (!node_online(node))
1132 node = nearby_node(apicid);
1133 }
1134 numa_set_node(cpu, node);
1136 printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n",
1137 cpu, c->x86_max_cores, node, cpu_core_id[cpu]);
1138 #endif
1139 #endif
1140 }
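/*
 * A worked example of the split in amd_detect_cmp() above, assuming
 * x86_max_cores = 2 so that bits = 1: an initial APIC ID of 5 (binary 101)
 * gives cpu_core_id = 5 & 1 = 1, and shifting right by one bit gives
 * phys_proc_id (the socket) = 2.
 */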
1142 static int __init init_amd(struct cpuinfo_x86 *c)
1143 {
1144 int r;
1145 unsigned level;
1147 #ifdef CONFIG_SMP
1148 unsigned long value;
1150 /*
1151 * Disable TLB flush filter by setting HWCR.FFDIS on K8
1152 * bit 6 of msr C001_0015
1154 * Errata 63 for SH-B3 steppings
1155 * Errata 122 for all steppings (F+ have it disabled by default)
1156 */
1157 if (c->x86 == 15) {
1158 rdmsrl(MSR_K8_HWCR, value);
1159 value |= 1 << 6;
1160 wrmsrl(MSR_K8_HWCR, value);
1161 }
1162 #endif
1164 /* Bit 31 in normal CPUID is used for the nonstandard 3DNow ID;
1165 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway */
1166 clear_bit(0*32+31, &c->x86_capability);
1168 /* On C+ stepping K8 rep microcode works well for copy/memset */
1169 level = cpuid_eax(1);
1170 if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
1171 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
1173 /* Enable workaround for FXSAVE leak */
1174 if (c->x86 >= 6)
1175 set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
1177 r = get_model_name(c);
1178 if (!r) {
1179 switch (c->x86) {
1180 case 15:
1181 /* Should distinguish models here, but this is only
1182 a fallback anyway. */
1183 strcpy(c->x86_model_id, "Hammer");
1184 break;
1185 }
1186 }
1187 display_cacheinfo(c);
1189 /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
1190 if (c->x86_power & (1<<8))
1191 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
1193 if (c->extended_cpuid_level >= 0x80000008) {
1194 c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
1195 if (c->x86_max_cores & (c->x86_max_cores - 1))
1196 c->x86_max_cores = 1;
1198 amd_detect_cmp(c);
1199 }
1201 return r;
1202 }
1204 static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
1205 {
1206 #ifdef CONFIG_SMP
1207 u32 eax, ebx, ecx, edx;
1208 int index_msb, core_bits;
1209 int cpu = smp_processor_id();
1211 cpuid(1, &eax, &ebx, &ecx, &edx);
1213 c->apicid = phys_pkg_id(0);
1215 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
1216 return;
1218 smp_num_siblings = (ebx & 0xff0000) >> 16;
1220 if (smp_num_siblings == 1) {
1221 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
1222 } else if (smp_num_siblings > 1 ) {
1224 if (smp_num_siblings > NR_CPUS) {
1225 printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings);
1226 smp_num_siblings = 1;
1227 return;
1228 }
1230 index_msb = get_count_order(smp_num_siblings);
1231 phys_proc_id[cpu] = phys_pkg_id(index_msb);
1233 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
1234 phys_proc_id[cpu]);
1236 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
1238 index_msb = get_count_order(smp_num_siblings) ;
1240 core_bits = get_count_order(c->x86_max_cores);
1242 cpu_core_id[cpu] = phys_pkg_id(index_msb) &
1243 ((1 << core_bits) - 1);
1245 if (c->x86_max_cores > 1)
1246 printk(KERN_INFO "CPU: Processor Core ID: %d\n",
1247 cpu_core_id[cpu]);
1248 }
1249 #endif
1250 }
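/*
 * A worked example for detect_ht() above: CPUID.1 EBX[23:16] = 4 logical
 * CPUs per package with x86_max_cores = 2.  The package ID drops the low
 * get_count_order(4) = 2 APIC-ID bits; the remaining 4 / 2 = 2 threads per
 * core give index_msb = core_bits = 1, so the core ID is phys_pkg_id(1)
 * masked with (1 << 1) - 1.
 */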
1252 /*
1253 * find out the number of processor cores on the die
1254 */
1255 static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
1256 {
1257 unsigned int eax;
1259 if (c->cpuid_level < 4)
1260 return 1;
1262 __asm__("cpuid"
1263 : "=a" (eax)
1264 : "0" (4), "c" (0)
1265 : "bx", "dx");
1267 if (eax & 0x1f)
1268 return ((eax >> 26) + 1);
1269 else
1270 return 1;
1271 }
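/*
 * A worked example for intel_num_cpu_cores() above: CPUID leaf 4 (ECX = 0)
 * returns the highest core index per package in EAX[31:26]; for, say,
 * EAX = 0x04000121 that field is 1, so the function reports 1 + 1 = 2 cores.
 */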
1273 static void srat_detect_node(void)
1274 {
1275 #ifdef CONFIG_NUMA
1276 unsigned node;
1277 int cpu = smp_processor_id();
1279 /* For now, don't do the funky fallback heuristics that the
1280 AMD version employs. */
1281 node = apicid_to_node[hard_smp_processor_id()];
1282 if (node == NUMA_NO_NODE)
1283 node = 0;
1284 numa_set_node(cpu, node);
1286 if (acpi_numa > 0)
1287 printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node);
1288 #endif
1289 }
1291 static void __cpuinit init_intel(struct cpuinfo_x86 *c)
1292 {
1293 /* Cache sizes */
1294 unsigned n;
1296 init_intel_cacheinfo(c);
1297 n = c->extended_cpuid_level;
1298 if (n >= 0x80000008) {
1299 unsigned eax = cpuid_eax(0x80000008);
1300 c->x86_virt_bits = (eax >> 8) & 0xff;
1301 c->x86_phys_bits = eax & 0xff;
1302 /* CPUID workaround for Intel 0F34 CPU */
1303 if (c->x86_vendor == X86_VENDOR_INTEL &&
1304 c->x86 == 0xF && c->x86_model == 0x3 &&
1305 c->x86_mask == 0x4)
1306 c->x86_phys_bits = 36;
1307 }
1309 if (c->x86 == 15)
1310 c->x86_cache_alignment = c->x86_clflush_size * 2;
1311 if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
1312 (c->x86 == 0x6 && c->x86_model >= 0x0e))
1313 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
1314 set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
1315 c->x86_max_cores = intel_num_cpu_cores(c);
1317 srat_detect_node();
1318 }
1320 static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
1321 {
1322 char *v = c->x86_vendor_id;
1324 if (!strcmp(v, "AuthenticAMD"))
1325 c->x86_vendor = X86_VENDOR_AMD;
1326 else if (!strcmp(v, "GenuineIntel"))
1327 c->x86_vendor = X86_VENDOR_INTEL;
1328 else
1329 c->x86_vendor = X86_VENDOR_UNKNOWN;
1330 }
1332 struct cpu_model_info {
1333 int vendor;
1334 int family;
1335 char *model_names[16];
1336 };
1338 /* Do some early cpuid on the boot CPU to get some parameters that are
1339 needed before check_bugs. Everything advanced is in identify_cpu
1340 below. */
1341 void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
1342 {
1343 u32 tfms;
1345 c->loops_per_jiffy = loops_per_jiffy;
1346 c->x86_cache_size = -1;
1347 c->x86_vendor = X86_VENDOR_UNKNOWN;
1348 c->x86_model = c->x86_mask = 0; /* So far unknown... */
1349 c->x86_vendor_id[0] = '\0'; /* Unset */
1350 c->x86_model_id[0] = '\0'; /* Unset */
1351 c->x86_clflush_size = 64;
1352 c->x86_cache_alignment = c->x86_clflush_size;
1353 c->x86_max_cores = 1;
1354 c->extended_cpuid_level = 0;
1355 memset(&c->x86_capability, 0, sizeof c->x86_capability);
1357 /* Get vendor name */
1358 cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
1359 (unsigned int *)&c->x86_vendor_id[0],
1360 (unsigned int *)&c->x86_vendor_id[8],
1361 (unsigned int *)&c->x86_vendor_id[4]);
1363 get_cpu_vendor(c);
1365 /* Initialize the standard set of capabilities */
1366 /* Note that the vendor-specific code below might override */
1368 /* Intel-defined flags: level 0x00000001 */
1369 if (c->cpuid_level >= 0x00000001) {
1370 __u32 misc;
1371 cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
1372 &c->x86_capability[0]);
1373 c->x86 = (tfms >> 8) & 0xf;
1374 c->x86_model = (tfms >> 4) & 0xf;
1375 c->x86_mask = tfms & 0xf;
1376 if (c->x86 == 0xf)
1377 c->x86 += (tfms >> 20) & 0xff;
1378 if (c->x86 >= 0x6)
1379 c->x86_model += ((tfms >> 16) & 0xF) << 4;
1380 if (c->x86_capability[0] & (1<<19))
1381 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
1382 } else {
1383 /* Have CPUID level 0 only - unheard of */
1384 c->x86 = 4;
1385 }
1387 #ifdef CONFIG_SMP
1388 phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
1389 #endif
1390 }
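/*
 * A worked example of the decoding above (illustrative helper), using the
 * sample CPUID.1 EAX value 0x00000f48 (the K8 revision cutoff tested in
 * init_amd()):
 */
static void tfms_decode_sketch(void)
{
	u32 tfms = 0x00000f48;
	unsigned family = (tfms >> 8) & 0xf;		/* 0xf */
	unsigned model = (tfms >> 4) & 0xf;		/* 0x4 */
	unsigned stepping = tfms & 0xf;			/* 0x8 */

	if (family == 0xf)
		family += (tfms >> 20) & 0xff;		/* extended family: still 15 */
	if (family >= 0x6)
		model += ((tfms >> 16) & 0xf) << 4;	/* extended model: still 4 */
	(void)stepping;		/* i.e. family 15, model 4, stepping 8 */
}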
1392 /*
1393 * This does the hard work of actually picking apart the CPU stuff...
1394 */
1395 void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
1396 {
1397 int i;
1398 u32 xlvl;
1400 early_identify_cpu(c);
1402 /* AMD-defined flags: level 0x80000001 */
1403 xlvl = cpuid_eax(0x80000000);
1404 c->extended_cpuid_level = xlvl;
1405 if ((xlvl & 0xffff0000) == 0x80000000) {
1406 if (xlvl >= 0x80000001) {
1407 c->x86_capability[1] = cpuid_edx(0x80000001);
1408 c->x86_capability[6] = cpuid_ecx(0x80000001);
1409 }
1410 if (xlvl >= 0x80000004)
1411 get_model_name(c); /* Default name */
1412 }
1414 /* Transmeta-defined flags: level 0x80860001 */
1415 xlvl = cpuid_eax(0x80860000);
1416 if ((xlvl & 0xffff0000) == 0x80860000) {
1417 /* Don't set x86_cpuid_level here for now, to avoid confusion. */
1418 if (xlvl >= 0x80860001)
1419 c->x86_capability[2] = cpuid_edx(0x80860001);
1420 }
1422 /*
1423 * Vendor-specific initialization. In this section we
1424 * canonicalize the feature flags, meaning if there are
1425 * features a certain CPU supports which CPUID doesn't
1426 * tell us, CPUID claiming incorrect flags, or other bugs,
1427 * we handle them here.
1429 * At the end of this section, c->x86_capability better
1430 * indicate the features this CPU genuinely supports!
1431 */
1432 switch (c->x86_vendor) {
1433 case X86_VENDOR_AMD:
1434 init_amd(c);
1435 break;
1437 case X86_VENDOR_INTEL:
1438 init_intel(c);
1439 break;
1441 case X86_VENDOR_UNKNOWN:
1442 default:
1443 display_cacheinfo(c);
1444 break;
1445 }
1447 select_idle_routine(c);
1448 detect_ht(c);
1450 /*
1451 * On SMP, boot_cpu_data holds the common feature set between
1452 * all CPUs; so make sure that we indicate which features are
1453 * common between the CPUs. The first time this routine gets
1454 * executed, c == &boot_cpu_data.
1455 */
1456 if (c != &boot_cpu_data) {
1457 /* AND the already accumulated flags with these */
1458 for (i = 0 ; i < NCAPINTS ; i++)
1459 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1460 }
1462 #ifdef CONFIG_X86_MCE
1463 mcheck_init(c);
1464 #endif
1465 if (c == &boot_cpu_data)
1466 mtrr_bp_init();
1467 else
1468 mtrr_ap_init();
1469 #ifdef CONFIG_NUMA
1470 numa_add_cpu(smp_processor_id());
1471 #endif
1472 }
1475 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
1476 {
1477 if (c->x86_model_id[0])
1478 printk("%s", c->x86_model_id);
1480 if (c->x86_mask || c->cpuid_level >= 0)
1481 printk(" stepping %02x\n", c->x86_mask);
1482 else
1483 printk("\n");
1484 }
1486 /*
1487 * Get CPU information for use by the procfs.
1488 */
1490 static int show_cpuinfo(struct seq_file *m, void *v)
1491 {
1492 struct cpuinfo_x86 *c = v;
1494 /*
1495 * These flag bits must match the definitions in <asm/cpufeature.h>.
1496 * NULL means this bit is undefined or reserved; either way it doesn't
1497 * have meaning as far as Linux is concerned. Note that it's important
1498 * to realize there is a difference between this table and CPUID -- if
1499 * applications want to get the raw CPUID data, they should access
1500 * /dev/cpu/<cpu_nr>/cpuid instead.
1501 */
1502 static char *x86_cap_flags[] = {
1503 /* Intel-defined */
1504 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
1505 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
1506 "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
1507 "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
1509 /* AMD-defined */
1510 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1511 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
1512 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
1513 NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow",
1515 /* Transmeta-defined */
1516 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
1517 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1518 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1519 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1521 /* Other (Linux-defined) */
1522 "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
1523 "constant_tsc", NULL, NULL,
1524 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1525 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1526 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1528 /* Intel-defined (#2) */
1529 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", NULL, "est",
1530 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
1531 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1532 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1534 /* VIA/Cyrix/Centaur-defined */
1535 NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
1536 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1537 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1538 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1540 /* AMD-defined (#2) */
1541 "lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
1542 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1543 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1544 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1545 };
1546 static char *x86_power_flags[] = {
1547 "ts", /* temperature sensor */
1548 "fid", /* frequency id control */
1549 "vid", /* voltage id control */
1550 "ttp", /* thermal trip */
1551 "tm",
1552 "stc",
1553 NULL,
1554 /* nothing */ /* constant_tsc - moved to flags */
1555 };
1558 #ifdef CONFIG_SMP
1559 if (!cpu_online(c-cpu_data))
1560 return 0;
1561 #endif
1563 seq_printf(m,"processor\t: %u\n"
1564 "vendor_id\t: %s\n"
1565 "cpu family\t: %d\n"
1566 "model\t\t: %d\n"
1567 "model name\t: %s\n",
1568 (unsigned)(c-cpu_data),
1569 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
1570 c->x86,
1571 (int)c->x86_model,
1572 c->x86_model_id[0] ? c->x86_model_id : "unknown");
1574 if (c->x86_mask || c->cpuid_level >= 0)
1575 seq_printf(m, "stepping\t: %d\n", c->x86_mask);
1576 else
1577 seq_printf(m, "stepping\t: unknown\n");
1579 if (cpu_has(c,X86_FEATURE_TSC)) {
1580 unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
1581 if (!freq)
1582 freq = cpu_khz;
1583 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
1584 freq / 1000, (freq % 1000));
1585 }
1587 /* Cache size */
1588 if (c->x86_cache_size >= 0)
1589 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
1591 #ifdef CONFIG_SMP
1592 if (smp_num_siblings * c->x86_max_cores > 1) {
1593 int cpu = c - cpu_data;
1594 seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
1595 seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
1596 seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
1597 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
1598 }
1599 #endif
1601 seq_printf(m,
1602 "fpu\t\t: yes\n"
1603 "fpu_exception\t: yes\n"
1604 "cpuid level\t: %d\n"
1605 "wp\t\t: yes\n"
1606 "flags\t\t:",
1607 c->cpuid_level);
1609 {
1610 int i;
1611 for ( i = 0 ; i < 32*NCAPINTS ; i++ )
1612 if ( test_bit(i, &c->x86_capability) &&
1613 x86_cap_flags[i] != NULL )
1614 seq_printf(m, " %s", x86_cap_flags[i]);
1615 }
1617 seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
1618 c->loops_per_jiffy/(500000/HZ),
1619 (c->loops_per_jiffy/(5000/HZ)) % 100);
1621 if (c->x86_tlbsize > 0)
1622 seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
1623 seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
1624 seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
1626 seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
1627 c->x86_phys_bits, c->x86_virt_bits);
1629 seq_printf(m, "power management:");
1630 {
1631 unsigned i;
1632 for (i = 0; i < 32; i++)
1633 if (c->x86_power & (1 << i)) {
1634 if (i < ARRAY_SIZE(x86_power_flags) &&
1635 x86_power_flags[i])
1636 seq_printf(m, "%s%s",
1637 x86_power_flags[i][0]?" ":"",
1638 x86_power_flags[i]);
1639 else
1640 seq_printf(m, " [%d]", i);
1641 }
1642 }
1644 seq_printf(m, "\n\n");
1646 return 0;
1647 }
1649 static void *c_start(struct seq_file *m, loff_t *pos)
1650 {
1651 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
1652 }
1654 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1655 {
1656 ++*pos;
1657 return c_start(m, pos);
1658 }
1660 static void c_stop(struct seq_file *m, void *v)
1661 {
1662 }
1664 struct seq_operations cpuinfo_op = {
1665 .start =c_start,
1666 .next = c_next,
1667 .stop = c_stop,
1668 .show = show_cpuinfo,
1669 };
1671 static int __init run_dmi_scan(void)
1672 {
1673 dmi_scan_machine();
1674 return 0;
1675 }
1676 core_initcall(run_dmi_scan);