ia64/xen-unstable

view linux-2.6-xen-sparse/arch/x86_64/kernel/setup-xen.c @ 8815:ee9691e23dbe

Don't specify VT console structure at all when specifying
!console_use_vt. The dummy_con just confuses the VT
implementation and you get no console output without manually
specifying a console= boot parameter.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Feb 09 17:42:51 2006 +0100 (2006-02-09)
parents 5aad88c8d379
children 1ca3d63e7008
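
The fix affects the non-privileged branch of setup_arch() below, which now
only clears console_use_vt and deliberately leaves conswitchp unset (a sketch
of the relevant logic, not the full diff):

    } else {
        extern int console_use_vt;
        console_use_vt = 0;    /* no VT; conswitchp stays NULL */
    }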
line source
/*
 *  linux/arch/x86-64/kernel/setup.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Nov 2001 Dave Jones <davej@suse.de>
 *  Forked from i386 setup code.
 *
 *  $Id$
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <video/edid.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/bootsetup.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
#include <asm/swiotlb.h>
#include <asm/sections.h>
#include <asm/gart-mapping.h>
#ifdef CONFIG_XEN
#include <linux/percpu.h>
#include <xen/interface/physdev.h>
#include "setup_arch_pre.h"
#include <asm/hypervisor.h>
#include <xen/interface/nmi.h>
#include <xen/features.h>
#define PFN_UP(x)   (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
#define end_pfn_map end_pfn
#include <asm/mach-xen/setup_arch_post.h>

extern unsigned long start_pfn;
extern struct edid_info edid_info;

shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
EXPORT_SYMBOL(HYPERVISOR_shared_info);

extern char hypercall_page[PAGE_SIZE];
EXPORT_SYMBOL(hypercall_page);

/* Allows setting of maximum possible memory size */
unsigned long xen_override_max_pfn;

unsigned long *phys_to_machine_mapping;
unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];

EXPORT_SYMBOL(phys_to_machine_mapping);
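
/* Per-CPU batch of multicall entries; queued hypercalls are issued
   together via a single HYPERVISOR_multicall. */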
DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
DEFINE_PER_CPU(int, nr_multicall_ents);

/* Raw start-of-day parameters from the hypervisor. */
start_info_t *xen_start_info;
EXPORT_SYMBOL(xen_start_info);
#endif
/*
 * Machine setup..
 */

struct cpuinfo_x86 boot_cpu_data __read_mostly;

unsigned long mmu_cr4_features;

int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
#ifdef CONFIG_ACPI
extern int __initdata acpi_ht;
extern acpi_interrupt_flags acpi_sci_flags;
int __initdata acpi_force = 0;
#endif

int acpi_numa __initdata;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

/*
 * Setup options
 */
struct drive_info_struct { char dummy[32]; } drive_info;
struct screen_info screen_info;
struct sys_desc_table_struct {
    unsigned short length;
    unsigned char table[0];
};

struct edid_info edid_info;
struct e820map e820;

extern int root_mountflags;

char command_line[COMMAND_LINE_SIZE];
struct resource standard_io_resources[] = {
    { .name = "dma1", .start = 0x00, .end = 0x1f,
        .flags = IORESOURCE_BUSY | IORESOURCE_IO },
    { .name = "pic1", .start = 0x20, .end = 0x21,
        .flags = IORESOURCE_BUSY | IORESOURCE_IO },
    { .name = "timer0", .start = 0x40, .end = 0x43,
        .flags = IORESOURCE_BUSY | IORESOURCE_IO },
    { .name = "timer1", .start = 0x50, .end = 0x53,
        .flags = IORESOURCE_BUSY | IORESOURCE_IO },
    { .name = "keyboard", .start = 0x60, .end = 0x6f,
        .flags = IORESOURCE_BUSY | IORESOURCE_IO },
    { .name = "dma page reg", .start = 0x80, .end = 0x8f,
        .flags = IORESOURCE_BUSY | IORESOURCE_IO },
    { .name = "pic2", .start = 0xa0, .end = 0xa1,
        .flags = IORESOURCE_BUSY | IORESOURCE_IO },
    { .name = "dma2", .start = 0xc0, .end = 0xdf,
        .flags = IORESOURCE_BUSY | IORESOURCE_IO },
    { .name = "fpu", .start = 0xf0, .end = 0xff,
        .flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define STANDARD_IO_RESOURCES \
    (sizeof standard_io_resources / sizeof standard_io_resources[0])

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
struct resource data_resource = {
    .name = "Kernel data",
    .start = 0,
    .end = 0,
    .flags = IORESOURCE_RAM,
};
struct resource code_resource = {
    .name = "Kernel code",
    .start = 0,
    .end = 0,
    .flags = IORESOURCE_RAM,
};

#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)

#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
static struct resource system_rom_resource = {
    .name = "System ROM",
    .start = 0xf0000,
    .end = 0xfffff,
    .flags = IORESOURCE_ROM,
};

static struct resource extension_rom_resource = {
    .name = "Extension ROM",
    .start = 0xe0000,
    .end = 0xeffff,
    .flags = IORESOURCE_ROM,
};

static struct resource adapter_rom_resources[] = {
    { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
        .flags = IORESOURCE_ROM },
    { .name = "Adapter ROM", .start = 0, .end = 0,
        .flags = IORESOURCE_ROM },
    { .name = "Adapter ROM", .start = 0, .end = 0,
        .flags = IORESOURCE_ROM },
    { .name = "Adapter ROM", .start = 0, .end = 0,
        .flags = IORESOURCE_ROM },
    { .name = "Adapter ROM", .start = 0, .end = 0,
        .flags = IORESOURCE_ROM },
    { .name = "Adapter ROM", .start = 0, .end = 0,
        .flags = IORESOURCE_ROM }
};
#endif

#define ADAPTER_ROM_RESOURCES \
    (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])

#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
static struct resource video_rom_resource = {
    .name = "Video ROM",
    .start = 0xc0000,
    .end = 0xc7fff,
    .flags = IORESOURCE_ROM,
};
#endif

static struct resource video_ram_resource = {
    .name = "Video RAM area",
    .start = 0xa0000,
    .end = 0xbffff,
    .flags = IORESOURCE_RAM,
};
#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)

static int __init romchecksum(unsigned char *rom, unsigned long length)
{
    unsigned char *p, sum = 0;

    for (p = rom; p < rom + length; p++)
        sum += *p;
    return sum == 0;
}
static void __init probe_roms(void)
{
    unsigned long start, length, upper;
    unsigned char *rom;
    int i;

    /* video rom */
    upper = adapter_rom_resources[0].start;
    for (start = video_rom_resource.start; start < upper; start += 2048) {
        rom = isa_bus_to_virt(start);
        if (!romsignature(rom))
            continue;

        video_rom_resource.start = start;

        /* 0 < length <= 0x7f * 512, historically */
        length = rom[2] * 512;

        /* if checksum okay, trust length byte */
        if (length && romchecksum(rom, length))
            video_rom_resource.end = start + length - 1;

        request_resource(&iomem_resource, &video_rom_resource);
        break;
    }

    start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
    if (start < upper)
        start = upper;

    /* system rom */
    request_resource(&iomem_resource, &system_rom_resource);
    upper = system_rom_resource.start;

    /* check for extension rom (ignore length byte!) */
    rom = isa_bus_to_virt(extension_rom_resource.start);
    if (romsignature(rom)) {
        length = extension_rom_resource.end - extension_rom_resource.start + 1;
        if (romchecksum(rom, length)) {
            request_resource(&iomem_resource, &extension_rom_resource);
            upper = extension_rom_resource.start;
        }
    }

    /* check for adapter roms on 2k boundaries */
    for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
        rom = isa_bus_to_virt(start);
        if (!romsignature(rom))
            continue;

        /* 0 < length <= 0x7f * 512, historically */
        length = rom[2] * 512;

        /* but accept any length that fits if checksum okay */
        if (!length || start + length > upper || !romchecksum(rom, length))
            continue;

        adapter_rom_resources[i].start = start;
        adapter_rom_resources[i].end = start + length - 1;
        request_resource(&iomem_resource, &adapter_rom_resources[i]);

        start = adapter_rom_resources[i++].end & ~2047UL;
    }
}
#endif
static __init void parse_cmdline_early (char ** cmdline_p)
{
    char c = ' ', *to = command_line, *from = COMMAND_LINE;
    int len = 0;
    int userdef = 0;

    for (;;) {
        if (c != ' ')
            goto next_char;

#ifdef CONFIG_SMP
        /*
         * If the BIOS enumerates physical processors before logical,
         * maxcpus=N at enumeration-time can be used to disable HT.
         */
        else if (!memcmp(from, "maxcpus=", 8)) {
            extern unsigned int maxcpus;

            maxcpus = simple_strtoul(from + 8, NULL, 0);
        }
#endif
#ifdef CONFIG_ACPI
        /* "acpi=off" disables both ACPI table parsing and interpreter init */
        if (!memcmp(from, "acpi=off", 8))
            disable_acpi();

        if (!memcmp(from, "acpi=force", 10)) {
            /* add later when we do DMI horrors: */
            acpi_force = 1;
            acpi_disabled = 0;
        }

        /* acpi=ht just means: do ACPI MADT parsing
           at bootup, but don't enable the full ACPI interpreter */
        if (!memcmp(from, "acpi=ht", 7)) {
            if (!acpi_force)
                disable_acpi();
            acpi_ht = 1;
        }
        else if (!memcmp(from, "pci=noacpi", 10))
            acpi_disable_pci();
        else if (!memcmp(from, "acpi=noirq", 10))
            acpi_noirq_set();

        else if (!memcmp(from, "acpi_sci=edge", 13))
            acpi_sci_flags.trigger = 1;
        else if (!memcmp(from, "acpi_sci=level", 14))
            acpi_sci_flags.trigger = 3;
        else if (!memcmp(from, "acpi_sci=high", 13))
            acpi_sci_flags.polarity = 1;
        else if (!memcmp(from, "acpi_sci=low", 12))
            acpi_sci_flags.polarity = 3;

        /* acpi=strict disables out-of-spec workarounds */
        else if (!memcmp(from, "acpi=strict", 11)) {
            acpi_strict = 1;
        }
#ifdef CONFIG_X86_IO_APIC
        else if (!memcmp(from, "acpi_skip_timer_override", 24))
            acpi_skip_timer_override = 1;
#endif
#endif

#ifndef CONFIG_XEN
        if (!memcmp(from, "nolapic", 7) ||
            !memcmp(from, "disableapic", 11))
            disable_apic = 1;

        /* Don't confuse with noapictimer */
        if (!memcmp(from, "noapic", 6) &&
            (from[6] == ' ' || from[6] == 0))
            skip_ioapic_setup = 1;

        /* Make sure to not confuse with apic= */
        if (!memcmp(from, "apic", 4) &&
            (from[4] == ' ' || from[4] == 0)) {
            skip_ioapic_setup = 0;
            ioapic_force = 1;
        }
#endif

        if (!memcmp(from, "mem=", 4))
            parse_memopt(from+4, &from);

        if (!memcmp(from, "memmap=", 7)) {
            /* exactmap option is for user defined memory */
            if (!memcmp(from+7, "exactmap", 8)) {
#ifdef CONFIG_CRASH_DUMP
                /* If we are doing a crash dump, we
                 * still need to know the real mem
                 * size before original memory map is
                 * reset.
                 */
                saved_max_pfn = e820_end_of_ram();
#endif
                from += 8+7;
                end_pfn_map = 0;
                e820.nr_map = 0;
                userdef = 1;
            }
            else {
                parse_memmapopt(from+7, &from);
                userdef = 1;
            }
        }

#ifdef CONFIG_NUMA
        if (!memcmp(from, "numa=", 5))
            numa_setup(from+5);
#endif

        if (!memcmp(from, "iommu=", 6)) {
            iommu_setup(from+6);
        }

        if (!memcmp(from, "oops=panic", 10))
            panic_on_oops = 1;

        if (!memcmp(from, "noexec=", 7))
            nonx_setup(from + 7);

#ifdef CONFIG_KEXEC
        /* crashkernel=size@addr specifies the location to reserve for
         * a crash kernel.  By reserving this memory we guarantee
         * that linux never sets it up as a DMA target.
         * Useful for holding code to do something appropriate
         * after a kernel panic.
         */
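        /* e.g. "crashkernel=64M@16M" reserves 64MB starting at 16MB. */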
444 else if (!memcmp(from, "crashkernel=", 12)) {
445 unsigned long size, base;
446 size = memparse(from+12, &from);
447 if (*from == '@') {
448 base = memparse(from+1, &from);
449 /* FIXME: Do I want a sanity check
450 * to validate the memory range?
451 */
452 crashk_res.start = base;
453 crashk_res.end = base + size - 1;
454 }
455 }
456 #endif
458 #ifdef CONFIG_PROC_VMCORE
459 /* elfcorehdr= specifies the location of elf core header
460 * stored by the crashed kernel. This option will be passed
461 * by kexec loader to the capture kernel.
462 */
463 else if(!memcmp(from, "elfcorehdr=", 11))
464 elfcorehdr_addr = memparse(from+11, &from);
465 #endif
466 next_char:
467 c = *(from++);
468 if (!c)
469 break;
470 if (COMMAND_LINE_SIZE <= ++len)
471 break;
472 *(to++) = c;
473 }
474 if (userdef) {
475 printk(KERN_INFO "user-defined physical RAM map:\n");
476 e820_print_map("user");
477 }
478 *to = '\0';
479 *cmdline_p = command_line;
480 }
#ifndef CONFIG_NUMA
#ifdef CONFIG_XEN
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
    unsigned long bootmap_size;

    bootmap_size = init_bootmem(start_pfn, end_pfn);
    free_bootmem(0, xen_start_info->nr_pages << PAGE_SHIFT);
    reserve_bootmem(HIGH_MEMORY,
                    (PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE-1)
                    - HIGH_MEMORY);
}
#else
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
    unsigned long bootmap_size, bootmap;

    bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
    bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
    if (bootmap == -1L)
        panic("Cannot find bootmem map of size %ld\n", bootmap_size);
    bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
    e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
    reserve_bootmem(bootmap, bootmap_size);
}
#endif /* !CONFIG_XEN */
#endif
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nk8nops: "
    K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
    K8_NOP7 K8_NOP8);

extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
    NULL,
    k8nops,
    k8nops + 1,
    k8nops + 1 + 2,
    k8nops + 1 + 2 + 3,
    k8nops + 1 + 2 + 3 + 4,
    k8nops + 1 + 2 + 3 + 4 + 5,
    k8nops + 1 + 2 + 3 + 4 + 5 + 6,
    k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
extern char __vsyscall_0;

/* Replace instructions with better alternatives for this CPU type.

   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code. This implies that asymmetric systems where
   APs have less capabilities than the boot processor are not handled.
   In this case boot with "noreplacement". */
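/* Each alt_instr record names a CPU feature bit, the original instructions
   and a replacement; when boot_cpu_has() reports the feature, the replacement
   is copied over the original and any leftover bytes are padded with the K8
   nops defined above. */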
void apply_alternatives(void *start, void *end)
{
    struct alt_instr *a;
    int diff, i, k;
    for (a = start; (void *)a < end; a++) {
        u8 *instr;

        if (!boot_cpu_has(a->cpuid))
            continue;

        BUG_ON(a->replacementlen > a->instrlen);
        instr = a->instr;
        /* vsyscall code is not mapped yet. resolve it manually. */
        if (instr >= (u8 *)VSYSCALL_START && instr < (u8 *)VSYSCALL_END)
            instr -= VSYSCALL_START - (unsigned long)&__vsyscall_0;
        __inline_memcpy(instr, a->replacement, a->replacementlen);
        diff = a->instrlen - a->replacementlen;

        /* Pad the rest with nops */
        for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
            k = diff;
            if (k > ASM_NOP_MAX)
                k = ASM_NOP_MAX;
            __inline_memcpy(instr + i, k8_nops[k], k);
        }
    }
}

static int no_replacement __initdata = 0;

void __init alternative_instructions(void)
{
    extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
    if (no_replacement)
        return;
    apply_alternatives(__alt_instructions, __alt_instructions_end);
}

static int __init noreplacement_setup(char *s)
{
    no_replacement = 1;
    return 0;
}

__setup("noreplacement", noreplacement_setup);
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
    memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
    memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
    edd.mbr_signature_nr = EDD_MBR_SIG_NR;
    edd.edd_info_nr = EDD_NR;
}
#else
static inline void copy_edd(void)
{
}
#endif
#ifndef CONFIG_XEN
#define EBDA_ADDR_POINTER 0x40E
static void __init reserve_ebda_region(void)
{
    unsigned int addr;
    /**
     * there is a real-mode segmented pointer pointing to the
     * 4K EBDA area at 0x40E
     */
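    /* e.g. a stored value of 0x9fc0 locates the EBDA at 0x9fc00 (just
       below the 640K boundary). */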
    addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);
    addr <<= 4;
    if (addr)
        reserve_bootmem_generic(addr, PAGE_SIZE);
}
#endif
void __init setup_arch(char **cmdline_p)
{
    unsigned long kernel_end;

#ifdef CONFIG_XEN
    ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
    drive_info = DRIVE_INFO;
    kernel_end = 0;     /* dummy */
    screen_info = SCREEN_INFO;

    if (xen_start_info->flags & SIF_INITDOMAIN) {
        /* This is drawn from a dump from vgacon:startup in
         * standard Linux. */
        screen_info.orig_video_mode = 3;
        screen_info.orig_video_isVGA = 1;
        screen_info.orig_video_lines = 25;
        screen_info.orig_video_cols = 80;
        screen_info.orig_video_ega_bx = 3;
        screen_info.orig_video_points = 16;
    } else
        screen_info.orig_video_isVGA = 0;

    edid_info = EDID_INFO;
    saved_video_mode = SAVED_VIDEO_MODE;
    bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
    rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
    rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
    rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

    setup_xen_features();

    HYPERVISOR_vm_assist(VMASST_CMD_enable,
                         VMASST_TYPE_writable_pagetables);

    ARCH_SETUP
#else
    ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
    drive_info = DRIVE_INFO;
    screen_info = SCREEN_INFO;
    edid_info = EDID_INFO;
    saved_video_mode = SAVED_VIDEO_MODE;
    bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
    rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
    rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
    rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
    setup_memory_region();
    copy_edd();
#endif /* !CONFIG_XEN */

    if (!MOUNT_ROOT_RDONLY)
        root_mountflags &= ~MS_RDONLY;
    init_mm.start_code = (unsigned long) &_text;
    init_mm.end_code = (unsigned long) &_etext;
    init_mm.end_data = (unsigned long) &_edata;
#ifdef CONFIG_XEN
    init_mm.brk = start_pfn << PAGE_SHIFT;
#else
    init_mm.brk = (unsigned long) &_end;

    code_resource.start = virt_to_phys(&_text);
    code_resource.end = virt_to_phys(&_etext)-1;
    data_resource.start = virt_to_phys(&_etext);
    data_resource.end = virt_to_phys(&_edata)-1;
#endif
    parse_cmdline_early(cmdline_p);

    early_identify_cpu(&boot_cpu_data);

    /*
     * partially used pages are not usable - thus
     * we are rounding upwards:
     */
    end_pfn = e820_end_of_ram();

    check_efer();

    init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

#ifdef CONFIG_ACPI_NUMA
    /*
     * Parse SRAT to discover nodes.
     */
    acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
    numa_initmem_init(start_pfn, end_pfn);
#else
    contig_initmem_init(start_pfn, end_pfn);
#endif

#ifndef CONFIG_XEN
    /* Reserve direct mapping */
    reserve_bootmem_generic(table_start << PAGE_SHIFT,
                            (table_end - table_start) << PAGE_SHIFT);

    /* reserve kernel */
    kernel_end = round_up(__pa_symbol(&_end), PAGE_SIZE);
    reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);

    /*
     * reserve physical page 0 - it's a special BIOS page on many boxes,
     * enabling clean reboots, SMP operation, laptop functions.
     */
    reserve_bootmem_generic(0, PAGE_SIZE);

    /* reserve ebda region */
    reserve_ebda_region();
#endif

#ifdef CONFIG_SMP
    /*
     * But first pinch a few for the stack/trampoline stuff
     * FIXME: Don't need the extra page at 4K, but need to fix
     * trampoline before removing it. (see the GDT stuff)
     */
    reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

    /* Reserve SMP trampoline */
    reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
    /*
     * Reserve low memory region for sleep support.
     */
    acpi_reserve_bootmem();
#endif
#ifdef CONFIG_XEN
#ifdef CONFIG_BLK_DEV_INITRD
    if (xen_start_info->mod_start) {
        if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
            /*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
            initrd_start = INITRD_START + PAGE_OFFSET;
            initrd_end = initrd_start+INITRD_SIZE;
            initrd_below_start_ok = 1;
        } else {
            printk(KERN_ERR "initrd extends beyond end of memory "
                   "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                   (unsigned long)(INITRD_START + INITRD_SIZE),
                   (unsigned long)(end_pfn << PAGE_SHIFT));
            initrd_start = 0;
        }
    }
#endif
#else /* CONFIG_XEN */
#ifdef CONFIG_BLK_DEV_INITRD
    if (LOADER_TYPE && INITRD_START) {
        if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
            reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
            initrd_start =
                INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
            initrd_end = initrd_start+INITRD_SIZE;
        }
        else {
            printk(KERN_ERR "initrd extends beyond end of memory "
                   "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                   (unsigned long)(INITRD_START + INITRD_SIZE),
                   (unsigned long)(end_pfn << PAGE_SHIFT));
            initrd_start = 0;
        }
    }
#endif
#endif /* !CONFIG_XEN */
#ifdef CONFIG_KEXEC
    if (crashk_res.start != crashk_res.end) {
        reserve_bootmem(crashk_res.start,
                        crashk_res.end - crashk_res.start + 1);
    }
#endif
    paging_init();
#ifdef CONFIG_X86_LOCAL_APIC
    /*
     * Find and reserve possible boot-time SMP configuration:
     */
    find_smp_config();
#endif
#ifdef CONFIG_XEN
    {
        int i, j, k, fpp;
        unsigned long va;

        /* 'Initial mapping' of initrd must be destroyed. */
        for (va = xen_start_info->mod_start;
             va < (xen_start_info->mod_start+xen_start_info->mod_len);
             va += PAGE_SIZE) {
            HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
        }

        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
            /* Make sure we have a large enough P->M table. */
            phys_to_machine_mapping = alloc_bootmem(
                end_pfn * sizeof(unsigned long));
            memset(phys_to_machine_mapping, ~0,
                   end_pfn * sizeof(unsigned long));
            memcpy(phys_to_machine_mapping,
                   (unsigned long *)xen_start_info->mfn_list,
                   xen_start_info->nr_pages * sizeof(unsigned long));
            free_bootmem(
                __pa(xen_start_info->mfn_list),
                PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
                                sizeof(unsigned long))));

            /* Destroy the 'initial mapping' of the old p2m table. */
            for (va = xen_start_info->mfn_list;
                 va < (xen_start_info->mfn_list +
                       (xen_start_info->nr_pages*sizeof(unsigned long)));
                 va += PAGE_SIZE) {
                HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
            }
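
            /* The p2m map is published as a two-level structure: each
               entry of the frame-list-list names a page that itself
               lists the MFNs of p2m-table pages; with 4K pages and
               8-byte entries, one page covers 512 frames. */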
            /*
             * Initialise the list of the frames that specify the
             * list of frames that make up the p2m table. Used by
             * save/restore.
             */
            pfn_to_mfn_frame_list_list = alloc_bootmem(PAGE_SIZE);
            HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
                virt_to_mfn(pfn_to_mfn_frame_list_list);

            fpp = PAGE_SIZE/sizeof(unsigned long);
            for (i = 0, j = 0, k = -1; i < end_pfn; i += fpp, j++) {
                if ((j % fpp) == 0) {
                    k++;
                    BUG_ON(k >= fpp);
                    pfn_to_mfn_frame_list[k] =
                        alloc_bootmem(PAGE_SIZE);
                    pfn_to_mfn_frame_list_list[k] =
                        virt_to_mfn(pfn_to_mfn_frame_list[k]);
                    j = 0;
                }
                pfn_to_mfn_frame_list[k][j] =
                    virt_to_mfn(&phys_to_machine_mapping[i]);
            }
            HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
        }

    }

    if (!(xen_start_info->flags & SIF_INITDOMAIN))
    {
        acpi_disabled = 1;
#ifdef CONFIG_ACPI
        acpi_ht = 0;
#endif
    }
#endif
#ifndef CONFIG_XEN
    check_ioapic();
#endif

    zap_low_mappings(0);

#ifdef CONFIG_ACPI
    /*
     * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
     * Call this early for SRAT node setup.
     */
    acpi_boot_table_init();

    /*
     * Read APIC and some other early information from ACPI tables.
     */
    acpi_boot_init();
#endif

    init_cpu_to_node();

#ifdef CONFIG_X86_LOCAL_APIC
    /*
     * get boot-time SMP configuration:
     */
    if (smp_found_config)
        get_smp_config();
#ifndef CONFIG_XEN
    init_apic_mappings();
#endif
#endif
#if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
    prefill_possible_map();
#endif

#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
    /*
     * Request address space for all standard RAM and ROM resources
     * and also for regions reported as reserved by the e820.
     */
    probe_roms();
    e820_reserve_resources();
#endif
    request_resource(&iomem_resource, &video_ram_resource);

    {
        unsigned i;
        /* request I/O space for devices used on all i[345]86 PCs */
        for (i = 0; i < STANDARD_IO_RESOURCES; i++)
            request_resource(&ioport_resource, &standard_io_resources[i]);
    }

    e820_setup_gap();

#ifdef CONFIG_GART_IOMMU
    iommu_hole_init();
#endif
#ifdef CONFIG_XEN
    {
        physdev_op_t op;

        op.cmd             = PHYSDEVOP_SET_IOPL;
        op.u.set_iopl.iopl = 1;
        HYPERVISOR_physdev_op(&op);

        if (xen_start_info->flags & SIF_INITDOMAIN) {
            if (!(xen_start_info->flags & SIF_PRIVILEGED))
                panic("Xen granted us console access "
                      "but not privileged status");

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
            conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
            conswitchp = &dummy_con;
#endif
#endif
        } else {
            extern int console_use_vt;
            console_use_vt = 0;
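            /* Deliberately leave conswitchp unset here: pointing it at
               dummy_con confuses the VT layer and loses console output
               unless console= is given (see the changeset description
               above). */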
        }
    }
#else /* CONFIG_XEN */

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
    conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
    conswitchp = &dummy_con;
#endif
#endif

#endif /* !CONFIG_XEN */
}
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
    unsigned int *v;

    if (c->extended_cpuid_level < 0x80000004)
        return 0;

    v = (unsigned int *) c->x86_model_id;
    cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
    cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
    cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
    c->x86_model_id[48] = 0;
    return 1;
}
static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
    unsigned int n, dummy, eax, ebx, ecx, edx;

    n = c->extended_cpuid_level;

    if (n >= 0x80000005) {
        cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
        printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
               edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
        c->x86_cache_size = (ecx>>24)+(edx>>24);
        /* On K8 L1 TLB is inclusive, so don't count it */
        c->x86_tlbsize = 0;
    }

    if (n >= 0x80000006) {
        cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
        ecx = cpuid_ecx(0x80000006);
        c->x86_cache_size = ecx >> 16;
        c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

        printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
               c->x86_cache_size, ecx & 0xFF);
    }

    if (n >= 0x80000007)
        cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
    if (n >= 0x80000008) {
        cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
        c->x86_virt_bits = (eax >> 8) & 0xff;
        c->x86_phys_bits = eax & 0xff;
    }
}
#ifdef CONFIG_NUMA
static int nearby_node(int apicid)
{
    int i;
    for (i = apicid - 1; i >= 0; i--) {
        int node = apicid_to_node[i];
        if (node != NUMA_NO_NODE && node_online(node))
            return node;
    }
    for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
        int node = apicid_to_node[i];
        if (node != NUMA_NO_NODE && node_online(node))
            return node;
    }
    return first_node(node_online_map); /* Shouldn't happen */
}
#endif
/*
 * On an AMD dual-core setup, the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
    int cpu = smp_processor_id();
    unsigned bits;
#ifdef CONFIG_NUMA
    int node = 0;
    unsigned apicid = phys_proc_id[cpu];
#endif

    bits = 0;
    while ((1 << bits) < c->x86_max_cores)
        bits++;

    /* Low order bits define the core id (index of core in socket) */
    cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
    /* Convert the APIC ID into the socket ID */
    phys_proc_id[cpu] >>= bits;

#ifdef CONFIG_NUMA
    node = phys_proc_id[cpu];
    if (apicid_to_node[apicid] != NUMA_NO_NODE)
        node = apicid_to_node[apicid];
    if (!node_online(node)) {
        /* Two possibilities here:
           - The CPU is missing memory and no node was created.
             In that case try picking one from a nearby CPU.
           - The APIC IDs differ from the HyperTransport node IDs
             which the K8 northbridge parsing fills in.
             Assume they are all increased by a constant offset,
             but in the same order as the HT nodeids.
             If that doesn't result in a usable node fall back to the
             path for the previous case. */
        int ht_nodeid = apicid - (phys_proc_id[0] << bits);
        if (ht_nodeid >= 0 &&
            apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
            node = apicid_to_node[ht_nodeid];
        /* Pick a nearby node */
        if (!node_online(node))
            node = nearby_node(apicid);
    }
    numa_set_node(cpu, node);

    printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n",
           cpu, c->x86_max_cores, node, cpu_core_id[cpu]);
#endif
#endif
}
static int __init init_amd(struct cpuinfo_x86 *c)
{
    int r;

#ifdef CONFIG_SMP
    unsigned long value;

    /*
     * Disable TLB flush filter by setting HWCR.FFDIS on K8
     * bit 6 of msr C001_0015
     *
     * Errata 63 for SH-B3 steppings
     * Errata 122 for all steppings (F+ have it disabled by default)
     */
    if (c->x86 == 15) {
        rdmsrl(MSR_K8_HWCR, value);
        value |= 1 << 6;
        wrmsrl(MSR_K8_HWCR, value);
    }
#endif

    /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
       3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
    clear_bit(0*32+31, &c->x86_capability);

    r = get_model_name(c);
    if (!r) {
        switch (c->x86) {
        case 15:
            /* Should distinguish Models here, but this is only
               a fallback anyways. */
            strcpy(c->x86_model_id, "Hammer");
            break;
        }
    }
    display_cacheinfo(c);

    /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
    if (c->x86_power & (1<<8))
        set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);

    if (c->extended_cpuid_level >= 0x80000008) {
        c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
        if (c->x86_max_cores & (c->x86_max_cores - 1))
            c->x86_max_cores = 1;

        amd_detect_cmp(c);
    }

    return r;
}
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
    u32 eax, ebx, ecx, edx;
    int index_msb, core_bits;
    int cpu = smp_processor_id();

    cpuid(1, &eax, &ebx, &ecx, &edx);

    c->apicid = phys_pkg_id(0);

    if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
        return;

    smp_num_siblings = (ebx & 0xff0000) >> 16;

    if (smp_num_siblings == 1) {
        printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
    } else if (smp_num_siblings > 1) {

        if (smp_num_siblings > NR_CPUS) {
            printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings);
            smp_num_siblings = 1;
            return;
        }

        index_msb = get_count_order(smp_num_siblings);
        phys_proc_id[cpu] = phys_pkg_id(index_msb);

        printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
               phys_proc_id[cpu]);

        smp_num_siblings = smp_num_siblings / c->x86_max_cores;

        index_msb = get_count_order(smp_num_siblings);

        core_bits = get_count_order(c->x86_max_cores);

        cpu_core_id[cpu] = phys_pkg_id(index_msb) &
            ((1 << core_bits) - 1);

        if (c->x86_max_cores > 1)
            printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                   cpu_core_id[cpu]);
    }
#endif
}
/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
    unsigned int eax;

    if (c->cpuid_level < 4)
        return 1;
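
    /* CPUID leaf 4 (deterministic cache parameters), index 0:
       EAX[31:26] holds (addressable cores per physical package) - 1. */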
1211 __asm__("cpuid"
1212 : "=a" (eax)
1213 : "0" (4), "c" (0)
1214 : "bx", "dx");
1216 if (eax & 0x1f)
1217 return ((eax >> 26) + 1);
1218 else
1219 return 1;
static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
    unsigned node;
    int cpu = smp_processor_id();

    /* Don't do the funky fallback heuristics the AMD version employs
       for now. */
    node = apicid_to_node[hard_smp_processor_id()];
    if (node == NUMA_NO_NODE)
        node = 0;
    numa_set_node(cpu, node);

    if (acpi_numa > 0)
        printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node);
#endif
}
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
    /* Cache sizes */
    unsigned n;

    init_intel_cacheinfo(c);
    n = c->extended_cpuid_level;
    if (n >= 0x80000008) {
        unsigned eax = cpuid_eax(0x80000008);
        c->x86_virt_bits = (eax >> 8) & 0xff;
        c->x86_phys_bits = eax & 0xff;
        /* CPUID workaround for Intel 0F34 CPU */
        if (c->x86_vendor == X86_VENDOR_INTEL &&
            c->x86 == 0xF && c->x86_model == 0x3 &&
            c->x86_mask == 0x4)
            c->x86_phys_bits = 36;
    }

    if (c->x86 == 15)
        c->x86_cache_alignment = c->x86_clflush_size * 2;
    if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
        (c->x86 == 0x6 && c->x86_model >= 0x0e))
        set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
    set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
    c->x86_max_cores = intel_num_cpu_cores(c);

    srat_detect_node();
}
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
    char *v = c->x86_vendor_id;

    if (!strcmp(v, "AuthenticAMD"))
        c->x86_vendor = X86_VENDOR_AMD;
    else if (!strcmp(v, "GenuineIntel"))
        c->x86_vendor = X86_VENDOR_INTEL;
    else
        c->x86_vendor = X86_VENDOR_UNKNOWN;
}

struct cpu_model_info {
    int vendor;
    int family;
    char *model_names[16];
};
/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
    u32 tfms;

    c->loops_per_jiffy = loops_per_jiffy;
    c->x86_cache_size = -1;
    c->x86_vendor = X86_VENDOR_UNKNOWN;
    c->x86_model = c->x86_mask = 0; /* So far unknown... */
    c->x86_vendor_id[0] = '\0'; /* Unset */
    c->x86_model_id[0] = '\0';  /* Unset */
    c->x86_clflush_size = 64;
    c->x86_cache_alignment = c->x86_clflush_size;
    c->x86_max_cores = 1;
    c->extended_cpuid_level = 0;
    memset(&c->x86_capability, 0, sizeof c->x86_capability);

    /* Get vendor name */
    cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
          (unsigned int *)&c->x86_vendor_id[0],
          (unsigned int *)&c->x86_vendor_id[8],
          (unsigned int *)&c->x86_vendor_id[4]);

    get_cpu_vendor(c);

    /* Initialize the standard set of capabilities */
    /* Note that the vendor-specific code below might override */

    /* Intel-defined flags: level 0x00000001 */
    if (c->cpuid_level >= 0x00000001) {
        __u32 misc;
        cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
              &c->x86_capability[0]);
        c->x86 = (tfms >> 8) & 0xf;
        c->x86_model = (tfms >> 4) & 0xf;
        c->x86_mask = tfms & 0xf;
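        /* e.g. tfms 0x00000f43: family 0xf, model 4, stepping 3;
           the extended family/model bits are folded in below. */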
        if (c->x86 == 0xf)
            c->x86 += (tfms >> 20) & 0xff;
        if (c->x86 >= 0x6)
            c->x86_model += ((tfms >> 16) & 0xF) << 4;
        if (c->x86_capability[0] & (1<<19))
            c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
    } else {
        /* Have CPUID level 0 only - unheard of */
        c->x86 = 4;
    }

#ifdef CONFIG_SMP
    phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
    int i;
    u32 xlvl;

    early_identify_cpu(c);

    /* AMD-defined flags: level 0x80000001 */
    xlvl = cpuid_eax(0x80000000);
    c->extended_cpuid_level = xlvl;
    if ((xlvl & 0xffff0000) == 0x80000000) {
        if (xlvl >= 0x80000001) {
            c->x86_capability[1] = cpuid_edx(0x80000001);
            c->x86_capability[6] = cpuid_ecx(0x80000001);
        }
        if (xlvl >= 0x80000004)
            get_model_name(c); /* Default name */
    }

    /* Transmeta-defined flags: level 0x80860001 */
    xlvl = cpuid_eax(0x80860000);
    if ((xlvl & 0xffff0000) == 0x80860000) {
        /* Don't set x86_cpuid_level here for now to not confuse. */
        if (xlvl >= 0x80860001)
            c->x86_capability[2] = cpuid_edx(0x80860001);
    }

    /*
     * Vendor-specific initialization.  In this section we
     * canonicalize the feature flags, meaning if there are
     * features a certain CPU supports which CPUID doesn't
     * tell us, CPUID claiming incorrect flags, or other bugs,
     * we handle them here.
     *
     * At the end of this section, c->x86_capability better
     * indicate the features this CPU genuinely supports!
     */
    switch (c->x86_vendor) {
    case X86_VENDOR_AMD:
        init_amd(c);
        break;

    case X86_VENDOR_INTEL:
        init_intel(c);
        break;

    case X86_VENDOR_UNKNOWN:
    default:
        display_cacheinfo(c);
        break;
    }

    select_idle_routine(c);
    detect_ht(c);

    /*
     * On SMP, boot_cpu_data holds the common feature set between
     * all CPUs; so make sure that we indicate which features are
     * common between the CPUs.  The first time this routine gets
     * executed, c == &boot_cpu_data.
     */
    if (c != &boot_cpu_data) {
        /* AND the already accumulated flags with these */
        for (i = 0 ; i < NCAPINTS ; i++)
            boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
    }

#ifdef CONFIG_X86_MCE
    mcheck_init(c);
#endif
    if (c == &boot_cpu_data)
        mtrr_bp_init();
    else
        mtrr_ap_init();
#ifdef CONFIG_NUMA
    numa_add_cpu(smp_processor_id());
#endif
}
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
    if (c->x86_model_id[0])
        printk("%s", c->x86_model_id);

    if (c->x86_mask || c->cpuid_level >= 0)
        printk(" stepping %02x\n", c->x86_mask);
    else
        printk("\n");
}
/*
 * Get CPU information for use by the procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
    struct cpuinfo_x86 *c = v;

    /*
     * These flag bits must match the definitions in <asm/cpufeature.h>.
     * NULL means this bit is undefined or reserved; either way it doesn't
     * have meaning as far as Linux is concerned.  Note that it's important
     * to realize there is a difference between this table and CPUID -- if
     * applications want to get the raw CPUID data, they should access
     * /dev/cpu/<cpu_nr>/cpuid instead.
     */
    static char *x86_cap_flags[] = {
        /* Intel-defined */
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,

        /* AMD-defined */
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
        NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow",

        /* Transmeta-defined */
        "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

        /* Other (Linux-defined) */
        "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
        "constant_tsc", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

        /* Intel-defined (#2) */
        "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", NULL, "est",
        "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

        /* VIA/Cyrix/Centaur-defined */
        NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

        /* AMD-defined (#2) */
        "lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static char *x86_power_flags[] = {
        "ts",   /* temperature sensor */
        "fid",  /* frequency id control */
        "vid",  /* voltage id control */
        "ttp",  /* thermal trip */
        "tm",
        "stc",
        NULL,
        /* nothing */ /* constant_tsc - moved to flags */
    };

#ifdef CONFIG_SMP
    if (!cpu_online(c-cpu_data))
        return 0;
#endif

    seq_printf(m, "processor\t: %u\n"
               "vendor_id\t: %s\n"
               "cpu family\t: %d\n"
               "model\t\t: %d\n"
               "model name\t: %s\n",
               (unsigned)(c-cpu_data),
               c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
               c->x86,
               (int)c->x86_model,
               c->x86_model_id[0] ? c->x86_model_id : "unknown");

    if (c->x86_mask || c->cpuid_level >= 0)
        seq_printf(m, "stepping\t: %d\n", c->x86_mask);
    else
        seq_printf(m, "stepping\t: unknown\n");

    if (cpu_has(c, X86_FEATURE_TSC)) {
        unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
        if (!freq)
            freq = cpu_khz;
        seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
                   freq / 1000, (freq % 1000));
    }

    /* Cache size */
    if (c->x86_cache_size >= 0)
        seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

#ifdef CONFIG_SMP
    if (smp_num_siblings * c->x86_max_cores > 1) {
        int cpu = c - cpu_data;
        seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
        seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
        seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
        seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
    }
#endif

    seq_printf(m,
               "fpu\t\t: yes\n"
               "fpu_exception\t: yes\n"
               "cpuid level\t: %d\n"
               "wp\t\t: yes\n"
               "flags\t\t:",
               c->cpuid_level);

    {
        int i;
        for ( i = 0 ; i < 32*NCAPINTS ; i++ )
            if ( test_bit(i, &c->x86_capability) &&
                 x86_cap_flags[i] != NULL )
                seq_printf(m, " %s", x86_cap_flags[i]);
    }

    seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
               c->loops_per_jiffy/(500000/HZ),
               (c->loops_per_jiffy/(5000/HZ)) % 100);

    if (c->x86_tlbsize > 0)
        seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
    seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
    seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

    seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
               c->x86_phys_bits, c->x86_virt_bits);

    seq_printf(m, "power management:");
    {
        unsigned i;
        for (i = 0; i < 32; i++)
            if (c->x86_power & (1 << i)) {
                if (i < ARRAY_SIZE(x86_power_flags) &&
                    x86_power_flags[i])
                    seq_printf(m, "%s%s",
                               x86_power_flags[i][0] ? " " : "",
                               x86_power_flags[i]);
                else
                    seq_printf(m, " [%d]", i);
            }
    }

    seq_printf(m, "\n\n");

    return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
    return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
    ++*pos;
    return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
    .start = c_start,
    .next  = c_next,
    .stop  = c_stop,
    .show  = show_cpuinfo,
};

static int __init run_dmi_scan(void)
{
    dmi_scan_machine();
    return 0;
}
core_initcall(run_dmi_scan);