ia64/xen-unstable

view linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c @ 7475:3eee5653f08b

Fix allocation of phys_to_machine_mapping array during
x86/64 linux boot. The original builder-provided array
must be made read-only in the initial mapping.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Oct 21 16:22:09 2005 +0100 (2005-10-21)
parents 41489189b19e
children ab0addb6dcbb
line source
1 /*
2 * linux/arch/x86-64/kernel/setup.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 *
6 * Nov 2001 Dave Jones <davej@suse.de>
7 * Forked from i386 setup code.
8 *
9 * $Id$
10 */
12 /*
13 * This file handles the architecture-dependent parts of initialization
14 */
16 #include <linux/errno.h>
17 #include <linux/sched.h>
18 #include <linux/kernel.h>
19 #include <linux/mm.h>
20 #include <linux/stddef.h>
21 #include <linux/unistd.h>
22 #include <linux/ptrace.h>
23 #include <linux/slab.h>
24 #include <linux/user.h>
25 #include <linux/a.out.h>
26 #include <linux/tty.h>
27 #include <linux/ioport.h>
28 #include <linux/delay.h>
29 #include <linux/config.h>
30 #include <linux/init.h>
31 #include <linux/initrd.h>
32 #include <linux/highmem.h>
33 #include <linux/bootmem.h>
34 #include <linux/module.h>
35 #include <asm/processor.h>
36 #include <linux/console.h>
37 #include <linux/seq_file.h>
38 #include <linux/root_dev.h>
39 #include <linux/pci.h>
40 #include <linux/acpi.h>
41 #include <linux/kallsyms.h>
42 #include <linux/edd.h>
43 #include <asm/mtrr.h>
44 #include <asm/uaccess.h>
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/smp.h>
48 #include <asm/msr.h>
49 #include <asm/desc.h>
50 #include <video/edid.h>
51 #include <asm/e820.h>
52 #include <asm/dma.h>
53 #include <asm/mpspec.h>
54 #include <asm/mmu_context.h>
55 #include <asm/bootsetup.h>
56 #include <asm/proto.h>
57 #include <asm/setup.h>
58 #include <asm/mach_apic.h>
59 #include <asm/numa.h>
60 #ifdef CONFIG_XEN
61 #include <linux/percpu.h>
62 #include <asm-xen/xen-public/physdev.h>
63 #include "setup_arch_pre.h"
64 #include <asm/hypervisor.h>
65 #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
66 #define PFN_PHYS(x) ((x) << PAGE_SHIFT)
67 #define end_pfn_map end_pfn
68 #include <asm/mach-xen/setup_arch_post.h>
70 extern unsigned long start_pfn;
71 extern struct edid_info edid_info;
/* Pointer to the Xen shared-info page; points at the zero page until
 * the real mapping is established during early boot. */
73 shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
74 EXPORT_SYMBOL(HYPERVISOR_shared_info);
76 /* Allows setting of maximum possible memory size */
77 unsigned long xen_override_max_pfn;
/* P2M table: maps pseudo-physical frame numbers to machine frame
 * numbers.  Allocated and populated in setup_arch(). */
79 unsigned long *phys_to_machine_mapping;
/* Frames describing the P2M table layout; used by save/restore. */
80 unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];
82 EXPORT_SYMBOL(phys_to_machine_mapping);
/* Per-CPU multicall batching state (up to 8 queued entries per CPU). */
84 DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
85 DEFINE_PER_CPU(int, nr_multicall_ents);
87 /* Raw start-of-day parameters from the hypervisor. */
88 start_info_t *xen_start_info;
89 EXPORT_SYMBOL(xen_start_info);
90 #endif
92 /*
93 * Machine setup..
94 */
/* Boot CPU data; filled in by early_identify_cpu()/identify_cpu(). */
96 struct cpuinfo_x86 boot_cpu_data;
/* CR4 feature bits to be enabled on every CPU. */
98 unsigned long mmu_cr4_features;
/* Non-zero once ACPI has been disabled (command line, or a
 * non-privileged Xen domain -- see setup_arch()). */
100 int acpi_disabled;
101 EXPORT_SYMBOL(acpi_disabled);
102 #ifdef CONFIG_ACPI_BOOT
103 extern int __initdata acpi_ht;
104 extern acpi_interrupt_flags acpi_sci_flags;
105 int __initdata acpi_force = 0;
106 #endif
108 int acpi_numa __initdata;
110 /* Boot loader ID as an integer, for the benefit of proc_dointvec */
111 int bootloader_type;
113 unsigned long saved_video_mode;
115 #ifdef CONFIG_SWIOTLB
116 int swiotlb;
117 EXPORT_SYMBOL(swiotlb);
118 #endif
120 /*
121 * Setup options
122 */
123 struct drive_info_struct { char dummy[32]; } drive_info;
124 struct screen_info screen_info;
125 struct sys_desc_table_struct {
126 unsigned short length;
127 unsigned char table[0];
128 };
130 struct edid_info edid_info;
131 struct e820map e820;
133 extern int root_mountflags;
/* Linker-provided section boundary symbols. */
134 extern char _text, _etext, _edata, _end;
/* Parsed command line; filled in by parse_cmdline_early(). */
136 char command_line[COMMAND_LINE_SIZE];
/* Legacy PC I/O-port regions claimed for all i[345]86-class machines;
 * requested against ioport_resource in setup_arch(). */
138 struct resource standard_io_resources[] = {
139 { .name = "dma1", .start = 0x00, .end = 0x1f,
140 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
141 { .name = "pic1", .start = 0x20, .end = 0x21,
142 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
143 { .name = "timer0", .start = 0x40, .end = 0x43,
144 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
145 { .name = "timer1", .start = 0x50, .end = 0x53,
146 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
147 { .name = "keyboard", .start = 0x60, .end = 0x6f,
148 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
149 { .name = "dma page reg", .start = 0x80, .end = 0x8f,
150 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
151 { .name = "pic2", .start = 0xa0, .end = 0xa1,
152 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
153 { .name = "dma2", .start = 0xc0, .end = 0xdf,
154 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
155 { .name = "fpu", .start = 0xf0, .end = 0xff,
156 .flags = IORESOURCE_BUSY | IORESOURCE_IO }
157 };
/* Number of entries in standard_io_resources[]. */
159 #define STANDARD_IO_RESOURCES \
160 (sizeof standard_io_resources / sizeof standard_io_resources[0])
162 #define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
/* Kernel text/data regions; start/end are filled in by setup_arch()
 * on native boot only (under Xen they are left at zero). */
164 struct resource data_resource = {
165 .name = "Kernel data",
166 .start = 0,
167 .end = 0,
168 .flags = IORESOURCE_RAM,
169 };
170 struct resource code_resource = {
171 .name = "Kernel code",
172 .start = 0,
173 .end = 0,
174 .flags = IORESOURCE_RAM,
175 };
177 #define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
/* Legacy BIOS/adapter ROM windows.  Only meaningful when the kernel
 * can see the physical machine (native boot, or a privileged Xen
 * guest). */
179 #if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
180 static struct resource system_rom_resource = {
181 .name = "System ROM",
182 .start = 0xf0000,
183 .end = 0xfffff,
184 .flags = IORESOURCE_ROM,
185 };
187 static struct resource extension_rom_resource = {
188 .name = "Extension ROM",
189 .start = 0xe0000,
190 .end = 0xeffff,
191 .flags = IORESOURCE_ROM,
192 };
/* Slots filled in by probe_roms() as adapter ROMs are discovered. */
194 static struct resource adapter_rom_resources[] = {
195 { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
196 .flags = IORESOURCE_ROM },
197 { .name = "Adapter ROM", .start = 0, .end = 0,
198 .flags = IORESOURCE_ROM },
199 { .name = "Adapter ROM", .start = 0, .end = 0,
200 .flags = IORESOURCE_ROM },
201 { .name = "Adapter ROM", .start = 0, .end = 0,
202 .flags = IORESOURCE_ROM },
203 { .name = "Adapter ROM", .start = 0, .end = 0,
204 .flags = IORESOURCE_ROM },
205 { .name = "Adapter ROM", .start = 0, .end = 0,
206 .flags = IORESOURCE_ROM }
207 };
208 #endif
/* Number of entries in adapter_rom_resources[]. */
210 #define ADAPTER_ROM_RESOURCES \
211 (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
213 #if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
214 static struct resource video_rom_resource = {
215 .name = "Video ROM",
216 .start = 0xc0000,
217 .end = 0xc7fff,
218 .flags = IORESOURCE_ROM,
219 };
220 #endif
222 static struct resource video_ram_resource = {
223 .name = "Video RAM area",
224 .start = 0xa0000,
225 .end = 0xbffff,
226 .flags = IORESOURCE_RAM,
227 };
/* A legacy option ROM begins with the 0xaa55 signature word. */
229 #if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
230 #define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
232 static int __init romchecksum(unsigned char *rom, unsigned long length)
233 {
234 unsigned char *p, sum = 0;
236 for (p = rom; p < rom + length; p++)
237 sum += *p;
238 return sum == 0;
239 }
/*
 * Scan the legacy ISA ROM windows (video BIOS, system BIOS, extension
 * ROM, adapter ROMs) and claim each valid image in iomem_resource.
 * An image is valid when it carries the 0xaa55 signature and its bytes
 * sum to zero modulo 256 (romchecksum).
 */
241 static void __init probe_roms(void)
242 {
243 unsigned long start, length, upper;
244 unsigned char *rom;
245 int i;
247 /* video rom */
248 upper = adapter_rom_resources[0].start;
249 for (start = video_rom_resource.start; start < upper; start += 2048) {
250 rom = isa_bus_to_virt(start);
251 if (!romsignature(rom))
252 continue;
254 video_rom_resource.start = start;
256 /* 0 < length <= 0x7f * 512, historically */
257 length = rom[2] * 512;
259 /* if checksum okay, trust length byte */
260 if (length && romchecksum(rom, length))
261 video_rom_resource.end = start + length - 1;
/* Only the first signed image is taken as the video ROM. */
263 request_resource(&iomem_resource, &video_rom_resource);
264 break;
265 }
/* Resume scanning after the video ROM, rounded up to 2K. */
267 start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
268 if (start < upper)
269 start = upper;
271 /* system rom */
272 request_resource(&iomem_resource, &system_rom_resource);
273 upper = system_rom_resource.start;
275 /* check for extension rom (ignore length byte!) */
276 rom = isa_bus_to_virt(extension_rom_resource.start);
277 if (romsignature(rom)) {
278 length = extension_rom_resource.end - extension_rom_resource.start + 1;
279 if (romchecksum(rom, length)) {
280 request_resource(&iomem_resource, &extension_rom_resource);
281 upper = extension_rom_resource.start;
282 }
283 }
285 /* check for adapter roms on 2k boundaries */
/* Note: i is advanced in the body only when a ROM is accepted. */
286 for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
287 rom = isa_bus_to_virt(start);
288 if (!romsignature(rom))
289 continue;
291 /* 0 < length <= 0x7f * 512, historically */
292 length = rom[2] * 512;
294 /* but accept any length that fits if checksum okay */
295 if (!length || start + length > upper || !romchecksum(rom, length))
296 continue;
298 adapter_rom_resources[i].start = start;
299 adapter_rom_resources[i].end = start + length - 1;
300 request_resource(&iomem_resource, &adapter_rom_resources[i]);
/* Resume the scan just past this image (loop adds the next 2K). */
302 start = adapter_rom_resources[i++].end & ~2047UL;
303 }
304 }
305 #endif
/*
 * Early boot command-line parser: saves the raw command line (from the
 * Xen start_info under CONFIG_XEN, otherwise from the boot header) into
 * saved_command_line, scans it for options needed before the normal
 * __setup machinery runs, and hands the accumulated copy back through
 * *cmdline_p.
 */
308 static __init void parse_cmdline_early (char ** cmdline_p)
309 {
310 char c = ' ', *to = command_line, *from = COMMAND_LINE;
311 int len = 0;
313 /* Save unparsed command line copy for /proc/cmdline */
314 #ifdef CONFIG_XEN
315 int max_cmdline;
317 if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
318 max_cmdline = COMMAND_LINE_SIZE;
319 memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
320 saved_command_line[max_cmdline-1] = '\0';
321 #else
322 memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
323 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
324 #endif
/* Options are only matched at a word boundary (previous character was
 * a space); every character is still copied into command_line. */
326 for (;;) {
327 if (c != ' ')
328 goto next_char;
/* NOTE(review): under CONFIG_SMP this "else if" attaches to the
 * "if (c != ' ') goto next_char;" above.  This dump appears to have
 * dropped lines here -- verify against the original file. */
330 #ifdef CONFIG_SMP
331 /*
332 * If the BIOS enumerates physical processors before logical,
333 * maxcpus=N at enumeration-time can be used to disable HT.
334 */
335 else if (!memcmp(from, "maxcpus=", 8)) {
336 extern unsigned int maxcpus;
338 maxcpus = simple_strtoul(from + 8, NULL, 0);
339 }
340 #endif
341 #ifdef CONFIG_ACPI_BOOT
342 /* "acpi=off" disables both ACPI table parsing and interpreter init */
343 if (!memcmp(from, "acpi=off", 8))
344 disable_acpi();
346 if (!memcmp(from, "acpi=force", 10)) {
347 /* add later when we do DMI horrors: */
348 acpi_force = 1;
349 acpi_disabled = 0;
350 }
352 /* acpi=ht just means: do ACPI MADT parsing
353 at bootup, but don't enable the full ACPI interpreter */
354 if (!memcmp(from, "acpi=ht", 7)) {
355 if (!acpi_force)
356 disable_acpi();
357 acpi_ht = 1;
358 }
359 else if (!memcmp(from, "pci=noacpi", 10))
360 acpi_disable_pci();
361 else if (!memcmp(from, "acpi=noirq", 10))
362 acpi_noirq_set();
364 else if (!memcmp(from, "acpi_sci=edge", 13))
365 acpi_sci_flags.trigger = 1;
366 else if (!memcmp(from, "acpi_sci=level", 14))
367 acpi_sci_flags.trigger = 3;
368 else if (!memcmp(from, "acpi_sci=high", 13))
369 acpi_sci_flags.polarity = 1;
370 else if (!memcmp(from, "acpi_sci=low", 12))
371 acpi_sci_flags.polarity = 3;
373 /* acpi=strict disables out-of-spec workarounds */
374 else if (!memcmp(from, "acpi=strict", 11)) {
375 acpi_strict = 1;
376 }
377 #ifdef CONFIG_X86_IO_APIC
378 else if (!memcmp(from, "acpi_skip_timer_override", 24))
379 acpi_skip_timer_override = 1;
380 #endif
381 #endif
/* APIC options are meaningless under Xen (no direct APIC access). */
382 #ifndef CONFIG_XEN
383 if (!memcmp(from, "nolapic", 7) ||
384 !memcmp(from, "disableapic", 11))
385 disable_apic = 1;
387 if (!memcmp(from, "noapic", 6))
388 skip_ioapic_setup = 1;
390 if (!memcmp(from, "apic", 4)) {
391 skip_ioapic_setup = 0;
392 ioapic_force = 1;
393 }
394 #endif
395 if (!memcmp(from, "mem=", 4))
396 parse_memopt(from+4, &from);
398 #ifdef CONFIG_DISCONTIGMEM
399 if (!memcmp(from, "numa=", 5))
400 numa_setup(from+5);
401 #endif
403 #ifdef CONFIG_GART_IOMMU
404 if (!memcmp(from,"iommu=",6)) {
405 iommu_setup(from+6);
406 }
407 #endif
409 if (!memcmp(from,"oops=panic", 10))
410 panic_on_oops = 1;
412 if (!memcmp(from, "noexec=", 7))
413 nonx_setup(from + 7);
415 next_char:
416 c = *(from++);
417 if (!c)
418 break;
/* Stop copying once the destination buffer is full. */
419 if (COMMAND_LINE_SIZE <= ++len)
420 break;
421 *(to++) = c;
422 }
423 *to = '\0';
424 *cmdline_p = command_line;
425 }
427 #ifndef CONFIG_DISCONTIGMEM
428 #ifdef CONFIG_XEN
/*
 * Set up the boot-time allocator for a flat (non-NUMA) Xen guest:
 * mark all guest pages free, then re-reserve the low region that holds
 * the kernel image, builder-provided data and the bootmem bitmap
 * itself (HIGH_MEMORY up to start_pfn plus the bitmap).
 */
429 static void __init contig_initmem_init(void)
430 {
431 unsigned long bootmap_size = init_bootmem(start_pfn, end_pfn);
432 free_bootmem(0, xen_start_info->nr_pages << PAGE_SHIFT);
433 reserve_bootmem(HIGH_MEMORY,
434 (PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE-1)
435 - HIGH_MEMORY);
436 }
437 #else
/*
 * Native variant: place the bootmem bitmap in a free e820 area, mark
 * all e820-usable RAM free, then reserve the bitmap itself.
 */
438 static void __init contig_initmem_init(void)
439 {
440 unsigned long bootmap_size, bootmap;
441 bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
442 bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
443 if (bootmap == -1L)
444 panic("Cannot find bootmem map of size %ld\n",bootmap_size);
445 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
446 e820_bootmem_free(&contig_page_data, 0, end_pfn << PAGE_SHIFT);
447 reserve_bootmem(bootmap, bootmap_size);
448 }
449 #endif /* !CONFIG_XEN */
450 #endif
452 /* Use inline assembly to define this because the nops are defined
453 as inline assembly strings in the include files and we cannot
454 get them easily into strings. */
/* Emits the K8 NOP sequences of lengths 1..8 back to back in .data. */
455 asm("\t.data\nk8nops: "
456 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
457 K8_NOP7 K8_NOP8);
/* k8_nops[n] points at an n-byte NOP sequence inside k8nops (n=1..8);
 * offsets are cumulative because the sequences are emitted in order. */
459 extern unsigned char k8nops[];
460 static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
461 NULL,
462 k8nops,
463 k8nops + 1,
464 k8nops + 1 + 2,
465 k8nops + 1 + 2 + 3,
466 k8nops + 1 + 2 + 3 + 4,
467 k8nops + 1 + 2 + 3 + 4 + 5,
468 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
469 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
470 };
472 /* Replace instructions with better alternatives for this CPU type.
474 This runs before SMP is initialized to avoid SMP problems with
475 self modifying code. This implies that asymmetric systems where
476 APs have less capabilities than the boot processor are not handled.
477 In this case boot with "noreplacement". */
478 void apply_alternatives(void *start, void *end)
479 {
480 struct alt_instr *a;
481 int diff, i, k;
/* start/end delimit an array of struct alt_instr records. */
482 for (a = start; (void *)a < end; a++) {
/* Skip entries whose required CPUID feature bit is absent. */
483 if (!boot_cpu_has(a->cpuid))
484 continue;
486 BUG_ON(a->replacementlen > a->instrlen);
487 __inline_memcpy(a->instr, a->replacement, a->replacementlen);
488 diff = a->instrlen - a->replacementlen;
490 /* Pad the rest with nops */
/* Use the longest available NOP (up to ASM_NOP_MAX) per iteration. */
491 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
492 k = diff;
493 if (k > ASM_NOP_MAX)
494 k = ASM_NOP_MAX;
495 __inline_memcpy(a->instr + i, k8_nops[k], k);
496 }
497 }
498 }
/* Set by the "noreplacement" boot option to skip alternatives patching. */
500 static int no_replacement __initdata = 0;
/* Patch the kernel's alternative-instruction section, unless disabled
 * on the command line. */
502 void __init alternative_instructions(void)
503 {
504 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
505 if (no_replacement)
506 return;
507 apply_alternatives(__alt_instructions, __alt_instructions_end);
508 }
/* "noreplacement" boot-option handler. */
510 static int __init noreplacement_setup(char *s)
511 {
512 no_replacement = 1;
513 return 0;
514 }
516 __setup("noreplacement", noreplacement_setup);
518 #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
519 struct edd edd;
520 #ifdef CONFIG_EDD_MODULE
521 EXPORT_SYMBOL(edd);
522 #endif
523 /**
524 * copy_edd() - Copy the BIOS EDD information
525 * from boot_params into a safe place.
526 *
527 */
528 static inline void copy_edd(void)
529 {
530 memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
531 memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
532 edd.mbr_signature_nr = EDD_MBR_SIG_NR;
533 edd.edd_info_nr = EDD_NR;
534 }
535 #else
/* No EDD support configured: nothing to copy. */
536 static inline void copy_edd(void)
537 {
538 }
539 #endif
541 #ifndef CONFIG_XEN
542 #define EBDA_ADDR_POINTER 0x40E
/*
 * Reserve the Extended BIOS Data Area so the boot allocator never
 * hands it out.  Native boot only; a Xen guest has no EBDA.
 */
543 static void __init reserve_ebda_region(void)
544 {
545 unsigned int addr;
546 /**
547 * there is a real-mode segmented pointer pointing to the
548 * 4K EBDA area at 0x40E
549 */
550 addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);
/* Convert the real-mode segment value to a physical address. */
551 addr <<= 4;
552 if (addr)
553 reserve_bootmem_generic(addr, PAGE_SIZE);
554 }
555 #endif
/*
 * setup_arch - architecture-specific boot-time initialisation.
 *
 * Fills in boot parameters (from the Xen start_info under CONFIG_XEN,
 * otherwise from the real-mode boot header), parses the early command
 * line, sizes memory, sets up the bootmem allocator and initial page
 * tables, reserves firmware/initrd regions, and -- under Xen --
 * rebuilds the phys-to-machine table before releasing the
 * builder-provided copy.  Returns the parsed command line through
 * *cmdline_p.
 */
557 void __init setup_arch(char **cmdline_p)
558 {
559 unsigned long kernel_end;
561 #ifdef CONFIG_XEN
562 ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
563 drive_info = DRIVE_INFO;
564 kernel_end = 0; /* dummy */
565 #ifdef CONFIG_XEN_PHYSDEV_ACCESS
566 screen_info = SCREEN_INFO;
568 /* This is drawn from a dump from vgacon:startup in standard Linux. */
569 screen_info.orig_video_mode = 3;
570 screen_info.orig_video_isVGA = 1;
571 screen_info.orig_video_lines = 25;
572 screen_info.orig_video_cols = 80;
573 screen_info.orig_video_ega_bx = 3;
574 screen_info.orig_video_points = 16;
575 #endif
576 edid_info = EDID_INFO;
577 saved_video_mode = SAVED_VIDEO_MODE;
578 bootloader_type = LOADER_TYPE;
580 #ifdef CONFIG_BLK_DEV_RAM
581 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
582 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
583 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
586 #endif
/* Ask the hypervisor to emulate writes to pinned page tables. */
588 HYPERVISOR_vm_assist(VMASST_CMD_enable,
589 VMASST_TYPE_writable_pagetables);
591 ARCH_SETUP
592 #else
593 ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
594 drive_info = DRIVE_INFO;
595 screen_info = SCREEN_INFO;
596 edid_info = EDID_INFO;
597 saved_video_mode = SAVED_VIDEO_MODE;
598 bootloader_type = LOADER_TYPE;
600 #ifdef CONFIG_BLK_DEV_RAM
601 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
602 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
603 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
604 #endif
605 setup_memory_region();
606 copy_edd();
607 #endif /* !CONFIG_XEN */
609 if (!MOUNT_ROOT_RDONLY)
610 root_mountflags &= ~MS_RDONLY;
/* Record kernel section boundaries in init_mm. */
611 init_mm.start_code = (unsigned long) &_text;
612 init_mm.end_code = (unsigned long) &_etext;
613 init_mm.end_data = (unsigned long) &_edata;
614 #ifdef CONFIG_XEN
615 init_mm.brk = start_pfn << PAGE_SHIFT;
616 #else
617 init_mm.brk = (unsigned long) &_end;
619 code_resource.start = virt_to_phys(&_text);
620 code_resource.end = virt_to_phys(&_etext)-1;
621 data_resource.start = virt_to_phys(&_etext);
622 data_resource.end = virt_to_phys(&_edata)-1;
623 #endif
625 parse_cmdline_early(cmdline_p);
627 early_identify_cpu(&boot_cpu_data);
629 /*
630 * partially used pages are not usable - thus
631 * we are rounding upwards:
632 */
633 end_pfn = e820_end_of_ram();
635 check_efer();
637 init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
639 #ifdef CONFIG_ACPI_NUMA
640 /*
641 * Parse SRAT to discover nodes.
642 */
643 acpi_numa_init();
644 #endif
645 
646 #ifdef CONFIG_DISCONTIGMEM
647 numa_initmem_init(0, end_pfn);
648 #else
649 contig_initmem_init();
650 #endif
652 #ifndef CONFIG_XEN
653 /* Reserve direct mapping */
654 reserve_bootmem_generic(table_start << PAGE_SHIFT,
655 (table_end - table_start) << PAGE_SHIFT);
657 /* reserve kernel */
658 kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
659 reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);
661 /*
662 * reserve physical page 0 - it's a special BIOS page on many boxes,
663 * enabling clean reboots, SMP operation, laptop functions.
664 */
665 reserve_bootmem_generic(0, PAGE_SIZE);
667 /* reserve ebda region */
668 reserve_ebda_region();
669 #endif
672 #ifdef CONFIG_SMP
673 /*
674 * But first pinch a few for the stack/trampoline stuff
675 * FIXME: Don't need the extra page at 4K, but need to fix
676 * trampoline before removing it. (see the GDT stuff)
677 */
678 reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
680 /* Reserve SMP trampoline */
681 reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
682 #endif
684 #ifdef CONFIG_ACPI_SLEEP
685 /*
686 * Reserve low memory region for sleep support.
687 */
688 acpi_reserve_bootmem();
689 #endif
690 #ifdef CONFIG_XEN
691 #ifdef CONFIG_BLK_DEV_INITRD
/* The domain builder loaded any initrd at mod_start; just record its
 * virtual range -- it already lies inside bootmem-managed memory. */
692 if (xen_start_info->mod_start) {
693 if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
694 /*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
695 initrd_start = INITRD_START + PAGE_OFFSET;
696 initrd_end = initrd_start+INITRD_SIZE;
697 initrd_below_start_ok = 1;
698 } else {
699 printk(KERN_ERR "initrd extends beyond end of memory "
700 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
701 (unsigned long)(INITRD_START + INITRD_SIZE),
702 (unsigned long)(end_pfn << PAGE_SHIFT));
703 initrd_start = 0;
704 }
705 }
706 #endif
707 #else /* CONFIG_XEN */
708 #ifdef CONFIG_BLK_DEV_INITRD
709 if (LOADER_TYPE && INITRD_START) {
710 if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
711 reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
712 initrd_start =
713 INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
714 initrd_end = initrd_start+INITRD_SIZE;
715 }
716 else {
717 printk(KERN_ERR "initrd extends beyond end of memory "
718 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
719 (unsigned long)(INITRD_START + INITRD_SIZE),
720 (unsigned long)(end_pfn << PAGE_SHIFT));
721 initrd_start = 0;
722 }
723 }
724 #endif
725 #endif /* !CONFIG_XEN */
726 paging_init();
727 #ifdef CONFIG_X86_LOCAL_APIC
728 /*
729 * Find and reserve possible boot-time SMP configuration:
730 */
731 find_smp_config();
732 #endif
733 #ifdef CONFIG_XEN
734 {
735 int i, j, k, fpp;
737 /* Make sure we have a large enough P->M table. */
/* The new table covers all of end_pfn; unpopulated entries are ~0. */
738 phys_to_machine_mapping = alloc_bootmem(
739 end_pfn * sizeof(unsigned long));
740 memset(phys_to_machine_mapping, ~0,
741 end_pfn * sizeof(unsigned long));
742 memcpy(phys_to_machine_mapping,
743 (unsigned long *)xen_start_info->mfn_list,
744 xen_start_info->nr_pages * sizeof(unsigned long));
/* Release the builder-provided mfn_list pages and make the (still
 * mapped) original region read-only again -- the initial mapping
 * requires it (see the changeset description). */
745 free_bootmem(
746 __pa(xen_start_info->mfn_list),
747 PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
748 sizeof(unsigned long))));
749 make_pages_readonly((void *)xen_start_info->mfn_list,
750 PFN_UP(xen_start_info->nr_pages *
751 sizeof(unsigned long)));
753 /*
754 * Initialise the list of the frames that specify the list of
755 * frames that make up the p2m table. Used by save/restore
756 */
757 pfn_to_mfn_frame_list_list = alloc_bootmem(PAGE_SIZE);
758 HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
759 virt_to_mfn(pfn_to_mfn_frame_list_list);
/* fpp = frame-list entries per page; build the two-level list:
 * one entry per p2m frame, one list page per fpp entries. */
761 fpp = PAGE_SIZE/sizeof(unsigned long);
762 for ( i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++ )
763 {
764 if ( (j % fpp) == 0 )
765 {
766 k++;
767 BUG_ON(k>=fpp);
768 pfn_to_mfn_frame_list[k] = alloc_bootmem(PAGE_SIZE);
769 pfn_to_mfn_frame_list_list[k] =
770 virt_to_mfn(pfn_to_mfn_frame_list[k]);
771 j=0;
772 }
773 pfn_to_mfn_frame_list[k][j] =
774 virt_to_mfn(&phys_to_machine_mapping[i]);
775 }
776 HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
777 }
/* Unprivileged domains must not touch ACPI. */
779 if ( ! (xen_start_info->flags & SIF_INITDOMAIN))
780 {
781 acpi_disabled = 1;
782 #ifdef CONFIG_ACPI_BOOT
783 acpi_ht = 0;
784 #endif
785 }
786 #endif
788 #ifndef CONFIG_XEN
789 check_ioapic();
790 #endif
792 #ifdef CONFIG_ACPI_BOOT
793 /*
794 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
795 * Call this early for SRAT node setup.
796 */
797 acpi_boot_table_init();
799 /*
800 * Read APIC and some other early information from ACPI tables.
801 */
802 acpi_boot_init();
803 #endif
805 #ifdef CONFIG_X86_LOCAL_APIC
806 /*
807 * get boot-time SMP configuration:
808 */
809 if (smp_found_config)
810 get_smp_config();
811 #ifndef CONFIG_XEN
812 init_apic_mappings();
813 #endif
814 #endif
816 #if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
817 /*
818 * Request address space for all standard RAM and ROM resources
819 * and also for regions reported as reserved by the e820.
820 */
821 probe_roms();
822 e820_reserve_resources();
823 #endif
825 request_resource(&iomem_resource, &video_ram_resource);
827 {
828 unsigned i;
829 /* request I/O space for devices used on all i[345]86 PCs */
830 for (i = 0; i < STANDARD_IO_RESOURCES; i++)
831 request_resource(&ioport_resource, &standard_io_resources[i]);
832 }
834 e820_setup_gap();
836 #ifdef CONFIG_GART_IOMMU
837 iommu_hole_init();
838 #endif
840 #ifdef CONFIG_XEN
841 {
842 physdev_op_t op;
/* Allow ring-1 kernel access to I/O ports via the hypervisor. */
844 op.cmd = PHYSDEVOP_SET_IOPL;
845 op.u.set_iopl.iopl = 1;
846 HYPERVISOR_physdev_op(&op);
848 if (xen_start_info->flags & SIF_INITDOMAIN) {
849 if (!(xen_start_info->flags & SIF_PRIVILEGED))
850 panic("Xen granted us console access "
851 "but not privileged status");
853 #ifdef CONFIG_VT
854 #if defined(CONFIG_VGA_CONSOLE)
855 conswitchp = &vga_con;
856 #elif defined(CONFIG_DUMMY_CONSOLE)
857 conswitchp = &dummy_con;
858 #endif
859 #endif
860 } else {
/* Unprivileged guest: route the console to the null driver. */
861 #ifdef CONFIG_XEN_PRIVILEGED_GUEST
862 extern const struct consw xennull_con;
863 extern int console_use_vt;
864 #if defined(CONFIG_VGA_CONSOLE)
865 /* disable VGA driver */
866 ORIG_VIDEO_ISVGA = VIDEO_TYPE_VLFB;
867 #endif
868 conswitchp = &xennull_con;
869 console_use_vt = 0;
870 #endif
871 }
872 }
873 #else /* CONFIG_XEN */
875 #ifdef CONFIG_VT
876 #if defined(CONFIG_VGA_CONSOLE)
877 conswitchp = &vga_con;
878 #elif defined(CONFIG_DUMMY_CONSOLE)
879 conswitchp = &dummy_con;
880 #endif
881 #endif
883 #endif /* !CONFIG_XEN */
884 }
/*
 * Fetch the processor brand string via CPUID 0x80000002..0x80000004
 * into c->x86_model_id; returns 0 when the extended level is too low,
 * 1 on success.
 * NOTE(review): the opening brace of the function body is missing in
 * this dump -- lines appear to have been lost in extraction.
 */
886 static int __init get_model_name(struct cpuinfo_x86 *c)
888 unsigned int *v;
890 if (c->extended_cpuid_level < 0x80000004)
891 return 0;
/* Each leaf fills 16 bytes of the 48-byte brand string. */
893 v = (unsigned int *) c->x86_model_id;
894 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
895 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
896 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
897 c->x86_model_id[48] = 0;
898 return 1;
899 }
/*
 * Query AMD extended CPUID leaves 0x80000005..0x80000008 for L1/L2
 * cache and TLB sizes plus address widths; print and record them.
 * NOTE(review): the opening brace of the body is missing in this dump.
 */
902 static void __init display_cacheinfo(struct cpuinfo_x86 *c)
904 unsigned int n, dummy, eax, ebx, ecx, edx;
906 n = c->extended_cpuid_level;
908 if (n >= 0x80000005) {
909 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
910 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
911 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
912 c->x86_cache_size=(ecx>>24)+(edx>>24);
913 /* On K8 L1 TLB is inclusive, so don't count it */
914 c->x86_tlbsize = 0;
915 }
917 if (n >= 0x80000006) {
918 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
/* NOTE(review): ecx was already fetched by the cpuid() call above;
 * this re-read looks redundant -- verify against the original file. */
919 ecx = cpuid_ecx(0x80000006);
920 c->x86_cache_size = ecx >> 16;
921 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
923 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
924 c->x86_cache_size, ecx & 0xFF);
925 }
927 if (n >= 0x80000007)
928 cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
929 if (n >= 0x80000008) {
930 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
931 c->x86_virt_bits = (eax >> 8) & 0xff;
932 c->x86_phys_bits = eax & 0xff;
933 }
934 }
936 /*
937 * On a AMD dual core setup the lower bits of the APIC id distinguish the cores.
938 * Assumes number of cores is a power of two.
939 */
/* NOTE(review): the opening brace of the body is missing in this dump. */
940 static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
942 #ifdef CONFIG_SMP
943 int cpu = smp_processor_id();
944 int node = 0;
945 unsigned bits;
946 if (c->x86_num_cores == 1)
947 return;
/* bits = ceil(log2(number of cores)). */
949 bits = 0;
950 while ((1 << bits) < c->x86_num_cores)
951 bits++;
953 /* Low order bits define the core id (index of core in socket) */
954 cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
955 /* Convert the APIC ID into the socket ID */
956 phys_proc_id[cpu] >>= bits;
958 #ifdef CONFIG_NUMA
959 /* When an ACPI SRAT table is available use the mappings from SRAT
960 instead. */
961 if (acpi_numa <= 0) {
962 node = phys_proc_id[cpu];
963 if (!node_online(node))
964 node = first_node(node_online_map);
965 cpu_to_node[cpu] = node;
966 } else {
967 node = cpu_to_node[cpu];
968 }
969 #endif
971 printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n",
972 cpu, c->x86_num_cores, node, cpu_core_id[cpu]);
973 #endif
974 }
/*
 * AMD-specific CPU setup: fixes up the 3DNow feature bit, flags
 * C-stepping K8 parts, fills in the model name, cache info and the
 * number of cores.  Returns the get_model_name() result.
 * NOTE(review): several structural lines (opening brace, closing
 * braces around the trailing statements) are missing from this dump;
 * compare against the original file before editing.
 */
976 static int __init init_amd(struct cpuinfo_x86 *c)
978 int r;
979 int level;
981 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
982 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
983 clear_bit(0*32+31, &c->x86_capability);
985 /* C-stepping K8? */
986 level = cpuid_eax(1);
987 if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
988 set_bit(X86_FEATURE_K8_C, &c->x86_capability);
990 r = get_model_name(c);
991 if (!r) {
992 switch (c->x86) {
993 case 15:
994 /* Should distinguish Models here, but this is only
995 a fallback anyways. */
996 strcpy(c->x86_model_id, "Hammer");
997 break;
998 }
999 }
1000 display_cacheinfo(c);
1002 if (c->extended_cpuid_level >= 0x80000008) {
1003 c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
/* Only trust a power-of-two core count. */
1004 if (c->x86_num_cores & (c->x86_num_cores - 1))
1005 c->x86_num_cores = 1;
1007 amd_detect_cmp(c);
1010 return r;
/*
 * Detect Hyper-Threading siblings and derive physical package / core
 * IDs from the initial APIC ID (CPUID leaf 1, EBX[23:16] = logical
 * processor count).
 * NOTE(review): braces and some closing lines are missing from this
 * dump -- the control structure below cannot be fully verified here.
 */
1013 static void __init detect_ht(struct cpuinfo_x86 *c)
1015 #ifdef CONFIG_SMP
1016 u32 eax, ebx, ecx, edx;
1017 int index_msb, tmp;
1018 int cpu = smp_processor_id();
1020 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
1021 return;
1023 cpuid(1, &eax, &ebx, &ecx, &edx);
1024 smp_num_siblings = (ebx & 0xff0000) >> 16;
1026 if (smp_num_siblings == 1) {
1027 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
1028 } else if (smp_num_siblings > 1) {
1029 index_msb = 31;
1030 /*
1031 * At this point we only support two siblings per
1032 * processor package.
1033 */
1034 if (smp_num_siblings > NR_CPUS) {
1035 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
1036 smp_num_siblings = 1;
1037 return;
/* Find the most significant set bit of smp_num_siblings... */
1039 tmp = smp_num_siblings;
1040 while ((tmp & 0x80000000 ) == 0) {
1041 tmp <<=1 ;
1042 index_msb--;
/* ...rounding up when the count is not a power of two. */
1044 if (smp_num_siblings & (smp_num_siblings - 1))
1045 index_msb++;
1046 phys_proc_id[cpu] = phys_pkg_id(index_msb);
1048 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
1049 phys_proc_id[cpu]);
/* Per-core sibling count; repeat the MSB computation for core ID. */
1051 smp_num_siblings = smp_num_siblings / c->x86_num_cores;
1053 tmp = smp_num_siblings;
1054 index_msb = 31;
1055 while ((tmp & 0x80000000) == 0) {
1056 tmp <<=1 ;
1057 index_msb--;
1059 if (smp_num_siblings & (smp_num_siblings - 1))
1060 index_msb++;
1062 cpu_core_id[cpu] = phys_pkg_id(index_msb);
1064 if (c->x86_num_cores > 1)
1065 printk(KERN_INFO "CPU: Processor Core ID: %d\n",
1066 cpu_core_id[cpu]);
1068 #endif
1071 /*
1072 * find out the number of processor cores on the die
1073 */
/* Uses CPUID leaf 4 (deterministic cache parameters): EAX[31:26] + 1
 * gives the core count when the leaf reports a valid cache type.
 * NOTE(review): body braces are missing in this dump. */
1074 static int __init intel_num_cpu_cores(struct cpuinfo_x86 *c)
1076 unsigned int eax;
1078 if (c->cpuid_level < 4)
1079 return 1;
/* Leaf 4 requires ECX=0 to select the first cache level. */
1081 __asm__("cpuid"
1082 : "=a" (eax)
1083 : "0" (4), "c" (0)
1084 : "bx", "dx");
1086 if (eax & 0x1f)
1087 return ((eax >> 26) + 1);
1088 else
1089 return 1;
/*
 * Intel-specific CPU setup: cache info, address widths, cache-line
 * alignment, constant-TSC flag and core count.
 * NOTE(review): body braces are missing in this dump.
 */
1092 static void __init init_intel(struct cpuinfo_x86 *c)
1094 /* Cache sizes */
1095 unsigned n;
1097 init_intel_cacheinfo(c);
1098 n = c->extended_cpuid_level;
1099 if (n >= 0x80000008) {
1100 unsigned eax = cpuid_eax(0x80000008);
1101 c->x86_virt_bits = (eax >> 8) & 0xff;
1102 c->x86_phys_bits = eax & 0xff;
/* P4 (family 15): prefetchers work on two cache lines at once. */
1105 if (c->x86 == 15)
1106 c->x86_cache_alignment = c->x86_clflush_size * 2;
1107 if (c->x86 >= 15)
1108 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
1109 c->x86_num_cores = intel_num_cpu_cores(c);
/* Map the 12-byte CPUID vendor string onto an X86_VENDOR_* constant.
 * NOTE(review): body braces are missing in this dump. */
1112 void __init get_cpu_vendor(struct cpuinfo_x86 *c)
1114 char *v = c->x86_vendor_id;
1116 if (!strcmp(v, "AuthenticAMD"))
1117 c->x86_vendor = X86_VENDOR_AMD;
1118 else if (!strcmp(v, "GenuineIntel"))
1119 c->x86_vendor = X86_VENDOR_INTEL;
1120 else
1121 c->x86_vendor = X86_VENDOR_UNKNOWN;
/* Vendor/family to model-name lookup record -- apparently unused in
 * the visible portion of this file. */
1124 struct cpu_model_info {
1125 int vendor;
1126 int family;
1127 char *model_names[16];
1128 };
1130 /* Do some early cpuid on the boot CPU to get some parameter that are
1131 needed before check_bugs. Everything advanced is in identify_cpu
1132 below. */
/* NOTE(review): body braces are missing in this dump. */
1133 void __init early_identify_cpu(struct cpuinfo_x86 *c)
1135 u32 tfms;
1137 c->loops_per_jiffy = loops_per_jiffy;
1138 c->x86_cache_size = -1;
1139 c->x86_vendor = X86_VENDOR_UNKNOWN;
1140 c->x86_model = c->x86_mask = 0; /* So far unknown... */
1141 c->x86_vendor_id[0] = '\0'; /* Unset */
1142 c->x86_model_id[0] = '\0'; /* Unset */
1143 c->x86_clflush_size = 64;
1144 c->x86_cache_alignment = c->x86_clflush_size;
1145 c->x86_num_cores = 1;
1146 c->extended_cpuid_level = 0;
1147 memset(&c->x86_capability, 0, sizeof c->x86_capability);
1149 /* Get vendor name */
/* CPUID leaf 0: the vendor string lives in EBX, EDX, ECX -- hence the
 * 0/8/4 byte offsets below. */
1150 cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
1151 (unsigned int *)&c->x86_vendor_id[0],
1152 (unsigned int *)&c->x86_vendor_id[8],
1153 (unsigned int *)&c->x86_vendor_id[4]);
1155 get_cpu_vendor(c);
1157 /* Initialize the standard set of capabilities */
1158 /* Note that the vendor-specific code below might override */
1160 /* Intel-defined flags: level 0x00000001 */
1161 if (c->cpuid_level >= 0x00000001) {
1162 __u32 misc;
1163 cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
1164 &c->x86_capability[0]);
1165 c->x86 = (tfms >> 8) & 0xf;
1166 c->x86_model = (tfms >> 4) & 0xf;
1167 c->x86_mask = tfms & 0xf;
/* Family 0xf: fold in the extended family/model fields. */
1168 if (c->x86 == 0xf) {
1169 c->x86 += (tfms >> 20) & 0xff;
1170 c->x86_model += ((tfms >> 16) & 0xF) << 4;
/* CLFLUSH size (EBX[15:8] * 8) is valid only with feature bit 19. */
1172 if (c->x86_capability[0] & (1<<19))
1173 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
1174 } else {
1175 /* Have CPUID level 0 only - unheard of */
1176 c->x86 = 4;
1179 #ifdef CONFIG_SMP
/* Initial APIC ID from CPUID leaf 1, EBX[31:24]. */
1180 phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
1181 #endif
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
	int i;
	u32 xlvl;

	early_identify_cpu(c);

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	/* A valid max-extended-level has 0x8000 in the high word;
	 * anything else means no extended leaves. */
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	select_idle_routine(c);
	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0 ; i < NCAPINTS ; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
#ifdef CONFIG_NUMA
	if (c != &boot_cpu_data)
		numa_add_cpu(c - cpu_data);
#endif
}
1264 void __init print_cpu_info(struct cpuinfo_x86 *c)
1266 if (c->x86_model_id[0])
1267 printk("%s", c->x86_model_id);
1269 if (c->x86_mask || c->cpuid_level >= 0)
1270 printk(" stepping %02x\n", c->x86_mask);
1271 else
1272 printk("\n");
1275 /*
1276 * Get CPU information for use by the procfs.
1277 */
/*
 * seq_file "show" callback for /proc/cpuinfo: emit one record for the
 * CPU whose cpuinfo_x86 is passed in v.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned.  Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	/* Indexed by capability-bit number: word i of x86_capability
	 * corresponds to entries [32*i, 32*i+31].  Do NOT reorder. */
	static char *x86_cap_flags[] = {
		/* Intel-defined */
	        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
	        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
	        "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
	        "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
		"constant_tsc", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "est",
		"tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
	/* Names for the bits of c->x86_power (ACPI/power leaf). */
	static char *x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",  /* frequency id control */
		"vid",  /* voltage id control */
		"ttp",  /* thermal trip */
		"tm",
		"stc"
	};

#ifdef CONFIG_SMP
	/* Skip records for CPUs that are not online. */
	if (!cpu_online(c-cpu_data))
		return 0;
#endif

	seq_printf(m,"processor\t: %u\n"
		     "vendor_id\t: %s\n"
		     "cpu family\t: %d\n"
		     "model\t\t: %d\n"
		     "model name\t: %s\n",
		     (unsigned)(c-cpu_data),
		     c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		     c->x86,
		     (int)c->x86_model,
		     c->x86_model_id[0] ? c->x86_model_id : "unknown");

	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");

	if (cpu_has(c,X86_FEATURE_TSC)) {
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			     cpu_khz / 1000, (cpu_khz % 1000));
	}

	/* Cache size */
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

#ifdef CONFIG_SMP
	/* Topology lines only make sense with more than one sibling. */
	if (smp_num_siblings * c->x86_num_cores > 1) {
		int cpu = c - cpu_data;
		seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
		seq_printf(m, "siblings\t: %d\n",
				c->x86_num_cores * smp_num_siblings);
		seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
		seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
	}
#endif

	seq_printf(m,
	        "fpu\t\t: yes\n"
	        "fpu_exception\t: yes\n"
	        "cpuid level\t: %d\n"
	        "wp\t\t: yes\n"
	        "flags\t\t:",
		   c->cpuid_level);

	{
		int i;
		/* Walk every capability bit and print its name (if any). */
		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
			if ( test_bit(i, &c->x86_capability) &&
			     x86_cap_flags[i] != NULL )
				seq_printf(m, " %s", x86_cap_flags[i]);
	}

	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);

	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);

	seq_printf(m, "power management:");
	{
		unsigned i;
		/* Named bits get their name; unknown bits print as [n]. */
		for (i = 0; i < 32; i++)
			if (c->x86_power & (1 << i)) {
				if (i < ARRAY_SIZE(x86_power_flags))
					seq_printf(m, " %s",
						   x86_power_flags[i]);
				else
					seq_printf(m, " [%d]", i);
			}
	}

	seq_printf(m, "\n\n");

	return 0;
}
1431 static void *c_start(struct seq_file *m, loff_t *pos)
1433 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
1436 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1438 ++*pos;
1439 return c_start(m, pos);
1442 static void c_stop(struct seq_file *m, void *v)
/* seq_file operations backing /proc/cpuinfo. */
struct seq_operations cpuinfo_op = {
	.start =c_start,
	.next =	c_next,
	.stop =	c_stop,
	.show =	show_cpuinfo,
};