ia64/xen-unstable

view linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c @ 6294:1a0723cd37f1

Fix many uses of machine addresses in XenLinux. Primarily
this fixes users of virt_to_machine/machine_to_virt to
use virt_to_mfn/mfn_to_virt where that is more appropriate.

This should be a big step to improved PAE stability.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Aug 19 16:06:43 2005 +0000 (2005-08-19)
parents 47d49e8b8042
children f51fe43c5d1c 5f4724c13040 81576d3d1ca8 3a8f27c6d56c
line source
1 /*
2 * linux/arch/x86-64/kernel/setup.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 *
6 * Nov 2001 Dave Jones <davej@suse.de>
7 * Forked from i386 setup code.
8 *
9 * $Id$
10 */
12 /*
13 * This file handles the architecture-dependent parts of initialization
14 */
16 #include <linux/errno.h>
17 #include <linux/sched.h>
18 #include <linux/kernel.h>
19 #include <linux/mm.h>
20 #include <linux/stddef.h>
21 #include <linux/unistd.h>
22 #include <linux/ptrace.h>
23 #include <linux/slab.h>
24 #include <linux/user.h>
25 #include <linux/a.out.h>
26 #include <linux/tty.h>
27 #include <linux/ioport.h>
28 #include <linux/delay.h>
29 #include <linux/config.h>
30 #include <linux/init.h>
31 #include <linux/initrd.h>
32 #include <linux/highmem.h>
33 #include <linux/bootmem.h>
34 #include <linux/module.h>
35 #include <asm/processor.h>
36 #include <linux/console.h>
37 #include <linux/seq_file.h>
38 #include <linux/root_dev.h>
39 #include <linux/pci.h>
40 #include <linux/acpi.h>
41 #include <linux/kallsyms.h>
42 #include <linux/edd.h>
43 #include <asm/mtrr.h>
44 #include <asm/uaccess.h>
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/smp.h>
48 #include <asm/msr.h>
49 #include <asm/desc.h>
50 #include <video/edid.h>
51 #include <asm/e820.h>
52 #include <asm/dma.h>
53 #include <asm/mpspec.h>
54 #include <asm/mmu_context.h>
55 #include <asm/bootsetup.h>
56 #include <asm/proto.h>
57 #include <asm/setup.h>
58 #include <asm/mach_apic.h>
59 #include <asm/numa.h>
60 #ifdef CONFIG_XEN
61 #include <linux/percpu.h>
62 #include <asm-xen/xen-public/physdev.h>
63 #include "setup_arch_pre.h"
64 #include <asm-xen/hypervisor.h>
65 #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
66 #define PFN_PHYS(x) ((x) << PAGE_SHIFT)
67 #define end_pfn_map end_pfn
68 #include <asm/mach-xen/setup_arch_post.h>
/* First page frame not handed out by the early Xen allocator (set up in
 * head.S; see setup_arch_pre.h). */
extern unsigned long start_pfn;
extern struct edid_info edid_info;

/* Mapped hypervisor shared-info page.  Starts out aliased to the zero
 * page so that early accesses are harmless; re-pointed at the real
 * shared page later (NOTE(review): remapping happens elsewhere - confirm). */
shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
EXPORT_SYMBOL(HYPERVISOR_shared_info);

/* Allows setting of maximum possible memory size */
unsigned long xen_override_max_pfn;

/* P2M translation table (u32 entries) and the list of machine frames
 * that hold it; both are (re)initialized in setup_arch(). */
u32 *phys_to_machine_mapping, *pfn_to_mfn_frame_list;

EXPORT_SYMBOL(phys_to_machine_mapping);

/* Per-CPU batching buffers for hypervisor multicalls. */
DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
DEFINE_PER_CPU(int, nr_multicall_ents);

/* Raw start-of-day parameters from the hypervisor. */
union xen_start_info_union xen_start_info_union;
#endif
90 /*
91 * Machine setup..
92 */
94 struct cpuinfo_x86 boot_cpu_data;
96 unsigned long mmu_cr4_features;
98 int acpi_disabled;
99 EXPORT_SYMBOL(acpi_disabled);
100 #ifdef CONFIG_ACPI_BOOT
101 extern int __initdata acpi_ht;
102 extern acpi_interrupt_flags acpi_sci_flags;
103 int __initdata acpi_force = 0;
104 #endif
106 int acpi_numa __initdata;
108 /* Boot loader ID as an integer, for the benefit of proc_dointvec */
109 int bootloader_type;
111 unsigned long saved_video_mode;
113 #ifdef CONFIG_SWIOTLB
114 int swiotlb;
115 EXPORT_SYMBOL(swiotlb);
116 #endif
118 /*
119 * Setup options
120 */
121 struct drive_info_struct { char dummy[32]; } drive_info;
122 struct screen_info screen_info;
123 struct sys_desc_table_struct {
124 unsigned short length;
125 unsigned char table[0];
126 };
128 struct edid_info edid_info;
129 struct e820map e820;
131 extern int root_mountflags;
132 extern char _text, _etext, _edata, _end;
134 char command_line[COMMAND_LINE_SIZE];
/* Legacy ISA I/O port ranges present on every PC; claimed for the
 * ioport_resource tree in setup_arch(). */
struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

/* Element count of the table above (valid only on the true array). */
#define STANDARD_IO_RESOURCES \
	(sizeof standard_io_resources / sizeof standard_io_resources[0])

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
/* Kernel data segment; start/end are filled in by setup_arch()
 * (native boot only - see the !CONFIG_XEN branch there). */
struct resource data_resource = {
	.name = "Kernel data",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
/* Kernel text segment; likewise completed by setup_arch(). */
struct resource code_resource = {
	.name = "Kernel code",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};

#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)

/* ROM scanning only makes sense with direct hardware access, i.e. on
 * native boots or in a privileged Xen domain. */
#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
static struct resource system_rom_resource = {
	.name = "System ROM",
	.start = 0xf0000,
	.end = 0xfffff,
	.flags = IORESOURCE_ROM,
};

static struct resource extension_rom_resource = {
	.name = "Extension ROM",
	.start = 0xe0000,
	.end = 0xeffff,
	.flags = IORESOURCE_ROM,
};

/* Slots filled in by probe_roms() as adapter ROMs are discovered; the
 * first entry's start doubles as the scan limit for the video ROM. */
static struct resource adapter_rom_resources[] = {
	{ .name = "Adapter ROM", .start = 0xc8000, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM }
};
#endif

#define ADAPTER_ROM_RESOURCES \
	(sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])

#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
static struct resource video_rom_resource = {
	.name = "Video ROM",
	.start = 0xc0000,
	.end = 0xc7fff,
	.flags = IORESOURCE_ROM,
};
#endif

static struct resource video_ram_resource = {
	.name = "Video RAM area",
	.start = 0xa0000,
	.end = 0xbffff,
	.flags = IORESOURCE_RAM,
};
#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
/* A valid option ROM image begins with the 0xAA55 signature word. */
#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
230 static int __init romchecksum(unsigned char *rom, unsigned long length)
231 {
232 unsigned char *p, sum = 0;
234 for (p = rom; p < rom + length; p++)
235 sum += *p;
236 return sum == 0;
237 }
/*
 * probe_roms - locate BIOS ROM regions and register them as resources.
 *
 * Scans the legacy ISA ROM window for the video ROM, the fixed system
 * ROM, an optional extension ROM, and adapter option ROMs (on 2K
 * boundaries), requesting an iomem resource for each one found.
 */
static void __init probe_roms(void)
{
	unsigned long start, length, upper;
	unsigned char *rom;
	int	      i;

	/* video rom */
	upper = adapter_rom_resources[0].start;
	for (start = video_rom_resource.start; start < upper; start += 2048) {
		rom = isa_bus_to_virt(start);
		if (!romsignature(rom))
			continue;

		video_rom_resource.start = start;

		/* 0 < length <= 0x7f * 512, historically */
		length = rom[2] * 512;

		/* if checksum okay, trust length byte */
		if (length && romchecksum(rom, length))
			video_rom_resource.end = start + length - 1;

		request_resource(&iomem_resource, &video_rom_resource);
		break;
	}

	/* Resume the adapter-ROM scan just past the video ROM. */
	start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
	if (start < upper)
		start = upper;

	/* system rom */
	request_resource(&iomem_resource, &system_rom_resource);
	upper = system_rom_resource.start;

	/* check for extension rom (ignore length byte!) */
	rom = isa_bus_to_virt(extension_rom_resource.start);
	if (romsignature(rom)) {
		length = extension_rom_resource.end - extension_rom_resource.start + 1;
		if (romchecksum(rom, length)) {
			request_resource(&iomem_resource, &extension_rom_resource);
			upper = extension_rom_resource.start;
		}
	}

	/* check for adapter roms on 2k boundaries */
	for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
		rom = isa_bus_to_virt(start);
		if (!romsignature(rom))
			continue;

		/* 0 < length <= 0x7f * 512, historically */
		length = rom[2] * 512;

		/* but accept any length that fits if checksum okay */
		if (!length || start + length > upper || !romchecksum(rom, length))
			continue;

		adapter_rom_resources[i].start = start;
		adapter_rom_resources[i].end = start + length - 1;
		request_resource(&iomem_resource, &adapter_rom_resources[i]);

		/* i is only advanced for accepted ROMs, so rejected
		   candidates do not consume table slots. */
		start = adapter_rom_resources[i++].end & ~2047UL;
	}
}
#endif
/*
 * parse_cmdline_early - scan the boot command line for early options.
 *
 * Saves an unparsed copy for /proc/cmdline (from the Xen start-of-day
 * command line under CONFIG_XEN, otherwise from the boot parameters),
 * then walks the line word by word handling options that must take
 * effect before the normal __setup() machinery runs.  The stripped
 * result is copied into command_line and returned via *cmdline_p.
 *
 * NOTE(review): the option tests below are only reached when the
 * previous character was a space (the "goto next_char" guard), and
 * under CONFIG_SMP the "else if" for maxcpus= pairs with that guard -
 * preserved as-is from the historical code.
 */
static __init void parse_cmdline_early (char ** cmdline_p)
{
	char c = ' ', *to = command_line, *from = COMMAND_LINE;
	int len = 0;

	/* Save unparsed command line copy for /proc/cmdline */
#ifdef CONFIG_XEN
	int max_cmdline;

	/* Clamp to our buffer: the guest command line may be longer. */
	if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
		max_cmdline = COMMAND_LINE_SIZE;
	memcpy(saved_command_line, xen_start_info.cmd_line, max_cmdline);
	saved_command_line[max_cmdline-1] = '\0';
#else
	memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
#endif

	for (;;) {
		/* Only test for options at the start of a word. */
		if (c != ' ')
			goto next_char;

#ifdef CONFIG_SMP
		/*
		 * If the BIOS enumerates physical processors before logical,
		 * maxcpus=N at enumeration-time can be used to disable HT.
		 */
		else if (!memcmp(from, "maxcpus=", 8)) {
			extern unsigned int maxcpus;

			maxcpus = simple_strtoul(from + 8, NULL, 0);
		}
#endif
#ifdef CONFIG_ACPI_BOOT
		/* "acpi=off" disables both ACPI table parsing and interpreter init */
		if (!memcmp(from, "acpi=off", 8))
			disable_acpi();

		if (!memcmp(from, "acpi=force", 10)) {
			/* add later when we do DMI horrors: */
			acpi_force = 1;
			acpi_disabled = 0;
		}

		/* acpi=ht just means: do ACPI MADT parsing
		   at bootup, but don't enable the full ACPI interpreter */
		if (!memcmp(from, "acpi=ht", 7)) {
			if (!acpi_force)
				disable_acpi();
			acpi_ht = 1;
		}
		else if (!memcmp(from, "pci=noacpi", 10))
			acpi_disable_pci();
		else if (!memcmp(from, "acpi=noirq", 10))
			acpi_noirq_set();

		else if (!memcmp(from, "acpi_sci=edge", 13))
			acpi_sci_flags.trigger = 1;
		else if (!memcmp(from, "acpi_sci=level", 14))
			acpi_sci_flags.trigger = 3;
		else if (!memcmp(from, "acpi_sci=high", 13))
			acpi_sci_flags.polarity = 1;
		else if (!memcmp(from, "acpi_sci=low", 12))
			acpi_sci_flags.polarity = 3;

		/* acpi=strict disables out-of-spec workarounds */
		else if (!memcmp(from, "acpi=strict", 11)) {
			acpi_strict = 1;
		}
#ifdef CONFIG_X86_IO_APIC
		else if (!memcmp(from, "acpi_skip_timer_override", 24))
			acpi_skip_timer_override = 1;
#endif
#endif
#ifndef CONFIG_XEN
		/* Local/IO-APIC options make no sense under Xen, which
		   owns the interrupt hardware. */
		if (!memcmp(from, "nolapic", 7) ||
		    !memcmp(from, "disableapic", 11))
			disable_apic = 1;

		if (!memcmp(from, "noapic", 6))
			skip_ioapic_setup = 1;

		if (!memcmp(from, "apic", 4)) {
			skip_ioapic_setup = 0;
			ioapic_force = 1;
		}
#endif
		if (!memcmp(from, "mem=", 4))
			parse_memopt(from+4, &from);

#ifdef CONFIG_DISCONTIGMEM
		if (!memcmp(from, "numa=", 5))
			numa_setup(from+5);
#endif

#ifdef CONFIG_GART_IOMMU
		if (!memcmp(from,"iommu=",6)) {
			iommu_setup(from+6);
		}
#endif

		if (!memcmp(from,"oops=panic", 10))
			panic_on_oops = 1;

		if (!memcmp(from, "noexec=", 7))
			nonx_setup(from + 7);

	next_char:
		/* Copy the character through, stopping at NUL or when
		   the destination buffer is full. */
		c = *(from++);
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*(to++) = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}
#ifndef CONFIG_DISCONTIGMEM
#ifdef CONFIG_XEN
/*
 * contig_initmem_init - set up the bootmem allocator (Xen variant).
 *
 * All guest memory up to end_pfn is usable RAM, so simply free it all
 * and then re-reserve everything below the first unallocated frame
 * (kernel image, start-of-day structures) plus the bootmem bitmap.
 */
static void __init contig_initmem_init(void)
{
        unsigned long bootmap_size = init_bootmem(start_pfn, end_pfn);
        free_bootmem(0, end_pfn << PAGE_SHIFT);
        /* XXX KAF: Why can't we leave low 1MB of memory free? */
        reserve_bootmem(0, (PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE-1));
}
#else
/*
 * contig_initmem_init - set up the bootmem allocator (native variant).
 *
 * Finds a free e820 range for the bootmem bitmap, initializes the
 * allocator, frees all e820-usable RAM into it, and finally reserves
 * the bitmap itself.
 */
static void __init contig_initmem_init(void)
{
	unsigned long bootmap_size, bootmap;
	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n",bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_bootmem_free(&contig_page_data, 0, end_pfn << PAGE_SHIFT);
	reserve_bootmem(bootmap, bootmap_size);
}
#endif	/* !CONFIG_XEN */
#endif
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nk8nops: "
    K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
    K8_NOP7 K8_NOP8);

extern unsigned char k8nops[];
/* k8_nops[n] points at an n-byte K8-optimal nop sequence (n = 1..8);
 * the offsets index into the concatenated blob emitted above. */
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
469 /* Replace instructions with better alternatives for this CPU type.
471 This runs before SMP is initialized to avoid SMP problems with
472 self modifying code. This implies that assymetric systems where
473 APs have less capabilities than the boot processor are not handled.
474 In this case boot with "noreplacement". */
475 void apply_alternatives(void *start, void *end)
476 {
477 struct alt_instr *a;
478 int diff, i, k;
479 for (a = start; (void *)a < end; a++) {
480 if (!boot_cpu_has(a->cpuid))
481 continue;
483 BUG_ON(a->replacementlen > a->instrlen);
484 __inline_memcpy(a->instr, a->replacement, a->replacementlen);
485 diff = a->instrlen - a->replacementlen;
487 /* Pad the rest with nops */
488 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
489 k = diff;
490 if (k > ASM_NOP_MAX)
491 k = ASM_NOP_MAX;
492 __inline_memcpy(a->instr + i, k8_nops[k], k);
493 }
494 }
495 }
497 static int no_replacement __initdata = 0;
499 void __init alternative_instructions(void)
500 {
501 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
502 if (no_replacement)
503 return;
504 apply_alternatives(__alt_instructions, __alt_instructions_end);
505 }
507 static int __init noreplacement_setup(char *s)
508 {
509 no_replacement = 1;
510 return 0;
511 }
513 __setup("noreplacement", noreplacement_setup);
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 * The boot-parameter area is reclaimed later, so the BIOS Enhanced
 * Disk Drive data must be copied out before then.
 */
static inline void copy_edd(void)
{
     memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
     memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
     edd.mbr_signature_nr = EDD_MBR_SIG_NR;
     edd.edd_info_nr = EDD_NR;
}
#else
/* No EDD support configured: nothing to copy. */
static inline void copy_edd(void)
{
}
#endif
538 #ifdef CONFIG_XEN
/*
 * Under Xen there is no BIOS Extended BIOS Data Area to reserve, so the
 * call becomes a no-op.  Use the standard ((void)0) no-op expression:
 * the previous definition, "void(0)", is a C++-style cast that is not
 * valid C and would break the build if this macro were ever expanded.
 */
#define reserve_ebda_region() ((void)0)
/*
 * print_memory_map - dump the e820 memory map via early_printk.
 * @who: label describing the source of the map (e.g. the value
 *       returned by machine_specific_memory_setup()).
 */
static void __init print_memory_map(char *who)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		early_printk(" %s: %016Lx - %016Lx ", who,
			e820.map[i].addr,
			e820.map[i].addr + e820.map[i].size);
		switch (e820.map[i].type) {
		case E820_RAM:	early_printk("(usable)\n");
				break;
		case E820_RESERVED:
				early_printk("(reserved)\n");
				break;
		case E820_ACPI:
				early_printk("(ACPI data)\n");
				break;
		case E820_NVS:
				early_printk("(ACPI NVS)\n");
				break;
		default:	early_printk("type %u\n", e820.map[i].type);
				break;
		}
	}
}
/*
 * smp_alloc_memory - allocate a per-CPU GDT page for each secondary CPU.
 *
 * Allocates one low bootmem page per possible CPU (1..NR_CPUS-1) and
 * records it in that CPU's GDT descriptor.
 */
void __init smp_alloc_memory(void)
{
	int cpu;

	for (cpu = 1; cpu < NR_CPUS; cpu++) {
		cpu_gdt_descr[cpu].address = (unsigned long)
			alloc_bootmem_low_pages(PAGE_SIZE);
		/* XXX free unused pages later */
	}
}
579 #else
580 #define EBDA_ADDR_POINTER 0x40E
581 static void __init reserve_ebda_region(void)
582 {
583 unsigned int addr;
584 /**
585 * there is a real-mode segmented pointer pointing to the
586 * 4K EBDA area at 0x40E
587 */
588 addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);
589 addr <<= 4;
590 if (addr)
591 reserve_bootmem_generic(addr, PAGE_SIZE);
592 }
593 #endif
/*
 * setup_arch - architecture-specific boot-time initialization.
 *
 * Gathers boot parameters (from the Xen start_info page or the BIOS
 * boot-parameter block), builds the memory map, sets up the bootmem
 * allocator and kernel mappings, reserves special regions, initializes
 * paging, ACPI/SMP discovery, the Xen P->M table, and finally registers
 * standard I/O and memory resources and picks a console driver.
 * The cleaned-up command line is returned through *cmdline_p.
 */
void __init setup_arch(char **cmdline_p)
{
	unsigned long kernel_end;

#ifdef CONFIG_XEN
	/* Boot parameters come from the hypervisor, not the BIOS. */
	ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
	drive_info = DRIVE_INFO;
	kernel_end = 0;		/* dummy */
#ifdef CONFIG_XEN_PHYSDEV_ACCESS
	screen_info = SCREEN_INFO;

	/* This is drawn from a dump from vgacon:startup in standard Linux. */
	screen_info.orig_video_mode = 3;
	screen_info.orig_video_isVGA = 1;
	screen_info.orig_video_lines = 25;
	screen_info.orig_video_cols = 80;
	screen_info.orig_video_ega_bx = 3;
	screen_info.orig_video_points = 16;
#endif
	edid_info = EDID_INFO;
	saved_video_mode = SAVED_VIDEO_MODE;
	bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

	/* Have Xen emulate guest writes to page tables. */
	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_writable_pagetables);

	ARCH_SETUP
	print_memory_map(machine_specific_memory_setup());
#else
	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
	drive_info = DRIVE_INFO;
	screen_info = SCREEN_INFO;
	edid_info = EDID_INFO;
	saved_video_mode = SAVED_VIDEO_MODE;
	bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();
	copy_edd();
#endif	/* !CONFIG_XEN */

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
#ifdef CONFIG_XEN
	/* Under Xen everything below start_pfn is already allocated. */
	init_mm.brk = start_pfn << PAGE_SHIFT;
#else
	init_mm.brk = (unsigned long) &_end;

	/* Complete the kernel text/data resources declared above. */
	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;
#endif

	parse_cmdline_early(cmdline_p);

	early_identify_cpu(&boot_cpu_data);

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();

	check_efer();

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_DISCONTIGMEM
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init();
#endif

#ifndef CONFIG_XEN
	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT);

	/* reserve kernel */
	kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
	reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	reserve_ebda_region();
#endif

#ifdef CONFIG_SMP
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif
#ifdef CONFIG_XEN
#ifdef CONFIG_BLK_DEV_INITRD
	/* An initrd supplied by the domain builder is already in place. */
	if (xen_start_info.mod_start) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			/*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
			initrd_start = INITRD_START + PAGE_OFFSET;
			initrd_end = initrd_start+INITRD_SIZE;
			initrd_below_start_ok = 1;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				(unsigned long)(INITRD_START + INITRD_SIZE),
				(unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
#ifdef CONFIG_SMP
	smp_alloc_memory();
#endif
#else	/* CONFIG_XEN */
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start+INITRD_SIZE;
		}
		else {
			printk(KERN_ERR "initrd extends beyond end of memory "
				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				(unsigned long)(INITRD_START + INITRD_SIZE),
				(unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
#endif	/* !CONFIG_XEN */
	paging_init();
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#endif
#ifdef CONFIG_XEN
	{
		int i, j;
		/* Make sure we have a large enough P->M table. */
		/* NOTE(review): entries are declared u32 above but sized
		   here with sizeof(unsigned long) - confirm intent. */
		if (end_pfn > xen_start_info.nr_pages) {
			phys_to_machine_mapping = alloc_bootmem(
				max_pfn * sizeof(unsigned long));
			memset(phys_to_machine_mapping, ~0,
			       max_pfn * sizeof(unsigned long));
			memcpy(phys_to_machine_mapping,
			       (unsigned long *)xen_start_info.mfn_list,
			       xen_start_info.nr_pages * sizeof(unsigned long));
			free_bootmem(
				__pa(xen_start_info.mfn_list),
				PFN_PHYS(PFN_UP(xen_start_info.nr_pages *
						sizeof(unsigned long))));
		}

		/* Record which frames hold the P->M table itself. */
		pfn_to_mfn_frame_list = alloc_bootmem(PAGE_SIZE);

		for ( i=0, j=0; i < end_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
		{
			pfn_to_mfn_frame_list[j] =
				virt_to_mfn(&phys_to_machine_mapping[i]);
		}
	}
#endif

#ifndef CONFIG_XEN
	check_ioapic();
#endif

#ifdef CONFIG_ACPI_BOOT
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();

	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
#ifndef CONFIG_XEN
	init_apic_mappings();
#endif
#endif

#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
	/*
	 * Request address space for all standard RAM and ROM resources
	 * and also for regions reported as reserved by the e820.
	 */
	probe_roms();
	e820_reserve_resources();
#endif

	request_resource(&iomem_resource, &video_ram_resource);

	{
	unsigned i;
	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
	}

	e820_setup_gap();

#ifdef CONFIG_GART_IOMMU
	iommu_hole_init();
#endif

#ifdef CONFIG_XEN
	{
		physdev_op_t op;

		/* Grant the guest kernel I/O privilege level 1. */
		op.cmd = PHYSDEVOP_SET_IOPL;
		op.u.set_iopl.iopl = 1;
		HYPERVISOR_physdev_op(&op);

		/* Pick a console: dom0 gets the real VGA console, other
		   domains get a null/dummy console. */
		if (xen_start_info.flags & SIF_INITDOMAIN) {
			if (!(xen_start_info.flags & SIF_PRIVILEGED))
				panic("Xen granted us console access "
				      "but not privileged status");

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
			conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
			conswitchp = &dummy_con;
#endif
#endif
		} else {
#ifdef CONFIG_XEN_PRIVILEGED_GUEST
			extern const struct consw xennull_con;
			extern int console_use_vt;
#if defined(CONFIG_VGA_CONSOLE)
			/* disable VGA driver */
			ORIG_VIDEO_ISVGA = VIDEO_TYPE_VLFB;
#endif
			conswitchp = &xennull_con;
			console_use_vt = 0;
#endif
		}
	}
#else	/* CONFIG_XEN */

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

#endif /* !CONFIG_XEN */
}
902 static int __init get_model_name(struct cpuinfo_x86 *c)
903 {
904 unsigned int *v;
906 if (c->extended_cpuid_level < 0x80000004)
907 return 0;
909 v = (unsigned int *) c->x86_model_id;
910 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
911 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
912 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
913 c->x86_model_id[48] = 0;
914 return 1;
915 }
/*
 * display_cacheinfo - read and report AMD-style cache/TLB information.
 *
 * Uses extended CPUID leaves 0x80000005-0x80000008 to print L1/L2
 * cache geometry and record cache size, TLB size, and physical/virtual
 * address widths in *c.
 */
static void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, eax, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size=(ecx>>24)+(edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		/* NOTE(review): ecx is re-read here, overwriting the value
		   from the cpuid() call above - appears redundant. */
		ecx = cpuid_ecx(0x80000006);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		c->x86_cache_size, ecx & 0xFF);
	}

	if (n >= 0x80000007)
		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
	if (n >= 0x80000008) {
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}
952 /*
953 * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
954 * Assumes number of cores is a power of two.
955 */
956 static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
957 {
958 #ifdef CONFIG_SMP
959 int cpu = smp_processor_id();
960 int node = 0;
961 unsigned bits;
962 if (c->x86_num_cores == 1)
963 return;
965 bits = 0;
966 while ((1 << bits) < c->x86_num_cores)
967 bits++;
969 /* Low order bits define the core id (index of core in socket) */
970 cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
971 /* Convert the APIC ID into the socket ID */
972 phys_proc_id[cpu] >>= bits;
974 #ifdef CONFIG_NUMA
975 /* When an ACPI SRAT table is available use the mappings from SRAT
976 instead. */
977 if (acpi_numa <= 0) {
978 node = phys_proc_id[cpu];
979 if (!node_online(node))
980 node = first_node(node_online_map);
981 cpu_to_node[cpu] = node;
982 } else {
983 node = cpu_to_node[cpu];
984 }
985 #endif
987 printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n",
988 cpu, c->x86_num_cores, node, cpu_core_id[cpu]);
989 #endif
990 }
992 static int __init init_amd(struct cpuinfo_x86 *c)
993 {
994 int r;
995 int level;
997 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
998 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
999 clear_bit(0*32+31, &c->x86_capability);
1001 /* C-stepping K8? */
1002 level = cpuid_eax(1);
1003 if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
1004 set_bit(X86_FEATURE_K8_C, &c->x86_capability);
1006 r = get_model_name(c);
1007 if (!r) {
1008 switch (c->x86) {
1009 case 15:
1010 /* Should distinguish Models here, but this is only
1011 a fallback anyways. */
1012 strcpy(c->x86_model_id, "Hammer");
1013 break;
1016 display_cacheinfo(c);
1018 if (c->extended_cpuid_level >= 0x80000008) {
1019 c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
1020 if (c->x86_num_cores & (c->x86_num_cores - 1))
1021 c->x86_num_cores = 1;
1023 amd_detect_cmp(c);
1026 return r;
1029 static void __init detect_ht(struct cpuinfo_x86 *c)
1031 #ifdef CONFIG_SMP
1032 u32 eax, ebx, ecx, edx;
1033 int index_msb, tmp;
1034 int cpu = smp_processor_id();
1036 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
1037 return;
1039 cpuid(1, &eax, &ebx, &ecx, &edx);
1040 smp_num_siblings = (ebx & 0xff0000) >> 16;
1042 if (smp_num_siblings == 1) {
1043 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
1044 } else if (smp_num_siblings > 1) {
1045 index_msb = 31;
1046 /*
1047 * At this point we only support two siblings per
1048 * processor package.
1049 */
1050 if (smp_num_siblings > NR_CPUS) {
1051 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
1052 smp_num_siblings = 1;
1053 return;
1055 tmp = smp_num_siblings;
1056 while ((tmp & 0x80000000 ) == 0) {
1057 tmp <<=1 ;
1058 index_msb--;
1060 if (smp_num_siblings & (smp_num_siblings - 1))
1061 index_msb++;
1062 phys_proc_id[cpu] = phys_pkg_id(index_msb);
1064 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
1065 phys_proc_id[cpu]);
1067 smp_num_siblings = smp_num_siblings / c->x86_num_cores;
1069 tmp = smp_num_siblings;
1070 index_msb = 31;
1071 while ((tmp & 0x80000000) == 0) {
1072 tmp <<=1 ;
1073 index_msb--;
1075 if (smp_num_siblings & (smp_num_siblings - 1))
1076 index_msb++;
1078 cpu_core_id[cpu] = phys_pkg_id(index_msb);
1080 if (c->x86_num_cores > 1)
1081 printk(KERN_INFO "CPU: Processor Core ID: %d\n",
1082 cpu_core_id[cpu]);
1084 #endif
1087 /*
1088 * find out the number of processor cores on the die
1089 */
1090 static int __init intel_num_cpu_cores(struct cpuinfo_x86 *c)
1092 unsigned int eax;
1094 if (c->cpuid_level < 4)
1095 return 1;
1097 __asm__("cpuid"
1098 : "=a" (eax)
1099 : "0" (4), "c" (0)
1100 : "bx", "dx");
1102 if (eax & 0x1f)
1103 return ((eax >> 26) + 1);
1104 else
1105 return 1;
1108 static void __init init_intel(struct cpuinfo_x86 *c)
1110 /* Cache sizes */
1111 unsigned n;
1113 init_intel_cacheinfo(c);
1114 n = c->extended_cpuid_level;
1115 if (n >= 0x80000008) {
1116 unsigned eax = cpuid_eax(0x80000008);
1117 c->x86_virt_bits = (eax >> 8) & 0xff;
1118 c->x86_phys_bits = eax & 0xff;
1121 if (c->x86 == 15)
1122 c->x86_cache_alignment = c->x86_clflush_size * 2;
1123 if (c->x86 >= 15)
1124 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
1125 c->x86_num_cores = intel_num_cpu_cores(c);
1128 void __init get_cpu_vendor(struct cpuinfo_x86 *c)
1130 char *v = c->x86_vendor_id;
1132 if (!strcmp(v, "AuthenticAMD"))
1133 c->x86_vendor = X86_VENDOR_AMD;
1134 else if (!strcmp(v, "GenuineIntel"))
1135 c->x86_vendor = X86_VENDOR_INTEL;
1136 else
1137 c->x86_vendor = X86_VENDOR_UNKNOWN;
/* Lookup entry mapping a (vendor, family) pair to per-model name strings. */
struct cpu_model_info {
	int vendor;		/* X86_VENDOR_* constant */
	int family;		/* CPU family number */
	char *model_names[16];	/* indexed by model number */
};
1146 /* Do some early cpuid on the boot CPU to get some parameter that are
1147 needed before check_bugs. Everything advanced is in identify_cpu
1148 below. */
1149 void __init early_identify_cpu(struct cpuinfo_x86 *c)
1151 u32 tfms;
1153 c->loops_per_jiffy = loops_per_jiffy;
1154 c->x86_cache_size = -1;
1155 c->x86_vendor = X86_VENDOR_UNKNOWN;
1156 c->x86_model = c->x86_mask = 0; /* So far unknown... */
1157 c->x86_vendor_id[0] = '\0'; /* Unset */
1158 c->x86_model_id[0] = '\0'; /* Unset */
1159 c->x86_clflush_size = 64;
1160 c->x86_cache_alignment = c->x86_clflush_size;
1161 c->x86_num_cores = 1;
1162 c->extended_cpuid_level = 0;
1163 memset(&c->x86_capability, 0, sizeof c->x86_capability);
1165 /* Get vendor name */
1166 cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
1167 (unsigned int *)&c->x86_vendor_id[0],
1168 (unsigned int *)&c->x86_vendor_id[8],
1169 (unsigned int *)&c->x86_vendor_id[4]);
1171 get_cpu_vendor(c);
1173 /* Initialize the standard set of capabilities */
1174 /* Note that the vendor-specific code below might override */
1176 /* Intel-defined flags: level 0x00000001 */
1177 if (c->cpuid_level >= 0x00000001) {
1178 __u32 misc;
1179 cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
1180 &c->x86_capability[0]);
1181 c->x86 = (tfms >> 8) & 0xf;
1182 c->x86_model = (tfms >> 4) & 0xf;
1183 c->x86_mask = tfms & 0xf;
1184 if (c->x86 == 0xf) {
1185 c->x86 += (tfms >> 20) & 0xff;
1186 c->x86_model += ((tfms >> 16) & 0xF) << 4;
1188 if (c->x86_capability[0] & (1<<19))
1189 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
1190 } else {
1191 /* Have CPUID level 0 only - unheard of */
1192 c->x86 = 4;
1195 #ifdef CONFIG_SMP
1196 phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
1197 #endif
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
1203 void __init identify_cpu(struct cpuinfo_x86 *c)
1205 int i;
1206 u32 xlvl;
1208 early_identify_cpu(c);
1210 /* AMD-defined flags: level 0x80000001 */
1211 xlvl = cpuid_eax(0x80000000);
1212 c->extended_cpuid_level = xlvl;
1213 if ((xlvl & 0xffff0000) == 0x80000000) {
1214 if (xlvl >= 0x80000001) {
1215 c->x86_capability[1] = cpuid_edx(0x80000001);
1216 c->x86_capability[6] = cpuid_ecx(0x80000001);
1218 if (xlvl >= 0x80000004)
1219 get_model_name(c); /* Default name */
1222 /* Transmeta-defined flags: level 0x80860001 */
1223 xlvl = cpuid_eax(0x80860000);
1224 if ((xlvl & 0xffff0000) == 0x80860000) {
1225 /* Don't set x86_cpuid_level here for now to not confuse. */
1226 if (xlvl >= 0x80860001)
1227 c->x86_capability[2] = cpuid_edx(0x80860001);
1230 /*
1231 * Vendor-specific initialization. In this section we
1232 * canonicalize the feature flags, meaning if there are
1233 * features a certain CPU supports which CPUID doesn't
1234 * tell us, CPUID claiming incorrect flags, or other bugs,
1235 * we handle them here.
1237 * At the end of this section, c->x86_capability better
1238 * indicate the features this CPU genuinely supports!
1239 */
1240 switch (c->x86_vendor) {
1241 case X86_VENDOR_AMD:
1242 init_amd(c);
1243 break;
1245 case X86_VENDOR_INTEL:
1246 init_intel(c);
1247 break;
1249 case X86_VENDOR_UNKNOWN:
1250 default:
1251 display_cacheinfo(c);
1252 break;
1255 select_idle_routine(c);
1256 detect_ht(c);
1258 /*
1259 * On SMP, boot_cpu_data holds the common feature set between
1260 * all CPUs; so make sure that we indicate which features are
1261 * common between the CPUs. The first time this routine gets
1262 * executed, c == &boot_cpu_data.
1263 */
1264 if (c != &boot_cpu_data) {
1265 /* AND the already accumulated flags with these */
1266 for (i = 0 ; i < NCAPINTS ; i++)
1267 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1270 #ifdef CONFIG_X86_MCE
1271 mcheck_init(c);
1272 #endif
1273 #ifdef CONFIG_NUMA
1274 if (c != &boot_cpu_data)
1275 numa_add_cpu(c - cpu_data);
1276 #endif
1280 void __init print_cpu_info(struct cpuinfo_x86 *c)
1282 if (c->x86_model_id[0])
1283 printk("%s", c->x86_model_id);
1285 if (c->x86_mask || c->cpuid_level >= 0)
1286 printk(" stepping %02x\n", c->x86_mask);
1287 else
1288 printk("\n");
/*
 * Get CPU information for use by the procfs.
 */
1295 static int show_cpuinfo(struct seq_file *m, void *v)
1297 struct cpuinfo_x86 *c = v;
1299 /*
1300 * These flag bits must match the definitions in <asm/cpufeature.h>.
1301 * NULL means this bit is undefined or reserved; either way it doesn't
1302 * have meaning as far as Linux is concerned. Note that it's important
1303 * to realize there is a difference between this table and CPUID -- if
1304 * applications want to get the raw CPUID data, they should access
1305 * /dev/cpu/<cpu_nr>/cpuid instead.
1306 */
1307 static char *x86_cap_flags[] = {
1308 /* Intel-defined */
1309 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
1310 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
1311 "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
1312 "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
1314 /* AMD-defined */
1315 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1316 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
1317 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
1318 NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
1320 /* Transmeta-defined */
1321 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
1322 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1323 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1324 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1326 /* Other (Linux-defined) */
1327 "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
1328 "constant_tsc", NULL, NULL,
1329 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1330 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1331 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1333 /* Intel-defined (#2) */
1334 "pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "est",
1335 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
1336 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1337 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1339 /* VIA/Cyrix/Centaur-defined */
1340 NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
1341 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1342 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1343 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1345 /* AMD-defined (#2) */
1346 "lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL,
1347 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1348 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1349 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1350 };
1351 static char *x86_power_flags[] = {
1352 "ts", /* temperature sensor */
1353 "fid", /* frequency id control */
1354 "vid", /* voltage id control */
1355 "ttp", /* thermal trip */
1356 "tm",
1357 "stc"
1358 };
1361 #ifdef CONFIG_SMP
1362 if (!cpu_online(c-cpu_data))
1363 return 0;
1364 #endif
1366 seq_printf(m,"processor\t: %u\n"
1367 "vendor_id\t: %s\n"
1368 "cpu family\t: %d\n"
1369 "model\t\t: %d\n"
1370 "model name\t: %s\n",
1371 (unsigned)(c-cpu_data),
1372 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
1373 c->x86,
1374 (int)c->x86_model,
1375 c->x86_model_id[0] ? c->x86_model_id : "unknown");
1377 if (c->x86_mask || c->cpuid_level >= 0)
1378 seq_printf(m, "stepping\t: %d\n", c->x86_mask);
1379 else
1380 seq_printf(m, "stepping\t: unknown\n");
1382 if (cpu_has(c,X86_FEATURE_TSC)) {
1383 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
1384 cpu_khz / 1000, (cpu_khz % 1000));
1387 /* Cache size */
1388 if (c->x86_cache_size >= 0)
1389 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
1391 #ifdef CONFIG_SMP
1392 if (smp_num_siblings * c->x86_num_cores > 1) {
1393 int cpu = c - cpu_data;
1394 seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
1395 seq_printf(m, "siblings\t: %d\n",
1396 c->x86_num_cores * smp_num_siblings);
1397 seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
1398 seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
1400 #endif
1402 seq_printf(m,
1403 "fpu\t\t: yes\n"
1404 "fpu_exception\t: yes\n"
1405 "cpuid level\t: %d\n"
1406 "wp\t\t: yes\n"
1407 "flags\t\t:",
1408 c->cpuid_level);
1411 int i;
1412 for ( i = 0 ; i < 32*NCAPINTS ; i++ )
1413 if ( test_bit(i, &c->x86_capability) &&
1414 x86_cap_flags[i] != NULL )
1415 seq_printf(m, " %s", x86_cap_flags[i]);
1418 seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
1419 c->loops_per_jiffy/(500000/HZ),
1420 (c->loops_per_jiffy/(5000/HZ)) % 100);
1422 if (c->x86_tlbsize > 0)
1423 seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
1424 seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
1425 seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
1427 seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
1428 c->x86_phys_bits, c->x86_virt_bits);
1430 seq_printf(m, "power management:");
1432 unsigned i;
1433 for (i = 0; i < 32; i++)
1434 if (c->x86_power & (1 << i)) {
1435 if (i < ARRAY_SIZE(x86_power_flags))
1436 seq_printf(m, " %s", x86_power_flags[i]);
1437 else
1438 seq_printf(m, " [%d]", i);
1442 seq_printf(m, "\n\n");
1444 return 0;
1447 static void *c_start(struct seq_file *m, loff_t *pos)
1449 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
1452 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1454 ++*pos;
1455 return c_start(m, pos);
1458 static void c_stop(struct seq_file *m, void *v)
1462 struct seq_operations cpuinfo_op = {
1463 .start =c_start,
1464 .next = c_next,
1465 .stop = c_stop,
1466 .show = show_cpuinfo,
1467 };