ia64/xen-unstable

view linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c @ 7138:7f732bba3ed4

Allow kernel built with privileged operation support (e.g. -xen/-xen0) to be used as a guest kernel on x86_64
Signed-off-by: ian@xensource.com
author iap10@firebug.cl.cam.ac.uk
date Thu Sep 29 19:59:18 2005 +0100 (2005-09-29)
parents 06d84bf87159
children 61b3b357d827 549325dfa161 a746126c3bb1
line source
1 /*
2 * linux/arch/x86-64/kernel/setup.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 *
6 * Nov 2001 Dave Jones <davej@suse.de>
7 * Forked from i386 setup code.
8 *
9 * $Id$
10 */
12 /*
13 * This file handles the architecture-dependent parts of initialization
14 */
16 #include <linux/errno.h>
17 #include <linux/sched.h>
18 #include <linux/kernel.h>
19 #include <linux/mm.h>
20 #include <linux/stddef.h>
21 #include <linux/unistd.h>
22 #include <linux/ptrace.h>
23 #include <linux/slab.h>
24 #include <linux/user.h>
25 #include <linux/a.out.h>
26 #include <linux/tty.h>
27 #include <linux/ioport.h>
28 #include <linux/delay.h>
29 #include <linux/config.h>
30 #include <linux/init.h>
31 #include <linux/initrd.h>
32 #include <linux/highmem.h>
33 #include <linux/bootmem.h>
34 #include <linux/module.h>
35 #include <asm/processor.h>
36 #include <linux/console.h>
37 #include <linux/seq_file.h>
38 #include <linux/root_dev.h>
39 #include <linux/pci.h>
40 #include <linux/acpi.h>
41 #include <linux/kallsyms.h>
42 #include <linux/edd.h>
43 #include <asm/mtrr.h>
44 #include <asm/uaccess.h>
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/smp.h>
48 #include <asm/msr.h>
49 #include <asm/desc.h>
50 #include <video/edid.h>
51 #include <asm/e820.h>
52 #include <asm/dma.h>
53 #include <asm/mpspec.h>
54 #include <asm/mmu_context.h>
55 #include <asm/bootsetup.h>
56 #include <asm/proto.h>
57 #include <asm/setup.h>
58 #include <asm/mach_apic.h>
59 #include <asm/numa.h>
60 #ifdef CONFIG_XEN
61 #include <linux/percpu.h>
62 #include <asm-xen/xen-public/physdev.h>
63 #include "setup_arch_pre.h"
64 #include <asm/hypervisor.h>
65 #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
66 #define PFN_PHYS(x) ((x) << PAGE_SHIFT)
67 #define end_pfn_map end_pfn
68 #include <asm/mach-xen/setup_arch_post.h>
/* --- Xen-specific globals (inside the CONFIG_XEN section opened above) --- */
70 extern unsigned long start_pfn;
71 extern struct edid_info edid_info;
/* Shared-info page pointer; initially aimed at empty_zero_page until the
   real hypervisor mapping is installed early in boot. */
73 shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
74 EXPORT_SYMBOL(HYPERVISOR_shared_info);
76 /* Allows setting of maximum possible memory size */
77 unsigned long xen_override_max_pfn;
/* P2M table: pseudo-physical frame -> machine frame; the frame lists below
   describe the table's own frames for save/restore (built in setup_arch). */
79 unsigned long *phys_to_machine_mapping;
80 unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];
82 EXPORT_SYMBOL(phys_to_machine_mapping);
/* Per-CPU multicall batching buffers; NOTE(review): filled/flushed outside
   this file — confirm against the Xen multicall code. */
84 DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
85 DEFINE_PER_CPU(int, nr_multicall_ents);
87 /* Raw start-of-day parameters from the hypervisor. */
88 start_info_t *xen_start_info;
89 #endif
91 /*
92 * Machine setup..
93 */
/* Boot CPU's cpuinfo; filled by early_identify_cpu()/identify_cpu(). */
95 struct cpuinfo_x86 boot_cpu_data;
97 unsigned long mmu_cr4_features;
/* ACPI state, toggled by "acpi=..." options in parse_cmdline_early(). */
99 int acpi_disabled;
100 EXPORT_SYMBOL(acpi_disabled);
101 #ifdef CONFIG_ACPI_BOOT
102 extern int __initdata acpi_ht;
103 extern acpi_interrupt_flags acpi_sci_flags;
104 int __initdata acpi_force = 0;
105 #endif
107 int acpi_numa __initdata;
109 /* Boot loader ID as an integer, for the benefit of proc_dointvec */
110 int bootloader_type;
112 unsigned long saved_video_mode;
114 #ifdef CONFIG_SWIOTLB
115 int swiotlb;
116 EXPORT_SYMBOL(swiotlb);
117 #endif
119 /*
120 * Setup options
121 */
/* Opaque BIOS drive table copied out of the boot parameters. */
122 struct drive_info_struct { char dummy[32]; } drive_info;
123 struct screen_info screen_info;
124 struct sys_desc_table_struct {
125 unsigned short length;
126 unsigned char table[0];
127 };
129 struct edid_info edid_info;
130 struct e820map e820;
132 extern int root_mountflags;
/* Linker-provided section boundary symbols (addresses, not values). */
133 extern char _text, _etext, _edata, _end;
/* Parsed command line, built by parse_cmdline_early(). */
135 char command_line[COMMAND_LINE_SIZE];
/* Legacy PC I/O-port ranges claimed for all x86 machines; registered with
   the ioport_resource tree at the end of setup_arch(). */
137 struct resource standard_io_resources[] = {
138 { .name = "dma1", .start = 0x00, .end = 0x1f,
139 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
140 { .name = "pic1", .start = 0x20, .end = 0x21,
141 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
142 { .name = "timer0", .start = 0x40, .end = 0x43,
143 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
144 { .name = "timer1", .start = 0x50, .end = 0x53,
145 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
146 { .name = "keyboard", .start = 0x60, .end = 0x6f,
147 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
148 { .name = "dma page reg", .start = 0x80, .end = 0x8f,
149 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
150 { .name = "pic2", .start = 0xa0, .end = 0xa1,
151 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
152 { .name = "dma2", .start = 0xc0, .end = 0xdf,
153 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
154 { .name = "fpu", .start = 0xf0, .end = 0xff,
155 .flags = IORESOURCE_BUSY | IORESOURCE_IO }
156 };
/* Element count of the table above. */
158 #define STANDARD_IO_RESOURCES \
159 (sizeof standard_io_resources / sizeof standard_io_resources[0])
161 #define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
/* Kernel text/data iomem resources; start/end are filled in from the
   section symbols in setup_arch() (non-Xen path only). */
163 struct resource data_resource = {
164 .name = "Kernel data",
165 .start = 0,
166 .end = 0,
167 .flags = IORESOURCE_RAM,
168 };
169 struct resource code_resource = {
170 .name = "Kernel code",
171 .start = 0,
172 .end = 0,
173 .flags = IORESOURCE_RAM,
174 };
176 #define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
/* ROM resources are only probed when the kernel may touch real hardware:
   either a privileged (dom0-capable) Xen guest or a native build. */
178 #if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
179 static struct resource system_rom_resource = {
180 .name = "System ROM",
181 .start = 0xf0000,
182 .end = 0xfffff,
183 .flags = IORESOURCE_ROM,
184 };
186 static struct resource extension_rom_resource = {
187 .name = "Extension ROM",
188 .start = 0xe0000,
189 .end = 0xeffff,
190 .flags = IORESOURCE_ROM,
191 };
/* Up to six adapter ROM slots; start/end of the later entries are filled
   in by probe_roms() as signatures are found. */
193 static struct resource adapter_rom_resources[] = {
194 { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
195 .flags = IORESOURCE_ROM },
196 { .name = "Adapter ROM", .start = 0, .end = 0,
197 .flags = IORESOURCE_ROM },
198 { .name = "Adapter ROM", .start = 0, .end = 0,
199 .flags = IORESOURCE_ROM },
200 { .name = "Adapter ROM", .start = 0, .end = 0,
201 .flags = IORESOURCE_ROM },
202 { .name = "Adapter ROM", .start = 0, .end = 0,
203 .flags = IORESOURCE_ROM },
204 { .name = "Adapter ROM", .start = 0, .end = 0,
205 .flags = IORESOURCE_ROM }
206 };
207 #endif
209 #define ADAPTER_ROM_RESOURCES \
210 (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
212 #if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
213 static struct resource video_rom_resource = {
214 .name = "Video ROM",
215 .start = 0xc0000,
216 .end = 0xc7fff,
217 .flags = IORESOURCE_ROM,
218 };
219 #endif
/* Legacy VGA framebuffer window; requested unconditionally in setup_arch(). */
221 static struct resource video_ram_resource = {
222 .name = "Video RAM area",
223 .start = 0xa0000,
224 .end = 0xbffff,
225 .flags = IORESOURCE_RAM,
226 };
228 #if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
229 #define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
231 static int __init romchecksum(unsigned char *rom, unsigned long length)
232 {
233 unsigned char *p, sum = 0;
235 for (p = rom; p < rom + length; p++)
236 sum += *p;
237 return sum == 0;
238 }
/*
 * probe_roms - locate BIOS ROM images in the legacy 0xC0000-0xFFFFF window
 * and register them in the iomem resource tree.
 *
 * Scans for the video ROM, the fixed system ROM, an optional extension ROM
 * at 0xE0000, and up to ADAPTER_ROM_RESOURCES adapter ROMs on 2K boundaries.
 * Only compiled for privileged/native kernels (see the #if above the
 * resource tables).
 */
240 static void __init probe_roms(void)
241 {
242 unsigned long start, length, upper;
243 unsigned char *rom;
244 int i;
246 /* video rom */
247 upper = adapter_rom_resources[0].start;
248 for (start = video_rom_resource.start; start < upper; start += 2048) {
249 rom = isa_bus_to_virt(start);
250 if (!romsignature(rom))
251 continue;
253 video_rom_resource.start = start;
255 /* 0 < length <= 0x7f * 512, historically */
256 length = rom[2] * 512;
258 /* if checksum okay, trust length byte */
259 if (length && romchecksum(rom, length))
260 video_rom_resource.end = start + length - 1;
262 request_resource(&iomem_resource, &video_rom_resource);
263 break;
264 }
/* Resume the adapter scan just past the video ROM, 2K-aligned. */
266 start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
267 if (start < upper)
268 start = upper;
270 /* system rom */
271 request_resource(&iomem_resource, &system_rom_resource);
272 upper = system_rom_resource.start;
274 /* check for extension rom (ignore length byte!) */
275 rom = isa_bus_to_virt(extension_rom_resource.start);
276 if (romsignature(rom)) {
277 length = extension_rom_resource.end - extension_rom_resource.start + 1;
278 if (romchecksum(rom, length)) {
279 request_resource(&iomem_resource, &extension_rom_resource);
280 upper = extension_rom_resource.start;
281 }
282 }
284 /* check for adapter roms on 2k boundaries */
285 for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
286 rom = isa_bus_to_virt(start);
287 if (!romsignature(rom))
288 continue;
290 /* 0 < length <= 0x7f * 512, historically */
291 length = rom[2] * 512;
293 /* but accept any length that fits if checksum okay */
294 if (!length || start + length > upper || !romchecksum(rom, length))
295 continue;
297 adapter_rom_resources[i].start = start;
298 adapter_rom_resources[i].end = start + length - 1;
299 request_resource(&iomem_resource, &adapter_rom_resources[i]);
/* i only advances on a hit; the loop increment then re-aligns start to 2K. */
301 start = adapter_rom_resources[i++].end & ~2047UL;
302 }
303 }
304 #endif
/*
 * parse_cmdline_early - scan the boot command line for options that must
 * be handled before the generic __setup() machinery runs.
 *
 * Copies the raw command line into saved_command_line (from the Xen
 * start-of-day info when CONFIG_XEN, otherwise from the boot parameters),
 * then walks it word by word handling SMP/ACPI/memory/IOMMU options.
 * The filtered line is accumulated into command_line and returned via
 * *cmdline_p.
 */
307 static __init void parse_cmdline_early (char ** cmdline_p)
308 {
309 char c = ' ', *to = command_line, *from = COMMAND_LINE;
310 int len = 0;
312 /* Save unparsed command line copy for /proc/cmdline */
313 #ifdef CONFIG_XEN
314 int max_cmdline;
316 if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
317 max_cmdline = COMMAND_LINE_SIZE;
318 memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
319 saved_command_line[max_cmdline-1] = '\0';
320 #else
321 memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
322 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
323 #endif
325 for (;;) {
/* Only the first character after a space can start an option word. */
326 if (c != ' ')
327 goto next_char;
329 #ifdef CONFIG_SMP
330 /*
331 * If the BIOS enumerates physical processors before logical,
332 * maxcpus=N at enumeration-time can be used to disable HT.
333 */
/* NOTE(review): this "else" pairs with the "if (c != ' ')" above —
   preserved as-is from the upstream source; do not reorder. */
334 else if (!memcmp(from, "maxcpus=", 8)) {
335 extern unsigned int maxcpus;
337 maxcpus = simple_strtoul(from + 8, NULL, 0);
338 }
339 #endif
340 #ifdef CONFIG_ACPI_BOOT
341 /* "acpi=off" disables both ACPI table parsing and interpreter init */
342 if (!memcmp(from, "acpi=off", 8))
343 disable_acpi();
345 if (!memcmp(from, "acpi=force", 10)) {
346 /* add later when we do DMI horrors: */
347 acpi_force = 1;
348 acpi_disabled = 0;
349 }
351 /* acpi=ht just means: do ACPI MADT parsing
352 at bootup, but don't enable the full ACPI interpreter */
353 if (!memcmp(from, "acpi=ht", 7)) {
354 if (!acpi_force)
355 disable_acpi();
356 acpi_ht = 1;
357 }
358 else if (!memcmp(from, "pci=noacpi", 10))
359 acpi_disable_pci();
360 else if (!memcmp(from, "acpi=noirq", 10))
361 acpi_noirq_set();
363 else if (!memcmp(from, "acpi_sci=edge", 13))
364 acpi_sci_flags.trigger = 1;
365 else if (!memcmp(from, "acpi_sci=level", 14))
366 acpi_sci_flags.trigger = 3;
367 else if (!memcmp(from, "acpi_sci=high", 13))
368 acpi_sci_flags.polarity = 1;
369 else if (!memcmp(from, "acpi_sci=low", 12))
370 acpi_sci_flags.polarity = 3;
372 /* acpi=strict disables out-of-spec workarounds */
373 else if (!memcmp(from, "acpi=strict", 11)) {
374 acpi_strict = 1;
375 }
376 #ifdef CONFIG_X86_IO_APIC
377 else if (!memcmp(from, "acpi_skip_timer_override", 24))
378 acpi_skip_timer_override = 1;
379 #endif
380 #endif
/* APIC overrides are meaningless under Xen (the hypervisor owns the APICs). */
381 #ifndef CONFIG_XEN
382 if (!memcmp(from, "nolapic", 7) ||
383 !memcmp(from, "disableapic", 11))
384 disable_apic = 1;
386 if (!memcmp(from, "noapic", 6))
387 skip_ioapic_setup = 1;
389 if (!memcmp(from, "apic", 4)) {
390 skip_ioapic_setup = 0;
391 ioapic_force = 1;
392 }
393 #endif
394 if (!memcmp(from, "mem=", 4))
395 parse_memopt(from+4, &from);
397 #ifdef CONFIG_DISCONTIGMEM
398 if (!memcmp(from, "numa=", 5))
399 numa_setup(from+5);
400 #endif
402 #ifdef CONFIG_GART_IOMMU
403 if (!memcmp(from,"iommu=",6)) {
404 iommu_setup(from+6);
405 }
406 #endif
408 if (!memcmp(from,"oops=panic", 10))
409 panic_on_oops = 1;
411 if (!memcmp(from, "noexec=", 7))
412 nonx_setup(from + 7);
414 next_char:
/* Copy the character through to command_line, bounded by its size. */
415 c = *(from++);
416 if (!c)
417 break;
418 if (COMMAND_LINE_SIZE <= ++len)
419 break;
420 *(to++) = c;
421 }
422 *to = '\0';
423 *cmdline_p = command_line;
424 }
426 #ifndef CONFIG_DISCONTIGMEM
427 #ifdef CONFIG_XEN
/*
 * contig_initmem_init (Xen) - set up the boot-time allocator for a flat
 * memory model.  Xen guarantees contiguous pseudo-physical memory, so the
 * bitmap is placed at start_pfn; everything below the bitmap's end (kernel
 * image + bootmem bitmap) is then re-reserved above HIGH_MEMORY.
 */
428 static void __init contig_initmem_init(void)
429 {
430 unsigned long bootmap_size = init_bootmem(start_pfn, end_pfn);
431 free_bootmem(0, end_pfn << PAGE_SHIFT);
432 reserve_bootmem(HIGH_MEMORY,
433 (PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE-1)
434 - HIGH_MEMORY);
435 }
436 #else
/*
 * contig_initmem_init (native) - find a hole in the e820 map large enough
 * for the bootmem bitmap, initialise the allocator, release all e820 RAM
 * to it and finally reserve the bitmap itself.
 */
437 static void __init contig_initmem_init(void)
438 {
439 unsigned long bootmap_size, bootmap;
440 bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
441 bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
442 if (bootmap == -1L)
443 panic("Cannot find bootmem map of size %ld\n",bootmap_size);
444 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
445 e820_bootmem_free(&contig_page_data, 0, end_pfn << PAGE_SHIFT);
446 reserve_bootmem(bootmap, bootmap_size);
447 }
448 #endif /* !CONFIG_XEN */
449 #endif
451 /* Use inline assembly to define this because the nops are defined
452 as inline assembly strings in the include files and we cannot
453 get them easily into strings. */
454 asm("\t.data\nk8nops: "
455 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
456 K8_NOP7 K8_NOP8);
/* k8_nops[k] points at a k-byte NOP sequence inside the packed k8nops blob
   above (entry 0 is unused); used by apply_alternatives() for padding. */
458 extern unsigned char k8nops[];
459 static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
460 NULL,
461 k8nops,
462 k8nops + 1,
463 k8nops + 1 + 2,
464 k8nops + 1 + 2 + 3,
465 k8nops + 1 + 2 + 3 + 4,
466 k8nops + 1 + 2 + 3 + 4 + 5,
467 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
468 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
469 };
471 /* Replace instructions with better alternatives for this CPU type.
473 This runs before SMP is initialized to avoid SMP problems with
474 self modifying code. This implies that assymetric systems where
475 APs have less capabilities than the boot processor are not handled.
476 In this case boot with "noreplacement". */
/* start/end delimit an array of struct alt_instr records emitted by the
   alternative() macros; each record names a CPUID feature bit and carries
   the replacement instruction bytes. */
477 void apply_alternatives(void *start, void *end)
478 {
479 struct alt_instr *a;
480 int diff, i, k;
481 for (a = start; (void *)a < end; a++) {
/* Skip records whose required CPU feature is absent on this CPU. */
482 if (!boot_cpu_has(a->cpuid))
483 continue;
485 BUG_ON(a->replacementlen > a->instrlen);
486 __inline_memcpy(a->instr, a->replacement, a->replacementlen);
487 diff = a->instrlen - a->replacementlen;
489 /* Pad the rest with nops */
/* Fill the leftover bytes with the longest NOPs available (max
   ASM_NOP_MAX bytes per chunk) so the patched site stays aligned. */
490 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
491 k = diff;
492 if (k > ASM_NOP_MAX)
493 k = ASM_NOP_MAX;
494 __inline_memcpy(a->instr + i, k8_nops[k], k);
495 }
496 }
497 }
/* Set by the "noreplacement" boot option to skip instruction patching. */
499 static int no_replacement __initdata = 0;
501 void __init alternative_instructions(void)
502 {
503 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
504 if (no_replacement)
505 return;
506 apply_alternatives(__alt_instructions, __alt_instructions_end);
507 }
509 static int __init noreplacement_setup(char *s)
510 {
511 no_replacement = 1;
512 return 0;
513 }
515 __setup("noreplacement", noreplacement_setup);
517 #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
/* Global copy of the BIOS Enhanced Disk Drive data gathered at boot. */
518 struct edd edd;
519 #ifdef CONFIG_EDD_MODULE
520 EXPORT_SYMBOL(edd);
521 #endif
522 /**
523 * copy_edd() - Copy the BIOS EDD information
524 * from boot_params into a safe place.
525 *
526 */
527 static inline void copy_edd(void)
528 {
529 memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
530 memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
531 edd.mbr_signature_nr = EDD_MBR_SIG_NR;
532 edd.edd_info_nr = EDD_NR;
533 }
534 #else
/* No-op stub when EDD support is not configured. */
535 static inline void copy_edd(void)
536 {
537 }
538 #endif
540 #ifndef CONFIG_XEN
541 #define EBDA_ADDR_POINTER 0x40E
/*
 * reserve_ebda_region - keep the bootmem allocator away from the BIOS
 * Extended BIOS Data Area.  Native boot only; a Xen guest has no EBDA.
 */
542 static void __init reserve_ebda_region(void)
543 {
544 unsigned int addr;
545 /**
546 * there is a real-mode segmented pointer pointing to the
547 * 4K EBDA area at 0x40E
548 */
/* The word at 0x40E is a real-mode segment; <<4 turns it into a
   physical address. */
549 addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);
550 addr <<= 4;
551 if (addr)
552 reserve_bootmem_generic(addr, PAGE_SIZE);
553 }
554 #endif
/*
 * setup_arch - top-level x86-64 boot-time initialization.
 *
 * Copies boot parameters (from the Xen start-of-day info or the native
 * boot_params), parses the early command line, sizes memory, sets up the
 * bootmem allocator, reserves special regions, builds the Xen P2M frame
 * lists, initializes paging/ACPI/APIC, and registers standard resources.
 * Ordering of the steps below is significant; do not reorder.
 */
556 void __init setup_arch(char **cmdline_p)
557 {
558 unsigned long kernel_end;
560 #ifdef CONFIG_XEN
/* Xen guest: boot data comes from the hypervisor's start_info. */
561 ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
562 drive_info = DRIVE_INFO;
563 kernel_end = 0; /* dummy */
564 #ifdef CONFIG_XEN_PHYSDEV_ACCESS
565 screen_info = SCREEN_INFO;
567 /* This is drawn from a dump from vgacon:startup in standard Linux. */
568 screen_info.orig_video_mode = 3;
569 screen_info.orig_video_isVGA = 1;
570 screen_info.orig_video_lines = 25;
571 screen_info.orig_video_cols = 80;
572 screen_info.orig_video_ega_bx = 3;
573 screen_info.orig_video_points = 16;
574 #endif
575 edid_info = EDID_INFO;
576 saved_video_mode = SAVED_VIDEO_MODE;
577 bootloader_type = LOADER_TYPE;
579 #ifdef CONFIG_BLK_DEV_RAM
580 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
581 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
582 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
585 #endif
/* Ask Xen to emulate writable page tables for us. */
587 HYPERVISOR_vm_assist(VMASST_CMD_enable,
588 VMASST_TYPE_writable_pagetables);
590 ARCH_SETUP
591 #else
/* Native boot: boot data comes from the zero page / boot_params. */
592 ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
593 drive_info = DRIVE_INFO;
594 screen_info = SCREEN_INFO;
595 edid_info = EDID_INFO;
596 saved_video_mode = SAVED_VIDEO_MODE;
597 bootloader_type = LOADER_TYPE;
599 #ifdef CONFIG_BLK_DEV_RAM
600 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
601 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
602 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
603 #endif
604 setup_memory_region();
605 copy_edd();
606 #endif /* !CONFIG_XEN */
608 if (!MOUNT_ROOT_RDONLY)
609 root_mountflags &= ~MS_RDONLY;
/* Describe the kernel image layout to the initial mm. */
610 init_mm.start_code = (unsigned long) &_text;
611 init_mm.end_code = (unsigned long) &_etext;
612 init_mm.end_data = (unsigned long) &_edata;
613 #ifdef CONFIG_XEN
614 init_mm.brk = start_pfn << PAGE_SHIFT;
615 #else
616 init_mm.brk = (unsigned long) &_end;
618 code_resource.start = virt_to_phys(&_text);
619 code_resource.end = virt_to_phys(&_etext)-1;
620 data_resource.start = virt_to_phys(&_etext);
621 data_resource.end = virt_to_phys(&_edata)-1;
622 #endif
624 parse_cmdline_early(cmdline_p);
626 early_identify_cpu(&boot_cpu_data);
628 /*
629 * partially used pages are not usable - thus
630 * we are rounding upwards:
631 */
632 end_pfn = e820_end_of_ram();
634 check_efer();
636 init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
638 #ifdef CONFIG_ACPI_NUMA
639 /*
640 * Parse SRAT to discover nodes.
641 */
642 acpi_numa_init();
643 #endif
645 #ifdef CONFIG_DISCONTIGMEM
646 numa_initmem_init(0, end_pfn);
647 #else
648 contig_initmem_init();
649 #endif
/* Carve out regions the bootmem allocator must never hand out. */
651 #ifndef CONFIG_XEN
652 /* Reserve direct mapping */
653 reserve_bootmem_generic(table_start << PAGE_SHIFT,
654 (table_end - table_start) << PAGE_SHIFT);
656 /* reserve kernel */
657 kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
658 reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);
660 /*
661 * reserve physical page 0 - it's a special BIOS page on many boxes,
662 * enabling clean reboots, SMP operation, laptop functions.
663 */
664 reserve_bootmem_generic(0, PAGE_SIZE);
666 /* reserve ebda region */
667 reserve_ebda_region();
668 #endif
671 #ifdef CONFIG_SMP
672 /*
673 * But first pinch a few for the stack/trampoline stuff
674 * FIXME: Don't need the extra page at 4K, but need to fix
675 * trampoline before removing it. (see the GDT stuff)
676 */
677 reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
679 /* Reserve SMP trampoline */
680 reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
681 #endif
683 #ifdef CONFIG_ACPI_SLEEP
684 /*
685 * Reserve low memory region for sleep support.
686 */
687 acpi_reserve_bootmem();
688 #endif
/* Locate the initrd: Xen passes it as a boot module; native boot uses
   the loader-provided INITRD_START/INITRD_SIZE. */
689 #ifdef CONFIG_XEN
690 #ifdef CONFIG_BLK_DEV_INITRD
691 if (xen_start_info->mod_start) {
692 if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
693 /*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
694 initrd_start = INITRD_START + PAGE_OFFSET;
695 initrd_end = initrd_start+INITRD_SIZE;
696 initrd_below_start_ok = 1;
697 } else {
698 printk(KERN_ERR "initrd extends beyond end of memory "
699 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
700 (unsigned long)(INITRD_START + INITRD_SIZE),
701 (unsigned long)(end_pfn << PAGE_SHIFT));
702 initrd_start = 0;
703 }
704 }
705 #endif
706 #else /* CONFIG_XEN */
707 #ifdef CONFIG_BLK_DEV_INITRD
708 if (LOADER_TYPE && INITRD_START) {
709 if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
710 reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
711 initrd_start =
712 INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
713 initrd_end = initrd_start+INITRD_SIZE;
714 }
715 else {
716 printk(KERN_ERR "initrd extends beyond end of memory "
717 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
718 (unsigned long)(INITRD_START + INITRD_SIZE),
719 (unsigned long)(end_pfn << PAGE_SHIFT));
720 initrd_start = 0;
721 }
722 }
723 #endif
724 #endif /* !CONFIG_XEN */
725 paging_init();
726 #ifdef CONFIG_X86_LOCAL_APIC
727 /*
728 * Find and reserve possible boot-time SMP configuration:
729 */
730 find_smp_config();
731 #endif
732 #ifdef CONFIG_XEN
733 {
734 int i, j, k, fpp;
735 /* Make sure we have a large enough P->M table. */
/* If the guest may grow beyond its initial allocation, reallocate the
   P2M table at full size, copy the hypervisor-provided entries over,
   and release the original mfn_list pages back to bootmem. */
736 if (end_pfn > xen_start_info->nr_pages) {
737 phys_to_machine_mapping = alloc_bootmem(
738 end_pfn * sizeof(unsigned long));
739 memset(phys_to_machine_mapping, ~0,
740 end_pfn * sizeof(unsigned long));
741 memcpy(phys_to_machine_mapping,
742 (unsigned long *)xen_start_info->mfn_list,
743 xen_start_info->nr_pages * sizeof(unsigned long));
744 free_bootmem(
745 __pa(xen_start_info->mfn_list),
746 PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
747 sizeof(unsigned long))));
748 }
750 /*
751 * Initialise the list of the frames that specify the list of
752 * frames that make up the p2m table. Used by save/restore
753 */
754 pfn_to_mfn_frame_list_list = alloc_bootmem(PAGE_SIZE);
755 HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
756 virt_to_mfn(pfn_to_mfn_frame_list_list);
/* fpp = frames-per-page: number of MFN entries that fit in one page. */
758 fpp = PAGE_SIZE/sizeof(unsigned long);
759 for ( i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++ )
760 {
761 if ( (j % fpp) == 0 )
762 {
763 k++;
764 BUG_ON(k>=fpp);
765 pfn_to_mfn_frame_list[k] = alloc_bootmem(PAGE_SIZE);
766 pfn_to_mfn_frame_list_list[k] =
767 virt_to_mfn(pfn_to_mfn_frame_list[k]);
768 j=0;
769 }
770 pfn_to_mfn_frame_list[k][j] =
771 virt_to_mfn(&phys_to_machine_mapping[i]);
772 }
773 HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
774 }
/* Unprivileged guests must not touch ACPI tables. */
776 if ( ! (xen_start_info->flags & SIF_INITDOMAIN))
777 {
778 acpi_disabled = 1;
779 #ifdef CONFIG_ACPI_BOOT
780 acpi_ht = 0;
781 #endif
782 }
783 #endif
785 #ifndef CONFIG_XEN
786 check_ioapic();
787 #endif
789 #ifdef CONFIG_ACPI_BOOT
790 /*
791 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
792 * Call this early for SRAT node setup.
793 */
794 acpi_boot_table_init();
796 /*
797 * Read APIC and some other early information from ACPI tables.
798 */
799 acpi_boot_init();
800 #endif
802 #ifdef CONFIG_X86_LOCAL_APIC
803 /*
804 * get boot-time SMP configuration:
805 */
806 if (smp_found_config)
807 get_smp_config();
808 #ifndef CONFIG_XEN
809 init_apic_mappings();
810 #endif
811 #endif
813 #if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
814 /*
815 * Request address space for all standard RAM and ROM resources
816 * and also for regions reported as reserved by the e820.
817 */
818 probe_roms();
819 e820_reserve_resources();
820 #endif
822 request_resource(&iomem_resource, &video_ram_resource);
824 {
825 unsigned i;
826 /* request I/O space for devices used on all i[345]86 PCs */
827 for (i = 0; i < STANDARD_IO_RESOURCES; i++)
828 request_resource(&ioport_resource, &standard_io_resources[i]);
829 }
831 e820_setup_gap();
833 #ifdef CONFIG_GART_IOMMU
834 iommu_hole_init();
835 #endif
837 #ifdef CONFIG_XEN
838 {
839 physdev_op_t op;
/* Allow ring-1 kernel code to use I/O instructions. */
841 op.cmd = PHYSDEVOP_SET_IOPL;
842 op.u.set_iopl.iopl = 1;
843 HYPERVISOR_physdev_op(&op);
/* Console selection: dom0 gets the real VGA console; other guests with
   privileged-guest support built in use the Xen null console instead. */
845 if (xen_start_info->flags & SIF_INITDOMAIN) {
846 if (!(xen_start_info->flags & SIF_PRIVILEGED))
847 panic("Xen granted us console access "
848 "but not privileged status");
850 #ifdef CONFIG_VT
851 #if defined(CONFIG_VGA_CONSOLE)
852 conswitchp = &vga_con;
853 #elif defined(CONFIG_DUMMY_CONSOLE)
854 conswitchp = &dummy_con;
855 #endif
856 #endif
857 } else {
858 #ifdef CONFIG_XEN_PRIVILEGED_GUEST
859 extern const struct consw xennull_con;
860 extern int console_use_vt;
861 #if defined(CONFIG_VGA_CONSOLE)
862 /* disable VGA driver */
863 ORIG_VIDEO_ISVGA = VIDEO_TYPE_VLFB;
864 #endif
865 conswitchp = &xennull_con;
866 console_use_vt = 0;
867 #endif
868 }
869 }
870 #else /* CONFIG_XEN */
872 #ifdef CONFIG_VT
873 #if defined(CONFIG_VGA_CONSOLE)
874 conswitchp = &vga_con;
875 #elif defined(CONFIG_DUMMY_CONSOLE)
876 conswitchp = &dummy_con;
877 #endif
878 #endif
880 #endif /* !CONFIG_XEN */
881 }
883 static int __init get_model_name(struct cpuinfo_x86 *c)
884 {
885 unsigned int *v;
887 if (c->extended_cpuid_level < 0x80000004)
888 return 0;
890 v = (unsigned int *) c->x86_model_id;
891 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
892 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
893 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
894 c->x86_model_id[48] = 0;
895 return 1;
896 }
899 static void __init display_cacheinfo(struct cpuinfo_x86 *c)
900 {
901 unsigned int n, dummy, eax, ebx, ecx, edx;
903 n = c->extended_cpuid_level;
905 if (n >= 0x80000005) {
906 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
907 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
908 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
909 c->x86_cache_size=(ecx>>24)+(edx>>24);
910 /* On K8 L1 TLB is inclusive, so don't count it */
911 c->x86_tlbsize = 0;
912 }
914 if (n >= 0x80000006) {
915 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
916 ecx = cpuid_ecx(0x80000006);
917 c->x86_cache_size = ecx >> 16;
918 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
920 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
921 c->x86_cache_size, ecx & 0xFF);
922 }
924 if (n >= 0x80000007)
925 cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
926 if (n >= 0x80000008) {
927 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
928 c->x86_virt_bits = (eax >> 8) & 0xff;
929 c->x86_phys_bits = eax & 0xff;
930 }
931 }
933 /*
934 * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
935 * Assumes number of cores is a power of two.
936 */
937 static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
938 {
939 #ifdef CONFIG_SMP
940 int cpu = smp_processor_id();
941 int node = 0;
942 unsigned bits;
943 if (c->x86_num_cores == 1)
944 return;
946 bits = 0;
947 while ((1 << bits) < c->x86_num_cores)
948 bits++;
950 /* Low order bits define the core id (index of core in socket) */
951 cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
952 /* Convert the APIC ID into the socket ID */
953 phys_proc_id[cpu] >>= bits;
955 #ifdef CONFIG_NUMA
956 /* When an ACPI SRAT table is available use the mappings from SRAT
957 instead. */
958 if (acpi_numa <= 0) {
959 node = phys_proc_id[cpu];
960 if (!node_online(node))
961 node = first_node(node_online_map);
962 cpu_to_node[cpu] = node;
963 } else {
964 node = cpu_to_node[cpu];
965 }
966 #endif
968 printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n",
969 cpu, c->x86_num_cores, node, cpu_core_id[cpu]);
970 #endif
971 }
973 static int __init init_amd(struct cpuinfo_x86 *c)
974 {
975 int r;
976 int level;
978 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
979 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
980 clear_bit(0*32+31, &c->x86_capability);
982 /* C-stepping K8? */
983 level = cpuid_eax(1);
984 if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
985 set_bit(X86_FEATURE_K8_C, &c->x86_capability);
987 r = get_model_name(c);
988 if (!r) {
989 switch (c->x86) {
990 case 15:
991 /* Should distinguish Models here, but this is only
992 a fallback anyways. */
993 strcpy(c->x86_model_id, "Hammer");
994 break;
995 }
996 }
997 display_cacheinfo(c);
999 if (c->extended_cpuid_level >= 0x80000008) {
1000 c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
1001 if (c->x86_num_cores & (c->x86_num_cores - 1))
1002 c->x86_num_cores = 1;
1004 amd_detect_cmp(c);
1007 return r;
1010 static void __init detect_ht(struct cpuinfo_x86 *c)
1012 #ifdef CONFIG_SMP
1013 u32 eax, ebx, ecx, edx;
1014 int index_msb, tmp;
1015 int cpu = smp_processor_id();
1017 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
1018 return;
1020 cpuid(1, &eax, &ebx, &ecx, &edx);
1021 smp_num_siblings = (ebx & 0xff0000) >> 16;
1023 if (smp_num_siblings == 1) {
1024 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
1025 } else if (smp_num_siblings > 1) {
1026 index_msb = 31;
1027 /*
1028 * At this point we only support two siblings per
1029 * processor package.
1030 */
1031 if (smp_num_siblings > NR_CPUS) {
1032 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
1033 smp_num_siblings = 1;
1034 return;
1036 tmp = smp_num_siblings;
1037 while ((tmp & 0x80000000 ) == 0) {
1038 tmp <<=1 ;
1039 index_msb--;
1041 if (smp_num_siblings & (smp_num_siblings - 1))
1042 index_msb++;
1043 phys_proc_id[cpu] = phys_pkg_id(index_msb);
1045 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
1046 phys_proc_id[cpu]);
1048 smp_num_siblings = smp_num_siblings / c->x86_num_cores;
1050 tmp = smp_num_siblings;
1051 index_msb = 31;
1052 while ((tmp & 0x80000000) == 0) {
1053 tmp <<=1 ;
1054 index_msb--;
1056 if (smp_num_siblings & (smp_num_siblings - 1))
1057 index_msb++;
1059 cpu_core_id[cpu] = phys_pkg_id(index_msb);
1061 if (c->x86_num_cores > 1)
1062 printk(KERN_INFO "CPU: Processor Core ID: %d\n",
1063 cpu_core_id[cpu]);
1065 #endif
1068 /*
1069 * find out the number of processor cores on the die
1070 */
1071 static int __init intel_num_cpu_cores(struct cpuinfo_x86 *c)
1073 unsigned int eax;
1075 if (c->cpuid_level < 4)
1076 return 1;
1078 __asm__("cpuid"
1079 : "=a" (eax)
1080 : "0" (4), "c" (0)
1081 : "bx", "dx");
1083 if (eax & 0x1f)
1084 return ((eax >> 26) + 1);
1085 else
1086 return 1;
1089 static void __init init_intel(struct cpuinfo_x86 *c)
1091 /* Cache sizes */
1092 unsigned n;
1094 init_intel_cacheinfo(c);
1095 n = c->extended_cpuid_level;
1096 if (n >= 0x80000008) {
1097 unsigned eax = cpuid_eax(0x80000008);
1098 c->x86_virt_bits = (eax >> 8) & 0xff;
1099 c->x86_phys_bits = eax & 0xff;
1102 if (c->x86 == 15)
1103 c->x86_cache_alignment = c->x86_clflush_size * 2;
1104 if (c->x86 >= 15)
1105 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
1106 c->x86_num_cores = intel_num_cpu_cores(c);
1109 void __init get_cpu_vendor(struct cpuinfo_x86 *c)
1111 char *v = c->x86_vendor_id;
1113 if (!strcmp(v, "AuthenticAMD"))
1114 c->x86_vendor = X86_VENDOR_AMD;
1115 else if (!strcmp(v, "GenuineIntel"))
1116 c->x86_vendor = X86_VENDOR_INTEL;
1117 else
1118 c->x86_vendor = X86_VENDOR_UNKNOWN;
/* Vendor/family to model-name lookup entry. NOTE(review): no user of this
   type is visible in this chunk. */
1121 struct cpu_model_info {
1122 int vendor;
1123 int family;
1124 char *model_names[16];
1125 };
1127 /* Do some early cpuid on the boot CPU to get some parameter that are
1128 needed before check_bugs. Everything advanced is in identify_cpu
1129 below. */
1130 void __init early_identify_cpu(struct cpuinfo_x86 *c)
1132 u32 tfms;
1134 c->loops_per_jiffy = loops_per_jiffy;
1135 c->x86_cache_size = -1;
1136 c->x86_vendor = X86_VENDOR_UNKNOWN;
1137 c->x86_model = c->x86_mask = 0; /* So far unknown... */
1138 c->x86_vendor_id[0] = '\0'; /* Unset */
1139 c->x86_model_id[0] = '\0'; /* Unset */
1140 c->x86_clflush_size = 64;
1141 c->x86_cache_alignment = c->x86_clflush_size;
1142 c->x86_num_cores = 1;
1143 c->extended_cpuid_level = 0;
1144 memset(&c->x86_capability, 0, sizeof c->x86_capability);
1146 /* Get vendor name */
1147 cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
1148 (unsigned int *)&c->x86_vendor_id[0],
1149 (unsigned int *)&c->x86_vendor_id[8],
1150 (unsigned int *)&c->x86_vendor_id[4]);
1152 get_cpu_vendor(c);
1154 /* Initialize the standard set of capabilities */
1155 /* Note that the vendor-specific code below might override */
1157 /* Intel-defined flags: level 0x00000001 */
1158 if (c->cpuid_level >= 0x00000001) {
1159 __u32 misc;
1160 cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
1161 &c->x86_capability[0]);
1162 c->x86 = (tfms >> 8) & 0xf;
1163 c->x86_model = (tfms >> 4) & 0xf;
1164 c->x86_mask = tfms & 0xf;
1165 if (c->x86 == 0xf) {
1166 c->x86 += (tfms >> 20) & 0xff;
1167 c->x86_model += ((tfms >> 16) & 0xF) << 4;
1169 if (c->x86_capability[0] & (1<<19))
1170 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
1171 } else {
1172 /* Have CPUID level 0 only - unheard of */
1173 c->x86 = 4;
1176 #ifdef CONFIG_SMP
1177 phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
1178 #endif
1181 /*
1182 * This does the hard work of actually picking apart the CPU stuff...
1183 */
1184 void __init identify_cpu(struct cpuinfo_x86 *c)
1186 int i;
1187 u32 xlvl;
1189 early_identify_cpu(c);
1191 /* AMD-defined flags: level 0x80000001 */
1192 xlvl = cpuid_eax(0x80000000);
1193 c->extended_cpuid_level = xlvl;
1194 if ((xlvl & 0xffff0000) == 0x80000000) {
1195 if (xlvl >= 0x80000001) {
1196 c->x86_capability[1] = cpuid_edx(0x80000001);
1197 c->x86_capability[6] = cpuid_ecx(0x80000001);
1199 if (xlvl >= 0x80000004)
1200 get_model_name(c); /* Default name */
1203 /* Transmeta-defined flags: level 0x80860001 */
1204 xlvl = cpuid_eax(0x80860000);
1205 if ((xlvl & 0xffff0000) == 0x80860000) {
1206 /* Don't set x86_cpuid_level here for now to not confuse. */
1207 if (xlvl >= 0x80860001)
1208 c->x86_capability[2] = cpuid_edx(0x80860001);
1211 /*
1212 * Vendor-specific initialization. In this section we
1213 * canonicalize the feature flags, meaning if there are
1214 * features a certain CPU supports which CPUID doesn't
1215 * tell us, CPUID claiming incorrect flags, or other bugs,
1216 * we handle them here.
1218 * At the end of this section, c->x86_capability better
1219 * indicate the features this CPU genuinely supports!
1220 */
1221 switch (c->x86_vendor) {
1222 case X86_VENDOR_AMD:
1223 init_amd(c);
1224 break;
1226 case X86_VENDOR_INTEL:
1227 init_intel(c);
1228 break;
1230 case X86_VENDOR_UNKNOWN:
1231 default:
1232 display_cacheinfo(c);
1233 break;
1236 select_idle_routine(c);
1237 detect_ht(c);
1239 /*
1240 * On SMP, boot_cpu_data holds the common feature set between
1241 * all CPUs; so make sure that we indicate which features are
1242 * common between the CPUs. The first time this routine gets
1243 * executed, c == &boot_cpu_data.
1244 */
1245 if (c != &boot_cpu_data) {
1246 /* AND the already accumulated flags with these */
1247 for (i = 0 ; i < NCAPINTS ; i++)
1248 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1251 #ifdef CONFIG_X86_MCE
1252 mcheck_init(c);
1253 #endif
1254 #ifdef CONFIG_NUMA
1255 if (c != &boot_cpu_data)
1256 numa_add_cpu(c - cpu_data);
1257 #endif
1261 void __init print_cpu_info(struct cpuinfo_x86 *c)
1263 if (c->x86_model_id[0])
1264 printk("%s", c->x86_model_id);
1266 if (c->x86_mask || c->cpuid_level >= 0)
1267 printk(" stepping %02x\n", c->x86_mask);
1268 else
1269 printk("\n");
/*
 * Get CPU information for use by the procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
	/* v is the cpuinfo_x86 slot handed to us by c_start()/c_next(). */
	struct cpuinfo_x86 *c = v;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned. Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static char *x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
		"constant_tsc", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "est",
		"tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
	/* Names for the low bits of c->x86_power, printed at the end. */
	static char *x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",  /* frequency id control */
		"vid",  /* voltage id control */
		"ttp",  /* thermal trip */
		"tm",
		"stc"
	};

#ifdef CONFIG_SMP
	/* Offline CPUs get no /proc/cpuinfo entry. */
	if (!cpu_online(c-cpu_data))
		return 0;
#endif

	seq_printf(m,"processor\t: %u\n"
		     "vendor_id\t: %s\n"
		     "cpu family\t: %d\n"
		     "model\t\t: %d\n"
		     "model name\t: %s\n",
		     (unsigned)(c-cpu_data),
		     c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		     c->x86,
		     (int)c->x86_model,
		     c->x86_model_id[0] ? c->x86_model_id : "unknown");

	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");

	/* cpu MHz is only reported when the CPU has a TSC. */
	if (cpu_has(c,X86_FEATURE_TSC)) {
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			     cpu_khz / 1000, (cpu_khz % 1000));
	}

	/* Cache size */
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

#ifdef CONFIG_SMP
	/* Topology lines only make sense with more than one sibling/core. */
	if (smp_num_siblings * c->x86_num_cores > 1) {
		int cpu = c - cpu_data;
		seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
		seq_printf(m, "siblings\t: %d\n",
				c->x86_num_cores * smp_num_siblings);
		seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
		seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
	}
#endif

	seq_printf(m,
	        "fpu\t\t: yes\n"
	        "fpu_exception\t: yes\n"
	        "cpuid level\t: %d\n"
	        "wp\t\t: yes\n"
	        "flags\t\t:",
		   c->cpuid_level);

	{
		int i;
		/* Emit every capability bit that has a name in the table. */
		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
			if ( test_bit(i, &c->x86_capability) &&
			     x86_cap_flags[i] != NULL )
				seq_printf(m, " %s", x86_cap_flags[i]);
	}

	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);

	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);

	seq_printf(m, "power management:");
	{
		unsigned i;
		/* Named bits come from x86_power_flags; others print as [n]. */
		for (i = 0; i < 32; i++)
			if (c->x86_power & (1 << i)) {
				if (i < ARRAY_SIZE(x86_power_flags))
					seq_printf(m, " %s", x86_power_flags[i]);
				else
					seq_printf(m, " [%d]", i);
			}
	}

	seq_printf(m, "\n\n");

	return 0;
}
1428 static void *c_start(struct seq_file *m, loff_t *pos)
1430 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
1433 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1435 ++*pos;
1436 return c_start(m, pos);
/* seq_file ->stop(): nothing to tear down for /proc/cpuinfo. */
static void c_stop(struct seq_file *m, void *v)
{
}
1443 struct seq_operations cpuinfo_op = {
1444 .start =c_start,
1445 .next = c_next,
1446 .stop = c_stop,
1447 .show = show_cpuinfo,
1448 };