ia64/xen-unstable

view linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c @ 6669:fb90dd31c6d7

Extend save/restore interface to cope with domains >4GB by adding a level of indirection.
Signed-off-by: ian@xensource.com
author iap10@firebug.cl.cam.ac.uk
date Wed Sep 07 02:31:24 2005 +0000 (2005-09-07)
parents 8db9c5873b9b
children 7a36f58f64ee
line source
1 /*
2 * linux/arch/x86-64/kernel/setup.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 *
6 * Nov 2001 Dave Jones <davej@suse.de>
7 * Forked from i386 setup code.
8 *
9 * $Id$
10 */
12 /*
13 * This file handles the architecture-dependent parts of initialization
14 */
16 #include <linux/errno.h>
17 #include <linux/sched.h>
18 #include <linux/kernel.h>
19 #include <linux/mm.h>
20 #include <linux/stddef.h>
21 #include <linux/unistd.h>
22 #include <linux/ptrace.h>
23 #include <linux/slab.h>
24 #include <linux/user.h>
25 #include <linux/a.out.h>
26 #include <linux/tty.h>
27 #include <linux/ioport.h>
28 #include <linux/delay.h>
29 #include <linux/config.h>
30 #include <linux/init.h>
31 #include <linux/initrd.h>
32 #include <linux/highmem.h>
33 #include <linux/bootmem.h>
34 #include <linux/module.h>
35 #include <asm/processor.h>
36 #include <linux/console.h>
37 #include <linux/seq_file.h>
38 #include <linux/root_dev.h>
39 #include <linux/pci.h>
40 #include <linux/acpi.h>
41 #include <linux/kallsyms.h>
42 #include <linux/edd.h>
43 #include <asm/mtrr.h>
44 #include <asm/uaccess.h>
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/smp.h>
48 #include <asm/msr.h>
49 #include <asm/desc.h>
50 #include <video/edid.h>
51 #include <asm/e820.h>
52 #include <asm/dma.h>
53 #include <asm/mpspec.h>
54 #include <asm/mmu_context.h>
55 #include <asm/bootsetup.h>
56 #include <asm/proto.h>
57 #include <asm/setup.h>
58 #include <asm/mach_apic.h>
59 #include <asm/numa.h>
60 #ifdef CONFIG_XEN
61 #include <linux/percpu.h>
62 #include <asm-xen/xen-public/physdev.h>
63 #include "setup_arch_pre.h"
64 #include <asm-xen/hypervisor.h>
65 #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
66 #define PFN_PHYS(x) ((x) << PAGE_SHIFT)
67 #define end_pfn_map end_pfn
68 #include <asm/mach-xen/setup_arch_post.h>
70 extern unsigned long start_pfn;
71 extern struct edid_info edid_info;
/* Pointer to the hypervisor shared-info page; points at the zero page
   until the real mapping is installed at start of day. */
shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
EXPORT_SYMBOL(HYPERVISOR_shared_info);

/* Allows setting of maximum possible memory size */
unsigned long xen_override_max_pfn;

/* P2M table: pseudo-physical frame number -> machine frame number. */
unsigned long *phys_to_machine_mapping;
/* Two-level index of the frames that hold the P2M table, consumed by the
   save/restore tools (see setup_arch). */
unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];

EXPORT_SYMBOL(phys_to_machine_mapping);

/* Per-CPU batching buffers for hypervisor multicalls. */
DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
DEFINE_PER_CPU(int, nr_multicall_ents);

/* Raw start-of-day parameters from the hypervisor. */
start_info_t *xen_start_info;
#endif
/*
 * Machine setup..
 */

struct cpuinfo_x86 boot_cpu_data;	/* CPU data of the boot processor */

unsigned long mmu_cr4_features;		/* shadow copy of CR4 feature bits */

int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
#ifdef CONFIG_ACPI_BOOT
extern int __initdata acpi_ht;
extern acpi_interrupt_flags acpi_sci_flags;
int __initdata acpi_force = 0;
#endif

int acpi_numa __initdata;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

#ifdef CONFIG_SWIOTLB
int swiotlb;
EXPORT_SYMBOL(swiotlb);
#endif
/*
 * Setup options
 */
struct drive_info_struct { char dummy[32]; } drive_info;	/* raw BIOS drive info, copied in setup_arch */
struct screen_info screen_info;
struct sys_desc_table_struct {
        unsigned short length;
        unsigned char table[0];		/* variable-length trailing table data */
};

struct edid_info edid_info;
struct e820map e820;			/* BIOS-provided physical memory map */

extern int root_mountflags;
extern char _text, _etext, _edata, _end;	/* linker-provided section boundaries */

/* Whitespace-scanned copy of the boot command line, built by
   parse_cmdline_early() and handed back via *cmdline_p. */
char command_line[COMMAND_LINE_SIZE];
/* Legacy I/O port ranges present on every PC-compatible; claimed
   unconditionally in setup_arch(). */
struct resource standard_io_resources[] = {
        { .name = "dma1", .start = 0x00, .end = 0x1f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic1", .start = 0x20, .end = 0x21,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer0", .start = 0x40, .end = 0x43,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer1", .start = 0x50, .end = 0x53,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x60, .end = 0x6f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma page reg", .start = 0x80, .end = 0x8f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic2", .start = 0xa0, .end = 0xa1,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma2", .start = 0xc0, .end = 0xdf,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "fpu", .start = 0xf0, .end = 0xff,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

/* Element count of the table above. */
#define STANDARD_IO_RESOURCES \
        (sizeof standard_io_resources / sizeof standard_io_resources[0])
#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

/* iomem resources covering the kernel image; start/end are filled in by
   setup_arch() on non-Xen builds. */
struct resource data_resource = {
        .name = "Kernel data",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};
struct resource code_resource = {
        .name = "Kernel code",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};
#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)

/* Legacy BIOS/option-ROM windows; only probed when the kernel can touch
   the physical legacy region (native or privileged Xen guest). */
#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
static struct resource system_rom_resource = {
        .name = "System ROM",
        .start = 0xf0000,
        .end = 0xfffff,
        .flags = IORESOURCE_ROM,
};

static struct resource extension_rom_resource = {
        .name = "Extension ROM",
        .start = 0xe0000,
        .end = 0xeffff,
        .flags = IORESOURCE_ROM,
};

/* Slots filled in by probe_roms() as adapter ROMs are discovered;
   the first entry's .start (0xc8000) bounds the video-ROM scan. */
static struct resource adapter_rom_resources[] = {
        { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM }
};
#endif

#define ADAPTER_ROM_RESOURCES \
        (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])

#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
static struct resource video_rom_resource = {
        .name = "Video ROM",
        .start = 0xc0000,
        .end = 0xc7fff,
        .flags = IORESOURCE_ROM,
};
#endif

static struct resource video_ram_resource = {
        .name = "Video RAM area",
        .start = 0xa0000,
        .end = 0xbffff,
        .flags = IORESOURCE_RAM,
};
228 #if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
229 #define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
231 static int __init romchecksum(unsigned char *rom, unsigned long length)
232 {
233 unsigned char *p, sum = 0;
235 for (p = rom; p < rom + length; p++)
236 sum += *p;
237 return sum == 0;
238 }
/*
 * Scan the legacy ISA ROM region for the video BIOS, system BIOS,
 * extension ROM and adapter option ROMs, registering every valid image
 * found with the iomem resource tree.
 */
static void __init probe_roms(void)
{
        unsigned long start, length, upper;
        unsigned char *rom;
        int i;

        /* video rom */
        upper = adapter_rom_resources[0].start;
        for (start = video_rom_resource.start; start < upper; start += 2048) {
                rom = isa_bus_to_virt(start);
                if (!romsignature(rom))
                        continue;

                video_rom_resource.start = start;

                /* 0 < length <= 0x7f * 512, historically */
                length = rom[2] * 512;

                /* if checksum okay, trust length byte */
                if (length && romchecksum(rom, length))
                        video_rom_resource.end = start + length - 1;

                request_resource(&iomem_resource, &video_rom_resource);
                break;
        }

        /* Resume scanning at the next 2K boundary past the video ROM. */
        start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
        if (start < upper)
                start = upper;

        /* system rom */
        request_resource(&iomem_resource, &system_rom_resource);
        upper = system_rom_resource.start;

        /* check for extension rom (ignore length byte!) */
        rom = isa_bus_to_virt(extension_rom_resource.start);
        if (romsignature(rom)) {
                length = extension_rom_resource.end - extension_rom_resource.start + 1;
                if (romchecksum(rom, length)) {
                        request_resource(&iomem_resource, &extension_rom_resource);
                        upper = extension_rom_resource.start;
                }
        }

        /* check for adapter roms on 2k boundaries */
        for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
                rom = isa_bus_to_virt(start);
                if (!romsignature(rom))
                        continue;

                /* 0 < length <= 0x7f * 512, historically */
                length = rom[2] * 512;

                /* but accept any length that fits if checksum okay */
                if (!length || start + length > upper || !romchecksum(rom, length))
                        continue;

                adapter_rom_resources[i].start = start;
                adapter_rom_resources[i].end = start + length - 1;
                request_resource(&iomem_resource, &adapter_rom_resources[i]);

                /* note: i only advances when an image was accepted */
                start = adapter_rom_resources[i++].end & ~2047UL;
        }
}
#endif
/*
 * Early boot command-line parser.  Copies the raw command line into
 * saved_command_line (exported later as /proc/cmdline), scans it for
 * options that must take effect before the regular __setup machinery
 * runs, and returns the bounded copy in command_line via *cmdline_p.
 * Under Xen the raw line comes from the start_info page rather than
 * the BIOS boot parameters.
 */
static __init void parse_cmdline_early (char ** cmdline_p)
{
        char c = ' ', *to = command_line, *from = COMMAND_LINE;
        int len = 0;

        /* Save unparsed command line copy for /proc/cmdline */
#ifdef CONFIG_XEN
        int max_cmdline;

        /* Xen's guest command line may be shorter than COMMAND_LINE_SIZE. */
        if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
                max_cmdline = COMMAND_LINE_SIZE;
        memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
        saved_command_line[max_cmdline-1] = '\0';
#else
        memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
        saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
#endif

        for (;;) {
                /* Options are only recognised at a word boundary. */
                if (c != ' ')
                        goto next_char;

#ifdef CONFIG_SMP
                /*
                 * If the BIOS enumerates physical processors before logical,
                 * maxcpus=N at enumeration-time can be used to disable HT.
                 */
                /* NOTE(review): this "else" pairs with the goto-if above,
                   so "maxcpus=" is skipped at word boundaries -- presumably
                   handled elsewhere earlier; verify against init/main.c. */
                else if (!memcmp(from, "maxcpus=", 8)) {
                        extern unsigned int maxcpus;

                        maxcpus = simple_strtoul(from + 8, NULL, 0);
                }
#endif
#ifdef CONFIG_ACPI_BOOT
                /* "acpi=off" disables both ACPI table parsing and interpreter init */
                if (!memcmp(from, "acpi=off", 8))
                        disable_acpi();

                if (!memcmp(from, "acpi=force", 10)) {
                        /* add later when we do DMI horrors: */
                        acpi_force = 1;
                        acpi_disabled = 0;
                }

                /* acpi=ht just means: do ACPI MADT parsing
                   at bootup, but don't enable the full ACPI interpreter */
                if (!memcmp(from, "acpi=ht", 7)) {
                        if (!acpi_force)
                                disable_acpi();
                        acpi_ht = 1;
                }
                else if (!memcmp(from, "pci=noacpi", 10))
                        acpi_disable_pci();
                else if (!memcmp(from, "acpi=noirq", 10))
                        acpi_noirq_set();

                else if (!memcmp(from, "acpi_sci=edge", 13))
                        acpi_sci_flags.trigger = 1;
                else if (!memcmp(from, "acpi_sci=level", 14))
                        acpi_sci_flags.trigger = 3;
                else if (!memcmp(from, "acpi_sci=high", 13))
                        acpi_sci_flags.polarity = 1;
                else if (!memcmp(from, "acpi_sci=low", 12))
                        acpi_sci_flags.polarity = 3;

                /* acpi=strict disables out-of-spec workarounds */
                else if (!memcmp(from, "acpi=strict", 11)) {
                        acpi_strict = 1;
                }
#ifdef CONFIG_X86_IO_APIC
                else if (!memcmp(from, "acpi_skip_timer_override", 24))
                        acpi_skip_timer_override = 1;
#endif
#endif
#ifndef CONFIG_XEN
                if (!memcmp(from, "nolapic", 7) ||
                    !memcmp(from, "disableapic", 11))
                        disable_apic = 1;

                if (!memcmp(from, "noapic", 6))
                        skip_ioapic_setup = 1;

                if (!memcmp(from, "apic", 4)) {
                        skip_ioapic_setup = 0;
                        ioapic_force = 1;
                }
#endif
                if (!memcmp(from, "mem=", 4))
                        parse_memopt(from+4, &from);

#ifdef CONFIG_DISCONTIGMEM
                if (!memcmp(from, "numa=", 5))
                        numa_setup(from+5);
#endif

#ifdef CONFIG_GART_IOMMU
                if (!memcmp(from,"iommu=",6)) {
                        iommu_setup(from+6);
                }
#endif

                if (!memcmp(from,"oops=panic", 10))
                        panic_on_oops = 1;

                if (!memcmp(from, "noexec=", 7))
                        nonx_setup(from + 7);

        next_char:
                /* Copy one byte into command_line, bounded by COMMAND_LINE_SIZE. */
                c = *(from++);
                if (!c)
                        break;
                if (COMMAND_LINE_SIZE <= ++len)
                        break;
                *(to++) = c;
        }
        *to = '\0';
        *cmdline_p = command_line;
}
#ifndef CONFIG_DISCONTIGMEM
#ifdef CONFIG_XEN
/*
 * Xen flavour of bootmem init: mark [0, end_pfn) free, then re-reserve
 * everything from HIGH_MEMORY up to start_pfn plus the bootmem bitmap
 * (start_pfn is presumably the first pfn free after the initial
 * hypervisor-built mappings -- verify against head.S/setup_arch_pre.h).
 */
static void __init contig_initmem_init(void)
{
        unsigned long bootmap_size = init_bootmem(start_pfn, end_pfn);
        free_bootmem(0, end_pfn << PAGE_SHIFT);
        reserve_bootmem(HIGH_MEMORY,
                        (PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE-1)
                        - HIGH_MEMORY);
}
#else
/*
 * Native flavour of bootmem init: find room for the bootmem bitmap in a
 * free e820 region, seed the allocator from the e820 map, then reserve
 * the bitmap itself.
 */
static void __init contig_initmem_init(void)
{
        unsigned long bootmap_size, bootmap;
        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
        if (bootmap == -1L)
                /* No place to track free pages -- cannot continue booting. */
                panic("Cannot find bootmem map of size %ld\n",bootmap_size);
        bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
        e820_bootmem_free(&contig_page_data, 0, end_pfn << PAGE_SHIFT);
        reserve_bootmem(bootmap, bootmap_size);
}
#endif /* !CONFIG_XEN */
#endif
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nk8nops: "
    K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
    K8_NOP7 K8_NOP8);

extern unsigned char k8nops[];
/* k8_nops[n] points at the n-byte K8-optimal nop sequence inside the
   concatenated k8nops blob (n = 1..ASM_NOP_MAX); index 0 is unused. */
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
        NULL,
        k8nops,
        k8nops + 1,
        k8nops + 1 + 2,
        k8nops + 1 + 2 + 3,
        k8nops + 1 + 2 + 3 + 4,
        k8nops + 1 + 2 + 3 + 4 + 5,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
/* Replace instructions with better alternatives for this CPU type.

   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have less capabilities than the boot processor are not handled.
   In this case boot with "noreplacement". */
void apply_alternatives(void *start, void *end)
{
        struct alt_instr *a;
        int diff, i, k;

        /* start..end delimit a table of struct alt_instr entries. */
        for (a = start; (void *)a < end; a++) {
                /* Only patch when this CPU has the required feature. */
                if (!boot_cpu_has(a->cpuid))
                        continue;

                BUG_ON(a->replacementlen > a->instrlen);
                __inline_memcpy(a->instr, a->replacement, a->replacementlen);
                diff = a->instrlen - a->replacementlen;

                /* Pad the rest with nops */
                for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
                        k = diff;
                        if (k > ASM_NOP_MAX)
                                k = ASM_NOP_MAX;
                        __inline_memcpy(a->instr + i, k8_nops[k], k);
                }
        }
}
499 static int no_replacement __initdata = 0;
501 void __init alternative_instructions(void)
502 {
503 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
504 if (no_replacement)
505 return;
506 apply_alternatives(__alt_instructions, __alt_instructions_end);
507 }
/* Handler for the "noreplacement" boot option: disables the instruction
   patching done by alternative_instructions(). */
static int __init noreplacement_setup(char *s)
{
        no_replacement = 1;
        /* NOTE(review): later kernels return 1 from __setup handlers to mark
           the option as consumed -- verify this tree's convention. */
        return 0;
}

__setup("noreplacement", noreplacement_setup);
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
     memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
     memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
     edd.mbr_signature_nr = EDD_MBR_SIG_NR;
     edd.edd_info_nr = EDD_NR;
}
#else
/* No EDD support configured: keep a no-op so callers need no #ifdefs. */
static inline void copy_edd(void)
{
}
#endif
#ifndef CONFIG_XEN
#define EBDA_ADDR_POINTER 0x40E
/* Reserve the BIOS Extended BIOS Data Area so the bootmem allocator
   never hands it out as free memory. */
static void __init reserve_ebda_region(void)
{
        unsigned int addr;
        /**
         * there is a real-mode segmented pointer pointing to the
         * 4K EBDA area at 0x40E
         */
        addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);
        addr <<= 4;		/* real-mode segment -> physical address */
        if (addr)
                reserve_bootmem_generic(addr, PAGE_SIZE);
}
#endif
/*
 * setup_arch - architecture-dependent boot-time initialisation.
 *
 * Copies the firmware/hypervisor boot parameters, parses the early
 * command line, sets up the memory map and bootmem allocator, reserves
 * special regions, initialises paging, builds the Xen P->M frame-list
 * indirection used by save/restore, probes ACPI/APIC/ROM resources and
 * selects a console.  *cmdline_p is pointed at the parsed command line.
 */
void __init setup_arch(char **cmdline_p)
{
        unsigned long kernel_end;

#ifdef CONFIG_XEN
        ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
        drive_info = DRIVE_INFO;
        kernel_end = 0;         /* dummy */
#ifdef CONFIG_XEN_PHYSDEV_ACCESS
        screen_info = SCREEN_INFO;

        /* This is drawn from a dump from vgacon:startup in standard Linux. */
        screen_info.orig_video_mode = 3;
        screen_info.orig_video_isVGA = 1;
        screen_info.orig_video_lines = 25;
        screen_info.orig_video_cols = 80;
        screen_info.orig_video_ega_bx = 3;
        screen_info.orig_video_points = 16;
#endif
        edid_info = EDID_INFO;
        saved_video_mode = SAVED_VIDEO_MODE;
        bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

        /* Ask the hypervisor to emulate writes to pagetable pages. */
        HYPERVISOR_vm_assist(VMASST_CMD_enable,
                             VMASST_TYPE_writable_pagetables);

        ARCH_SETUP
#else
        ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
        drive_info = DRIVE_INFO;
        screen_info = SCREEN_INFO;
        edid_info = EDID_INFO;
        saved_video_mode = SAVED_VIDEO_MODE;
        bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
        setup_memory_region();
        copy_edd();
#endif  /* !CONFIG_XEN */

        if (!MOUNT_ROOT_RDONLY)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) &_text;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
#ifdef CONFIG_XEN
        init_mm.brk = start_pfn << PAGE_SHIFT;
#else
        init_mm.brk = (unsigned long) &_end;

        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&_etext)-1;
        data_resource.start = virt_to_phys(&_etext);
        data_resource.end = virt_to_phys(&_edata)-1;
#endif

        parse_cmdline_early(cmdline_p);

        early_identify_cpu(&boot_cpu_data);

        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        end_pfn = e820_end_of_ram();

        check_efer();

        init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

#ifdef CONFIG_ACPI_NUMA
        /*
         * Parse SRAT to discover nodes.
         */
        acpi_numa_init();
#endif

#ifdef CONFIG_DISCONTIGMEM
        numa_initmem_init(0, end_pfn);
#else
        contig_initmem_init();
#endif

#ifndef CONFIG_XEN
        /* Reserve direct mapping */
        reserve_bootmem_generic(table_start << PAGE_SHIFT,
                                (table_end - table_start) << PAGE_SHIFT);

        /* reserve kernel */
        kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
        reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);

        /*
         * reserve physical page 0 - it's a special BIOS page on many boxes,
         * enabling clean reboots, SMP operation, laptop functions.
         */
        reserve_bootmem_generic(0, PAGE_SIZE);

        /* reserve ebda region */
        reserve_ebda_region();
#endif

#ifdef CONFIG_SMP
        /*
         * But first pinch a few for the stack/trampoline stuff
         * FIXME: Don't need the extra page at 4K, but need to fix
         * trampoline before removing it. (see the GDT stuff)
         */
        reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

        /* Reserve SMP trampoline */
        reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
        /*
         * Reserve low memory region for sleep support.
         */
        acpi_reserve_bootmem();
#endif
#ifdef CONFIG_XEN
#ifdef CONFIG_BLK_DEV_INITRD
        /* Under Xen the initrd arrives as a boot module from the domain
           builder rather than via the boot loader. */
        if (xen_start_info->mod_start) {
                if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
                        /*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
                        initrd_start = INITRD_START + PAGE_OFFSET;
                        initrd_end = initrd_start+INITRD_SIZE;
                        initrd_below_start_ok = 1;
                } else {
                        printk(KERN_ERR "initrd extends beyond end of memory "
                                "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                                (unsigned long)(INITRD_START + INITRD_SIZE),
                                (unsigned long)(end_pfn << PAGE_SHIFT));
                        initrd_start = 0;
                }
        }
#endif
#else   /* CONFIG_XEN */
#ifdef CONFIG_BLK_DEV_INITRD
        if (LOADER_TYPE && INITRD_START) {
                if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
                        reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
                        initrd_start =
                                INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
                        initrd_end = initrd_start+INITRD_SIZE;
                }
                else {
                        printk(KERN_ERR "initrd extends beyond end of memory "
                            "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                            (unsigned long)(INITRD_START + INITRD_SIZE),
                            (unsigned long)(end_pfn << PAGE_SHIFT));
                        initrd_start = 0;
                }
        }
#endif
#endif  /* !CONFIG_XEN */
        paging_init();
#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
#endif
#ifdef CONFIG_XEN
        {
                int i, j, k, fpp;
                /* Make sure we have a large enough P->M table. */
                if (end_pfn > xen_start_info->nr_pages) {
                        /* Replace the start-of-day mfn_list with a bigger,
                           all-invalid (~0) table and copy the known part in. */
                        phys_to_machine_mapping = alloc_bootmem(
                                end_pfn * sizeof(unsigned long));
                        memset(phys_to_machine_mapping, ~0,
                               end_pfn * sizeof(unsigned long));
                        memcpy(phys_to_machine_mapping,
                               (unsigned long *)xen_start_info->mfn_list,
                               xen_start_info->nr_pages * sizeof(unsigned long));
                        free_bootmem(
                                __pa(xen_start_info->mfn_list),
                                PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
                                                sizeof(unsigned long))));
                }

                /*
                 * Initialise the list of the frames that specify the list of
                 * frames that make up the p2m table. Used by save/restore
                 */
                pfn_to_mfn_frame_list_list = alloc_bootmem(PAGE_SIZE);
                HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
                        virt_to_mfn(pfn_to_mfn_frame_list_list);

                /* fpp = p2m entries per frame; build the two-level index. */
                fpp = PAGE_SIZE/sizeof(unsigned long);
                for ( i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++ )
                {
                        if ( (j % fpp) == 0 )
                        {
                                k++;
                                BUG_ON(k>=fpp);
                                pfn_to_mfn_frame_list[k] = alloc_bootmem(PAGE_SIZE);
                                pfn_to_mfn_frame_list_list[k] =
                                        virt_to_mfn(pfn_to_mfn_frame_list[k]);
                                j=0;
                        }
                        pfn_to_mfn_frame_list[k][j] =
                                virt_to_mfn(&phys_to_machine_mapping[i]);
                }
                HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
        }
#endif

#ifndef CONFIG_XEN
        check_ioapic();
#endif

#ifdef CONFIG_ACPI_BOOT
        /*
         * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
         * Call this early for SRAT node setup.
         */
        acpi_boot_table_init();

        /*
         * Read APIC and some other early information from ACPI tables.
         */
        acpi_boot_init();
#endif

#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * get boot-time SMP configuration:
         */
        if (smp_found_config)
                get_smp_config();
#ifndef CONFIG_XEN
        init_apic_mappings();
#endif
#endif

#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
        /*
         * Request address space for all standard RAM and ROM resources
         * and also for regions reported as reserved by the e820.
         */
        probe_roms();
        e820_reserve_resources();
#endif

        request_resource(&iomem_resource, &video_ram_resource);

        {
                unsigned i;
                /* request I/O space for devices used on all i[345]86 PCs */
                for (i = 0; i < STANDARD_IO_RESOURCES; i++)
                        request_resource(&ioport_resource, &standard_io_resources[i]);
        }

        e820_setup_gap();

#ifdef CONFIG_GART_IOMMU
        iommu_hole_init();
#endif

#ifdef CONFIG_XEN
        {
                physdev_op_t op;

                /* Allow ring-1 kernel code to use I/O port instructions. */
                op.cmd = PHYSDEVOP_SET_IOPL;
                op.u.set_iopl.iopl = 1;
                HYPERVISOR_physdev_op(&op);

                if (xen_start_info->flags & SIF_INITDOMAIN) {
                        if (!(xen_start_info->flags & SIF_PRIVILEGED))
                                panic("Xen granted us console access "
                                      "but not privileged status");

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
                        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
                        conswitchp = &dummy_con;
#endif
#endif
                } else {
#ifdef CONFIG_XEN_PRIVILEGED_GUEST
                        extern const struct consw xennull_con;
                        extern int console_use_vt;
#if defined(CONFIG_VGA_CONSOLE)
                        /* disable VGA driver */
                        ORIG_VIDEO_ISVGA = VIDEO_TYPE_VLFB;
#endif
                        conswitchp = &xennull_con;
                        console_use_vt = 0;
#endif
                }
        }
#else   /* CONFIG_XEN */

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

#endif /* !CONFIG_XEN */
}
880 static int __init get_model_name(struct cpuinfo_x86 *c)
881 {
882 unsigned int *v;
884 if (c->extended_cpuid_level < 0x80000004)
885 return 0;
887 v = (unsigned int *) c->x86_model_id;
888 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
889 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
890 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
891 c->x86_model_id[48] = 0;
892 return 1;
893 }
/*
 * Read the AMD-style extended cache/TLB CPUID leaves
 * (0x80000005..0x80000008), record the sizes and address widths in *c,
 * and log the L1/L2 cache geometry.
 */
static void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, eax, ebx, ecx, edx;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size=(ecx>>24)+(edx>>24);
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
        }

        if (n >= 0x80000006) {
                cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
                /* NOTE(review): this re-read overwrites the ecx value obtained
                   by the cpuid() call just above -- appears redundant; verify. */
                ecx = cpuid_ecx(0x80000006);
                c->x86_cache_size = ecx >> 16;
                c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

                printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
                       c->x86_cache_size, ecx & 0xFF);
        }

        if (n >= 0x80000007)
                cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
        if (n >= 0x80000008) {
                cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
}
/*
 * On a AMD dual core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        int cpu = smp_processor_id();
        int node = 0;
        unsigned bits;
        if (c->x86_num_cores == 1)
                return;

        /* bits = log2(number of cores), rounded up */
        bits = 0;
        while ((1 << bits) < c->x86_num_cores)
                bits++;

        /* Low order bits define the core id (index of core in socket) */
        cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
        /* Convert the APIC ID into the socket ID */
        phys_proc_id[cpu] >>= bits;

#ifdef CONFIG_NUMA
        /* When an ACPI SRAT table is available use the mappings from SRAT
           instead. */
        if (acpi_numa <= 0) {
                node = phys_proc_id[cpu];
                if (!node_online(node))
                        node = first_node(node_online_map);
                cpu_to_node[cpu] = node;
        } else {
                node = cpu_to_node[cpu];
        }
#endif

        printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n",
               cpu, c->x86_num_cores, node, cpu_core_id[cpu]);
#endif
}
970 static int __init init_amd(struct cpuinfo_x86 *c)
971 {
972 int r;
973 int level;
975 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
976 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
977 clear_bit(0*32+31, &c->x86_capability);
979 /* C-stepping K8? */
980 level = cpuid_eax(1);
981 if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
982 set_bit(X86_FEATURE_K8_C, &c->x86_capability);
984 r = get_model_name(c);
985 if (!r) {
986 switch (c->x86) {
987 case 15:
988 /* Should distinguish Models here, but this is only
989 a fallback anyways. */
990 strcpy(c->x86_model_id, "Hammer");
991 break;
992 }
993 }
994 display_cacheinfo(c);
996 if (c->extended_cpuid_level >= 0x80000008) {
997 c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
998 if (c->x86_num_cores & (c->x86_num_cores - 1))
999 c->x86_num_cores = 1;
1001 amd_detect_cmp(c);
1004 return r;
1007 static void __init detect_ht(struct cpuinfo_x86 *c)
1009 #ifdef CONFIG_SMP
1010 u32 eax, ebx, ecx, edx;
1011 int index_msb, tmp;
1012 int cpu = smp_processor_id();
1014 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
1015 return;
1017 cpuid(1, &eax, &ebx, &ecx, &edx);
1018 smp_num_siblings = (ebx & 0xff0000) >> 16;
1020 if (smp_num_siblings == 1) {
1021 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
1022 } else if (smp_num_siblings > 1) {
1023 index_msb = 31;
1024 /*
1025 * At this point we only support two siblings per
1026 * processor package.
1027 */
1028 if (smp_num_siblings > NR_CPUS) {
1029 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
1030 smp_num_siblings = 1;
1031 return;
1033 tmp = smp_num_siblings;
1034 while ((tmp & 0x80000000 ) == 0) {
1035 tmp <<=1 ;
1036 index_msb--;
1038 if (smp_num_siblings & (smp_num_siblings - 1))
1039 index_msb++;
1040 phys_proc_id[cpu] = phys_pkg_id(index_msb);
1042 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
1043 phys_proc_id[cpu]);
1045 smp_num_siblings = smp_num_siblings / c->x86_num_cores;
1047 tmp = smp_num_siblings;
1048 index_msb = 31;
1049 while ((tmp & 0x80000000) == 0) {
1050 tmp <<=1 ;
1051 index_msb--;
1053 if (smp_num_siblings & (smp_num_siblings - 1))
1054 index_msb++;
1056 cpu_core_id[cpu] = phys_pkg_id(index_msb);
1058 if (c->x86_num_cores > 1)
1059 printk(KERN_INFO "CPU: Processor Core ID: %d\n",
1060 cpu_core_id[cpu]);
1062 #endif
1065 /*
1066 * find out the number of processor cores on the die
1067 */
1068 static int __init intel_num_cpu_cores(struct cpuinfo_x86 *c)
1070 unsigned int eax;
1072 if (c->cpuid_level < 4)
1073 return 1;
1075 __asm__("cpuid"
1076 : "=a" (eax)
1077 : "0" (4), "c" (0)
1078 : "bx", "dx");
1080 if (eax & 0x1f)
1081 return ((eax >> 26) + 1);
1082 else
1083 return 1;
1086 static void __init init_intel(struct cpuinfo_x86 *c)
1088 /* Cache sizes */
1089 unsigned n;
1091 init_intel_cacheinfo(c);
1092 n = c->extended_cpuid_level;
1093 if (n >= 0x80000008) {
1094 unsigned eax = cpuid_eax(0x80000008);
1095 c->x86_virt_bits = (eax >> 8) & 0xff;
1096 c->x86_phys_bits = eax & 0xff;
1099 if (c->x86 == 15)
1100 c->x86_cache_alignment = c->x86_clflush_size * 2;
1101 if (c->x86 >= 15)
1102 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
1103 c->x86_num_cores = intel_num_cpu_cores(c);
1106 void __init get_cpu_vendor(struct cpuinfo_x86 *c)
1108 char *v = c->x86_vendor_id;
1110 if (!strcmp(v, "AuthenticAMD"))
1111 c->x86_vendor = X86_VENDOR_AMD;
1112 else if (!strcmp(v, "GenuineIntel"))
1113 c->x86_vendor = X86_VENDOR_INTEL;
1114 else
1115 c->x86_vendor = X86_VENDOR_UNKNOWN;
/* Maps a (vendor, family) pair to per-model name strings. */
struct cpu_model_info {
        int vendor;
        int family;
        char *model_names[16];
};
1124 /* Do some early cpuid on the boot CPU to get some parameter that are
1125 needed before check_bugs. Everything advanced is in identify_cpu
1126 below. */
void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms;

	/* Conservative defaults, overwritten below where CPUID provides
	   real data. */
	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_num_cores = 1;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name: CPUID leaf 0 returns the max standard leaf in
	   EAX and the 12-byte vendor string in EBX:EDX:ECX order, hence
	   the [0]/[8]/[4] destination offsets. */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		/* Base family/model/stepping fields of EAX. */
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf) {
			/* Family 0xf: fold in the extended family and
			   extended model fields. */
			c->x86 += (tfms >> 20) & 0xff;
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		}
		/* If CLFLUSH is supported (EDX bit 19), EBX[15:8] gives
		   the cache line size in 8-byte units. */
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

#ifdef CONFIG_SMP
	/* Record the initial APIC ID (CPUID leaf 1, EBX[31:24]) as the
	   physical package id for this CPU. */
	phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
1178 /*
1179 * This does the hard work of actually picking apart the CPU stuff...
1180 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
	int i;
	u32 xlvl;

	/* Basic family/model/vendor and standard feature bits first. */
	early_identify_cpu(c);

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	/* The 0x8000xxxx check guards against CPUs that echo garbage for
	   unsupported extended leaves. */
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	/*
	 * Vendor-specific initialization. In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	select_idle_routine(c);
	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs. The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0 ; i < NCAPINTS ; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
#ifdef CONFIG_NUMA
	/* Secondary CPUs only: the boot CPU is registered elsewhere. */
	if (c != &boot_cpu_data)
		numa_add_cpu(c - cpu_data);
#endif
}
1258 void __init print_cpu_info(struct cpuinfo_x86 *c)
1260 if (c->x86_model_id[0])
1261 printk("%s", c->x86_model_id);
1263 if (c->x86_mask || c->cpuid_level >= 0)
1264 printk(" stepping %02x\n", c->x86_mask);
1265 else
1266 printk("\n");
1269 /*
1270 * Get CPU information for use by the procfs.
1271 */
/* Render one /proc/cpuinfo record; v points into the cpu_data array. */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned. Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static char *x86_cap_flags[] = {
		/* Intel-defined */
	        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
	        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
	        "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
	        "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
		"constant_tsc", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "est",
		"tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
	/* Bit names for the x86_power word, indexed by bit number. */
	static char *x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",  /* frequency id control */
		"vid",  /* voltage id control */
		"ttp",  /* thermal trip */
		"tm",
		"stc"
	};

#ifdef CONFIG_SMP
	/* Skip records for CPUs that are not online. */
	if (!cpu_online(c-cpu_data))
		return 0;
#endif

	seq_printf(m,"processor\t: %u\n"
		     "vendor_id\t: %s\n"
		     "cpu family\t: %d\n"
		     "model\t\t: %d\n"
		     "model name\t: %s\n",
		     (unsigned)(c-cpu_data),
		     c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		     c->x86,
		     (int)c->x86_model,
		     c->x86_model_id[0] ? c->x86_model_id : "unknown");

	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");

	/* The MHz figure only makes sense if a TSC exists. */
	if (cpu_has(c,X86_FEATURE_TSC)) {
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			     cpu_khz / 1000, (cpu_khz % 1000));
	}

	/* Cache size */
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

#ifdef CONFIG_SMP
	/* Topology lines are only shown on multi-core/HT parts. */
	if (smp_num_siblings * c->x86_num_cores > 1) {
		int cpu = c - cpu_data;
		seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
		seq_printf(m, "siblings\t: %d\n",
				c->x86_num_cores * smp_num_siblings);
		seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
		seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
	}
#endif

	seq_printf(m,
	        "fpu\t\t: yes\n"
	        "fpu_exception\t: yes\n"
	        "cpuid level\t: %d\n"
	        "wp\t\t: yes\n"
	        "flags\t\t:",
		   c->cpuid_level);

	/* Emit the name of every set capability bit that has one. */
	{
		int i;
		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
			if ( test_bit(i, &c->x86_capability) &&
			     x86_cap_flags[i] != NULL )
				seq_printf(m, " %s", x86_cap_flags[i]);
	}

	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);

	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);

	seq_printf(m, "power management:");
	{
		unsigned i;
		/* Named bits come from x86_power_flags; unknown set bits
		   are printed by number. */
		for (i = 0; i < 32; i++)
			if (c->x86_power & (1 << i)) {
				if (i < ARRAY_SIZE(x86_power_flags))
					seq_printf(m, " %s", x86_power_flags[i]);
				else
					seq_printf(m, " [%d]", i);
			}
	}

	seq_printf(m, "\n\n");

	return 0;
}
1425 static void *c_start(struct seq_file *m, loff_t *pos)
1427 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
1430 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1432 ++*pos;
1433 return c_start(m, pos);
1436 static void c_stop(struct seq_file *m, void *v)
/* seq_file operations backing /proc/cpuinfo. */
struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next =	c_next,
	.stop =	c_stop,
	.show =	show_cpuinfo,
};