
linux-2.4.27-xen-sparse/arch/xen/kernel/setup.c @ 2621:9402048e2325

bitkeeper revision 1.1159.1.218 (416a8128OiHXHyk_Sy8FsA0YUQcEnA)

Merge freefall.cl.cam.ac.uk:/auto/groups/xeno/users/cl349/BK/xeno.bk-26dom0
into freefall.cl.cam.ac.uk:/local/scratch/cl349/xeno.bk-26dom0
author cl349@freefall.cl.cam.ac.uk
date Mon Oct 11 12:48:40 2004 +0000 (2004-10-11)
parents ff4e7a241335
children b914ff7d73b5 3f929065a1d1
/*
 *  linux/arch/i386/kernel/setup.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define __KERNEL_SYSCALLS__
static int errno;
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/apm_bios.h>
#ifdef CONFIG_BLK_DEV_RAM
#include <linux/blk.h>
#endif
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/reboot.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/module.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/dma.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/ctrl_if.h>
#include <asm/hypervisor.h>
#include <asm/hypervisor-ifs/dom0_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/tqueue.h>
#include <net/pkt_sched.h> /* dev_(de)activate */
/*
 * Point at the empty zero page to start with. We map the real shared_info
 * page as soon as fixmap is up and running.
 */
shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;

unsigned long *phys_to_machine_mapping, *pfn_to_mfn_frame_list;

multicall_entry_t multicall_list[8];
int nr_multicall_ents = 0;
/*
 * Machine setup..
 */

char ignore_irq13;        /* set if exception 16 works */
struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };

unsigned long mmu_cr4_features;

unsigned char * vgacon_mmap;

/*
 * Bus types ..
 */
#ifdef CONFIG_EISA
int EISA_bus;
#endif
int MCA_bus;

/* for MCA, but anyone else can use it if they want */
unsigned int machine_id;
unsigned int machine_submodel_id;
unsigned int BIOS_revision;
unsigned int mca_pentium_flag;

/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0x10000000;

/*
 * Setup options
 */
struct drive_info_struct { char dummy[32]; } drive_info;
struct screen_info screen_info;
struct apm_info apm_info;
struct sys_desc_table_struct {
    unsigned short length;
    unsigned char table[0];
};

unsigned char aux_device_present;

extern int root_mountflags;
extern char _text, _etext, _edata, _end;

extern int blk_nohighio;

int enable_acpi_smp_table;

/* Raw start-of-day parameters from the hypervisor. */
union xen_start_info_union xen_start_info_union;

#define COMMAND_LINE_SIZE 256
static char command_line[COMMAND_LINE_SIZE];
char saved_command_line[COMMAND_LINE_SIZE];
/* parse_mem_cmdline()
 * returns the value of the mem= boot param converted to pages or 0
 */
static int __init parse_mem_cmdline (char ** cmdline_p)
{
    char c = ' ', *to = command_line, *from = saved_command_line;
    int len = 0;
    unsigned long long bytes;
    int mem_param = 0;

    /* Save unparsed command line copy for /proc/cmdline */
    memcpy(saved_command_line, xen_start_info.cmd_line, COMMAND_LINE_SIZE);
    saved_command_line[COMMAND_LINE_SIZE-1] = '\0';

    for (;;) {
        /*
         * "mem=nopentium" disables the 4MB page tables.
         * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
         * to <mem>, overriding the bios size.
         * "mem=XXX[KkmM]@XXX[KkmM]" defines a memory region from
         * <start> to <start>+<mem>, overriding the bios size.
         */
        if (c == ' ' && !memcmp(from, "mem=", 4)) {
            if (to != command_line)
                to--;
            if (!memcmp(from+4, "nopentium", 9)) {
                from += 9+4;
            } else if (!memcmp(from+4, "exactmap", 8)) {
                from += 8+4;
            } else {
                bytes = memparse(from+4, &from);
                mem_param = bytes>>PAGE_SHIFT;
                if (*from == '@')
                    (void)memparse(from+1, &from);
            }
        }

        c = *(from++);
        if (!c)
            break;
        if (COMMAND_LINE_SIZE <= ++len)
            break;
        *(to++) = c;
    }
    *to = '\0';
    *cmdline_p = command_line;

    return mem_param;
}
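
/*
 * Example (assuming 4kB pages, i.e. PAGE_SHIFT == 12): "mem=512M" yields
 * 512MB >> 12 = 131072 pages; for "mem=96M@16M" the "@16M" start address
 * is parsed but discarded above, and mem_param becomes 24576 pages.
 */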
/*
 * Every exception-fixup table is sorted (i.e., the kernel main table, and
 * every module table). Some elements may be out of order if they reference
 * text.init, for example.
 */
static void sort_exception_table(struct exception_table_entry *start,
                                 struct exception_table_entry *end)
{
    struct exception_table_entry *p, *q, tmp;

    for ( p = start; p < end; p++ )
    {
        for ( q = p-1; q > start; q-- )
            if ( p->insn > q->insn )
                break;
        if ( ++q != p )
        {
            tmp = *p;
            memmove(q+1, q, (p-q)*sizeof(*p));
            *q = tmp;
        }
    }
}
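
/*
 * NB. sort_exception_table() above is a simple insertion sort keyed on the
 * faulting-instruction address; the page-fault fixup path expects each
 * table to be sorted so that it can be binary-searched.
 */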
int xen_module_init(struct module *mod)
{
    sort_exception_table(mod->ex_table_start, mod->ex_table_end);
    return 0;
}
void __init setup_arch(char **cmdline_p)
{
    int i,j;
    unsigned long bootmap_size, start_pfn, lmax_low_pfn;
    int mem_param;  /* user specified memory size in pages */
    int boot_pfn;   /* low pages available for bootmem */

    extern void hypervisor_callback(void);
    extern void failsafe_callback(void);

    extern unsigned long cpu0_pte_quicklist[];
    extern unsigned long cpu0_pgd_quicklist[];

    extern const struct exception_table_entry __start___ex_table[];
    extern const struct exception_table_entry __stop___ex_table[];

    extern char _stext;

    /* Force a quick death if the kernel panics. */
    extern int panic_timeout;
    if ( panic_timeout == 0 )
        panic_timeout = 1;

    /* Ensure that the kernel exception-fixup table is sorted. */
    sort_exception_table(__start___ex_table, __stop___ex_table);

#ifndef CONFIG_HIGHIO
    blk_nohighio = 1;
#endif

    HYPERVISOR_vm_assist(VMASST_CMD_enable,
                         VMASST_TYPE_4gb_segments);

    HYPERVISOR_set_callbacks(
        __KERNEL_CS, (unsigned long)hypervisor_callback,
        __KERNEL_CS, (unsigned long)failsafe_callback);
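
    /*
     * NB. hypervisor_callback is this guest's "interrupt" entry point: Xen
     * invokes it to deliver asynchronous event-channel notifications.
     * failsafe_callback is invoked instead if the hypervisor cannot
     * re-enter the guest normally (e.g. a bad segment selector on return).
     */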
    boot_cpu_data.pgd_quick = cpu0_pgd_quicklist;
    boot_cpu_data.pte_quick = cpu0_pte_quicklist;

    ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
    memset(&drive_info, 0, sizeof(drive_info));
    memset(&screen_info, 0, sizeof(screen_info));

    /* This is drawn from a dump from vgacon:startup in standard Linux. */
    screen_info.orig_video_mode = 3;
    screen_info.orig_video_isVGA = 1;
    screen_info.orig_video_lines = 25;
    screen_info.orig_video_cols = 80;
    screen_info.orig_video_ega_bx = 3;
    screen_info.orig_video_points = 16;

    memset(&apm_info.bios, 0, sizeof(apm_info.bios));
    aux_device_present = 0;
#ifdef CONFIG_BLK_DEV_RAM
    rd_image_start = 0;
    rd_prompt = 0;
    rd_doload = 0;
#endif

    root_mountflags &= ~MS_RDONLY;
    init_mm.start_code = (unsigned long) &_text;
    init_mm.end_code = (unsigned long) &_etext;
    init_mm.end_data = (unsigned long) &_edata;
    init_mm.brk = (unsigned long) &_end;
    /* The mem= kernel command line param overrides the detected amount
     * of memory. For xenolinux, if this override is larger than detected
     * memory, then boot using only detected memory and make provisions to
     * use all of the override value. The hypervisor can give this
     * domain more memory later on and it will be added to the free
     * lists at that time. See claim_new_pages() in
     * arch/xen/drivers/balloon/balloon.c
     */
    mem_param = parse_mem_cmdline(cmdline_p);
    if (!mem_param) mem_param = xen_start_info.nr_pages;
#define PFN_UP(x)    (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x)  ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)  ((x) << PAGE_SHIFT)
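
/*
 * For example, with 4kB pages (PAGE_SHIFT == 12): PFN_UP(0x1001) == 2
 * (a byte address rounded up to whole frames), PFN_DOWN(0x1001) == 1,
 * and PFN_PHYS(2) == 0x2000.
 */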
/*
 * 128MB for vmalloc(), iomap(), kmap(), and fixaddr mappings.
 */
#define VMALLOC_RESERVE (unsigned long)(128 << 20)
#define MAXMEM          (unsigned long)(HYPERVISOR_VIRT_START-PAGE_OFFSET-VMALLOC_RESERVE)
#define MAXMEM_PFN      PFN_DOWN(MAXMEM)
#define MAX_NONPAE_PFN  (1 << 20)
    /*
     * Determine low and high memory ranges:
     */
    lmax_low_pfn = max_pfn = mem_param;
    if (lmax_low_pfn > MAXMEM_PFN) {
        lmax_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
        /* Maximum memory usable is what is directly addressable */
        printk(KERN_WARNING "Warning: only %ldMB will be used.\n",
               MAXMEM>>20);
        if (max_pfn > MAX_NONPAE_PFN)
            printk(KERN_WARNING "Use a PAE enabled kernel.\n");
        else
            printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_X86_PAE
        if (max_pfn > MAX_NONPAE_PFN) {
            max_pfn = MAX_NONPAE_PFN;
            printk(KERN_WARNING "Warning: only 4GB will be used.\n");
            printk(KERN_WARNING "Use a PAE enabled kernel.\n");
        }
#endif /* !CONFIG_X86_PAE */
#endif /* !CONFIG_HIGHMEM */
    }

#ifdef CONFIG_HIGHMEM
    highstart_pfn = highend_pfn = max_pfn;
    if (max_pfn > MAXMEM_PFN) {
        highstart_pfn = MAXMEM_PFN;
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
               pages_to_mb(highend_pfn - highstart_pfn));
    }
#endif
    phys_to_machine_mapping = (unsigned long *)xen_start_info.mfn_list;
    cur_pgd = init_mm.pgd = (pgd_t *)xen_start_info.pt_base;

    start_pfn = (__pa(xen_start_info.pt_base) >> PAGE_SHIFT) +
        xen_start_info.nr_pt_frames;

    /*
     * Initialize the boot-time allocator, and free up all RAM. Then reserve
     * space for OS image, initrd, phys->machine table, bootstrap page table,
     * and the bootmem bitmap.
     * NB. There is definitely enough room for the bootmem bitmap in the
     * bootstrap page table. We are guaranteed to get >=512kB unused 'padding'
     * for our own use after all bootstrap elements (see hypervisor-if.h).
     */
    boot_pfn = min((int)xen_start_info.nr_pages,lmax_low_pfn);
    bootmap_size = init_bootmem(start_pfn,boot_pfn);
    free_bootmem(0, PFN_PHYS(boot_pfn));
    reserve_bootmem(__pa(&_stext),
                    PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE-1 -
                    __pa(&_stext));

    /* init_bootmem() set the global max_low_pfn to boot_pfn. Now max_low_pfn
     * can be set to the override value.
     */
    max_low_pfn = lmax_low_pfn;
#ifdef CONFIG_BLK_DEV_INITRD
    if ( xen_start_info.mod_start != 0 )
    {
        if ( (__pa(xen_start_info.mod_start) + xen_start_info.mod_len) <=
             (max_low_pfn << PAGE_SHIFT) )
        {
            initrd_start = xen_start_info.mod_start;
            initrd_end   = initrd_start + xen_start_info.mod_len;
            initrd_below_start_ok = 1;
        }
        else
        {
            printk(KERN_ERR "initrd extends beyond end of memory "
                   "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                   __pa(xen_start_info.mod_start) + xen_start_info.mod_len,
                   max_low_pfn << PAGE_SHIFT);
            initrd_start = 0;
        }
    }
#endif
    paging_init();

    pfn_to_mfn_frame_list = alloc_bootmem_low_pages(PAGE_SIZE);
    for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
    {
        pfn_to_mfn_frame_list[j] =
            virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
    }
    HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
        virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
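
    /*
     * Each entry of pfn_to_mfn_frame_list names the machine frame holding
     * one page of the phys_to_machine_mapping (P2M) table. With 4kB pages
     * and 4-byte entries, each such frame covers 1024 pseudo-physical pages
     * (4MB), so this single page of frame numbers describes up to 4GB.
     * Publishing its MFN in shared_info lets the control tools map the
     * domain's P2M table, e.g. for save/restore.
     */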
    /* If we are a privileged guest OS then we should request IO privileges. */
    if ( xen_start_info.flags & SIF_PRIVILEGED )
    {
        dom0_op_t op;
        op.cmd           = DOM0_IOPL;
        op.u.iopl.domain = DOMID_SELF;
        op.u.iopl.iopl   = 1;
        if ( HYPERVISOR_dom0_op(&op) != 0 )
            panic("Unable to obtain IOPL, despite being SIF_PRIVILEGED");
        current->thread.io_pl = 1;
    }

    if ( xen_start_info.flags & SIF_INITDOMAIN )
    {
        if ( !(xen_start_info.flags & SIF_PRIVILEGED) )
            panic("Xen granted us console access but not privileged status");

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
    }
}
static int cachesize_override __initdata = -1;
static int __init cachesize_setup(char *str)
{
    get_option (&str, &cachesize_override);
    return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init highio_setup(char *str)
{
    printk("i386: disabling HIGHMEM block I/O\n");
    blk_nohighio = 1;
    return 1;
}
__setup("nohighio", highio_setup);
static int __init get_model_name(struct cpuinfo_x86 *c)
{
    unsigned int *v;
    char *p, *q;

    if (cpuid_eax(0x80000000) < 0x80000004)
        return 0;

    v = (unsigned int *) c->x86_model_id;
    cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
    cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
    cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
    c->x86_model_id[48] = 0;

    /* Intel chips right-justify this string for some dumb reason;
       undo that brain damage */
    p = q = &c->x86_model_id[0];
    while ( *p == ' ' )
        p++;
    if ( p != q ) {
        while ( *p )
            *q++ = *p++;
        while ( q <= &c->x86_model_id[48] )
            *q++ = '\0';    /* Zero-pad the rest */
    }

    return 1;
}
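
/*
 * CPUID leaves 0x80000002-0x80000004 each return 16 bytes of the processor
 * brand string in EAX:EBX:ECX:EDX, 48 bytes in total; hence the three
 * cpuid() calls above filling v[0..11].
 */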
static void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
    unsigned int n, dummy, ecx, edx, l2size;

    n = cpuid_eax(0x80000000);

    if (n >= 0x80000005) {
        cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
        printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
               edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
        c->x86_cache_size=(ecx>>24)+(edx>>24);
    }

    if (n < 0x80000006)    /* Some chips just have a large L1. */
        return;

    ecx = cpuid_ecx(0x80000006);
    l2size = ecx >> 16;
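
    /*
     * CPUID 0x80000006: ECX[31:16] is the L2 cache size in KB and
     * ECX[7:0] the line size in bytes; the printk at the end of this
     * function relies on the same layout.
     */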
    /* AMD errata T13 (order #21922) */
    if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
        if (c->x86_model == 3 && c->x86_mask == 0)    /* Duron Rev A0 */
            l2size = 64;
        if (c->x86_model == 4 &&
            (c->x86_mask==0 || c->x86_mask==1))       /* Tbird rev A1/A2 */
            l2size = 256;
    }

    /* Intel PIII Tualatin. This comes in two flavours.
     * One has 256kb of cache, the other 512. We have no way
     * to determine which, so we use a boottime override
     * for the 512kb model, and assume 256 otherwise.
     */
    if ((c->x86_vendor == X86_VENDOR_INTEL) && (c->x86 == 6) &&
        (c->x86_model == 11) && (l2size == 0))
        l2size = 256;

    if (c->x86_vendor == X86_VENDOR_CENTAUR) {
        /* VIA C3 CPUs (670-68F) need further shifting. */
        if ((c->x86 == 6) &&
            ((c->x86_model == 7) || (c->x86_model == 8))) {
            l2size >>= 8;
        }

        /* VIA also screwed up Nehemiah stepping 1, and made
           it return '65KB' instead of '64KB'
           - Note, it seems this may only be in engineering samples. */
        if ((c->x86==6) && (c->x86_model==9) &&
            (c->x86_mask==1) && (l2size==65))
            l2size -= 1;
    }

    /* Allow user to override all this if necessary. */
    if (cachesize_override != -1)
        l2size = cachesize_override;

    if ( l2size == 0 )
        return;    /* Again, no L2 cache is possible */

    c->x86_cache_size = l2size;

    printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
           l2size, ecx & 0xFF);
}
static void __init init_c3(struct cpuinfo_x86 *c)
{
    /* Test for Centaur Extended Feature Flags presence */
    if (cpuid_eax(0xC0000000) >= 0xC0000001) {
        /* store Centaur Extended Feature Flags as
         * word 5 of the CPU capability bit array
         */
        c->x86_capability[5] = cpuid_edx(0xC0000001);
    }

    switch (c->x86_model) {
    case 9:    /* Nehemiah */
    default:
        get_model_name(c);
        display_cacheinfo(c);
        break;
    }
}
static void __init init_centaur(struct cpuinfo_x86 *c)
{
    /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
       3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
    clear_bit(0*32+31, &c->x86_capability);

    switch (c->x86) {
    case 6:
        init_c3(c);
        break;
    default:
        panic("Unsupported Centaur CPU (%i)\n", c->x86);
    }
}
static int __init init_amd(struct cpuinfo_x86 *c)
{
    int r;

    /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
       3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
    clear_bit(0*32+31, &c->x86_capability);

    r = get_model_name(c);

    switch(c->x86)
    {
    case 5: /* We don't like AMD K6 */
        panic("Unsupported AMD processor\n");
    case 6: /* An Athlon/Duron. We can trust the BIOS probably */
        break;
    }

    display_cacheinfo(c);
    return r;
}
static void __init init_intel(struct cpuinfo_x86 *c)
{
    char *p = NULL;
    unsigned int l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */

    if (c->cpuid_level > 1) {
        /* supports eax=2 call */
        int i, j, n;
        int regs[4];
        unsigned char *dp = (unsigned char *)regs;

        /* Number of times to iterate */
        n = cpuid_eax(2) & 0xFF;
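
        /*
         * The low byte (AL) of CPUID leaf 2 is the number of times the
         * leaf must be executed to retrieve the complete set of cache
         * and TLB descriptors; on most processors it is 1.
         */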
        for ( i = 0 ; i < n ; i++ ) {
            cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

            /* If bit 31 is set, this is an unknown format */
            for ( j = 0 ; j < 3 ; j++ ) {
                if ( regs[j] < 0 ) regs[j] = 0;
            }

            /* Byte 0 is level count, not a descriptor */
            for ( j = 1 ; j < 16 ; j++ ) {
                unsigned char des = dp[j];
                unsigned char dl, dh;
                unsigned int cs;

                dh = des >> 4;
                dl = des & 0x0F;

                /* Black magic... */

                switch ( dh )
                {
                case 0:
                    switch ( dl ) {
                    case 6:
                        /* L1 I cache */
                        l1i += 8;
                        break;
                    case 8:
                        /* L1 I cache */
                        l1i += 16;
                        break;
                    case 10:
                        /* L1 D cache */
                        l1d += 8;
                        break;
                    case 12:
                        /* L1 D cache */
                        l1d += 16;
                        break;
                    default:;
                        /* TLB, or unknown */
                    }
                    break;
                case 2:
                    if ( dl ) {
                        /* L3 cache */
                        cs = (dl-1) << 9;
                        l3 += cs;
                    }
                    break;
                case 4:
                    if ( c->x86 > 6 && dl ) {
                        /* P4 family */
                        /* L3 cache */
                        cs = 128 << (dl-1);
                        l3 += cs;
                        break;
                    }
                    /* else same as 8 - fall through */
                case 8:
                    if ( dl ) {
                        /* L2 cache */
                        cs = 128 << (dl-1);
                        l2 += cs;
                    }
                    break;
                case 6:
                    if (dl > 5) {
                        /* L1 D cache */
                        cs = 8<<(dl-6);
                        l1d += cs;
                    }
                    break;
                case 7:
                    if ( dl >= 8 )
                    {
                        /* L2 cache */
                        cs = 64<<(dl-8);
                        l2 += cs;
                    } else {
                        /* L0 I cache, count as L1 */
                        cs = dl ? (16 << (dl-1)) : 12;
                        l1i += cs;
                    }
                    break;
                default:
                    /* TLB, or something else we don't know about */
                    break;
                }
            }
        }
        if ( l1i || l1d )
            printk(KERN_INFO "CPU: L1 I cache: %dK, L1 D cache: %dK\n",
                   l1i, l1d);
        if ( l2 )
            printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
        if ( l3 )
            printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

        /*
         * This assumes the L3 cache is shared; it typically lives in
         * the northbridge. The L1 caches are included by the L2
         * cache, and so should not be included for the purpose of
         * SMP switching weights.
         */
        c->x86_cache_size = l2 ? l2 : (l1i+l1d);
    }

    /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */
    if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
        clear_bit(X86_FEATURE_SEP, &c->x86_capability);

    /* Names for the Pentium II/Celeron processors
       detectable only by also checking the cache size.
       Dixon is NOT a Celeron. */
    if (c->x86 == 6) {
        switch (c->x86_model) {
        case 5:
            if (l2 == 0)
                p = "Celeron (Covington)";
            if (l2 == 256)
                p = "Mobile Pentium II (Dixon)";
            break;

        case 6:
            if (l2 == 128)
                p = "Celeron (Mendocino)";
            break;

        case 8:
            if (l2 == 128)
                p = "Celeron (Coppermine)";
            break;
        }
    }

    if ( p )
        strcpy(c->x86_model_id, p);
}
void __init get_cpu_vendor(struct cpuinfo_x86 *c)
{
    char *v = c->x86_vendor_id;

    if (!strcmp(v, "GenuineIntel"))
        c->x86_vendor = X86_VENDOR_INTEL;
    else if (!strcmp(v, "AuthenticAMD"))
        c->x86_vendor = X86_VENDOR_AMD;
    else if (!strcmp(v, "CentaurHauls"))
        c->x86_vendor = X86_VENDOR_CENTAUR;
    else
        c->x86_vendor = X86_VENDOR_UNKNOWN;
}
struct cpu_model_info {
    int vendor;
    int family;
    char *model_names[16];
};

/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() doesn't set the model name; */
/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
static struct cpu_model_info cpu_models[] __initdata = {
    { X86_VENDOR_INTEL, 6,
      { "Pentium Pro A-step", "Pentium Pro", NULL, "Pentium II (Klamath)",
        NULL, "Pentium II (Deschutes)", "Mobile Pentium II",
        "Pentium III (Katmai)", "Pentium III (Coppermine)", NULL,
        "Pentium III (Cascades)", NULL, NULL, NULL, NULL }},
    { X86_VENDOR_AMD, 6, /* Is this really necessary?? */
      { "Athlon", "Athlon",
        "Athlon", NULL, "Athlon", NULL,
        NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL }}
};
/* Look up CPU names by table lookup. */
static char __init *table_lookup_model(struct cpuinfo_x86 *c)
{
    struct cpu_model_info *info = cpu_models;
    int i;

    if ( c->x86_model >= 16 )
        return NULL;    /* Range check */

    for ( i = 0 ; i < sizeof(cpu_models)/sizeof(struct cpu_model_info) ; i++ ) {
        if ( info->vendor == c->x86_vendor &&
             info->family == c->x86 ) {
            return info->model_names[c->x86_model];
        }
        info++;
    }
    return NULL;        /* Not found */
}
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
    u32 f1, f2;

    asm("pushfl\n\t"
        "pushfl\n\t"
        "popl %0\n\t"
        "movl %0,%1\n\t"
        "xorl %2,%0\n\t"
        "pushl %0\n\t"
        "popfl\n\t"
        "pushfl\n\t"
        "popl %0\n\t"
        "popfl\n\t"
        : "=&r" (f1), "=&r" (f2)
        : "ir" (flag));

    return ((f1^f2) & flag) != 0;
}
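
/*
 * The asm above reads EFLAGS (f2 keeps the original), flips the requested
 * bit, writes it back, and re-reads EFLAGS into f1. If the bit stuck, the
 * flag is software-changeable. Toggling the ID flag (bit 21, X86_EFLAGS_ID)
 * this way is the architected test for CPUID support, which have_cpuid_p()
 * below relies on.
 */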
/* Probe for the CPUID instruction */
static int __init have_cpuid_p(void)
{
    return flag_is_changeable_p(X86_EFLAGS_ID);
}
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
unsigned char eddnr;
struct edd_info edd[EDDMAXNR];
unsigned int edd_disk80_sig;
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from empty_zero_page into a safe place.
 *
 */
static inline void copy_edd(void)
{
    eddnr = EDD_NR;
    memcpy(edd, EDD_BUF, sizeof(edd));
    edd_disk80_sig = DISK80_SIGNATURE_BUFFER;
}
#else
static inline void copy_edd(void) {}
#endif
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
    int junk, i;
    u32 xlvl, tfms;

    c->loops_per_jiffy = loops_per_jiffy;
    c->x86_cache_size = -1;
    c->x86_vendor = X86_VENDOR_UNKNOWN;
    c->cpuid_level = -1;               /* CPUID not detected */
    c->x86_model = c->x86_mask = 0;    /* So far unknown... */
    c->x86_vendor_id[0] = '\0';        /* Unset */
    c->x86_model_id[0] = '\0';         /* Unset */
    memset(&c->x86_capability, 0, sizeof c->x86_capability);
    c->hard_math = 1;

    if ( !have_cpuid_p() ) {
        panic("Processor must support CPUID\n");
    } else {
        /* CPU does have CPUID */

        /* Get vendor name */
        cpuid(0x00000000, &c->cpuid_level,
              (int *)&c->x86_vendor_id[0],
              (int *)&c->x86_vendor_id[8],
              (int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c);

        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */

        /* Intel-defined flags: level 0x00000001 */
        if ( c->cpuid_level >= 0x00000001 ) {
            u32 capability, excap;
            cpuid(0x00000001, &tfms, &junk, &excap, &capability);
            c->x86_capability[0] = capability;
            c->x86_capability[4] = excap;
            c->x86 = (tfms >> 8) & 15;
            c->x86_model = (tfms >> 4) & 15;
            if (c->x86 == 0xf) {
                c->x86 += (tfms >> 20) & 0xff;
                c->x86_model += ((tfms >> 16) & 0xF) << 4;
            }
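
            /*
             * For family-0xF parts the extended family (EAX[27:20]) is
             * added to the base family and the extended model (EAX[19:16])
             * is prepended to the base model, per the CPUID specification.
             */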
            c->x86_mask = tfms & 15;
        } else {
            /* Have CPUID level 0 only - unheard of */
            c->x86 = 4;
        }

        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        if ( (xlvl & 0xffff0000) == 0x80000000 ) {
            if ( xlvl >= 0x80000001 )
                c->x86_capability[1] = cpuid_edx(0x80000001);
            if ( xlvl >= 0x80000004 )
                get_model_name(c);    /* Default name */
        }

        /* Transmeta-defined flags: level 0x80860001 */
        xlvl = cpuid_eax(0x80860000);
        if ( (xlvl & 0xffff0000) == 0x80860000 ) {
            if ( xlvl >= 0x80860001 )
                c->x86_capability[2] = cpuid_edx(0x80860001);
        }
    }

    printk(KERN_DEBUG "CPU: Before vendor init, caps: %08x %08x %08x, vendor = %d\n",
           c->x86_capability[0],
           c->x86_capability[1],
           c->x86_capability[2],
           c->x86_vendor);

    /*
     * Vendor-specific initialization. In this section we
     * canonicalize the feature flags, meaning if there are
     * features a certain CPU supports which CPUID doesn't
     * tell us, CPUID claiming incorrect flags, or other bugs,
     * we handle them here.
     *
     * At the end of this section, c->x86_capability better
     * indicate the features this CPU genuinely supports!
     */
    switch ( c->x86_vendor ) {
    case X86_VENDOR_AMD:
        init_amd(c);
        break;

    case X86_VENDOR_INTEL:
        init_intel(c);
        break;

    case X86_VENDOR_CENTAUR:
        init_centaur(c);
        break;

    default:
        printk("Unsupported CPU vendor (%d) -- please report!\n",
               c->x86_vendor);
    }

    printk(KERN_DEBUG "CPU: After vendor init, caps: %08x %08x %08x %08x\n",
           c->x86_capability[0],
           c->x86_capability[1],
           c->x86_capability[2],
           c->x86_capability[3]);

    /* If the model name is still unset, do table lookup. */
    if ( !c->x86_model_id[0] ) {
        char *p;
        p = table_lookup_model(c);
        if ( p )
            strcpy(c->x86_model_id, p);
        else
            /* Last resort... */
            sprintf(c->x86_model_id, "%02x/%02x",
                    c->x86_vendor, c->x86_model);
    }

    /* Now the feature flags better reflect actual CPU features! */

    printk(KERN_DEBUG "CPU: After generic, caps: %08x %08x %08x %08x\n",
           c->x86_capability[0],
           c->x86_capability[1],
           c->x86_capability[2],
           c->x86_capability[3]);

    /*
     * On SMP, boot_cpu_data holds the common feature set between
     * all CPUs; so make sure that we indicate which features are
     * common between the CPUs. The first time this routine gets
     * executed, c == &boot_cpu_data.
     */
    if ( c != &boot_cpu_data ) {
        /* AND the already accumulated flags with these */
        for ( i = 0 ; i < NCAPINTS ; i++ )
            boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
    }

    printk(KERN_DEBUG "CPU: Common caps: %08x %08x %08x %08x\n",
           boot_cpu_data.x86_capability[0],
           boot_cpu_data.x86_capability[1],
           boot_cpu_data.x86_capability[2],
           boot_cpu_data.x86_capability[3]);
}
/* These need to match <asm/processor.h> */
static char *cpu_vendor_names[] __initdata = {
    "Intel", "Cyrix", "AMD", "UMC", "NexGen", "Centaur", "Rise", "Transmeta" };
void __init print_cpu_info(struct cpuinfo_x86 *c)
{
    char *vendor = NULL;

    if (c->x86_vendor < sizeof(cpu_vendor_names)/sizeof(char *))
        vendor = cpu_vendor_names[c->x86_vendor];
    else if (c->cpuid_level >= 0)
        vendor = c->x86_vendor_id;

    if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
        printk("%s ", vendor);

    if (!c->x86_model_id[0])
        printk("%d86", c->x86);
    else
        printk("%s", c->x86_model_id);

    if (c->x86_mask || c->cpuid_level >= 0)
        printk(" stepping %02x\n", c->x86_mask);
    else
        printk("\n");
}
/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
    /*
     * These flag bits must match the definitions in <asm/cpufeature.h>.
     * NULL means this bit is undefined or reserved; either way it doesn't
     * have meaning as far as Linux is concerned. Note that it's important
     * to realize there is a difference between this table and CPUID -- if
     * applications want to get the raw CPUID data, they should access
     * /dev/cpu/<cpu_nr>/cpuid instead.
     */
    static char *x86_cap_flags[] = {
        /* Intel-defined */
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",

        /* AMD-defined */
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, "mp", NULL, NULL, "mmxext", NULL,
        NULL, NULL, NULL, NULL, NULL, "lm", "3dnowext", "3dnow",

        /* Transmeta-defined */
        "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

        /* Other (Linux-defined) */
        "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

        /* Intel-defined (#2) */
        "pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "tm2",
        "est", NULL, "cid", NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

        /* VIA/Cyrix/Centaur-defined */
        NULL, NULL, "xstore", NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
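
    /*
     * Each 32-string block above corresponds to one 32-bit word of
     * c->x86_capability (words 0-5), so flag bit i is reported below
     * iff test_bit(i, ...) is set and x86_cap_flags[i] is non-NULL.
     */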
    struct cpuinfo_x86 *c = v;
    int i, n = c - cpu_data;
    int fpu_exception;

#ifdef CONFIG_SMP
    if (!(cpu_online_map & (1<<n)))
        return 0;
#endif
    seq_printf(m, "processor\t: %d\n"
               "vendor_id\t: %s\n"
               "cpu family\t: %d\n"
               "model\t\t: %d\n"
               "model name\t: %s\n",
               n,
               c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
               c->x86,
               c->x86_model,
               c->x86_model_id[0] ? c->x86_model_id : "unknown");

    if (c->x86_mask || c->cpuid_level >= 0)
        seq_printf(m, "stepping\t: %d\n", c->x86_mask);
    else
        seq_printf(m, "stepping\t: unknown\n");

    if ( test_bit(X86_FEATURE_TSC, &c->x86_capability) ) {
        seq_printf(m, "cpu MHz\t\t: %lu.%03lu\n",
                   cpu_khz / 1000, (cpu_khz % 1000));
    }

    /* Cache size */
    if (c->x86_cache_size >= 0)
        seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

    /* We use exception 16 if we have hardware math and we've either seen
       it or the CPU claims it is internal */
    fpu_exception = c->hard_math && (ignore_irq13 || cpu_has_fpu);
    seq_printf(m, "fdiv_bug\t: %s\n"
               "hlt_bug\t\t: %s\n"
               "f00f_bug\t: %s\n"
               "coma_bug\t: %s\n"
               "fpu\t\t: %s\n"
               "fpu_exception\t: %s\n"
               "cpuid level\t: %d\n"
               "wp\t\t: %s\n"
               "flags\t\t:",
               c->fdiv_bug ? "yes" : "no",
               c->hlt_works_ok ? "no" : "yes",
               c->f00f_bug ? "yes" : "no",
               c->coma_bug ? "yes" : "no",
               c->hard_math ? "yes" : "no",
               fpu_exception ? "yes" : "no",
               c->cpuid_level,
               c->wp_works_ok ? "yes" : "no");

    for ( i = 0 ; i < 32*NCAPINTS ; i++ )
        if ( test_bit(i, &c->x86_capability) &&
             x86_cap_flags[i] != NULL )
            seq_printf(m, " %s", x86_cap_flags[i]);

    seq_printf(m, "\nbogomips\t: %lu.%02lu\n\n",
               c->loops_per_jiffy/(500000/HZ),
               (c->loops_per_jiffy/(5000/HZ)) % 100);
    return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
    return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
    ++*pos;
    return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
    start:  c_start,
    next:   c_next,
    stop:   c_stop,
    show:   show_cpuinfo,
};
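
/*
 * NB. The "name: value" initializers are the old GCC extension syntax.
 * The seq_file core calls start, then show/next repeatedly, then stop,
 * streaming one record per CPU into /proc/cpuinfo.
 */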
unsigned long cpu_initialized __initdata = 0;

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless; this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __init cpu_init (void)
{
    int nr = smp_processor_id();

    if (test_and_set_bit(nr, &cpu_initialized)) {
        printk(KERN_WARNING "CPU#%d already initialized!\n", nr);
        for (;;) __sti();
    }
    printk(KERN_INFO "Initializing CPU#%d\n", nr);

    /*
     * set up and load the per-CPU TSS and LDT
     */
    atomic_inc(&init_mm.mm_count);
    current->active_mm = &init_mm;
    if (current->mm)
        BUG();
    enter_lazy_tlb(&init_mm, current, nr);

    HYPERVISOR_stack_switch(__KERNEL_DS, current->thread.esp0);

    load_LDT(&init_mm.context);
    flush_page_update_queue();

    /* Force FPU initialization. */
    current->flags &= ~PF_USEDFPU;
    current->used_math = 0;
    stts();
}