ia64/xen-unstable
xenolinux-2.4.16-sparse/arch/xeno/kernel/setup.c @ 86:4a10fe9b20ec

bitkeeper revision 1.15 (3e24a984iRiWWcgfKCxu2p5q3YbxXw)

Many files:
  First half of support for per-domain GDTs and LDTs

author    kaf24@labyrinth.cl.cam.ac.uk
date      Wed Jan 15 00:21:24 2003 +0000
parents   c3e6a52cd801
children  0ecf87d4739a
/*
 *  linux/arch/i386/kernel/setup.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/apm_bios.h>
#ifdef CONFIG_BLK_DEV_RAM
#include <linux/blk.h>
#endif
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/dma.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/hypervisor.h>
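
/*
 * Points at the shared-info page that the hypervisor maps into every
 * domain; it is filled in during early boot (before setup_arch() runs,
 * which dereferences it below to register the guest's event and
 * failsafe callbacks).
 */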
shared_info_t *HYPERVISOR_shared_info;

/*
 * Machine setup..
 */

char ignore_irq13;		/* set if exception 16 works */
struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };

unsigned long mmu_cr4_features;

/*
 * Bus types ..
 */
#ifdef CONFIG_EISA
int EISA_bus;
#endif
int MCA_bus;

/* for MCA, but anyone else can use it if they want */
unsigned int machine_id;
unsigned int machine_submodel_id;
unsigned int BIOS_revision;
unsigned int mca_pentium_flag;

/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0x10000000;

/*
 * Setup options
 */
struct drive_info_struct { char dummy[32]; } drive_info;
struct screen_info screen_info;
struct apm_info apm_info;
struct sys_desc_table_struct {
    unsigned short length;
    unsigned char table[0];
};

unsigned char aux_device_present;

extern int root_mountflags;
extern char _text, _etext, _edata, _end;

int enable_acpi_smp_table;

/* Raw start-of-day parameters from the hypervisor. */
union start_info_union start_info_union;

#define COMMAND_LINE_SIZE 256
static char command_line[COMMAND_LINE_SIZE];
char saved_command_line[COMMAND_LINE_SIZE];

static void __init parse_mem_cmdline (char ** cmdline_p)
{
    char c = ' ', *to = command_line, *from = saved_command_line;
    int len = 0;

    /* Save unparsed command line copy for /proc/cmdline */
    memcpy(saved_command_line, start_info.cmd_line, COMMAND_LINE_SIZE);
    saved_command_line[COMMAND_LINE_SIZE-1] = '\0';

    for (;;) {
        /*
         * "mem=nopentium" disables the 4MB page tables.
         * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
         * to <mem>, overriding the bios size.
         * "mem=XXX[KkmM]@XXX[KkmM]" defines a memory region from
         * <start> to <start>+<mem>, overriding the bios size.
         */
        if (c == ' ' && !memcmp(from, "mem=", 4)) {
            if (to != command_line)
                to--;
            if (!memcmp(from+4, "nopentium", 9)) {
                from += 9+4;
            } else if (!memcmp(from+4, "exactmap", 8)) {
                from += 8+4;
            } else {
                (void)memparse(from+4, &from);
                if (*from == '@')
                    (void)memparse(from+1, &from);
            }
        }

        c = *(from++);
        if (!c)
            break;
        if (COMMAND_LINE_SIZE <= ++len)
            break;
        *(to++) = c;
    }
    *to = '\0';
    *cmdline_p = command_line;
}
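
/*
 * Illustrative examples (not from this tree): booting with
 *
 *     mem=64M         memparse() consumes "64M" -> 0x4000000
 *     mem=512M@16M    size and start, via the '@' form above
 *
 * Note the parsed values are discarded here -- under Xen the memory
 * size comes from start_info.nr_pages -- so the net effect is only to
 * strip "mem=" from the command line handed on via *cmdline_p, while
 * saved_command_line keeps the unparsed copy for /proc/cmdline.
 */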

void __init setup_arch(char **cmdline_p)
{
    unsigned long start_pfn, max_pfn, max_low_pfn;
    unsigned long bootmap_size;

    extern void hypervisor_callback(void);
    extern void failsafe_callback(void);

    extern unsigned long cpu0_pte_quicklist[];
    extern unsigned long cpu0_pgd_quicklist[];
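
    /*
     * Tell Xen where to deliver asynchronous event upcalls (the
     * paravirtual analogue of interrupts) and where to vector if a
     * callback ever finds the guest with unusable segment selectors.
     */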
    HYPERVISOR_shared_info->event_address =
        (unsigned long)hypervisor_callback;
    HYPERVISOR_shared_info->failsafe_address =
        (unsigned long)failsafe_callback;

    boot_cpu_data.pgd_quick = cpu0_pgd_quicklist;
    boot_cpu_data.pte_quick = cpu0_pte_quicklist;

    ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
    memset(&drive_info, 0, sizeof(drive_info));
    memset(&screen_info, 0, sizeof(screen_info));
    memset(&apm_info.bios, 0, sizeof(apm_info.bios));
    aux_device_present = 0;

#ifdef CONFIG_BLK_DEV_RAM
    rd_image_start = 0;
    rd_prompt = 0;
    rd_doload = 0;
#endif

    root_mountflags &= ~MS_RDONLY;
    init_mm.start_code = (unsigned long) &_text;
    init_mm.end_code = (unsigned long) &_etext;
    init_mm.end_data = (unsigned long) &_edata;
    init_mm.brk = (unsigned long) &_end;

    parse_mem_cmdline(cmdline_p);

#define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((x) << PAGE_SHIFT)
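
/*
 * With 4KB pages (PAGE_SHIFT == 12): PFN_UP rounds an address up to the
 * next page frame number, PFN_DOWN truncates, e.g.
 *     PFN_UP(0x1001)  == 2      PFN_DOWN(0x1001) == 1
 *     PFN_PHYS(2)     == 0x2000
 */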

/*
 * 128MB for vmalloc and initrd
 */
#define VMALLOC_RESERVE	(unsigned long)(128 << 20)
#define MAXMEM		(unsigned long)(HYPERVISOR_VIRT_START-PAGE_OFFSET-VMALLOC_RESERVE)
#define MAXMEM_PFN	PFN_DOWN(MAXMEM)
#define MAX_NONPAE_PFN	(1 << 20)

    /*
     * partially used pages are not usable - thus
     * we are rounding upwards:
     */
#ifdef CONFIG_BLK_DEV_INITRD
    if ( start_info.mod_start )
        start_pfn = PFN_UP(__pa(start_info.mod_start + start_info.mod_len));
    else
#endif
        start_pfn = PFN_UP(__pa(&_end));
    max_pfn = start_info.nr_pages;

    /*
     * Determine low and high memory ranges:
     */
    max_low_pfn = max_pfn;
    if (max_low_pfn > MAXMEM_PFN) {
        max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
        /* Maximum memory usable is what is directly addressable */
        printk(KERN_WARNING "Warning only %ldMB will be used.\n",
               MAXMEM>>20);
        if (max_pfn > MAX_NONPAE_PFN)
            printk(KERN_WARNING "Use a PAE enabled kernel.\n");
        else
            printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_X86_PAE
        if (max_pfn > MAX_NONPAE_PFN) {
            max_pfn = MAX_NONPAE_PFN;
            printk(KERN_WARNING "Warning only 4GB will be used.\n");
            printk(KERN_WARNING "Use a PAE enabled kernel.\n");
        }
#endif /* !CONFIG_X86_PAE */
#endif /* !CONFIG_HIGHMEM */
    }

#ifdef CONFIG_HIGHMEM
    highstart_pfn = highend_pfn = max_pfn;
    if (max_pfn > MAXMEM_PFN) {
        highstart_pfn = MAXMEM_PFN;
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
               pages_to_mb(highend_pfn - highstart_pfn));
    }
#endif

    /*
     * Initialize the boot-time allocator, and free up all RAM.
     * Then reserve space for OS image, and the bootmem bitmap.
     */
    bootmap_size = init_bootmem(start_pfn, max_low_pfn);
    free_bootmem(0, PFN_PHYS(max_low_pfn));
    reserve_bootmem(0, PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE-1);

    /* Now reserve space for the hypervisor-provided page tables. */
    {
        unsigned long *pgd = (unsigned long *)start_info.pt_base;
        unsigned long pte;
        int i;
        reserve_bootmem(__pa(pgd), PAGE_SIZE);
        /* Walk the pgd entries below the hypervisor hole (4MB apiece). */
        for ( i = 0; i < (HYPERVISOR_VIRT_START>>22); i++ )
        {
            unsigned long pgde = *pgd++;
            if ( !(pgde & 1) ) continue;	/* skip not-present entries */
            /* Convert the machine address of the page-table page back to
               a domain-relative physical address before reserving it. */
            pte = (pgde & PAGE_MASK) - start_info.phys_base;
            reserve_bootmem(pte, PAGE_SIZE);
        }
    }
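
    /*
     * Pinning asks the hypervisor to type-check this page directory and
     * keep it validated, so it can later be installed cheaply on context
     * switch. The queue_ prefix suggests the request is batched into the
     * MMU-update queue rather than trapping to Xen immediately.
     */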
    cur_pgd = init_mm.pgd = (pgd_t *)start_info.pt_base;
    queue_pgd_pin(__pa(init_mm.pgd));

#ifdef CONFIG_BLK_DEV_INITRD
    if (start_info.mod_start) {
        if ((__pa(start_info.mod_start) + start_info.mod_len) <=
            (max_low_pfn << PAGE_SHIFT)) {
            initrd_start = start_info.mod_start;
            initrd_end   = initrd_start + start_info.mod_len;
            initrd_below_start_ok = 1;
        }
        else {
            printk(KERN_ERR "initrd extends beyond end of memory "
                   "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                   __pa(start_info.mod_start) + start_info.mod_len,
                   max_low_pfn << PAGE_SHIFT);
            initrd_start = 0;
        }
    }
#endif

    paging_init();
}

static int cachesize_override __initdata = -1;
static int __init cachesize_setup(char *str)
{
    get_option (&str, &cachesize_override);
    return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init get_model_name(struct cpuinfo_x86 *c)
{
    unsigned int *v;
    char *p, *q;

    if (cpuid_eax(0x80000000) < 0x80000004)
        return 0;

    v = (unsigned int *) c->x86_model_id;
    cpuid(0x80000002, &v[0], &v[1], &v[2],  &v[3]);
    cpuid(0x80000003, &v[4], &v[5], &v[6],  &v[7]);
    cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
    c->x86_model_id[48] = 0;

    /* Intel chips right-justify this string for some dumb reason;
       undo that brain damage */
    p = q = &c->x86_model_id[0];
    while ( *p == ' ' )
        p++;
    if ( p != q ) {
        while ( *p )
            *q++ = *p++;
        while ( q <= &c->x86_model_id[48] )
            *q++ = '\0';	/* Zero-pad the rest */
    }

    return 1;
}

static void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
    unsigned int n, dummy, ecx, edx, l2size;

    n = cpuid_eax(0x80000000);

    if (n >= 0x80000005) {
        cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
        printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
               edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
        c->x86_cache_size=(ecx>>24)+(edx>>24);
    }

    if (n < 0x80000006)	/* Some chips just have a large L1. */
        return;

    ecx = cpuid_ecx(0x80000006);
    l2size = ecx >> 16;		/* ECX[31:16] is the L2 size in KB */

    /* AMD errata T13 (order #21922) */
    if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
        if (c->x86_model == 3 && c->x86_mask == 0)	/* Duron Rev A0 */
            l2size = 64;
        if (c->x86_model == 4 &&
            (c->x86_mask==0 || c->x86_mask==1))	/* Tbird rev A1/A2 */
            l2size = 256;
    }

    /* Intel PIII Tualatin. This comes in two flavours.
     * One has 256kb of cache, the other 512. We have no way
     * to determine which, so we use a boottime override
     * for the 512kb model, and assume 256 otherwise.
     */
    if ((c->x86_vendor == X86_VENDOR_INTEL) && (c->x86 == 6) &&
        (c->x86_model == 11) && (l2size == 0))
        l2size = 256;

    /* VIA C3 CPUs (670-68F) need further shifting. */
    if (c->x86_vendor == X86_VENDOR_CENTAUR && (c->x86 == 6) &&
        ((c->x86_model == 7) || (c->x86_model == 8))) {
        l2size = l2size >> 8;
    }

    /* Allow user to override all this if necessary. */
    if (cachesize_override != -1)
        l2size = cachesize_override;

    if ( l2size == 0 )
        return;		/* Again, no L2 cache is possible */

    c->x86_cache_size = l2size;

    printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
           l2size, ecx & 0xFF);
}

static int __init init_amd(struct cpuinfo_x86 *c)
{
    int r;

    /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
       3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
    clear_bit(0*32+31, &c->x86_capability);

    r = get_model_name(c);

    switch(c->x86)
    {
    case 6:	/* An Athlon/Duron. We can trust the BIOS probably */
        break;
    default:
        panic("Unsupported AMD processor\n");
    }

    display_cacheinfo(c);
    return r;
}

static void __init init_intel(struct cpuinfo_x86 *c)
{
    char *p = NULL;
    unsigned int l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */

    if (c->cpuid_level > 1) {
        /* supports eax=2 call */
        int i, j, n;
        int regs[4];
        unsigned char *dp = (unsigned char *)regs;

        /* Number of times to iterate */
        n = cpuid_eax(2) & 0xFF;
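
        /*
         * CPUID leaf 2 packs cache/TLB descriptors one byte at a time
         * into EAX..EDX; AL holds the number of times the leaf must be
         * queried, and a register with bit 31 set carries no valid
         * descriptors. Each descriptor byte is split into a high and
         * low nibble below and decoded into a size in KB.
         */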
        for ( i = 0 ; i < n ; i++ ) {
            cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

            /* If bit 31 is set, this is an unknown format */
            for ( j = 0 ; j < 3 ; j++ ) {
                if ( regs[j] < 0 ) regs[j] = 0;
            }

            /* Byte 0 is level count, not a descriptor */
            for ( j = 1 ; j < 16 ; j++ ) {
                unsigned char des = dp[j];
                unsigned char dl, dh;
                unsigned int cs;

                dh = des >> 4;
                dl = des & 0x0F;

                /* Black magic... */

                switch ( dh )
                {
                case 0:
                    switch ( dl ) {
                    case 6:
                        /* L1 I cache */
                        l1i += 8;
                        break;
                    case 8:
                        /* L1 I cache */
                        l1i += 16;
                        break;
                    case 10:
                        /* L1 D cache */
                        l1d += 8;
                        break;
                    case 12:
                        /* L1 D cache */
                        l1d += 16;
                        break;
                    default:;
                        /* TLB, or unknown */
                    }
                    break;
                case 2:
                    if ( dl ) {
                        /* L3 cache */
                        cs = (dl-1) << 9;
                        l3 += cs;
                    }
                    break;
                case 4:
                    if ( c->x86 > 6 && dl ) {
                        /* P4 family */
                        /* L3 cache */
                        cs = 128 << (dl-1);
                        l3 += cs;
                        break;
                    }
                    /* else same as 8 - fall through */
                case 8:
                    if ( dl ) {
                        /* L2 cache */
                        cs = 128 << (dl-1);
                        l2 += cs;
                    }
                    break;
                case 6:
                    if (dl > 5) {
                        /* L1 D cache */
                        cs = 8<<(dl-6);
                        l1d += cs;
                    }
                    break;
                case 7:
                    if ( dl >= 8 )
                    {
                        /* L2 cache */
                        cs = 64<<(dl-8);
                        l2 += cs;
                    } else {
                        /* L0 I cache, count as L1 */
                        cs = dl ? (16 << (dl-1)) : 12;
                        l1i += cs;
                    }
                    break;
                default:
                    /* TLB, or something else we don't know about */
                    break;
                }
            }
        }
        if ( l1i || l1d )
            printk(KERN_INFO "CPU: L1 I cache: %dK, L1 D cache: %dK\n",
                   l1i, l1d);
        if ( l2 )
            printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
        if ( l3 )
            printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

        /*
         * This assumes the L3 cache is shared; it typically lives in
         * the northbridge. The L1 caches are included by the L2
         * cache, and so should not be included for the purpose of
         * SMP switching weights.
         */
        c->x86_cache_size = l2 ? l2 : (l1i+l1d);
    }

    /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */
    if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
        clear_bit(X86_FEATURE_SEP, &c->x86_capability);

    /* Names for the Pentium II/Celeron processors
       detectable only by also checking the cache size.
       Dixon is NOT a Celeron. */
    if (c->x86 == 6) {
        switch (c->x86_model) {
        case 5:
            if (l2 == 0)
                p = "Celeron (Covington)";
            if (l2 == 256)
                p = "Mobile Pentium II (Dixon)";
            break;

        case 6:
            if (l2 == 128)
                p = "Celeron (Mendocino)";
            break;

        case 8:
            if (l2 == 128)
                p = "Celeron (Coppermine)";
            break;
        }
    }

    if ( p )
        strcpy(c->x86_model_id, p);
}

void __init get_cpu_vendor(struct cpuinfo_x86 *c)
{
    char *v = c->x86_vendor_id;

    if (!strcmp(v, "GenuineIntel"))
        c->x86_vendor = X86_VENDOR_INTEL;
    else if (!strcmp(v, "AuthenticAMD"))
        c->x86_vendor = X86_VENDOR_AMD;
    else
        c->x86_vendor = X86_VENDOR_UNKNOWN;
}

struct cpu_model_info {
    int vendor;
    int family;
    char *model_names[16];
};

/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() doesn't set the model name; */
/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
static struct cpu_model_info cpu_models[] __initdata = {
    { X86_VENDOR_INTEL, 6,
      { "Pentium Pro A-step", "Pentium Pro", NULL, "Pentium II (Klamath)",
        NULL, "Pentium II (Deschutes)", "Mobile Pentium II",
        "Pentium III (Katmai)", "Pentium III (Coppermine)", NULL,
        "Pentium III (Cascades)", NULL, NULL, NULL, NULL }},
    { X86_VENDOR_AMD, 6, /* Is this really necessary?? */
      { "Athlon", "Athlon",
        "Athlon", NULL, "Athlon", NULL,
        NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL }}
};

/* Look up CPU names by table lookup. */
static char __init *table_lookup_model(struct cpuinfo_x86 *c)
{
    struct cpu_model_info *info = cpu_models;
    int i;

    if ( c->x86_model >= 16 )
        return NULL;	/* Range check */

    for ( i = 0 ; i < sizeof(cpu_models)/sizeof(struct cpu_model_info) ; i++ ) {
        if ( info->vendor == c->x86_vendor &&
             info->family == c->x86 ) {
            return info->model_names[c->x86_model];
        }
        info++;
    }
    return NULL;	/* Not found */
}

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
    u32 f1, f2;
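
    /*
     * Push EFLAGS twice, keep one copy (f2 ends up holding the
     * original), toggle the requested bit and write it back, then
     * re-read EFLAGS into f1 and restore the original from the stack.
     * If the re-read value differs in that bit, the flag is
     * changeable; for X86_EFLAGS_ID this is the architectural test
     * for CPUID support.
     */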
    asm("pushfl\n\t"
        "pushfl\n\t"
        "popl %0\n\t"
        "movl %0,%1\n\t"
        "xorl %2,%0\n\t"
        "pushl %0\n\t"
        "popfl\n\t"
        "pushfl\n\t"
        "popl %0\n\t"
        "popfl\n\t"
        : "=&r" (f1), "=&r" (f2)
        : "ir" (flag));

    return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
static int __init have_cpuid_p(void)
{
    return flag_is_changeable_p(X86_EFLAGS_ID);
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
    int junk, i;
    u32 xlvl, tfms;

    c->loops_per_jiffy = loops_per_jiffy;
    c->x86_cache_size = -1;
    c->x86_vendor = X86_VENDOR_UNKNOWN;
    c->cpuid_level = -1;	/* CPUID not detected */
    c->x86_model = c->x86_mask = 0;	/* So far unknown... */
    c->x86_vendor_id[0] = '\0';	/* Unset */
    c->x86_model_id[0] = '\0';	/* Unset */
    memset(&c->x86_capability, 0, sizeof c->x86_capability);
    c->hard_math = 1;

    if ( !have_cpuid_p() ) {
        panic("Processor must support CPUID\n");
    } else {
        /* CPU does have CPUID */

        /* Get vendor name */
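        /* CPUID 0 returns the 12-byte vendor string in EBX, EDX, ECX
           order, hence the 0/8/4 offsets below. */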
        cpuid(0x00000000, &c->cpuid_level,
              (int *)&c->x86_vendor_id[0],
              (int *)&c->x86_vendor_id[8],
              (int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c);

        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */

        /* Intel-defined flags: level 0x00000001 */
        if ( c->cpuid_level >= 0x00000001 ) {
            cpuid(0x00000001, &tfms, &junk, &junk,
                  &c->x86_capability[0]);
            c->x86 = (tfms >> 8) & 15;
            c->x86_model = (tfms >> 4) & 15;
            c->x86_mask = tfms & 15;
        } else {
            /* Have CPUID level 0 only - unheard of */
            c->x86 = 4;
        }

        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        if ( (xlvl & 0xffff0000) == 0x80000000 ) {
            if ( xlvl >= 0x80000001 )
                c->x86_capability[1] = cpuid_edx(0x80000001);
            if ( xlvl >= 0x80000004 )
                get_model_name(c); /* Default name */
        }

        /* Transmeta-defined flags: level 0x80860001 */
        xlvl = cpuid_eax(0x80860000);
        if ( (xlvl & 0xffff0000) == 0x80860000 ) {
            if ( xlvl >= 0x80860001 )
                c->x86_capability[2] = cpuid_edx(0x80860001);
        }
    }

    printk(KERN_DEBUG "CPU: Before vendor init, caps: %08x %08x %08x, vendor = %d\n",
           c->x86_capability[0],
           c->x86_capability[1],
           c->x86_capability[2],
           c->x86_vendor);

    /*
     * Vendor-specific initialization. In this section we
     * canonicalize the feature flags, meaning if there are
     * features a certain CPU supports which CPUID doesn't
     * tell us, CPUID claiming incorrect flags, or other bugs,
     * we handle them here.
     *
     * At the end of this section, c->x86_capability better
     * indicate the features this CPU genuinely supports!
     */
    switch ( c->x86_vendor ) {
    case X86_VENDOR_AMD:
        init_amd(c);
        break;

    case X86_VENDOR_INTEL:
        init_intel(c);
        break;

    default:
        panic("Unsupported CPU vendor\n");
    }

    printk(KERN_DEBUG "CPU: After vendor init, caps: %08x %08x %08x %08x\n",
           c->x86_capability[0],
           c->x86_capability[1],
           c->x86_capability[2],
           c->x86_capability[3]);

    /* If the model name is still unset, do table lookup. */
    if ( !c->x86_model_id[0] ) {
        char *p;
        p = table_lookup_model(c);
        if ( p )
            strcpy(c->x86_model_id, p);
        else
            /* Last resort... */
            sprintf(c->x86_model_id, "%02x/%02x",
                    c->x86_vendor, c->x86_model);
    }

    /* Now the feature flags better reflect actual CPU features! */

    printk(KERN_DEBUG "CPU: After generic, caps: %08x %08x %08x %08x\n",
           c->x86_capability[0],
           c->x86_capability[1],
           c->x86_capability[2],
           c->x86_capability[3]);

    /*
     * On SMP, boot_cpu_data holds the common feature set between
     * all CPUs; so make sure that we indicate which features are
     * common between the CPUs. The first time this routine gets
     * executed, c == &boot_cpu_data.
     */
    if ( c != &boot_cpu_data ) {
        /* AND the already accumulated flags with these */
        for ( i = 0 ; i < NCAPINTS ; i++ )
            boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
    }

    printk(KERN_DEBUG "CPU: Common caps: %08x %08x %08x %08x\n",
           boot_cpu_data.x86_capability[0],
           boot_cpu_data.x86_capability[1],
           boot_cpu_data.x86_capability[2],
           boot_cpu_data.x86_capability[3]);
}

/* These need to match <asm/processor.h> */
static char *cpu_vendor_names[] __initdata = {
    "Intel", "Cyrix", "AMD", "UMC", "NexGen", "Centaur", "Rise", "Transmeta" };

void __init print_cpu_info(struct cpuinfo_x86 *c)
{
    char *vendor = NULL;

    if (c->x86_vendor < sizeof(cpu_vendor_names)/sizeof(char *))
        vendor = cpu_vendor_names[c->x86_vendor];
    else if (c->cpuid_level >= 0)
        vendor = c->x86_vendor_id;

    if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
        printk("%s ", vendor);

    if (!c->x86_model_id[0])
        printk("%d86", c->x86);
    else
        printk("%s", c->x86_model_id);

    if (c->x86_mask || c->cpuid_level >= 0)
        printk(" stepping %02x\n", c->x86_mask);
    else
        printk("\n");
}

/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
    /*
     * These flag bits must match the definitions in <asm/cpufeature.h>.
     * NULL means this bit is undefined or reserved; either way it doesn't
     * have meaning as far as Linux is concerned. Note that it's important
     * to realize there is a difference between this table and CPUID -- if
     * applications want to get the raw CPUID data, they should access
     * /dev/cpu/<cpu_nr>/cpuid instead.
     */
    static char *x86_cap_flags[] = {
        /* Intel-defined */
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", NULL, "tm", "ia64", NULL,

        /* AMD-defined */
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, "mmxext", NULL,
        NULL, NULL, NULL, NULL, NULL, "lm", "3dnowext", "3dnow",

        /* Transmeta-defined */
        "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

        /* Other (Linux-defined) */
        "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    struct cpuinfo_x86 *c = v;
    int i, n = c - cpu_data;
    int fpu_exception;

#ifdef CONFIG_SMP
    if (!(cpu_online_map & (1<<n)))
        return 0;
#endif

    seq_printf(m, "processor\t: %d\n"
               "vendor_id\t: %s\n"
               "cpu family\t: %d\n"
               "model\t\t: %d\n"
               "model name\t: %s\n",
               n,
               c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
               c->x86,
               c->x86_model,
               c->x86_model_id[0] ? c->x86_model_id : "unknown");

    if (c->x86_mask || c->cpuid_level >= 0)
        seq_printf(m, "stepping\t: %d\n", c->x86_mask);
    else
        seq_printf(m, "stepping\t: unknown\n");

    if ( test_bit(X86_FEATURE_TSC, &c->x86_capability) ) {
        seq_printf(m, "cpu MHz\t\t: %lu.%03lu\n",
                   cpu_khz / 1000, (cpu_khz % 1000));
    }

    /* Cache size */
    if (c->x86_cache_size >= 0)
        seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

    /* We use exception 16 if we have hardware math and we've either
       seen it or the CPU claims it is internal */
    fpu_exception = c->hard_math && (ignore_irq13 || cpu_has_fpu);
    seq_printf(m, "fdiv_bug\t: %s\n"
               "hlt_bug\t\t: %s\n"
               "f00f_bug\t: %s\n"
               "coma_bug\t: %s\n"
               "fpu\t\t: %s\n"
               "fpu_exception\t: %s\n"
               "cpuid level\t: %d\n"
               "wp\t\t: %s\n"
               "flags\t\t:",
               c->fdiv_bug ? "yes" : "no",
               c->hlt_works_ok ? "no" : "yes",
               c->f00f_bug ? "yes" : "no",
               c->coma_bug ? "yes" : "no",
               c->hard_math ? "yes" : "no",
               fpu_exception ? "yes" : "no",
               c->cpuid_level,
               c->wp_works_ok ? "yes" : "no");

    for ( i = 0 ; i < 32*NCAPINTS ; i++ )
        if ( test_bit(i, &c->x86_capability) &&
             x86_cap_flags[i] != NULL )
            seq_printf(m, " %s", x86_cap_flags[i]);
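
    /* BogoMIPS = loops_per_jiffy * HZ / 500000; the second expression
       is the same value scaled by 100, giving the two fractional digits. */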
    seq_printf(m, "\nbogomips\t: %lu.%02lu\n\n",
               c->loops_per_jiffy/(500000/HZ),
               (c->loops_per_jiffy/(5000/HZ)) % 100);
    return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
    return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
    ++*pos;
    return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
    start:	c_start,
    next:	c_next,
    stop:	c_stop,
    show:	show_cpuinfo,
};

unsigned long cpu_initialized __initdata = 0;

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __init cpu_init (void)
{
    int nr = smp_processor_id();

    if (test_and_set_bit(nr, &cpu_initialized)) {
        printk(KERN_WARNING "CPU#%d already initialized!\n", nr);
        for (;;) __sti();
    }
    printk(KERN_INFO "Initializing CPU#%d\n", nr);

    /*
     * set up and load the per-CPU TSS and LDT
     */
    atomic_inc(&init_mm.mm_count);
    current->active_mm = &init_mm;
    if(current->mm)
        BUG();
    enter_lazy_tlb(&init_mm, current, nr);
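
    /*
     * A native kernel would program the TSS's esp0 and load the LDT
     * itself; under Xen both are owned by the hypervisor, so they are
     * set with a single hypercall instead (here with a null LDT).
     */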
    HYPERVISOR_stack_and_ldt_switch(__KERNEL_DS, current->thread.esp0, 0);

    /* Force FPU initialization. */
    current->flags &= ~PF_USEDFPU;
    current->used_math = 0;
    stts();
}

/******************************************************************************
 * Time-to-die callback handling.
 */

static void time_to_die(int irq, void *unused, struct pt_regs *regs)
{
    extern void ctrl_alt_del(void);
    ctrl_alt_del();
}

static int __init setup_death_event(void)
{
    (void)request_irq(_EVENT_DIE, time_to_die, 0, "die", NULL);
    return 0;
}

__initcall(setup_death_event);