ia64/xen-unstable: xen/arch/x86/cpu/common.c @ 6552:a9873d384da4

Merge.

author      adsharma@los-vmm.sc.intel.com
date        Thu Aug 25 12:24:48 2005 -0700 (2005-08-25)
parents     112d44270733 fa0754a9f64f
children    dfaf788ab18c
#include <xen/config.h>
#include <xen/init.h>
#include <xen/string.h>
#include <xen/delay.h>
#include <xen/smp.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>

#include "cpu.h"

#define tsc_disable 0
#define disable_pse 0

static int cachesize_override __initdata = -1;
static int disable_x86_fxsr __initdata = 0;
static int disable_x86_serial_nr __initdata = 1;

struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};

extern void mcheck_init(struct cpuinfo_x86 *c);
static void default_init(struct cpuinfo_x86 * c)
{
        /* Not much we can do here... */
        /* Check if at least it has cpuid */
        if (c->cpuid_level == -1) {
                /* No cpuid. It must be an ancient CPU */
                if (c->x86 == 4)
                        strcpy(c->x86_model_id, "486");
                else if (c->x86 == 3)
                        strcpy(c->x86_model_id, "386");
        }
}

static struct cpu_dev default_cpu = {
        .c_init = default_init,
};
static struct cpu_dev * this_cpu = &default_cpu;
int __init get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;
        char *p, *q;

        if (cpuid_eax(0x80000000) < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;

        /* Intel chips right-justify this string for some dumb reason;
           undo that brain damage */
        p = q = &c->x86_model_id[0];
        while ( *p == ' ' )
                p++;
        if ( p != q ) {
                while ( *p )
                        *q++ = *p++;
                while ( q <= &c->x86_model_id[48] )
                        *q++ = '\0';    /* Zero-pad the rest */
        }

        return 1;
}
void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, ecx, edx, l2size;

        n = cpuid_eax(0x80000000);

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx>>24)+(edx>>24);
        }

        if (n < 0x80000006)     /* Some chips just have a large L1. */
                return;

        ecx = cpuid_ecx(0x80000006);
        l2size = ecx >> 16;

        /* do processor-specific cache resizing */
        if (this_cpu->c_size_cache)
                l2size = this_cpu->c_size_cache(c, l2size);

        /* Allow user to override all this if necessary. */
        if (cachesize_override != -1)
                l2size = cachesize_override;

        if ( l2size == 0 )
                return;         /* Again, no L2 cache is possible */

        c->x86_cache_size = l2size;

        printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
               l2size, ecx & 0xFF);
}
/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() below doesn't set the model name; */
/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */

/* Look up CPU names by table lookup. */
static char __init *table_lookup_model(struct cpuinfo_x86 *c)
{
        struct cpu_model_info *info;

        if ( c->x86_model >= 16 )
                return NULL;    /* Range check */

        if (!this_cpu)
                return NULL;

        info = this_cpu->c_models;

        while (info && info->family) {
                if (info->family == c->x86)
                        return info->model_names[c->x86_model];
                info++;
        }
        return NULL;            /* Not found */
}
void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
        char *v = c->x86_vendor_id;
        int i;

        for (i = 0; i < X86_VENDOR_NUM; i++) {
                if (cpu_devs[i]) {
                        if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
                            (cpu_devs[i]->c_ident[1] &&
                             !strcmp(v, cpu_devs[i]->c_ident[1]))) {
                                c->x86_vendor = i;
                                if (!early)
                                        this_cpu = cpu_devs[i];
                                break;
                        }
                }
        }
}
static int __init x86_fxsr_setup(char * s)
{
        disable_x86_fxsr = 1;
        return 1;
}
__setup("nofxsr", x86_fxsr_setup);
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(unsigned long flag)
{
        unsigned long f1, f2;

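        /*
         * Toggle the requested bit in a copy of EFLAGS, write it back, and
         * read EFLAGS again: if the bit can be flipped, the flag is
         * changeable.  The ID flag (bit 21) can only be toggled on CPUs
         * that implement the CPUID instruction.
         */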
171 asm("pushf\n\t"
172 "pushf\n\t"
173 "pop %0\n\t"
174 "mov %0,%1\n\t"
175 "xor %2,%0\n\t"
176 "push %0\n\t"
177 "popf\n\t"
178 "pushf\n\t"
179 "pop %0\n\t"
180 "popf\n\t"
181 : "=&r" (f1), "=&r" (f2)
182 : "ir" (flag));
184 return ((f1^f2) & flag) != 0;
185 }
188 /* Probe for the CPUID instruction */
189 static int __init have_cpuid_p(void)
190 {
191 return flag_is_changeable_p(X86_EFLAGS_ID);
192 }
/* Do minimum CPU detection early.
   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
   The others are not touched to avoid unwanted side effects. */
static void __init early_cpu_detect(void)
{
        struct cpuinfo_x86 *c = &boot_cpu_data;

        c->x86_cache_alignment = 32;

        if (!have_cpuid_p())
                return;

        /* Get vendor name */
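        /* CPUID leaf 0 returns the 12-byte vendor string in EBX, EDX, ECX,
           hence the 0/8/4 byte offsets into x86_vendor_id below. */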
        cpuid(0x00000000, &c->cpuid_level,
              (int *)&c->x86_vendor_id[0],
              (int *)&c->x86_vendor_id[8],
              (int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c, 1);

        c->x86 = 4;
        if (c->cpuid_level >= 0x00000001) {
                u32 junk, tfms, cap0, misc;
                cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
                c->x86 = (tfms >> 8) & 15;
                c->x86_model = (tfms >> 4) & 15;
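                /* Family 0xf uses the extended CPUID fields:
                   family += EAX[27:20], model |= EAX[19:16] << 4. */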
                if (c->x86 == 0xf) {
                        c->x86 += (tfms >> 20) & 0xff;
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                }
                c->x86_mask = tfms & 15;
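                /* EDX bit 19 is CLFLUSH; EBX[15:8] then gives the CLFLUSH
                   line size in 8-byte units. */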
                if (cap0 & (1<<19))
                        c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
                c->x86_capability[0] = cap0; /* Added for Xen bootstrap */
        }

        early_intel_workaround(c);

#ifdef CONFIG_X86_HT
        phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
void __init generic_identify(struct cpuinfo_x86 * c)
{
        u32 tfms, xlvl;
        int junk;

        if (have_cpuid_p()) {
                /* Get vendor name */
                cpuid(0x00000000, &c->cpuid_level,
                      (int *)&c->x86_vendor_id[0],
                      (int *)&c->x86_vendor_id[8],
                      (int *)&c->x86_vendor_id[4]);

                get_cpu_vendor(c, 0);
                /* Initialize the standard set of capabilities */
                /* Note that the vendor-specific code below might override */

                /* Intel-defined flags: level 0x00000001 */
                if ( c->cpuid_level >= 0x00000001 ) {
                        u32 capability, excap;
                        cpuid(0x00000001, &tfms, &junk, &excap, &capability);
                        c->x86_capability[0] = capability;
                        c->x86_capability[4] = excap;
                        c->x86 = (tfms >> 8) & 15;
                        c->x86_model = (tfms >> 4) & 15;
                        if (c->x86 == 0xf) {
                                c->x86 += (tfms >> 20) & 0xff;
                                c->x86_model += ((tfms >> 16) & 0xF) << 4;
                        }
                        c->x86_mask = tfms & 15;
                } else {
                        /* Have CPUID level 0 only - unheard of */
                        c->x86 = 4;
                }

                /* AMD-defined flags: level 0x80000001 */
                xlvl = cpuid_eax(0x80000000);
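                /* Only trust the extended levels if leaf 0x80000000 reports a
                   value in the 0x8000xxxx range; CPUs without extended CPUID
                   may return junk here. */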
                if ( (xlvl & 0xffff0000) == 0x80000000 ) {
                        if ( xlvl >= 0x80000001 ) {
                                c->x86_capability[1] = cpuid_edx(0x80000001);
                                c->x86_capability[6] = cpuid_ecx(0x80000001);
                        }
                        if ( xlvl >= 0x80000004 )
                                get_model_name(c); /* Default name */
                }
        }
}
static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
                /* Disable processor serial number */
                unsigned long lo, hi;
                rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
                lo |= 0x200000;
                wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
                printk(KERN_NOTICE "CPU serial number disabled.\n");
                clear_bit(X86_FEATURE_PN, c->x86_capability);

                /* Disabling the serial number may affect the cpuid level */
                c->cpuid_level = cpuid_eax(0);
        }
}
static int __init x86_serial_nr_setup(char *s)
{
        disable_x86_serial_nr = 0;
        return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->cpuid_level = -1;            /* CPUID not detected */
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0';     /* Unset */
        c->x86_model_id[0] = '\0';      /* Unset */
        c->x86_num_cores = 1;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        if (!have_cpuid_p()) {
                /* First of all, decide if this is a 486 or higher */
                /* It's a 486 if we can modify the AC flag */
                if ( flag_is_changeable_p(X86_EFLAGS_AC) )
                        c->x86 = 4;
                else
                        c->x86 = 3;
        }

        generic_identify(c);

#ifdef NOISY_CAPS
        printk(KERN_DEBUG "CPU: After generic identify, caps:");
        for (i = 0; i < NCAPINTS; i++)
                printk(" %08x", c->x86_capability[i]);
        printk("\n");
#endif

        if (this_cpu->c_identify) {
                this_cpu->c_identify(c);
#ifdef NOISY_CAPS
                printk(KERN_DEBUG "CPU: After vendor identify, caps:");
                for (i = 0; i < NCAPINTS; i++)
                        printk(" %08x", c->x86_capability[i]);
                printk("\n");
#endif
        }

        /*
         * Vendor-specific initialization. In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        if (this_cpu->c_init)
                this_cpu->c_init(c);

        /* Disable the PN if appropriate */
        squash_the_stupid_serial_number(c);

        /*
         * The vendor-specific functions might have changed features.  Now
         * we do "generic changes."
         */

        /* TSC disabled? */
        if ( tsc_disable )
                clear_bit(X86_FEATURE_TSC, c->x86_capability);

        /* FXSR disabled? */
        if (disable_x86_fxsr) {
                clear_bit(X86_FEATURE_FXSR, c->x86_capability);
                clear_bit(X86_FEATURE_XMM, c->x86_capability);
        }

        if (disable_pse)
                clear_bit(X86_FEATURE_PSE, c->x86_capability);

        /* If the model name is still unset, do table lookup. */
        if ( !c->x86_model_id[0] ) {
                char *p;
                p = table_lookup_model(c);
                if ( p )
                        strcpy(c->x86_model_id, p);
                else
                        /* Last resort... */
                        sprintf(c->x86_model_id, "%02x/%02x",
                                c->x86_vendor, c->x86_model);
        }

        /* Now the feature flags better reflect actual CPU features! */
#ifdef NOISY_CAPS
        printk(KERN_DEBUG "CPU: After all inits, caps:");
        for (i = 0; i < NCAPINTS; i++)
                printk(" %08x", c->x86_capability[i]);
        printk("\n");
#endif

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs.  The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if ( c != &boot_cpu_data ) {
                /* AND the already accumulated flags with these */
                for ( i = 0 ; i < NCAPINTS ; i++ )
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

        /* Init Machine Check Exception if available. */
#ifdef CONFIG_X86_MCE
        mcheck_init(c);
#endif
}

#ifdef CONFIG_X86_HT
void __init detect_ht(struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;
        int index_msb, tmp;
        int cpu = smp_processor_id();

        if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
                return;

        cpuid(1, &eax, &ebx, &ecx, &edx);
        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1 ) {
                index_msb = 31;

                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

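                /* Compute ceil(log2(smp_num_siblings)): the number of low
                   initial-APIC-ID bits that identify a sibling, i.e. the
                   shift phys_pkg_id() uses to extract the package ID. */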
                tmp = smp_num_siblings;
                while ((tmp & 0x80000000) == 0) {
                        tmp <<= 1;
                        index_msb--;
                }
                if (smp_num_siblings & (smp_num_siblings - 1))
                        index_msb++;
                phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);

                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       phys_proc_id[cpu]);

                smp_num_siblings = smp_num_siblings / c->x86_num_cores;

                tmp = smp_num_siblings;
                index_msb = 31;
                while ((tmp & 0x80000000) == 0) {
                        tmp <<= 1;
                        index_msb--;
                }

                if (smp_num_siblings & (smp_num_siblings - 1))
                        index_msb++;

                cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);

                if (c->x86_num_cores > 1)
                        printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                               cpu_core_id[cpu]);
        }
}
#endif

void __init print_cpu_info(struct cpuinfo_x86 *c)
{
        char *vendor = NULL;

        if (c->x86_vendor < X86_VENDOR_NUM)
                vendor = this_cpu->c_vendor;
        else if (c->cpuid_level >= 0)
                vendor = c->x86_vendor_id;

        if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
                printk("%s ", vendor);

        if (!c->x86_model_id[0])
                printk("%d86", c->x86);
        else
                printk("%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(" stepping %02x\n", c->x86_mask);
        else
                printk("\n");
}

cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;

/* This is hacky. :)
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called implicitly
 * via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */

extern int intel_cpu_init(void);
extern int cyrix_init_cpu(void);
extern int nsc_init_cpu(void);
extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);
extern int rise_init_cpu(void);

void __init early_cpu_init(void)
{
        intel_cpu_init();
        amd_init_cpu();
#ifdef CONFIG_X86_32
        cyrix_init_cpu();
        nsc_init_cpu();
        centaur_init_cpu();
        transmeta_init_cpu();
        rise_init_cpu();
#endif
        early_cpu_detect();
}

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __init cpu_init (void)
{
        int cpu = smp_processor_id();
        struct tss_struct *t = &init_tss[cpu];
        char gdt_load[10];

        if (cpu_test_and_set(cpu, cpu_initialized)) {
                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
                for (;;) local_irq_enable();
        }
        printk(KERN_INFO "Initializing CPU#%d\n", cpu);

        if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
                clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

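        /* Build the lgdt pseudo-descriptor: a 16-bit limit followed by the
           virtual base address of this CPU's GDT mapping. */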
        *(unsigned short *)(&gdt_load[0]) = LAST_RESERVED_GDT_BYTE;
        *(unsigned long *)(&gdt_load[2]) = GDT_VIRT_START(current);
        __asm__ __volatile__ ( "lgdt %0" : "=m" (gdt_load) );

        /* No nested task. */
        __asm__("pushf ; andw $0xbfff,(%"__OP"sp) ; popf");

        /* Ensure FPU gets initialised for each domain. */
        stts();

        /* Set up and load the per-CPU TSS and LDT. */
        t->bitmap = IOBMP_INVALID_OFFSET;
#if defined(CONFIG_X86_32)
        t->ss0  = __HYPERVISOR_DS;
        t->esp0 = get_stack_bottom();
#elif defined(CONFIG_X86_64)
        /* Bottom-of-stack must be 16-byte aligned! */
        BUG_ON((get_stack_bottom() & 15) != 0);
        t->rsp0 = get_stack_bottom();
#endif
        set_tss_desc(cpu, t);
        load_TR(cpu);
        __asm__ __volatile__ ( "lldt %%ax" : : "a" (0) );

        /* Clear all 6 debug registers: */
#define CD(register) __asm__("mov %0,%%db" #register ::"r"(0UL) );
        CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
#undef CD

        /* Install correct page table. */
        write_ptbase(current);
}