
view xen/arch/x86/cpu/common.c @ 15790:86a02b7148fa

x86: Kill Rise iDragon support.

Don't carry dead code needlessly: this is a family 5 CPU, which Xen
doesn't support. Perhaps other CPUs' files could use some cleanup in
this respect too, but there it would increase the delta against the
Linux origin of those files, whereas here the entire file can go away.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author kfraser@localhost.localdomain
date Wed Aug 29 11:34:01 2007 +0100 (2007-08-29)
parents e704430b5b32
children 5ccf8bbf8628

#include <xen/config.h>
#include <xen/init.h>
#include <xen/string.h>
#include <xen/delay.h>
#include <xen/smp.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>

#include "cpu.h"

#define tsc_disable 0
#define disable_pse 0

static int cachesize_override __devinitdata = -1;
static int disable_x86_fxsr __devinitdata = 0;
static int disable_x86_serial_nr __devinitdata = 0;

struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};

static void default_init(struct cpuinfo_x86 * c)
{
        /* Not much we can do here... */
        /* Check if at least it has cpuid */
        if (c->cpuid_level == -1) {
                /* No cpuid. It must be an ancient CPU */
                if (c->x86 == 4)
                        safe_strcpy(c->x86_model_id, "486");
                else if (c->x86 == 3)
                        safe_strcpy(c->x86_model_id, "386");
        }
}

static struct cpu_dev default_cpu = {
        .c_init = default_init,
};
static struct cpu_dev * this_cpu = &default_cpu;

integer_param("cachesize", cachesize_override);

int __devinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;
        char *p, *q;

        if (cpuid_eax(0x80000000) < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;

        /* Intel chips right-justify this string for some dumb reason;
           undo that brain damage */
        p = q = &c->x86_model_id[0];
        while ( *p == ' ' )
                p++;
        if ( p != q ) {
                while ( *p )
                        *q++ = *p++;
                while ( q <= &c->x86_model_id[48] )
                        *q++ = '\0';    /* Zero-pad the rest */
        }

        return 1;
}
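
/*
 * Editorial illustration (not part of the original file): the brand-string
 * reads above can be reproduced from user space with GCC/Clang's <cpuid.h>
 * helper __get_cpuid(); that helper and the hosted headers are assumptions
 * of this sketch, not Xen interfaces. Compiled out of the build:
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <cpuid.h>

static void print_brand_string(void)
{
        unsigned int regs[12];
        char brand[49];
        unsigned int i;

        /* Leaves 0x80000002..4 each return 16 bytes of the 48-byte string
           in EAX, EBX, ECX, EDX order, exactly as get_model_name() stores
           them into c->x86_model_id. */
        for (i = 0; i < 3; i++)
                __get_cpuid(0x80000002 + i, &regs[i * 4 + 0], &regs[i * 4 + 1],
                            &regs[i * 4 + 2], &regs[i * 4 + 3]);
        memcpy(brand, regs, sizeof(regs));
        brand[48] = '\0';
        printf("%s\n", brand);
}
#endif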

void __devinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, ecx, edx, l2size;

        n = cpuid_eax(0x80000000);

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size=(ecx>>24)+(edx>>24);
        }

        if (n < 0x80000006)     /* Some chips just have a large L1. */
                return;

        ecx = cpuid_ecx(0x80000006);
        l2size = ecx >> 16;

        /* do processor-specific cache resizing */
        if (this_cpu->c_size_cache)
                l2size = this_cpu->c_size_cache(c,l2size);

        /* Allow user to override all this if necessary. */
        if (cachesize_override != -1)
                l2size = cachesize_override;

        if ( l2size == 0 )
                return;         /* Again, no L2 cache is possible */

        c->x86_cache_size = l2size;

        printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
               l2size, ecx & 0xFF);
}
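
/*
 * Editorial illustration (not part of the original file): the field layout
 * behind the L2 printout above. For the AMD-defined leaf 0x80000006, ECX
 * carries the L2 size in KB in bits 31-16, the associativity code in bits
 * 15-12 and the line size in bytes in bits 7-0; display_cacheinfo() uses
 * the first and last of these. A minimal decoder sketch, compiled out:
 */
#if 0
static void decode_l2_info(unsigned int ecx, unsigned int *size_kb,
                           unsigned int *assoc_code, unsigned int *line_bytes)
{
        *size_kb    = ecx >> 16;
        *assoc_code = (ecx >> 12) & 0xf;
        *line_bytes = ecx & 0xff;
}
#endif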

/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() below doesn't set the model name; */
/* in particular, if CPUID levels 0x80000002..4 are supported, it isn't used. */

/* Look up a CPU model name in the vendor's model table. */
static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
{
        struct cpu_model_info *info;

        if ( c->x86_model >= 16 )
                return NULL;    /* Range check */

        if (!this_cpu)
                return NULL;

        info = this_cpu->c_models;

        while (info && info->family) {
                if (info->family == c->x86)
                        return info->model_names[c->x86_model];
                info++;
        }
        return NULL;            /* Not found */
}

static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
        char *v = c->x86_vendor_id;
        int i;

        for (i = 0; i < X86_VENDOR_NUM; i++) {
                if (cpu_devs[i]) {
                        if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
                            (cpu_devs[i]->c_ident[1] &&
                             !strcmp(v,cpu_devs[i]->c_ident[1]))) {
                                c->x86_vendor = i;
                                if (!early)
                                        this_cpu = cpu_devs[i];
                                break;
                        }
                }
        }
}

boolean_param("nofxsr", disable_x86_fxsr);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(unsigned long flag)
{
        unsigned long f1, f2;

        asm("pushf\n\t"
            "pushf\n\t"
            "pop %0\n\t"
            "mov %0,%1\n\t"
            "xor %2,%0\n\t"
            "push %0\n\t"
            "popf\n\t"
            "pushf\n\t"
            "pop %0\n\t"
            "popf\n\t"
            : "=&r" (f1), "=&r" (f2)
            : "ir" (flag));

        return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
static int __devinit have_cpuid_p(void)
{
        return flag_is_changeable_p(X86_EFLAGS_ID);
}
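
/*
 * Editorial illustration (not part of the original file): the masks passed
 * to flag_is_changeable_p(). X86_EFLAGS_ID (bit 21) can only be toggled if
 * the CPU implements CPUID, and X86_EFLAGS_AC (bit 18) can only be toggled
 * on a 486 or newer; identify_cpu() below relies on both facts. The values
 * below match the asm header definitions. Compiled out:
 */
#if 0
#define EXAMPLE_EFLAGS_AC 0x00040000 /* bit 18: alignment check */
#define EXAMPLE_EFLAGS_ID 0x00200000 /* bit 21: CPUID detection flag */

static int example_is_486_or_newer(void)
{
        /* A 386 hardwires AC to zero, so a changeable AC flag means >= 486. */
        return flag_is_changeable_p(EXAMPLE_EFLAGS_AC);
}
#endif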

/* Do minimum CPU detection early.
   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
   The others are not touched to avoid unwanted side effects.

   WARNING: this function is only called on the BP.  Don't add code here
   that is supposed to run on all CPUs. */
static void __init early_cpu_detect(void)
{
        struct cpuinfo_x86 *c = &boot_cpu_data;

        c->x86_cache_alignment = 32;

        if (!have_cpuid_p())
                return;

        /* Get vendor name */
        cpuid(0x00000000, &c->cpuid_level,
              (int *)&c->x86_vendor_id[0],
              (int *)&c->x86_vendor_id[8],
              (int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c, 1);

        c->x86 = 4;
        if (c->cpuid_level >= 0x00000001) {
                u32 junk, tfms, cap0, misc;
                cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
                c->x86 = (tfms >> 8) & 15;
                c->x86_model = (tfms >> 4) & 15;
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                c->x86_mask = tfms & 15;
                if (cap0 & (1<<19))
                        c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
                c->x86_capability[0] = cap0; /* Added for Xen bootstrap */
        }
}
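
/*
 * Editorial illustration (not part of the original file): the leaf-1 EAX
 * ("tfms") decoding performed above. Bits 3-0 are the stepping, 7-4 the
 * model, 11-8 the family, 19-16 the extended model and 27-20 the extended
 * family. Note that early_cpu_detect() folds the extended model in for any
 * family >= 6, while generic_identify() below does so only for family 0xf.
 * Worked example for eax = 0x000006fb (a Core 2 class part): family 6,
 * model 0xf, stepping 0xb. Compiled out:
 */
#if 0
static void decode_signature(u32 eax, unsigned int *family,
                             unsigned int *model, unsigned int *stepping)
{
        *family   = (eax >> 8) & 15;
        *model    = (eax >> 4) & 15;
        *stepping = eax & 15;
        if (*family == 0xf)
                *family += (eax >> 20) & 0xff;
        if (*family >= 0x6)
                *model += ((eax >> 16) & 0xf) << 4;
}
#endif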

void __devinit generic_identify(struct cpuinfo_x86 * c)
{
        u32 tfms, xlvl;
        int junk;

        if (have_cpuid_p()) {
                /* Get vendor name */
                cpuid(0x00000000, &c->cpuid_level,
                      (int *)&c->x86_vendor_id[0],
                      (int *)&c->x86_vendor_id[8],
                      (int *)&c->x86_vendor_id[4]);

                get_cpu_vendor(c, 0);
                /* Initialize the standard set of capabilities */
                /* Note that the vendor-specific code below might override */

                /* Intel-defined flags: level 0x00000001 */
                if ( c->cpuid_level >= 0x00000001 ) {
                        u32 capability, excap;
                        cpuid(0x00000001, &tfms, &junk, &excap, &capability);
                        c->x86_capability[0] = capability;
                        c->x86_capability[4] = excap;
                        c->x86 = (tfms >> 8) & 15;
                        c->x86_model = (tfms >> 4) & 15;
                        if (c->x86 == 0xf) {
                                c->x86 += (tfms >> 20) & 0xff;
                                c->x86_model += ((tfms >> 16) & 0xF) << 4;
                        }
                        c->x86_mask = tfms & 15;
                } else {
                        /* Have CPUID level 0 only - unheard of */
                        c->x86 = 4;
                }

                /* AMD-defined flags: level 0x80000001 */
                xlvl = cpuid_eax(0x80000000);
                if ( (xlvl & 0xffff0000) == 0x80000000 ) {
                        if ( xlvl >= 0x80000001 ) {
                                c->x86_capability[1] = cpuid_edx(0x80000001);
                                c->x86_capability[6] = cpuid_ecx(0x80000001);
                        }
                        if ( xlvl >= 0x80000004 )
                                get_model_name(c); /* Default name */
                }
        }

        early_intel_workaround(c);

#ifdef CONFIG_X86_HT
        phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
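
/*
 * Editorial illustration (not part of the original file): why the vendor id
 * above is filled at offsets 0, 8 and 4. CPUID leaf 0 spells the 12-byte
 * vendor string across EBX ("Genu"/"Auth"), EDX ("ineI"/"enti") and ECX
 * ("ntel"/"cAMD"), while the cpuid() helper takes its output pointers in
 * EAX, EBX, ECX, EDX order. An equivalent, more explicit sketch, compiled
 * out:
 */
#if 0
static void read_vendor_id(char id[13])
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(0x00000000, &eax, &ebx, &ecx, &edx);
        memcpy(id + 0, &ebx, 4);        /* "Genu" / "Auth" */
        memcpy(id + 4, &edx, 4);        /* "ineI" / "enti" */
        memcpy(id + 8, &ecx, 4);        /* "ntel" / "cAMD" */
        id[12] = '\0';
}
#endif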

static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
                /* Disable processor serial number */
                unsigned long lo,hi;
                rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
                lo |= 0x200000;
                wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
                printk(KERN_NOTICE "CPU serial number disabled.\n");
                clear_bit(X86_FEATURE_PN, c->x86_capability);

                /* Disabling the serial number may affect the cpuid level */
                c->cpuid_level = cpuid_eax(0);
        }
}

boolean_param("noserialnumber", disable_x86_serial_nr);

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __devinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->cpuid_level = -1;    /* CPUID not detected */
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_max_cores = 1;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        if (!have_cpuid_p()) {
                /* First of all, decide if this is a 486 or higher */
                /* It's a 486 if we can modify the AC flag */
                if ( flag_is_changeable_p(X86_EFLAGS_AC) )
                        c->x86 = 4;
                else
                        c->x86 = 3;
        }

        generic_identify(c);

#ifdef NOISY_CAPS
        printk(KERN_DEBUG "CPU: After generic identify, caps:");
        for (i = 0; i < NCAPINTS; i++)
                printk(" %08x", c->x86_capability[i]);
        printk("\n");
#endif

        if (this_cpu->c_identify) {
                this_cpu->c_identify(c);

#ifdef NOISY_CAPS
                printk(KERN_DEBUG "CPU: After vendor identify, caps:");
                for (i = 0; i < NCAPINTS; i++)
                        printk(" %08x", c->x86_capability[i]);
                printk("\n");
#endif
        }

        /*
         * Vendor-specific initialization.  In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        if (this_cpu->c_init)
                this_cpu->c_init(c);

        /* Disable the PN if appropriate */
        squash_the_stupid_serial_number(c);

        /*
         * The vendor-specific functions might have changed features.  Now
         * we do "generic changes."
         */

        /* TSC disabled? */
        if ( tsc_disable )
                clear_bit(X86_FEATURE_TSC, c->x86_capability);

        /* FXSR disabled? */
        if (disable_x86_fxsr) {
                clear_bit(X86_FEATURE_FXSR, c->x86_capability);
                clear_bit(X86_FEATURE_XMM, c->x86_capability);
        }

        if (disable_pse)
                clear_bit(X86_FEATURE_PSE, c->x86_capability);

        /* If the model name is still unset, do table lookup. */
        if ( !c->x86_model_id[0] ) {
                char *p;
                p = table_lookup_model(c);
                if ( p )
                        safe_strcpy(c->x86_model_id, p);
                else
                        /* Last resort... */
                        snprintf(c->x86_model_id, sizeof(c->x86_model_id),
                                 "%02x/%02x", c->x86_vendor, c->x86_model);
        }

        /* Now the feature flags better reflect actual CPU features! */

#ifdef NOISY_CAPS
        printk(KERN_DEBUG "CPU: After all inits, caps:");
        for (i = 0; i < NCAPINTS; i++)
                printk(" %08x", c->x86_capability[i]);
        printk("\n");
#endif

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs.  The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if ( c != &boot_cpu_data ) {
                /* AND the already accumulated flags with these */
                for ( i = 0 ; i < NCAPINTS ; i++ )
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

        /* Init Machine Check Exception if available. */
        mcheck_init(c);

#if 0
        if (c == &boot_cpu_data)
                sysenter_setup();
        enable_sep_cpu();
#endif

        if (c == &boot_cpu_data)
                mtrr_bp_init();
        else
                mtrr_ap_init();
}
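
/*
 * Editorial illustration (not part of the original file): the effect of the
 * "AND the already accumulated flags" loop at the end of identify_cpu(). A
 * feature bit survives in boot_cpu_data.x86_capability[] only if every CPU
 * that has run identify_cpu() advertised it, so mixed systems end up with
 * the common subset. Compiled out:
 */
#if 0
static void intersect_capabilities(u32 *common, const u32 *percpu,
                                   unsigned int words)
{
        unsigned int i;

        for (i = 0; i < words; i++)
                common[i] &= percpu[i];
}
#endif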

#ifdef CONFIG_X86_HT
/* cpuid returns the value latched in the HW at reset, not the APIC ID
 * register's value.  For any box whose BIOS changes APIC IDs, like
 * clustered APIC systems, we must use hard_smp_processor_id.
 *
 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
 */
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
        return hard_smp_processor_id() >> index_msb;
}

void __devinit detect_ht(struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;
        int cpu = smp_processor_id();

        cpuid(1, &eax, &ebx, &ecx, &edx);

        c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);

        if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
                return;

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1 ) {

                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
                phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);

                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       phys_proc_id[cpu]);

                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings);

                core_bits = get_count_order(c->x86_max_cores);

                cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
                                   ((1 << core_bits) - 1);

                if (c->x86_max_cores > 1)
                        printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                               cpu_core_id[cpu]);
        }
}
#endif
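
/*
 * Editorial illustration (not part of the original file): the topology math
 * in detect_ht(), worked through for a hypothetical package with 2 cores of
 * 2 threads each (EBX[23:16] = 4, c->x86_max_cores = 2) and an initial APIC
 * ID of 6. get_count_order(n) is ceil(log2(n)), so:
 *   package id = 6 >> get_count_order(4) = 6 >> 2 = 1
 *   threads per core = 4 / 2 = 2, giving 1 thread bit and 1 core bit
 *   core id = (6 >> 1) & ((1 << 1) - 1) = 1
 * i.e. package 1, core 1, thread 0. Compiled out:
 */
#if 0
static unsigned int example_package_id(unsigned int apicid,
                                       unsigned int logical_per_package)
{
        return apicid >> get_count_order(logical_per_package);
}
#endif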

void __devinit print_cpu_info(struct cpuinfo_x86 *c)
{
        char *vendor = NULL;

        if (c->x86_vendor < X86_VENDOR_NUM)
                vendor = this_cpu->c_vendor;
        else if (c->cpuid_level >= 0)
                vendor = c->x86_vendor_id;

        if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
                printk("%s ", vendor);

        if (!c->x86_model_id[0])
                printk("%d86", c->x86);
        else
                printk("%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(" stepping %02x\n", c->x86_mask);
        else
                printk("\n");
}

cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE;

/* This is hacky. :)
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called implicitly
 * via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */

extern int intel_cpu_init(void);
extern int cyrix_init_cpu(void);
extern int nsc_init_cpu(void);
extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);

void __init early_cpu_init(void)
{
        intel_cpu_init();
        amd_init_cpu();
#ifdef CONFIG_X86_32
        cyrix_init_cpu();
        nsc_init_cpu();
        centaur_init_cpu();
        transmeta_init_cpu();
#endif
        early_cpu_detect();
}

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __devinit cpu_init(void)
{
        int cpu = smp_processor_id();
        struct tss_struct *t = &init_tss[cpu];
        char gdt_load[10];

        if (cpu_test_and_set(cpu, cpu_initialized)) {
                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
                for (;;) local_irq_enable();
        }
        printk(KERN_INFO "Initializing CPU#%d\n", cpu);

        *(unsigned short *)(&gdt_load[0]) = LAST_RESERVED_GDT_BYTE;
        *(unsigned long *)(&gdt_load[2]) = GDT_VIRT_START(current);
        __asm__ __volatile__ ( "lgdt %0" : "=m" (gdt_load) );

        /* No nested task. */
        __asm__("pushf ; andw $0xbfff,(%"__OP"sp) ; popf");

        /* Ensure FPU gets initialised for each domain. */
        stts();

        /* Set up and load the per-CPU TSS and LDT. */
        t->bitmap = IOBMP_INVALID_OFFSET;
#if defined(CONFIG_X86_32)
        t->ss0  = __HYPERVISOR_DS;
        t->esp0 = get_stack_bottom();
        if ( supervisor_mode_kernel && cpu_has_sep )
                wrmsr(MSR_IA32_SYSENTER_ESP, &t->esp1, 0);
#elif defined(CONFIG_X86_64)
        /* Bottom-of-stack must be 16-byte aligned! */
        BUG_ON((get_stack_bottom() & 15) != 0);
        t->rsp0 = get_stack_bottom();
#endif
        set_tss_desc(cpu,t);
        load_TR(cpu);
        __asm__ __volatile__ ( "lldt %%ax" : : "a" (0) );

        /* Clear all 6 debug registers: */
#define CD(register) __asm__("mov %0,%%db" #register ::"r"(0UL) );
        CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
#undef CD

        /* Install correct page table. */
        write_ptbase(current);
}
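
/*
 * Editorial illustration (not part of the original file): the 10-byte
 * gdt_load[] buffer handed to LGDT above is the architectural pseudo-
 * descriptor, a 16-bit limit followed by the linear base address (32 bits
 * on i386, 64 bits in long mode, hence the 10-byte worst case). An
 * equivalent declaration, compiled out:
 */
#if 0
struct gdt_pseudo_descriptor {
        unsigned short limit;   /* LAST_RESERVED_GDT_BYTE */
        unsigned long  base;    /* GDT_VIRT_START(current) */
} __attribute__((packed));
#endif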

#ifdef CONFIG_HOTPLUG_CPU
void __cpuinit cpu_uninit(void)
{
        int cpu = raw_smp_processor_id();
        cpu_clear(cpu, cpu_initialized);
}
#endif