
xen/arch/x86/cpu/amd.c @ 19835:edfdeb150f27

Fix build system to detect udev > version 124

udev removed the udevinfo symlink from versions higher than 123, and
Xen's build system could not detect whether a udev of the required
version was in place.

Signed-off-by: Marc-A. Dahlhaus <mad@wol.de>
Author:  Keir Fraser <keir.fraser@citrix.com>
Date:    Thu Jun 25 13:02:37 2009 +0100
Parent:  44fe7ad6fee8

#include <xen/config.h>
#include <xen/init.h>
#include <xen/bitops.h>
#include <xen/mm.h>
#include <xen/smp.h>
#include <xen/pci.h>
#include <asm/io.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/hvm/support.h>

#include "cpu.h"
#include "amd.h"

void start_svm(struct cpuinfo_x86 *c);
/*
 * Pre-canned values for overriding the CPUID features
 * and extended features masks.
 *
 * Currently supported processors:
 *
 * "fam_0f_rev_c"
 * "fam_0f_rev_d"
 * "fam_0f_rev_e"
 * "fam_0f_rev_f"
 * "fam_0f_rev_g"
 * "fam_10_rev_b"
 * "fam_10_rev_c"
 * "fam_11_rev_b"
 */
static char opt_famrev[14];
string_param("cpuid_mask_cpu", opt_famrev);
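
/*
 * Usage sketch (illustrative, assuming the usual Xen "option=value"
 * boot-parameter syntax): booting with "cpuid_mask_cpu=fam_0f_rev_f"
 * caps the advertised feature set at that of a family-0Fh revision-F
 * processor.
 */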

/* Finer-grained CPUID feature control. */
static unsigned int opt_cpuid_mask_ecx, opt_cpuid_mask_edx;
integer_param("cpuid_mask_ecx", opt_cpuid_mask_ecx);
integer_param("cpuid_mask_edx", opt_cpuid_mask_edx);
static unsigned int opt_cpuid_mask_ext_ecx, opt_cpuid_mask_ext_edx;
integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
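
/*
 * Usage sketch (illustrative values, not from the original source): with
 * the masks understood as being ANDed into the reported features, booting
 * with "cpuid_mask_edx=0xfdffffff" would hide CPUID.1:EDX bit 25 (SSE).
 */

/*
 * The magic value loaded into %edi below is the 32-bit password that
 * pre-family-10h AMD parts require for writes to these MSRs (see the
 * comment in set_cpuidmask()).
 */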
static inline void wrmsr_amd(unsigned int index, unsigned int lo,
                             unsigned int hi)
{
        asm volatile (
                "wrmsr"
                : /* No outputs */
                : "c" (index), "a" (lo),
                  "d" (hi), "D" (0x9c5a203a)
        );
}

/*
 * Mask the features and extended features returned by CPUID. Parameters are
 * set from the boot line via two methods:
 *
 * 1) Specific processor revision string
 * 2) User-defined masks
 *
 * The processor revision string parameter has precedence.
 */
static void __devinit set_cpuidmask(struct cpuinfo_x86 *c)
{
        static unsigned int feat_ecx, feat_edx;
        static unsigned int extfeat_ecx, extfeat_edx;
        static enum { not_parsed, no_mask, set_mask } status;

        if (status == no_mask)
                return;

        if (status == set_mask)
                goto setmask;

        ASSERT((status == not_parsed) && (smp_processor_id() == 0));
        status = no_mask;

        if (opt_cpuid_mask_ecx | opt_cpuid_mask_edx |
            opt_cpuid_mask_ext_ecx | opt_cpuid_mask_ext_edx) {
                feat_ecx = opt_cpuid_mask_ecx ? : ~0U;
                feat_edx = opt_cpuid_mask_edx ? : ~0U;
                extfeat_ecx = opt_cpuid_mask_ext_ecx ? : ~0U;
                extfeat_edx = opt_cpuid_mask_ext_edx ? : ~0U;
        } else if (*opt_famrev == '\0') {
                return;
        } else if (!strcmp(opt_famrev, "fam_0f_rev_c")) {
                feat_ecx = AMD_FEATURES_K8_REV_C_ECX;
                feat_edx = AMD_FEATURES_K8_REV_C_EDX;
                extfeat_ecx = AMD_EXTFEATURES_K8_REV_C_ECX;
                extfeat_edx = AMD_EXTFEATURES_K8_REV_C_EDX;
        } else if (!strcmp(opt_famrev, "fam_0f_rev_d")) {
                feat_ecx = AMD_FEATURES_K8_REV_D_ECX;
                feat_edx = AMD_FEATURES_K8_REV_D_EDX;
                extfeat_ecx = AMD_EXTFEATURES_K8_REV_D_ECX;
                extfeat_edx = AMD_EXTFEATURES_K8_REV_D_EDX;
        } else if (!strcmp(opt_famrev, "fam_0f_rev_e")) {
                feat_ecx = AMD_FEATURES_K8_REV_E_ECX;
                feat_edx = AMD_FEATURES_K8_REV_E_EDX;
                extfeat_ecx = AMD_EXTFEATURES_K8_REV_E_ECX;
                extfeat_edx = AMD_EXTFEATURES_K8_REV_E_EDX;
        } else if (!strcmp(opt_famrev, "fam_0f_rev_f")) {
                feat_ecx = AMD_FEATURES_K8_REV_F_ECX;
                feat_edx = AMD_FEATURES_K8_REV_F_EDX;
                extfeat_ecx = AMD_EXTFEATURES_K8_REV_F_ECX;
                extfeat_edx = AMD_EXTFEATURES_K8_REV_F_EDX;
        } else if (!strcmp(opt_famrev, "fam_0f_rev_g")) {
                feat_ecx = AMD_FEATURES_K8_REV_G_ECX;
                feat_edx = AMD_FEATURES_K8_REV_G_EDX;
                extfeat_ecx = AMD_EXTFEATURES_K8_REV_G_ECX;
                extfeat_edx = AMD_EXTFEATURES_K8_REV_G_EDX;
        } else if (!strcmp(opt_famrev, "fam_10_rev_b")) {
                feat_ecx = AMD_FEATURES_FAM10h_REV_B_ECX;
                feat_edx = AMD_FEATURES_FAM10h_REV_B_EDX;
                extfeat_ecx = AMD_EXTFEATURES_FAM10h_REV_B_ECX;
                extfeat_edx = AMD_EXTFEATURES_FAM10h_REV_B_EDX;
        } else if (!strcmp(opt_famrev, "fam_10_rev_c")) {
                feat_ecx = AMD_FEATURES_FAM10h_REV_C_ECX;
                feat_edx = AMD_FEATURES_FAM10h_REV_C_EDX;
                extfeat_ecx = AMD_EXTFEATURES_FAM10h_REV_C_ECX;
                extfeat_edx = AMD_EXTFEATURES_FAM10h_REV_C_EDX;
        } else if (!strcmp(opt_famrev, "fam_11_rev_b")) {
                feat_ecx = AMD_FEATURES_FAM11h_REV_B_ECX;
                feat_edx = AMD_FEATURES_FAM11h_REV_B_EDX;
                extfeat_ecx = AMD_EXTFEATURES_FAM11h_REV_B_ECX;
                extfeat_edx = AMD_EXTFEATURES_FAM11h_REV_B_EDX;
        } else {
                printk("Invalid processor string: %s\n", opt_famrev);
                printk("CPUID will not be masked\n");
                return;
        }

        status = set_mask;
        printk("Writing CPUID feature mask ECX:EDX -> %08Xh:%08Xh\n",
               feat_ecx, feat_edx);
        printk("Writing CPUID extended feature mask ECX:EDX -> %08Xh:%08Xh\n",
               extfeat_ecx, extfeat_edx);

 setmask:
        /* FIXME check if processor supports CPUID masking */
        /* AMD processors prior to family 10h required a 32-bit password */
        if (c->x86 >= 0x10) {
                wrmsr(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx);
                wrmsr(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx);
        } else if (c->x86 == 0x0f) {
                wrmsr_amd(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx);
                wrmsr_amd(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx);
        }
}
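
/*
 * The mask values and parse status above are static: the boot line is
 * parsed once, on the boot CPU (the ASSERT enforces this), and secondary
 * CPUs take the setmask path to program the already-computed values into
 * their own MSRs.
 */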

/*
 * amd_flush_filter={on,off}. Forcibly enable or disable the TLB flush
 * filter on AMD 64-bit processors.
 */
static int flush_filter_force;
static void flush_filter(char *s)
{
        if (!strcmp(s, "off"))
                flush_filter_force = -1;
        if (!strcmp(s, "on"))
                flush_filter_force = 1;
}
custom_param("amd_flush_filter", flush_filter);
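
/*
 * Usage sketch: "amd_flush_filter=off" on the Xen command line makes the
 * family-0Fh code in init_amd() below set HWCR.FFDIS even where firmware
 * left the filter enabled.
 */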

#define num_physpages 0

/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See http://www.multimania.com/poulot/k6bug.html
 * http://www.amd.com/K6/k6docs/revgd.html
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

extern void vide(void);
__asm__(".text\n.align 4\nvide: ret");
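
/*
 * vide() is a minimal indirect-call target: the timing loop in init_amd()
 * below calls it K6_BUG_LOOP times through a function pointer and treats
 * more than 20 TSC cycles per call as evidence of a buggy stepping.
 */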

/* Can this system suffer from TSC drift due to C1 clock ramping? */
static int c1_ramping_may_cause_clock_drift(struct cpuinfo_x86 *c)
{
        if (c->x86 < 0xf) {
                /*
                 * TSC drift doesn't exist on 7th Gen or less
                 * However, OS still needs to consider effects
                 * of P-state changes on TSC
                 */
                return 0;
        } else if (cpuid_edx(0x80000007) & (1<<8)) {
                /*
                 * CPUID.AdvPowerMgmtInfo.TscInvariant
                 * EDX bit 8, 8000_0007
                 * Invariant TSC on 8th Gen or newer, use it
                 * (assume all cores have invariant TSC)
                 */
                return 0;
        }
        return 1;
}

/*
 * Disable C1-Clock ramping if enabled in PMM7.CpuLowPwrEnh on 8th-generation
 * cores only. Assume BIOS has setup all Northbridges equivalently.
 */
static void disable_c1_ramping(void)
{
        u8 pmm7;
        int node, nr_nodes;

        /* Read the number of nodes from the first Northbridge. */
        nr_nodes = ((pci_conf_read32(0, 0x18, 0x0, 0x60)>>4)&0x07)+1;
        for (node = 0; node < nr_nodes; node++) {
                /* PMM7: bus=0, dev=0x18+node, function=0x3, register=0x87. */
                pmm7 = pci_conf_read8(0, 0x18+node, 0x3, 0x87);
                /* Invalid read means we've updated every Northbridge. */
                if (pmm7 == 0xFF)
                        break;
                pmm7 &= 0xFC; /* clear pmm7[1:0] */
                pci_conf_write8(0, 0x18+node, 0x3, 0x87, pmm7);
                printk ("AMD: Disabling C1 Clock Ramping Node #%x\n", node);
        }
}
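
/*
 * The loop above relies on the AMD convention that node N's Northbridge
 * sits at bus 0, device 0x18+N; a config read of all-ones (0xFF) means no
 * device answered, i.e. every present Northbridge has been updated.
 */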

int force_mwait __cpuinitdata;

static void disable_c1e(void *unused)
{
        u32 lo, hi;

        /*
         * Disable C1E mode, as the APIC timer stops in that mode.
         * The MSR does not exist in all FamilyF CPUs (only Rev F and above),
         * but we safely catch the #GP in that case.
         */
        if ((rdmsr_safe(MSR_K8_ENABLE_C1E, lo, hi) == 0) &&
            (lo & (3u << 27)) &&
            (wrmsr_safe(MSR_K8_ENABLE_C1E, lo & ~(3u << 27), hi) != 0))
                printk(KERN_ERR "Failed to disable C1E on CPU#%u (%08x)\n",
                       smp_processor_id(), lo);
}
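
/*
 * The two bits cleared above (bits 27 and 28 of MSR_K8_ENABLE_C1E, per
 * common K8 documentation the "Interrupt Pending and CMP Halt" register,
 * C001_0055) gate C1E entry; clearing both keeps the APIC timer running.
 */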

static void check_disable_c1e(unsigned int port, u8 value)
{
        /* C1E is sometimes enabled during entry to ACPI mode. */
        if ((port == acpi_smi_cmd) && (value == acpi_enable_value))
                on_each_cpu(disable_c1e, NULL, 1);
}

static void __devinit init_amd(struct cpuinfo_x86 *c)
{
        u32 l, h;
        int mbytes = num_physpages >> (20-PAGE_SHIFT);
        int r;

#ifdef CONFIG_SMP
        unsigned long long value;

        /* Disable TLB flush filter by setting HWCR.FFDIS on K8
         * bit 6 of msr C001_0015
         *
         * Errata 63 for SH-B3 steppings
         * Errata 122 for all steppings (F+ have it disabled by default)
         */
        if (c->x86 == 15) {
                rdmsrl(MSR_K7_HWCR, value);
                value |= 1 << 6;
                wrmsrl(MSR_K7_HWCR, value);
        }
#endif

        /*
         * FIXME: We should handle the K5 here. Set up the write
         * range and also turn on MSR 83 bits 4 and 31 (write alloc,
         * no bus pipeline)
         */

        /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
           3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
        clear_bit(0*32+31, c->x86_capability);

        r = get_model_name(c);

        switch(c->x86)
        {
        case 4:
                /*
                 * General Systems BIOSen alias the cpu frequency registers
                 * of the Elan at 0x000df000. Unfortunately, one of the Linux
                 * drivers subsequently pokes it, and changes the CPU speed.
                 * Workaround : Remove the unneeded alias.
                 */
#define CBAR            (0xfffc)        /* Configuration Base Address (32-bit) */
#define CBAR_ENB        (0x80000000)
#define CBAR_KEY        (0X000000CB)
                if (c->x86_model == 9 || c->x86_model == 10) {
                        if (inl (CBAR) & CBAR_ENB)
                                outl (0 | CBAR_KEY, CBAR);
                }
                break;
        case 5:
                if( c->x86_model < 6 )
                {
                        /* Based on AMD doc 20734R - June 2000 */
                        if ( c->x86_model == 0 ) {
                                clear_bit(X86_FEATURE_APIC, c->x86_capability);
                                set_bit(X86_FEATURE_PGE, c->x86_capability);
                        }
                        break;
                }

                if ( c->x86_model == 6 && c->x86_mask == 1 ) {
                        const int K6_BUG_LOOP = 1000000;
                        int n;
                        void (*f_vide)(void);
                        unsigned long d, d2;

                        printk(KERN_INFO "AMD K6 stepping B detected - ");

                        /*
                         * It looks like AMD fixed the 2.6.2 bug and improved indirect
                         * calls at the same time.
                         */

                        n = K6_BUG_LOOP;
                        f_vide = vide;
                        rdtscl(d);
                        while (n--)
                                f_vide();
                        rdtscl(d2);
                        d = d2-d;

                        if (d > 20*K6_BUG_LOOP)
                                printk("system stability may be impaired when more than 32 MB are used.\n");
                        else
                                printk("probably OK (after B9730xxxx).\n");
                        printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
                }

                /* K6 with old style WHCR */
                if (c->x86_model < 8 ||
                    (c->x86_model == 8 && c->x86_mask < 8)) {
                        /* We can only write allocate on the low 508Mb */
                        if (mbytes > 508)
                                mbytes = 508;

                        rdmsr(MSR_K6_WHCR, l, h);
                        if ((l&0x0000FFFF) == 0) {
                                unsigned long flags;
                                l = (1<<0)|((mbytes/4)<<1);
                                local_irq_save(flags);
                                wbinvd();
                                wrmsr(MSR_K6_WHCR, l, h);
                                local_irq_restore(flags);
                                printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
                                       mbytes);
                        }
                        break;
                }

                if ((c->x86_model == 8 && c->x86_mask > 7) ||
                    c->x86_model == 9 || c->x86_model == 13) {
                        /* The more serious chips .. */

                        if (mbytes > 4092)
                                mbytes = 4092;

                        rdmsr(MSR_K6_WHCR, l, h);
                        if ((l&0xFFFF0000) == 0) {
                                unsigned long flags;
                                l = ((mbytes>>2)<<22)|(1<<16);
                                local_irq_save(flags);
                                wbinvd();
                                wrmsr(MSR_K6_WHCR, l, h);
                                local_irq_restore(flags);
                                printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
                                       mbytes);
                        }

                        /* Set MTRR capability flag if appropriate */
                        if (c->x86_model == 13 || c->x86_model == 9 ||
                            (c->x86_model == 8 && c->x86_mask >= 8))
                                set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
                        break;
                }

                if (c->x86_model == 10) {
                        /* AMD Geode LX is model 10 */
                        /* placeholder for any needed mods */
                        break;
                }
                break;
        case 6: /* An Athlon/Duron */

                /* Bit 15 of Athlon specific MSR 15, needs to be 0
                 * to enable SSE on Palomino/Morgan/Barton CPUs.
                 * If the BIOS didn't enable it already, enable it here.
                 */
                if (c->x86_model >= 6 && c->x86_model <= 10) {
                        if (!cpu_has(c, X86_FEATURE_XMM)) {
                                printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
                                rdmsr(MSR_K7_HWCR, l, h);
                                l &= ~0x00008000;
                                wrmsr(MSR_K7_HWCR, l, h);
                                set_bit(X86_FEATURE_XMM, c->x86_capability);
                        }
                }

                /* It's been determined by AMD that Athlons since model 8 stepping 1
                 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
                 * as per AMD technical note 27212 0.2
                 */
                if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
                        rdmsr(MSR_K7_CLK_CTL, l, h);
                        if ((l & 0xfff00000) != 0x20000000) {
                                printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
                                        ((l & 0x000fffff)|0x20000000));
                                wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
                        }
                }
                break;
        }

        switch (c->x86) {
        case 15:
        /* Use K8 tuning for Fam10h and Fam11h */
        case 0x10:
        case 0x11:
                set_bit(X86_FEATURE_K8, c->x86_capability);
                disable_c1e(NULL);
                if (acpi_smi_cmd && (acpi_enable_value | acpi_disable_value))
                        pv_post_outb_hook = check_disable_c1e;
                break;
        case 6:
                set_bit(X86_FEATURE_K7, c->x86_capability);
                break;
        }

        if (c->x86 == 15) {
                rdmsr(MSR_K7_HWCR, l, h);
                printk(KERN_INFO "CPU%d: AMD Flush Filter %sabled",
                       smp_processor_id(), (l & (1<<6)) ? "dis" : "en");
                if ((flush_filter_force > 0) && (l & (1<<6))) {
                        l &= ~(1<<6);
                        printk(" -> Forcibly enabled");
                } else if ((flush_filter_force < 0) && !(l & (1<<6))) {
                        l |= 1<<6;
                        printk(" -> Forcibly disabled");
                }
                wrmsr(MSR_K7_HWCR, l, h);
                printk("\n");
        }

        display_cacheinfo(c);

        if (cpuid_eax(0x80000000) >= 0x80000008) {
                c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
        }

        if (cpuid_eax(0x80000000) >= 0x80000007) {
                c->x86_power = cpuid_edx(0x80000007);
                if (c->x86_power & (1<<8)) {
                        set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
                        set_bit(X86_FEATURE_NOSTOP_TSC, c->x86_capability);
                }
        }

#ifdef CONFIG_X86_HT
        /*
         * On an AMD multi-core setup the lower bits of the APIC id
         * distinguish the cores.
         */
        if (c->x86_max_cores > 1) {
                int cpu = smp_processor_id();
                unsigned bits = (cpuid_ecx(0x80000008) >> 12) & 0xf;

                if (bits == 0) {
                        while ((1 << bits) < c->x86_max_cores)
                                bits++;
                }
                cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
                phys_proc_id[cpu] >>= bits;
                printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
                       cpu, c->x86_max_cores, cpu_core_id[cpu]);
        }
#endif
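
        /*
         * Worked example (hypothetical values): with four cores per package
         * and CPUID 0x80000008 ECX[15:12] (the APIC-id core-id width)
         * reporting 2, bits = 2, so cpu_core_id = phys_proc_id & 3 and
         * phys_proc_id is then shifted right by 2 to give the package id.
         */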

        /* Pointless to use MWAIT on Family10 as it does not deep sleep. */
        if (c->x86 == 0x10 && !force_mwait)
                clear_bit(X86_FEATURE_MWAIT, c->x86_capability);

        /* K6s report MCEs but don't actually have all the MSRs */
        if (c->x86 < 6)
                clear_bit(X86_FEATURE_MCE, c->x86_capability);

#ifdef __x86_64__
        /* AMD CPUs do not support SYSENTER outside of legacy mode. */
        clear_bit(X86_FEATURE_SEP, c->x86_capability);
#endif

        /* Prevent TSC drift on platforms that are not single-processor, single-core. */
        if ((smp_processor_id() == 1) && c1_ramping_may_cause_clock_drift(c))
                disable_c1_ramping();

        set_cpuidmask(c);

        start_svm(c);
}

static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
        /* AMD errata T13 (order #21922) */
        if (c->x86 == 6) {
                if (c->x86_model == 3 && c->x86_mask == 0)      /* Duron Rev A0 */
                        size = 64;
                if (c->x86_model == 4 &&
                    (c->x86_mask == 0 || c->x86_mask == 1))     /* Tbird rev A1/A2 */
                        size = 256;
        }
        return size;
}

static struct cpu_dev amd_cpu_dev __cpuinitdata = {
        .c_vendor       = "AMD",
        .c_ident        = { "AuthenticAMD" },
        .c_models = {
                { .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
                  {
                        [3]  = "486 DX/2",
                        [7]  = "486 DX/2-WB",
                        [8]  = "486 DX/4",
                        [9]  = "486 DX/4-WB",
                        [14] = "Am5x86-WT",
                        [15] = "Am5x86-WB"
                  }
                },
        },
        .c_init = init_amd,
        .c_identify = generic_identify,
        .c_size_cache = amd_size_cache,
};

int __init amd_init_cpu(void)
{
        cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
        return 0;
}