}
#endif
+ /* As a rule, these processors keep the APIC timer running in deep C-states */
+ if (c->x86 >= 0xf && !cpu_has_amd_erratum(c, AMD_ERRATUM_400))
+ set_bit(X86_FEATURE_ARAT, c->x86_capability);
+
/* Prevent TSC drift on platforms that are not single-processor, single-core. */
if ((smp_processor_id() == 1) && c1_ramping_may_cause_clock_drift(c))
disable_c1_ramping();
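
For context: X86_FEATURE_ARAT ("Always Running APIC Timer") tells the timer code that the local APIC timer keeps ticking in deep C-states, so no broadcast-timer fallback is needed there; erratum 400 is the C1E erratum under which that timer may stop, hence the exclusion. A minimal standalone sketch of the gate, with a hypothetical struct standing in for the real cpuinfo_x86 and erratum query:

#include <stdbool.h>

/* Hypothetical stand-ins for c->x86 and cpu_has_amd_erratum(). */
struct fake_cpu {
	unsigned int family;	/* CPUID family, i.e. c->x86 */
	bool has_erratum_400;	/* C1E erratum present? */
};

/* Mirrors the gate in the hunk above: K8 (family 0xf) and newer
 * advertise ARAT unless erratum 400 means the local APIC timer
 * may stop in deep C-states. */
static bool should_set_arat(const struct fake_cpu *c)
{
	return c->family >= 0xf && !c->has_erratum_400;
}
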
AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf), \
AMD_MODEL_RANGE(0x12, 0x0, 0x0, 0x1, 0x0))
+#define AMD_ERRATUM_400 \
+ AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), \
+ AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf))
+
struct cpuinfo_x86;
int cpu_has_amd_erratum(const struct cpuinfo_x86 *, int, ...);
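
Judging from the prototype above, the ranges are passed as a zero-terminated varargs list after the OSVW id; the OSVW MSRs are typically consulted when the BIOS provides them, with the packed ranges as a fallback. Below is a sketch of the range matching, assuming the Linux-style packing this code mirrors (family in bits 31:24, start model/stepping in bits 23:12, end model/stepping in bits 11:0); the bit layout and the _FAMILY/_START/_END helpers here are assumptions based on that implementation, not quoted from this header:

#include <stdio.h>
#include <stdbool.h>

/* Assumed packing: one int per range, family plus a start and an
 * end (model << 4 | stepping) value. */
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	(((f) << 24) | ((m_start) << 16) | ((s_start) << 12) | \
	 ((m_end) << 4) | (s_end))

#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)

/* Match one packed range against a family/model/stepping triple. */
static bool range_matches(int range, int family, int model, int stepping)
{
	int ms = (model << 4) | stepping;	/* combined model/stepping */

	return family == AMD_MODEL_RANGE_FAMILY(range) &&
	       ms >= AMD_MODEL_RANGE_START(range) &&
	       ms <= AMD_MODEL_RANGE_END(range);
}

int main(void)
{
	/* First range of AMD_ERRATUM_400 above: family 0xf,
	 * model 0x41 stepping 0x2 up to model 0xff stepping 0xf. */
	int range = AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf);

	printf("model 0x41/2: %d\n", range_matches(range, 0xf, 0x41, 0x2)); /* 1 */
	printf("model 0x40/0: %d\n", range_matches(range, 0xf, 0x40, 0x0)); /* 0 */
	return 0;
}

Folding model and stepping into one (model << 4) | stepping value makes each range check a single pair of integer compares, which is why both the start and end fields carry the two values combined.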