return -1;
}
- set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
+ __set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
/* The BIOS may have set up the APIC at some other address */
int __init APIC_init_uniprocessor (void)
{
if (enable_local_apic < 0)
- clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
+ __clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
if (!smp_found_config && !cpu_has_apic) {
skip_ioapic_setup = 1;
if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
boot_cpu_physical_apicid);
- clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
+ __clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
skip_ioapic_setup = 1;
return -1;
}
wrmsrl(MSR_K7_HWCR, value);
}
- /*
- * FIXME: We should handle the K5 here. Set up the write
- * range and also turn on MSR 83 bits 4 and 31 (write alloc,
- * no bus pipeline)
- */
-
/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
- clear_bit(0*32+31, c->x86_capability);
+ __clear_bit(0*32+31, c->x86_capability);
if (c->x86 == 0xf && c->x86_model < 0x14
&& cpu_has(c, X86_FEATURE_LAHF_LM)) {
* revision D (model = 0x14) and later actually support it.
* (AMD Erratum #110, docId: 25759).
*/
- clear_bit(X86_FEATURE_LAHF_LM, c->x86_capability);
+ __clear_bit(X86_FEATURE_LAHF_LM, c->x86_capability);
if (!rdmsr_amd_safe(0xc001100d, &l, &h))
wrmsr_amd_safe(0xc001100d, l, h & ~1);
}
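
A note on the 0*32+31 idiom above: x86_capability is an array of 32-bit feature words, and a feature number encodes word*32 + bit. So 0*32+31 is bit 31 of the CPUID leaf 1 EDX word, while the real 3DNow! flag, 1*32+31, is bit 31 of the leaf 0x80000001 EDX word. A minimal sketch of the indexing scheme (names here are illustrative, not the kernel's):

/* Illustrative helper only -- the real test_bit()/cpu_has() live in
 * <asm/bitops.h> and <asm/cpufeature.h>. */
#include <stdbool.h>
#include <stdint.h>

#define NCAPINTS 8  /* number of 32-bit feature words; varies by tree */

static bool cap_test_bit(int nr, const uint32_t caps[NCAPINTS])
{
        return (caps[nr / 32] >> (nr % 32)) & 1;
}
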
if (c->extended_cpuid_level >= 0x80000007) {
c->x86_power = cpuid_edx(0x80000007);
if (c->x86_power & (1<<8)) {
- set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
- set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability);
+ __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+ __set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability);
if (c->x86 != 0x11)
- set_bit(X86_FEATURE_TSC_RELIABLE, c->x86_capability);
+ __set_bit(X86_FEATURE_TSC_RELIABLE,
+ c->x86_capability);
}
}
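
The c->x86_power & (1<<8) test above is AMD's invariant-TSC flag: CPUID leaf 0x80000007, EDX bit 8 ("TscInvariant"), meaning the TSC runs at a constant rate and keeps counting in deep C-states. A standalone userspace sketch of the same check, using GCC's cpuid.h (the function name is mine):

#include <cpuid.h>
#include <stdbool.h>

static bool has_invariant_tsc(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* __get_cpuid() returns 0 if the leaf is out of range. */
        if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx))
                return false;
        return edx & (1u << 8);  /* TscInvariant */
}
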
wrmsr_safe(MSR_K8_EXT_FEATURE_MASK, value);
rdmsrl(MSR_K8_EXT_FEATURE_MASK, value);
if (value & (1ULL << 54)) {
- set_bit(X86_FEATURE_TOPOEXT, c->x86_capability);
+ __set_bit(X86_FEATURE_TOPOEXT, c->x86_capability);
printk(KERN_INFO "CPU: Re-enabling disabled "
"Topology Extensions Support\n");
}
/* Pointless to use MWAIT on Family10 as it does not deep sleep. */
if (c->x86 >= 0x10 && !force_mwait)
- clear_bit(X86_FEATURE_MWAIT, c->x86_capability);
+ __clear_bit(X86_FEATURE_MWAIT, c->x86_capability);
if (!cpu_has_amd_erratum(c, AMD_ERRATUM_121))
opt_allow_unsafe = 1;
}
/* AMD CPUs do not support SYSENTER outside of legacy mode. */
- clear_bit(X86_FEATURE_SEP, c->x86_capability);
+ __clear_bit(X86_FEATURE_SEP, c->x86_capability);
if (c->x86 == 0x10) {
/* do this for boot cpu */
* running in deep C states.
*/
if ( opt_arat && c->x86 > 0x11 )
- set_bit(X86_FEATURE_ARAT, c->x86_capability);
+ __set_bit(X86_FEATURE_ARAT, c->x86_capability);
/*
* Prior to Family 0x14, perf counters are not reset during warm reboot.
if (c->x86 == 0x6 && c->x86_model >= 0xf) {
c->x86_cache_alignment = c->x86_clflush_size * 2;
- set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+ __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
}
get_model_name(c);
/* Initialize xsave/xrstor features */
if ( !use_xsave )
- clear_bit(X86_FEATURE_XSAVE, boot_cpu_data.x86_capability);
+ __clear_bit(X86_FEATURE_XSAVE, boot_cpu_data.x86_capability);
if ( cpu_has_xsave )
xstate_init(c);
if ( ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE) )
return;
- set_bit(X86_FEATURE_XTOPOLOGY, c->x86_capability);
+ __set_bit(X86_FEATURE_XTOPOLOGY, c->x86_capability);
initial_apicid = edx;
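
LEAFB_SUBTYPE(ecx) extracts the level-type field of CPUID leaf 0xB: ECX[15:8] is the level type (1 = SMT, 2 = core), EBX is the logical processor count at that level (0 if the subleaf is invalid), and EDX is the x2APIC ID. A hedged sketch of the subleaf-0 check performed above (the macro and function names are mine):

#include <cpuid.h>
#include <stdbool.h>

#define LEVEL_TYPE(ecx)  (((ecx) >> 8) & 0xff)
#define LEVEL_TYPE_SMT   1

static bool leafb_smt_level(unsigned int *x2apic_id)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid_count(0xb, 0, &eax, &ebx, &ecx, &edx))
                return false;
        if (ebx == 0 || LEVEL_TYPE(ecx) != LEVEL_TYPE_SMT)
                return false;
        *x2apic_id = edx;  /* initial x2APIC ID of this logical CPU */
        return true;
}
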
if (c->x86 == 6 && cpu_has_clflush &&
(c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
- set_bit(X86_FEATURE_CLFLUSH_MONITOR, c->x86_capability);
+ __set_bit(X86_FEATURE_CLFLUSH_MONITOR, c->x86_capability);
}
unsigned eax = cpuid_eax(10);
/* Check for version and the number of counters */
if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
- set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
+ __set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
}
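
CPUID leaf 0xA describes Intel architectural performance monitoring: EAX[7:0] is the version ID (0 means absent) and EAX[15:8] is the number of general-purpose counters per logical CPU, which is exactly what the two masks above decode. A standalone sketch (the function name is mine):

#include <cpuid.h>
#include <stdbool.h>

static bool has_usable_arch_perfmon(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx))
                return false;
        /* version != 0 and more than one GP counter, as above */
        return (eax & 0xff) != 0 && ((eax >> 8) & 0xff) > 1;
}
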
if ( !cpu_has(c, X86_FEATURE_XTOPOLOGY) )
if (c == &boot_cpu_data && c->x86 == 6) {
if (probe_intel_cpuid_faulting())
- set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
+ __set_bit(X86_FEATURE_CPUID_FAULTING,
+ c->x86_capability);
} else if (boot_cpu_has(X86_FEATURE_CPUID_FAULTING)) {
BUG_ON(!probe_intel_cpuid_faulting());
- set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
+ __set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
}
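
probe_intel_cpuid_faulting() amounts to checking MSR_PLATFORM_INFO (0xCE) bit 31, which advertises CPUID-faulting support. A userspace sketch via the Linux msr driver (requires root and the msr module loaded; illustrative only, not the in-tree probe):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/cpu/0/msr");
                return 1;
        }
        /* The msr driver maps the file offset to the MSR index. */
        if (pread(fd, &val, sizeof(val), 0xce) != sizeof(val)) {
                perror("rdmsr 0xce");
                return 1;
        }
        close(fd);
        printf("CPUID faulting %ssupported\n",
               (val & (1ull << 31)) ? "" : "not ");
        return 0;
}
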
if (!cpu_has_cpuid_faulting)
if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
(c->x86 == 0x6 && c->x86_model >= 0x0e))
- set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+ __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
if (cpuid_edx(0x80000007) & (1u<<8)) {
- set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
- set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability);
- set_bit(X86_FEATURE_TSC_RELIABLE, c->x86_capability);
+ __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+ __set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability);
+ __set_bit(X86_FEATURE_TSC_RELIABLE, c->x86_capability);
}
if ( opt_arat &&
( c->cpuid_level >= 0x00000006 ) &&
( cpuid_eax(0x00000006) & (1u<<2) ) )
- set_bit(X86_FEATURE_ARAT, c->x86_capability);
+ __set_bit(X86_FEATURE_ARAT, c->x86_capability);
}
static const struct cpu_dev intel_cpu_dev = {
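
Finally, the distinction the whole conversion rests on: set_bit()/clear_bit() are LOCK-prefixed atomic read-modify-write operations, while the __-prefixed forms are plain RMW, cheaper but safe only when no other CPU can touch the word concurrently -- which holds for x86_capability, since each CPU fills in its own feature words during identification. A minimal sketch of the two forms (not the real <asm/bitops.h> code):

#include <stdint.h>

/* Atomic: the LOCK prefix makes the RMW indivisible across CPUs.
 * For simplicity this sketch only handles nr in 0..31. */
static inline void sketch_set_bit(int nr, volatile uint32_t *addr)
{
        asm volatile("lock; btsl %1, %0"
                     : "+m" (*addr)
                     : "Ir" (nr)
                     : "memory");
}

/* Non-atomic: an ordinary RMW with no bus lock; the caller must
 * guarantee exclusive access to the word being modified. */
static inline void sketch___set_bit(int nr, uint32_t *addr)
{
        addr[nr / 32] |= UINT32_C(1) << (nr % 32);
}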