u64 host_pat = 0x050100070406;
static unsigned int cleared_caps[NCAPINTS];
+static unsigned int forced_caps[NCAPINTS];
/*
 * Permanently disable CPU feature @cap for this host.
 *
 * Clears @cap — and, via lookup_deep_deps(), every feature depending on it —
 * from boot_cpu_data.x86_capability, and records the whole set in
 * cleared_caps so later per-CPU identification keeps them clear.  Warns via
 * printk() if @cap, or any dependent feature, was previously forced with
 * setup_force_cpu_cap(), since clear-after-force is a likely caller bug.
 *
 * NOTE(review): the original paste used `dfs` and `i` without declaring
 * them; declarations added here.  `dfs` is assumed to be an array of at
 * least FSCAPINTS feature words — TODO confirm lookup_deep_deps()'s
 * contract (element type and whether it can return NULL).
 */
void __init setup_clear_cpu_cap(unsigned int cap)
{
	const unsigned int *dfs;
	unsigned int i;

	/* Idempotent: a second clear of the same feature is a no-op. */
	if (__test_and_set_bit(cap, cleared_caps))
		return;

	if (test_bit(cap, forced_caps))
		printk("%pS clearing previously forced feature %#x\n",
		       __builtin_return_address(0), cap);

	__clear_bit(cap, boot_cpu_data.x86_capability);

	/* Transitively clear everything that depends on @cap. */
	dfs = lookup_deep_deps(cap);
	if (!dfs)
		return;

	for (i = 0; i < FSCAPINTS; ++i) {
		cleared_caps[i] |= dfs[i];
		boot_cpu_data.x86_capability[i] &= ~dfs[i];
		if (!(forced_caps[i] & dfs[i]))
			continue;
		printk("%pS implicitly clearing previously forced feature(s) %u:%#x\n",
		       __builtin_return_address(0),
		       i, forced_caps[i] & dfs[i]);
	}
}
+void __init setup_force_cpu_cap(unsigned int cap)
+{
+ if (__test_and_set_bit(cap, forced_caps))
+ return;
+
+ if (test_bit(cap, cleared_caps)) {
+ printk("%pS tries to force previously cleared feature %#x\n",
+ __builtin_return_address(0), cap);
+ return;
+ }
+
+ __set_bit(cap, boot_cpu_data.x86_capability);
+}
+
/*
 * NOTE(review): the span below still carries unified-diff '+'/'-' markers
 * and splices hunks from more than one function together: the
 * default_init() header, a capability fixup loop, what looks like a
 * CPUID-faulting probe (it returns 1 from a void-looking context), and
 * levelling initialisation.  It does not compile as-is.  Code is left
 * byte-identical; only comments are added.
 */
static void default_init(struct cpuinfo_x86 * c)
{
/* Not much we can do here... */
/* Mask the capability words down to features this build knows about. */
for (i = 0; i < FSCAPINTS; ++i)
c->x86_capability[i] &= known_features[i];
/*
 * Apply forced bits before cleared bits so that cleared_caps wins when a
 * feature appears in both sets — consistent with setup_force_cpu_cap()
 * refusing to force a previously cleared feature.
 */
- for (i = 0 ; i < NCAPINTS ; ++i)
+ for (i = 0 ; i < NCAPINTS ; ++i) {
+ c->x86_capability[i] |= forced_caps[i];
c->x86_capability[i] &= ~cleared_caps[i];
+ }
/* If the model name is still unset, do table lookup. */
if ( !c->x86_model_id[0] ) {
expected_levelling_cap |= LCAP_faulting;
levelling_caps |= LCAP_faulting;
/*
 * Route CPUID-faulting through setup_force_cpu_cap() so the force is
 * recorded in forced_caps, rather than poking boot_cpu_data directly.
 */
- __set_bit(X86_FEATURE_CPUID_FAULTING, boot_cpu_data.x86_capability);
+ setup_force_cpu_cap(X86_FEATURE_CPUID_FAULTING);
return 1;
}
if (c == &boot_cpu_data)
intel_init_levelling();
/*
 * The per-CPU re-propagation of CPUID faulting is removed here —
 * presumably because the forced_caps loop above now handles it for
 * every CPU.  TODO confirm against the enclosing function's caller.
 */
- if (test_bit(X86_FEATURE_CPUID_FAULTING, boot_cpu_data.x86_capability))
- __set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
-
intel_ctxt_switch_levelling(NULL);
}
/*
 * NOTE(review): fragment from an unseen enclosing function (SMEP/SMAP
 * command-line option handling, likely early boot setup).  Still carries
 * unified-diff '+'/'-' markers and does not compile standalone.  Code left
 * byte-identical; comments only.
 */
/* "smep=off": clear the hardware feature (and its dependents) entirely. */
if ( !opt_smep )
setup_clear_cpu_cap(X86_FEATURE_SMEP);
/*
 * Xen uses SMEP for itself unless restricted to HVM guests only; route
 * the synthetic feature through setup_force_cpu_cap() so the force is
 * tracked in forced_caps instead of writing boot_cpu_data directly.
 */
if ( cpu_has_smep && opt_smep != SMEP_HVM_ONLY )
- __set_bit(X86_FEATURE_XEN_SMEP, boot_cpu_data.x86_capability);
+ setup_force_cpu_cap(X86_FEATURE_XEN_SMEP);
if ( boot_cpu_has(X86_FEATURE_XEN_SMEP) )
set_in_cr4(X86_CR4_SMEP);
/* Same pattern for SMAP. */
if ( !opt_smap )
setup_clear_cpu_cap(X86_FEATURE_SMAP);
if ( cpu_has_smap && opt_smap != SMAP_HVM_ONLY )
+ setup_force_cpu_cap(X86_FEATURE_XEN_SMAP);
if ( boot_cpu_has(X86_FEATURE_XEN_SMAP) )
set_in_cr4(X86_CR4_SMAP);