case ACPI_ADR_SPACE_FIXED_HARDWARE:
if ( xen_cx->reg.bit_width == VENDOR_INTEL &&
xen_cx->reg.bit_offset == NATIVE_CSTATE_BEYOND_HALT &&
- boot_cpu_has(X86_FEATURE_MWAIT) )
+ boot_cpu_has(X86_FEATURE_MONITOR) )
cx->entry_method = ACPI_CSTATE_EM_FFH;
else
cx->entry_method = ACPI_CSTATE_EM_HALT;
struct cpuinfo_x86 *cpu = &cpu_data[cpuid];
if (cpu->x86_vendor != X86_VENDOR_INTEL ||
- !cpu_has(cpu, X86_FEATURE_EST))
+ !cpu_has(cpu, X86_FEATURE_EIST))
return 0;
return 1;
pdc[2] |= ACPI_PDC_C_CAPABILITY_SMP & mask;
- if (cpu_has(c, X86_FEATURE_EST))
+ if (cpu_has(c, X86_FEATURE_EIST))
pdc[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP & mask;
if (cpu_has(c, X86_FEATURE_ACPI))
* If mwait/monitor or its break-on-interrupt extension are
* unsupported, Cx_FFH will be disabled.
*/
- if (!cpu_has(c, X86_FEATURE_MWAIT) ||
+ if (!cpu_has(c, X86_FEATURE_MONITOR) ||
c->cpuid_level < CPUID_MWAIT_LEAF)
ecx = 0;
else if (c == &boot_cpu_data || cpu == smp_processor_id())
/* Pointless to use MWAIT on Family10 as it does not deep sleep. */
if (c->x86 == 0x10)
- __clear_bit(X86_FEATURE_MWAIT, c->x86_capability);
+ __clear_bit(X86_FEATURE_MONITOR, c->x86_capability);
if (!cpu_has_amd_erratum(c, AMD_ERRATUM_121))
opt_allow_unsafe = 1;
u32 eax, ebx, ecx, edx;
int index_msb, core_bits;
- if (!cpu_has(c, X86_FEATURE_HT) ||
+ if (!cpu_has(c, X86_FEATURE_HTT) ||
cpu_has(c, X86_FEATURE_CMP_LEGACY) ||
cpu_has(c, X86_FEATURE_XTOPOLOGY))
return;
return _phys_pkg_id(apicid, core_plus_mask_width);
}
- if (boot_cpu_has(X86_FEATURE_HT) &&
+ if (boot_cpu_has(X86_FEATURE_HTT) &&
!boot_cpu_has(X86_FEATURE_CMP_LEGACY)) {
unsigned int num_siblings = (cpuid_ebx(1) & 0xff0000) >> 16;
{
if (!cpu_has_apic)
return 0;
- if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
+ if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_TM1))
return 0;
return 1;
}
};
#define ICPU(model, cpu) \
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, \
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MONITOR, \
&idle_cpu_##cpu}
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
(leaf1_edx & cpufeat_mask(X86_FEATURE_SSE) ?
X86_CR4_OSXMMEXCPT : 0) |
((restore || nestedhvm_enabled(v->domain)) &&
- (leaf1_ecx & cpufeat_mask(X86_FEATURE_VMXE)) ?
+ (leaf1_ecx & cpufeat_mask(X86_FEATURE_VMX)) ?
X86_CR4_VMXE : 0) |
(leaf7_0_ebx & cpufeat_mask(X86_FEATURE_FSGSBASE) ?
X86_CR4_FSGSBASE : 0) |
{
eax = IA32_FEATURE_CONTROL_MSR_LOCK;
eax |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX;
- if ( test_bit(X86_FEATURE_SMXE, &boot_cpu_data.x86_capability) )
+ if ( test_bit(X86_FEATURE_SMX, &boot_cpu_data.x86_capability) )
eax |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX;
wrmsr(IA32_FEATURE_CONTROL_MSR, eax, 0);
}
{
case -2: /* #UD or #GP */
if ( bios_locked &&
- test_bit(X86_FEATURE_SMXE, &boot_cpu_data.x86_capability) &&
+ test_bit(X86_FEATURE_SMX, &boot_cpu_data.x86_capability) &&
(!(eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX) ||
!(eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX)) )
{
/* VMX capability MSRs are available only when guest supports VMX. */
hvm_cpuid(0x1, NULL, NULL, &ecx, &edx);
- if ( !(ecx & cpufeat_mask(X86_FEATURE_VMXE)) )
+ if ( !(ecx & cpufeat_mask(X86_FEATURE_VMX)) )
return 0;
/*
data |= X86_CR4_OSFXSR;
if ( edx & cpufeat_mask(X86_FEATURE_SSE) )
data |= X86_CR4_OSXMMEXCPT;
- if ( ecx & cpufeat_mask(X86_FEATURE_VMXE) )
+ if ( ecx & cpufeat_mask(X86_FEATURE_VMX) )
data |= X86_CR4_VMXE;
- if ( ecx & cpufeat_mask(X86_FEATURE_SMXE) )
+ if ( ecx & cpufeat_mask(X86_FEATURE_SMX) )
data |= X86_CR4_SMXE;
if ( ecx & cpufeat_mask(X86_FEATURE_PCID) )
data |= X86_CR4_PCIDE;
unsigned int eax, ebx, ecx, edx;
unsigned int rmid;
- if ( !boot_cpu_has(X86_FEATURE_CMT) )
+ if ( !boot_cpu_has(X86_FEATURE_PQM) )
return;
cpuid_count(0xf, 0, &eax, &ebx, &ecx, &edx);
uint64_t val;
const struct cpuinfo_x86 *c = cpu_data + cpu;
- if ( !cpu_has(c, X86_FEATURE_CAT) || c->cpuid_level < PSR_CPUID_LEVEL_CAT )
+ if ( !cpu_has(c, X86_FEATURE_PQE) || c->cpuid_level < PSR_CPUID_LEVEL_CAT )
return;
socket = cpu_to_socket(cpu);
if ( !cpu_has_sep )
__clear_bit(X86_FEATURE_SEP, &d);
__clear_bit(X86_FEATURE_DS, &d);
- __clear_bit(X86_FEATURE_ACC, &d);
+ __clear_bit(X86_FEATURE_TM1, &d);
__clear_bit(X86_FEATURE_PBE, &d);
if ( is_pvh_domain(currd) )
__clear_bit(X86_FEATURE_MTRR, &d);
__clear_bit(X86_FEATURE_DTES64 % 32, &c);
- __clear_bit(X86_FEATURE_MWAIT % 32, &c);
+ __clear_bit(X86_FEATURE_MONITOR % 32, &c);
__clear_bit(X86_FEATURE_DSCPL % 32, &c);
- __clear_bit(X86_FEATURE_VMXE % 32, &c);
- __clear_bit(X86_FEATURE_SMXE % 32, &c);
+ __clear_bit(X86_FEATURE_VMX % 32, &c);
+ __clear_bit(X86_FEATURE_SMX % 32, &c);
__clear_bit(X86_FEATURE_TM2 % 32, &c);
if ( is_pv_32bit_domain(currd) )
__clear_bit(X86_FEATURE_CX16 % 32, &c);
__clear_bit(X86_FEATURE_LWP % 32, &c);
__clear_bit(X86_FEATURE_NODEID_MSR % 32, &c);
__clear_bit(X86_FEATURE_TOPOEXT % 32, &c);
- __clear_bit(X86_FEATURE_MWAITX % 32, &c);
+ __clear_bit(X86_FEATURE_MONITORX % 32, &c);
break;
case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
#define AMD_FEATURES_K8_REV_E_ECX (AMD_FEATURES_K8_REV_D_ECX | \
cpufeat_mask(X86_FEATURE_SSE3))
#define AMD_FEATURES_K8_REV_E_EDX (AMD_FEATURES_K8_REV_D_EDX | \
- cpufeat_mask(X86_FEATURE_HT))
+ cpufeat_mask(X86_FEATURE_HTT))
#define AMD_EXTFEATURES_K8_REV_E_ECX (AMD_EXTFEATURES_K8_REV_D_ECX |\
cpufeat_mask(X86_FEATURE_CMP_LEGACY))
#define AMD_EXTFEATURES_K8_REV_E_EDX AMD_EXTFEATURES_K8_REV_D_EDX
/* Family 10h, Revision B */
#define AMD_FEATURES_FAM10h_REV_B_ECX (AMD_FEATURES_K8_REV_F_ECX | \
- cpufeat_mask(X86_FEATURE_POPCNT) | cpufeat_mask(X86_FEATURE_MWAIT))
+ cpufeat_mask(X86_FEATURE_POPCNT) | cpufeat_mask(X86_FEATURE_MONITOR))
#define AMD_FEATURES_FAM10h_REV_B_EDX AMD_FEATURES_K8_REV_F_EDX
#define AMD_EXTFEATURES_FAM10h_REV_B_ECX (AMD_EXTFEATURES_K8_REV_F_ECX |\
cpufeat_mask(X86_FEATURE_ABM) | cpufeat_mask(X86_FEATURE_SSE4A) | \
/* of FPU context), and CR4.OSFXSR available */
#define X86_FEATURE_SSE (0*32+25) /* Streaming SIMD Extensions */
#define X86_FEATURE_SSE2 (0*32+26) /* Streaming SIMD Extensions-2 */
-#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */
-#define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */
+#define X86_FEATURE_HTT (0*32+28) /* Hyper-Threading Technology */
+#define X86_FEATURE_TM1 (0*32+29) /* Thermal Monitor 1 */
#define X86_FEATURE_PBE (0*32+31) /* Pending Break Enable */
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
#define X86_FEATURE_SSE3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
#define X86_FEATURE_PCLMULQDQ (4*32+ 1) /* Carry-less multiplication */
#define X86_FEATURE_DTES64 (4*32+ 2) /* 64-bit Debug Store */
-#define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */
+#define X86_FEATURE_MONITOR (4*32+ 3) /* Monitor/Mwait support */
#define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */
-#define X86_FEATURE_VMXE (4*32+ 5) /* Virtual Machine Extensions */
-#define X86_FEATURE_SMXE (4*32+ 6) /* Safer Mode Extensions */
-#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */
+#define X86_FEATURE_VMX (4*32+ 5) /* Virtual Machine Extensions */
+#define X86_FEATURE_SMX (4*32+ 6) /* Safer Mode Extensions */
+#define X86_FEATURE_EIST (4*32+ 7) /* Enhanced SpeedStep */
#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */
#define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental Streaming SIMD Extensions-3 */
#define X86_FEATURE_FMA (4*32+12) /* Fused Multiply Add */
#define X86_FEATURE_MOVBE (4*32+22) /* movbe instruction */
#define X86_FEATURE_POPCNT (4*32+23) /* POPCNT instruction */
#define X86_FEATURE_TSC_DEADLINE (4*32+24) /* "tdt" TSC Deadline Timer */
-#define X86_FEATURE_AES (4*32+25) /* AES instructions */
+#define X86_FEATURE_AESNI (4*32+25) /* AES instructions */
#define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
#define X86_FEATURE_OSXSAVE (4*32+27) /* OSXSAVE */
#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
#define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */
#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */
#define X86_FEATURE_DBEXT (6*32+26) /* data breakpoint extension */
-#define X86_FEATURE_MWAITX (6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
+#define X86_FEATURE_MONITORX (6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 7 */
#define X86_FEATURE_FSGSBASE (7*32+ 0) /* {RD,WR}{FS,GS}BASE instructions */
#define X86_FEATURE_ERMS (7*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID (7*32+10) /* Invalidate Process Context ID */
#define X86_FEATURE_RTM (7*32+11) /* Restricted Transactional Memory */
-#define X86_FEATURE_CMT (7*32+12) /* Cache Monitoring Technology */
+#define X86_FEATURE_PQM (7*32+12) /* Platform QoS Monitoring */
#define X86_FEATURE_NO_FPU_SEL (7*32+13) /* FPU CS/DS stored as zero */
#define X86_FEATURE_MPX (7*32+14) /* Memory Protection Extensions */
-#define X86_FEATURE_CAT (7*32+15) /* Cache Allocation Technology */
+#define X86_FEATURE_PQE (7*32+15) /* Platform QoS Enforcement */
#define X86_FEATURE_RDSEED (7*32+18) /* RDSEED instruction */
#define X86_FEATURE_ADX (7*32+19) /* ADCX, ADOX instructions */
#define X86_FEATURE_SMAP (7*32+20) /* Supervisor Mode Access Prevention */
#define cpu_has_sse boot_cpu_has(X86_FEATURE_SSE)
#define cpu_has_sse2 boot_cpu_has(X86_FEATURE_SSE2)
#define cpu_has_sse3 boot_cpu_has(X86_FEATURE_SSE3)
-#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
+#define cpu_has_htt boot_cpu_has(X86_FEATURE_HTT)
#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLUSH)
#define cpu_has_page1gb boot_cpu_has(X86_FEATURE_PAGE1GB)
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
#define cpu_has_rdtscp boot_cpu_has(X86_FEATURE_RDTSCP)
#define cpu_has_svm boot_cpu_has(X86_FEATURE_SVM)
-#define cpu_has_vmx boot_cpu_has(X86_FEATURE_VMXE)
+#define cpu_has_vmx boot_cpu_has(X86_FEATURE_VMX)
#define cpu_has_cpuid_faulting boot_cpu_has(X86_FEATURE_CPUID_FAULTING)
#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
#define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT)