int ret;
/* Currently we only handle Intel and AMD processors */
- if ( (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ) ||
- (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ) )
+ if ( boot_cpu_data.x86_vendor & (X86_VENDOR_INTEL | X86_VENDOR_AMD) )
ret = cpufreq_add_cpu(cpuid);
else
ret = -EFAULT;
rdmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);
rdmsrl(MSR_CSTAR, saved_cstar);
rdmsrl(MSR_LSTAR, saved_lstar);
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
- boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
+ if ( boot_cpu_data.x86_vendor & (X86_VENDOR_INTEL | X86_VENDOR_CENTAUR) )
{
rdmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
rdmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip);
wrgsbase(saved_gs_base);
wrmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
- boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
+ if ( boot_cpu_data.x86_vendor & (X86_VENDOR_INTEL | X86_VENDOR_CENTAUR) )
{
/* Recover sysenter MSRs */
wrmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
case MSR_IA32_MPERF:
case MSR_IA32_APERF:
- if ( (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) &&
- (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) )
+ if ( !(boot_cpu_data.x86_vendor & (X86_VENDOR_INTEL | X86_VENDOR_AMD)) )
break;
if ( likely(!is_cpufreq_controller(currd)) ||
wrmsr_safe(reg, val) == 0 )
(unsigned long)lstar_enter);
stub_va += offset;
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
- boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
+ if ( boot_cpu_data.x86_vendor & (X86_VENDOR_INTEL | X86_VENDOR_CENTAUR) )
{
/* SYSENTER entry. */
wrmsrl(MSR_IA32_SYSENTER_ESP, stack_bottom);
/*
 * CPU vendor IDs
 *
- * - X86_VENDOR_* are Xen-internal identifiers. Values and order are
- *   arbitrary.
+ * - X86_VENDOR_* are Xen-internal identifiers. The order is arbitrary, but
+ *   values form a bitmap so vendor checks can be made against multiple
+ *   vendors at once.
 * - X86_VENDOR_*_E?X are architectural information from CPUID leaf 0
 */
/*
 * UNKNOWN is 0 (no bits set), so it can never satisfy a
 * `x86_vendor & (X86_VENDOR_A | X86_VENDOR_B)` style test — unknown vendors
 * fall through every vendor-specific path.  Keep all other values distinct
 * powers of two for the bitmap tests to stay valid.
 */
#define X86_VENDOR_UNKNOWN 0
-#define X86_VENDOR_INTEL 1
+#define X86_VENDOR_INTEL (1 << 0)
#define X86_VENDOR_INTEL_EBX 0x756e6547U /* "GenuineIntel" */
#define X86_VENDOR_INTEL_ECX 0x6c65746eU
#define X86_VENDOR_INTEL_EDX 0x49656e69U
-#define X86_VENDOR_AMD 2
+#define X86_VENDOR_AMD (1 << 1)
#define X86_VENDOR_AMD_EBX 0x68747541U /* "AuthenticAMD" */
#define X86_VENDOR_AMD_ECX 0x444d4163U
#define X86_VENDOR_AMD_EDX 0x69746e65U
-#define X86_VENDOR_CENTAUR 3
+#define X86_VENDOR_CENTAUR (1 << 2)
#define X86_VENDOR_CENTAUR_EBX 0x746e6543U /* "CentaurHauls" */
#define X86_VENDOR_CENTAUR_ECX 0x736c7561U
#define X86_VENDOR_CENTAUR_EDX 0x48727561U
-#define X86_VENDOR_SHANGHAI 4
+#define X86_VENDOR_SHANGHAI (1 << 3)
#define X86_VENDOR_SHANGHAI_EBX 0x68532020U /* " Shanghai " */
#define X86_VENDOR_SHANGHAI_ECX 0x20206961U
#define X86_VENDOR_SHANGHAI_EDX 0x68676e61U