x86: helpers for cpu feature manipulation
author     Andrew Cooper <andrew.cooper3@citrix.com>
           Mon, 26 Oct 2015 13:01:50 +0000 (14:01 +0100)
committer  Jan Beulich <jbeulich@suse.com>
           Mon, 26 Oct 2015 13:01:50 +0000 (14:01 +0100)
Expose them to assembly code, and replace open-coded versions.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
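
As a quick illustration of what the new helpers compute (they are defined in
xen/include/asm-x86/cpufeature.h below), here is a minimal C sketch. The
X86_FEATURE_NX value of 1*32 + 20 is an assumption matching Xen's numbering of
CPUID 0x80000001 EDX bit 20, and plain 1U stands in for the _AC(1, U) form the
real header uses so that the mask is also usable from assembly:

    /* Minimal sketch, not the header itself; feature value assumed as noted above. */
    #define cpufeat_word(idx)   ((idx) / 32)             /* which 32-bit capability word */
    #define cpufeat_bit(idx)    ((idx) % 32)             /* bit position within that word */
    #define cpufeat_mask(idx)   (1U << cpufeat_bit(idx)) /* C-only stand-in for _AC(1, U) */

    #define X86_FEATURE_NX      (1*32 + 20)              /* assumed: word 1, bit 20 */

    /*
     * cpufeat_word(X86_FEATURE_NX) == 1          -> index into x86_capability[]
     * cpufeat_bit(X86_FEATURE_NX)  == 20         -> usable as a bt operand in assembly
     * cpufeat_mask(X86_FEATURE_NX) == 0x00100000 -> usable to test a raw CPUID register
     */

This is why the trampoline.S hunk can write bt $cpufeat_bit(X86_FEATURE_NX),%edi
in place of the open-coded $X86_FEATURE_NX % 32.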
xen/arch/x86/boot/head.S
xen/arch/x86/boot/trampoline.S
xen/arch/x86/cpu/common.c
xen/arch/x86/efi/efi-boot.h
xen/arch/x86/hvm/hvm.c
xen/arch/x86/traps.c
xen/arch/x86/xstate.c
xen/include/asm-x86/amd.h
xen/include/asm-x86/asm_defns.h
xen/include/asm-x86/cpufeature.h

diff --git a/xen/arch/x86/boot/head.S b/xen/arch/x86/boot/head.S
index f63b3492139e9065007e31643bffaeec6689c539..ac4962bb07ed1a0f2557511a989942929f8b3db1 100644
@@ -142,7 +142,7 @@ __start:
         mov     %edx,sym_phys(boot_cpu_data)+CPUINFO_FEATURE_OFFSET(X86_FEATURE_LM)
 
         /* Check for availability of long mode. */
-        bt      $X86_FEATURE_LM & 0x1f,%edx
+        bt      $cpufeat_bit(X86_FEATURE_LM),%edx
         jnc     bad_cpu
 
         /* Stash TSC to calculate a good approximation of time-since-boot */
diff --git a/xen/arch/x86/boot/trampoline.S b/xen/arch/x86/boot/trampoline.S
index 8b0d9c19f0d9c9ec7efda14cbbaa8c690738b7b8..c8f32cd183dd3332417bafe841df2c9e679bbb1b 100644
@@ -89,7 +89,7 @@ trampoline_protmode_entry:
         movl    $MSR_EFER,%ecx
         rdmsr
         or      $EFER_LME|EFER_SCE,%eax   /* Long Mode + SYSCALL/SYSRET */
-        bt      $X86_FEATURE_NX % 32,%edi /* No Execute? */
+        bt      $cpufeat_bit(X86_FEATURE_NX),%edi /* No Execute? */
         jnc     1f
         btsl    $_EFER_NX,%eax  /* No Execute     */
 1:      wrmsr
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index f256444d7556cae20ac6ba4e6efd36ea90f1f665..a5caa84107e701c90663581dd2fdaed0444d9740 100644
@@ -202,7 +202,7 @@ static void __init early_cpu_detect(void)
        c->x86_mask = tfms & 15;
        cap0 &= ~cleared_caps[0];
        cap4 &= ~cleared_caps[4];
-       if (cap0 & (1<<19))
+       if (cap0 & cpufeat_mask(X86_FEATURE_CLFLSH))
                c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
        /* Leaf 0x1 capabilities filled in early for Xen. */
        c->x86_capability[0] = cap0;
@@ -263,7 +263,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
        if ( c->cpuid_level >= 0x00000007 ) {
                u32 dummy;
                cpuid_count(0x00000007, 0, &dummy, &ebx, &dummy, &dummy);
-               c->x86_capability[X86_FEATURE_FSGSBASE / 32] = ebx;
+               c->x86_capability[cpufeat_word(X86_FEATURE_FSGSBASE)] = ebx;
        }
 }
 
diff --git a/xen/arch/x86/efi/efi-boot.h b/xen/arch/x86/efi/efi-boot.h
index 4c7f383c0438087ac0ca25c1d4bd51897a8991b3..d8ca86278a745e15c20c182dcb42ed0fbb982d25 100644
@@ -225,7 +225,7 @@ static void __init noreturn efi_arch_post_exit_boot(void)
     asm volatile("pushq $0\n\tpopfq");
     rdmsrl(MSR_EFER, efer);
     efer |= EFER_SCE;
-    if ( cpuid_ext_features & (1 << (X86_FEATURE_NX & 0x1f)) )
+    if ( cpuid_ext_features & cpufeat_mask(X86_FEATURE_NX) )
         efer |= EFER_NX;
     wrmsrl(MSR_EFER, efer);
     write_cr0(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP |
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 3fa2280cdc401b01c2a5ce51bfee6defb88ed204..965bfbfbaf24528f9e47c7e9c7f20423fa17cf02 100644
@@ -1850,8 +1850,8 @@ static const char * hvm_efer_valid(const struct vcpu *v, uint64_t value,
     }
     else
     {
-        ext1_edx = boot_cpu_data.x86_capability[X86_FEATURE_LM / 32];
-        ext1_ecx = boot_cpu_data.x86_capability[X86_FEATURE_SVM / 32];
+        ext1_edx = boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_LM)];
+        ext1_ecx = boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_SVM)];
     }
 
     /*
@@ -1917,9 +1917,9 @@ static unsigned long hvm_cr4_guest_reserved_bits(const struct vcpu *v,
     }
     else
     {
-        leaf1_edx = boot_cpu_data.x86_capability[X86_FEATURE_VME / 32];
-        leaf1_ecx = boot_cpu_data.x86_capability[X86_FEATURE_PCID / 32];
-        leaf7_0_ebx = boot_cpu_data.x86_capability[X86_FEATURE_FSGSBASE / 32];
+        leaf1_edx = boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_VME)];
+        leaf1_ecx = boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_PCID)];
+        leaf7_0_ebx = boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_FSGSBASE)];
     }
 
     return ~(unsigned long)
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 80935358bf7a82211d6882756723296138444c92..b32f6960a2464a91922082dc242dbff902ea025b 100644
@@ -935,7 +935,7 @@ void pv_cpuid(struct cpu_user_regs *regs)
             goto unsupported;
         if ( regs->_ecx == 1 )
         {
-            a &= boot_cpu_data.x86_capability[X86_FEATURE_XSAVEOPT / 32];
+            a &= boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_XSAVEOPT)];
             if ( !cpu_has_xsaves )
                 b = c = d = 0;
         }
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 9ddff90433a77fdc93ff1782ee6165f456d6b3f5..c6238d9eb16b385ace00b53f626518d7deb31f91 100644
@@ -326,9 +326,9 @@ void xstate_init(struct cpuinfo_x86 *c)
     eax &= (cpufeat_mask(X86_FEATURE_XSAVEOPT) |
             cpufeat_mask(X86_FEATURE_XSAVEC));
 
-    c->x86_capability[X86_FEATURE_XSAVEOPT / 32] = eax;
+    c->x86_capability[cpufeat_word(X86_FEATURE_XSAVEOPT)] = eax;
 
-    BUG_ON(eax != boot_cpu_data.x86_capability[X86_FEATURE_XSAVEOPT / 32]);
+    BUG_ON(eax != boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_XSAVEOPT)]);
 }
 
 static bool_t valid_xcr0(u64 xcr0)
diff --git a/xen/include/asm-x86/amd.h b/xen/include/asm-x86/amd.h
index 87b6e3e7a8176739a3dc9982bc2ba7c8b161bf81..bb23bea76a4c9d46983162fda742ac26f43d82df 100644
@@ -9,95 +9,92 @@
 
 /* CPUID masked for use by AMD-V Extended Migration */
 
-#define X86_FEATURE_BITPOS(_feature_) ((_feature_) % 32)
-#define __bit(_x_) (1U << X86_FEATURE_BITPOS(_x_))
-
 /* Family 0Fh, Revision C */
 #define AMD_FEATURES_K8_REV_C_ECX  0
-#define AMD_FEATURES_K8_REV_C_EDX (                                    \
-       __bit(X86_FEATURE_FPU)      | __bit(X86_FEATURE_VME)   |        \
-       __bit(X86_FEATURE_DE)       | __bit(X86_FEATURE_PSE)   |        \
-       __bit(X86_FEATURE_TSC)      | __bit(X86_FEATURE_MSR)   |        \
-       __bit(X86_FEATURE_PAE)      | __bit(X86_FEATURE_MCE)   |        \
-       __bit(X86_FEATURE_CX8)      | __bit(X86_FEATURE_APIC)  |        \
-       __bit(X86_FEATURE_SEP)      | __bit(X86_FEATURE_MTRR)  |        \
-       __bit(X86_FEATURE_PGE)      | __bit(X86_FEATURE_MCA)   |        \
-       __bit(X86_FEATURE_CMOV)     | __bit(X86_FEATURE_PAT)   |        \
-       __bit(X86_FEATURE_PSE36)    | __bit(X86_FEATURE_CLFLSH)|        \
-       __bit(X86_FEATURE_MMX)      | __bit(X86_FEATURE_FXSR)  |        \
-       __bit(X86_FEATURE_XMM)      | __bit(X86_FEATURE_XMM2))
-#define AMD_EXTFEATURES_K8_REV_C_ECX  0 
-#define AMD_EXTFEATURES_K8_REV_C_EDX  (                                        \
-       __bit(X86_FEATURE_FPU)      | __bit(X86_FEATURE_VME)   |        \
-       __bit(X86_FEATURE_DE)       | __bit(X86_FEATURE_PSE)   |        \
-       __bit(X86_FEATURE_TSC)      | __bit(X86_FEATURE_MSR)   |        \
-       __bit(X86_FEATURE_PAE)      | __bit(X86_FEATURE_MCE)   |        \
-       __bit(X86_FEATURE_CX8)      | __bit(X86_FEATURE_APIC)  |        \
-       __bit(X86_FEATURE_SYSCALL)  | __bit(X86_FEATURE_MTRR)  |        \
-       __bit(X86_FEATURE_PGE)      | __bit(X86_FEATURE_MCA)   |        \
-       __bit(X86_FEATURE_CMOV)     | __bit(X86_FEATURE_PAT)   |        \
-       __bit(X86_FEATURE_PSE36)    | __bit(X86_FEATURE_NX)    |        \
-       __bit(X86_FEATURE_MMXEXT)   | __bit(X86_FEATURE_MMX)   |        \
-       __bit(X86_FEATURE_FXSR)     | __bit(X86_FEATURE_LM)    |        \
-       __bit(X86_FEATURE_3DNOWEXT) | __bit(X86_FEATURE_3DNOW))
+#define AMD_FEATURES_K8_REV_C_EDX (                                        \
+       cpufeat_mask(X86_FEATURE_FPU)   | cpufeat_mask(X86_FEATURE_VME)   | \
+       cpufeat_mask(X86_FEATURE_DE)    | cpufeat_mask(X86_FEATURE_PSE)   | \
+       cpufeat_mask(X86_FEATURE_TSC)   | cpufeat_mask(X86_FEATURE_MSR)   | \
+       cpufeat_mask(X86_FEATURE_PAE)   | cpufeat_mask(X86_FEATURE_MCE)   | \
+       cpufeat_mask(X86_FEATURE_CX8)   | cpufeat_mask(X86_FEATURE_APIC)  | \
+       cpufeat_mask(X86_FEATURE_SEP)   | cpufeat_mask(X86_FEATURE_MTRR)  | \
+       cpufeat_mask(X86_FEATURE_PGE)   | cpufeat_mask(X86_FEATURE_MCA)   | \
+       cpufeat_mask(X86_FEATURE_CMOV)  | cpufeat_mask(X86_FEATURE_PAT)   | \
+       cpufeat_mask(X86_FEATURE_PSE36) | cpufeat_mask(X86_FEATURE_CLFLSH)| \
+       cpufeat_mask(X86_FEATURE_MMX)   | cpufeat_mask(X86_FEATURE_FXSR)  | \
+       cpufeat_mask(X86_FEATURE_XMM)   | cpufeat_mask(X86_FEATURE_XMM2))
+#define AMD_EXTFEATURES_K8_REV_C_ECX  0
+#define AMD_EXTFEATURES_K8_REV_C_EDX  (                                               \
+       cpufeat_mask(X86_FEATURE_FPU)      | cpufeat_mask(X86_FEATURE_VME)   | \
+       cpufeat_mask(X86_FEATURE_DE)       | cpufeat_mask(X86_FEATURE_PSE)   | \
+       cpufeat_mask(X86_FEATURE_TSC)      | cpufeat_mask(X86_FEATURE_MSR)   | \
+       cpufeat_mask(X86_FEATURE_PAE)      | cpufeat_mask(X86_FEATURE_MCE)   | \
+       cpufeat_mask(X86_FEATURE_CX8)      | cpufeat_mask(X86_FEATURE_APIC)  | \
+       cpufeat_mask(X86_FEATURE_SYSCALL)  | cpufeat_mask(X86_FEATURE_MTRR)  | \
+       cpufeat_mask(X86_FEATURE_PGE)      | cpufeat_mask(X86_FEATURE_MCA)   | \
+       cpufeat_mask(X86_FEATURE_CMOV)     | cpufeat_mask(X86_FEATURE_PAT)   | \
+       cpufeat_mask(X86_FEATURE_PSE36)    | cpufeat_mask(X86_FEATURE_NX)    | \
+       cpufeat_mask(X86_FEATURE_MMXEXT)   | cpufeat_mask(X86_FEATURE_MMX)   | \
+       cpufeat_mask(X86_FEATURE_FXSR)     | cpufeat_mask(X86_FEATURE_LM)    | \
+       cpufeat_mask(X86_FEATURE_3DNOWEXT) | cpufeat_mask(X86_FEATURE_3DNOW))
 
 /* Family 0Fh, Revision D */
 #define AMD_FEATURES_K8_REV_D_ECX         AMD_FEATURES_K8_REV_C_ECX
 #define AMD_FEATURES_K8_REV_D_EDX         AMD_FEATURES_K8_REV_C_EDX
 #define AMD_EXTFEATURES_K8_REV_D_ECX     (AMD_EXTFEATURES_K8_REV_C_ECX |\
-       __bit(X86_FEATURE_LAHF_LM))
+       cpufeat_mask(X86_FEATURE_LAHF_LM))
 #define AMD_EXTFEATURES_K8_REV_D_EDX     (AMD_EXTFEATURES_K8_REV_C_EDX |\
-       __bit(X86_FEATURE_FFXSR))
+       cpufeat_mask(X86_FEATURE_FFXSR))
 
 /* Family 0Fh, Revision E */
 #define AMD_FEATURES_K8_REV_E_ECX        (AMD_FEATURES_K8_REV_D_ECX |  \
-       __bit(X86_FEATURE_XMM3))
+       cpufeat_mask(X86_FEATURE_XMM3))
 #define AMD_FEATURES_K8_REV_E_EDX        (AMD_FEATURES_K8_REV_D_EDX |  \
-       __bit(X86_FEATURE_HT))
+       cpufeat_mask(X86_FEATURE_HT))
 #define AMD_EXTFEATURES_K8_REV_E_ECX     (AMD_EXTFEATURES_K8_REV_D_ECX |\
-       __bit(X86_FEATURE_CMP_LEGACY)) 
+       cpufeat_mask(X86_FEATURE_CMP_LEGACY))
 #define AMD_EXTFEATURES_K8_REV_E_EDX      AMD_EXTFEATURES_K8_REV_D_EDX
 
 /* Family 0Fh, Revision F */
 #define AMD_FEATURES_K8_REV_F_ECX        (AMD_FEATURES_K8_REV_E_ECX |  \
-       __bit(X86_FEATURE_CX16))
+       cpufeat_mask(X86_FEATURE_CX16))
 #define AMD_FEATURES_K8_REV_F_EDX         AMD_FEATURES_K8_REV_E_EDX
 #define AMD_EXTFEATURES_K8_REV_F_ECX     (AMD_EXTFEATURES_K8_REV_E_ECX |\
-       __bit(X86_FEATURE_SVM) | __bit(X86_FEATURE_EXTAPIC) |           \
-       __bit(X86_FEATURE_CR8_LEGACY))
+       cpufeat_mask(X86_FEATURE_SVM) | cpufeat_mask(X86_FEATURE_EXTAPIC) | \
+       cpufeat_mask(X86_FEATURE_CR8_LEGACY))
 #define AMD_EXTFEATURES_K8_REV_F_EDX     (AMD_EXTFEATURES_K8_REV_E_EDX |\
-       __bit(X86_FEATURE_RDTSCP))
+       cpufeat_mask(X86_FEATURE_RDTSCP))
 
 /* Family 0Fh, Revision G */
 #define AMD_FEATURES_K8_REV_G_ECX         AMD_FEATURES_K8_REV_F_ECX
 #define AMD_FEATURES_K8_REV_G_EDX         AMD_FEATURES_K8_REV_F_EDX
 #define AMD_EXTFEATURES_K8_REV_G_ECX     (AMD_EXTFEATURES_K8_REV_F_ECX |\
-       __bit(X86_FEATURE_3DNOWPREFETCH))
+       cpufeat_mask(X86_FEATURE_3DNOWPREFETCH))
 #define AMD_EXTFEATURES_K8_REV_G_EDX      AMD_EXTFEATURES_K8_REV_F_EDX
 
 /* Family 10h, Revision B */
 #define AMD_FEATURES_FAM10h_REV_B_ECX    (AMD_FEATURES_K8_REV_F_ECX |  \
-       __bit(X86_FEATURE_POPCNT) | __bit(X86_FEATURE_MWAIT))
+       cpufeat_mask(X86_FEATURE_POPCNT) | cpufeat_mask(X86_FEATURE_MWAIT))
 #define AMD_FEATURES_FAM10h_REV_B_EDX     AMD_FEATURES_K8_REV_F_EDX
 #define AMD_EXTFEATURES_FAM10h_REV_B_ECX (AMD_EXTFEATURES_K8_REV_F_ECX |\
-       __bit(X86_FEATURE_ABM) | __bit(X86_FEATURE_SSE4A) |             \
-       __bit(X86_FEATURE_MISALIGNSSE) | __bit(X86_FEATURE_OSVW) |      \
-       __bit(X86_FEATURE_IBS))
+       cpufeat_mask(X86_FEATURE_ABM) | cpufeat_mask(X86_FEATURE_SSE4A) | \
+       cpufeat_mask(X86_FEATURE_MISALIGNSSE) | cpufeat_mask(X86_FEATURE_OSVW) |\
+       cpufeat_mask(X86_FEATURE_IBS))
 #define AMD_EXTFEATURES_FAM10h_REV_B_EDX (AMD_EXTFEATURES_K8_REV_F_EDX |\
-       __bit(X86_FEATURE_PAGE1GB))
+       cpufeat_mask(X86_FEATURE_PAGE1GB))
 
 /* Family 10h, Revision C */
 #define AMD_FEATURES_FAM10h_REV_C_ECX     AMD_FEATURES_FAM10h_REV_B_ECX
 #define AMD_FEATURES_FAM10h_REV_C_EDX     AMD_FEATURES_FAM10h_REV_B_EDX
 #define AMD_EXTFEATURES_FAM10h_REV_C_ECX (AMD_EXTFEATURES_FAM10h_REV_B_ECX |\
-       __bit(X86_FEATURE_SKINIT) | __bit(X86_FEATURE_WDT))
+       cpufeat_mask(X86_FEATURE_SKINIT) | cpufeat_mask(X86_FEATURE_WDT))
 #define AMD_EXTFEATURES_FAM10h_REV_C_EDX  AMD_EXTFEATURES_FAM10h_REV_B_EDX
 
 /* Family 11h, Revision B */
 #define AMD_FEATURES_FAM11h_REV_B_ECX     AMD_FEATURES_K8_REV_G_ECX
 #define AMD_FEATURES_FAM11h_REV_B_EDX     AMD_FEATURES_K8_REV_G_EDX
 #define AMD_EXTFEATURES_FAM11h_REV_B_ECX (AMD_EXTFEATURES_K8_REV_G_ECX |\
-       __bit(X86_FEATURE_SKINIT))
+       cpufeat_mask(X86_FEATURE_SKINIT))
 #define AMD_EXTFEATURES_FAM11h_REV_B_EDX  AMD_EXTFEATURES_K8_REV_G_EDX
 
 /* AMD errata checking
diff --git a/xen/include/asm-x86/asm_defns.h b/xen/include/asm-x86/asm_defns.h
index 7c8c2c0bc71b994c3f0aef0524924e05e784364a..d750f03bcbc76d0fe404de83f182d7ae9916ce97 100644
@@ -137,7 +137,7 @@ void ret_from_intr(void);
 #endif
 
 #define CPUINFO_FEATURE_OFFSET(feature)           \
-        ((((feature) >> 3) & ~3) + CPUINFO_features)
+    (CPUINFO_features + (cpufeat_word(feature) * 4))
 
 #else
 
diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index e94b04d881fb49fc32af40846a72a729592fe3d5..e823954c49ccdc0312e8be58633903e9275a247c 100644
@@ -9,6 +9,8 @@
 #define __ASM_I386_CPUFEATURE_H
 #endif
 
+#include <xen/const.h>
+
 #define NCAPINTS       8       /* N 32-bit words worth of info */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
 #define X86_FEATURE_ADX                (7*32+19) /* ADCX, ADOX instructions */
 #define X86_FEATURE_SMAP       (7*32+20) /* Supervisor Mode Access Prevention */
 
+#define cpufeat_word(idx)      ((idx) / 32)
+#define cpufeat_bit(idx)       ((idx) % 32)
+#define cpufeat_mask(idx)      (_AC(1, U) << cpufeat_bit(idx))
+
 #if !defined(__ASSEMBLY__) && !defined(X86_FEATURES_ONLY)
 #include <xen/bitops.h>
 
 #define cpu_has(c, bit)                test_bit(bit, (c)->x86_capability)
 #define boot_cpu_has(bit)      test_bit(bit, boot_cpu_data.x86_capability)
-#define cpufeat_mask(idx)       (1u << ((idx) & 31))
 
 #define CPUID_MWAIT_LEAF                5
 #define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
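
A worked example for the CPUINFO_FEATURE_OFFSET change in asm_defns.h: the old
expression (((feature) >> 3) & ~3) rounds feature/8 down to a multiple of 4,
which is exactly the byte offset cpufeat_word(feature) * 4 of the containing
capability word. A sketch, assuming X86_FEATURE_LM is 1*32 + 29 as in Xen's
numbering of CPUID 0x80000001 EDX bit 29:

    /* Assumed value, for illustration only. */
    #define X86_FEATURE_LM  (1*32 + 29)   /* == 61 */

    /* old: ((61 >> 3) & ~3) + CPUINFO_features == (7 & ~3) + CPUINFO_features == CPUINFO_features + 4 */
    /* new: CPUINFO_features + (cpufeat_word(61) * 4) == CPUINFO_features + (1 * 4) == CPUINFO_features + 4 */
    /* Both select the second 32-bit word of x86_capability, where the LM bit lives. */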