x86: Refactor conditional guard in probe_cpuid_faulting()
author    Alejandro Vallejo <alejandro.vallejo@cloud.com>
          Tue, 16 May 2023 15:18:31 +0000 (17:18 +0200)
committer Jan Beulich <jbeulich@suse.com>
          Tue, 16 May 2023 15:18:31 +0000 (17:18 +0200)
Move vendor-specific checks to the vendor-specific callers. While at it,
move the synth cap setters to the callers too, as that's needed for a
later patch and isn't a functional change either.

No functional change.

Signed-off-by: Alejandro Vallejo <alejandro.vallejo@cloud.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
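
For reference, below is a minimal standalone sketch of the call pattern this
patch establishes. Only the gating conditions and cap assignments mirror the
hunks that follow; the stub bodies, the "fam" parameter, the stand-in
LCAP_faulting value and the main() driver are hypothetical simplifications,
not the real Xen code.

/*
 * Sketch of the refactored layout: the common probe only touches the
 * MSRs, while each vendor caller carries its own applicability check
 * and sets the levelling caps itself.
 */
#include <stdbool.h>
#include <stdio.h>

#define LCAP_faulting 0x01              /* stand-in for the real cap bit */

static unsigned int expected_levelling_cap, levelling_caps;
static bool cpu_has_hypervisor;         /* set during CPU identification in Xen */

/* Common probe: no vendor-specific checks, no cap bookkeeping. */
static bool probe_cpuid_faulting(void)
{
        /*
         * rdmsr_safe(MSR_INTEL_PLATFORM_INFO, ...) etc. in the real code;
         * stubbed out here.
         */
        return false;
}

/*
 * AMD/Hygon caller: CPUID faulting is an Intel feature, so it can only
 * appear (emulated) when Xen itself runs virtualised.
 */
static void amd_init_levelling(void)
{
        if (cpu_has_hypervisor && probe_cpuid_faulting()) {
                expected_levelling_cap |= LCAP_faulting;
                levelling_caps |= LCAP_faulting;
                return;
        }
        /* probe_masking_msrs() in the real code */
}

/*
 * Intel caller: skip the probe on bare-metal Fam0f, where reading the
 * MSRs raises spurious #GP(0).
 */
static void intel_init_levelling(unsigned int fam)
{
        if ((fam != 0xf || cpu_has_hypervisor) && probe_cpuid_faulting()) {
                expected_levelling_cap |= LCAP_faulting;
                levelling_caps |= LCAP_faulting;
                return;
        }
        /* probe_masking_msrs() in the real code */
}

int main(void)
{
        amd_init_levelling();
        intel_init_levelling(0xf);
        printf("expected=%#x caps=%#x\n", expected_levelling_cap, levelling_caps);
        return 0;
}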
xen/arch/x86/cpu/amd.c
xen/arch/x86/cpu/common.c
xen/arch/x86/cpu/intel.c

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 9a1a3858edd4998509f8f9662b18f622f6ab7563..98fb80ee881886d3ab47a54499be10a58f00ae1b 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -271,8 +271,19 @@ static void __init noinline amd_init_levelling(void)
 {
        const struct cpuidmask *m = NULL;
 
-       if (probe_cpuid_faulting())
+       /*
+        * If there's support for CpuidUserDis or CPUID faulting then
+        * we can skip levelling because CPUID accesses are trapped anyway.
+        *
+        * CPUID faulting is an Intel feature analogous to CpuidUserDis, so
+        * that can only be present when Xen is itself virtualized (because
+        * it can be emulated)
+        */
+       if (cpu_has_hypervisor && probe_cpuid_faulting()) {
+               expected_levelling_cap |= LCAP_faulting;
+               levelling_caps |= LCAP_faulting;
                return;
+       }
 
        probe_masking_msrs();
 
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index edc4db1335ebc89f4829e54e1a802995449ed979..52646f7dfb782d694281b3d47fb9e4221c56439d 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -131,17 +131,6 @@ bool __init probe_cpuid_faulting(void)
        uint64_t val;
        int rc;
 
-       /*
-        * Don't bother looking for CPUID faulting if we aren't virtualised on
-        * AMD or Hygon hardware - it won't be present.  Likewise for Fam0F
-        * Intel hardware.
-        */
-       if (((boot_cpu_data.x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) ||
-            ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
-             boot_cpu_data.x86 == 0xf)) &&
-           !cpu_has_hypervisor)
-               return false;
-
        if ((rc = rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val)) == 0)
                raw_cpu_policy.platform_info.cpuid_faulting =
                        val & MSR_PLATFORM_INFO_CPUID_FAULTING;
@@ -155,8 +144,6 @@ bool __init probe_cpuid_faulting(void)
                return false;
        }
 
-       expected_levelling_cap |= LCAP_faulting;
-       levelling_caps |=  LCAP_faulting;
        setup_force_cpu_cap(X86_FEATURE_CPUID_FAULTING);
 
        return true;
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 71fc1a1e189947a1ed9a8bc296fcc1fa6fece3ef..168cd58f36134b0891af233b31be10892864d55e 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -226,8 +226,18 @@ static void cf_check intel_ctxt_switch_masking(const struct vcpu *next)
  */
 static void __init noinline intel_init_levelling(void)
 {
-       if (probe_cpuid_faulting())
+       /*
+        * Intel Fam0f is old enough that probing for CPUID faulting support
+        * introduces spurious #GP(0) when the appropriate MSRs are read,
+        * so skip it altogether. In the case where Xen is virtualized these
+        * MSRs may be emulated though, so we allow it in that case.
+        */
+       if ((boot_cpu_data.x86 != 0xf || cpu_has_hypervisor) &&
+           probe_cpuid_faulting()) {
+               expected_levelling_cap |= LCAP_faulting;
+               levelling_caps |= LCAP_faulting;
                return;
+       }
 
        probe_masking_msrs();