x86/CPU: convert vendor hook invocations to altcall
author    Jan Beulich <jbeulich@suse.com>
          Mon, 5 Feb 2024 09:48:11 +0000 (10:48 +0100)
committer Andrew Cooper <andrew.cooper3@citrix.com>
          Tue, 9 Apr 2024 15:48:19 +0000 (16:48 +0100)
While not performance critical, these hook invocations still want
converting: this way all pre-filled struct cpu_dev instances can become
__initconst_cf_clobber, allowing a further 8 ENDBR instructions to be
eliminated during the 2nd phase of alternatives patching (besides moving
previously resident data to .init.*).

Since all use sites need touching anyway, take the opportunity to also
address a Misra C:2012 Rule 5.5 violation (the identifier clashed with
Xen's this_cpu() macro) by renaming the this_cpu static variable.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
(cherry picked from commit 660f8a75013c947fbe5358a640032a1f9f1eece5)
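For illustration only, a minimal stand-alone sketch of the conversion pattern
(not Xen code: the struct layout, the example hooks, and the trivial
alternative_vcall() stand-in are simplified assumptions made so the example
compiles on its own):

    /*
     * Illustrative sketch -- not from the Xen tree.  In Xen,
     * alternative_vcall() records the indirect call site so the
     * alternatives framework can later rewrite it; here it is
     * modelled as a plain macro so the example builds stand-alone.
     */
    #include <stdio.h>

    struct cpu_dev {
            void (*c_early_init)(unsigned int *caps);
            void (*c_init)(unsigned int *caps);
    };

    /* Stand-in for Xen's macro; the real call site is patched at boot. */
    #define alternative_vcall(fn, ...) ((fn)(__VA_ARGS__))

    static void early_init_example(unsigned int *caps) { *caps |= 1u; }
    static void init_example(unsigned int *caps)       { *caps |= 2u; }

    /* Pre-filled instance; __initconst_cf_clobber in the real patch. */
    static const struct cpu_dev example_cpu_dev = {
            .c_early_init = early_init_example,
            .c_init       = init_example,
    };

    /* By-value copy replaces the old this_cpu pointer; __ro_after_init in Xen. */
    static struct cpu_dev actual_cpu;

    int main(void)
    {
            unsigned int caps = 0;

            actual_cpu = example_cpu_dev;  /* vendor selection, as in early_cpu_init() */

            if (actual_cpu.c_early_init)
                    alternative_vcall(actual_cpu.c_early_init, &caps);
            if (actual_cpu.c_init)
                    alternative_vcall(actual_cpu.c_init, &caps);

            printf("caps = %#x\n", caps);
            return 0;
    }

In the real tree, routing these calls through the alternatives machinery means
that, once no indirect uses of the hooks remain, the second patching phase can
clobber the ENDBR instructions at the hook entry points, while the pre-filled
vendor structures become discardable init-only data.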

xen/arch/x86/cpu/amd.c
xen/arch/x86/cpu/centaur.c
xen/arch/x86/cpu/common.c
xen/arch/x86/cpu/hygon.c
xen/arch/x86/cpu/intel.c
xen/arch/x86/cpu/shanghai.c

index d5e9ad75987ed0b1b26833211d0a2e1529d01864..2838725bab98671bc82c4326f78c2d929b4f892e 100644 (file)
@@ -1286,7 +1286,7 @@ static void cf_check init_amd(struct cpuinfo_x86 *c)
        amd_log_freq(c);
 }
 
-const struct cpu_dev amd_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber amd_cpu_dev = {
        .c_early_init   = early_init_amd,
        .c_init         = init_amd,
 };
index eac49d78db6244bed5ada2c4942497aa73024717..750168d1e81a1618b1e3a4a0169177d7ff86f4bd 100644 (file)
@@ -54,6 +54,6 @@ static void cf_check init_centaur(struct cpuinfo_x86 *c)
                init_c3(c);
 }
 
-const struct cpu_dev centaur_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber centaur_cpu_dev = {
        .c_init         = init_centaur,
 };
index 60e472da268f878b95674a1259ed47d2577b9e56..88855f5773ee9bdf996fbfcd7941451b38700c37 100644 (file)
@@ -115,10 +115,10 @@ static void cf_check default_init(struct cpuinfo_x86 * c)
        __clear_bit(X86_FEATURE_SEP, c->x86_capability);
 }
 
-static const struct cpu_dev default_cpu = {
+static const struct cpu_dev __initconst_cf_clobber __used default_cpu = {
        .c_init = default_init,
 };
-static const struct cpu_dev *this_cpu = &default_cpu;
+static struct cpu_dev __ro_after_init actual_cpu;
 
 static DEFINE_PER_CPU(uint64_t, msr_misc_features);
 void (* __ro_after_init ctxt_switch_masking)(const struct vcpu *next);
@@ -343,12 +343,13 @@ void __init early_cpu_init(void)
 
        c->x86_vendor = x86_cpuid_lookup_vendor(ebx, ecx, edx);
        switch (c->x86_vendor) {
-       case X86_VENDOR_INTEL:    this_cpu = &intel_cpu_dev;    break;
-       case X86_VENDOR_AMD:      this_cpu = &amd_cpu_dev;      break;
-       case X86_VENDOR_CENTAUR:  this_cpu = &centaur_cpu_dev;  break;
-       case X86_VENDOR_SHANGHAI: this_cpu = &shanghai_cpu_dev; break;
-       case X86_VENDOR_HYGON:    this_cpu = &hygon_cpu_dev;    break;
+       case X86_VENDOR_INTEL:    actual_cpu = intel_cpu_dev;    break;
+       case X86_VENDOR_AMD:      actual_cpu = amd_cpu_dev;      break;
+       case X86_VENDOR_CENTAUR:  actual_cpu = centaur_cpu_dev;  break;
+       case X86_VENDOR_SHANGHAI: actual_cpu = shanghai_cpu_dev; break;
+       case X86_VENDOR_HYGON:    actual_cpu = hygon_cpu_dev;    break;
        default:
+               actual_cpu = default_cpu;
                printk(XENLOG_ERR
                       "Unrecognised or unsupported CPU vendor '%.12s'\n",
                       c->x86_vendor_id);
@@ -434,8 +435,8 @@ static void generic_identify(struct cpuinfo_x86 *c)
        c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
        c->phys_proc_id = c->apicid;
 
-       if (this_cpu->c_early_init)
-               this_cpu->c_early_init(c);
+       if (actual_cpu.c_early_init)
+               alternative_vcall(actual_cpu.c_early_init, c);
 
        /* c_early_init() may have adjusted cpuid levels/features.  Reread. */
        c->cpuid_level = cpuid_eax(0);
@@ -540,9 +541,8 @@ void identify_cpu(struct cpuinfo_x86 *c)
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
-       if (this_cpu->c_init)
-               this_cpu->c_init(c);
-
+       if (actual_cpu.c_init)
+               alternative_vcall(actual_cpu.c_init, c);
 
        if (c == &boot_cpu_data && !opt_pku)
                setup_clear_cpu_cap(X86_FEATURE_PKU);
index 361eb6fd411b2a35a11aed7d4f44d93791b2a4fe..0c7c97ebb74c90dc9deac8184910e2d264d4027c 100644 (file)
@@ -88,7 +88,7 @@ static void cf_check init_hygon(struct cpuinfo_x86 *c)
        amd_log_freq(c);
 }
 
-const struct cpu_dev hygon_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber hygon_cpu_dev = {
        .c_early_init   = early_init_amd,
        .c_init         = init_hygon,
 };
index 532e845f66838473db9e93af7ef51271d61fee3b..2d439e0bd2fd54d4ca6c4b52554fccde46610b0a 100644 (file)
@@ -598,7 +598,7 @@ static void cf_check init_intel(struct cpuinfo_x86 *c)
                setup_clear_cpu_cap(X86_FEATURE_CLWB);
 }
 
-const struct cpu_dev intel_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber intel_cpu_dev = {
        .c_early_init   = early_init_intel,
        .c_init         = init_intel,
 };
index 95ae544f8c54f62b33ebf592270911b749465869..910f2c32f3372fd474023c4240f803c007b39f0f 100644 (file)
@@ -15,6 +15,6 @@ static void cf_check init_shanghai(struct cpuinfo_x86 *c)
     init_intel_cacheinfo(c);
 }
 
-const struct cpu_dev shanghai_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber shanghai_cpu_dev = {
     .c_init     = init_shanghai,
 };