xenbits.xensource.com Git - xen.git/commitdiff
x86/CPU: convert vendor hook invocations to altcall
authorJan Beulich <jbeulich@suse.com>
Mon, 5 Feb 2024 09:48:11 +0000 (10:48 +0100)
committerAndrew Cooper <andrew.cooper3@citrix.com>
Tue, 9 Apr 2024 15:45:01 +0000 (16:45 +0100)
While not performance critical, these hook invocations still want
converting: This way all pre-filled struct cpu_dev instances can become
__initconst_cf_clobber, thus allowing a further 8 ENDBR instructions to be eliminated
during the 2nd phase of alternatives patching (besides moving previously
resident data to .init.*).

Since all use sites need touching anyway, take the opportunity and also
address a Misra C:2012 Rule 5.5 violation: Rename the this_cpu static
variable.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
(cherry picked from commit 660f8a75013c947fbe5358a640032a1f9f1eece5)

xen/arch/x86/cpu/amd.c
xen/arch/x86/cpu/centaur.c
xen/arch/x86/cpu/common.c
xen/arch/x86/cpu/hygon.c
xen/arch/x86/cpu/intel.c
xen/arch/x86/cpu/shanghai.c

index 808cda46bc0624a023a8e3d701eb5853636d9ec7..ab92333673b9dd6f0e399e0db2e23ee74da0d7b6 100644 (file)
@@ -1307,7 +1307,7 @@ static void cf_check init_amd(struct cpuinfo_x86 *c)
        amd_log_freq(c);
 }
 
-const struct cpu_dev amd_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber amd_cpu_dev = {
        .c_early_init   = early_init_amd,
        .c_init         = init_amd,
 };
index eac49d78db6244bed5ada2c4942497aa73024717..750168d1e81a1618b1e3a4a0169177d7ff86f4bd 100644 (file)
@@ -54,6 +54,6 @@ static void cf_check init_centaur(struct cpuinfo_x86 *c)
                init_c3(c);
 }
 
-const struct cpu_dev centaur_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber centaur_cpu_dev = {
        .c_init         = init_centaur,
 };
index 61e5e52151d5be17fede6f66d3a8272fe42e6e7e..26eed2ade1e3f3e714e858ecac9c4f3cdf789d32 100644 (file)
@@ -113,10 +113,10 @@ static void cf_check default_init(struct cpuinfo_x86 * c)
        __clear_bit(X86_FEATURE_SEP, c->x86_capability);
 }
 
-static const struct cpu_dev default_cpu = {
+static const struct cpu_dev __initconst_cf_clobber __used default_cpu = {
        .c_init = default_init,
 };
-static const struct cpu_dev *this_cpu = &default_cpu;
+static struct cpu_dev __ro_after_init actual_cpu;
 
 static DEFINE_PER_CPU(uint64_t, msr_misc_features);
 void (* __ro_after_init ctxt_switch_masking)(const struct vcpu *next);
@@ -336,12 +336,13 @@ void __init early_cpu_init(bool verbose)
 
        c->x86_vendor = x86_cpuid_lookup_vendor(ebx, ecx, edx);
        switch (c->x86_vendor) {
-       case X86_VENDOR_INTEL:    this_cpu = &intel_cpu_dev;    break;
-       case X86_VENDOR_AMD:      this_cpu = &amd_cpu_dev;      break;
-       case X86_VENDOR_CENTAUR:  this_cpu = &centaur_cpu_dev;  break;
-       case X86_VENDOR_SHANGHAI: this_cpu = &shanghai_cpu_dev; break;
-       case X86_VENDOR_HYGON:    this_cpu = &hygon_cpu_dev;    break;
+       case X86_VENDOR_INTEL:    actual_cpu = intel_cpu_dev;    break;
+       case X86_VENDOR_AMD:      actual_cpu = amd_cpu_dev;      break;
+       case X86_VENDOR_CENTAUR:  actual_cpu = centaur_cpu_dev;  break;
+       case X86_VENDOR_SHANGHAI: actual_cpu = shanghai_cpu_dev; break;
+       case X86_VENDOR_HYGON:    actual_cpu = hygon_cpu_dev;    break;
        default:
+               actual_cpu = default_cpu;
                if (!verbose)
                        break;
                printk(XENLOG_ERR
@@ -448,8 +449,8 @@ static void generic_identify(struct cpuinfo_x86 *c)
        if (c->extended_cpuid_level >= 0x80000021)
                c->x86_capability[FEATURESET_e21a] = cpuid_eax(0x80000021);
 
-       if (this_cpu->c_early_init)
-               this_cpu->c_early_init(c);
+       if (actual_cpu.c_early_init)
+               alternative_vcall(actual_cpu.c_early_init, c);
 
        /* c_early_init() may have adjusted cpuid levels/features.  Reread. */
        c->cpuid_level = cpuid_eax(0);
@@ -546,9 +547,8 @@ void identify_cpu(struct cpuinfo_x86 *c)
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
-       if (this_cpu->c_init)
-               this_cpu->c_init(c);
-
+       if (actual_cpu.c_init)
+               alternative_vcall(actual_cpu.c_init, c);
 
        /*
         * The vendor-specific functions might have changed features.  Now
index 42029f214563fac13f4079079d5b7c9cd722abd4..f7508cc8fcb97ad7190bd948a35e584bfd49889d 100644 (file)
@@ -87,7 +87,7 @@ static void cf_check init_hygon(struct cpuinfo_x86 *c)
        amd_log_freq(c);
 }
 
-const struct cpu_dev hygon_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber hygon_cpu_dev = {
        .c_early_init   = early_init_amd,
        .c_init         = init_hygon,
 };
index 19466b89e8af069eddee3f99fbca47268b0a4ff4..deb7b7046497af653da21c6006d7d361ecde8b59 100644 (file)
@@ -603,7 +603,7 @@ static void cf_check init_intel(struct cpuinfo_x86 *c)
                setup_clear_cpu_cap(X86_FEATURE_CLWB);
 }
 
-const struct cpu_dev intel_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber intel_cpu_dev = {
        .c_early_init   = early_init_intel,
        .c_init         = init_intel,
 };
index 95ae544f8c54f62b33ebf592270911b749465869..910f2c32f3372fd474023c4240f803c007b39f0f 100644 (file)
@@ -15,6 +15,6 @@ static void cf_check init_shanghai(struct cpuinfo_x86 *c)
     init_intel_cacheinfo(c);
 }
 
-const struct cpu_dev shanghai_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber shanghai_cpu_dev = {
     .c_init     = init_shanghai,
 };