Even functions used on infrequently executed paths want converting: this
way all pre-filled struct cpufreq_driver instances can become
__initconst_cf_clobber, thus allowing another 15 ENDBR to be eliminated
during the 2nd phase of alternatives patching.

For acpi-cpufreq's optionally populated .get hook, make sure alternatives
patching can actually see the pointer. See also the code comment.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
(cherry picked from commit 467ae515caee491e9b6ae1da8b9b98d094955822)
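As background, the following is a minimal, self-contained sketch of the pattern
the patch relies on; the struct, functions and the ALT_CALL() stand-in below are
hypothetical, not Xen code. Hooks live in a pre-filled const structure and are
invoked through alternative_call(), which lets the alternatives patching pass
turn the indirect call into a direct one once the pointer value is known, after
which the callee's ENDBR landing pad can be clobbered.

/*
 * Illustrative sketch only -- not the Xen implementation.  In Xen, the real
 * alternative_call() emits an indirect call that alternatives patching
 * rewrites into a direct call; ALT_CALL() below is a plain stand-in.
 */
#include <stdio.h>

struct freq_ops {
    unsigned int (*get)(unsigned int cpu);   /* optional hook */
};

static unsigned int demo_get(unsigned int cpu)
{
    return 1000 + cpu;                       /* pretend frequency, in MHz */
}

/* Pre-filled, const instance: all hooks populated up front. */
static const struct freq_ops demo_ops = { .get = demo_get };

/* Stand-in for alternative_call(): here it is just the indirect call. */
#define ALT_CALL(hook, ...) ((hook)(__VA_ARGS__))

int main(void)
{
    /* The NULL check stays at the call site, as in the patched Xen code. */
    unsigned int freq = demo_ops.get ? ALT_CALL(demo_ops.get, 0) : 0;

    printf("cpu0 freq: %u MHz\n", freq);
    return 0;
}

This is also why the .get hook is populated unconditionally in
acpi_cpufreq_driver and only zapped afterwards by the new late initcall:
patching can only learn a pointer that is non-NULL at patching time.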
     return 0;
 }
-static const struct cpufreq_driver __initconstrel acpi_cpufreq_driver = {
+static const struct cpufreq_driver __initconst_cf_clobber
+acpi_cpufreq_driver = {
     .name = "acpi-cpufreq",
     .verify = acpi_cpufreq_verify,
     .target = acpi_cpufreq_target,
     .init = acpi_cpufreq_cpu_init,
     .exit = acpi_cpufreq_cpu_exit,
+    .get = get_cur_freq_on_cpu,
 };
static int __init cf_check cpufreq_driver_init(void)
 }
 presmp_initcall(cpufreq_driver_init);
+static int __init cf_check cpufreq_driver_late_init(void)
+{
+    /*
+     * While acpi_cpufreq_driver wants to unconditionally have all hooks
+     * populated for __initconst_cf_clobber to have as much of an effect as
+     * possible, zap the .get hook here (but not in cpufreq_driver_init()),
+     * until acpi_cpufreq_cpu_init() knows whether it's wanted / needed.
+     */
+    cpufreq_driver.get = NULL;
+    return 0;
+}
+__initcall(cpufreq_driver_late_init);
+
 int cpufreq_cpu_init(unsigned int cpuid)
 {
     int ret;
     return 0;
 }
-static const struct cpufreq_driver __initconstrel powernow_cpufreq_driver = {
+static const struct cpufreq_driver __initconst_cf_clobber
+powernow_cpufreq_driver = {
     .name = "powernow",
     .verify = powernow_cpufreq_verify,
     .target = powernow_cpufreq_target,
         return ret;
     op->u.get_para.cpuinfo_cur_freq =
-        cpufreq_driver.get ? cpufreq_driver.get(op->cpuid) : policy->cur;
+        cpufreq_driver.get ? alternative_call(cpufreq_driver.get, op->cpuid)
+                           : policy->cur;
     op->u.get_para.cpuinfo_max_freq = policy->cpuinfo.max_freq;
     op->u.get_para.cpuinfo_min_freq = policy->cpuinfo.min_freq;
     op->u.get_para.scaling_cur_freq = policy->cur;
         policy->cpu = cpu;
         per_cpu(cpufreq_cpu_policy, cpu) = policy;
-        ret = cpufreq_driver.init(policy);
+        ret = alternative_call(cpufreq_driver.init, policy);
         if (ret) {
             free_cpumask_var(policy->cpus);
             xfree(policy);
     cpumask_clear_cpu(cpu, cpufreq_dom->map);
     if (cpumask_empty(policy->cpus)) {
-        cpufreq_driver.exit(policy);
+        alternative_call(cpufreq_driver.exit, policy);
         free_cpumask_var(policy->cpus);
         xfree(policy);
     }
     cpumask_clear_cpu(cpu, cpufreq_dom->map);
     if (cpumask_empty(policy->cpus)) {
-        cpufreq_driver.exit(policy);
+        alternative_call(cpufreq_driver.exit, policy);
         free_cpumask_var(policy->cpus);
         xfree(policy);
     }
     policy->turbo = new_state;
     if (cpufreq_driver.update)
     {
-        ret = cpufreq_driver.update(cpuid, policy);
+        ret = alternative_call(cpufreq_driver.update, cpuid, policy);
         if (ret)
             policy->turbo = curr_state;
     }
         return -EINVAL;
     /* verify the cpu speed can be set within this limit */
-    ret = cpufreq_driver.verify(policy);
+    ret = alternative_call(cpufreq_driver.verify, policy);
     if (ret)
         return ret;
     data->max = policy->max;
     data->limits = policy->limits;
     if (cpufreq_driver.setpolicy)
-        return cpufreq_driver.setpolicy(data);
+        return alternative_call(cpufreq_driver.setpolicy, data);
     if (policy->governor != data->governor) {
         /* save old, working values */