vendor_override = -1;
}
-uint32_t pmstat_get_cx_nr(uint32_t cpuid)
+uint32_t pmstat_get_cx_nr(unsigned int cpu)
{
- return processor_powers[cpuid] ? processor_powers[cpuid]->count : 0;
+ return processor_powers[cpu] ? processor_powers[cpu]->count : 0;
}
-int pmstat_get_cx_stat(uint32_t cpuid, struct pm_cx_stat *stat)
+int pmstat_get_cx_stat(unsigned int cpu, struct pm_cx_stat *stat)
{
- struct acpi_processor_power *power = processor_powers[cpuid];
+ struct acpi_processor_power *power = processor_powers[cpu];
uint64_t idle_usage = 0, idle_res = 0;
uint64_t last_state_update_tick, current_stime, current_tick;
uint64_t usage[ACPI_PROCESSOR_MAX_POWER] = { 0 };
return 0;
}
- stat->idle_time = get_cpu_idle_time(cpuid);
+ stat->idle_time = get_cpu_idle_time(cpu);
nr = min(stat->nr, power->count);
/* Mimic the stat when detailed info hasn't been registered by dom0 */
idle_res += res[i];
}
- get_hw_residencies(cpuid, &hw_res);
+ get_hw_residencies(cpu, &hw_res);
#define PUT_xC(what, n) do { \
if ( stat->nr_##what >= n && \
return 0;
}
-int pmstat_reset_cx_stat(uint32_t cpuid)
+int pmstat_reset_cx_stat(unsigned int cpu)
{
return 0;
}
static bool __read_mostly acpi_pstate_strict;
boolean_param("acpi_pstate_strict", acpi_pstate_strict);
-static int check_est_cpu(unsigned int cpuid)
-{
- struct cpuinfo_x86 *cpu = &cpu_data[cpuid];
-
- if (cpu->x86_vendor != X86_VENDOR_INTEL ||
- !cpu_has(cpu, X86_FEATURE_EIST))
- return 0;
-
- return 1;
-}
-
static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
struct processor_performance *perf;
if (cpufreq_verbose)
printk("xen_pminfo: @acpi_cpufreq_cpu_init,"
"HARDWARE addr space\n");
- if (!check_est_cpu(cpu)) {
+ if (!cpu_has(c, X86_FEATURE_EIST)) {
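+ /* Without EIST, the fixed-hardware (MSR-based) P-state interface can't be used. */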
result = -ENODEV;
goto err_unreg;
}
}
__initcall(cpufreq_driver_late_init);
-int cpufreq_cpu_init(unsigned int cpuid)
+int cpufreq_cpu_init(unsigned int cpu)
{
- int ret;
-
/* Currently we only handle Intel, AMD and Hygon processors */
if ( boot_cpu_data.x86_vendor &
(X86_VENDOR_INTEL | X86_VENDOR_AMD | X86_VENDOR_HYGON) )
- ret = cpufreq_add_cpu(cpuid);
- else
- ret = -EFAULT;
- return ret;
+ return cpufreq_add_cpu(cpu);
+
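+ /* All other vendors are unsupported. */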
+ return -EOPNOTSUPP;
}
}
}
-static int cf_check hwp_cpufreq_update(int cpuid, struct cpufreq_policy *policy)
+static int cf_check hwp_cpufreq_update(unsigned int cpu,
+                                       struct cpufreq_policy *policy)
{
- on_selected_cpus(cpumask_of(cpuid), hwp_set_misc_turbo, policy, 1);
+ on_selected_cpus(cpumask_of(cpu), hwp_set_misc_turbo, policy, 1);
- return per_cpu(hwp_drv_data, cpuid)->ret;
+ return per_cpu(hwp_drv_data, cpu)->ret;
}
static const struct cpufreq_driver __initconst_cf_clobber
}
static int cf_check powernow_cpufreq_update(
- int cpuid, struct cpufreq_policy *policy)
+ unsigned int cpu, struct cpufreq_policy *policy)
{
- if (!cpumask_test_cpu(cpuid, &cpu_online_map))
+ if ( !cpu_online(cpu) )
return -EINVAL;
- on_selected_cpus(cpumask_of(cpuid), update_cpb, policy, 1);
+ on_selected_cpus(cpumask_of(cpu), update_cpb, policy, 1);
return 0;
}
int set_px_pminfo(uint32_t acpi_id, struct xen_processor_performance *perf)
{
- int ret=0, cpuid;
+ int ret = 0, cpu;
struct processor_pminfo *pmpt;
struct processor_performance *pxpt;
- cpuid = get_cpu_id(acpi_id);
- if ( cpuid < 0 || !perf )
+ cpu = get_cpu_id(acpi_id);
+ if ( cpu < 0 || !perf )
{
ret = -EINVAL;
goto out;
}
if ( cpufreq_verbose )
- printk("Set CPU acpi_id(%d) cpuid(%d) Px State info:\n",
- acpi_id, cpuid);
+ printk("Set CPU acpi_id(%d) cpu(%d) Px State info:\n",
+ acpi_id, cpu);
- pmpt = processor_pminfo[cpuid];
+ pmpt = processor_pminfo[cpu];
if ( !pmpt )
{
pmpt = xzalloc(struct processor_pminfo);
ret = -ENOMEM;
goto out;
}
- processor_pminfo[cpuid] = pmpt;
+ processor_pminfo[cpu] = pmpt;
}
pxpt = &pmpt->perf;
pmpt->acpi_id = acpi_id;
- pmpt->id = cpuid;
+ pmpt->id = cpu;
if ( perf->flags & XEN_PX_PCT )
{
if ( pxpt->init == XEN_PX_INIT )
{
- ret = cpufreq_limit_change(cpuid);
+ ret = cpufreq_limit_change(cpu);
goto out;
}
}
{
pxpt->init = XEN_PX_INIT;
- ret = cpufreq_cpu_init(cpuid);
+ ret = cpufreq_cpu_init(cpu);
goto out;
}
spin_unlock(cpufreq_statistic_lock);
}
-int cpufreq_statistic_init(unsigned int cpuid)
+int cpufreq_statistic_init(unsigned int cpu)
{
uint32_t i, count;
struct pm_px *pxpt;
- const struct processor_pminfo *pmpt = processor_pminfo[cpuid];
- spinlock_t *cpufreq_statistic_lock =
- &per_cpu(cpufreq_statistic_lock, cpuid);
+ const struct processor_pminfo *pmpt = processor_pminfo[cpu];
+ spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
spin_lock_init(cpufreq_statistic_lock);
spin_lock(cpufreq_statistic_lock);
- pxpt = per_cpu(cpufreq_statistic_data, cpuid);
+ pxpt = per_cpu(cpufreq_statistic_data, cpu);
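+ /* Statistics already allocated for this CPU; nothing to do. */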
if ( pxpt ) {
spin_unlock(cpufreq_statistic_lock);
return 0;
spin_unlock(cpufreq_statistic_lock);
return -ENOMEM;
}
- per_cpu(cpufreq_statistic_data, cpuid) = pxpt;
+ per_cpu(cpufreq_statistic_data, cpu) = pxpt;
pxpt->u.trans_pt = xzalloc_array(uint64_t, count * count);
if (!pxpt->u.trans_pt) {
pxpt->u.pt[i].freq = pmpt->perf.states[i].core_frequency;
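+ /* Record wall-clock and idle-time baselines for later accounting. */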
pxpt->prev_state_wall = NOW();
- pxpt->prev_idle_wall = get_cpu_idle_time(cpuid);
+ pxpt->prev_idle_wall = get_cpu_idle_time(cpu);
spin_unlock(cpufreq_statistic_lock);
return 0;
}
-void cpufreq_statistic_exit(unsigned int cpuid)
+void cpufreq_statistic_exit(unsigned int cpu)
{
struct pm_px *pxpt;
- spinlock_t *cpufreq_statistic_lock =
- &per_cpu(cpufreq_statistic_lock, cpuid);
+ spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
spin_lock(cpufreq_statistic_lock);
- pxpt = per_cpu(cpufreq_statistic_data, cpuid);
+ pxpt = per_cpu(cpufreq_statistic_data, cpu);
if (!pxpt) {
spin_unlock(cpufreq_statistic_lock);
return;
xfree(pxpt->u.trans_pt);
xfree(pxpt->u.pt);
xfree(pxpt);
- per_cpu(cpufreq_statistic_data, cpuid) = NULL;
+ per_cpu(cpufreq_statistic_data, cpu) = NULL;
spin_unlock(cpufreq_statistic_lock);
}
-void cpufreq_statistic_reset(unsigned int cpuid)
+void cpufreq_statistic_reset(unsigned int cpu)
{
uint32_t i, j, count;
struct pm_px *pxpt;
- const struct processor_pminfo *pmpt = processor_pminfo[cpuid];
- spinlock_t *cpufreq_statistic_lock =
- &per_cpu(cpufreq_statistic_lock, cpuid);
+ const struct processor_pminfo *pmpt = processor_pminfo[cpu];
+ spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
spin_lock(cpufreq_statistic_lock);
- pxpt = per_cpu(cpufreq_statistic_data, cpuid);
+ pxpt = per_cpu(cpufreq_statistic_data, cpu);
if ( !pmpt || !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt ) {
spin_unlock(cpufreq_statistic_lock);
return;
}
pxpt->prev_state_wall = NOW();
- pxpt->prev_idle_wall = get_cpu_idle_time(cpuid);
+ pxpt->prev_idle_wall = get_cpu_idle_time(cpu);
spin_unlock(cpufreq_statistic_lock);
}
return policy->cur;
}
-int cpufreq_update_turbo(int cpuid, int new_state)
+int cpufreq_update_turbo(unsigned int cpu, int new_state)
{
struct cpufreq_policy *policy;
int curr_state;
new_state != CPUFREQ_TURBO_DISABLED)
return -EINVAL;
- policy = per_cpu(cpufreq_cpu_policy, cpuid);
+ policy = per_cpu(cpufreq_cpu_policy, cpu);
if (!policy)
return -EACCES;
policy->turbo = new_state;
if (cpufreq_driver.update)
{
- ret = alternative_call(cpufreq_driver.update, cpuid, policy);
+ ret = alternative_call(cpufreq_driver.update, cpu, policy);
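+ /* Restore the previous turbo state if the driver rejected the update. */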
if (ret)
policy->turbo = curr_state;
}
}
-int cpufreq_get_turbo_status(int cpuid)
+int cpufreq_get_turbo_status(unsigned int cpu)
{
struct cpufreq_policy *policy;
- policy = per_cpu(cpufreq_cpu_policy, cpuid);
+ policy = per_cpu(cpufreq_cpu_policy, cpu);
return policy && policy->turbo == CPUFREQ_TURBO_ENABLED;
}
#define CPUFREQ_TURBO_UNSUPPORTED 0
#define CPUFREQ_TURBO_ENABLED 1
-extern int cpufreq_update_turbo(int cpuid, int new_state);
-extern int cpufreq_get_turbo_status(int cpuid);
+int cpufreq_update_turbo(unsigned int cpu, int new_state);
+int cpufreq_get_turbo_status(unsigned int cpu);
static inline int
__cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
int (*init)(struct cpufreq_policy *policy);
int (*verify)(struct cpufreq_policy *policy);
int (*setpolicy)(struct cpufreq_policy *policy);
- int (*update)(int cpuid, struct cpufreq_policy *policy);
+ int (*update)(unsigned int cpu, struct cpufreq_policy *policy);
int (*target)(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
unsigned int get_measured_perf(unsigned int cpu, unsigned int flag);
void cpufreq_residency_update(unsigned int cpu, uint8_t state);
void cpufreq_statistic_update(unsigned int cpu, uint8_t from, uint8_t to);
-int cpufreq_statistic_init(unsigned int cpuid);
-void cpufreq_statistic_exit(unsigned int cpuid);
-void cpufreq_statistic_reset(unsigned int cpuid);
+int cpufreq_statistic_init(unsigned int cpu);
+void cpufreq_statistic_exit(unsigned int cpu);
+void cpufreq_statistic_reset(unsigned int cpu);
int cpufreq_limit_change(unsigned int cpu);
DECLARE_PER_CPU(struct pm_px *, cpufreq_statistic_data);
-int cpufreq_cpu_init(unsigned int cpuid);
+int cpufreq_cpu_init(unsigned int cpu);
#endif /* __XEN_PROCESSOR_PM_H__ */
int set_px_pminfo(uint32_t acpi_id, struct xen_processor_performance *perf);
long set_cx_pminfo(uint32_t acpi_id, struct xen_processor_power *power);
-uint32_t pmstat_get_cx_nr(uint32_t cpuid);
-int pmstat_get_cx_stat(uint32_t cpuid, struct pm_cx_stat *stat);
-int pmstat_reset_cx_stat(uint32_t cpuid);
+uint32_t pmstat_get_cx_nr(unsigned int cpu);
+int pmstat_get_cx_stat(unsigned int cpu, struct pm_cx_stat *stat);
+int pmstat_reset_cx_stat(unsigned int cpu);
int do_get_pm_info(struct xen_sysctl_get_pmstat *op);
int do_pm_op(struct xen_sysctl_pm_op *op);