static int local_apic_timer_c2_ok __read_mostly = 0;
boolean_param("lapic_timer_c2_ok", local_apic_timer_c2_ok);
-static struct acpi_processor_power processor_powers[NR_CPUS];
+static struct acpi_processor_power *__read_mostly processor_powers[NR_CPUS];
static void print_acpi_power(uint32_t cpu, struct acpi_processor_power *power)
{
static void dump_cx(unsigned char key)
{
- for( int i = 0; i < num_online_cpus(); i++ )
- print_acpi_power(i, &processor_powers[i]);
+ unsigned int cpu;
+
+ for_each_online_cpu ( cpu )
+ if (processor_powers[cpu])
+ print_acpi_power(cpu, processor_powers[cpu]);
}
static int __init cpu_idle_key_init(void)
static void acpi_processor_idle(void)
{
- struct acpi_processor_power *power = NULL;
+ struct acpi_processor_power *power = processor_powers[smp_processor_id()];
struct acpi_processor_cx *cx = NULL;
int next_state;
int sleep_ticks = 0;
u32 t1, t2 = 0;
- power = &processor_powers[smp_processor_id()];
-
/*
* Interrupts must be disabled during bus mastering calculations and
* for C2/C3 transitions.
return;
}
- next_state = cpuidle_current_governor->select(power);
+ next_state = power ? cpuidle_current_governor->select(power) : -1;
if ( next_state > 0 )
{
cx = &power->states[next_state];
return -EFAULT;
}
- acpi_power = &processor_powers[cpu_id];
+ acpi_power = processor_powers[cpu_id];
+ if ( !acpi_power )
+ {
+ acpi_power = xmalloc(struct acpi_processor_power);
+ if ( !acpi_power )
+ return -ENOMEM;
+ memset(acpi_power, 0, sizeof(*acpi_power));
+ processor_powers[cpu_id] = acpi_power;
+ }
init_cx_pminfo(acpi_power);
uint32_t pmstat_get_cx_nr(uint32_t cpuid)
{
- return processor_powers[cpuid].count;
+ return processor_powers[cpuid] ? processor_powers[cpuid]->count : 0;
}
int pmstat_get_cx_stat(uint32_t cpuid, struct pm_cx_stat *stat)
{
- struct acpi_processor_power *power = &processor_powers[cpuid];
+ const struct acpi_processor_power *power = processor_powers[cpuid];
struct vcpu *v = idle_vcpu[cpuid];
uint64_t usage;
int i;
+ if ( power == NULL )
+ {
+ stat->last = 0;
+ stat->nr = 0;
+ stat->idle_time = 0;
+ return 0;
+ }
+
stat->last = (power->last_state) ?
(int)(power->last_state - &power->states[0]) : 0;
- stat->nr = processor_powers[cpuid].count;
+ stat->nr = power->count;
stat->idle_time = v->runstate.time[RUNSTATE_running];
if ( v->is_running )
stat->idle_time += NOW() - v->runstate.state_entry_time;
static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
- struct acpi_cpufreq_data *data = drv_data[policy->cpu];
- struct processor_performance *perf = &processor_pminfo[policy->cpu].perf;
+ struct acpi_cpufreq_data *data;
+ struct processor_performance *perf;
- if (!policy || !data)
+ if (!policy || !(data = drv_data[policy->cpu]) ||
+ !processor_pminfo[policy->cpu])
return -EINVAL;
+ perf = &processor_pminfo[policy->cpu]->perf;
+
cpufreq_verify_within_limits(policy, 0,
perf->states[perf->platform_limit].core_frequency * 1000);
drv_data[cpu] = data;
- data->acpi_data = &processor_pminfo[cpu].perf;
+ data->acpi_data = &processor_pminfo[cpu]->perf;
perf = data->acpi_data;
policy->shared_type = perf->shared_type;
int cpufreq_limit_change(unsigned int cpu)
{
-    struct processor_performance *perf = &processor_pminfo[cpu].perf;
+    struct processor_performance *perf;
    struct cpufreq_policy *data = cpufreq_cpu_policy[cpu];
    struct cpufreq_policy policy;
-    if (!cpu_online(cpu) || !data)
+    if (!cpu_online(cpu) || !data || !processor_pminfo[cpu])
        return -ENODEV;
+
+    /* Compute the member address only after the NULL check above. */
+    perf = &processor_pminfo[cpu]->perf;
+
    if ((perf->platform_limit < 0) ||
unsigned int j;
struct cpufreq_policy new_policy;
struct cpufreq_policy *policy;
- struct processor_performance *perf = &processor_pminfo[cpu].perf;
+ struct processor_performance *perf = &processor_pminfo[cpu]->perf;
/* to protect the case when Px was not controlled by xen */
- if (!(perf->init & XEN_PX_INIT))
+ if (!processor_pminfo[cpu] || !(perf->init & XEN_PX_INIT))
return 0;
if (cpu_is_offline(cpu) || cpufreq_cpu_policy[cpu])
{
unsigned int dom;
struct cpufreq_policy *policy;
- struct processor_performance *perf = &processor_pminfo[cpu].perf;
+ struct processor_performance *perf = &processor_pminfo[cpu]->perf;
/* to protect the case when Px was not controlled by xen */
- if (!(perf->init & XEN_PX_INIT))
+ if (!processor_pminfo[cpu] || !(perf->init & XEN_PX_INIT))
return 0;
if (cpu_is_offline(cpu) || !cpufreq_cpu_policy[cpu])
#define MSR_PSTATE_CTRL 0xc0010062 /* Pstate control MSR */
#define MSR_PSTATE_CUR_LIMIT 0xc0010061 /* pstate current limit MSR */
-extern struct processor_pminfo processor_pminfo[NR_CPUS];
-extern struct cpufreq_policy *cpufreq_cpu_policy[NR_CPUS];
-
struct powernow_cpufreq_data {
struct processor_performance *acpi_data;
struct cpufreq_frequency_table *freq_table;
drv_data[cpu] = data;
- data->acpi_data = &processor_pminfo[cpu].perf;
+ data->acpi_data = &processor_pminfo[cpu]->perf;
perf = data->acpi_data;
policy->shared_type = perf->shared_type;
}
if (ret)
return ret;
- if (max_dom < processor_pminfo[i].perf.domain_info.domain)
- max_dom = processor_pminfo[i].perf.domain_info.domain;
+ if (max_dom < processor_pminfo[i]->perf.domain_info.domain)
+ max_dom = processor_pminfo[i]->perf.domain_info.domain;
}
max_dom++;
/* get cpumask of each psd domain */
for_each_online_cpu(i) {
- __set_bit(processor_pminfo[i].perf.domain_info.domain, dom_mask);
- cpu_set(i, pt[processor_pminfo[i].perf.domain_info.domain]);
+ __set_bit(processor_pminfo[i]->perf.domain_info.domain, dom_mask);
+ cpu_set(i, pt[processor_pminfo[i]->perf.domain_info.domain]);
}
for_each_online_cpu(i)
- processor_pminfo[i].perf.shared_cpu_map =
- pt[processor_pminfo[i].perf.domain_info.domain];
+ processor_pminfo[i]->perf.shared_cpu_map =
+ pt[processor_pminfo[i]->perf.domain_info.domain];
cpufreq_driver = &powernow_cpufreq_driver;
#include <public/sysctl.h>
struct cpufreq_driver *cpufreq_driver;
-struct processor_pminfo processor_pminfo[NR_CPUS];
-struct cpufreq_policy *cpufreq_cpu_policy[NR_CPUS];
+struct processor_pminfo *__read_mostly processor_pminfo[NR_CPUS];
+struct cpufreq_policy *__read_mostly cpufreq_cpu_policy[NR_CPUS];
/*********************************************************************
* Px STATISTIC INFO *
now = NOW();
for_each_cpu_mask(i, cpumask) {
- struct pm_px *pxpt = &px_statistic_data[i];
- uint32_t statnum = processor_pminfo[i].perf.state_count;
+ struct pm_px *pxpt = px_statistic_data[i];
+ struct processor_pminfo *pmpt = processor_pminfo[i];
uint64_t total_idle_ns;
uint64_t tmp_idle_ns;
+ if ( !pxpt || !pmpt )
+ continue;
+
total_idle_ns = get_cpu_idle_time(i);
tmp_idle_ns = total_idle_ns - pxpt->prev_idle_wall;
pxpt->u.pt[from].residency += now - pxpt->prev_state_wall;
pxpt->u.pt[from].residency -= tmp_idle_ns;
- (*(pxpt->u.trans_pt + from*statnum + to))++;
+ (*(pxpt->u.trans_pt + from * pmpt->perf.state_count + to))++;
pxpt->prev_state_wall = now;
pxpt->prev_idle_wall = total_idle_ns;
int px_statistic_init(unsigned int cpuid)
{
    uint32_t i, count;
-    struct pm_px *pxpt = &px_statistic_data[cpuid];
-    struct processor_pminfo *pmpt = &processor_pminfo[cpuid];
-    count = pmpt->perf.state_count;
+    struct pm_px *pxpt = px_statistic_data[cpuid];
+    const struct processor_pminfo *pmpt = processor_pminfo[cpuid];
+
+    /* pmpt may be NULL now; check it before reading state_count. */
+    if ( !pmpt )
+        return -EINVAL;
+
+    count = pmpt->perf.state_count;
+
+    if ( !pxpt )
+    {
+        pxpt = xmalloc(struct pm_px);
+        if ( !pxpt )
+            return -ENOMEM;
+        memset(pxpt, 0, sizeof(*pxpt));
+        px_statistic_data[cpuid] = pxpt;
+    }
+
    pxpt->u.trans_pt = xmalloc_array(uint64_t, count * count);
    if (!pxpt->u.trans_pt)
        return -ENOMEM;
void px_statistic_exit(unsigned int cpuid)
{
- struct pm_px *pxpt = &px_statistic_data[cpuid];
+ struct pm_px *pxpt = px_statistic_data[cpuid];
+ if (!pxpt)
+ return;
xfree(pxpt->u.trans_pt);
xfree(pxpt->u.pt);
memset(pxpt, 0, sizeof(struct pm_px));
void px_statistic_reset(unsigned int cpuid)
{
uint32_t i, j, count;
- struct pm_px *pxpt = &px_statistic_data[cpuid];
+ struct pm_px *pxpt = px_statistic_data[cpuid];
+ const struct processor_pminfo *pmpt = processor_pminfo[cpuid];
- count = processor_pminfo[cpuid].perf.state_count;
+ if ( !pxpt || !pmpt )
+ return;
+
+ count = pmpt->perf.state_count;
for (i=0; i < count; i++) {
pxpt->u.pt[i].residency = 0;
#include <public/sysctl.h>
#include <acpi/cpufreq/cpufreq.h>
-struct pm_px px_statistic_data[NR_CPUS];
+struct pm_px *__read_mostly px_statistic_data[NR_CPUS];
extern uint32_t pmstat_get_cx_nr(uint32_t cpuid);
extern int pmstat_get_cx_stat(uint32_t cpuid, struct pm_cx_stat *stat);
int do_get_pm_info(struct xen_sysctl_get_pmstat *op)
{
    int ret = 0;
-    struct pm_px *pxpt = &px_statistic_data[op->cpuid];
-    struct processor_pminfo *pmpt = &processor_pminfo[op->cpuid];
+    const struct processor_pminfo *pmpt;
+
+    /* Validate cpuid before it is used to index any per-CPU array. */
+    if ( op->cpuid >= NR_CPUS || !cpu_online(op->cpuid) )
+        return -EINVAL;
+
+    pmpt = processor_pminfo[op->cpuid];
    /* to protect the case when Px was not controlled by xen */
-    if ( (!(pmpt->perf.init & XEN_PX_INIT)) &&
+    if ( (!pmpt || !(pmpt->perf.init & XEN_PX_INIT)) &&
         (op->type & PMSTAT_CATEGORY_MASK) == PMSTAT_PX )
        return -EINVAL;
-    if ( !cpu_online(op->cpuid) )
-        return -EINVAL;
switch( op->type )
uint64_t now, ct;
uint64_t total_idle_ns;
uint64_t tmp_idle_ns;
+ struct pm_px *pxpt = px_statistic_data[op->cpuid];
+
+ if ( !pxpt )
+ return -ENODATA;
total_idle_ns = get_cpu_idle_time(op->cpuid);
tmp_idle_ns = total_idle_ns - pxpt->prev_idle_wall;
ret = -EINVAL;
break;
}
- pmpt = &processor_pminfo[cpuid];
- pxpt = &processor_pminfo[cpuid].perf;
+ pmpt = processor_pminfo[cpuid];
+ if ( !pmpt )
+ {
+ pmpt = xmalloc(struct processor_pminfo);
+ if ( !pmpt )
+ {
+ ret = -ENOMEM;
+ break;
+ }
+ memset(pmpt, 0, sizeof(*pmpt));
+ processor_pminfo[cpuid] = pmpt;
+ }
+ pxpt = &pmpt->perf;
pmpt->acpi_id = xenpmpt->id;
pmpt->id = cpuid;
struct processor_performance perf;
};
-extern struct processor_pminfo processor_pminfo[NR_CPUS];
+extern struct processor_pminfo *processor_pminfo[NR_CPUS];
struct px_stat {
uint8_t total; /* total Px states */
uint64_t prev_idle_wall;
};
-extern struct pm_px px_statistic_data[NR_CPUS];
+extern struct pm_px *px_statistic_data[NR_CPUS];
#endif /* __XEN_PROCESSOR_PM_H__ */