ia64/xen-unstable

changeset 18740:2c7665f04038

cpufreq: domain structure update from array to linked list

The cpufreq coordination domains are currently tracked in a fixed array, cpufreq_dom_map[NR_CPUS].
However, domain numbers may be sparse or larger than NR_CPUS, so track the domains in a linked list of per-domain structures instead.
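
In outline: each coordination domain gets a small list node holding its id and a cpumask of member CPUs, and callers walk the list instead of indexing by domain number. Below is a minimal sketch of that lookup, assuming the <xen/list.h> helpers used in the patch; find_dom() is a hypothetical helper added here only for illustration — the patch itself open-codes this loop in cpufreq_add_cpu() and cpufreq_del_cpu().

struct cpufreq_dom {
    unsigned int     dom;   /* ACPI coordination domain id (may be sparse) */
    cpumask_t        map;   /* CPUs of this domain registered so far */
    struct list_head node;  /* linkage into cpufreq_dom_list_head */
};
static LIST_HEAD(cpufreq_dom_list_head);

/* Return the node for 'dom', or NULL if no CPU of that domain is registered. */
static struct cpufreq_dom *find_dom(unsigned int dom)
{
    struct list_head *pos;
    struct cpufreq_dom *cpufreq_dom;

    list_for_each(pos, &cpufreq_dom_list_head) {
        cpufreq_dom = list_entry(pos, struct cpufreq_dom, node);
        if (cpufreq_dom->dom == dom)
            return cpufreq_dom;
    }
    return NULL;
}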

Signed-off-by: Jinsong Liu <jinsong.liu@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Oct 29 10:14:13 2008 +0000 (2008-10-29)
parents 19549b9766fd
children bec755616e8e
files xen/drivers/cpufreq/cpufreq.c
--- a/xen/drivers/cpufreq/cpufreq.c	Tue Oct 28 11:25:20 2008 +0000
+++ b/xen/drivers/cpufreq/cpufreq.c	Wed Oct 29 10:14:13 2008 +0000
@@ -31,6 +31,7 @@
 #include <xen/errno.h>
 #include <xen/delay.h>
 #include <xen/cpumask.h>
+#include <xen/list.h>
 #include <xen/sched.h>
 #include <xen/timer.h>
 #include <xen/xmalloc.h>
@@ -44,8 +45,12 @@
 #include <acpi/acpi.h>
 #include <acpi/cpufreq/cpufreq.h>
 
-/* TODO: change to link list later as domain number may be sparse */
-static cpumask_t cpufreq_dom_map[NR_CPUS];
+struct cpufreq_dom {
+    unsigned int	dom;
+    cpumask_t		map;
+    struct list_head	node;
+};
+static LIST_HEAD(cpufreq_dom_list_head);
 
 int cpufreq_limit_change(unsigned int cpu)
 {
@@ -72,48 +77,71 @@ int cpufreq_add_cpu(unsigned int cpu)
 {
     int ret = 0;
     unsigned int firstcpu;
-    unsigned int dom;
+    unsigned int dom, domexist = 0;
     unsigned int j;
+    struct list_head *pos;
+    struct cpufreq_dom *cpufreq_dom;
     struct cpufreq_policy new_policy;
     struct cpufreq_policy *policy;
     struct processor_performance *perf = &processor_pminfo[cpu]->perf;
 
     /* to protect the case when Px was not controlled by xen */
-    if (!processor_pminfo[cpu] || !(perf->init & XEN_PX_INIT))
-        return 0;
+    if (!processor_pminfo[cpu]      ||
+        !(perf->init & XEN_PX_INIT) ||
+        !cpu_online(cpu))
+        return -EINVAL;
 
-    if (!cpu_online(cpu) || cpufreq_cpu_policy[cpu])
-        return -EINVAL;
+    if (cpufreq_cpu_policy[cpu])
+        return 0;
 
     ret = cpufreq_statistic_init(cpu);
     if (ret)
         return ret;
 
     dom = perf->domain_info.domain;
-    if (cpus_weight(cpufreq_dom_map[dom])) {
+
+    list_for_each(pos, &cpufreq_dom_list_head) {
+        cpufreq_dom = list_entry(pos, struct cpufreq_dom, node);
+        if (dom == cpufreq_dom->dom) {
+            domexist = 1;
+            break;
+        }
+    }
+
+    if (domexist) {
         /* share policy with the first cpu since on same boat */
-        firstcpu = first_cpu(cpufreq_dom_map[dom]);
+        firstcpu = first_cpu(cpufreq_dom->map);
         policy = cpufreq_cpu_policy[firstcpu];
 
         cpufreq_cpu_policy[cpu] = policy;
-        cpu_set(cpu, cpufreq_dom_map[dom]);
+        cpu_set(cpu, cpufreq_dom->map);
         cpu_set(cpu, policy->cpus);
 
         printk(KERN_EMERG"adding CPU %u\n", cpu);
     } else {
+        cpufreq_dom = xmalloc(struct cpufreq_dom);
+        if (!cpufreq_dom) {
+            cpufreq_statistic_exit(cpu);
+            return -ENOMEM;
+        }
+        memset(cpufreq_dom, 0, sizeof(struct cpufreq_dom));
+        cpufreq_dom->dom = dom;
+        cpu_set(cpu, cpufreq_dom->map);
+        list_add(&cpufreq_dom->node, &cpufreq_dom_list_head);
+
        /* for the first cpu, setup policy and do init work */
         policy = xmalloc(struct cpufreq_policy);
         if (!policy) {
+            list_del(&cpufreq_dom->node);
+            xfree(cpufreq_dom);
             cpufreq_statistic_exit(cpu);
             return -ENOMEM;
         }
         memset(policy, 0, sizeof(struct cpufreq_policy));
-
+        policy->cpu = cpu;
+        cpu_set(cpu, policy->cpus);
         cpufreq_cpu_policy[cpu] = policy;
-        cpu_set(cpu, cpufreq_dom_map[dom]);
-        cpu_set(cpu, policy->cpus);
 
-        policy->cpu = cpu;
         ret = cpufreq_driver->init(policy);
         if (ret)
             goto err1;
@@ -124,7 +152,7 @@ int cpufreq_add_cpu(unsigned int cpu)
      * After get full cpumap of the coordination domain,
      * we can safely start gov here.
      */
-    if (cpus_weight(cpufreq_dom_map[dom]) ==
+    if (cpus_weight(cpufreq_dom->map) ==
         perf->domain_info.num_processors) {
         memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
         policy->governor = NULL;
@@ -138,51 +166,68 @@ int cpufreq_add_cpu(unsigned int cpu)
 err2:
     cpufreq_driver->exit(policy);
 err1:
-    for_each_cpu_mask(j, cpufreq_dom_map[dom]) {
+    for_each_cpu_mask(j, cpufreq_dom->map) {
         cpufreq_cpu_policy[j] = NULL;
         cpufreq_statistic_exit(j);
     }
 
-    cpus_clear(cpufreq_dom_map[dom]);
+    list_del(&cpufreq_dom->node);
+    xfree(cpufreq_dom);
     xfree(policy);
     return ret;
 }
 
 int cpufreq_del_cpu(unsigned int cpu)
 {
-    unsigned int dom;
+    unsigned int dom, domexist = 0;
+    struct list_head *pos;
+    struct cpufreq_dom *cpufreq_dom;
     struct cpufreq_policy *policy;
     struct processor_performance *perf = &processor_pminfo[cpu]->perf;
 
     /* to protect the case when Px was not controlled by xen */
-    if (!processor_pminfo[cpu] || !(perf->init & XEN_PX_INIT))
-        return 0;
+    if (!processor_pminfo[cpu]      ||
+        !(perf->init & XEN_PX_INIT) ||
+        !cpu_online(cpu))
+        return -EINVAL;
 
-    if (!cpu_online(cpu) || !cpufreq_cpu_policy[cpu])
-        return -EINVAL;
+    if (!cpufreq_cpu_policy[cpu])
+        return 0;
 
     dom = perf->domain_info.domain;
     policy = cpufreq_cpu_policy[cpu];
 
-    printk(KERN_EMERG"deleting CPU %u\n", cpu);
+    list_for_each(pos, &cpufreq_dom_list_head) {
+        cpufreq_dom = list_entry(pos, struct cpufreq_dom, node);
+        if (dom == cpufreq_dom->dom) {
+            domexist = 1;
+            break;
+        }
+    }
+
+    if (!domexist)
+        return -EINVAL;
 
     /* for the first cpu of the domain, stop gov */
-    if (cpus_weight(cpufreq_dom_map[dom]) ==
+    if (cpus_weight(cpufreq_dom->map) ==
         perf->domain_info.num_processors)
         __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
 
     cpufreq_cpu_policy[cpu] = NULL;
     cpu_clear(cpu, policy->cpus);
-    cpu_clear(cpu, cpufreq_dom_map[dom]);
+    cpu_clear(cpu, cpufreq_dom->map);
     cpufreq_statistic_exit(cpu);
 
     /* for the last cpu of the domain, clean room */
     /* It's safe here to free freq_table, drv_data and policy */
-    if (!cpus_weight(cpufreq_dom_map[dom])) {
+    if (!cpus_weight(cpufreq_dom->map)) {
         cpufreq_driver->exit(policy);
+        list_del(&cpufreq_dom->node);
+        xfree(cpufreq_dom);
         xfree(policy);
     }
 
+    printk(KERN_EMERG"deleting CPU %u\n", cpu);
     return 0;
 }