ia64/xen-unstable

view xen/arch/x86/acpi/cpufreq/cpufreq_ondemand.c @ 17947:6cac9c3ee2aa

x86: Remove inline declaration to fix build.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jul 02 10:56:37 2008 +0100 (2008-07-02)
parents 0b4dbd9a9896
children f125e481d8b6
line source
1 /*
2 * xen/arch/x86/acpi/cpufreq/cpufreq_ondemand.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
6 * Jun Nakajima <jun.nakajima@intel.com>
7 * Feb 2008 Liu Jinsong <jinsong.liu@intel.com>
8 * Porting cpufreq_ondemand.c from Linux 2.6.23 to Xen hypervisor
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
15 #include <xen/types.h>
16 #include <xen/percpu.h>
17 #include <xen/cpumask.h>
18 #include <xen/types.h>
19 #include <xen/sched.h>
20 #include <xen/timer.h>
21 #include <asm/config.h>
22 #include <acpi/cpufreq/cpufreq.h>
/* Load (%) above which the governor jumps straight to the maximum frequency. */
#define DEF_FREQUENCY_UP_THRESHOLD              (80)

/* Samples closer together than this are discarded as too noisy. */
#define MIN_DBS_INTERVAL                        (MICROSECS(100))
/* Floor for the sampling period, expressed in milliseconds. */
#define MIN_SAMPLING_MILLISECS                  (20)
#define MIN_STAT_SAMPLING_RATE                  \
    (MIN_SAMPLING_MILLISECS * MILLISECS(1))
/* Default sampling rate = transition latency * this multiplier. */
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER    (1000)
/* Drivers with a longer transition latency than this (scaled by 1000 at
 * the comparison site — presumably us->ns; TODO confirm units) are
 * rejected by the governor. */
#define TRANSITION_LATENCY_LIMIT                (10 * 1000 )

/* Sampling rate chosen at first GOV_START from the driver's latency. */
static uint64_t def_sampling_rate;

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

/* Per-CPU governor state (policy pointer, previous idle/wall baselines). */
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable;    /* number of CPUs using this policy */

/* Tunables shared by all CPUs running this governor. */
static struct dbs_tuners {
    uint64_t sampling_rate;
    unsigned int up_threshold;
    unsigned int ignore_nice;      /* declared but unused in this file */
    unsigned int powersave_bias;   /* declared but unused in this file */
} dbs_tuners_ins = {
    .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
    .ignore_nice = 0,
    .powersave_bias = 0,
};

/* One sampling timer per CPU, armed/stopped by dbs_timer_init/exit. */
static struct timer dbs_timer[NR_CPUS];
55 uint64_t get_cpu_idle_time(unsigned int cpu)
56 {
57 uint64_t idle_ns;
58 struct vcpu *v;
60 if ((v = idle_vcpu[cpu]) == NULL)
61 return 0;
63 idle_ns = v->runstate.time[RUNSTATE_running];
64 if (v->is_running)
65 idle_ns += NOW() - v->runstate.state_entry_time;
67 return idle_ns;
68 }
70 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
71 {
72 unsigned int load = 0;
73 uint64_t cur_ns, idle_ns, total_ns;
75 struct cpufreq_policy *policy;
76 unsigned int j;
78 if (!this_dbs_info->enable)
79 return;
81 policy = this_dbs_info->cur_policy;
83 if (unlikely(policy->resume)) {
84 __cpufreq_driver_target(policy, policy->max,CPUFREQ_RELATION_H);
85 return;
86 }
88 cur_ns = NOW();
89 total_ns = cur_ns - this_dbs_info->prev_cpu_wall;
90 this_dbs_info->prev_cpu_wall = NOW();
92 if (total_ns < MIN_DBS_INTERVAL)
93 return;
95 /* Get Idle Time */
96 idle_ns = UINT_MAX;
97 for_each_cpu_mask(j, policy->cpus) {
98 uint64_t total_idle_ns;
99 unsigned int tmp_idle_ns;
100 struct cpu_dbs_info_s *j_dbs_info;
102 j_dbs_info = &per_cpu(cpu_dbs_info, j);
103 total_idle_ns = get_cpu_idle_time(j);
104 tmp_idle_ns = total_idle_ns - j_dbs_info->prev_cpu_idle;
105 j_dbs_info->prev_cpu_idle = total_idle_ns;
107 if (tmp_idle_ns < idle_ns)
108 idle_ns = tmp_idle_ns;
109 }
111 if (likely(total_ns > idle_ns))
112 load = (100 * (total_ns - idle_ns)) / total_ns;
114 /* Check for frequency increase */
115 if (load > dbs_tuners_ins.up_threshold) {
116 /* if we are already at full speed then break out early */
117 if (policy->cur == policy->max)
118 return;
119 __cpufreq_driver_target(policy, policy->max,CPUFREQ_RELATION_H);
120 return;
121 }
123 /* Check for frequency decrease */
124 /* if we cannot reduce the frequency anymore, break out early */
125 if (policy->cur == policy->min)
126 return;
128 /*
129 * The optimal frequency is the frequency that is the lowest that
130 * can support the current CPU usage without triggering the up
131 * policy. To be safe, we focus 10 points under the threshold.
132 */
133 if (load < (dbs_tuners_ins.up_threshold - 10)) {
134 unsigned int freq_next, freq_cur;
136 freq_cur = __cpufreq_driver_getavg(policy);
137 if (!freq_cur)
138 freq_cur = policy->cur;
140 freq_next = (freq_cur * load) / (dbs_tuners_ins.up_threshold - 10);
142 __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
143 }
144 }
146 static void do_dbs_timer(void *dbs)
147 {
148 struct cpu_dbs_info_s *dbs_info = (struct cpu_dbs_info_s *)dbs;
150 if (!dbs_info->enable)
151 return;
153 dbs_check_cpu(dbs_info);
155 set_timer(&dbs_timer[dbs_info->cpu], NOW()+dbs_tuners_ins.sampling_rate);
156 }
158 static void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
159 {
160 dbs_info->enable = 1;
162 init_timer(&dbs_timer[dbs_info->cpu], do_dbs_timer,
163 (void *)dbs_info, dbs_info->cpu);
165 set_timer(&dbs_timer[dbs_info->cpu], NOW()+dbs_tuners_ins.sampling_rate);
166 }
168 static void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
169 {
170 dbs_info->enable = 0;
171 stop_timer(&dbs_timer[dbs_info->cpu]);
172 }
/*
 * Governor entry point, invoked by the cpufreq core.
 * @event selects the operation:
 *   CPUFREQ_GOV_START  - begin sampling @policy's CPUs; on first use
 *                        derive the default sampling rate from the
 *                        driver's transition latency.
 *   CPUFREQ_GOV_STOP   - stop this CPU's timer, drop the user count.
 *   CPUFREQ_GOV_LIMITS - clamp the current frequency into the (possibly
 *                        changed) [policy->min, policy->max] range.
 * Returns 0 on success; -EINVAL for an offline CPU, unknown current
 * frequency, or a transition latency too long for ondemand sampling.
 */
int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
    unsigned int cpu = policy->cpu;
    struct cpu_dbs_info_s *this_dbs_info;
    unsigned int j;

    this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

    switch (event) {
    case CPUFREQ_GOV_START:
        if ((!cpu_online(cpu)) || (!policy->cur))
            return -EINVAL;

        if (policy->cpuinfo.transition_latency >
            (TRANSITION_LATENCY_LIMIT * 1000)) {
            printk(KERN_WARNING "ondemand governor failed to load "
                   "due to too long transition latency\n");
            return -EINVAL;
        }
        if (this_dbs_info->enable)
            /* Already enabled */
            break;

        dbs_enable++;

        /* Snapshot idle/wall baselines for every CPU sharing this policy. */
        for_each_cpu_mask(j, policy->cpus) {
            struct cpu_dbs_info_s *j_dbs_info;
            j_dbs_info = &per_cpu(cpu_dbs_info, j);
            j_dbs_info->cur_policy = policy;

            j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
            j_dbs_info->prev_cpu_wall = NOW();
        }
        this_dbs_info->cpu = cpu;
        /*
         * Start the timerschedule work, when this governor
         * is used for first time
         */
        if (dbs_enable == 1) {
            def_sampling_rate = policy->cpuinfo.transition_latency *
                DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;

            if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
                def_sampling_rate = MIN_STAT_SAMPLING_RATE;

            dbs_tuners_ins.sampling_rate = def_sampling_rate;
        }
        dbs_timer_init(this_dbs_info);

        break;

    case CPUFREQ_GOV_STOP:
        dbs_timer_exit(this_dbs_info);
        dbs_enable--;

        break;

    case CPUFREQ_GOV_LIMITS:
        /* Push the running frequency back inside the new limits. */
        if (policy->max < this_dbs_info->cur_policy->cur)
            __cpufreq_driver_target(this_dbs_info->cur_policy,
                                    policy->max, CPUFREQ_RELATION_H);
        else if (policy->min > this_dbs_info->cur_policy->cur)
            __cpufreq_driver_target(this_dbs_info->cur_policy,
                                    policy->min, CPUFREQ_RELATION_L);
        break;
    }
    return 0;
}