ia64/xen-unstable

view linux-2.6.10-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c @ 3469:bf476d24f798

bitkeeper revision 1.1159.223.7 (41f02e18RcwEdT0BXMaReeOUBf3KVw)

timer_tsc.c, time.c:
Fix time restore bug.
author kaf24@scramble.cl.cam.ac.uk
date Thu Jan 20 22:18:00 2005 +0000 (2005-01-20)
parents 56a5e9ed0e89
children c90e94e09339
line source
1 /*
2 * This code largely moved from arch/i386/kernel/time.c.
3 * See comments there for proper credits.
4 */
6 #include <linux/spinlock.h>
7 #include <linux/init.h>
8 #include <linux/timex.h>
9 #include <linux/errno.h>
10 #include <linux/cpufreq.h>
11 #include <linux/string.h>
12 #include <linux/jiffies.h>
14 #include <asm/timer.h>
15 #include <asm/io.h>
16 /* processor.h for tsc_disable flag */
17 #include <asm/processor.h>
19 #include "io_ports.h"
20 #include "mach_timer.h"
22 #include <asm/hpet.h>
24 #ifdef CONFIG_HPET_TIMER
25 static unsigned long hpet_usec_quotient;
26 static unsigned long hpet_last;
27 static struct timer_opts timer_tsc;
28 #endif
30 static inline void cpufreq_delayed_get(void);
32 int tsc_disable __initdata = 0;
34 extern spinlock_t i8253_lock;
36 static int use_tsc;
38 static unsigned long long monotonic_base;
39 static u32 monotonic_offset;
40 static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
42 /* convert from cycles(64bits) => nanoseconds (64bits)
43 * basic equation:
44 * ns = cycles / (freq / ns_per_sec)
45 * ns = cycles * (ns_per_sec / freq)
46 * ns = cycles * (10^9 / (cpu_mhz * 10^6))
47 * ns = cycles * (10^3 / cpu_mhz)
48 *
49 * Then we use scaling math (suggested by george@mvista.com) to get:
50 * ns = cycles * (10^3 * SC / cpu_mhz) / SC
51 * ns = cycles * cyc2ns_scale / SC
52 *
53 * And since SC is a constant power of two, we can convert the div
54 * into a shift.
55 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
56 */
/* Fixed-point multiplier for the cycles -> ns conversion described above. */
static unsigned long cyc2ns_scale;
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_mhz)
{
	/* ns = cyc * (10^3 * 2^CYC2NS_SCALE_FACTOR / cpu_mhz) >> CYC2NS_SCALE_FACTOR */
	cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR) / cpu_mhz;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
70 /* Cached *multiplier* to convert TSC counts to microseconds.
71 * (see the equation below).
72 * Equal to 2^32 * (1 / (clocks per usec) ).
73 * Initialized in time_init.
74 */
75 static unsigned long fast_gettimeoffset_quotient;
77 extern u32 shadow_tsc_stamp;
78 extern u64 shadow_system_time;
80 static unsigned long get_offset_tsc(void)
81 {
82 register unsigned long eax, edx;
84 /* Read the Time Stamp Counter */
86 rdtsc(eax,edx);
88 /* .. relative to previous jiffy (32 bits is enough) */
89 eax -= shadow_tsc_stamp;
91 /*
92 * Time offset = (tsc_low delta) * fast_gettimeoffset_quotient
93 * = (tsc_low delta) * (usecs_per_clock)
94 * = (tsc_low delta) * (usecs_per_jiffy / clocks_per_jiffy)
95 *
96 * Using a mull instead of a divl saves up to 31 clock cycles
97 * in the critical path.
98 */
100 __asm__("mull %2"
101 :"=a" (eax), "=d" (edx)
102 :"rm" (fast_gettimeoffset_quotient),
103 "0" (eax));
105 /* our adjusted time offset in microseconds */
106 return edx;
107 }
109 static unsigned long long monotonic_clock_tsc(void)
110 {
111 unsigned long long last_offset, this_offset, base;
112 unsigned seq;
114 /* atomically read monotonic base & last_offset */
115 do {
116 seq = read_seqbegin(&monotonic_lock);
117 last_offset = monotonic_offset;
118 base = monotonic_base;
119 } while (read_seqretry(&monotonic_lock, seq));
121 /* Read the Time Stamp Counter */
122 rdtscll(this_offset);
124 /* return the value in ns */
125 return base + cycles_2_ns(this_offset - last_offset);
126 }
128 /*
129 * Scheduler clock - returns current time in nanosec units.
130 */
131 unsigned long long sched_clock(void)
132 {
133 unsigned long long this_offset;
135 /*
136 * In the NUMA case we dont use the TSC as they are not
137 * synchronized across all CPUs.
138 */
139 #ifndef CONFIG_NUMA
140 if (!use_tsc)
141 #endif
142 /* no locking but a rare wrong value is not a big deal */
143 return jiffies_64 * (1000000000 / HZ);
145 /* Read the Time Stamp Counter */
146 rdtscll(this_offset);
148 /* return the value in ns */
149 return cycles_2_ns(this_offset);
150 }
153 static void mark_offset_tsc(void)
154 {
156 /* update the monotonic base value */
157 write_seqlock(&monotonic_lock);
158 monotonic_base = shadow_system_time;
159 monotonic_offset = shadow_tsc_stamp;
160 write_sequnlock(&monotonic_lock);
161 }
/* Busy-wait for `loops` TSC cycles, relaxing the CPU between polls. */
static void delay_tsc(unsigned long loops)
{
	unsigned long start, cur;

	rdtscl(start);
	do {
		rep_nop();
		rdtscl(cur);
	} while ((cur - start) < loops);
}
175 #ifdef CONFIG_HPET_TIMER
176 static void mark_offset_tsc_hpet(void)
177 {
178 unsigned long long this_offset, last_offset;
179 unsigned long offset, temp, hpet_current;
181 write_seqlock(&monotonic_lock);
182 last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
183 /*
184 * It is important that these two operations happen almost at
185 * the same time. We do the RDTSC stuff first, since it's
186 * faster. To avoid any inconsistencies, we need interrupts
187 * disabled locally.
188 */
189 /*
190 * Interrupts are just disabled locally since the timer irq
191 * has the SA_INTERRUPT flag set. -arca
192 */
193 /* read Pentium cycle counter */
195 hpet_current = hpet_readl(HPET_COUNTER);
196 rdtsc(last_tsc_low, last_tsc_high);
198 /* lost tick compensation */
199 offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
200 if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))) {
201 int lost_ticks = (offset - hpet_last) / hpet_tick;
202 jiffies_64 += lost_ticks;
203 }
204 hpet_last = hpet_current;
206 /* update the monotonic base value */
207 this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
208 monotonic_base += cycles_2_ns(this_offset - last_offset);
209 write_sequnlock(&monotonic_lock);
211 /* calculate delay_at_last_interrupt */
212 /*
213 * Time offset = (hpet delta) * ( usecs per HPET clock )
214 * = (hpet delta) * ( usecs per tick / HPET clocks per tick)
215 * = (hpet delta) * ( hpet_usec_quotient ) / (2^32)
216 * Where,
217 * hpet_usec_quotient = (2^32 * usecs per tick)/HPET clocks per tick
218 */
219 delay_at_last_interrupt = hpet_current - offset;
220 ASM_MUL64_REG(temp, delay_at_last_interrupt,
221 hpet_usec_quotient, delay_at_last_interrupt);
222 }
223 #endif
226 #ifdef CONFIG_CPU_FREQ
227 #include <linux/workqueue.h>
229 static unsigned int cpufreq_delayed_issched = 0;
230 static unsigned int cpufreq_init = 0;
231 static struct work_struct cpufreq_delayed_get_work;
233 static void handle_cpufreq_delayed_get(void *v)
234 {
235 unsigned int cpu;
236 for_each_online_cpu(cpu) {
237 cpufreq_get(cpu);
238 }
239 cpufreq_delayed_issched = 0;
240 }
242 /* if we notice lost ticks, schedule a call to cpufreq_get() as it tries
243 * to verify the CPU frequency the timing core thinks the CPU is running
244 * at is still correct.
245 */
246 static inline void cpufreq_delayed_get(void)
247 {
248 if (cpufreq_init && !cpufreq_delayed_issched) {
249 cpufreq_delayed_issched = 1;
250 printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
251 schedule_work(&cpufreq_delayed_get_work);
252 }
253 }
255 /* If the CPU frequency is scaled, TSC-based delays will need a different
256 * loops_per_jiffy value to function properly.
257 */
259 static unsigned int ref_freq = 0;
260 static unsigned long loops_per_jiffy_ref = 0;
262 #ifndef CONFIG_SMP
263 static unsigned long fast_gettimeoffset_ref = 0;
264 static unsigned long cpu_khz_ref = 0;
265 #endif
267 static int
268 time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
269 void *data)
270 {
271 struct cpufreq_freqs *freq = data;
273 if (val != CPUFREQ_RESUMECHANGE)
274 write_seqlock_irq(&xtime_lock);
275 if (!ref_freq) {
276 ref_freq = freq->old;
277 loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
278 #ifndef CONFIG_SMP
279 fast_gettimeoffset_ref = fast_gettimeoffset_quotient;
280 cpu_khz_ref = cpu_khz;
281 #endif
282 }
284 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
285 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
286 (val == CPUFREQ_RESUMECHANGE)) {
287 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
288 cpu_data[freq->cpu].loops_per_jiffy = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
289 #ifndef CONFIG_SMP
290 if (cpu_khz)
291 cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
292 if (use_tsc) {
293 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
294 fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq);
295 set_cyc2ns_scale(cpu_khz/1000);
296 }
297 }
298 #endif
299 }
301 if (val != CPUFREQ_RESUMECHANGE)
302 write_sequnlock_irq(&xtime_lock);
304 return 0;
305 }
307 static struct notifier_block time_cpufreq_notifier_block = {
308 .notifier_call = time_cpufreq_notifier
309 };
312 static int __init cpufreq_tsc(void)
313 {
314 int ret;
315 INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
316 ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
317 CPUFREQ_TRANSITION_NOTIFIER);
318 if (!ret)
319 cpufreq_init = 1;
320 return ret;
321 }
322 core_initcall(cpufreq_tsc);
324 #else /* CONFIG_CPU_FREQ */
/* Without CONFIG_CPU_FREQ there is no frequency change to chase. */
static inline void cpufreq_delayed_get(void) { }
326 #endif
329 static int init_tsc(char* override)
330 {
331 u64 __cpu_khz;
333 __cpu_khz = HYPERVISOR_shared_info->cpu_freq;
334 do_div(__cpu_khz, 1000);
335 cpu_khz = (u32)__cpu_khz;
336 printk(KERN_INFO "Xen reported: %lu.%03lu MHz processor.\n",
337 cpu_khz / 1000, cpu_khz % 1000);
339 /* (10^6 * 2^32) / cpu_hz = (10^3 * 2^32) / cpu_khz =
340 (2^32 * 1 / (clocks/us)) */
341 {
342 unsigned long eax=0, edx=1000;
343 __asm__("divl %2"
344 :"=a" (fast_gettimeoffset_quotient), "=d" (edx)
345 :"r" (cpu_khz),
346 "0" (eax), "1" (edx));
347 }
349 set_cyc2ns_scale(cpu_khz/1000);
351 use_tsc = 1;
353 return 0;
354 }
356 static int __init tsc_setup(char *str)
357 {
358 printk(KERN_WARNING "notsc: cannot disable TSC in Xen/Linux.\n");
359 return 1;
360 }
361 __setup("notsc", tsc_setup);
365 /************************************************************/
367 /* tsc timer_opts struct */
368 struct timer_opts timer_tsc = {
369 .name = "tsc",
370 .mark_offset = mark_offset_tsc,
371 .get_offset = get_offset_tsc,
372 .monotonic_clock = monotonic_clock_tsc,
373 .delay = delay_tsc,
374 };
376 struct init_timer_opts timer_tsc_init = {
377 .init = init_tsc,
378 .opts = &timer_tsc,
379 };