ia64/xen-unstable: view xen/arch/x86/time.c @ 6062:7d84bc707736

Fix the x86/64 build.
Signed-off-by: Keir Fraser <keir@xensource.com>

author   kaf24@firebug.cl.cam.ac.uk
date     Mon Aug 08 12:07:19 2005 +0000 (2005-08-08)
parents  6fc0b68b0a9c
children 2360c4d7bb2f

/******************************************************************************
 * arch/x86/time.c
 *
 * Per-CPU time calibration and management.
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * Portions from Linux are:
 * Copyright (c) 1991, 1992, 1995  Linus Torvalds
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/event.h>
#include <xen/sched.h>
#include <xen/lib.h>
#include <xen/init.h>
#include <xen/time.h>
#include <xen/ac_timer.h>
#include <xen/smp.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <asm/io.h>
#include <asm/msr.h>
#include <asm/mpspec.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/mc146818rtc.h>
#include <asm/div64.h>
#include <asm/hpet.h>
#include <io_ports.h>

/* opt_hpet_force: If true, force HPET configuration via PCI space. */
/* NB. This is a gross hack. Mainly useful for HPET testing. */
static int opt_hpet_force = 0;
boolean_param("hpet_force", opt_hpet_force);

#define EPOCH MILLISECS(1000)

unsigned long cpu_khz;  /* CPU clock frequency in kHz. */
unsigned long hpet_address;
spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
int timer_ack = 0;
unsigned long volatile jiffies;
static u32 wc_sec, wc_nsec; /* UTC time at last 'time update'. */

struct time_scale {
    int shift;
    u32 mul_frac;
};
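
/*
 * A time_scale converts a tick count to nanoseconds:
 *     ns = (ticks << shift) * mul_frac / 2^32
 * (shifting right when shift is negative). mul_frac is a 0.32 fixed-point
 * fraction kept in [0.5, 1) by set_time_scale() below.
 */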

struct cpu_time {
    u64 local_tsc_stamp;
    s_time_t stime_local_stamp;
    s_time_t stime_master_stamp;
    struct time_scale tsc_scale;
    struct ac_timer calibration_timer;
} __cacheline_aligned;

static struct cpu_time cpu_time[NR_CPUS];

/* Protected by platform_timer_lock. */
static s_time_t stime_platform_stamp;
static u64 platform_timer_stamp;
static struct time_scale platform_timer_scale;
static spinlock_t platform_timer_lock = SPIN_LOCK_UNLOCKED;
static u64 (*read_platform_count)(void);

/*
 * 32-bit division of integer dividend and integer divisor yielding
 * 32-bit fractional quotient.
 */
static inline u32 div_frac(u32 dividend, u32 divisor)
{
    u32 quotient, remainder;
    ASSERT(dividend < divisor);
    __asm__ (
        "divl %4"
        : "=a" (quotient), "=d" (remainder)
        : "0" (0), "1" (dividend), "r" (divisor) );
    return quotient;
}
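
/* Example: div_frac(1, 4) == 0x40000000, i.e. 0.25 as a 0.32 fixed-point fraction. */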

/*
 * 32-bit multiplication of multiplicand and fractional multiplier
 * yielding 32-bit product (radix point at same position as in multiplicand).
 */
static inline u32 mul_frac(u32 multiplicand, u32 multiplier)
{
    u32 product_int, product_frac;
    __asm__ (
        "mul %3"
        : "=a" (product_frac), "=d" (product_int)
        : "0" (multiplicand), "r" (multiplier) );
    return product_int;
}
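
/* Example: mul_frac(1000000000, 0x80000000) == 500000000, i.e. multiplication by 0.5. */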

/*
 * Scale a 64-bit delta by shifting and multiplying by a 32-bit fraction,
 * yielding a 64-bit result.
 */
static inline u64 scale_delta(u64 delta, struct time_scale *scale)
{
    u64 product;
    u32 tmp;

    if ( scale->shift < 0 )
        delta >>= -scale->shift;
    else
        delta <<= scale->shift;

    __asm__ (
        "push %%edx ; "
        "mul %3 ; "
        "pop %%eax ; "
        "push %%edx ; "
        "mul %3 ; "
        "pop %3 ; "
        "add %3,%%eax ; "
        "xor %3,%3 ; "
        "adc %3,%%edx ; "
        : "=A" (product), "=r" (tmp)
        : "A" (delta), "1" (scale->mul_frac) );

    return product;
}
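
/*
 * Example: with shift == 0 and mul_frac == 0x80000000 (i.e. a 2GHz TSC),
 * scale_delta(2000000000, scale) == 1000000000ns == 1s.
 */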

void timer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
{
    if ( timer_ack )
    {
        extern spinlock_t i8259A_lock;
        spin_lock(&i8259A_lock);
        outb(0x0c, 0x20);
        /* Ack the IRQ; AEOI will end it automatically. */
        inb(0x20);
        spin_unlock(&i8259A_lock);
    }

    /* Update jiffies counter. */
    (*(unsigned long *)&jiffies)++;

    /* Rough hack to allow accurate timers to sort-of-work with no APIC. */
    if ( !cpu_has_apic )
        raise_softirq(AC_TIMER_SOFTIRQ);
}

static struct irqaction irq0 = { timer_interrupt, "timer", NULL };

/* ------ Calibrate the TSC -------
 * Return processor ticks per second.
 */

#define CLOCK_TICK_RATE 1193180 /* system crystal frequency (Hz) */
#define CALIBRATE_FRAC  20      /* calibrate over 50ms */
#define CALIBRATE_LATCH ((CLOCK_TICK_RATE+(CALIBRATE_FRAC/2))/CALIBRATE_FRAC)
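
/* CALIBRATE_LATCH == (1193180 + 10) / 20 == 59659 PIT ticks, i.e. ~50ms. */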

static u64 calibrate_boot_tsc(void)
{
    u64 start, end;
    unsigned long count;

    /* Set the Gate high, disable speaker. */
    outb((inb(0x61) & ~0x02) | 0x01, 0x61);

    /*
     * Now let's take care of CTC channel 2:
     *
     * Set the Gate high, program CTC channel 2 for mode 0 (interrupt on
     * terminal count mode), binary count, and load the CALIBRATE_LATCH count
     * (LSB then MSB) to begin the countdown.
     */
    outb(0xb0, PIT_MODE);                  /* binary, mode 0, LSB/MSB, Ch 2 */
    outb(CALIBRATE_LATCH & 0xff, PIT_CH2); /* LSB of count */
    outb(CALIBRATE_LATCH >> 8, PIT_CH2);   /* MSB of count */

    rdtscll(start);
    for ( count = 0; (inb(0x61) & 0x20) == 0; count++ )
        continue;
    rdtscll(end);

    /* Error if the CTC doesn't behave itself. */
    if ( count == 0 )
        return 0;

    return ((end - start) * (u64)CALIBRATE_FRAC);
}

static void set_time_scale(struct time_scale *ts, u64 ticks_per_sec)
{
    u64 tps64 = ticks_per_sec;
    u32 tps32;
    int shift = 0;

    while ( tps64 > (MILLISECS(1000)*2) )
    {
        tps64 >>= 1;
        shift--;
    }

    tps32 = (u32)tps64;
    while ( tps32 < (u32)MILLISECS(1000) )
    {
        tps32 <<= 1;
        shift++;
    }

    ts->mul_frac = div_frac(MILLISECS(1000), tps32);
    ts->shift = shift;
}
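
/*
 * Example: for ticks_per_sec == 2000000000 (a 2GHz TSC) neither loop runs,
 * so shift == 0 and mul_frac == div_frac(10^9, 2*10^9) == 0x80000000:
 * each tick is worth 0.5ns.
 */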

static atomic_t tsc_calibrate_gang = ATOMIC_INIT(0);
static unsigned int tsc_calibrate_status = 0;
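
/*
 * BP/AP calibration rendezvous: the BP waits for all APs to arrive, latches
 * a PIT countdown, and flags tsc_calibrate_status twice -- once when the
 * count starts and once when it expires. Each AP reads its TSC at both
 * flags, so every CPU measures the same ~50ms interval.
 */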

void calibrate_tsc_bp(void)
{
    while ( atomic_read(&tsc_calibrate_gang) != (num_booting_cpus() - 1) )
        mb();

    outb(CALIBRATE_LATCH & 0xff, PIT_CH2);
    outb(CALIBRATE_LATCH >> 8, PIT_CH2);

    tsc_calibrate_status = 1;
    wmb();

    while ( (inb(0x61) & 0x20) == 0 )
        continue;

    tsc_calibrate_status = 2;
    wmb();

    while ( atomic_read(&tsc_calibrate_gang) != 0 )
        mb();
}

void calibrate_tsc_ap(void)
{
    u64 t1, t2, ticks_per_sec;

    atomic_inc(&tsc_calibrate_gang);

    while ( tsc_calibrate_status < 1 )
        mb();

    rdtscll(t1);

    while ( tsc_calibrate_status < 2 )
        mb();

    rdtscll(t2);

    ticks_per_sec = (t2 - t1) * (u64)CALIBRATE_FRAC;
    set_time_scale(&cpu_time[smp_processor_id()].tsc_scale, ticks_per_sec);

    atomic_dec(&tsc_calibrate_gang);
}

static char *freq_string(u64 freq)
{
    static char s[20];
    unsigned int x, y;
    y = (unsigned int)do_div(freq, 1000000) / 1000;
    x = (unsigned int)freq;
    sprintf(s, "%u.%03uMHz", x, y);
    return s;
}
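
/* Example: freq_string(1193180) yields "1.193MHz". */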

/************************************************************
 * PLATFORM TIMER 1: PROGRAMMABLE INTERVAL TIMER (LEGACY PIT)
 */

/* Protected by platform_timer_lock. */
static u64 pit_counter64;
static u16 pit_stamp;
static struct ac_timer pit_overflow_timer;

static u16 pit_read_counter(void)
{
    u16 count;
    ASSERT(spin_is_locked(&platform_timer_lock));
    outb(0x80, PIT_MODE);
    count  = inb(PIT_CH2);
    count |= inb(PIT_CH2) << 8;
    return count;
}

static void pit_overflow(void *unused)
{
    u16 counter;

    spin_lock(&platform_timer_lock);
    counter = pit_read_counter();
    pit_counter64 += (u16)(pit_stamp - counter);
    pit_stamp = counter;
    spin_unlock(&platform_timer_lock);

    set_ac_timer(&pit_overflow_timer, NOW() + MILLISECS(20));
}

static u64 read_pit_count(void)
{
    return pit_counter64 + (u16)(pit_stamp - pit_read_counter());
}
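
/*
 * The PIT counts down, so (u16)(pit_stamp - counter) is the number of ticks
 * elapsed since the last fold, with u16 arithmetic absorbing counter wrap.
 * This works because pit_overflow() runs every 20ms, well inside the ~55ms
 * 16-bit wrap period at 1193180Hz.
 */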

static int init_pit(void)
{
    read_platform_count = read_pit_count;

    init_ac_timer(&pit_overflow_timer, pit_overflow, NULL, 0);
    pit_overflow(NULL);
    platform_timer_stamp = pit_counter64;
    set_time_scale(&platform_timer_scale, CLOCK_TICK_RATE);

    printk("Platform timer is %s PIT\n", freq_string(CLOCK_TICK_RATE));

    return 1;
}

/************************************************************
 * PLATFORM TIMER 2: HIGH PRECISION EVENT TIMER (HPET)
 */

/* Protected by platform_timer_lock. */
static u64 hpet_counter64, hpet_overflow_period;
static u32 hpet_stamp;
static struct ac_timer hpet_overflow_timer;

static void hpet_overflow(void *unused)
{
    u32 counter;

    spin_lock(&platform_timer_lock);
    counter = hpet_read32(HPET_COUNTER);
    hpet_counter64 += (u32)(counter - hpet_stamp);
    hpet_stamp = counter;
    spin_unlock(&platform_timer_lock);

    set_ac_timer(&hpet_overflow_timer, NOW() + hpet_overflow_period);
}

static u64 read_hpet_count(void)
{
    return hpet_counter64 + (u32)(hpet_read32(HPET_COUNTER) - hpet_stamp);
}

static int init_hpet(void)
{
    u64 hpet_rate;
    u32 hpet_id, hpet_period, cfg;
    int i;

    if ( (hpet_address == 0) && opt_hpet_force )
    {
        outl(0x800038a0, 0xcf8);
        outl(0xff000001, 0xcfc);
        outl(0x800038a0, 0xcf8);
        hpet_address = inl(0xcfc) & 0xfffffffe;
        printk("WARNING: Forcibly enabled HPET at %#lx.\n", hpet_address);
    }

    if ( hpet_address == 0 )
        return 0;

    set_fixmap_nocache(FIX_HPET_BASE, hpet_address);

    hpet_id = hpet_read32(HPET_ID);
    if ( hpet_id == 0 )
    {
        printk("BAD HPET vendor id.\n");
        return 0;
    }

    /* Check for sane period (100ps <= period <= 100ns). */
    hpet_period = hpet_read32(HPET_PERIOD);
    if ( (hpet_period > 100000000) || (hpet_period < 100000) )
    {
        printk("BAD HPET period %u.\n", hpet_period);
        return 0;
    }

    cfg = hpet_read32(HPET_CFG);
    cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
    hpet_write32(cfg, HPET_CFG);

    /* Disable every comparator before re-enabling the main counter. */
    for ( i = 0; i <= ((hpet_id >> 8) & 31); i++ )
    {
        cfg = hpet_read32(HPET_T0_CFG + i*0x20);
        cfg &= ~HPET_TN_ENABLE;
        hpet_write32(cfg, HPET_T0_CFG + i*0x20);
    }

    cfg = hpet_read32(HPET_CFG);
    cfg |= HPET_CFG_ENABLE;
    hpet_write32(cfg, HPET_CFG);

    read_platform_count = read_hpet_count;

    hpet_rate = 1000000000000000ULL; /* 10^15 */
    (void)do_div(hpet_rate, hpet_period);
    set_time_scale(&platform_timer_scale, hpet_rate);

    /* Trigger overflow avoidance roughly when counter increments 2^31. */
    if ( (hpet_rate >> 31) != 0 )
    {
        hpet_overflow_period = MILLISECS(1000);
        (void)do_div(hpet_overflow_period, (u32)(hpet_rate >> 31) + 1);
    }
    else
    {
        hpet_overflow_period = MILLISECS(1000) << 31;
        (void)do_div(hpet_overflow_period, (u32)hpet_rate);
    }
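
    /*
     * Example: a common 14.318MHz HPET takes ~300s to wrap its 32-bit
     * counter, so the else branch above arms the overflow timer roughly
     * every 2^31/14318180 ~= 150s.
     */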

    init_ac_timer(&hpet_overflow_timer, hpet_overflow, NULL, 0);
    hpet_overflow(NULL);
    platform_timer_stamp = hpet_counter64;

    printk("Platform timer is %s HPET\n", freq_string(hpet_rate));

    return 1;
}

/************************************************************
 * PLATFORM TIMER 3: IBM 'CYCLONE' TIMER
 */

int use_cyclone;

/*
 * Although the counter is read via a 64-bit register, I believe it is
 * actually a 40-bit counter. Since this will wrap, I read only the low
 * 32 bits and periodically fold into a 64-bit software counter, just as
 * for PIT and HPET.
 */
#define CYCLONE_CBAR_ADDR   0xFEB00CD0
#define CYCLONE_PMCC_OFFSET 0x51A0
#define CYCLONE_MPMC_OFFSET 0x51D0
#define CYCLONE_MPCS_OFFSET 0x51A8
#define CYCLONE_TIMER_FREQ  100000000

/* Protected by platform_timer_lock. */
static u64 cyclone_counter64;
static u32 cyclone_stamp;
static struct ac_timer cyclone_overflow_timer;
static volatile u32 *cyclone_timer; /* Cyclone MPMC0 register */

static void cyclone_overflow(void *unused)
{
    u32 counter;

    spin_lock(&platform_timer_lock);
    counter = *cyclone_timer;
    cyclone_counter64 += (u32)(counter - cyclone_stamp);
    cyclone_stamp = counter;
    spin_unlock(&platform_timer_lock);

    set_ac_timer(&cyclone_overflow_timer, NOW() + MILLISECS(20000));
}

static u64 read_cyclone_count(void)
{
    return cyclone_counter64 + (u32)(*cyclone_timer - cyclone_stamp);
}

static volatile u32 *map_cyclone_reg(unsigned long regaddr)
{
    unsigned long pageaddr = regaddr & PAGE_MASK;
    unsigned long offset   = regaddr & ~PAGE_MASK;
    set_fixmap_nocache(FIX_CYCLONE_TIMER, pageaddr);
    return (volatile u32 *)(fix_to_virt(FIX_CYCLONE_TIMER) + offset);
}

static int init_cyclone(void)
{
    u32 base;

    if ( !use_cyclone )
        return 0;

    /* Find base address. */
    base = *(map_cyclone_reg(CYCLONE_CBAR_ADDR));
    if ( base == 0 )
    {
        printk(KERN_ERR "Cyclone: Could not find valid CBAR value.\n");
        return 0;
    }

    /* Enable timer and map the counter register. */
    *(map_cyclone_reg(base + CYCLONE_PMCC_OFFSET)) = 1;
    *(map_cyclone_reg(base + CYCLONE_MPCS_OFFSET)) = 1;
    cyclone_timer = map_cyclone_reg(base + CYCLONE_MPMC_OFFSET);

    read_platform_count = read_cyclone_count;

    init_ac_timer(&cyclone_overflow_timer, cyclone_overflow, NULL, 0);
    cyclone_overflow(NULL);
    platform_timer_stamp = cyclone_counter64;
    set_time_scale(&platform_timer_scale, CYCLONE_TIMER_FREQ);

    printk("Platform timer is %s IBM Cyclone\n",
           freq_string(CYCLONE_TIMER_FREQ));

    return 1;
}

/************************************************************
 * GENERIC PLATFORM TIMER INFRASTRUCTURE
 */

static s_time_t __read_platform_stime(u64 platform_time)
{
    u64 diff = platform_time - platform_timer_stamp;
    ASSERT(spin_is_locked(&platform_timer_lock));
    return (stime_platform_stamp + scale_delta(diff, &platform_timer_scale));
}

static s_time_t read_platform_stime(void)
{
    u64 counter;
    s_time_t stime;

    spin_lock(&platform_timer_lock);
    counter = read_platform_count();
    stime   = __read_platform_stime(counter);
    spin_unlock(&platform_timer_lock);

    return stime;
}

static void platform_time_calibration(void)
{
    u64 counter;
    s_time_t stamp;

    spin_lock(&platform_timer_lock);
    counter = read_platform_count();
    stamp   = __read_platform_stime(counter);
    stime_platform_stamp = stamp;
    platform_timer_stamp = counter;
    spin_unlock(&platform_timer_lock);
}

static void init_platform_timer(void)
{
    if ( !init_cyclone() && !init_hpet() )
        BUG_ON(!init_pit());
}

/***************************************************************************
 * CMOS Timer functions
 ***************************************************************************/

/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
 * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
 * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
 *
 * [For the Julian calendar (which was used in Russia before 1917,
 * Britain & colonies before 1752, anywhere else before 1582,
 * and is still in use by some communities) leave out the
 * -year/100+year/400 terms, and add 10.]
 *
 * This algorithm was first published by Gauss (I think).
 *
 * WARNING: this function will overflow on 2106-02-07 06:28:16 on
 * machines where long is 32-bit! (However, as time_t is signed, we
 * will already get problems at other places on 2038-01-19 03:14:08.)
 */
static inline unsigned long
mktime (unsigned int year, unsigned int mon,
        unsigned int day, unsigned int hour,
        unsigned int min, unsigned int sec)
{
    /* 1..12 -> 11,12,1..10: put Feb last since it has a leap day. */
    if ( 0 >= (int) (mon -= 2) )
    {
        mon += 12;
        year -= 1;
    }

    return ((((unsigned long)(year/4 - year/100 + year/400 + 367*mon/12 + day)+
              year*365 - 719499
              )*24 + hour /* now have hours */
             )*60 + min   /* now have minutes */
            )*60 + sec;   /* finally seconds */
}
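
/* Example: mktime(1970, 1, 1, 0, 0, 0) == 0, the Unix epoch. */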

static unsigned long __get_cmos_time(void)
{
    unsigned int year, mon, day, hour, min, sec;

    sec  = CMOS_READ(RTC_SECONDS);
    min  = CMOS_READ(RTC_MINUTES);
    hour = CMOS_READ(RTC_HOURS);
    day  = CMOS_READ(RTC_DAY_OF_MONTH);
    mon  = CMOS_READ(RTC_MONTH);
    year = CMOS_READ(RTC_YEAR);

    if ( !(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD )
    {
        BCD_TO_BIN(sec);
        BCD_TO_BIN(min);
        BCD_TO_BIN(hour);
        BCD_TO_BIN(day);
        BCD_TO_BIN(mon);
        BCD_TO_BIN(year);
    }

    if ( (year += 1900) < 1970 )
        year += 100;

    return mktime(year, mon, day, hour, min, sec);
}

static unsigned long get_cmos_time(void)
{
    unsigned long res, flags;
    int i;

    spin_lock_irqsave(&rtc_lock, flags);

    /* Read RTC exactly on falling edge of update flag. */
    for ( i = 0 ; i < 1000000 ; i++ ) /* may take up to 1 second... */
        if ( (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) )
            break;
    for ( i = 0 ; i < 1000000 ; i++ ) /* must try at least 2.228 ms */
        if ( !(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) )
            break;

    res = __get_cmos_time();

    spin_unlock_irqrestore(&rtc_lock, flags);
    return res;
}

/***************************************************************************
 * System Time
 ***************************************************************************/

s_time_t get_s_time(void)
{
    struct cpu_time *t = &cpu_time[smp_processor_id()];
    u64 tsc, delta;
    s_time_t now;

    rdtscll(tsc);
    delta = tsc - t->local_tsc_stamp;
    now = t->stime_local_stamp + scale_delta(delta, &t->tsc_scale);

    return now;
}

static inline void version_update_begin(u32 *version)
{
    /* Explicitly OR with 1 just in case version number gets out of sync. */
    *version = (*version + 1) | 1;
    wmb();
}

static inline void version_update_end(u32 *version)
{
    wmb();
    (*version)++;
}
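
/*
 * These form the writer side of a seqlock-style protocol: the version is
 * odd while an update is in progress, so a reader (the guest) should
 * snapshot the version, read the record, and retry if the version was odd
 * or has since changed.
 */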

static inline void __update_dom_time(struct vcpu *v)
{
    struct cpu_time *t = &cpu_time[smp_processor_id()];
    struct vcpu_time_info *u = &v->domain->shared_info->vcpu_time[v->vcpu_id];

    version_update_begin(&u->version);

    u->tsc_timestamp     = t->local_tsc_stamp;
    u->system_time       = t->stime_local_stamp;
    u->tsc_to_system_mul = t->tsc_scale.mul_frac;
    u->tsc_shift         = (s8)t->tsc_scale.shift;

    version_update_end(&u->version);
}

void update_dom_time(struct vcpu *v)
{
    if ( v->domain->shared_info->vcpu_time[v->vcpu_id].tsc_timestamp !=
         cpu_time[smp_processor_id()].local_tsc_stamp )
        __update_dom_time(v);
}

/* Set clock to <secs,nsecs> after 00:00:00 UTC, 1 January, 1970. */
void do_settime(unsigned long secs, unsigned long nsecs, u64 system_time_base)
{
    u64 x;
    u32 y, _wc_sec, _wc_nsec;
    struct domain *d;
    shared_info_t *s;

    x = (secs * 1000000000ULL) + (u64)nsecs + system_time_base;
    y = do_div(x, 1000000000);

    wc_sec  = _wc_sec  = (u32)x;
    wc_nsec = _wc_nsec = (u32)y;

    read_lock(&domlist_lock);

    for_each_domain ( d )
    {
        s = d->shared_info;
        version_update_begin(&s->wc_version);
        s->wc_sec  = _wc_sec;
        s->wc_nsec = _wc_nsec;
        version_update_end(&s->wc_version);
    }

    read_unlock(&domlist_lock);
}

void init_domain_time(struct domain *d)
{
    version_update_begin(&d->shared_info->wc_version);
    d->shared_info->wc_sec  = wc_sec;
    d->shared_info->wc_nsec = wc_nsec;
    version_update_end(&d->shared_info->wc_version);
}

static void local_time_calibration(void *unused)
{
    unsigned int cpu = smp_processor_id();

    /*
     * System timestamps, extrapolated from local and master oscillators,
     * taken during this calibration and the previous calibration.
     */
    s_time_t prev_local_stime, curr_local_stime;
    s_time_t prev_master_stime, curr_master_stime;

    /* TSC timestamps taken during this calibration and prev calibration. */
    u64 prev_tsc, curr_tsc;

    /*
     * System time and TSC ticks elapsed during the previous calibration
     * 'epoch'. These values are down-shifted to fit in 32 bits.
     */
    u64 stime_elapsed64, tsc_elapsed64;
    u32 stime_elapsed32, tsc_elapsed32;

    /* The accumulated error in the local estimate. */
    u64 local_stime_err;

    /* Error correction to slow down a fast local clock. */
    u32 error_factor = 0;

    /* Calculated TSC shift to ensure 32-bit scale multiplier. */
    int tsc_shift = 0;

    /* The overall calibration scale multiplier. */
    u32 calibration_mul_frac;

    prev_tsc          = cpu_time[cpu].local_tsc_stamp;
    prev_local_stime  = cpu_time[cpu].stime_local_stamp;
    prev_master_stime = cpu_time[cpu].stime_master_stamp;

    /* Disable IRQs to get 'instantaneous' current timestamps. */
    local_irq_disable();
    rdtscll(curr_tsc);
    curr_local_stime  = get_s_time();
    curr_master_stime = read_platform_stime();
    local_irq_enable();

#if 0
    printk("PRE%d: tsc=%lld stime=%lld master=%lld\n",
           cpu, prev_tsc, prev_local_stime, prev_master_stime);
    printk("CUR%d: tsc=%lld stime=%lld master=%lld -> %lld\n",
           cpu, curr_tsc, curr_local_stime, curr_master_stime,
           curr_master_stime - curr_local_stime);
#endif

    /* Local time warps forward if it lags behind master time. */
    if ( curr_local_stime < curr_master_stime )
        curr_local_stime = curr_master_stime;

    stime_elapsed64 = curr_master_stime - prev_master_stime;
    tsc_elapsed64   = curr_tsc - prev_tsc;

    /*
     * Calculate error-correction factor. This only slows down a fast local
     * clock (slow clocks are warped forwards). The scale factor is clamped
     * to >= 0.5.
     */
    if ( curr_local_stime != curr_master_stime )
    {
        local_stime_err = curr_local_stime - curr_master_stime;
        if ( local_stime_err > EPOCH )
            local_stime_err = EPOCH;
        error_factor = div_frac(EPOCH, EPOCH + (u32)local_stime_err);
    }

    /*
     * We require 0 < stime_elapsed < 2^31.
     * This allows us to binary shift a 32-bit tsc_elapsed such that:
     * stime_elapsed < tsc_elapsed <= 2*stime_elapsed
     */
    while ( ((u32)stime_elapsed64 != stime_elapsed64) ||
            ((s32)stime_elapsed64 < 0) )
    {
        stime_elapsed64 >>= 1;
        tsc_elapsed64   >>= 1;
    }

    /* stime_elapsed now fits in a 32-bit word. */
    stime_elapsed32 = (u32)stime_elapsed64;

    /* tsc_elapsed <= 2*stime_elapsed */
    while ( tsc_elapsed64 > (stime_elapsed32 * 2) )
    {
        tsc_elapsed64 >>= 1;
        tsc_shift--;
    }

    /* Local difference must now fit in 32 bits. */
    ASSERT((u32)tsc_elapsed64 == tsc_elapsed64);
    tsc_elapsed32 = (u32)tsc_elapsed64;

    /* tsc_elapsed > stime_elapsed */
    ASSERT(tsc_elapsed32 != 0);
    while ( tsc_elapsed32 <= stime_elapsed32 )
    {
        tsc_elapsed32 <<= 1;
        tsc_shift++;
    }

    calibration_mul_frac = div_frac(stime_elapsed32, tsc_elapsed32);
    if ( error_factor != 0 )
        calibration_mul_frac = mul_frac(calibration_mul_frac, error_factor);

#if 0
    printk("---%d: %08x %08x %d\n", cpu,
           error_factor, calibration_mul_frac, tsc_shift);
#endif

    /* Record new timestamp information. */
    cpu_time[cpu].tsc_scale.mul_frac = calibration_mul_frac;
    cpu_time[cpu].tsc_scale.shift    = tsc_shift;
    cpu_time[cpu].local_tsc_stamp    = curr_tsc;
    cpu_time[cpu].stime_local_stamp  = curr_local_stime;
    cpu_time[cpu].stime_master_stamp = curr_master_stime;

    set_ac_timer(&cpu_time[cpu].calibration_timer, NOW() + EPOCH);

    if ( cpu == 0 )
        platform_time_calibration();
}

void init_percpu_time(void)
{
    unsigned int cpu = smp_processor_id();
    unsigned long flags;
    s_time_t now;

    local_irq_save(flags);
    rdtscll(cpu_time[cpu].local_tsc_stamp);
    now = (cpu == 0) ? 0 : read_platform_stime();
    local_irq_restore(flags);

    cpu_time[cpu].stime_master_stamp = now;
    cpu_time[cpu].stime_local_stamp  = now;

    init_ac_timer(&cpu_time[cpu].calibration_timer,
                  local_time_calibration, NULL, cpu);
    set_ac_timer(&cpu_time[cpu].calibration_timer, NOW() + EPOCH);
}

/* Late init function (after all CPUs are booted). */
int __init init_xen_time(void)
{
    wc_sec = get_cmos_time();

    local_irq_disable();

    init_percpu_time();

    stime_platform_stamp = 0;
    init_platform_timer();

    local_irq_enable();

    return 0;
}

/* Early init function. */
void __init early_time_init(void)
{
    u64 tmp = calibrate_boot_tsc();

    set_time_scale(&cpu_time[0].tsc_scale, tmp);

    do_div(tmp, 1000);
    cpu_khz = (unsigned long)tmp;
    printk("Detected %lu.%03lu MHz processor.\n",
           cpu_khz / 1000, cpu_khz % 1000);

    setup_irq(0, &irq0);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */