/******************************************************************************
 * arch/x86/time.c
 *
 * Per-CPU time calibration and management.
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * Portions from Linux are:
 * Copyright (c) 1991, 1992, 1995  Linus Torvalds
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/event.h>
#include <xen/sched.h>
#include <xen/lib.h>
#include <xen/init.h>
#include <xen/time.h>
#include <xen/ac_timer.h>
#include <xen/smp.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <asm/io.h>
#include <asm/msr.h>
#include <asm/mpspec.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/mc146818rtc.h>
#include <asm/div64.h>
#include <asm/hpet.h>
#include <io_ports.h>

/* opt_hpet_force: If true, force HPET configuration via PCI space. */
/* NB. This is a gross hack. Mainly useful for HPET testing. */
static int opt_hpet_force = 0;
boolean_param("hpet_force", opt_hpet_force);

#define EPOCH MILLISECS(1000)

unsigned long cpu_khz;  /* CPU clock frequency in kHz. */
unsigned long hpet_address;
spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
int timer_ack = 0;
unsigned long volatile jiffies;

/* UTC time at system boot. */
static s64 wc_sec;
static u32 wc_nsec;
static spinlock_t wc_lock = SPIN_LOCK_UNLOCKED;

struct time_scale {
    int shift;
    u32 mul_frac;
};

struct cpu_time {
    u64 local_tsc_stamp;
    s_time_t stime_local_stamp;
    s_time_t stime_master_stamp;
    struct time_scale tsc_scale;
    struct ac_timer calibration_timer;
} __cacheline_aligned;

static struct cpu_time cpu_time[NR_CPUS];

/* Protected by platform_timer_lock. */
static s_time_t stime_platform_stamp;
static u64 platform_timer_stamp;
static struct time_scale platform_timer_scale;
static spinlock_t platform_timer_lock = SPIN_LOCK_UNLOCKED;
static u64 (*read_platform_count)(void);

/*
 * 32-bit division of integer dividend and integer divisor yielding
 * 32-bit fractional quotient.
 */
static inline u32 div_frac(u32 dividend, u32 divisor)
{
    u32 quotient, remainder;
    ASSERT(dividend < divisor);
    __asm__ (
        "divl %4"
        : "=a" (quotient), "=d" (remainder)
        : "0" (0), "1" (dividend), "r" (divisor) );
    return quotient;
}

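/*
 * Worked example: the quotient is a 0.32 fixed-point fraction, i.e.
 * dividend/divisor scaled by 2^32. For instance,
 * div_frac(1000000000, 2000000000) computes (10^9 * 2^32) / (2*10^9),
 * returning 0x80000000, which represents the fraction 0.5.
 */
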
/*
 * 32-bit multiplication of multiplicand and fractional multiplier
 * yielding 32-bit product (radix point at same position as in multiplicand).
 */
static inline u32 mul_frac(u32 multiplicand, u32 multiplier)
{
    u32 product_int, product_frac;
    __asm__ (
        "mul %3"
        : "=a" (product_frac), "=d" (product_int)
        : "0" (multiplicand), "r" (multiplier) );
    return product_int;
}

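/*
 * Worked example: mul_frac(6, 0x80000000) forms the 64-bit product
 * 6 * 0x80000000 = 0x300000000 and keeps the high word, returning 3:
 * six times the fraction 0.5.
 */
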
/*
 * Scale a 64-bit delta by shifting and multiplying by a 32-bit fraction,
 * yielding a 64-bit result.
 */
static inline u64 scale_delta(u64 delta, struct time_scale *scale)
{
    u64 product;
#ifdef CONFIG_X86_32
    u32 tmp1, tmp2;
#endif

    if ( scale->shift < 0 )
        delta >>= -scale->shift;
    else
        delta <<= scale->shift;

#ifdef CONFIG_X86_32
    __asm__ (
        "mul  %5       ; "
        "mov  %4,%%eax ; "
        "mov  %%edx,%4 ; "
        "mul  %5       ; "
        "add  %4,%%eax ; "
        "xor  %5,%5    ; "
        "adc  %5,%%edx ; "
        : "=A" (product), "=r" (tmp1), "=r" (tmp2)
        : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (scale->mul_frac) );
#else
    __asm__ (
        "mul %%rdx ; shrd $32,%%rdx,%%rax"
        : "=a" (product) : "0" (delta), "d" ((u64)scale->mul_frac) );
#endif

    return product;
}

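/*
 * Worked example: for a 2GHz TSC, set_time_scale() below yields shift = 0
 * and mul_frac = 0x80000000 (the fraction 0.5), so scale_delta() maps a
 * delta of 2000000000 cycles to 1000000000ns: exactly one second.
 */
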
void timer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
{
    if ( timer_ack )
    {
        extern spinlock_t i8259A_lock;
        spin_lock(&i8259A_lock);
        outb(0x0c, 0x20);
        /* Ack the IRQ; AEOI will end it automatically. */
        inb(0x20);
        spin_unlock(&i8259A_lock);
    }

    /* Update jiffies counter. */
    (*(unsigned long *)&jiffies)++;

    /* Rough hack to allow accurate timers to sort-of-work with no APIC. */
    if ( !cpu_has_apic )
        raise_softirq(AC_TIMER_SOFTIRQ);
}

static struct irqaction irq0 = { timer_interrupt, "timer", NULL };

/* ------ Calibrate the TSC -------
 * Measure processor ticks over a 1/CALIBRATE_FRAC second interval, then
 * scale up by CALIBRATE_FRAC to return ticks per second.
 */

#define CLOCK_TICK_RATE 1193180 /* system crystal frequency (Hz) */
#define CALIBRATE_FRAC  20      /* calibrate over 50ms */
#define CALIBRATE_LATCH ((CLOCK_TICK_RATE+(CALIBRATE_FRAC/2))/CALIBRATE_FRAC)

static u64 calibrate_boot_tsc(void)
{
    u64 start, end;
    unsigned long count;

    /* Set the Gate high, disable speaker. */
    outb((inb(0x61) & ~0x02) | 0x01, 0x61);

    /*
     * Now let's take care of CTC channel 2:
     *
     * Set the Gate high, program CTC channel 2 for mode 0 (interrupt on
     * terminal count mode), binary count, and load the CALIBRATE_LATCH count
     * (LSB then MSB) to begin the countdown.
     */
    outb(0xb0, PIT_MODE);                  /* binary, mode 0, LSB/MSB, Ch 2 */
    outb(CALIBRATE_LATCH & 0xff, PIT_CH2); /* LSB of count */
    outb(CALIBRATE_LATCH >> 8, PIT_CH2);   /* MSB of count */

    rdtscll(start);
    for ( count = 0; (inb(0x61) & 0x20) == 0; count++ )
        continue;
    rdtscll(end);

    /* Error if the CTC doesn't behave itself. */
    if ( count == 0 )
        return 0;

    return ((end - start) * (u64)CALIBRATE_FRAC);
}

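/*
 * Worked example: with CALIBRATE_FRAC = 20 the latch is programmed for
 * 1193180/20 = 59659 PIT ticks, i.e. a 50ms gate. A 2GHz CPU accumulates
 * roughly 100000000 TSC ticks in that window; multiplying by 20 recovers
 * the full rate of 2000000000 ticks per second.
 */
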
static void set_time_scale(struct time_scale *ts, u64 ticks_per_sec)
{
    u64 tps64 = ticks_per_sec;
    u32 tps32;
    int shift = 0;

    while ( tps64 > (MILLISECS(1000)*2) )
    {
        tps64 >>= 1;
        shift--;
    }

    tps32 = (u32)tps64;
    while ( tps32 < (u32)MILLISECS(1000) )
    {
        tps32 <<= 1;
        shift++;
    }

    ts->mul_frac = div_frac(MILLISECS(1000), tps32);
    ts->shift = shift;
}

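/*
 * Worked example: for a 3GHz TSC, tps64 = 3000000000 is halved once to
 * 1500000000 (shift = -1), which lies in [10^9, 2*10^9). The multiplier is
 * then div_frac(10^9, 1500000000) = 2^32 * 2/3, so scale_delta() computes
 * (delta >> 1) * 2/3 = delta / 3: one nanosecond per three TSC ticks.
 */
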
static atomic_t tsc_calibrate_gang = ATOMIC_INIT(0);
static unsigned int tsc_calibrate_status = 0;

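/*
 * BP/AP rendezvous protocol: each AP increments tsc_calibrate_gang and
 * spins. Once all APs have checked in, the BP loads a 50ms countdown into
 * PIT channel 2 and advances tsc_calibrate_status to 1 (countdown started)
 * and then, when the output pin goes high, to 2 (countdown complete). Each
 * AP reads its TSC at both transitions, so every CPU measures the same
 * 50ms of real time.
 */
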
void calibrate_tsc_bp(void)
{
    while ( atomic_read(&tsc_calibrate_gang) != (num_booting_cpus() - 1) )
        mb();

    outb(CALIBRATE_LATCH & 0xff, PIT_CH2);
    outb(CALIBRATE_LATCH >> 8, PIT_CH2);

    tsc_calibrate_status = 1;
    wmb();

    while ( (inb(0x61) & 0x20) == 0 )
        continue;

    tsc_calibrate_status = 2;
    wmb();

    while ( atomic_read(&tsc_calibrate_gang) != 0 )
        mb();
}

void calibrate_tsc_ap(void)
{
    u64 t1, t2, ticks_per_sec;

    atomic_inc(&tsc_calibrate_gang);

    while ( tsc_calibrate_status < 1 )
        mb();

    rdtscll(t1);

    while ( tsc_calibrate_status < 2 )
        mb();

    rdtscll(t2);

    ticks_per_sec = (t2 - t1) * (u64)CALIBRATE_FRAC;
    set_time_scale(&cpu_time[smp_processor_id()].tsc_scale, ticks_per_sec);

    atomic_dec(&tsc_calibrate_gang);
}

static char *freq_string(u64 freq)
{
    static char s[20];
    unsigned int x, y;
    y = (unsigned int)do_div(freq, 1000000) / 1000;
    x = (unsigned int)freq;
    sprintf(s, "%u.%03uMHz", x, y);
    return s;
}

/************************************************************
 * PLATFORM TIMER 1: PROGRAMMABLE INTERVAL TIMER (LEGACY PIT)
 */

/* Protected by platform_timer_lock. */
static u64 pit_counter64;
static u16 pit_stamp;
static struct ac_timer pit_overflow_timer;

static u16 pit_read_counter(void)
{
    u16 count;
    ASSERT(spin_is_locked(&platform_timer_lock));
    outb(0x80, PIT_MODE); /* latch channel 2 count */
    count  = inb(PIT_CH2);
    count |= inb(PIT_CH2) << 8;
    return count;
}

static void pit_overflow(void *unused)
{
    u16 counter;

    spin_lock(&platform_timer_lock);
    counter = pit_read_counter();
    pit_counter64 += (u16)(pit_stamp - counter);
    pit_stamp = counter;
    spin_unlock(&platform_timer_lock);

    set_ac_timer(&pit_overflow_timer, NOW() + MILLISECS(20));
}

static u64 read_pit_count(void)
{
    return pit_counter64 + (u16)(pit_stamp - pit_read_counter());
}

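/*
 * Note on the folding arithmetic: the PIT counts *down*, so
 * (u16)(pit_stamp - counter) is the number of ticks elapsed since the last
 * fold, modulo 2^16. At 1193180Hz the 16-bit counter wraps every ~55ms;
 * folding every 20ms therefore never misses a wrap.
 */
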
static int init_pit(void)
{
    read_platform_count = read_pit_count;

    init_ac_timer(&pit_overflow_timer, pit_overflow, NULL, 0);
    pit_overflow(NULL);
    platform_timer_stamp = pit_counter64;
    set_time_scale(&platform_timer_scale, CLOCK_TICK_RATE);

    printk("Platform timer is %s PIT\n", freq_string(CLOCK_TICK_RATE));

    return 1;
}

/************************************************************
 * PLATFORM TIMER 2: HIGH PRECISION EVENT TIMER (HPET)
 */

/* Protected by platform_timer_lock. */
static u64 hpet_counter64, hpet_overflow_period;
static u32 hpet_stamp;
static struct ac_timer hpet_overflow_timer;

static void hpet_overflow(void *unused)
{
    u32 counter;

    spin_lock(&platform_timer_lock);
    counter = hpet_read32(HPET_COUNTER);
    hpet_counter64 += (u32)(counter - hpet_stamp);
    hpet_stamp = counter;
    spin_unlock(&platform_timer_lock);

    set_ac_timer(&hpet_overflow_timer, NOW() + hpet_overflow_period);
}

static u64 read_hpet_count(void)
{
    return hpet_counter64 + (u32)(hpet_read32(HPET_COUNTER) - hpet_stamp);
}

static int init_hpet(void)
{
    u64 hpet_rate;
    u32 hpet_id, hpet_period, cfg;
    int i;

    if ( (hpet_address == 0) && opt_hpet_force )
    {
        outl(0x800038a0, 0xcf8);
        outl(0xff000001, 0xcfc);
        outl(0x800038a0, 0xcf8);
        hpet_address = inl(0xcfc) & 0xfffffffe;
        printk("WARNING: Forcibly enabled HPET at %#lx.\n", hpet_address);
    }

    if ( hpet_address == 0 )
        return 0;

    set_fixmap_nocache(FIX_HPET_BASE, hpet_address);

    hpet_id = hpet_read32(HPET_ID);
    if ( hpet_id == 0 )
    {
        printk("BAD HPET vendor id.\n");
        return 0;
    }

    /* Check for sane period (100ps <= period <= 100ns). */
    hpet_period = hpet_read32(HPET_PERIOD);
    if ( (hpet_period > 100000000) || (hpet_period < 100000) )
    {
        printk("BAD HPET period %u.\n", hpet_period);
        return 0;
    }

    cfg = hpet_read32(HPET_CFG);
    cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
    hpet_write32(cfg, HPET_CFG);

    for ( i = 0; i <= ((hpet_id >> 8) & 31); i++ )
    {
        cfg = hpet_read32(HPET_T0_CFG + i*0x20);
        cfg &= ~HPET_TN_ENABLE;
        hpet_write32(cfg, HPET_T0_CFG + i*0x20); /* disable timer i */
    }

    cfg = hpet_read32(HPET_CFG);
    cfg |= HPET_CFG_ENABLE;
    hpet_write32(cfg, HPET_CFG);

    read_platform_count = read_hpet_count;

    hpet_rate = 1000000000000000ULL; /* 10^15 femtoseconds per second */
    (void)do_div(hpet_rate, hpet_period);
    set_time_scale(&platform_timer_scale, hpet_rate);

    /* Trigger overflow avoidance roughly when counter increments 2^31. */
    if ( (hpet_rate >> 31) != 0 )
    {
        hpet_overflow_period = MILLISECS(1000);
        (void)do_div(hpet_overflow_period, (u32)(hpet_rate >> 31) + 1);
    }
    else
    {
        hpet_overflow_period = MILLISECS(1000) << 31;
        (void)do_div(hpet_overflow_period, (u32)hpet_rate);
    }

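    /*
     * Worked example: a 14.318MHz HPET (period ~69841279fs) gives
     * hpet_rate >> 31 == 0, so the second branch computes
     * (10^9ns << 31) / 14318180 ~= 150 seconds between folds, comfortably
     * inside the ~5 minute wrap time of the 32-bit counter.
     */
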
    init_ac_timer(&hpet_overflow_timer, hpet_overflow, NULL, 0);
    hpet_overflow(NULL);
    platform_timer_stamp = hpet_counter64;

    printk("Platform timer is %s HPET\n", freq_string(hpet_rate));

    return 1;
}

/************************************************************
 * PLATFORM TIMER 3: IBM 'CYCLONE' TIMER
 */

int use_cyclone;

/*
 * Although the counter is read via a 64-bit register, I believe it is
 * actually a 40-bit counter. Since this will wrap, I read only the low 32
 * bits and periodically fold into a 64-bit software counter, just as for
 * PIT and HPET.
 */
#define CYCLONE_CBAR_ADDR   0xFEB00CD0
#define CYCLONE_PMCC_OFFSET 0x51A0
#define CYCLONE_MPMC_OFFSET 0x51D0
#define CYCLONE_MPCS_OFFSET 0x51A8
#define CYCLONE_TIMER_FREQ  100000000

/* Protected by platform_timer_lock. */
static u64 cyclone_counter64;
static u32 cyclone_stamp;
static struct ac_timer cyclone_overflow_timer;
static volatile u32 *cyclone_timer; /* Cyclone MPMC0 register */

static void cyclone_overflow(void *unused)
{
    u32 counter;

    spin_lock(&platform_timer_lock);
    counter = *cyclone_timer;
    cyclone_counter64 += (u32)(counter - cyclone_stamp);
    cyclone_stamp = counter;
    spin_unlock(&platform_timer_lock);

    set_ac_timer(&cyclone_overflow_timer, NOW() + MILLISECS(20000));
}

static u64 read_cyclone_count(void)
{
    return cyclone_counter64 + (u32)(*cyclone_timer - cyclone_stamp);
}

static volatile u32 *map_cyclone_reg(unsigned long regaddr)
{
    unsigned long pageaddr = regaddr & PAGE_MASK;
    unsigned long offset   = regaddr & ~PAGE_MASK;
    set_fixmap_nocache(FIX_CYCLONE_TIMER, pageaddr);
    return (volatile u32 *)(fix_to_virt(FIX_CYCLONE_TIMER) + offset);
}

static int init_cyclone(void)
{
    u32 base;

    if ( !use_cyclone )
        return 0;

    /* Find base address. */
    base = *(map_cyclone_reg(CYCLONE_CBAR_ADDR));
    if ( base == 0 )
    {
        printk(KERN_ERR "Cyclone: Could not find valid CBAR value.\n");
        return 0;
    }

    /* Enable timer and map the counter register. */
    *(map_cyclone_reg(base + CYCLONE_PMCC_OFFSET)) = 1;
    *(map_cyclone_reg(base + CYCLONE_MPCS_OFFSET)) = 1;
    cyclone_timer = map_cyclone_reg(base + CYCLONE_MPMC_OFFSET);

    read_platform_count = read_cyclone_count;

    init_ac_timer(&cyclone_overflow_timer, cyclone_overflow, NULL, 0);
    cyclone_overflow(NULL);
    platform_timer_stamp = cyclone_counter64;
    set_time_scale(&platform_timer_scale, CYCLONE_TIMER_FREQ);

    printk("Platform timer is %s IBM Cyclone\n",
           freq_string(CYCLONE_TIMER_FREQ));

    return 1;
}

/************************************************************
 * GENERIC PLATFORM TIMER INFRASTRUCTURE
 */

static s_time_t __read_platform_stime(u64 platform_time)
{
    u64 diff = platform_time - platform_timer_stamp;
    ASSERT(spin_is_locked(&platform_timer_lock));
    return (stime_platform_stamp + scale_delta(diff, &platform_timer_scale));
}

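/*
 * In other words, system time is extrapolated linearly from the last
 * (platform count, system time) checkpoint. With the PIT, for example, a
 * diff of 1193180 ticks scales by 10^9/1193180 to exactly one second.
 */
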
static s_time_t read_platform_stime(void)
{
    u64 counter;
    s_time_t stime;

    spin_lock(&platform_timer_lock);
    counter = read_platform_count();
    stime   = __read_platform_stime(counter);
    spin_unlock(&platform_timer_lock);

    return stime;
}

static void platform_time_calibration(void)
{
    u64 counter;
    s_time_t stamp;

    spin_lock(&platform_timer_lock);
    counter = read_platform_count();
    stamp   = __read_platform_stime(counter);
    stime_platform_stamp = stamp;
    platform_timer_stamp = counter;
    spin_unlock(&platform_timer_lock);
}

static void init_platform_timer(void)
{
    if ( !init_cyclone() && !init_hpet() )
        BUG_ON(!init_pit());
}

/***************************************************************************
 * CMOS Timer functions
 ***************************************************************************/

/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
 * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
 * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
 *
 * [For the Julian calendar (which was used in Russia before 1917,
 * Britain & colonies before 1752, anywhere else before 1582,
 * and is still in use by some communities) leave out the
 * -year/100+year/400 terms, and add 10.]
 *
 * This algorithm was first published by Gauss (I think).
 *
 * WARNING: this function will overflow on 2106-02-07 06:28:16 on
 * machines where long is 32-bit! (However, as time_t is signed, we
 * will already get problems at other places on 2038-01-19 03:14:08.)
 */

static inline unsigned long
mktime (unsigned int year, unsigned int mon,
        unsigned int day, unsigned int hour,
        unsigned int min, unsigned int sec)
{
    /* 1..12 -> 11,12,1..10: put Feb last since it has a leap day. */
    if ( 0 >= (int) (mon -= 2) )
    {
        mon += 12;
        year -= 1;
    }

    return ((((unsigned long)(year/4 - year/100 + year/400 + 367*mon/12 + day)+
              year*365 - 719499
              )*24 + hour /* now have hours */
            )*60 + min   /* now have minutes */
           )*60 + sec;   /* finally seconds */
}

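/*
 * Worked example: mktime(1970, 1, 1, 0, 0, 0) remaps January to month 11 of
 * 1969, giving (492 - 19 + 4 + 336 + 1) + 1969*365 - 719499 =
 * 814 + 718685 - 719499 = 0 days, i.e. the Unix epoch, as required.
 */
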
static unsigned long __get_cmos_time(void)
{
    unsigned int year, mon, day, hour, min, sec;

    sec  = CMOS_READ(RTC_SECONDS);
    min  = CMOS_READ(RTC_MINUTES);
    hour = CMOS_READ(RTC_HOURS);
    day  = CMOS_READ(RTC_DAY_OF_MONTH);
    mon  = CMOS_READ(RTC_MONTH);
    year = CMOS_READ(RTC_YEAR);

    if ( !(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD )
    {
        BCD_TO_BIN(sec);
        BCD_TO_BIN(min);
        BCD_TO_BIN(hour);
        BCD_TO_BIN(day);
        BCD_TO_BIN(mon);
        BCD_TO_BIN(year);
    }

    if ( (year += 1900) < 1970 )
        year += 100;

    return mktime(year, mon, day, hour, min, sec);
}

static unsigned long get_cmos_time(void)
{
    unsigned long res, flags;
    int i;

    spin_lock_irqsave(&rtc_lock, flags);

    /* Read RTC exactly on falling edge of update flag. */
    for ( i = 0 ; i < 1000000 ; i++ ) /* may take up to 1 second... */
        if ( (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) )
            break;
    for ( i = 0 ; i < 1000000 ; i++ ) /* must try at least 2.228 ms */
        if ( !(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) )
            break;

    res = __get_cmos_time();

    spin_unlock_irqrestore(&rtc_lock, flags);
    return res;
}

/***************************************************************************
 * System Time
 ***************************************************************************/

s_time_t get_s_time(void)
{
    struct cpu_time *t = &cpu_time[smp_processor_id()];
    u64 tsc, delta;
    s_time_t now;

    rdtscll(tsc);
    delta = tsc - t->local_tsc_stamp;
    now = t->stime_local_stamp + scale_delta(delta, &t->tsc_scale);

    return now;
}

static inline void version_update_begin(u32 *version)
{
    /* Explicitly OR with 1 just in case version number gets out of sync. */
    *version = (*version + 1) | 1;
    wmb();
}

static inline void version_update_end(u32 *version)
{
    wmb();
    (*version)++;
}

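/*
 * This is the writer half of a seqlock-style protocol: the version is odd
 * while an update is in flight and even when the record is stable. A guest
 * reader is expected to do something like the following (a sketch, not code
 * from this file):
 *
 *     do {
 *         ver = u->version;
 *         rmb();
 *         ... copy tsc_timestamp, system_time, tsc_to_system_mul ...
 *         rmb();
 *     } while ( (ver & 1) || (ver != u->version) );
 */
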
static inline void __update_dom_time(struct vcpu *v)
{
    struct cpu_time *t = &cpu_time[smp_processor_id()];
    struct vcpu_time_info *u = &v->domain->shared_info->vcpu_time[v->vcpu_id];

    version_update_begin(&u->version);

    u->tsc_timestamp     = t->local_tsc_stamp;
    u->system_time       = t->stime_local_stamp;
    u->tsc_to_system_mul = t->tsc_scale.mul_frac;
    u->tsc_shift         = (s8)t->tsc_scale.shift;

    version_update_end(&u->version);
}

void update_dom_time(struct vcpu *v)
{
    if ( v->domain->shared_info->vcpu_time[v->vcpu_id].tsc_timestamp !=
         cpu_time[smp_processor_id()].local_tsc_stamp )
        __update_dom_time(v);
}

/* Set clock to <secs,nsecs> after 00:00:00 UTC, 1 January, 1970. */
void do_settime(s64 secs, u32 nsecs, u64 system_time_base)
{
    s64 x;
    u32 y;
    struct domain *d;
    shared_info_t *s;

    x = (secs * 1000000000LL) + (u64)nsecs - system_time_base;
    if ( x < 0 )
    {
        /* -ve UTC offset => -ve seconds, +ve nanoseconds. */
        x = -x;
        y = do_div(x, 1000000000);
        x = -x;
        if ( y != 0 )
        {
            y = 1000000000 - y;
            x--;
        }
    }
    else
    {
        y = do_div(x, 1000000000);
    }

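    /*
     * Worked example for the negative branch: x = -0.3s enters as
     * -300000000ns; do_div on the negated value leaves x = 0, y = 300000000,
     * and the fix-up yields x = -1, y = 700000000, i.e. -1s + 0.7s = -0.3s.
     */
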
    wc_sec  = x;
    wc_nsec = y;

    read_lock(&domlist_lock);
    spin_lock(&wc_lock);

    for_each_domain ( d )
    {
        s = d->shared_info;
        version_update_begin(&s->wc_version);
        s->wc_sec  = x;
        s->wc_nsec = y;
        version_update_end(&s->wc_version);
    }

    spin_unlock(&wc_lock);
    read_unlock(&domlist_lock);
}

void init_domain_time(struct domain *d)
{
    spin_lock(&wc_lock);
    version_update_begin(&d->shared_info->wc_version);
    d->shared_info->wc_sec  = wc_sec;
    d->shared_info->wc_nsec = wc_nsec;
    version_update_end(&d->shared_info->wc_version);
    spin_unlock(&wc_lock);
}

static void local_time_calibration(void *unused)
{
    unsigned int cpu = smp_processor_id();

    /*
     * System timestamps, extrapolated from local and master oscillators,
     * taken during this calibration and the previous calibration.
     */
    s_time_t prev_local_stime, curr_local_stime;
    s_time_t prev_master_stime, curr_master_stime;

    /* TSC timestamps taken during this calibration and prev calibration. */
    u64 prev_tsc, curr_tsc;

    /*
     * System time and TSC ticks elapsed during the previous calibration
     * 'epoch'. These values are down-shifted to fit in 32 bits.
     */
    u64 stime_elapsed64, tsc_elapsed64;
    u32 stime_elapsed32, tsc_elapsed32;

    /* The accumulated error in the local estimate. */
    u64 local_stime_err;

    /* Error correction to slow down a fast local clock. */
    u32 error_factor = 0;

    /* Calculated TSC shift to ensure 32-bit scale multiplier. */
    int tsc_shift = 0;

    /* The overall calibration scale multiplier. */
    u32 calibration_mul_frac;

    prev_tsc          = cpu_time[cpu].local_tsc_stamp;
    prev_local_stime  = cpu_time[cpu].stime_local_stamp;
    prev_master_stime = cpu_time[cpu].stime_master_stamp;

    /* Disable IRQs to get 'instantaneous' current timestamps. */
    local_irq_disable();
    rdtscll(curr_tsc);
    curr_local_stime  = get_s_time();
    curr_master_stime = read_platform_stime();
    local_irq_enable();

#if 0
    printk("PRE%d: tsc=%lld stime=%lld master=%lld\n",
           cpu, prev_tsc, prev_local_stime, prev_master_stime);
    printk("CUR%d: tsc=%lld stime=%lld master=%lld -> %lld\n",
           cpu, curr_tsc, curr_local_stime, curr_master_stime,
           curr_master_stime - curr_local_stime);
#endif

    /* Local time warps forward if it lags behind master time. */
    if ( curr_local_stime < curr_master_stime )
        curr_local_stime = curr_master_stime;

    stime_elapsed64 = curr_master_stime - prev_master_stime;
    tsc_elapsed64   = curr_tsc - prev_tsc;

    /*
     * Calculate error-correction factor. This only slows down a fast local
     * clock (slow clocks are warped forwards). The scale factor is clamped
     * to >= 0.5.
     */
    if ( curr_local_stime != curr_master_stime )
    {
        local_stime_err = curr_local_stime - curr_master_stime;
        if ( local_stime_err > EPOCH )
            local_stime_err = EPOCH;
        error_factor = div_frac(EPOCH, EPOCH + (u32)local_stime_err);
    }

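    /*
     * Worked example: if the local clock ran 1ms ahead over a 1s epoch,
     * error_factor = 2^32 * 10^9 / (10^9 + 10^6) ~= 0.999 * 2^32, shaving
     * ~0.1% off the TSC multiplier for the next epoch. The clamp at EPOCH
     * bounds the factor at 0.5 no matter how large the error.
     */
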
    /*
     * We require 0 < stime_elapsed < 2^31.
     * This allows us to binary shift a 32-bit tsc_elapsed such that:
     * stime_elapsed < tsc_elapsed <= 2*stime_elapsed
     */
    while ( ((u32)stime_elapsed64 != stime_elapsed64) ||
            ((s32)stime_elapsed64 < 0) )
    {
        stime_elapsed64 >>= 1;
        tsc_elapsed64   >>= 1;
    }

    /* stime_elapsed now fits in a 32-bit word. */
    stime_elapsed32 = (u32)stime_elapsed64;

    /* tsc_elapsed <= 2*stime_elapsed */
    while ( tsc_elapsed64 > (stime_elapsed32 * 2) )
    {
        tsc_elapsed64 >>= 1;
        tsc_shift--;
    }

    /* Local difference must now fit in 32 bits. */
    ASSERT((u32)tsc_elapsed64 == tsc_elapsed64);
    tsc_elapsed32 = (u32)tsc_elapsed64;

    /* tsc_elapsed > stime_elapsed */
    ASSERT(tsc_elapsed32 != 0);
    while ( tsc_elapsed32 <= stime_elapsed32 )
    {
        tsc_elapsed32 <<= 1;
        tsc_shift++;
    }

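    /*
     * Worked example: with stime_elapsed = 10^9ns and tsc_elapsed = 3*10^9
     * (a 3GHz TSC over a 1s epoch), one halving gives tsc_elapsed32 =
     * 1.5*10^9 and tsc_shift = -1, satisfying
     * stime_elapsed < tsc_elapsed <= 2*stime_elapsed; div_frac below then
     * yields a multiplier of 2/3, matching set_time_scale() for 3GHz.
     */
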
    calibration_mul_frac = div_frac(stime_elapsed32, tsc_elapsed32);
    if ( error_factor != 0 )
        calibration_mul_frac = mul_frac(calibration_mul_frac, error_factor);

#if 0
    printk("---%d: %08x %08x %d\n", cpu,
           error_factor, calibration_mul_frac, tsc_shift);
#endif

    /* Record new timestamp information. */
    cpu_time[cpu].tsc_scale.mul_frac = calibration_mul_frac;
    cpu_time[cpu].tsc_scale.shift    = tsc_shift;
    cpu_time[cpu].local_tsc_stamp    = curr_tsc;
    cpu_time[cpu].stime_local_stamp  = curr_local_stime;
    cpu_time[cpu].stime_master_stamp = curr_master_stime;

    set_ac_timer(&cpu_time[cpu].calibration_timer, NOW() + EPOCH);

    if ( cpu == 0 )
        platform_time_calibration();
}

void init_percpu_time(void)
{
    unsigned int cpu = smp_processor_id();
    unsigned long flags;
    s_time_t now;

    local_irq_save(flags);
    rdtscll(cpu_time[cpu].local_tsc_stamp);
    now = (cpu == 0) ? 0 : read_platform_stime();
    local_irq_restore(flags);

    cpu_time[cpu].stime_master_stamp = now;
    cpu_time[cpu].stime_local_stamp  = now;

    init_ac_timer(&cpu_time[cpu].calibration_timer,
                  local_time_calibration, NULL, cpu);
    set_ac_timer(&cpu_time[cpu].calibration_timer, NOW() + EPOCH);
}

/* Late init function (after all CPUs are booted). */
int __init init_xen_time(void)
{
    wc_sec = get_cmos_time();

    local_irq_disable();

    init_percpu_time();

    stime_platform_stamp = 0;
    init_platform_timer();

    local_irq_enable();

    return 0;
}

/* Early init function. */
void __init early_time_init(void)
{
    u64 tmp = calibrate_boot_tsc();

    set_time_scale(&cpu_time[0].tsc_scale, tmp);

    do_div(tmp, 1000);
    cpu_khz = (unsigned long)tmp;
    printk("Detected %lu.%03lu MHz processor.\n",
           cpu_khz / 1000, cpu_khz % 1000);

    setup_irq(0, &irq0);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */