ia64/xen-unstable

view linux-2.6.9-xen-sparse/arch/xen/i386/kernel/time.c @ 2782:c48afdf6aa0f

bitkeeper revision 1.1159.1.311 (41837ba99JEAnriHhRYNK8iMktj80A)

Properly initialise the timer_tsc module.
author kaf24@freefall.cl.cam.ac.uk
date Sat Oct 30 11:31:53 2004 +0000 (2004-10-30)
parents f04632034699
children 86d715dd8522
line source
1 /*
2 * linux/arch/i386/kernel/time.c
3 *
4 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
5 *
6 * This file contains the PC-specific time handling details:
7 * reading the RTC at bootup, etc..
8 * 1994-07-02 Alan Modra
9 * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
10 * 1995-03-26 Markus Kuhn
11 * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
12 * precision CMOS clock update
13 * 1996-05-03 Ingo Molnar
14 * fixed time warps in do_[slow|fast]_gettimeoffset()
15 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
16 * "A Kernel Model for Precision Timekeeping" by Dave Mills
17 * 1998-09-05 (Various)
18 * More robust do_fast_gettimeoffset() algorithm implemented
19 * (works with APM, Cyrix 6x86MX and Centaur C6),
20 * monotonic gettimeofday() with fast_get_timeoffset(),
21 * drift-proof precision TSC calibration on boot
22 * (C. Scott Ananian <cananian@alumni.princeton.edu>, Andrew D.
23 * Balsa <andrebalsa@altern.org>, Philip Gladstone <philip@raptor.com>;
24 * ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@tu-harburg.de>).
25 * 1998-12-16 Andrea Arcangeli
26 * Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
27 * because was not accounting lost_ticks.
28 * 1998-12-24 Copyright (C) 1998 Andrea Arcangeli
29 * Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
30 * serialize accesses to xtime/lost_ticks).
31 */
33 #include <linux/errno.h>
34 #include <linux/sched.h>
35 #include <linux/kernel.h>
36 #include <linux/param.h>
37 #include <linux/string.h>
38 #include <linux/mm.h>
39 #include <linux/interrupt.h>
40 #include <linux/time.h>
41 #include <linux/delay.h>
42 #include <linux/init.h>
43 #include <linux/smp.h>
44 #include <linux/module.h>
45 #include <linux/sysdev.h>
46 #include <linux/bcd.h>
47 #include <linux/efi.h>
48 #include <linux/sysctl.h>
50 #include <asm/io.h>
51 #include <asm/smp.h>
52 #include <asm/irq.h>
53 #include <asm/msr.h>
54 #include <asm/delay.h>
55 #include <asm/mpspec.h>
56 #include <asm/uaccess.h>
57 #include <asm/processor.h>
58 #include <asm/timer.h>
60 #include "mach_time.h"
62 #include <linux/timex.h>
63 #include <linux/config.h>
65 #include <asm/hpet.h>
67 #include <asm/arch_hooks.h>
69 #include "io_ports.h"
extern spinlock_t i8259A_lock;
int pit_latch_buggy;		/* extern */

u64 jiffies_64 = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

unsigned long cpu_khz;	/* Detected as we calibrate the TSC */

extern unsigned long wall_jiffies;

spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;

spinlock_t i8253_lock = SPIN_LOCK_UNLOCKED;
EXPORT_SYMBOL(i8253_lock);

struct timer_opts *cur_timer = &timer_tsc;

/* These are periodically updated in shared_info, and then copied here. */
u32 shadow_tsc_stamp;
u64 shadow_system_time;
static u32 shadow_time_version;
static struct timeval shadow_tv;
extern u64 processed_system_time;

/*
 * We use this to ensure that gettimeofday() is monotonically increasing. We
 * only break this guarantee if the wall clock jumps backwards "a long way".
 */
static struct timeval last_seen_tv = {0,0};

#ifdef CONFIG_XEN_PRIVILEGED_GUEST
/* Periodically propagate synchronised time base to the RTC and to Xen. */
static long last_rtc_update, last_update_to_xen;
#endif

/* Periodically take synchronised time base from Xen, if we need it. */
static long last_update_from_xen;   /* UTC seconds when last read Xen clock. */

/* Keep track of last time we did processing/updating of jiffies and xtime. */
u64 processed_system_time;   /* System time (ns) at last processing. */

#define NS_PER_TICK (1000000000ULL/HZ)
/*
 * Bring a struct timeval back into canonical form after arithmetic on
 * tv_usec: fold any deficit or excess microseconds into tv_sec so that
 * 0 <= tv_usec < USEC_PER_SEC afterwards.
 */
#define HANDLE_USEC_UNDERFLOW(_tv) do {		\
	while ((_tv).tv_usec < 0) {		\
		(_tv).tv_usec += USEC_PER_SEC;	\
		(_tv).tv_sec--;			\
	}					\
} while (0)
#define HANDLE_USEC_OVERFLOW(_tv) do {		\
	while ((_tv).tv_usec >= USEC_PER_SEC) {	\
		(_tv).tv_usec -= USEC_PER_SEC;	\
		(_tv).tv_sec++;			\
	}					\
} while (0)
127 static inline void __normalize_time(time_t *sec, s64 *nsec)
128 {
129 while (*nsec >= NSEC_PER_SEC) {
130 (*nsec) -= NSEC_PER_SEC;
131 (*sec)++;
132 }
133 while (*nsec < 0) {
134 (*nsec) += NSEC_PER_SEC;
135 (*sec)--;
136 }
137 }
/* Does this guest OS track Xen time, or set its wall clock independently? */
static int independent_wallclock = 0;
/* Kernel command-line hook: "independent_wallclock" sets the flag above. */
static int __init __independent_wallclock(char *str)
{
	independent_wallclock = 1;
	return 1;
}
__setup("independent_wallclock", __independent_wallclock);
/* True if this domain controls its own wall clock (dom0 always does). */
#define INDEPENDENT_WALLCLOCK() \
    (independent_wallclock || (xen_start_info.flags & SIF_INITDOMAIN))
/*
 * Reads a consistent set of time-base values from Xen, into a shadow data
 * area. Must be called with the xtime_lock held for writing.
 */
static void __get_time_values_from_xen(void)
{
	shared_info_t *s = HYPERVISOR_shared_info;

	/*
	 * Seqlock-style retry: presumably Xen bumps time_version1 before
	 * and time_version2 after updating the time fields, so equal
	 * versions around our reads mean we saw a consistent snapshot.
	 * (NOTE(review): confirm ordering against the Xen interface docs.)
	 */
	do {
		shadow_time_version = s->time_version2;
		rmb();
		shadow_tv.tv_sec    = s->wc_sec;
		shadow_tv.tv_usec   = s->wc_usec;
		shadow_tsc_stamp    = (u32)s->tsc_timestamp;
		shadow_system_time  = s->system_time;
		rmb();
	}
	while (shadow_time_version != s->time_version1);

	/* Let the active timesource record its matching offset baseline. */
	cur_timer->mark_offset();
}
/* True if our shadow copies still match Xen's current time version. */
#define TIME_VALUES_UP_TO_DATE \
	({ rmb(); (shadow_time_version == HYPERVISOR_shared_info->time_version2); })
/*
 * This version of gettimeofday has microsecond resolution
 * and better than microsecond precision on fast x86 machines with TSC.
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long seq;
	unsigned long usec, sec;
	unsigned long max_ntp_tick;
	unsigned long flags;
	s64 nsec;

	do {
		unsigned long lost;

		seq = read_seqbegin(&xtime_lock);

		/* Sub-tick offset from the timesource, plus missed ticks. */
		usec = cur_timer->get_offset();
		lost = jiffies - wall_jiffies;

		/*
		 * If time_adjust is negative then NTP is slowing the clock
		 * so make sure not to go into next possible interval.
		 * Better to lose some accuracy than have time go backwards..
		 */
		if (unlikely(time_adjust < 0)) {
			max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
			usec = min(usec, max_ntp_tick);

			if (lost)
				usec += lost * max_ntp_tick;
		}
		else if (unlikely(lost))
			usec += lost * (USEC_PER_SEC / HZ);

		sec = xtime.tv_sec;
		usec += (xtime.tv_nsec / NSEC_PER_USEC);

		/* Add Xen system time accumulated since xtime was updated. */
		nsec = shadow_system_time - processed_system_time;
		__normalize_time(&sec, &nsec);
		usec += (long)nsec / NSEC_PER_USEC;

		if (unlikely(!TIME_VALUES_UP_TO_DATE)) {
			/*
			 * We may have blocked for a long time,
			 * rendering our calculations invalid
			 * (e.g. the time delta may have
			 * overflowed). Detect that and recalculate
			 * with fresh values.
			 */
			write_seqlock_irqsave(&xtime_lock, flags);
			__get_time_values_from_xen();
			write_sequnlock_irqrestore(&xtime_lock, flags);
			continue;
		}
	} while (read_seqretry(&xtime_lock, seq));

	/* Carry accumulated microseconds into whole seconds. */
	while (usec >= USEC_PER_SEC) {
		usec -= USEC_PER_SEC;
		sec++;
	}

	/* Ensure that time-of-day is monotonically increasing. */
	if ((sec < last_seen_tv.tv_sec) ||
	    ((sec == last_seen_tv.tv_sec) && (usec < last_seen_tv.tv_usec))) {
		sec = last_seen_tv.tv_sec;
		usec = last_seen_tv.tv_usec;
	} else {
		last_seen_tv.tv_sec = sec;
		last_seen_tv.tv_usec = usec;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);
/*
 * Set the wall clock. Only takes effect when this domain is allowed to
 * manage its own wall clock (see INDEPENDENT_WALLCLOCK); dom0 additionally
 * pushes the new time down to Xen via DOM0_SETTIME.
 * Returns 0 on success (including the silent no-op case), -EINVAL on a
 * malformed timespec.
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec;
	s64 nsec;
	struct timespec xentime;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	if (!INDEPENDENT_WALLCLOCK())
		return 0; /* Silent failure? */

	write_seqlock_irq(&xtime_lock);

	/*
	 * Ensure we don't get blocked for a long time so that our time delta
	 * overflows. If that were to happen then our shadow time values would
	 * be stale, so we can retry with fresh ones.
	 */
 again:
	nsec = (s64)tv->tv_nsec -
	    ((s64)cur_timer->get_offset() * (s64)NSEC_PER_USEC);
	if (unlikely(!TIME_VALUES_UP_TO_DATE)) {
		__get_time_values_from_xen();
		goto again;
	}

	__normalize_time(&sec, &nsec);
	set_normalized_timespec(&xentime, sec, nsec);

	/*
	 * This is revolting. We need to set "xtime" correctly. However, the
	 * value in this location is the value at the most recent update of
	 * wall time. Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	nsec -= (jiffies - wall_jiffies) * TICK_NSEC;

	nsec -= (shadow_system_time - processed_system_time);

	__normalize_time(&sec, &nsec);
	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* Tell NTP the clock is no longer synchronised. */
	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;

	/* Reset all our running time counts. They make no sense now. */
	last_seen_tv.tv_sec = 0;
	last_update_from_xen = 0;

#ifdef CONFIG_XEN_PRIVILEGED_GUEST
	if (xen_start_info.flags & SIF_INITDOMAIN) {
		dom0_op_t op;
		last_rtc_update = last_update_to_xen = 0;
		op.cmd = DOM0_SETTIME;
		op.u.settime.secs        = xentime.tv_sec;
		op.u.settime.usecs       = xentime.tv_nsec / NSEC_PER_USEC;
		op.u.settime.system_time = shadow_system_time;
		/* Drop the lock before the (potentially slow) hypercall. */
		write_sequnlock_irq(&xtime_lock);
		HYPERVISOR_dom0_op(&op);
	} else
#endif
		write_sequnlock_irq(&xtime_lock);

	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
#ifdef CONFIG_XEN_PRIVILEGED_GUEST
/*
 * Write the given wall-clock time (seconds) to the battery-backed clock,
 * via EFI when available, otherwise via the machine-specific CMOS path.
 * Returns the underlying helper's status code.
 */
static int set_rtc_mmss(unsigned long nowtime)
{
	int rc;

	/* gets recalled with irq locally disabled */
	spin_lock(&rtc_lock);
	rc = efi_enabled ? efi_set_rtc_mmss(nowtime)
			 : mach_set_rtc_mmss(nowtime);
	spin_unlock(&rtc_lock);

	return rc;
}
#endif
347 /* monotonic_clock(): returns # of nanoseconds passed since time_init()
348 * Note: This function is required to return accurate
349 * time even in the absence of multiple timer ticks.
350 */
351 unsigned long long monotonic_clock(void)
352 {
353 return cur_timer->monotonic_clock();
354 }
355 EXPORT_SYMBOL(monotonic_clock);
#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
/*
 * Program counter to attribute a profiling tick to: if the interrupted
 * PC lies inside a lock helper, report its caller instead so profiles
 * blame the contending code, not the lock primitive.
 */
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (!in_lock_functions(pc))
		return pc;

	/* Return address saved just above the caller's frame pointer. */
	return *(unsigned long *)(regs->ebp + 4);
}
EXPORT_SYMBOL(profile_pc);
#endif
/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
static inline void do_timer_interrupt(int irq, void *dev_id,
				      struct pt_regs *regs)
{
	time_t wtm_sec, sec;
	s64 delta, nsec;
	long sec_diff, wtm_nsec;

	/*
	 * Snapshot Xen's time values and compute nanoseconds elapsed since
	 * our last processing point; retry if Xen updated mid-read.
	 */
	do {
		__get_time_values_from_xen();

		delta = (s64)(shadow_system_time +
			      ((s64)cur_timer->get_offset() *
			       (s64)NSEC_PER_USEC) -
			      processed_system_time);
	}
	while (!TIME_VALUES_UP_TO_DATE);

	/* A negative delta should be impossible; log and bail if seen. */
	if (unlikely(delta < 0)) {
		printk("Timer ISR: Time went backwards: %lld %lld %lld %lld\n",
		       delta, shadow_system_time,
		       ((s64)cur_timer->get_offset() * (s64)NSEC_PER_USEC),
		       processed_system_time);
		return;
	}

	/* Process elapsed jiffies since last call. */
	while (delta >= NS_PER_TICK) {
		delta -= NS_PER_TICK;
		processed_system_time += NS_PER_TICK;
		do_timer(regs);
		if (regs)
			profile_tick(CPU_PROFILING, regs);
	}

	/*
	 * Take synchronised time from Xen once a minute if we're not
	 * synchronised ourselves, and we haven't chosen to keep an independent
	 * time base.
	 */
	if (!INDEPENDENT_WALLCLOCK() &&
	    ((time_status & STA_UNSYNC) != 0) &&
	    (xtime.tv_sec > (last_update_from_xen + 60))) {
		/* Adjust shadow for jiffies that haven't updated xtime yet. */
		shadow_tv.tv_usec -=
		    (jiffies - wall_jiffies) * (USEC_PER_SEC / HZ);
		HANDLE_USEC_UNDERFLOW(shadow_tv);

		/*
		 * Reset our running time counts if they are invalidated by
		 * a warp backwards of more than 500ms.
		 */
		sec_diff = xtime.tv_sec - shadow_tv.tv_sec;
		if (unlikely(abs(sec_diff) > 1) ||
		    unlikely(((sec_diff * USEC_PER_SEC) +
			      (xtime.tv_nsec / NSEC_PER_USEC) -
			      shadow_tv.tv_usec) > 500000)) {
#ifdef CONFIG_XEN_PRIVILEGED_GUEST
			last_rtc_update = last_update_to_xen = 0;
#endif
			last_seen_tv.tv_sec = 0;
		}

		/* Update our unsynchronised xtime appropriately. */
		sec = shadow_tv.tv_sec;
		nsec = shadow_tv.tv_usec * NSEC_PER_USEC;

		__normalize_time(&sec, &nsec);
		wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
		wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

		set_normalized_timespec(&xtime, sec, nsec);
		set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

		last_update_from_xen = sec;
	}

#ifdef CONFIG_XEN_PRIVILEGED_GUEST
	/* Everything below is dom0-only housekeeping. */
	if (!(xen_start_info.flags & SIF_INITDOMAIN))
		return;

	/* Send synchronised time to Xen approximately every minute. */
	if (((time_status & STA_UNSYNC) == 0) &&
	    (xtime.tv_sec > (last_update_to_xen + 60))) {
		dom0_op_t op;
		struct timeval tv;

		tv.tv_sec   = xtime.tv_sec;
		tv.tv_usec  = xtime.tv_nsec / NSEC_PER_USEC;
		tv.tv_usec += (jiffies - wall_jiffies) * (USEC_PER_SEC/HZ);
		HANDLE_USEC_OVERFLOW(tv);

		op.cmd = DOM0_SETTIME;
		op.u.settime.secs        = tv.tv_sec;
		op.u.settime.usecs       = tv.tv_usec;
		op.u.settime.system_time = shadow_system_time;
		HYPERVISOR_dom0_op(&op);

		last_update_to_xen = xtime.tv_sec;
	}

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
	 * called as close as possible to 500 ms before the new second starts.
	 */
	if ((time_status & STA_UNSYNC) == 0 &&
	    xtime.tv_sec > last_rtc_update + 660 &&
	    (xtime.tv_nsec / 1000)
	    >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
	    (xtime.tv_nsec / 1000)
	    <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2) {
		/* horrible...FIXME */
		if (efi_enabled) {
			if (efi_set_rtc_mmss(xtime.tv_sec) == 0)
				last_rtc_update = xtime.tv_sec;
			else
				last_rtc_update = xtime.tv_sec - 600;
		} else if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
	}
#endif
}
/*
 * This is the same as the above, except we _also_ save the current
 * Time Stamp Counter value at the time of the timer interrupt, so that
 * we later on can estimate the time of day more exactly.
 */
irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	/*
	 * Here we are in the timer irq handler. We just have irqs locally
	 * disabled but we don't know if the timer_bh is running on the other
	 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
	 * the irq version of write_lock because as just said we have irq
	 * locally disabled. -arca
	 */
	write_seqlock(&xtime_lock);
	do_timer_interrupt(irq, NULL, regs);
	write_sequnlock(&xtime_lock);
	return IRQ_HANDLED;
}
519 /* not static: needed by APM */
520 unsigned long get_cmos_time(void)
521 {
522 unsigned long retval;
524 spin_lock(&rtc_lock);
526 if (efi_enabled)
527 retval = efi_get_time();
528 else
529 retval = mach_get_cmos_time();
531 spin_unlock(&rtc_lock);
533 return retval;
534 }
536 static long clock_cmos_diff;
538 static int __time_suspend(struct sys_device *dev, u32 state)
539 {
540 /*
541 * Estimate time zone so that set_time can update the clock
542 */
543 clock_cmos_diff = -get_cmos_time();
544 clock_cmos_diff += get_seconds();
545 return 0;
546 }
548 static int __time_resume(struct sys_device *dev)
549 {
550 unsigned long flags;
551 unsigned long sec = get_cmos_time() + clock_cmos_diff;
552 write_seqlock_irqsave(&xtime_lock, flags);
553 xtime.tv_sec = sec;
554 xtime.tv_nsec = 0;
555 write_sequnlock_irqrestore(&xtime_lock, flags);
556 return 0;
557 }
/* Suspend/resume hooks for the PIT, exposed as a sysdev class. */
static struct sysdev_class pit_sysclass = {
	.resume = __time_resume,
	.suspend = __time_suspend,
	set_kset_name("pit"),
};


/* XXX this driverfs stuff should probably go elsewhere later -john */
static struct sys_device device_i8253 = {
	.id	= 0,
	.cls	= &pit_sysclass,
};

/* Register the PIT sysdev class and its device instance at boot. */
static int time_init_device(void)
{
	int error = sysdev_class_register(&pit_sysclass);
	if (!error)
		error = sysdev_register(&device_i8253);
	return error;
}

device_initcall(time_init_device);
#ifdef CONFIG_HPET_TIMER
extern void (*late_time_init)(void);
/* Duplicate of time_init() below, with hpet_enable part added */
void __init hpet_time_init(void)
{
	/*
	 * NOTE(review): unlike time_init() below, this path seeds xtime from
	 * the CMOS clock rather than from Xen's shadow values — confirm this
	 * is intended when CONFIG_HPET_TIMER is enabled under Xen.
	 */
	xtime.tv_sec = get_cmos_time();
	wall_to_monotonic.tv_sec = -xtime.tv_sec;
	xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
	wall_to_monotonic.tv_nsec = -xtime.tv_nsec;

	if (hpet_enable() >= 0) {
		printk("Using HPET for base-timer\n");
	}

	cur_timer = select_timer();
	printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);

	time_init_hook();
}
#endif
/* Dynamically-mapped IRQ. */
static int time_irq;

/* irqaction for the Xen VIRQ_TIMER event (handler, flags, mask, name). */
static struct irqaction irq_timer = {
	timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer",
	NULL, NULL
};

/*
 * Boot-time timekeeping setup: seed xtime and our system-time accounting
 * from Xen's shadow values, initialise the timesource, and wire the Xen
 * timer virtual IRQ to timer_interrupt().
 */
void __init time_init(void)
{
#ifdef CONFIG_HPET_TIMER
	if (is_hpet_capable()) {
		/*
		 * HPET initialization needs to do memory-mapped io. So, let
		 * us do a late initialization after mem_init().
		 */
		late_time_init = hpet_time_init;
		return;
	}
#endif
	__get_time_values_from_xen();
	xtime.tv_sec = shadow_tv.tv_sec;
	wall_to_monotonic.tv_sec = -xtime.tv_sec;
	xtime.tv_nsec = shadow_tv.tv_usec * NSEC_PER_USEC;
	wall_to_monotonic.tv_nsec = -xtime.tv_nsec;
	processed_system_time = shadow_system_time;

	if (cur_timer->init(NULL) != 0)
		BUG();
	printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);

	/* Route Xen's timer virtual IRQ onto a dynamically-allocated IRQ. */
	time_irq = bind_virq_to_irq(VIRQ_TIMER);

	(void)setup_irq(time_irq, &irq_timer);
}
639 /* Convert jiffies to system time. Call with xtime_lock held for reading. */
640 static inline u64 __jiffies_to_st(unsigned long j)
641 {
642 return processed_system_time + ((j - jiffies) * NS_PER_TICK);
643 }
645 /*
646 * This function works out when the the next timer function has to be
647 * executed (by looking at the timer list) and sets the Xen one-shot
648 * domain timer to the appropriate value. This is typically called in
649 * cpu_idle() before the domain blocks.
650 *
651 * The function returns a non-0 value on error conditions.
652 *
653 * It must be called with interrupts disabled.
654 */
655 int set_timeout_timer(void)
656 {
657 u64 alarm = 0;
658 int ret = 0;
660 /*
661 * This is safe against long blocking (since calculations are
662 * not based on TSC deltas). It is also safe against warped
663 * system time since suspend-resume is cooperative and we
664 * would first get locked out. It is safe against normal
665 * updates of jiffies since interrupts are off.
666 */
667 alarm = __jiffies_to_st(next_timer_interrupt());
669 /* Failure is pretty bad, but we'd best soldier on. */
670 if ( HYPERVISOR_set_timer_op(alarm) != 0 )
671 ret = -1;
673 return ret;
674 }
/* Nothing to do on suspend: time_resume() refetches all state from Xen. */
void time_suspend(void)
{
}
/* Re-synchronise all timekeeping state with Xen after a save/restore. */
void time_resume(void)
{
	unsigned long flags;
	write_seqlock_irqsave(&xtime_lock, flags);
	/* Get timebases for new environment. */
	__get_time_values_from_xen();
	/* Reset our own concept of passage of system time. */
	processed_system_time = shadow_system_time;
	/* Accept a warp in UTC (wall-clock) time. */
	last_seen_tv.tv_sec = 0;
	/* Make sure we resync UTC time with Xen on next timer interrupt. */
	last_update_from_xen = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
}
/*
 * /proc/sys/xen: This really belongs in another file. It can stay here for
 * now however.
 */
static ctl_table xen_subtable[] = {
	/* /proc/sys/xen/independent_wallclock: rw int, see flag above. */
	{1, "independent_wallclock", &independent_wallclock,
	 sizeof(independent_wallclock), 0644, NULL, proc_dointvec},
	{0}
};
static ctl_table xen_table[] = {
	{123, "xen", NULL, 0, 0555, xen_subtable},
	{0}
};
/* Register the /proc/sys/xen subtree at boot. */
static int __init xen_sysctl_init(void)
{
	(void)register_sysctl_table(xen_table, 0);
	return 0;
}
__initcall(xen_sysctl_init);