ia64/xen-unstable
xen/arch/ia64/xen/xentime.c @ 16328:7ca6e0b9f73d

[IA64] Cleanup: remove unused functions, add static.

Signed-off-by: Tristan Gingold <tgingold@free.fr>
Author:   Alex Williamson <alex.williamson@hp.com>
Date:     Tue Nov 06 14:03:36 2007 -0700
Parents:  cd889a7ccae4
Children: 430a036ab261
/*
 * xen/arch/ia64/time.c
 *
 * Copyright (C) 2005 Hewlett-Packard Co
 *      Dan Magenheimer <dan.magenheimer@hp.com>
 */
#include <linux/config.h>

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>

#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/vcpu.h>
#include <linux/jiffies.h>  // not included by xen/sched.h
#include <xen/softirq.h>
#include <xen/event.h>
/* FIXME: where should these declarations live? */
extern void ia64_init_itm(void);
seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;

#define TIME_KEEPER_ID 0
unsigned long domain0_ready = 0;
static s_time_t stime_irq = 0x0;  /* System time at last 'time update' */
static unsigned long itc_scale __read_mostly, ns_scale __read_mostly;
static unsigned long itc_at_irq;

static u32 wc_sec, wc_nsec;       /* UTC time at last 'time update'. */
static void ia64_wallclock_set(void);
/* We don't expect an absolute cycle value here, since there would be no
 * way to prevent overflow for a large numerator. Normally this
 * conversion is used for a relative offset.
 */
u64 cycle_to_ns(u64 cycle)
{
    return (cycle * itc_scale) >> 32;
}
static u64 ns_to_cycle(u64 ns)
{
    return (ns * ns_scale) >> 32;
}
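
/*
 * Worked example (illustrative frequency, not a value taken from this
 * file): with an 800 MHz ITC, init_xen_time() below computes
 *   itc_scale = (10^9 << 32) / (8 * 10^8) = 1.25 * 2^32 = 0x140000000,
 * so cycle_to_ns(4) = (4 * 0x140000000) >> 32 = 5, i.e. 4 cycles = 5 ns.
 * Because itc_scale carries 32 fractional bits, the multiply overflows
 * 64 bits once `cycle` nears 2^64 / itc_scale (only a few seconds'
 * worth of cycles at GHz rates), hence the restriction to relative
 * offsets noted above. ns_scale is the reciprocal factor for the
 * opposite direction.
 */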
static inline u64 get_time_delta(void)
{
    s64 delta_itc;
    u64 cur_itc;

    cur_itc = ia64_get_itc();

    delta_itc = (s64)(cur_itc - itc_at_irq);

    /* Ensure that the returned system time is monotonically increasing. */
    if ( unlikely(delta_itc < 0) ) delta_itc = 0;
    return cycle_to_ns(delta_itc);
}
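
/*
 * Lockless system-time read: sample the xtime_lock sequence counter,
 * compute stime_irq plus the ITC delta since the last tick, and retry
 * if the timer interrupt updated the time base in between (the
 * standard seqlock reader pattern).
 */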
s_time_t get_s_time(void)
{
    s_time_t now;
    unsigned long seq;

    do {
        seq = read_seqbegin(&xtime_lock);
        now = stime_irq + get_time_delta();
    } while (unlikely(read_seqretry(&xtime_lock, seq)));

    return now;
}
void update_vcpu_system_time(struct vcpu *v)
{
    /* No-op here; let dom0 manage system time directly. */
    return;
}
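
/*
 * Per-CPU timer tick handler. The TIME_KEEPER cpu advances the global
 * system time under xtime_lock; every cpu then reprograms its ITM for
 * the next tick, pends or re-arms the current vcpu's timer, and raises
 * TIMER_SOFTIRQ so the generic Xen timer code runs.
 */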
void
xen_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{
    unsigned long new_itm, old_itc;

    new_itm = local_cpu_data->itm_next;
    while (1) {
        if (smp_processor_id() == TIME_KEEPER_ID) {
            /*
             * Here we are in the timer irq handler. We have irqs locally
             * disabled, but we don't know if the timer_bh is running on
             * another CPU. We need to avoid an SMP race by acquiring
             * xtime_lock.
             */
            write_seqlock(&xtime_lock);
            /* Update system time (nanoseconds since boot). */
            old_itc = itc_at_irq;
            itc_at_irq = ia64_get_itc();
            stime_irq += cycle_to_ns(itc_at_irq - old_itc);

            write_sequnlock(&xtime_lock);
        }

        local_cpu_data->itm_next = new_itm;

        if (time_after(new_itm, ia64_get_itc()))
            break;

        new_itm += local_cpu_data->itm_delta;
    }

    if (!is_idle_domain(current->domain) && !VMX_DOMAIN(current)) {
        if (vcpu_timer_expired(current)) {
            vcpu_pend_timer(current);
        } else {
            // Ensure another timer interrupt happens
            // even if the domain doesn't request one.
            vcpu_set_next_timer(current);
            raise_softirq(TIMER_SOFTIRQ);
            return;
        }
    }

    do {
        /*
         * If we're too close to the next clock tick for
         * comfort, we increase the safety margin by
         * intentionally dropping the next tick(s). We do NOT
         * update itm.next because that would force us to call
         * do_timer() which in turn would let our clock run
         * too fast (with the potentially devastating effect
         * of losing the monotonicity of time).
         */
        while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
            new_itm += local_cpu_data->itm_delta;
        ia64_set_itm(new_itm);
        /* double check, in case we got hit by a (slow) PMI: */
    } while (time_after_eq(ia64_get_itc(), new_itm));
    raise_softirq(TIMER_SOFTIRQ);
}
static struct irqaction xen_timer_irqaction = {
    .handler = (void *) xen_timer_interrupt,
    .name = "timer"
};
void __init
ia64_time_init (void)
{
    register_percpu_irq(IA64_TIMER_VECTOR, &xen_timer_irqaction);
    ia64_init_itm();
}
/* wallclock set from efi.get_time */
static void ia64_wallclock_set(void)
{
    efi_time_t tv;
    efi_time_cap_t tc;
    efi_status_t status = 0;

    status = (*efi.get_time)(&tv, &tc);
    if (status != 0) {
        wc_sec = 0; wc_nsec = 0;
        printk("EFIRTC Get Time failed\n");
        return;
    }

    wc_sec = mktime(tv.year, tv.month, tv.day, tv.hour, tv.minute, tv.second);
    wc_nsec = tv.nanosecond;
    if (tv.timezone != EFI_UNSPECIFIED_TIMEZONE) {
        wc_sec -= tv.timezone * 60;
        printk("Time zone is %d minutes offset from UTC\n", tv.timezone);
    } else {
        printk("Time zone is not specified by the EFI RTC\n");
    }
}
/* Late init function (after all CPUs are booted). */
int __init init_xen_time(void)
{
    ia64_time_init();
    ia64_wallclock_set();
    itc_scale = 1000000000UL << 32;
    itc_scale /= local_cpu_data->itc_freq;
    ns_scale = (local_cpu_data->itc_freq << 32) / 1000000000UL;
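    /*
     * Note: `1000000000UL << 32` relies on ia64's 64-bit unsigned long
     * (the product is roughly 2^62 and would overflow a 32-bit long).
     * itc_scale and ns_scale are reciprocal 32.32 fixed-point factors:
     * one converts ITC cycles to nanoseconds, the other converts back.
     */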

    /* System time ticks from zero. */
    stime_irq = (s_time_t)0;
    itc_at_irq = ia64_get_itc();

    printk("Time init:\n");
    printk(".... System Time: %ldns\n", NOW());
    printk(".... scale:       %16lX\n", itc_scale);

    return 0;
}
int reprogram_timer(s_time_t timeout)
{
    struct vcpu *v = current;
    s_time_t expire;
    unsigned long seq, cur_itc, itm_next;

    if (!domain0_ready || timeout == 0) return 1;

    do {
        seq = read_seqbegin(&xtime_lock);
        if ((expire = timeout - NOW()) < 0)
            return 0;

        cur_itc = ia64_get_itc();
        itm_next = cur_itc + ns_to_cycle(expire);
    } while (unlikely(read_seqretry(&xtime_lock, seq)));

    local_cpu_data->itm_next = itm_next;
    vcpu_set_next_timer(v);
    return 1;
}
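
/*
 * Judging by the common Xen timer code, a zero return from
 * reprogram_timer() tells the caller (the generic timer softirq) that
 * the requested deadline has already passed and expired timers should
 * be processed immediately rather than waiting for the ITM to fire.
 */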
void send_timer_event(struct vcpu *v)
{
    send_guest_vcpu_virq(v, VIRQ_TIMER);
}
/* This is taken from xen/arch/x86/time.c, with the constant
 * 1000000000ull replaced by NSEC_PER_SEC.
 */
struct tm wallclock_time(void)
{
    uint64_t seconds;

    if (!wc_sec)
        return (struct tm) { 0 };

    seconds = NOW() + (wc_sec * NSEC_PER_SEC) + wc_nsec;
    do_div(seconds, NSEC_PER_SEC);
    return gmtime(seconds);
}
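
/*
 * Example with hypothetical values: if ia64_wallclock_set() recorded
 * wc_sec = 1194307200 (2007-11-06 00:00:00 UTC) and the hypervisor has
 * been up for NOW() = 5 * 10^9 ns, the sum is 1194307205 * 10^9 ns plus
 * wc_nsec; do_div() reduces it to 1194307205 seconds, which gmtime()
 * expands into a UTC struct tm.
 */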