ia64/xen-unstable

view xen/arch/ia64/xentime.c @ 6141:7c2fdcb2c933

another merge
author kaf24@firebug.cl.cam.ac.uk
date Fri Aug 12 14:53:26 2005 +0000 (2005-08-12)
parents 38bee85ddeb8 217fb2d1f364
children 4995d5f167c9 f51fe43c5d1c 6783e59e1c45
line source
1 /*
2 * xen/arch/ia64/xentime.c
3 *
4 * Copyright (C) 2005 Hewlett-Packard Co
5 * Dan Magenheimer <dan.magenheimer@hp.com>
6 */
8 #include <linux/config.h>
10 #include <linux/cpu.h>
11 #include <linux/init.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/profile.h>
15 #include <linux/sched.h>
16 #include <linux/time.h>
17 #include <linux/interrupt.h>
18 #include <linux/efi.h>
19 #include <linux/profile.h>
20 #include <linux/timex.h>
22 #include <asm/machvec.h>
23 #include <asm/delay.h>
24 #include <asm/hw_irq.h>
25 #include <asm/ptrace.h>
26 #include <asm/sal.h>
27 #include <asm/sections.h>
28 #include <asm/system.h>
29 #ifdef XEN
30 #include <asm/vcpu.h>
31 #include <linux/jiffies.h> // not included by xen/sched.h
32 #endif
33 #include <xen/softirq.h>
35 #define TIME_KEEPER_ID 0
36 extern unsigned long wall_jiffies;
38 static s_time_t stime_irq; /* System time at last 'time update' */
40 unsigned long domain0_ready = 0;
42 #ifndef CONFIG_VTI
43 static inline u64 get_time_delta(void)
44 {
45 return ia64_get_itc();
46 }
47 #else // CONFIG_VTI
48 static s_time_t stime_irq = 0x0; /* System time at last 'time update' */
49 unsigned long itc_scale;
50 unsigned long itc_at_irq;
51 static unsigned long wc_sec, wc_nsec; /* UTC time at last 'time update'. */
52 //static rwlock_t time_lock = RW_LOCK_UNLOCKED;
53 static irqreturn_t vmx_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs);
55 static inline u64 get_time_delta(void)
56 {
57 s64 delta_itc;
58 u64 delta, cur_itc;
60 cur_itc = ia64_get_itc();
62 delta_itc = (s64)(cur_itc - itc_at_irq);
63 if ( unlikely(delta_itc < 0) ) delta_itc = 0;
64 delta = ((u64)delta_itc) * itc_scale;
65 delta = delta >> 32;
67 return delta;
68 }
70 u64 tick_to_ns(u64 tick)
71 {
72 return (tick * itc_scale) >> 32;
73 }
74 #endif // CONFIG_VTI
76 s_time_t get_s_time(void)
77 {
78 s_time_t now;
79 unsigned long flags;
81 read_lock_irqsave(&xtime_lock, flags);
83 now = stime_irq + get_time_delta();
85 /* Ensure that the returned system time is monotonically increasing. */
86 {
87 static s_time_t prev_now = 0;
88 if ( unlikely(now < prev_now) )
89 now = prev_now;
90 prev_now = now;
91 }
93 read_unlock_irqrestore(&xtime_lock, flags);
95 return now;
96 }
/*
 * Propagate time information to a domain.  Intentionally a no-op on
 * ia64 for now.
 */
void update_dom_time(struct vcpu *v)
{
	/* FIXME: implement this? */
}
105 /* Set clock to <secs,usecs> after 00:00:00 UTC, 1 January, 1970. */
106 void do_settime(s64 secs, u32 nsecs, u64 system_time_base)
107 {
108 #ifdef CONFIG_VTI
109 u64 _nsecs;
111 write_lock_irq(&xtime_lock);
113 _nsecs = (u64)nsecs + (s64)(stime_irq - system_time_base);
114 while ( _nsecs >= 1000000000 )
115 {
116 _nsecs -= 1000000000;
117 secs++;
118 }
120 wc_sec = secs;
121 wc_nsec = (unsigned long)_nsecs;
123 write_unlock_irq(&xtime_lock);
125 update_dom_time(current->domain);
126 #else
127 // FIXME: Should this be do_settimeofday (from linux)???
128 printf("do_settime: called, not implemented, stopping\n");
129 dummy();
130 #endif
131 }
133 irqreturn_t
134 xen_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
135 {
136 unsigned long new_itm;
138 #define HEARTBEAT_FREQ 16 // period in seconds
139 #ifdef HEARTBEAT_FREQ
140 static long count = 0;
141 if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) {
142 printf("Heartbeat... iip=%p,psr.i=%d,pend=%d\n",
143 regs->cr_iip,
144 VCPU(current,interrupt_delivery_enabled),
145 VCPU(current,pending_interruption));
146 count = 0;
147 }
148 #endif
149 #ifndef XEN
150 if (unlikely(cpu_is_offline(smp_processor_id()))) {
151 return IRQ_HANDLED;
152 }
153 #endif
154 #ifdef XEN
155 if (current->domain == dom0) {
156 // FIXME: there's gotta be a better way of doing this...
157 // We have to ensure that domain0 is launched before we
158 // call vcpu_timer_expired on it
159 //domain0_ready = 1; // moved to xensetup.c
160 VCPU(current,pending_interruption) = 1;
161 }
162 if (domain0_ready && vcpu_timer_expired(dom0->vcpu[0])) {
163 vcpu_pend_timer(dom0->vcpu[0]);
164 //vcpu_set_next_timer(dom0->vcpu[0]);
165 domain_wake(dom0->vcpu[0]);
166 }
167 if (!is_idle_task(current->domain) && current->domain != dom0) {
168 if (vcpu_timer_expired(current)) {
169 vcpu_pend_timer(current);
170 // ensure another timer interrupt happens even if domain doesn't
171 vcpu_set_next_timer(current);
172 domain_wake(current);
173 }
174 }
175 raise_actimer_softirq();
176 #endif
178 #ifndef XEN
179 platform_timer_interrupt(irq, dev_id, regs);
180 #endif
182 new_itm = local_cpu_data->itm_next;
184 if (!time_after(ia64_get_itc(), new_itm))
185 #ifdef XEN
186 return;
187 #else
188 printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
189 ia64_get_itc(), new_itm);
190 #endif
192 #ifdef XEN
193 // printf("GOT TO HERE!!!!!!!!!!!\n");
194 //while(1);
195 #else
196 profile_tick(CPU_PROFILING, regs);
197 #endif
199 while (1) {
200 #ifndef XEN
201 update_process_times(user_mode(regs));
202 #endif
204 new_itm += local_cpu_data->itm_delta;
206 if (smp_processor_id() == TIME_KEEPER_ID) {
207 /*
208 * Here we are in the timer irq handler. We have irqs locally
209 * disabled, but we don't know if the timer_bh is running on
210 * another CPU. We need to avoid to SMP race by acquiring the
211 * xtime_lock.
212 */
213 #ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
214 write_seqlock(&xtime_lock);
215 #endif
216 #ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
217 do_timer(regs);
218 #endif
219 local_cpu_data->itm_next = new_itm;
220 #ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
221 write_sequnlock(&xtime_lock);
222 #endif
223 } else
224 local_cpu_data->itm_next = new_itm;
226 if (time_after(new_itm, ia64_get_itc()))
227 break;
228 }
230 do {
231 /*
232 * If we're too close to the next clock tick for
233 * comfort, we increase the safety margin by
234 * intentionally dropping the next tick(s). We do NOT
235 * update itm.next because that would force us to call
236 * do_timer() which in turn would let our clock run
237 * too fast (with the potentially devastating effect
238 * of losing monotony of time).
239 */
240 while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
241 new_itm += local_cpu_data->itm_delta;
242 //#ifdef XEN
243 // vcpu_set_next_timer(current);
244 //#else
245 //printf("***** timer_interrupt: Setting itm to %lx\n",new_itm);
246 ia64_set_itm(new_itm);
247 //#endif
248 /* double check, in case we got hit by a (slow) PMI: */
249 } while (time_after_eq(ia64_get_itc(), new_itm));
250 return IRQ_HANDLED;
251 }
/*
 * IRQ action registered on the per-CPU timer vector.  The VTI build
 * routes ticks through vmx_timer_interrupt; otherwise
 * xen_timer_interrupt handles them.  SA_INTERRUPT is only meaningful
 * in the non-Xen (Linux) build.
 */
static struct irqaction xen_timer_irqaction = {
#ifdef CONFIG_VTI
	.handler =	vmx_timer_interrupt,
#else // CONFIG_VTI
	.handler =	xen_timer_interrupt,
#endif // CONFIG_VTI
#ifndef XEN
	.flags =	SA_INTERRUPT,
#endif
	.name =		"timer"
};
/*
 * Early timer initialization: hook the timer vector on this CPU and
 * program the interval timer via ia64_init_itm().  Registration must
 * precede ia64_init_itm() so the first tick finds a handler installed.
 */
void __init
xen_time_init (void)
{
	register_percpu_irq(IA64_TIMER_VECTOR, &xen_timer_irqaction);
	ia64_init_itm();
}
273 #ifdef CONFIG_VTI
275 /* Late init function (after all CPUs are booted). */
276 int __init init_xen_time()
277 {
278 struct timespec tm;
280 itc_scale = 1000000000UL << 32 ;
281 itc_scale /= local_cpu_data->itc_freq;
283 /* System time ticks from zero. */
284 stime_irq = (s_time_t)0;
285 itc_at_irq = ia64_get_itc();
287 /* Wallclock time starts as the initial RTC time. */
288 efi_gettimeofday(&tm);
289 wc_sec = tm.tv_sec;
290 wc_nsec = tm.tv_nsec;
293 printk("Time init:\n");
294 printk(".... System Time: %ldns\n", NOW());
295 printk(".... scale: %16lX\n", itc_scale);
296 printk(".... Wall Clock: %lds %ldus\n", wc_sec, wc_nsec/1000);
298 return 0;
299 }
301 static irqreturn_t
302 vmx_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
303 {
304 unsigned long new_itm;
305 struct vcpu *v = current;
308 new_itm = local_cpu_data->itm_next;
310 if (!time_after(ia64_get_itc(), new_itm))
311 return;
313 while (1) {
314 #ifdef CONFIG_SMP
315 /*
316 * For UP, this is done in do_timer(). Weird, but
317 * fixing that would require updates to all
318 * platforms.
319 */
320 update_process_times(user_mode(v, regs));
321 #endif
322 new_itm += local_cpu_data->itm_delta;
324 if (smp_processor_id() == TIME_KEEPER_ID) {
325 /*
326 * Here we are in the timer irq handler. We have irqs locally
327 * disabled, but we don't know if the timer_bh is running on
328 * another CPU. We need to avoid to SMP race by acquiring the
329 * xtime_lock.
330 */
331 local_cpu_data->itm_next = new_itm;
333 write_lock_irq(&xtime_lock);
334 /* Update jiffies counter. */
335 (*(unsigned long *)&jiffies_64)++;
337 /* Update wall time. */
338 wc_nsec += 1000000000/HZ;
339 if ( wc_nsec >= 1000000000 )
340 {
341 wc_nsec -= 1000000000;
342 wc_sec++;
343 }
345 /* Updates system time (nanoseconds since boot). */
346 stime_irq += MILLISECS(1000/HZ);
347 itc_at_irq = ia64_get_itc();
349 write_unlock_irq(&xtime_lock);
351 } else
352 local_cpu_data->itm_next = new_itm;
354 if (time_after(new_itm, ia64_get_itc()))
355 break;
356 }
358 do {
359 /*
360 * If we're too close to the next clock tick for
361 * comfort, we increase the safety margin by
362 * intentionally dropping the next tick(s). We do NOT
363 * update itm.next because that would force us to call
364 * do_timer() which in turn would let our clock run
365 * too fast (with the potentially devastating effect
366 * of losing monotony of time).
367 */
368 while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
369 new_itm += local_cpu_data->itm_delta;
370 ia64_set_itm(new_itm);
371 /* double check, in case we got hit by a (slow) PMI: */
372 } while (time_after_eq(ia64_get_itc(), new_itm));
373 raise_softirq(AC_TIMER_SOFTIRQ);
375 return IRQ_HANDLED;
376 }
377 #endif // CONFIG_VTI