ia64/xen-unstable

view xen/arch/ia64/xen/xentime.c @ 9597:8f7aad20b4a5

Backtrack on the new interface for reserved event-channel
ports, as binding them in user space via the evtchn driver
would be a pain. Instead extend VIRQs so they can be
classified as 'global' or 'per vcpu'. The former can only
be allocated once per guest, but can be re-bound to
an arbitrary VCPU.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Apr 05 19:30:02 2006 +0100 (2006-04-05)
parents 08aede767c63
children 5cc367720223
line source
1 /*
2 * xen/arch/ia64/time.c
3 *
4 * Copyright (C) 2005 Hewlett-Packard Co
5 * Dan Magenheimer <dan.magenheimer@hp.com>
6 */
8 #include <linux/config.h>
10 #include <linux/cpu.h>
11 #include <linux/init.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/profile.h>
15 #include <linux/sched.h>
16 #include <linux/time.h>
17 #include <linux/interrupt.h>
18 #include <linux/efi.h>
19 #include <linux/profile.h>
20 #include <linux/timex.h>
22 #include <asm/machvec.h>
23 #include <asm/delay.h>
24 #include <asm/hw_irq.h>
25 #include <asm/ptrace.h>
26 #include <asm/sal.h>
27 #include <asm/sections.h>
28 #include <asm/system.h>
29 #include <asm/vcpu.h>
30 #include <linux/jiffies.h> // not included by xen/sched.h
31 #include <xen/softirq.h>
32 #include <xen/event.h>
34 /* FIXME: where should these declarations live? */
35 extern void ia64_init_itm(void);
37 seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
39 #define TIME_KEEPER_ID 0 /* CPU that updates system time in the timer irq */
40 unsigned long domain0_ready = 0;
41 static s_time_t stime_irq = 0x0; /* System time at last 'time update' */
42 unsigned long itc_scale, ns_scale; /* 32.32 fixed-point cycle<->ns factors, set in init_xen_time() */
43 unsigned long itc_at_irq; /* ITC value sampled at the last time update */
45 /* We don't expect an absolute cycle value here, since there would then be
46 * no way to prevent overflow for a large numerator. Normally this
47 * conversion is used for a relative offset.
48 */
49 u64 cycle_to_ns(u64 cycle)
50 {
51 return (cycle * itc_scale) >> 32;
52 }
54 u64 ns_to_cycle(u64 ns)
55 {
56 return (ns * ns_scale) >> 32;
57 }
59 static inline u64 get_time_delta(void)
60 {
61 s64 delta_itc;
62 u64 cur_itc;
64 cur_itc = ia64_get_itc();
66 delta_itc = (s64)(cur_itc - itc_at_irq);
68 /* Ensure that the returned system time is monotonically increasing. */
69 if ( unlikely(delta_itc < 0) ) delta_itc = 0;
70 return cycle_to_ns(delta_itc);
71 }
74 s_time_t get_s_time(void)
75 {
76 s_time_t now;
77 unsigned long seq;
79 do {
80 seq = read_seqbegin(&xtime_lock);
81 now = stime_irq + get_time_delta();
82 } while (unlikely(read_seqretry(&xtime_lock, seq)));
84 return now;
85 }
/* No-op on ia64: dom0 manages system time directly. */
void update_vcpu_system_time(struct vcpu *v)
{
    return;
}
/* No-op on ia64: dom0 manages wallclock time directly. */
void update_domain_wallclock_time(struct domain *d)
{
    return;
}
99 /* Set clock to <secs,usecs> after 00:00:00 UTC, 1 January, 1970. */
100 void do_settime(unsigned long secs, unsigned long nsecs, u64 system_time_base)
101 {
102 /* If absolute system time is managed by dom0, there's no need for such
103 * action since only virtual itc/itm service is provided.
104 */
105 return;
106 }
/*
 * Per-CPU timer tick handler (IA64_TIMER_VECTOR).
 * Delivers a pending virtual timer interrupt to the current guest VCPU,
 * advances the per-CPU next-match value (itm_next) by whole tick deltas,
 * lets the TIME_KEEPER_ID CPU accumulate system time from the ITC, then
 * reprograms the ITM far enough in the future to avoid an immediate
 * re-fire, and finally kicks TIMER_SOFTIRQ.
 */
108 irqreturn_t
109 xen_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
110 {
111 unsigned long new_itm, old_itc;
/* Disabled debug heartbeat, kept for reference. */
113 #if 0
114 #define HEARTBEAT_FREQ 16 // period in seconds
115 #ifdef HEARTBEAT_FREQ
116 static long count = 0;
117 if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) {
118 printf("Heartbeat... iip=%p\n", /*",psr.i=%d,pend=%d\n", */
119 regs->cr_iip /*,
120 VCPU(current,interrupt_delivery_enabled),
121 VCPU(current,pending_interruption) */);
122 count = 0;
123 }
124 #endif
125 #endif
/* Disabled legacy dom0 wake-up hack, kept for reference. */
127 #if 0
128 /* Nobody seems to be able to explain this code.
129 It seems to be accumulated tricks, which are not required anymore.
130 Also I have made many tests, I'd like to get confirmation from
131 other site (TG). */
132 if (current->domain == dom0) {
133 // FIXME: there's gotta be a better way of doing this...
134 // We have to ensure that domain0 is launched before we
135 // call vcpu_timer_expired on it
136 //domain0_ready = 1; // moved to xensetup.c
137 VCPU(current,pending_interruption) = 1;
138 }
139 if (domain0_ready && current->domain != dom0) {
140 if(vcpu_timer_expired(dom0->vcpu[0])) {
141 vcpu_pend_timer(dom0->vcpu[0]);
142 //vcpu_set_next_timer(dom0->vcpu[0]);
143 vcpu_wake(dom0->vcpu[0]);
144 }
145 }
146 #endif
/* If the interrupted VCPU's virtual timer has expired, pend it a timer
 * interrupt and wake it. */
147 if (!is_idle_domain(current->domain)) {
148 if (vcpu_timer_expired(current)) {
149 vcpu_pend_timer(current);
150 // ensure another timer interrupt happens even if domain doesn't
151 vcpu_set_next_timer(current);
152 vcpu_wake(current);
153 }
154 }
155 new_itm = local_cpu_data->itm_next;
/* If the ITC has not yet reached the next match value, this was an early
 * (non-VMX) tick: nothing more to account. */
157 if (!VMX_DOMAIN(current) && !time_after(ia64_get_itc(), new_itm))
158 return IRQ_HANDLED;
160 if (VMX_DOMAIN(current))
161 vcpu_wake(current);
/* Advance itm_next by whole tick deltas until it is in the future,
 * accumulating system time on the time-keeper CPU as we go. */
163 while (1) {
164 new_itm += local_cpu_data->itm_delta;
166 if (smp_processor_id() == TIME_KEEPER_ID) {
167 /*
168 * Here we are in the timer irq handler. We have irqs locally
169 * disabled, but we don't know if the timer_bh is running on
170 * another CPU. We need to avoid to SMP race by acquiring the
171 * xtime_lock.
172 */
173 #ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
174 write_seqlock(&xtime_lock);
175 #endif
176 #ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
177 do_timer(regs);
178 #endif
179 local_cpu_data->itm_next = new_itm;
181 /* Updates system time (nanoseconds since boot). */
182 old_itc = itc_at_irq;
183 itc_at_irq = ia64_get_itc();
184 stime_irq += cycle_to_ns(itc_at_irq - old_itc);
186 #ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
187 write_sequnlock(&xtime_lock);
188 #endif
189 } else
190 local_cpu_data->itm_next = new_itm;
192 if (time_after(new_itm, ia64_get_itc()))
193 break;
194 }
196 do {
197 /*
198 * If we're too close to the next clock tick for
199 * comfort, we increase the safety margin by
200 * intentionally dropping the next tick(s). We do NOT
201 * update itm.next because that would force us to call
202 * do_timer() which in turn would let our clock run
203 * too fast (with the potentially devastating effect
204 * of losing monotony of time).
205 */
206 while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
207 new_itm += local_cpu_data->itm_delta;
208 //#ifdef XEN
209 // vcpu_set_next_timer(current);
210 //#else
211 //printf("***** timer_interrupt: Setting itm to %lx\n",new_itm);
212 ia64_set_itm(new_itm);
213 //#endif
214 /* double check, in case we got hit by a (slow) PMI: */
215 } while (time_after_eq(ia64_get_itc(), new_itm));
/* Let softirq context run Xen's own timer queues. */
216 raise_softirq(TIMER_SOFTIRQ);
218 return IRQ_HANDLED;
219 }
/* irqaction registered for IA64_TIMER_VECTOR in ia64_time_init().
 * NOTE(review): the cast discards xen_timer_interrupt's typed signature
 * to fit the generic handler pointer — confirm the irqaction handler
 * type matches (int, void *, struct pt_regs *). */
221 static struct irqaction xen_timer_irqaction = {
222 .handler = (void *) xen_timer_interrupt,
223 .name = "timer"
224 };
226 void __init
227 ia64_time_init (void)
228 {
229 register_percpu_irq(IA64_TIMER_VECTOR, &xen_timer_irqaction);
230 ia64_init_itm();
231 }
234 /* Late init function (after all CPUs are booted). */
235 int __init init_xen_time()
236 {
237 ia64_time_init();
238 itc_scale = 1000000000UL << 32 ;
239 itc_scale /= local_cpu_data->itc_freq;
240 ns_scale = (local_cpu_data->itc_freq << 32) / 1000000000UL;
242 /* System time ticks from zero. */
243 stime_irq = (s_time_t)0;
244 itc_at_irq = ia64_get_itc();
246 printk("Time init:\n");
247 printk(".... System Time: %ldns\n", NOW());
248 printk(".... scale: %16lX\n", itc_scale);
250 return 0;
251 }
/*
 * Program the local CPU's next timer match (itm_next) for the absolute
 * system time 'timeout' (ns).  Returns 1 on success or when no
 * programming is needed (dom0 not ready, or timeout == 0); returns 0 if
 * the deadline has already passed.  The deadline-to-cycles conversion is
 * done under the xtime_lock seqlock so it is consistent with a
 * concurrent system-time update.
 */
253 int reprogram_timer(s_time_t timeout)
254 {
255 struct vcpu *v = current;
256 s_time_t expire;
257 unsigned long seq, cur_itc, itm_next;
/* Nothing to program before dom0 is up or for a null timeout. */
259 if (!domain0_ready || timeout == 0) return 1;
261 do {
262 seq = read_seqbegin(&xtime_lock);
/* Deadline already in the past: bail out (early return from inside the
 * seqlock read section is safe — readers take no lock). */
263 if ((expire = timeout - NOW()) < 0)
264 return 0;
266 cur_itc = ia64_get_itc();
267 itm_next = cur_itc + ns_to_cycle(expire);
268 } while (unlikely(read_seqretry(&xtime_lock, seq)));
270 local_cpu_data->itm_next = itm_next;
/* Push the new match value into the hardware ITM for this VCPU. */
271 vcpu_set_next_timer(v);
272 return 1;
273 }
275 void send_timer_event(struct vcpu *v)
276 {
277 send_guest_vcpu_virq(v, VIRQ_TIMER);
278 }