
view xen/arch/ia64/xen/xentime.c @ 11344:647d3208704a

[IA64] fix VTi hangs

In some scenarios, xen_timer_interrupt does not set the machine itm;
this can leave the LP without any further timer interrupts.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Mon Aug 28 13:18:36 2006 -0600 (2006-08-28)
parents b20733e82ab6
children 29b02d929b7e 6fae3a36f50b
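
For context, the check below is condensed from xen_timer_interrupt in the listing that follows: the handler may return early only when a virtual timer was just re-armed for the current vcpu and the machine itm has not yet expired; every other path falls through to ia64_set_itm(), which is what keeps the LP supplied with timer interrupts.

    if (f_setitm && !time_after(ia64_get_itc(), new_itm))
        return;    /* itm still pending; no need to reprogram it */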
line source
/*
 * xen/arch/ia64/time.c
 *
 * Copyright (C) 2005 Hewlett-Packard Co
 * Dan Magenheimer <dan.magenheimer@hp.com>
 */

#include <linux/config.h>

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/profile.h>
#include <linux/timex.h>

#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/vcpu.h>
#include <linux/jiffies.h>    // not included by xen/sched.h
#include <xen/softirq.h>
#include <xen/event.h>

/* FIXME: where should these declarations live? */
extern void ia64_init_itm(void);

seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;

#define TIME_KEEPER_ID 0
unsigned long domain0_ready = 0;
static s_time_t stime_irq = 0x0;    /* System time at last 'time update' */
unsigned long itc_scale, ns_scale;
unsigned long itc_at_irq;

/* We don't expect an absolute cycle value here, since there would be no way
 * to prevent overflow for a large numerator. Normally this conversion is
 * used for relative offsets.
 */
u64 cycle_to_ns(u64 cycle)
{
    return (cycle * itc_scale) >> 32;
}

u64 ns_to_cycle(u64 ns)
{
    return (ns * ns_scale) >> 32;
}

static inline u64 get_time_delta(void)
{
    s64 delta_itc;
    u64 cur_itc;

    cur_itc = ia64_get_itc();

    delta_itc = (s64)(cur_itc - itc_at_irq);

    /* Ensure that the returned system time is monotonically increasing. */
    if ( unlikely(delta_itc < 0) ) delta_itc = 0;
    return cycle_to_ns(delta_itc);
}

s_time_t get_s_time(void)
{
    s_time_t now;
    unsigned long seq;

    do {
        seq = read_seqbegin(&xtime_lock);
        now = stime_irq + get_time_delta();
    } while (unlikely(read_seqretry(&xtime_lock, seq)));

    return now;
}
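
/*
 * get_s_time() is the seqlock reader: it retries if xen_timer_interrupt()
 * (the writer, below) updated stime_irq/itc_at_irq concurrently on the
 * TIME_KEEPER_ID CPU, so callers always see a consistent pair of values.
 */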

void update_vcpu_system_time(struct vcpu *v)
{
    /* No-op here; dom0 manages system time directly. */
    return;
}

void update_domain_wallclock_time(struct domain *d)
{
    /* No-op here; dom0 manages system time directly. */
    return;
}

/* Set clock to <secs,nsecs> after 00:00:00 UTC, 1 January, 1970. */
void do_settime(unsigned long secs, unsigned long nsecs, u64 system_time_base)
{
    /* If absolute system time is managed by dom0, there's no need for such
     * an action, since only a virtual itc/itm service is provided.
     */
    return;
}

void
xen_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{
    unsigned long new_itm, old_itc;
    int f_setitm = 0;

#if 0
#define HEARTBEAT_FREQ 16    // period in seconds
#ifdef HEARTBEAT_FREQ
    static long count = 0;
    if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) {
        printf("Heartbeat... iip=%p\n", /*",psr.i=%d,pend=%d\n", */
               regs->cr_iip /*,
               !current->vcpu_info->evtchn_upcall_mask,
               VCPU(current,pending_interruption) */);
        count = 0;
    }
#endif
#endif

    if (!is_idle_domain(current->domain) && !VMX_DOMAIN(current))
        if (vcpu_timer_expired(current)) {
            vcpu_pend_timer(current);
            // Ensure another timer interrupt happens even if the domain
            // does not set one itself.
            vcpu_set_next_timer(current);
            f_setitm = 1;
        }

    new_itm = local_cpu_data->itm_next;

    if (f_setitm && !time_after(ia64_get_itc(), new_itm))
        return;

    while (1) {
        new_itm += local_cpu_data->itm_delta;

        if (smp_processor_id() == TIME_KEEPER_ID) {
            /*
             * Here we are in the timer irq handler. We have irqs locally
             * disabled, but we don't know if the timer_bh is running on
             * another CPU. We need to avoid an SMP race by acquiring
             * xtime_lock.
             */
//#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
            write_seqlock(&xtime_lock);
//#endif
#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
            do_timer(regs);
#endif
            local_cpu_data->itm_next = new_itm;

            /* Update system time (nanoseconds since boot). */
            old_itc = itc_at_irq;
            itc_at_irq = ia64_get_itc();
            stime_irq += cycle_to_ns(itc_at_irq - old_itc);

//#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
            write_sequnlock(&xtime_lock);
//#endif
        } else
            local_cpu_data->itm_next = new_itm;

        if (time_after(new_itm, ia64_get_itc()))
            break;
    }

    do {
        /*
         * If we're too close to the next clock tick for
         * comfort, we increase the safety margin by
         * intentionally dropping the next tick(s). We do NOT
         * update itm.next because that would force us to call
         * do_timer() which in turn would let our clock run
         * too fast (with the potentially devastating effect
         * of losing the monotonicity of time).
         */
        while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
            new_itm += local_cpu_data->itm_delta;
        ia64_set_itm(new_itm);
        /* double check, in case we got hit by a (slow) PMI: */
    } while (time_after_eq(ia64_get_itc(), new_itm));
    raise_softirq(TIMER_SOFTIRQ);
}
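
/*
 * Example of the safety margin above: with a hypothetical itm_delta of
 * 1,000,000 ITC ticks, a new_itm that is not more than 500,000 ticks ahead
 * of the current ITC is pushed forward by whole itm_delta steps before being
 * written with ia64_set_itm(), trading dropped ticks for a match that is
 * safely in the future.
 */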

static struct irqaction xen_timer_irqaction = {
    .handler = (void *) xen_timer_interrupt,
    .name = "timer"
};

void __init
ia64_time_init (void)
{
    register_percpu_irq(IA64_TIMER_VECTOR, &xen_timer_irqaction);
    ia64_init_itm();
}

/* Late init function (after all CPUs are booted). */
int __init init_xen_time()
{
    ia64_time_init();
    itc_scale = 1000000000UL << 32;
    itc_scale /= local_cpu_data->itc_freq;
    ns_scale = (local_cpu_data->itc_freq << 32) / 1000000000UL;

    /* System time ticks from zero. */
    stime_irq = (s_time_t)0;
    itc_at_irq = ia64_get_itc();

    printk("Time init:\n");
    printk(".... System Time: %ldns\n", NOW());
    printk(".... scale: %16lX\n", itc_scale);

    return 0;
}
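
/*
 * Worked example with a hypothetical itc_freq of 400 MHz:
 *   itc_scale = (10^9 << 32) / (4 * 10^8) = 2.5 * 2^32, so
 *   cycle_to_ns(cycle) = (cycle * itc_scale) >> 32 gives 2.5 ns per ITC tick;
 *   ns_scale = ((4 * 10^8) << 32) / 10^9 ~= 0.4 * 2^32, so
 *   ns_to_cycle(ns) gives about 0.4 ITC ticks per nanosecond.
 */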

int reprogram_timer(s_time_t timeout)
{
    struct vcpu *v = current;
    s_time_t expire;
    unsigned long seq, cur_itc, itm_next;

    /* Nothing to program before dom0 is ready or for a zero timeout. */
    if (!domain0_ready || timeout == 0) return 1;

    do {
        seq = read_seqbegin(&xtime_lock);
        /* Deadline already passed. */
        if ((expire = timeout - NOW()) < 0)
            return 0;

        /* Convert the relative expiry into an absolute ITC value. */
        cur_itc = ia64_get_itc();
        itm_next = cur_itc + ns_to_cycle(expire);
    } while (unlikely(read_seqretry(&xtime_lock, seq)));

    local_cpu_data->itm_next = itm_next;
    vcpu_set_next_timer(v);
    return 1;
}

void send_timer_event(struct vcpu *v)
{
    send_guest_vcpu_virq(v, VIRQ_TIMER);
}