ia64/xen-unstable

view xen/arch/ia64/xen/xentime.c @ 9479:2b6e531dab38

[IA64] Merge vpsr.i with evtchn_upcall_mask to solve one tricky bug

Per agreement at the summit, xen/ia64 will move to the same event
channel model as xen/x86, in which events form the layer beneath
pirqs (external interrupts), virqs, and ipis, with the latter three
bound to event ports. Within that model, no external interrupt is
injected directly, and evtchn_upcall_mask is the flag that controls
whether events are deliverable.
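
As a rough sketch (not code from this changeset; vcpu_info and its
evtchn_upcall_pending/evtchn_upcall_mask fields are from the public
Xen interface), the deliverability test common code performs under
this model is simply:

    /* Events are deliverable iff the single per-vcpu mask is clear
     * and something is pending; pirq, virq and ipi sources all
     * funnel through this one check. */
    static inline int events_deliverable(struct vcpu_info *vi)
    {
        return !vi->evtchn_upcall_mask && vi->evtchn_upcall_pending;
    }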

So xenlinux needs to manipulate evtchn_upcall_mask everywhere it
originally manipulated vpsr.i. However, the two flags live in
different shared areas, so xenlinux cannot update both atomically,
which leaves severe stability issues. One severe bug arises for
exactly this reason: some hypercalls may be restarted indefinitely
while events are pending.
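
Sketched as an illustrative interleaving (the privregs field name
below is hypothetical, not taken from the code):

    vcpu_info->evtchn_upcall_mask = 1;  /* store #1: shared_info area */
    /* <-- window: hypervisor can see mask=1 while vpsr.i is still 1 */
    privregs->vpsr_i = 0;               /* store #2: a separate area  */

A hypercall continuation that samples the flags inside that window
acts on a state the guest never intended, and can be resubmitted
over and over while events stay pending.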

Based on the description of the future model, events become a
superset of external interrupts, and thus evtchn_upcall_mask a
superset of vpsr.i (interrupt delivery enable). We can therefore
merge the two flags into one by removing the latter. This ensures
correctness and, most importantly, conforms to common code, which
always operates on evtchn_upcall_mask.
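
Concretely, after the merge the guest's interrupt-enable operations
reduce to single-byte stores on one flag (a minimal sketch; the macro
names are illustrative, not the actual xenlinux ones):

    /* With vpsr.i gone, "interrupts enabled" means "events unmasked":
     * one flag in one location, so no cross-area atomicity problem. */
    #define events_disabled(vi)  ((vi)->evtchn_upcall_mask)
    #define disable_events(vi)   ((vi)->evtchn_upcall_mask = 1)
    #define enable_events(vi)    ((vi)->evtchn_upcall_mask = 0)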

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author awilliam@xenbuild.aw
date Thu Mar 30 09:55:26 2006 -0700 (2006-03-30)
parents 29dfadcc5029
children 5cc367720223
/*
 * xen/arch/ia64/time.c
 *
 * Copyright (C) 2005 Hewlett-Packard Co
 * Dan Magenheimer <dan.magenheimer@hp.com>
 */

#include <linux/config.h>

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>

#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/vcpu.h>
#include <linux/jiffies.h>	// not included by xen/sched.h
#include <xen/softirq.h>
#include <xen/event.h>

/* FIXME: where should these declarations live? */
extern void ia64_init_itm(void);

seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;

#define TIME_KEEPER_ID 0
unsigned long domain0_ready = 0;
static s_time_t stime_irq = 0x0;	/* System time at last 'time update' */
unsigned long itc_scale, ns_scale;
unsigned long itc_at_irq;

/* We don't expect an absolute cycle value here, since there would then
 * be no way to prevent overflow for a large numerator. Normally this
 * conversion is used for relative offsets.
 */
u64 cycle_to_ns(u64 cycle)
{
    return (cycle * itc_scale) >> 32;
}

u64 ns_to_cycle(u64 ns)
{
    return (ns * ns_scale) >> 32;
}
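
/*
 * Worked example (illustrative, assuming a hypothetical 1 GHz ITC):
 * init_xen_time() below sets itc_scale = (10^9 << 32) / itc_freq and
 * ns_scale = (itc_freq << 32) / 10^9, i.e. 32.32 fixed-point ratios.
 * For itc_freq == 10^9, itc_scale == 1 << 32, so cycle_to_ns(c) ==
 * (c << 32) >> 32 == c: one nanosecond per cycle. The 64-bit product
 * is also why only relative deltas, never absolute cycle counts,
 * should be passed in.
 */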

static inline u64 get_time_delta(void)
{
    s64 delta_itc;
    u64 cur_itc;

    cur_itc = ia64_get_itc();

    delta_itc = (s64)(cur_itc - itc_at_irq);

    /* Ensure that the returned system time is monotonically increasing. */
    if (unlikely(delta_itc < 0)) delta_itc = 0;
    return cycle_to_ns(delta_itc);
}

s_time_t get_s_time(void)
{
    s_time_t now;
    unsigned long seq;

    do {
        seq = read_seqbegin(&xtime_lock);
        now = stime_irq + get_time_delta();
    } while (unlikely(read_seqretry(&xtime_lock, seq)));

    return now;
}
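
/*
 * NB: the retry loop above pairs with the write_seqlock() in
 * xen_timer_interrupt(); that writer is currently compiled out
 * (TURN_ME_OFF_FOR_NOW_IA64_XEN), so until it is re-enabled
 * read_seqretry() can never actually request a retry.
 */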

void update_dom_time(struct vcpu *v)
{
    /* No-op here; let dom0 manage system time directly. */
    return;
}

/* Set clock to <secs,nsecs> after 00:00:00 UTC, 1 January, 1970. */
void do_settime(unsigned long secs, unsigned long nsecs, u64 system_time_base)
{
    /* If absolute system time is managed by dom0, there's no need for
     * such an action, since only virtual itc/itm service is provided.
     */
    return;
}

irqreturn_t
xen_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    unsigned long new_itm, old_itc;

#if 0
#define HEARTBEAT_FREQ 16	// period in seconds
#ifdef HEARTBEAT_FREQ
    static long count = 0;
    if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) {
        printf("Heartbeat... iip=%p\n", /*",psr.i=%d,pend=%d\n", */
               regs->cr_iip /*,
               !current->vcpu_info->evtchn_upcall_mask,
               VCPU(current,pending_interruption) */);
        count = 0;
    }
#endif
#endif

    if (!is_idle_domain(current->domain))
        if (vcpu_timer_expired(current)) {
            vcpu_pend_timer(current);
            // ensure another timer interrupt happens even if the domain
            // doesn't handle this one
            vcpu_set_next_timer(current);
        }

    new_itm = local_cpu_data->itm_next;

    if (!VMX_DOMAIN(current) && !time_after(ia64_get_itc(), new_itm))
        return IRQ_HANDLED;

    while (1) {
        new_itm += local_cpu_data->itm_delta;

        if (smp_processor_id() == TIME_KEEPER_ID) {
            /*
             * Here we are in the timer irq handler. We have irqs locally
             * disabled, but we don't know if the timer_bh is running on
             * another CPU. We need to avoid an SMP race by acquiring the
             * xtime_lock.
             */
#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
            write_seqlock(&xtime_lock);
#endif
#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
            do_timer(regs);
#endif
            local_cpu_data->itm_next = new_itm;

            /* Update system time (nanoseconds since boot). */
            old_itc = itc_at_irq;
            itc_at_irq = ia64_get_itc();
            stime_irq += cycle_to_ns(itc_at_irq - old_itc);

#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
            write_sequnlock(&xtime_lock);
#endif
        } else
            local_cpu_data->itm_next = new_itm;

        if (time_after(new_itm, ia64_get_itc()))
            break;
    }

    do {
        /*
         * If we're too close to the next clock tick for
         * comfort, we increase the safety margin by
         * intentionally dropping the next tick(s). We do NOT
         * update itm.next because that would force us to call
         * do_timer() which in turn would let our clock run
         * too fast (with the potentially devastating effect
         * of losing the monotonicity of time).
         */
        while (!time_after(new_itm,
                           ia64_get_itc() + local_cpu_data->itm_delta/2))
            new_itm += local_cpu_data->itm_delta;
        ia64_set_itm(new_itm);
        /* double check, in case we got hit by a (slow) PMI: */
    } while (time_after_eq(ia64_get_itc(), new_itm));
    raise_softirq(TIMER_SOFTIRQ);

    return IRQ_HANDLED;
}

static struct irqaction xen_timer_irqaction = {
    .handler = (void *) xen_timer_interrupt,
    .name = "timer"
};

void __init
ia64_time_init(void)
{
    register_percpu_irq(IA64_TIMER_VECTOR, &xen_timer_irqaction);
    ia64_init_itm();
}

/* Late init function (after all CPUs are booted). */
int __init init_xen_time()
{
    ia64_time_init();
    itc_scale = 1000000000UL << 32;
    itc_scale /= local_cpu_data->itc_freq;
    ns_scale = (local_cpu_data->itc_freq << 32) / 1000000000UL;

    /* System time ticks from zero. */
    stime_irq = (s_time_t)0;
    itc_at_irq = ia64_get_itc();

    printk("Time init:\n");
    printk(".... System Time: %ldns\n", NOW());
    printk(".... scale:       %16lX\n", itc_scale);

    return 0;
}
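
/*
 * Program the next ITC match for a timeout given in system time.
 * Returns 0 if the timeout has already passed (presumably so the
 * caller can process the expiry at once); returns 1 either when the
 * match was set up or when dom0 is not ready / timeout == 0, in
 * which case nothing is programmed.
 */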
int reprogram_timer(s_time_t timeout)
{
    struct vcpu *v = current;
    s_time_t expire;
    unsigned long seq, cur_itc, itm_next;

    if (!domain0_ready || timeout == 0) return 1;

    do {
        seq = read_seqbegin(&xtime_lock);
        if ((expire = timeout - NOW()) < 0)
            return 0;

        cur_itc = ia64_get_itc();
        itm_next = cur_itc + ns_to_cycle(expire);
    } while (unlikely(read_seqretry(&xtime_lock, seq)));

    local_cpu_data->itm_next = itm_next;
    vcpu_set_next_timer(v);
    return 1;
}

void send_timer_event(struct vcpu *v)
{
    send_guest_virq(v, VIRQ_TIMER);
}