ia64/xen-unstable

view xen/arch/ia64/xen/xentime.c @ 9314:58a3ed82eee4

[IA64] Removed warning messages

This patch removes the warning messages in vcpu.c, xentime.c, and xensetup.c.
I tested compilation, booting dom0, and creating/destroying a domU.

Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
author awilliam@xenbuild.aw
date Mon Mar 20 09:19:36 2006 -0700 (2006-03-20)
parents 25003dd43a92
children 29dfadcc5029 08aede767c63
line source
/*
 * xen/arch/ia64/time.c
 *
 * Copyright (C) 2005 Hewlett-Packard Co
 * Dan Magenheimer <dan.magenheimer@hp.com>
 */
#include <linux/config.h>

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>

#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/vcpu.h>
#include <linux/jiffies.h>  // not included by xen/sched.h
#include <xen/softirq.h>
#include <xen/event.h>
/* FIXME: where should these declarations live? */
extern void ia64_init_itm(void);
seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;

#define TIME_KEEPER_ID 0
unsigned long domain0_ready = 0;
static s_time_t stime_irq = 0x0;  /* System time at last 'time update' */
unsigned long itc_scale, ns_scale;
unsigned long itc_at_irq;
/* We don't expect an absolute cycle value here, since there would then be
 * no way to prevent overflow for a large numerator.  Normally this
 * conversion is used for relative offsets.
 */
u64 cycle_to_ns(u64 cycle)
{
    return (cycle * itc_scale) >> 32;
}
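/* Convert a nanosecond interval into ITC cycles via the 32.32 fixed-point
 * ns_scale factor computed in init_xen_time(). */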
u64 ns_to_cycle(u64 ns)
{
    return (ns * ns_scale) >> 32;
}
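/* Nanoseconds elapsed since the last timer interrupt on this CPU, derived
 * from the current ITC value and itc_at_irq. */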
static inline u64 get_time_delta(void)
{
    s64 delta_itc;
    u64 cur_itc;

    cur_itc = ia64_get_itc();

    delta_itc = (s64)(cur_itc - itc_at_irq);

    /* Ensure that the returned system time is monotonically increasing. */
    if ( unlikely(delta_itc < 0) ) delta_itc = 0;
    return cycle_to_ns(delta_itc);
}
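/* System time in nanoseconds since boot: the time accumulated at the last
 * timer interrupt plus the delta since then, read under the xtime_lock
 * seqlock so a concurrent update is retried rather than observed half-done. */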
s_time_t get_s_time(void)
{
    s_time_t now;
    unsigned long seq;

    do {
        seq = read_seqbegin(&xtime_lock);
        now = stime_irq + get_time_delta();
    } while (unlikely(read_seqretry(&xtime_lock, seq)));

    return now;
}
void update_dom_time(struct vcpu *v)
{
    /* No-op here; let dom0 manage system time directly. */
    return;
}
/* Set clock to <secs,nsecs> after 00:00:00 UTC, 1 January, 1970. */
void do_settime(unsigned long secs, unsigned long nsecs, u64 system_time_base)
{
    /* If absolute system time is managed by dom0, there is no need for any
     * action here, since only a virtual itc/itm service is provided.
     */
    return;
}
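/* Per-CPU timer tick handler: pends a virtual timer interrupt to the current
 * vcpu when its timer has expired, advances the system time on the
 * time-keeper CPU, reprograms the ITM for the next tick, and raises
 * TIMER_SOFTIRQ. */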
irqreturn_t
xen_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{
    unsigned long new_itm, old_itc;

#if 0
#define HEARTBEAT_FREQ 16  // period in seconds
#ifdef HEARTBEAT_FREQ
    static long count = 0;
    if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) {
        printf("Heartbeat... iip=%p\n", /*",psr.i=%d,pend=%d\n", */
               regs->cr_iip /*,
               VCPU(current,interrupt_delivery_enabled),
               VCPU(current,pending_interruption) */);
        count = 0;
    }
#endif
#endif
#if 0
    /* Nobody seems to be able to explain this code.  It seems to be
     * accumulated tricks which are not required anymore.  I have also run
     * many tests; I'd like to get confirmation from another site (TG). */
    if (current->domain == dom0) {
        // FIXME: there's gotta be a better way of doing this...
        // We have to ensure that domain0 is launched before we
        // call vcpu_timer_expired on it
        //domain0_ready = 1; // moved to xensetup.c
        VCPU(current,pending_interruption) = 1;
    }
    if (domain0_ready && current->domain != dom0) {
        if (vcpu_timer_expired(dom0->vcpu[0])) {
            vcpu_pend_timer(dom0->vcpu[0]);
            //vcpu_set_next_timer(dom0->vcpu[0]);
            vcpu_wake(dom0->vcpu[0]);
        }
    }
#endif
    if (!is_idle_domain(current->domain)) {
        if (vcpu_timer_expired(current)) {
            vcpu_pend_timer(current);
            // ensure another timer interrupt happens even if domain doesn't
            vcpu_set_next_timer(current);
            vcpu_wake(current);
        }
    }
    new_itm = local_cpu_data->itm_next;

    if (!VMX_DOMAIN(current) && !time_after(ia64_get_itc(), new_itm))
        return IRQ_HANDLED;

    if (VMX_DOMAIN(current))
        vcpu_wake(current);

    while (1) {
        new_itm += local_cpu_data->itm_delta;

        if (smp_processor_id() == TIME_KEEPER_ID) {
            /*
             * Here we are in the timer irq handler.  We have irqs locally
             * disabled, but we don't know if the timer_bh is running on
             * another CPU.  We need to avoid an SMP race by acquiring the
             * xtime_lock.
             */
#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
            write_seqlock(&xtime_lock);
#endif
#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
            do_timer(regs);
#endif
            local_cpu_data->itm_next = new_itm;

            /* Update system time (nanoseconds since boot). */
            old_itc = itc_at_irq;
            itc_at_irq = ia64_get_itc();
            stime_irq += cycle_to_ns(itc_at_irq - old_itc);

#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
            write_sequnlock(&xtime_lock);
#endif
        } else
            local_cpu_data->itm_next = new_itm;

        if (time_after(new_itm, ia64_get_itc()))
            break;
    }
    do {
        /*
         * If we're too close to the next clock tick for
         * comfort, we increase the safety margin by
         * intentionally dropping the next tick(s).  We do NOT
         * update itm.next because that would force us to call
         * do_timer() which in turn would let our clock run
         * too fast (with the potentially devastating effect
         * of losing monotonicity of time).
         */
        while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
            new_itm += local_cpu_data->itm_delta;
//#ifdef XEN
//        vcpu_set_next_timer(current);
//#else
//printf("***** timer_interrupt: Setting itm to %lx\n",new_itm);
        ia64_set_itm(new_itm);
//#endif
        /* double check, in case we got hit by a (slow) PMI: */
    } while (time_after_eq(ia64_get_itc(), new_itm));
    raise_softirq(TIMER_SOFTIRQ);

    return IRQ_HANDLED;
}
static struct irqaction xen_timer_irqaction = {
    .handler = (void *) xen_timer_interrupt,
    .name = "timer"
};
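/* Register the timer interrupt handler on IA64_TIMER_VECTOR and initialise
 * the interval timer (ITC/ITM) on this CPU. */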
void __init
ia64_time_init (void)
{
    register_percpu_irq(IA64_TIMER_VECTOR, &xen_timer_irqaction);
    ia64_init_itm();
}
/* Late init function (after all CPUs are booted). */
int __init init_xen_time()
{
    ia64_time_init();
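    /* 32.32 fixed-point scale factors:
     *   itc_scale = (10^9 << 32) / itc_freq, so cycle_to_ns(c) = (c * itc_scale) >> 32
     *   ns_scale  = (itc_freq << 32) / 10^9, so ns_to_cycle(n) = (n * ns_scale) >> 32
     */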
    itc_scale = 1000000000UL << 32;
    itc_scale /= local_cpu_data->itc_freq;
    ns_scale = (local_cpu_data->itc_freq << 32) / 1000000000UL;

    /* System time ticks from zero. */
    stime_irq = (s_time_t)0;
    itc_at_irq = ia64_get_itc();

    printk("Time init:\n");
    printk(".... System Time: %ldns\n", NOW());
    printk(".... scale: %16lX\n", itc_scale);

    return 0;
}
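/* Program this CPU's next timer interrupt to fire at the absolute system
 * time 'timeout' (in ns).  Returns 0 if the deadline has already passed. */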
int reprogram_timer(s_time_t timeout)
{
    struct vcpu *v = current;
    s_time_t expire;
    unsigned long seq, cur_itc, itm_next;

    if (!domain0_ready || timeout == 0) return 1;

    do {
        seq = read_seqbegin(&xtime_lock);
        if ((expire = timeout - NOW()) < 0)
            return 0;

        cur_itc = ia64_get_itc();
        itm_next = cur_itc + ns_to_cycle(expire);
    } while (unlikely(read_seqretry(&xtime_lock, seq)));

    local_cpu_data->itm_next = itm_next;
    vcpu_set_next_timer(v);
    return 1;
}
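/* Deliver a timer virtual interrupt (VIRQ_TIMER) to the given vcpu. */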
void send_timer_event(struct vcpu *v)
{
    send_guest_virq(v, VIRQ_TIMER);
}