ia64/xen-unstable

view xen/arch/ia64/xen/xentime.c @ 10685:8ad37880564d

[IA64] emulate PAL_HALT_LIGHT on domU

This patch emulates guest PAL_HALT_LIGHT on domU by using do_block and a timer.
It also delivers the timer interrupt to the domU vcpu when it wakes up.

Signed-off-by: Atsushi SAKAI <sakaia@jp.fujitsu.com>
[warning fixes and static inlining]
Signed-off-by: Alex Williamson <alex.williamson@hp.com>
author awilliam@xenbuild.aw
date Mon Jul 10 13:12:41 2006 -0600 (2006-07-10)
parents b20733e82ab6
children b2abc70be89e
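
The description mentions do_block and a timer; as a rough sketch only (the function name below is hypothetical and this is not the actual diff of the changeset), the domU emulation amounts to arming the vcpu's timer and then blocking, reusing the vcpu timer helpers that appear in the file further down together with the common do_block() scheduler primitive named in the description:

	static void pal_halt_light_sketch(struct vcpu *v)
	{
		/* Illustrative sketch, not the changeset code. */
		if (vcpu_timer_expired(v)) {
			/* A tick is already due: deliver it rather than halting. */
			vcpu_pend_timer(v);
			return;
		}
		/* Make sure a wakeup is programmed... */
		vcpu_set_next_timer(v);
		/* ...then give up the physical CPU until an event arrives. */
		do_block();
	}

The vcpu_timer_expired()/vcpu_pend_timer() block at the top of xen_timer_interrupt() below is the corresponding delivery path for the virtual tick once the vcpu runs again.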
/*
 * xen/arch/ia64/xen/xentime.c
 *
 * Copyright (C) 2005 Hewlett-Packard Co
 * Dan Magenheimer <dan.magenheimer@hp.com>
 */

#include <linux/config.h>

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/profile.h>
#include <linux/timex.h>

#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/vcpu.h>
#include <linux/jiffies.h>	// not included by xen/sched.h
#include <xen/softirq.h>
#include <xen/event.h>

/* FIXME: where should these declarations live? */
extern void ia64_init_itm(void);

seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;

#define TIME_KEEPER_ID  0
unsigned long domain0_ready = 0;
static s_time_t stime_irq = 0x0;       /* System time at last 'time update' */
unsigned long itc_scale, ns_scale;
unsigned long itc_at_irq;

static inline u64 get_time_delta(void)
{
    s64 delta_itc;
    u64 cur_itc;

    cur_itc = ia64_get_itc();

    delta_itc = (s64)(cur_itc - itc_at_irq);

    /* Ensure that the returned system time is monotonically increasing. */
    if ( unlikely(delta_itc < 0) ) delta_itc = 0;
    return cycle_to_ns(delta_itc);
}
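
/*
 * cycle_to_ns()/ns_to_cycle() themselves live outside this file (in the
 * asm-ia64 headers).  Judging from the itc_scale/ns_scale values set up in
 * init_xen_time() below, they are presumably 32.32 fixed-point conversions
 * along the lines of:
 *
 *     ns     = (cycles * itc_scale) >> 32;
 *     cycles = (ns     * ns_scale)  >> 32;
 *
 * (illustrative only).
 */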

s_time_t get_s_time(void)
{
    s_time_t now;
    unsigned long seq;

    do {
        seq = read_seqbegin(&xtime_lock);
        now = stime_irq + get_time_delta();
    } while (unlikely(read_seqretry(&xtime_lock, seq)));

    return now;
}

void update_vcpu_system_time(struct vcpu *v)
{
    /* No-op here; let dom0 manage system time directly. */
    return;
}

void update_domain_wallclock_time(struct domain *d)
{
    /* No-op here; let dom0 manage system time directly. */
    return;
}

/* Set clock to <secs,usecs> after 00:00:00 UTC, 1 January, 1970. */
void do_settime(unsigned long secs, unsigned long nsecs, u64 system_time_base)
{
    /* Since absolute system time is managed by dom0, no action is needed
     * here; only a virtual itc/itm service is provided.
     */
    return;
}

void
xen_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{
    unsigned long new_itm, old_itc;

#if 0
#define HEARTBEAT_FREQ 16	// period in seconds
#ifdef HEARTBEAT_FREQ
    static long count = 0;
    if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) {
        printf("Heartbeat... iip=%p\n", /*",psr.i=%d,pend=%d\n", */
               regs->cr_iip /*,
               !current->vcpu_info->evtchn_upcall_mask,
               VCPU(current,pending_interruption) */);
        count = 0;
    }
#endif
#endif
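
    /*
     * For a non-idle, non-VMX (i.e. paravirtualized domU) vcpu whose
     * virtual timer has expired, pend the virtual timer interrupt and
     * program the next one.  Per the changeset description, this is also
     * what delivers the tick to a domU vcpu that has just woken up after
     * blocking in the PAL_HALT_LIGHT emulation.
     */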
    if (!is_idle_domain(current->domain) && !VMX_DOMAIN(current))
        if (vcpu_timer_expired(current)) {
            vcpu_pend_timer(current);
            // ensure another timer interrupt happens even if the domain doesn't set one
            vcpu_set_next_timer(current);
        }

    new_itm = local_cpu_data->itm_next;

    if (!VMX_DOMAIN(current) && !time_after(ia64_get_itc(), new_itm))
        return;

    while (1) {
        new_itm += local_cpu_data->itm_delta;

        if (smp_processor_id() == TIME_KEEPER_ID) {
            /*
             * Here we are in the timer irq handler.  We have irqs locally
             * disabled, but we don't know if the timer_bh is running on
             * another CPU.  We need to avoid an SMP race by acquiring the
             * xtime_lock.
             */
//#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
            write_seqlock(&xtime_lock);
//#endif
#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
            do_timer(regs);
#endif
            local_cpu_data->itm_next = new_itm;

            /* Update system time (nanoseconds since boot). */
            old_itc = itc_at_irq;
            itc_at_irq = ia64_get_itc();
            stime_irq += cycle_to_ns(itc_at_irq - old_itc);

//#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
            write_sequnlock(&xtime_lock);
//#endif
        } else
            local_cpu_data->itm_next = new_itm;

        if (time_after(new_itm, ia64_get_itc()))
            break;
    }

    do {
        /*
         * If we're too close to the next clock tick for
         * comfort, we increase the safety margin by
         * intentionally dropping the next tick(s).  We do NOT
         * update itm.next because that would force us to call
         * do_timer() which in turn would let our clock run
         * too fast (with the potentially devastating effect
         * of losing the monotonicity of time).
         */
        while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
            new_itm += local_cpu_data->itm_delta;
        ia64_set_itm(new_itm);
        /* double check, in case we got hit by a (slow) PMI: */
    } while (time_after_eq(ia64_get_itc(), new_itm));
    raise_softirq(TIMER_SOFTIRQ);
}

static struct irqaction xen_timer_irqaction = {
    .handler = (void *) xen_timer_interrupt,
    .name    = "timer"
};

void __init
ia64_time_init (void)
{
    register_percpu_irq(IA64_TIMER_VECTOR, &xen_timer_irqaction);
    ia64_init_itm();
}

/* Late init function (after all CPUs are booted). */
int __init init_xen_time()
{
    ia64_time_init();
    itc_scale  = 1000000000UL << 32;
    itc_scale /= local_cpu_data->itc_freq;
    ns_scale = (local_cpu_data->itc_freq << 32) / 1000000000UL;

    /* System time ticks from zero. */
    stime_irq = (s_time_t)0;
    itc_at_irq = ia64_get_itc();

    printk("Time init:\n");
    printk(".... System Time: %ldns\n", NOW());
    printk(".... scale:       %16lX\n", itc_scale);

    return 0;
}
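
/*
 * Worked example of the scale factors above, assuming a hypothetical
 * 1.5 GHz ITC: itc_scale = (10^9 << 32) / (1.5 * 10^9) ~= 0.667 * 2^32,
 * so converting cycles to ns multiplies by ~0.667; ns_scale =
 * ((1.5 * 10^9) << 32) / 10^9 = 1.5 * 2^32, so converting ns to cycles
 * multiplies by 1.5.  For a 1 GHz ITC both factors would be exactly
 * 2^32, i.e. one cycle per nanosecond.
 */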

int reprogram_timer(s_time_t timeout)
{
    struct vcpu *v = current;
    s_time_t expire;
    unsigned long seq, cur_itc, itm_next;

    if (!domain0_ready || timeout == 0) return 1;

    do {
        seq = read_seqbegin(&xtime_lock);
        if ((expire = timeout - NOW()) < 0)
            return 0;

        cur_itc = ia64_get_itc();
        itm_next = cur_itc + ns_to_cycle(expire);
    } while (unlikely(read_seqretry(&xtime_lock, seq)));

    local_cpu_data->itm_next = itm_next;
    vcpu_set_next_timer(v);
    return 1;
}
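
/*
 * reprogram_timer() is presumably invoked by the common Xen timer code
 * when the earliest pending software-timer deadline changes; here the
 * deadline is folded into the per-vcpu itm programming via
 * vcpu_set_next_timer(), which is the ITC match that eventually wakes a
 * vcpu blocked in the PAL_HALT_LIGHT emulation.
 */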

void send_timer_event(struct vcpu *v)
{
    send_guest_vcpu_virq(v, VIRQ_TIMER);
}