xen/arch/x86/hvm/vpt.c @ 16356:8ff5bb70136d

x86, hvm: Clean up periodic timer code a little. This leads naturally
to a no-missed-tick-accounting mode in which missed ticks are delivered
'off beat' immediately upon re-scheduling, after which delivery reverts
to the usual 'on beat' cadence.
Signed-off-by: Keir Fraser <keir@xensource.com>
Author: Keir Fraser <keir@xensource.com>
Date:   Thu Nov 08 10:33:18 2007 +0000

/*
 * vpt.c: Virtual Platform Timer
 *
 * Copyright (c) 2006, Xiaowei Yang, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
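
/*
 * Timer-mode summary (HVM_PARAM_TIMER_MODE; see the changeset note above):
 *  - delay_for_missed_ticks: guest time is frozen while a vcpu is
 *    descheduled (pt_freeze_time/pt_thaw_time) and is pulled forward to
 *    each delivered tick if it lags (pt_intr_post).
 *  - no_missed_tick_accounting: any backlog of missed ticks is collapsed
 *    into the tick being delivered (pt_intr_post), so catch-up ticks fire
 *    'off beat' right after re-scheduling, then delivery reverts to
 *    'on beat'.
 *  - Otherwise every missed tick stays pending and is delivered in turn.
 */
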
#include <xen/time.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vpt.h>
#include <asm/event.h>
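
/* Evaluates true iff domain d's timer mode parameter is HVMPTM_<name>. */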
#define mode_is(d, name) \
    ((d)->arch.hvm_domain.params[HVM_PARAM_TIMER_MODE] == HVMPTM_##name)
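
/*
 * Take the tm_lock of the vcpu that currently owns pt. The owner can
 * change under our feet (e.g. if the timer is re-created on another
 * vcpu), so re-check pt->vcpu after taking the lock and retry if needed.
 */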
static void pt_lock(struct periodic_time *pt)
{
    struct vcpu *v;

    for ( ; ; )
    {
        v = pt->vcpu;
        spin_lock(&v->arch.hvm_vcpu.tm_lock);
        if ( likely(pt->vcpu == v) )
            break;
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
    }
}
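
/* Release the lock taken by pt_lock(). */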
static void pt_unlock(struct periodic_time *pt)
{
    spin_unlock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
}
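
/*
 * For a periodic timer whose deadline has already passed, count the
 * missed ticks, add them to pending_intr_nr, and advance the scheduled
 * deadline beyond NOW().
 */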
static void pt_process_missed_ticks(struct periodic_time *pt)
{
    s_time_t missed_ticks, now = NOW();

    if ( pt->one_shot )
        return;

    missed_ticks = now - pt->scheduled;
    if ( missed_ticks <= 0 )
        return;

    missed_ticks = missed_ticks / (s_time_t) pt->period + 1;
    pt->pending_intr_nr += missed_ticks;
    pt->scheduled += missed_ticks * pt->period;
}
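
/*
 * In delay_for_missed_ticks mode, record guest time when the vcpu is
 * descheduled; pt_thaw_time() restores it when the vcpu runs again.
 */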
static void pt_freeze_time(struct vcpu *v)
{
    if ( !mode_is(v->domain, delay_for_missed_ticks) )
        return;

    v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
}
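
/* Restore frozen guest time; guest_time == 0 means nothing was frozen. */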
static void pt_thaw_time(struct vcpu *v)
{
    if ( !mode_is(v->domain, delay_for_missed_ticks) )
        return;

    if ( v->arch.hvm_vcpu.guest_time == 0 )
        return;

    hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
    v->arch.hvm_vcpu.guest_time = 0;
}
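
/*
 * Called when the vcpu is descheduled: stop all its platform timers and
 * freeze guest time. A blocked vcpu keeps its timers running, since a
 * timer expiry is what may wake it.
 */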
void pt_save_timer(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    if ( test_bit(_VPF_blocked, &v->pause_flags) )
        return;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
        stop_timer(&pt->timer);

    pt_freeze_time(v);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
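
/*
 * Called when the vcpu is rescheduled: account for ticks missed while it
 * was away, reprogram each timer, and unfreeze guest time.
 */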
void pt_restore_timer(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        pt_process_missed_ticks(pt);
        set_timer(&pt->timer, pt->scheduled);
    }

    pt_thaw_time(v);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
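
/*
 * Xen timer callback: record a pending tick, reprogram periodic timers
 * for the next beat (folding in any further missed ticks), and kick the
 * vcpu so the interrupt can be injected.
 */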
static void pt_timer_fn(void *data)
{
    struct periodic_time *pt = data;

    pt_lock(pt);

    pt->pending_intr_nr++;

    if ( !pt->one_shot )
    {
        pt->scheduled += pt->period;
        pt_process_missed_ticks(pt);
        set_timer(&pt->timer, pt->scheduled);
    }

    vcpu_kick(pt->vcpu);

    pt_unlock(pt);
}
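
/*
 * Pick, among unmasked timers with pending ticks, the one with the
 * earliest deadline (despite its name, max_lag tracks the minimum of
 * last_plt_gtime + period_cycles) and inject its interrupt: directly
 * into the vlapic for the LAPIC timer, or by pulsing the ISA irq line.
 */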
void pt_update_irq(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;
    uint64_t max_lag = -1ULL;
    int irq = -1;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        if ( !is_isa_irq_masked(v, pt->irq) && pt->pending_intr_nr &&
             ((pt->last_plt_gtime + pt->period_cycles) < max_lag) )
        {
            max_lag = pt->last_plt_gtime + pt->period_cycles;
            irq = pt->irq;
        }
    }

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    if ( is_lvtt(v, irq) )
    {
        vlapic_set_irq(vcpu_vlapic(v), irq, 0);
    }
    else if ( irq >= 0 )
    {
        hvm_isa_irq_deassert(v->domain, irq);
        hvm_isa_irq_assert(v->domain, irq);
    }
}
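
/*
 * Map an interrupt being acknowledged back to the pending periodic timer
 * that raised it, if any. LAPIC timers match on vector; ISA irqs are
 * translated to a vector first, and the RTC (irq 8) must also be in its
 * periodic-interrupt mode.
 */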
static struct periodic_time *is_pt_irq(
    struct vcpu *v, struct hvm_intack intack)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;
    struct RTCState *rtc = &v->domain->arch.hvm_domain.pl_time.vrtc;
    int vector;

    list_for_each_entry ( pt, head, list )
    {
        if ( !pt->pending_intr_nr )
            continue;

        if ( is_lvtt(v, pt->irq) )
        {
            if ( pt->irq != intack.vector )
                continue;
            return pt;
        }

        vector = get_isa_irq_vector(v, pt->irq, intack.source);

        /* The RTC irq needs special care. */
        if ( (intack.vector != vector) ||
             ((pt->irq == 8) && !is_rtc_periodic_irq(rtc)) )
            continue;

        return pt;
    }

    return NULL;
}
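
/*
 * Account for a timer tick whose interrupt is now being delivered.
 * One-shot timers are disabled and delisted; periodic timers consume one
 * pending tick, or collapse all of them in no_missed_tick_accounting
 * mode. In delay_for_missed_ticks mode, guest time is pulled forward if
 * it lags the delivered tick. The callback runs after the lock is
 * dropped.
 */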
void pt_intr_post(struct vcpu *v, struct hvm_intack intack)
{
    struct periodic_time *pt;
    time_cb *cb;
    void *cb_priv;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt = is_pt_irq(v, intack);
    if ( pt == NULL )
    {
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
        return;
    }

    if ( pt->one_shot )
    {
        pt->enabled = 0;
        list_del(&pt->list);
    }
    else
    {
        if ( mode_is(v->domain, no_missed_tick_accounting) )
        {
            pt->last_plt_gtime = hvm_get_guest_time(v);
            pt->pending_intr_nr = 0; /* 'collapse' all missed ticks */
        }
        else
        {
            pt->last_plt_gtime += pt->period_cycles;
            pt->pending_intr_nr--;
        }
    }

    if ( mode_is(v->domain, delay_for_missed_ticks) &&
         (hvm_get_guest_time(v) < pt->last_plt_gtime) )
        hvm_set_guest_time(v, pt->last_plt_gtime);

    cb = pt->cb;
    cb_priv = pt->priv;

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    if ( cb != NULL )
        cb(v, cb_priv);
}
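
/*
 * Restart all of the vcpu's timers: clear pending ticks and rebase each
 * timer one full period from now.
 */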
void pt_reset(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        pt->pending_intr_nr = 0;
        pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
        pt->scheduled = NOW() + pt->period;
        set_timer(&pt->timer, pt->scheduled);
    }

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
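
/* Move the vcpu's timers to the physical CPU it now runs on. */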
void pt_migrate(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
        migrate_timer(&pt->timer, v->processor);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
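
/*
 * (Re)initialise and start a timer for vcpu v. Periodic timers are
 * clamped to a minimum period, and LAPIC ticks are offset by half a
 * period from other timers (see the comment in the body).
 */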
void create_periodic_time(
    struct vcpu *v, struct periodic_time *pt, uint64_t period,
    uint8_t irq, char one_shot, time_cb *cb, void *data)
{
    destroy_periodic_time(pt);

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt->enabled = 1;
    pt->pending_intr_nr = 0;

    /* Periodic timer must be at least 0.9ms. */
    if ( (period < 900000) && !one_shot )
    {
        gdprintk(XENLOG_WARNING,
                 "HVM_PlatformTime: program too small period %"PRIu64"\n",
                 period);
        period = 900000;
    }

    pt->period = period;
    pt->vcpu = v;
    pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
    pt->irq = irq;
    pt->period_cycles = (u64)period * cpu_khz / 1000000L;
    pt->one_shot = one_shot;
    pt->scheduled = NOW() + period;
    /*
     * Offset LAPIC ticks from other timer ticks. Otherwise guests which use
     * LAPIC ticks for process accounting can see long sequences of process
     * ticks incorrectly accounted to interrupt processing.
     */
    if ( is_lvtt(v, irq) )
        pt->scheduled += period >> 1;
    pt->cb = cb;
    pt->priv = data;

    list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);

    init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
    set_timer(&pt->timer, pt->scheduled);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
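
/*
 * Disable and delist a timer. Safe to call even if the timer was never
 * enabled; kill_timer() must happen outside pt_lock() (see below).
 */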
void destroy_periodic_time(struct periodic_time *pt)
{
    if ( !pt->enabled )
        return;

    pt_lock(pt);
    pt->enabled = 0;
    list_del(&pt->list);
    pt_unlock(pt);

    /*
     * pt_timer_fn() can run until this kill_timer() returns. We must do this
     * outside pt_lock() otherwise we can deadlock with pt_timer_fn().
     */
    kill_timer(&pt->timer);
}