ia64/xen-unstable: xen/arch/x86/hvm/vpt.c @ 16603:4553bc1087d9

hvm: Reduce vpt.c dependencies on external timer details.

Author:        Keir Fraser <keir.fraser@citrix.com>
Date:          Wed Dec 12 15:41:20 2007 +0000
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Parents:       f2f7c92bf1c1
Children:      2ebced8f8baf

/*
 * vpt.c: Virtual Platform Timer
 *
 * Copyright (c) 2006, Xiaowei Yang, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/time.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vpt.h>
#include <asm/event.h>
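
/* True iff domain d's timer mode parameter is HVMPTM_<name>. */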
#define mode_is(d, name) \
    ((d)->arch.hvm_domain.params[HVM_PARAM_TIMER_MODE] == HVMPTM_##name)
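
/*
 * Return the guest vector through which this timer's interrupt will be
 * delivered. LAPIC timers carry the vector directly; ISA-IRQ timers are
 * translated via the PIC or the IO-APIC redirection table, depending on
 * which source is injecting.
 */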
static int pt_irq_vector(struct periodic_time *pt, enum hvm_intsrc src)
{
    struct vcpu *v = pt->vcpu;
    unsigned int gsi, isa_irq;

    if ( pt->source == PTSRC_lapic )
        return pt->irq;

    isa_irq = pt->irq;
    gsi = hvm_isa_irq_to_gsi(isa_irq);

    if ( src == hvm_intsrc_pic )
        return (v->domain->arch.hvm_domain.vpic[isa_irq >> 3].irq_base
                + (isa_irq & 7));

    ASSERT(src == hvm_intsrc_lapic);
    return domain_vioapic(v->domain)->redirtbl[gsi].fields.vector;
}
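
/*
 * Return nonzero if the timer's interrupt cannot currently be delivered:
 * APIC disabled or LVTT masked for LAPIC timers; blocked at both the PIC
 * (IMR bit set, or PIC delivery unavailable) and the IO-APIC for ISA-IRQ
 * timers.
 */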
static int pt_irq_masked(struct periodic_time *pt)
{
    struct vcpu *v = pt->vcpu;
    unsigned int gsi, isa_irq;
    uint8_t pic_imr;

    if ( pt->source == PTSRC_lapic )
    {
        struct vlapic *vlapic = vcpu_vlapic(v);
        return (!vlapic_enabled(vlapic) ||
                (vlapic_get_reg(vlapic, APIC_LVTT) & APIC_LVT_MASKED));
    }

    isa_irq = pt->irq;
    gsi = hvm_isa_irq_to_gsi(isa_irq);
    pic_imr = v->domain->arch.hvm_domain.vpic[isa_irq >> 3].imr;

    return (((pic_imr & (1 << (isa_irq & 7))) || !vlapic_accept_pic_intr(v)) &&
            domain_vioapic(v->domain)->redirtbl[gsi].fields.mask);
}
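
/*
 * Lock the vCPU this timer is currently bound to. The binding may change
 * while we wait for the lock, so re-check pt->vcpu and retry if it moved.
 */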
static void pt_lock(struct periodic_time *pt)
{
    struct vcpu *v;

    for ( ; ; )
    {
        v = pt->vcpu;
        spin_lock(&v->arch.hvm_vcpu.tm_lock);
        if ( likely(pt->vcpu == v) )
            break;
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
    }
}

static void pt_unlock(struct periodic_time *pt)
{
    spin_unlock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
}
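
/*
 * Account for ticks that fell due while the timer was not running. In
 * no_missed_ticks_pending mode missed ticks are dropped (and, when none
 * are pending, the timer is flagged do_not_freeze so it keeps running
 * across a deschedule); otherwise they are queued as pending interrupts.
 * Either way the next deadline is pushed past NOW().
 */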
static void pt_process_missed_ticks(struct periodic_time *pt)
{
    s_time_t missed_ticks, now = NOW();

    if ( pt->one_shot )
        return;

    missed_ticks = now - pt->scheduled;
    if ( missed_ticks <= 0 )
        return;

    missed_ticks = missed_ticks / (s_time_t) pt->period + 1;
    if ( mode_is(pt->vcpu->domain, no_missed_ticks_pending) )
        pt->do_not_freeze = !pt->pending_intr_nr;
    else
        pt->pending_intr_nr += missed_ticks;
    pt->scheduled += missed_ticks * pt->period;
}
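
/*
 * In delay_for_missed_ticks mode, snapshot guest time when the vCPU is
 * descheduled so that it can be held back until the vCPU runs again.
 */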
static void pt_freeze_time(struct vcpu *v)
{
    if ( !mode_is(v->domain, delay_for_missed_ticks) )
        return;

    v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
}
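
/* Undo pt_freeze_time(): restore the snapshotted guest time on reschedule. */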
static void pt_thaw_time(struct vcpu *v)
{
    if ( !mode_is(v->domain, delay_for_missed_ticks) )
        return;

    if ( v->arch.hvm_vcpu.guest_time == 0 )
        return;

    hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
    v->arch.hvm_vcpu.guest_time = 0;
}
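
/*
 * Deschedule hook: stop this vCPU's timers (unless flagged do_not_freeze)
 * and freeze its notion of guest time.
 */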
void pt_save_timer(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    if ( test_bit(_VPF_blocked, &v->pause_flags) )
        return;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
        if ( !pt->do_not_freeze )
            stop_timer(&pt->timer);

    pt_freeze_time(v);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
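
/*
 * Reschedule hook: account ticks missed while descheduled, restart the
 * timers, and thaw guest time.
 */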
void pt_restore_timer(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        pt_process_missed_ticks(pt);
        set_timer(&pt->timer, pt->scheduled);
    }

    pt_thaw_time(v);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
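
/*
 * Xen timer callback: note that a tick is pending, reprogram the next
 * expiry for periodic timers, and kick the target vCPU so the interrupt
 * gets delivered.
 */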
static void pt_timer_fn(void *data)
{
    struct periodic_time *pt = data;

    pt_lock(pt);

    pt->pending_intr_nr++;

    if ( !pt->one_shot )
    {
        pt->scheduled += pt->period;
        pt_process_missed_ticks(pt);
        set_timer(&pt->timer, pt->scheduled);
    }

    vcpu_kick(pt->vcpu);

    pt_unlock(pt);
}
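
/*
 * Of all unmasked timers with ticks pending, inject the one whose virtual
 * deadline (last delivered tick plus one period) is earliest, i.e. the
 * timer that has fallen furthest behind.
 */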
void pt_update_irq(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt, *earliest_pt = NULL;
    uint64_t max_lag = -1ULL;
    int irq, is_lapic;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        if ( !pt_irq_masked(pt) && pt->pending_intr_nr &&
             ((pt->last_plt_gtime + pt->period_cycles) < max_lag) )
        {
            max_lag = pt->last_plt_gtime + pt->period_cycles;
            earliest_pt = pt;
        }
    }

    if ( earliest_pt == NULL )
    {
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
        return;
    }

    earliest_pt->irq_issued = 1;
    irq = earliest_pt->irq;
    is_lapic = (earliest_pt->source == PTSRC_lapic);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    if ( is_lapic )
    {
        vlapic_set_irq(vcpu_vlapic(v), irq, 0);
    }
    else
    {
        hvm_isa_irq_deassert(v->domain, irq);
        hvm_isa_irq_assert(v->domain, irq);
    }
}
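
/* Map an acknowledged interrupt vector back to the timer that issued it. */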
static struct periodic_time *is_pt_irq(
    struct vcpu *v, struct hvm_intack intack)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    list_for_each_entry ( pt, head, list )
    {
        if ( pt->pending_intr_nr && pt->irq_issued &&
             (intack.vector == pt_irq_vector(pt, intack.source)) )
            return pt;
    }

    return NULL;
}
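
/*
 * Called once the guest has acknowledged a timer interrupt: clear the
 * issued state, account the delivered tick according to the domain's
 * timer mode, catch guest time up if it lags, and run the timer callback.
 */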
void pt_intr_post(struct vcpu *v, struct hvm_intack intack)
{
    struct periodic_time *pt;
    time_cb *cb;
    void *cb_priv;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt = is_pt_irq(v, intack);
    if ( pt == NULL )
    {
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
        return;
    }

    pt->do_not_freeze = 0;
    pt->irq_issued = 0;

    if ( pt->one_shot )
    {
        if ( pt->on_list )
            list_del(&pt->list);
        pt->on_list = 0;
    }
    else
    {
        if ( mode_is(v->domain, one_missed_tick_pending) )
        {
            pt->last_plt_gtime = hvm_get_guest_time(v);
            pt->pending_intr_nr = 0; /* 'collapse' all missed ticks */
        }
        else
        {
            pt->last_plt_gtime += pt->period_cycles;
            pt->pending_intr_nr--;
        }
    }

    if ( mode_is(v->domain, delay_for_missed_ticks) &&
         (hvm_get_guest_time(v) < pt->last_plt_gtime) )
        hvm_set_guest_time(v, pt->last_plt_gtime);

    cb = pt->cb;
    cb_priv = pt->priv;

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    if ( cb != NULL )
        cb(v, cb_priv);
}
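
/* Discard pending ticks and restart all of v's timers from now. */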
void pt_reset(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        pt->pending_intr_nr = 0;
        pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
        pt->scheduled = NOW() + pt->period;
        set_timer(&pt->timer, pt->scheduled);
    }

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
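
/* Re-bind all of v's timers to its new physical CPU. */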
void pt_migrate(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
        migrate_timer(&pt->timer, v->processor);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
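
/*
 * (Re)initialise and start a timer, adding it to the vCPU's timer list.
 * The caller must have set pt->source before calling this.
 */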
void create_periodic_time(
    struct vcpu *v, struct periodic_time *pt, uint64_t period,
    uint8_t irq, char one_shot, time_cb *cb, void *data)
{
    ASSERT(pt->source != 0);

    destroy_periodic_time(pt);

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt->pending_intr_nr = 0;
    pt->do_not_freeze = 0;
    pt->irq_issued = 0;

    /* Periodic timer must be at least 0.9ms. */
    if ( (period < 900000) && !one_shot )
    {
        gdprintk(XENLOG_WARNING,
                 "HVM_PlatformTime: program too small period %"PRIu64"\n",
                 period);
        period = 900000;
    }

    pt->period = period;
    pt->vcpu = v;
    pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
    pt->irq = irq;
    pt->period_cycles = (u64)period * cpu_khz / 1000000L;
    pt->one_shot = one_shot;
    pt->scheduled = NOW() + period;
    /*
     * Offset LAPIC ticks from other timer ticks. Otherwise guests which use
     * LAPIC ticks for process accounting can see long sequences of process
     * ticks incorrectly accounted to interrupt processing.
     */
    if ( pt->source == PTSRC_lapic )
        pt->scheduled += period >> 1;
    pt->cb = cb;
    pt->priv = data;

    pt->on_list = 1;
    list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);

    init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
    set_timer(&pt->timer, pt->scheduled);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
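
/*
 * Stop a timer and remove it from its vCPU's list. Safe to call on a
 * structure never handed to create_periodic_time() (pt->vcpu == NULL).
 */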
void destroy_periodic_time(struct periodic_time *pt)
{
    /* Was this structure previously initialised by create_periodic_time()? */
    if ( pt->vcpu == NULL )
        return;

    pt_lock(pt);
    if ( pt->on_list )
        list_del(&pt->list);
    pt->on_list = 0;
    pt_unlock(pt);

    /*
     * pt_timer_fn() can run until this kill_timer() returns. We must do this
     * outside pt_lock() otherwise we can deadlock with pt_timer_fn().
     */
    kill_timer(&pt->timer);
}