
xen/arch/x86/hvm/vpt.c @ 19302:d4c9a63a1642

hvm/vpt: Check that an irq is not blocked before waking the vcpu

Currently, when a timer fires for a vpt interrupt, the interrupt
handler calls vcpu_kick() without checking to see if the IRQ is
blocked. This causes the vcpu to wake up out of a halt when it
shouldn't.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
author   Keir Fraser <keir.fraser@citrix.com>
date     Mon Mar 09 13:50:45 2009 +0000
parents  09a6fa059b37
children 82bbce59b65d
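
The substance of the fix is visible in pt_timer_fn() in the listing below:
before waking the vcpu, the expiry handler now asks pt_irq_masked() whether
the timer's interrupt can actually be delivered. In outline (both helpers are
the ones defined in this file; only the framing here is editorial):

    if ( !pt_irq_masked(pt) )   /* can the guest take this interrupt now? */
        vcpu_kick(pt->vcpu);    /* only then wake a possibly halted vcpu */
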
line source
/*
 * vpt.c: Virtual Platform Timer
 *
 * Copyright (c) 2006, Xiaowei Yang, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/time.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vpt.h>
#include <asm/event.h>

#define mode_is(d, name) \
    ((d)->arch.hvm_domain.params[HVM_PARAM_TIMER_MODE] == HVMPTM_##name)

void hvm_init_guest_time(struct domain *d)
{
    struct pl_time *pl = &d->arch.hvm_domain.pl_time;

    spin_lock_init(&pl->pl_time_lock);
    pl->stime_offset = -(u64)get_s_time();
    pl->last_guest_time = 0;
}

u64 hvm_get_guest_time(struct vcpu *v)
{
    struct pl_time *pl = &v->domain->arch.hvm_domain.pl_time;
    u64 now;

    /* Called from device models shared with PV guests. Be careful. */
    ASSERT(is_hvm_vcpu(v));

    spin_lock(&pl->pl_time_lock);
    now = get_s_time() + pl->stime_offset;
    if ( (int64_t)(now - pl->last_guest_time) >= 0 )
        pl->last_guest_time = now;
    else
        now = pl->last_guest_time;
    spin_unlock(&pl->pl_time_lock);

    return now + v->arch.hvm_vcpu.stime_offset;
}

void hvm_set_guest_time(struct vcpu *v, u64 guest_time)
{
    v->arch.hvm_vcpu.stime_offset += guest_time - hvm_get_guest_time(v);
}

static int pt_irq_vector(struct periodic_time *pt, enum hvm_intsrc src)
{
    struct vcpu *v = pt->vcpu;
    unsigned int gsi, isa_irq;

    if ( pt->source == PTSRC_lapic )
        return pt->irq;

    isa_irq = pt->irq;
    gsi = hvm_isa_irq_to_gsi(isa_irq);

    if ( src == hvm_intsrc_pic )
        return (v->domain->arch.hvm_domain.vpic[isa_irq >> 3].irq_base
                + (isa_irq & 7));

    ASSERT(src == hvm_intsrc_lapic);
    return domain_vioapic(v->domain)->redirtbl[gsi].fields.vector;
}

/*
 * Is delivery of this timer's interrupt blocked right now?  For a LAPIC
 * timer this means LVTT is masked (or the vLAPIC is disabled); for an ISA
 * IRQ it means both the PIC route and the IO-APIC entry are blocked.
 */
static int pt_irq_masked(struct periodic_time *pt)
{
    struct vcpu *v = pt->vcpu;
    unsigned int gsi, isa_irq;
    uint8_t pic_imr;

    if ( pt->source == PTSRC_lapic )
    {
        struct vlapic *vlapic = vcpu_vlapic(v);
        return (!vlapic_enabled(vlapic) ||
                (vlapic_get_reg(vlapic, APIC_LVTT) & APIC_LVT_MASKED));
    }

    isa_irq = pt->irq;
    gsi = hvm_isa_irq_to_gsi(isa_irq);
    pic_imr = v->domain->arch.hvm_domain.vpic[isa_irq >> 3].imr;

    return (((pic_imr & (1 << (isa_irq & 7))) || !vlapic_accept_pic_intr(v)) &&
            domain_vioapic(v->domain)->redirtbl[gsi].fields.mask);
}

static void pt_lock(struct periodic_time *pt)
{
    struct vcpu *v;

    /*
     * The timer may be re-bound to another vcpu concurrently (e.g. by
     * create_periodic_time()): lock the current binding and re-check it.
     */
    for ( ; ; )
    {
        v = pt->vcpu;
        spin_lock(&v->arch.hvm_vcpu.tm_lock);
        if ( likely(pt->vcpu == v) )
            break;
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
    }
}

static void pt_unlock(struct periodic_time *pt)
{
    spin_unlock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
}

static void pt_process_missed_ticks(struct periodic_time *pt)
{
    s_time_t missed_ticks, now = NOW();

    if ( pt->one_shot )
        return;

    missed_ticks = now - pt->scheduled;
    if ( missed_ticks <= 0 )
        return;

    missed_ticks = missed_ticks / (s_time_t) pt->period + 1;
    if ( mode_is(pt->vcpu->domain, no_missed_ticks_pending) )
        pt->do_not_freeze = !pt->pending_intr_nr;
    else
        pt->pending_intr_nr += missed_ticks;
    pt->scheduled += missed_ticks * pt->period;
}

static void pt_freeze_time(struct vcpu *v)
{
    if ( !mode_is(v->domain, delay_for_missed_ticks) )
        return;

    v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
}

static void pt_thaw_time(struct vcpu *v)
{
    if ( !mode_is(v->domain, delay_for_missed_ticks) )
        return;

    if ( v->arch.hvm_vcpu.guest_time == 0 )
        return;

    hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
    v->arch.hvm_vcpu.guest_time = 0;
}

void pt_save_timer(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    if ( test_bit(_VPF_blocked, &v->pause_flags) )
        return;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
        if ( !pt->do_not_freeze )
            stop_timer(&pt->timer);

    pt_freeze_time(v);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

void pt_restore_timer(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        pt_process_missed_ticks(pt);
        set_timer(&pt->timer, pt->scheduled);
    }

    pt_thaw_time(v);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

static void pt_timer_fn(void *data)
{
    struct periodic_time *pt = data;

    pt_lock(pt);

    pt->pending_intr_nr++;
    pt->do_not_freeze = 0;

    if ( !pt->one_shot )
    {
        pt->scheduled += pt->period;
        pt_process_missed_ticks(pt);
        set_timer(&pt->timer, pt->scheduled);
    }

    /* Do not wake the vcpu for an interrupt it cannot currently take. */
    if ( !pt_irq_masked(pt) )
        vcpu_kick(pt->vcpu);

    pt_unlock(pt);
}

/*
 * Inject the interrupt of the unmasked timer with pending ticks whose next
 * tick was due earliest, i.e. the one that has fallen furthest behind.
 */
void pt_update_irq(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt, *earliest_pt = NULL;
    uint64_t max_lag = -1ULL;
    int irq, is_lapic;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        if ( !pt_irq_masked(pt) && pt->pending_intr_nr &&
             ((pt->last_plt_gtime + pt->period_cycles) < max_lag) )
        {
            max_lag = pt->last_plt_gtime + pt->period_cycles;
            earliest_pt = pt;
        }
    }

    if ( earliest_pt == NULL )
    {
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
        return;
    }

    earliest_pt->irq_issued = 1;
    irq = earliest_pt->irq;
    is_lapic = (earliest_pt->source == PTSRC_lapic);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    if ( is_lapic )
    {
        vlapic_set_irq(vcpu_vlapic(v), irq, 0);
    }
    else
    {
        hvm_isa_irq_deassert(v->domain, irq);
        hvm_isa_irq_assert(v->domain, irq);
    }
}

static struct periodic_time *is_pt_irq(
    struct vcpu *v, struct hvm_intack intack)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    list_for_each_entry ( pt, head, list )
    {
        if ( pt->pending_intr_nr && pt->irq_issued &&
             (intack.vector == pt_irq_vector(pt, intack.source)) )
            return pt;
    }

    return NULL;
}

void pt_intr_post(struct vcpu *v, struct hvm_intack intack)
{
    struct periodic_time *pt;
    time_cb *cb;
    void *cb_priv;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt = is_pt_irq(v, intack);
    if ( pt == NULL )
    {
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
        return;
    }

    pt->irq_issued = 0;

    if ( pt->one_shot )
    {
        if ( pt->on_list )
            list_del(&pt->list);
        pt->on_list = 0;
    }
    else
    {
        if ( mode_is(v->domain, one_missed_tick_pending) ||
             mode_is(v->domain, no_missed_ticks_pending) )
        {
            pt->last_plt_gtime = hvm_get_guest_time(v);
            pt->pending_intr_nr = 0; /* 'collapse' all missed ticks */
        }
        else
        {
            pt->last_plt_gtime += pt->period_cycles;
            pt->pending_intr_nr--;
        }
    }

    if ( mode_is(v->domain, delay_for_missed_ticks) &&
         (hvm_get_guest_time(v) < pt->last_plt_gtime) )
        hvm_set_guest_time(v, pt->last_plt_gtime);

    cb = pt->cb;
    cb_priv = pt->priv;

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    if ( cb != NULL )
        cb(v, cb_priv);
}

void pt_reset(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        pt->pending_intr_nr = 0;
        pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
        pt->scheduled = NOW() + pt->period;
        set_timer(&pt->timer, pt->scheduled);
    }

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

void pt_migrate(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
        migrate_timer(&pt->timer, v->processor);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

void create_periodic_time(
    struct vcpu *v, struct periodic_time *pt, uint64_t delta,
    uint64_t period, uint8_t irq, time_cb *cb, void *data)
{
    ASSERT(pt->source != 0);

    destroy_periodic_time(pt);

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt->pending_intr_nr = 0;
    pt->do_not_freeze = 0;
    pt->irq_issued = 0;

    /* Periodic timer must be at least 0.1ms. */
    if ( (period < 100000) && period )
    {
        if ( !test_and_set_bool(pt->warned_timeout_too_short) )
            gdprintk(XENLOG_WARNING, "HVM_PlatformTime: program too "
                     "small period %"PRIu64"\n", period);
        period = 100000;
    }

    pt->period = period;
    pt->vcpu = v;
    pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
    pt->irq = irq;
    pt->period_cycles = (u64)period;
    pt->one_shot = !period;
    pt->scheduled = NOW() + delta;

    if ( !pt->one_shot )
    {
        if ( v->domain->arch.hvm_domain.params[HVM_PARAM_VPT_ALIGN] )
        {
            pt->scheduled = align_timer(pt->scheduled, pt->period);
        }
        else if ( pt->source == PTSRC_lapic )
        {
            /*
             * Offset LAPIC ticks from other timer ticks. Otherwise guests
             * which use LAPIC ticks for process accounting can see long
             * sequences of process ticks incorrectly accounted to interrupt
             * processing (seen with RHEL3 guest).
             */
            pt->scheduled += delta >> 1;
        }
    }

    pt->cb = cb;
    pt->priv = data;

    pt->on_list = 1;
    list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);

    init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
    set_timer(&pt->timer, pt->scheduled);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

void destroy_periodic_time(struct periodic_time *pt)
{
    /* Was this structure previously initialised by create_periodic_time()? */
    if ( pt->vcpu == NULL )
        return;

    pt_lock(pt);
    if ( pt->on_list )
        list_del(&pt->list);
    pt->on_list = 0;
    pt_unlock(pt);

    /*
     * pt_timer_fn() can run until this kill_timer() returns. We must do this
     * outside pt_lock() otherwise we can deadlock with pt_timer_fn().
     */
    kill_timer(&pt->timer);
}
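
For orientation, a minimal sketch of how a device model might drive this
interface. Only create_periodic_time(), destroy_periodic_time(), struct
periodic_time and the way the callback is invoked (cb(v, cb_priv) in
pt_intr_post()) come from this file; the demo_* names, the PTSRC_isa source
tag and the 10ms period are illustrative assumptions:

    /* Illustrative caller; not part of vpt.c. */
    static void demo_tick(struct vcpu *v, void *priv)
    {
        /* Per-tick device bookkeeping would go here. */
    }

    static void demo_timer_start(struct vcpu *v, struct periodic_time *pt)
    {
        pt->source = PTSRC_isa;         /* assumed ISA-IRQ source tag */
        create_periodic_time(v, pt,
                             10000000,  /* first expiry: 10ms from now (ns) */
                             10000000,  /* period: 10ms, above the 0.1ms floor */
                             0,         /* ISA IRQ 0 */
                             demo_tick, NULL);
    }

    static void demo_timer_stop(struct periodic_time *pt)
    {
        /* No-op if pt->vcpu is still NULL, i.e. the timer was never created. */
        destroy_periodic_time(pt);
    }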