ia64/xen-unstable

view xen/arch/x86/hvm/vpt.c @ 18703:6f74549ac4c5

x86, hvm: Allow 100us periodic virtual timers

Adjust the vpt and hpet minimum timer period from 900us to 100us so that
the Windows 2008 compatibility tests pass.

Signed-off-by: Peter Johnston <peter.johnston@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Oct 22 12:08:16 2008 +0100
/*
 * vpt.c: Virtual Platform Timer
 *
 * Copyright (c) 2006, Xiaowei Yang, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/time.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vpt.h>
#include <asm/event.h>

#define mode_is(d, name) \
    ((d)->arch.hvm_domain.params[HVM_PARAM_TIMER_MODE] == HVMPTM_##name)

void hvm_init_guest_time(struct domain *d)
{
    struct pl_time *pl = &d->arch.hvm_domain.pl_time;

    spin_lock_init(&pl->pl_time_lock);
    pl->stime_offset = -(u64)get_s_time();
    pl->last_guest_time = 0;
}
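/*
 * Guest time is system time plus a per-domain offset. The lock-protected
 * comparison below keeps the returned value monotonically non-decreasing
 * across vcpus, even if the underlying system time is sampled out of order.
 */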
u64 hvm_get_guest_time(struct vcpu *v)
{
    struct pl_time *pl = &v->domain->arch.hvm_domain.pl_time;
    u64 now;

    /* Called from device models shared with PV guests. Be careful. */
    ASSERT(is_hvm_vcpu(v));

    spin_lock(&pl->pl_time_lock);
    now = get_s_time() + pl->stime_offset;
    if ( (int64_t)(now - pl->last_guest_time) >= 0 )
        pl->last_guest_time = now;
    else
        now = pl->last_guest_time;
    spin_unlock(&pl->pl_time_lock);

    return now + v->arch.hvm_vcpu.stime_offset;
}

void hvm_set_guest_time(struct vcpu *v, u64 guest_time)
{
    v->arch.hvm_vcpu.stime_offset += guest_time - hvm_get_guest_time(v);
}

static int pt_irq_vector(struct periodic_time *pt, enum hvm_intsrc src)
{
    struct vcpu *v = pt->vcpu;
    unsigned int gsi, isa_irq;

    if ( pt->source == PTSRC_lapic )
        return pt->irq;

    isa_irq = pt->irq;
    gsi = hvm_isa_irq_to_gsi(isa_irq);

    if ( src == hvm_intsrc_pic )
        return (v->domain->arch.hvm_domain.vpic[isa_irq >> 3].irq_base
                + (isa_irq & 7));

    ASSERT(src == hvm_intsrc_lapic);
    return domain_vioapic(v->domain)->redirtbl[gsi].fields.vector;
}

static int pt_irq_masked(struct periodic_time *pt)
{
    struct vcpu *v = pt->vcpu;
    unsigned int gsi, isa_irq;
    uint8_t pic_imr;

    if ( pt->source == PTSRC_lapic )
    {
        struct vlapic *vlapic = vcpu_vlapic(v);
        return (!vlapic_enabled(vlapic) ||
                (vlapic_get_reg(vlapic, APIC_LVTT) & APIC_LVT_MASKED));
    }

    isa_irq = pt->irq;
    gsi = hvm_isa_irq_to_gsi(isa_irq);
    pic_imr = v->domain->arch.hvm_domain.vpic[isa_irq >> 3].imr;

    return (((pic_imr & (1 << (isa_irq & 7))) || !vlapic_accept_pic_intr(v)) &&
            domain_vioapic(v->domain)->redirtbl[gsi].fields.mask);
}
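/*
 * pt->vcpu can change while we wait for its tm_lock, so loop until the
 * vcpu we locked is still the timer's owner. pt_unlock() must pair with
 * a successful pt_lock().
 */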
static void pt_lock(struct periodic_time *pt)
{
    struct vcpu *v;

    for ( ; ; )
    {
        v = pt->vcpu;
        spin_lock(&v->arch.hvm_vcpu.tm_lock);
        if ( likely(pt->vcpu == v) )
            break;
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
    }
}

static void pt_unlock(struct periodic_time *pt)
{
    spin_unlock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
}
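/*
 * Account for ticks that expired while the timer was stopped: advance
 * 'scheduled' past them all, and either add them to the pending-interrupt
 * backlog or, in no_missed_ticks_pending mode, drop them (keeping the
 * timer running across pt_save_timer() if nothing is pending yet).
 */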
static void pt_process_missed_ticks(struct periodic_time *pt)
{
    s_time_t missed_ticks, now = NOW();

    if ( pt->one_shot )
        return;

    missed_ticks = now - pt->scheduled;
    if ( missed_ticks <= 0 )
        return;

    missed_ticks = missed_ticks / (s_time_t) pt->period + 1;
    if ( mode_is(pt->vcpu->domain, no_missed_ticks_pending) )
        pt->do_not_freeze = !pt->pending_intr_nr;
    else
        pt->pending_intr_nr += missed_ticks;
    pt->scheduled += missed_ticks * pt->period;
}

static void pt_freeze_time(struct vcpu *v)
{
    if ( !mode_is(v->domain, delay_for_missed_ticks) )
        return;

    v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
}

static void pt_thaw_time(struct vcpu *v)
{
    if ( !mode_is(v->domain, delay_for_missed_ticks) )
        return;

    if ( v->arch.hvm_vcpu.guest_time == 0 )
        return;

    hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
    v->arch.hvm_vcpu.guest_time = 0;
}
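/*
 * pt_save_timer()/pt_restore_timer() bracket a vcpu being descheduled and
 * rescheduled: stop the timers (and, in delay_for_missed_ticks mode, freeze
 * guest time) on the way out; process the backlog, re-arm the timers, and
 * thaw guest time on the way back in.
 */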
void pt_save_timer(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    if ( test_bit(_VPF_blocked, &v->pause_flags) )
        return;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
        if ( !pt->do_not_freeze )
            stop_timer(&pt->timer);

    pt_freeze_time(v);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

void pt_restore_timer(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        pt_process_missed_ticks(pt);
        set_timer(&pt->timer, pt->scheduled);
    }

    pt_thaw_time(v);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

static void pt_timer_fn(void *data)
{
    struct periodic_time *pt = data;

    pt_lock(pt);

    pt->pending_intr_nr++;
    pt->do_not_freeze = 0;

    if ( !pt->one_shot )
    {
        pt->scheduled += pt->period;
        pt_process_missed_ticks(pt);
        set_timer(&pt->timer, pt->scheduled);
    }

    vcpu_kick(pt->vcpu);

    pt_unlock(pt);
}
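/*
 * Of all unmasked timers with ticks pending, pick the one whose next
 * delivery deadline (last_plt_gtime + period_cycles) is earliest, and
 * inject its interrupt: directly into the vLAPIC for LAPIC-sourced timers,
 * otherwise as a deassert/assert pulse on the ISA IRQ line.
 */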
void pt_update_irq(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt, *earliest_pt = NULL;
    uint64_t max_lag = -1ULL;
    int irq, is_lapic;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        if ( !pt_irq_masked(pt) && pt->pending_intr_nr &&
             ((pt->last_plt_gtime + pt->period_cycles) < max_lag) )
        {
            max_lag = pt->last_plt_gtime + pt->period_cycles;
            earliest_pt = pt;
        }
    }

    if ( earliest_pt == NULL )
    {
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
        return;
    }

    earliest_pt->irq_issued = 1;
    irq = earliest_pt->irq;
    is_lapic = (earliest_pt->source == PTSRC_lapic);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    if ( is_lapic )
    {
        vlapic_set_irq(vcpu_vlapic(v), irq, 0);
    }
    else
    {
        hvm_isa_irq_deassert(v->domain, irq);
        hvm_isa_irq_assert(v->domain, irq);
    }
}

static struct periodic_time *is_pt_irq(
    struct vcpu *v, struct hvm_intack intack)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    list_for_each_entry ( pt, head, list )
    {
        if ( pt->pending_intr_nr && pt->irq_issued &&
             (intack.vector == pt_irq_vector(pt, intack.source)) )
            return pt;
    }

    return NULL;
}
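/*
 * Called when the guest acknowledges a timer interrupt: consume one tick
 * from the backlog. The *_pending modes collapse all outstanding ticks
 * into the one just delivered; otherwise the backlog is decremented and
 * last_plt_gtime advances by one period. In delay_for_missed_ticks mode,
 * guest time is pulled forward to the delivered tick if it lags behind.
 */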
void pt_intr_post(struct vcpu *v, struct hvm_intack intack)
{
    struct periodic_time *pt;
    time_cb *cb;
    void *cb_priv;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt = is_pt_irq(v, intack);
    if ( pt == NULL )
    {
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
        return;
    }

    pt->irq_issued = 0;

    if ( pt->one_shot )
    {
        if ( pt->on_list )
            list_del(&pt->list);
        pt->on_list = 0;
    }
    else
    {
        if ( mode_is(v->domain, one_missed_tick_pending) ||
             mode_is(v->domain, no_missed_ticks_pending) )
        {
            pt->last_plt_gtime = hvm_get_guest_time(v);
            pt->pending_intr_nr = 0; /* 'collapse' all missed ticks */
        }
        else
        {
            pt->last_plt_gtime += pt->period_cycles;
            pt->pending_intr_nr--;
        }
    }

    if ( mode_is(v->domain, delay_for_missed_ticks) &&
         (hvm_get_guest_time(v) < pt->last_plt_gtime) )
        hvm_set_guest_time(v, pt->last_plt_gtime);

    cb = pt->cb;
    cb_priv = pt->priv;

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    if ( cb != NULL )
        cb(v, cb_priv);
}

void pt_reset(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        pt->pending_intr_nr = 0;
        pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
        pt->scheduled = NOW() + pt->period;
        set_timer(&pt->timer, pt->scheduled);
    }

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

void pt_migrate(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
        migrate_timer(&pt->timer, v->processor);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
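/*
 * (Re)arm a virtual platform timer: first expiry 'delta' ns from now and
 * every 'period' ns thereafter; period == 0 makes it one-shot. Periods
 * below 100us are clamped to 100us (this changeset lowers the floor from
 * 900us). An illustrative call from a device model, with hypothetical
 * names, arming a 10ms tick on ISA IRQ 0:
 *
 *     create_periodic_time(v, &pit->pt0, period_ns, period_ns, 0,
 *                          pt_tick_cb, pit);
 */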
void create_periodic_time(
    struct vcpu *v, struct periodic_time *pt, uint64_t delta,
    uint64_t period, uint8_t irq, time_cb *cb, void *data)
{
    ASSERT(pt->source != 0);

    destroy_periodic_time(pt);

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt->pending_intr_nr = 0;
    pt->do_not_freeze = 0;
    pt->irq_issued = 0;

    /* Periodic timer must be at least 0.1ms. */
    if ( (period < 100000) && period )
    {
        if ( !test_and_set_bool(pt->warned_timeout_too_short) )
            gdprintk(XENLOG_WARNING, "HVM_PlatformTime: program too "
                     "small period %"PRIu64"\n", period);
        period = 100000;
    }

    pt->period = period;
    pt->vcpu = v;
    pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
    pt->irq = irq;
    pt->period_cycles = (u64)period;
    pt->one_shot = !period;
    pt->scheduled = NOW() + delta;
    /*
     * Offset LAPIC ticks from other timer ticks. Otherwise guests which use
     * LAPIC ticks for process accounting can see long sequences of process
     * ticks incorrectly accounted to interrupt processing.
     */
    if ( pt->source == PTSRC_lapic )
        pt->scheduled += delta >> 1;
    pt->cb = cb;
    pt->priv = data;

    pt->on_list = 1;
    list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);

    init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
    set_timer(&pt->timer, pt->scheduled);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

void destroy_periodic_time(struct periodic_time *pt)
{
    /* Was this structure previously initialised by create_periodic_time()? */
    if ( pt->vcpu == NULL )
        return;

    pt_lock(pt);
    if ( pt->on_list )
        list_del(&pt->list);
    pt->on_list = 0;
    pt_unlock(pt);

    /*
     * pt_timer_fn() can run until this kill_timer() returns. We must do this
     * outside pt_lock() otherwise we can deadlock with pt_timer_fn().
     */
    kill_timer(&pt->timer);
}