ia64/xen-unstable

view xen/arch/x86/hvm/vpt.c @ 19823:82bbce59b65d

save/restore: Save guest's preferred TSC frequency in the image

For save/restore or live migration between two platforms with different
TSC frequencies, the guest's preferred TSC frequency is required to
calculate the guest's TSC after restore, so save it in the image header.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jun 24 10:48:21 2009 +0100 (2009-06-24)
parents d4c9a63a1642
children 81edfffb3aff
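
The calculation the commit message alludes to is a frequency conversion. As a
minimal sketch (a hypothetical helper for illustration, not code from this
changeset): given the saved guest frequency (gtsc_khz, recorded in the image
header) and the new host's TSC frequency (cpu_khz), elapsed host TSC ticks can
be rescaled into guest TSC ticks after restore:

#include <stdint.h>

/*
 * Hypothetical illustration only: rescale an elapsed host TSC delta into
 * the guest's TSC domain, assuming the guest expects its TSC to tick at
 * gtsc_khz while the new host's TSC ticks at cpu_khz.
 * (Overflow handling is omitted for brevity.)
 */
static inline uint64_t scale_tsc_to_guest(uint64_t host_tsc_delta,
                                          uint32_t gtsc_khz,
                                          uint32_t cpu_khz)
{
    /* guest_delta / gtsc_khz == host_delta / cpu_khz */
    return host_tsc_delta * gtsc_khz / cpu_khz;
}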
line source
/*
 * vpt.c: Virtual Platform Timer
 *
 * Copyright (c) 2006, Xiaowei Yang, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <xen/time.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vpt.h>
#include <asm/event.h>

#define mode_is(d, name) \
    ((d)->arch.hvm_domain.params[HVM_PARAM_TIMER_MODE] == HVMPTM_##name)
/*
 * Initialise per-domain platform time. gtsc_khz records the guest's
 * preferred TSC frequency (the host TSC frequency at domain creation),
 * which is saved in the image header for save/restore.
 */
void hvm_init_guest_time(struct domain *d)
{
    struct pl_time *pl = &d->arch.hvm_domain.pl_time;

    spin_lock_init(&pl->pl_time_lock);
    pl->stime_offset = -(u64)get_s_time();
    pl->last_guest_time = 0;

    d->arch.hvm_domain.gtsc_khz = cpu_khz;
}
u64 hvm_get_guest_time(struct vcpu *v)
{
    struct pl_time *pl = &v->domain->arch.hvm_domain.pl_time;
    u64 now;

    /* Called from device models shared with PV guests. Be careful. */
    ASSERT(is_hvm_vcpu(v));

    spin_lock(&pl->pl_time_lock);
    now = get_s_time() + pl->stime_offset;
    if ( (int64_t)(now - pl->last_guest_time) >= 0 )
        pl->last_guest_time = now;
    else
        now = pl->last_guest_time;
    spin_unlock(&pl->pl_time_lock);

    return now + v->arch.hvm_vcpu.stime_offset;
}
void hvm_set_guest_time(struct vcpu *v, u64 guest_time)
{
    v->arch.hvm_vcpu.stime_offset += guest_time - hvm_get_guest_time(v);
}
static int pt_irq_vector(struct periodic_time *pt, enum hvm_intsrc src)
{
    struct vcpu *v = pt->vcpu;
    unsigned int gsi, isa_irq;

    if ( pt->source == PTSRC_lapic )
        return pt->irq;

    isa_irq = pt->irq;
    gsi = hvm_isa_irq_to_gsi(isa_irq);

    if ( src == hvm_intsrc_pic )
        return (v->domain->arch.hvm_domain.vpic[isa_irq >> 3].irq_base
                + (isa_irq & 7));

    ASSERT(src == hvm_intsrc_lapic);
    return domain_vioapic(v->domain)->redirtbl[gsi].fields.vector;
}
static int pt_irq_masked(struct periodic_time *pt)
{
    struct vcpu *v = pt->vcpu;
    unsigned int gsi, isa_irq;
    uint8_t pic_imr;

    if ( pt->source == PTSRC_lapic )
    {
        struct vlapic *vlapic = vcpu_vlapic(v);
        return (!vlapic_enabled(vlapic) ||
                (vlapic_get_reg(vlapic, APIC_LVTT) & APIC_LVT_MASKED));
    }

    isa_irq = pt->irq;
    gsi = hvm_isa_irq_to_gsi(isa_irq);
    pic_imr = v->domain->arch.hvm_domain.vpic[isa_irq >> 3].imr;

    return (((pic_imr & (1 << (isa_irq & 7))) || !vlapic_accept_pic_intr(v)) &&
            domain_vioapic(v->domain)->redirtbl[gsi].fields.mask);
}
/*
 * Lock the vcpu that currently owns this timer. The timer may be
 * re-targeted to another vcpu concurrently, so re-check pt->vcpu after
 * taking the lock.
 */
static void pt_lock(struct periodic_time *pt)
{
    struct vcpu *v;

    for ( ; ; )
    {
        v = pt->vcpu;
        spin_lock(&v->arch.hvm_vcpu.tm_lock);
        if ( likely(pt->vcpu == v) )
            break;
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
    }
}
static void pt_unlock(struct periodic_time *pt)
{
    spin_unlock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
}
/*
 * Account for periods that have elapsed since the timer was last due.
 * Depending on the domain's timer mode, missed ticks are either queued
 * for later injection or dropped.
 */
static void pt_process_missed_ticks(struct periodic_time *pt)
{
    s_time_t missed_ticks, now = NOW();

    if ( pt->one_shot )
        return;

    missed_ticks = now - pt->scheduled;
    if ( missed_ticks <= 0 )
        return;

    missed_ticks = missed_ticks / (s_time_t) pt->period + 1;
    if ( mode_is(pt->vcpu->domain, no_missed_ticks_pending) )
        pt->do_not_freeze = !pt->pending_intr_nr;
    else
        pt->pending_intr_nr += missed_ticks;
    pt->scheduled += missed_ticks * pt->period;
}
static void pt_freeze_time(struct vcpu *v)
{
    if ( !mode_is(v->domain, delay_for_missed_ticks) )
        return;

    v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
}

static void pt_thaw_time(struct vcpu *v)
{
    if ( !mode_is(v->domain, delay_for_missed_ticks) )
        return;

    if ( v->arch.hvm_vcpu.guest_time == 0 )
        return;

    hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
    v->arch.hvm_vcpu.guest_time = 0;
}
/* Called when the vcpu is scheduled out: stop its virtual timers. */
void pt_save_timer(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    if ( test_bit(_VPF_blocked, &v->pause_flags) )
        return;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
        if ( !pt->do_not_freeze )
            stop_timer(&pt->timer);

    pt_freeze_time(v);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
/* Called when the vcpu is scheduled in: catch up missed ticks and re-arm. */
void pt_restore_timer(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        pt_process_missed_ticks(pt);
        set_timer(&pt->timer, pt->scheduled);
    }

    pt_thaw_time(v);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
/* Xen timer callback: record a pending tick and kick the target vcpu. */
static void pt_timer_fn(void *data)
{
    struct periodic_time *pt = data;

    pt_lock(pt);

    pt->pending_intr_nr++;
    pt->do_not_freeze = 0;

    if ( !pt->one_shot )
    {
        pt->scheduled += pt->period;
        pt_process_missed_ticks(pt);
        set_timer(&pt->timer, pt->scheduled);
    }

    if ( !pt_irq_masked(pt) )
        vcpu_kick(pt->vcpu);

    pt_unlock(pt);
}
/*
 * Assert the interrupt of the pending, unmasked timer that has fallen
 * furthest behind guest time, if any.
 */
void pt_update_irq(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt, *earliest_pt = NULL;
    uint64_t max_lag = -1ULL;
    int irq, is_lapic;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        if ( !pt_irq_masked(pt) && pt->pending_intr_nr &&
             ((pt->last_plt_gtime + pt->period_cycles) < max_lag) )
        {
            max_lag = pt->last_plt_gtime + pt->period_cycles;
            earliest_pt = pt;
        }
    }

    if ( earliest_pt == NULL )
    {
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
        return;
    }

    earliest_pt->irq_issued = 1;
    irq = earliest_pt->irq;
    is_lapic = (earliest_pt->source == PTSRC_lapic);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    if ( is_lapic )
    {
        vlapic_set_irq(vcpu_vlapic(v), irq, 0);
    }
    else
    {
        hvm_isa_irq_deassert(v->domain, irq);
        hvm_isa_irq_assert(v->domain, irq);
    }
}
static struct periodic_time *is_pt_irq(
    struct vcpu *v, struct hvm_intack intack)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    list_for_each_entry ( pt, head, list )
    {
        if ( pt->pending_intr_nr && pt->irq_issued &&
             (intack.vector == pt_irq_vector(pt, intack.source)) )
            return pt;
    }

    return NULL;
}
/*
 * Called after the guest acknowledges a timer interrupt: update the
 * timer's accounting according to the domain's timer mode and invoke
 * the registered callback.
 */
void pt_intr_post(struct vcpu *v, struct hvm_intack intack)
{
    struct periodic_time *pt;
    time_cb *cb;
    void *cb_priv;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt = is_pt_irq(v, intack);
    if ( pt == NULL )
    {
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
        return;
    }

    pt->irq_issued = 0;

    if ( pt->one_shot )
    {
        if ( pt->on_list )
            list_del(&pt->list);
        pt->on_list = 0;
    }
    else
    {
        if ( mode_is(v->domain, one_missed_tick_pending) ||
             mode_is(v->domain, no_missed_ticks_pending) )
        {
            pt->last_plt_gtime = hvm_get_guest_time(v);
            pt->pending_intr_nr = 0; /* 'collapse' all missed ticks */
        }
        else
        {
            pt->last_plt_gtime += pt->period_cycles;
            pt->pending_intr_nr--;
        }
    }

    if ( mode_is(v->domain, delay_for_missed_ticks) &&
         (hvm_get_guest_time(v) < pt->last_plt_gtime) )
        hvm_set_guest_time(v, pt->last_plt_gtime);

    cb = pt->cb;
    cb_priv = pt->priv;

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    if ( cb != NULL )
        cb(v, cb_priv);
}
void pt_reset(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        pt->pending_intr_nr = 0;
        pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
        pt->scheduled = NOW() + pt->period;
        set_timer(&pt->timer, pt->scheduled);
    }

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
void pt_migrate(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
        migrate_timer(&pt->timer, v->processor);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
/*
 * Initialise and start a virtual platform timer. A zero period creates a
 * one-shot timer; otherwise the timer fires every 'period' ns, first
 * firing 'delta' ns from now.
 */
void create_periodic_time(
    struct vcpu *v, struct periodic_time *pt, uint64_t delta,
    uint64_t period, uint8_t irq, time_cb *cb, void *data)
{
    ASSERT(pt->source != 0);

    destroy_periodic_time(pt);

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt->pending_intr_nr = 0;
    pt->do_not_freeze = 0;
    pt->irq_issued = 0;

    /* Periodic timer must be at least 0.1ms. */
    if ( (period < 100000) && period )
    {
        if ( !test_and_set_bool(pt->warned_timeout_too_short) )
            gdprintk(XENLOG_WARNING, "HVM_PlatformTime: program too "
                     "small period %"PRIu64"\n", period);
        period = 100000;
    }

    pt->period = period;
    pt->vcpu = v;
    pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
    pt->irq = irq;
    pt->period_cycles = (u64)period;
    pt->one_shot = !period;
    pt->scheduled = NOW() + delta;

    if ( !pt->one_shot )
    {
        if ( v->domain->arch.hvm_domain.params[HVM_PARAM_VPT_ALIGN] )
        {
            pt->scheduled = align_timer(pt->scheduled, pt->period);
        }
        else if ( pt->source == PTSRC_lapic )
        {
            /*
             * Offset LAPIC ticks from other timer ticks. Otherwise guests
             * which use LAPIC ticks for process accounting can see long
             * sequences of process ticks incorrectly accounted to interrupt
             * processing (seen with RHEL3 guest).
             */
            pt->scheduled += delta >> 1;
        }
    }

    pt->cb = cb;
    pt->priv = data;

    pt->on_list = 1;
    list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);

    init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
    set_timer(&pt->timer, pt->scheduled);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
void destroy_periodic_time(struct periodic_time *pt)
{
    /* Was this structure previously initialised by create_periodic_time()? */
    if ( pt->vcpu == NULL )
        return;

    pt_lock(pt);
    if ( pt->on_list )
        list_del(&pt->list);
    pt->on_list = 0;
    pt_unlock(pt);

    /*
     * pt_timer_fn() can run until this kill_timer() returns. We must do this
     * outside pt_lock() otherwise we can deadlock with pt_timer_fn().
     */
    kill_timer(&pt->timer);
}