ia64/xen-unstable

xen/arch/x86/hvm/vpt.c @ 19823:82bbce59b65d

save/restore: Save guest's preferred TSC frequency in image

For save/restore or live migration between two platforms with different
TSC frequencies, the guest's preferred TSC frequency is required to
calculate the guest's TSC after restore, so save it in the image header.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jun 24 10:48:21 2009 +0100 (2009-06-24)
parents d4c9a63a1642
children 81edfffb3aff
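
As a rough illustration of the intent (a hedged sketch: the helper below is
hypothetical, and the actual scaling lives in the save/restore path rather
than in this file), a destination host can rescale a restored guest TSC from
the saved gtsc_khz to its own cpu_khz along these lines:

    /*
     * Hypothetical sketch: rescale a guest TSC value recorded on a host
     * whose TSC ran at src_khz for a destination host running at dst_khz.
     * muldiv64() is assumed to compute (a * b) / c without intermediate
     * overflow.
     */
    static uint64_t rescale_guest_tsc(uint64_t tsc, uint32_t src_khz,
                                      uint32_t dst_khz)
    {
        return muldiv64(tsc, dst_khz, src_khz);
    }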
/*
 * vpt.c: Virtual Platform Timer
 *
 * Copyright (c) 2006, Xiaowei Yang, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/time.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vpt.h>
#include <asm/event.h>

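/* True iff domain d's HVM_PARAM_TIMER_MODE is HVMPTM_<name>. */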
#define mode_is(d, name) \
    ((d)->arch.hvm_domain.params[HVM_PARAM_TIMER_MODE] == HVMPTM_##name)

void hvm_init_guest_time(struct domain *d)
{
    struct pl_time *pl = &d->arch.hvm_domain.pl_time;

    spin_lock_init(&pl->pl_time_lock);
    pl->stime_offset = -(u64)get_s_time();
    pl->last_guest_time = 0;

    d->arch.hvm_domain.gtsc_khz = cpu_khz;
}

u64 hvm_get_guest_time(struct vcpu *v)
{
    struct pl_time *pl = &v->domain->arch.hvm_domain.pl_time;
    u64 now;

    /* Called from device models shared with PV guests. Be careful. */
    ASSERT(is_hvm_vcpu(v));

    spin_lock(&pl->pl_time_lock);
    now = get_s_time() + pl->stime_offset;
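    /* Never let guest time go backwards: clamp to the last value handed out. */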
    if ( (int64_t)(now - pl->last_guest_time) >= 0 )
        pl->last_guest_time = now;
    else
        now = pl->last_guest_time;
    spin_unlock(&pl->pl_time_lock);

    return now + v->arch.hvm_vcpu.stime_offset;
}

void hvm_set_guest_time(struct vcpu *v, u64 guest_time)
{
    v->arch.hvm_vcpu.stime_offset += guest_time - hvm_get_guest_time(v);
}

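/* Return the guest vector through which this timer's interrupt is delivered. */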
static int pt_irq_vector(struct periodic_time *pt, enum hvm_intsrc src)
{
    struct vcpu *v = pt->vcpu;
    unsigned int gsi, isa_irq;

    if ( pt->source == PTSRC_lapic )
        return pt->irq;

    isa_irq = pt->irq;
    gsi = hvm_isa_irq_to_gsi(isa_irq);

    if ( src == hvm_intsrc_pic )
        return (v->domain->arch.hvm_domain.vpic[isa_irq >> 3].irq_base
                + (isa_irq & 7));

    ASSERT(src == hvm_intsrc_lapic);
    return domain_vioapic(v->domain)->redirtbl[gsi].fields.vector;
}

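/* Is this timer's interrupt masked at every controller that could deliver it? */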
static int pt_irq_masked(struct periodic_time *pt)
{
    struct vcpu *v = pt->vcpu;
    unsigned int gsi, isa_irq;
    uint8_t pic_imr;

    if ( pt->source == PTSRC_lapic )
    {
        struct vlapic *vlapic = vcpu_vlapic(v);
        return (!vlapic_enabled(vlapic) ||
                (vlapic_get_reg(vlapic, APIC_LVTT) & APIC_LVT_MASKED));
    }

    isa_irq = pt->irq;
    gsi = hvm_isa_irq_to_gsi(isa_irq);
    pic_imr = v->domain->arch.hvm_domain.vpic[isa_irq >> 3].imr;

    return (((pic_imr & (1 << (isa_irq & 7))) || !vlapic_accept_pic_intr(v)) &&
            domain_vioapic(v->domain)->redirtbl[gsi].fields.mask);
}

static void pt_lock(struct periodic_time *pt)
{
    struct vcpu *v;

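    /*
     * pt->vcpu may be changed by another CPU before we acquire the lock,
     * so re-check it under the lock and retry if it has moved.
     */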
    for ( ; ; )
    {
        v = pt->vcpu;
        spin_lock(&v->arch.hvm_vcpu.tm_lock);
        if ( likely(pt->vcpu == v) )
            break;
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
    }
}

static void pt_unlock(struct periodic_time *pt)
{
    spin_unlock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
}

static void pt_process_missed_ticks(struct periodic_time *pt)
{
    s_time_t missed_ticks, now = NOW();

    if ( pt->one_shot )
        return;

    missed_ticks = now - pt->scheduled;
    if ( missed_ticks <= 0 )
        return;

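    /* Convert elapsed time into a whole number of missed periods, rounded up. */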
    missed_ticks = missed_ticks / (s_time_t) pt->period + 1;
    if ( mode_is(pt->vcpu->domain, no_missed_ticks_pending) )
        pt->do_not_freeze = !pt->pending_intr_nr;
    else
        pt->pending_intr_nr += missed_ticks;
    pt->scheduled += missed_ticks * pt->period;
}

static void pt_freeze_time(struct vcpu *v)
{
    if ( !mode_is(v->domain, delay_for_missed_ticks) )
        return;

    v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
}

static void pt_thaw_time(struct vcpu *v)
{
    if ( !mode_is(v->domain, delay_for_missed_ticks) )
        return;

    if ( v->arch.hvm_vcpu.guest_time == 0 )
        return;

    hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
    v->arch.hvm_vcpu.guest_time = 0;
}

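/*
 * Called when a vCPU is descheduled: stop its periodic timers, unless it is
 * merely blocked, in which case a pending tick must still be able to wake it.
 */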
void pt_save_timer(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    if ( test_bit(_VPF_blocked, &v->pause_flags) )
        return;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
        if ( !pt->do_not_freeze )
            stop_timer(&pt->timer);

    pt_freeze_time(v);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

void pt_restore_timer(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        pt_process_missed_ticks(pt);
        set_timer(&pt->timer, pt->scheduled);
    }

    pt_thaw_time(v);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

static void pt_timer_fn(void *data)
{
    struct periodic_time *pt = data;

    pt_lock(pt);

    pt->pending_intr_nr++;
    pt->do_not_freeze = 0;

    if ( !pt->one_shot )
    {
        pt->scheduled += pt->period;
        pt_process_missed_ticks(pt);
        set_timer(&pt->timer, pt->scheduled);
    }

    if ( !pt_irq_masked(pt) )
        vcpu_kick(pt->vcpu);

    pt_unlock(pt);
}

void pt_update_irq(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt, *earliest_pt = NULL;
    uint64_t max_lag = -1ULL;
    int irq, is_lapic;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

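    /* Find the unmasked timer with pending ticks whose deadline is earliest. */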
    list_for_each_entry ( pt, head, list )
    {
        if ( !pt_irq_masked(pt) && pt->pending_intr_nr &&
             ((pt->last_plt_gtime + pt->period_cycles) < max_lag) )
        {
            max_lag = pt->last_plt_gtime + pt->period_cycles;
            earliest_pt = pt;
        }
    }

    if ( earliest_pt == NULL )
    {
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
        return;
    }

    earliest_pt->irq_issued = 1;
    irq = earliest_pt->irq;
    is_lapic = (earliest_pt->source == PTSRC_lapic);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    if ( is_lapic )
    {
        vlapic_set_irq(vcpu_vlapic(v), irq, 0);
    }
    else
    {
        hvm_isa_irq_deassert(v->domain, irq);
        hvm_isa_irq_assert(v->domain, irq);
    }
}

static struct periodic_time *is_pt_irq(
    struct vcpu *v, struct hvm_intack intack)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    list_for_each_entry ( pt, head, list )
    {
        if ( pt->pending_intr_nr && pt->irq_issued &&
             (intack.vector == pt_irq_vector(pt, intack.source)) )
            return pt;
    }

    return NULL;
}

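/*
 * Called after a timer interrupt has been injected into the guest: account
 * for the delivered tick and adjust guest time according to the timer mode.
 */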
void pt_intr_post(struct vcpu *v, struct hvm_intack intack)
{
    struct periodic_time *pt;
    time_cb *cb;
    void *cb_priv;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt = is_pt_irq(v, intack);
    if ( pt == NULL )
    {
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
        return;
    }

    pt->irq_issued = 0;

    if ( pt->one_shot )
    {
        if ( pt->on_list )
            list_del(&pt->list);
        pt->on_list = 0;
    }
    else
    {
        if ( mode_is(v->domain, one_missed_tick_pending) ||
             mode_is(v->domain, no_missed_ticks_pending) )
        {
            pt->last_plt_gtime = hvm_get_guest_time(v);
            pt->pending_intr_nr = 0; /* 'collapse' all missed ticks */
        }
        else
        {
            pt->last_plt_gtime += pt->period_cycles;
            pt->pending_intr_nr--;
        }
    }

    if ( mode_is(v->domain, delay_for_missed_ticks) &&
         (hvm_get_guest_time(v) < pt->last_plt_gtime) )
        hvm_set_guest_time(v, pt->last_plt_gtime);

    cb = pt->cb;
    cb_priv = pt->priv;

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    if ( cb != NULL )
        cb(v, cb_priv);
}

void pt_reset(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        pt->pending_intr_nr = 0;
        pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
        pt->scheduled = NOW() + pt->period;
        set_timer(&pt->timer, pt->scheduled);
    }

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

void pt_migrate(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
        migrate_timer(&pt->timer, v->processor);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

void create_periodic_time(
    struct vcpu *v, struct periodic_time *pt, uint64_t delta,
    uint64_t period, uint8_t irq, time_cb *cb, void *data)
{
    ASSERT(pt->source != 0);

    destroy_periodic_time(pt);

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt->pending_intr_nr = 0;
    pt->do_not_freeze = 0;
    pt->irq_issued = 0;

    /* Periodic timer must be at least 0.1ms. */
    if ( (period < 100000) && period )
    {
        if ( !test_and_set_bool(pt->warned_timeout_too_short) )
            gdprintk(XENLOG_WARNING, "HVM_PlatformTime: program too "
                     "small period %"PRIu64"\n", period);
        period = 100000;
    }

    pt->period = period;
    pt->vcpu = v;
    pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
    pt->irq = irq;
    pt->period_cycles = (u64)period;
    pt->one_shot = !period;
    pt->scheduled = NOW() + delta;

    if ( !pt->one_shot )
    {
        if ( v->domain->arch.hvm_domain.params[HVM_PARAM_VPT_ALIGN] )
        {
            pt->scheduled = align_timer(pt->scheduled, pt->period);
        }
        else if ( pt->source == PTSRC_lapic )
        {
            /*
             * Offset LAPIC ticks from other timer ticks. Otherwise guests
             * which use LAPIC ticks for process accounting can see long
             * sequences of process ticks incorrectly accounted to interrupt
             * processing (seen with RHEL3 guest).
             */
            pt->scheduled += delta >> 1;
        }
    }

    pt->cb = cb;
    pt->priv = data;

    pt->on_list = 1;
    list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);

    init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
    set_timer(&pt->timer, pt->scheduled);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

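/*
 * Usage sketch (hypothetical caller; the real callers are the emulated
 * platform timers, e.g. PIT/RTC/LAPIC, elsewhere in Xen). A 100Hz periodic
 * tick on ISA IRQ 0 might be programmed roughly as:
 *
 *     pt->source = PTSRC_isa;
 *     create_periodic_time(v, pt, MILLISECS(10), MILLISECS(10), 0, cb, priv);
 *
 * pt->source must be set before the call (see the ASSERT above). A zero
 * period instead creates a one-shot timer that fires once after 'delta'.
 */
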
void destroy_periodic_time(struct periodic_time *pt)
{
    /* Was this structure previously initialised by create_periodic_time()? */
    if ( pt->vcpu == NULL )
        return;

    pt_lock(pt);
    if ( pt->on_list )
        list_del(&pt->list);
    pt->on_list = 0;
    pt_unlock(pt);

    /*
     * pt_timer_fn() can run until this kill_timer() returns. We must do this
     * outside pt_lock() otherwise we can deadlock with pt_timer_fn().
     */
    kill_timer(&pt->timer);
}