ia64/xen-unstable

view xen/arch/x86/hvm/hpet.c @ 16410:ae6f4c7f15cb

hvm: Do not crash guest if it does an unaligned access to an HPET
register. Some CrashMe test apparently does this and we don't want
that to crash the domain it runs in.
From: Dexuan Cui <dexuan.cui@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Nov 21 09:49:09 2007 +0000 (2007-11-21)
parents 74a3ee6e1795
children c00f31f27de6
line source
1 /*
2 * hpet.c: HPET emulation for HVM guests.
3 * Copyright (c) 2006, Intel Corporation.
4 * Copyright (c) 2006, Keir Fraser <keir@xensource.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
17 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 */
20 #include <asm/hvm/vpt.h>
21 #include <asm/hvm/io.h>
22 #include <asm/hvm/support.h>
23 #include <asm/current.h>
24 #include <xen/sched.h>
25 #include <xen/event.h>
27 #define HPET_BASE_ADDRESS 0xfed00000ULL
28 #define HPET_MMAP_SIZE 1024
29 #define S_TO_NS 1000000000ULL /* 1s = 10^9 ns */
30 #define S_TO_FS 1000000000000000ULL /* 1s = 10^15 fs */
32 /* Frequency_of_TSC / frequency_of_HPET = 32 */
33 #define TSC_PER_HPET_TICK 32
34 #define guest_time_hpet(v) (hvm_get_guest_time(v) / TSC_PER_HPET_TICK)
36 #define HPET_ID 0x000
37 #define HPET_PERIOD 0x004
38 #define HPET_CFG 0x010
39 #define HPET_STATUS 0x020
40 #define HPET_COUNTER 0x0f0
41 #define HPET_T0_CFG 0x100
42 #define HPET_T0_CMP 0x108
43 #define HPET_T0_ROUTE 0x110
44 #define HPET_T1_CFG 0x120
45 #define HPET_T1_CMP 0x128
46 #define HPET_T1_ROUTE 0x130
47 #define HPET_T2_CFG 0x140
48 #define HPET_T2_CMP 0x148
49 #define HPET_T2_ROUTE 0x150
50 #define HPET_T3_CFG 0x160
52 #define HPET_CFG_ENABLE 0x001
53 #define HPET_CFG_LEGACY 0x002
55 #define HPET_TN_INT_TYPE_LEVEL 0x002
56 #define HPET_TN_ENABLE 0x004
57 #define HPET_TN_PERIODIC 0x008
58 #define HPET_TN_PERIODIC_CAP 0x010
59 #define HPET_TN_SIZE_CAP 0x020
60 #define HPET_TN_SETVAL 0x040
61 #define HPET_TN_32BIT 0x100
62 #define HPET_TN_INT_ROUTE_MASK 0x3e00
63 #define HPET_TN_INT_ROUTE_SHIFT 9
64 #define HPET_TN_INT_ROUTE_CAP_SHIFT 32
65 #define HPET_TN_CFG_BITS_READONLY_OR_RESERVED 0xffff80b1U
67 /* can be routed to IOAPIC.redirect_table[23..20] */
68 #define HPET_TN_INT_ROUTE_CAP (0x00f00000ULL \
69 << HPET_TN_INT_ROUTE_CAP_SHIFT)
71 #define HPET_TN_INT_ROUTE_CAP_MASK (0xffffffffULL \
72 << HPET_TN_INT_ROUTE_CAP_SHIFT)
74 #define hpet_tick_to_ns(h, tick) ((s_time_t)(tick)* \
75 (S_TO_NS*TSC_PER_HPET_TICK)/h->tsc_freq)
77 #define timer_config(h, n) (h->hpet.timers[n].config)
78 #define timer_enabled(h, n) (timer_config(h, n) & HPET_TN_ENABLE)
79 #define timer_is_periodic(h, n) (timer_config(h, n) & HPET_TN_PERIODIC)
80 #define timer_is_32bit(h, n) (timer_config(h, n) & HPET_TN_32BIT)
81 #define hpet_enabled(h) (h->hpet.config & HPET_CFG_ENABLE)
82 #define timer_level(h, n) (timer_config(h, n) & HPET_TN_INT_TYPE_LEVEL)
84 #define timer_int_route(h, n) \
85 ((timer_config(h, n) & HPET_TN_INT_ROUTE_MASK) >> HPET_TN_INT_ROUTE_SHIFT)
87 #define timer_int_route_cap(h, n) \
88 ((timer_config(h, n) & HPET_TN_INT_ROUTE_CAP_MASK) \
89 >> HPET_TN_INT_ROUTE_CAP_SHIFT)
91 #define hpet_time_after(a, b) ((int32_t)(b) - (int32_t)(a) < 0)
92 #define hpet_time_after64(a, b) ((int64_t)(b) - (int64_t)(a) < 0)
94 static inline uint64_t hpet_read64(HPETState *h, unsigned long addr)
95 {
96 addr &= ~7;
98 switch ( addr )
99 {
100 case HPET_ID:
101 return h->hpet.capability;
102 case HPET_CFG:
103 return h->hpet.config;
104 case HPET_STATUS:
105 return h->hpet.isr;
106 case HPET_COUNTER:
107 return h->hpet.mc64;
108 case HPET_T0_CFG:
109 case HPET_T1_CFG:
110 case HPET_T2_CFG:
111 return h->hpet.timers[(addr - HPET_T0_CFG) >> 5].config;
112 case HPET_T0_CMP:
113 case HPET_T1_CMP:
114 case HPET_T2_CMP:
115 return h->hpet.timers[(addr - HPET_T0_CMP) >> 5].cmp;
116 case HPET_T0_ROUTE:
117 case HPET_T1_ROUTE:
118 case HPET_T2_ROUTE:
119 return h->hpet.timers[(addr - HPET_T0_ROUTE) >> 5].fsb;
120 }
122 return 0;
123 }
125 static inline int hpet_check_access_length(
126 unsigned long addr, unsigned long len)
127 {
128 if ( (addr & (len - 1)) || (len > 8) )
129 {
130 /*
131 * According to ICH9 specification, unaligned accesses may result
132 * in unexpected behaviour or master abort, but should not crash/hang.
133 * Hence we read all-ones, drop writes, and log a warning.
134 */
135 gdprintk(XENLOG_WARNING, "HPET: access across register boundary: "
136 "%lx %lx\n", addr, len);
137 return -EINVAL;
138 }
140 return 0;
141 }
143 static inline uint64_t hpet_read_maincounter(HPETState *h)
144 {
145 ASSERT(spin_is_locked(&h->lock));
147 if ( hpet_enabled(h) )
148 return guest_time_hpet(h->vcpu) + h->mc_offset;
149 else
150 return h->hpet.mc64;
151 }
153 static unsigned long hpet_read(
154 struct vcpu *v, unsigned long addr, unsigned long length)
155 {
156 HPETState *h = &v->domain->arch.hvm_domain.pl_time.vhpet;
157 unsigned long result;
158 uint64_t val;
160 addr &= HPET_MMAP_SIZE-1;
162 if ( hpet_check_access_length(addr, length) != 0 )
163 return ~0UL;
165 spin_lock(&h->lock);
167 val = hpet_read64(h, addr);
168 if ( (addr & ~7) == HPET_COUNTER )
169 val = hpet_read_maincounter(h);
171 result = val;
172 if ( length != 8 )
173 result = (val >> ((addr & 7) * 8)) & ((1UL << (length * 8)) - 1);
175 spin_unlock(&h->lock);
177 return result;
178 }
180 static void hpet_stop_timer(HPETState *h, unsigned int tn)
181 {
182 ASSERT(tn < HPET_TIMER_NUM);
183 ASSERT(spin_is_locked(&h->lock));
184 stop_timer(&h->timers[tn]);
185 }
187 /* the number of HPET tick that stands for
188 * 1/(2^10) second, namely, 0.9765625 milliseconds */
189 #define HPET_TINY_TIME_SPAN ((h->tsc_freq >> 10) / TSC_PER_HPET_TICK)
191 static void hpet_set_timer(HPETState *h, unsigned int tn)
192 {
193 uint64_t tn_cmp, cur_tick, diff;
195 ASSERT(tn < HPET_TIMER_NUM);
196 ASSERT(spin_is_locked(&h->lock));
198 if ( !hpet_enabled(h) || !timer_enabled(h, tn) )
199 return;
201 if ( (tn == 0) && (h->hpet.config & HPET_CFG_LEGACY) )
202 {
203 /* HPET specification requires PIT shouldn't generate
204 * interrupts if LegacyReplacementRoute is set for timer0 */
205 PITState *pit = &h->vcpu->domain->arch.hvm_domain.pl_time.vpit;
206 pit_stop_channel0_irq(pit);
207 }
209 tn_cmp = h->hpet.timers[tn].cmp;
210 cur_tick = hpet_read_maincounter(h);
211 if ( timer_is_32bit(h, tn) )
212 {
213 tn_cmp = (uint32_t)tn_cmp;
214 cur_tick = (uint32_t)cur_tick;
215 }
217 diff = tn_cmp - cur_tick;
219 /*
220 * Detect time values set in the past. This is hard to do for 32-bit
221 * comparators as the timer does not have to be set that far in the future
222 * for the counter difference to wrap a 32-bit signed integer. We fudge
223 * by looking for a 'small' time value in the past.
224 */
225 if ( (int64_t)diff < 0 )
226 diff = (timer_is_32bit(h, tn) && (-diff > HPET_TINY_TIME_SPAN))
227 ? (uint32_t)diff : 0;
229 set_timer(&h->timers[tn], NOW() + hpet_tick_to_ns(h, diff));
230 }
232 static inline uint64_t hpet_fixup_reg(
233 uint64_t new, uint64_t old, uint64_t mask)
234 {
235 new &= mask;
236 new |= old & ~mask;
237 return new;
238 }
240 static void hpet_write(
241 struct vcpu *v, unsigned long addr,
242 unsigned long length, unsigned long val)
243 {
244 HPETState *h = &v->domain->arch.hvm_domain.pl_time.vhpet;
245 uint64_t old_val, new_val;
246 int tn, i;
248 addr &= HPET_MMAP_SIZE-1;
250 if ( hpet_check_access_length(addr, length) != 0 )
251 return;
253 spin_lock(&h->lock);
255 old_val = hpet_read64(h, addr);
256 if ( (addr & ~7) == HPET_COUNTER )
257 old_val = hpet_read_maincounter(h);
259 new_val = val;
260 if ( length != 8 )
261 new_val = hpet_fixup_reg(
262 new_val << (addr & 7) * 8, old_val,
263 ((1ULL << (length*8)) - 1) << ((addr & 7) * 8));
265 switch ( addr & ~7 )
266 {
267 case HPET_CFG:
268 h->hpet.config = hpet_fixup_reg(new_val, old_val, 0x3);
270 if ( !(old_val & HPET_CFG_ENABLE) && (new_val & HPET_CFG_ENABLE) )
271 {
272 /* Enable main counter and interrupt generation. */
273 h->mc_offset = h->hpet.mc64 - guest_time_hpet(h->vcpu);
274 for ( i = 0; i < HPET_TIMER_NUM; i++ )
275 hpet_set_timer(h, i);
276 }
277 else if ( (old_val & HPET_CFG_ENABLE) && !(new_val & HPET_CFG_ENABLE) )
278 {
279 /* Halt main counter and disable interrupt generation. */
280 h->hpet.mc64 = h->mc_offset + guest_time_hpet(h->vcpu);
281 for ( i = 0; i < HPET_TIMER_NUM; i++ )
282 hpet_stop_timer(h, i);
283 }
284 break;
286 case HPET_COUNTER:
287 if ( hpet_enabled(h) )
288 gdprintk(XENLOG_WARNING,
289 "HPET: writing main counter but it's not halted!\n");
290 h->hpet.mc64 = new_val;
291 break;
293 case HPET_T0_CFG:
294 case HPET_T1_CFG:
295 case HPET_T2_CFG:
296 tn = (addr - HPET_T0_CFG) >> 5;
298 h->hpet.timers[tn].config = hpet_fixup_reg(new_val, old_val, 0x3f4e);
300 if ( timer_level(h, tn) )
301 {
302 gdprintk(XENLOG_ERR,
303 "HPET: level triggered interrupt not supported now\n");
304 domain_crash(current->domain);
305 break;
306 }
308 if ( new_val & HPET_TN_32BIT )
309 h->hpet.timers[tn].cmp = (uint32_t)h->hpet.timers[tn].cmp;
311 if ( !(old_val & HPET_TN_ENABLE) && (new_val & HPET_TN_ENABLE) )
312 hpet_set_timer(h, tn);
313 else if ( (old_val & HPET_TN_ENABLE) && !(new_val & HPET_TN_ENABLE) )
314 hpet_stop_timer(h, tn);
315 break;
317 case HPET_T0_CMP:
318 case HPET_T1_CMP:
319 case HPET_T2_CMP:
320 tn = (addr - HPET_T0_CMP) >> 5;
321 if ( timer_is_32bit(h, tn) )
322 new_val = (uint32_t)new_val;
323 if ( !timer_is_periodic(h, tn) ||
324 (h->hpet.timers[tn].config & HPET_TN_SETVAL) )
325 h->hpet.timers[tn].cmp = new_val;
326 else
327 h->hpet.period[tn] = new_val;
328 h->hpet.timers[tn].config &= ~HPET_TN_SETVAL;
329 if ( hpet_enabled(h) && timer_enabled(h, tn) )
330 hpet_set_timer(h, tn);
331 break;
333 case HPET_T0_ROUTE:
334 case HPET_T1_ROUTE:
335 case HPET_T2_ROUTE:
336 tn = (addr - HPET_T0_ROUTE) >> 5;
337 h->hpet.timers[tn].fsb = new_val;
338 break;
340 default:
341 /* Ignore writes to unsupported and reserved registers. */
342 break;
343 }
345 spin_unlock(&h->lock);
346 }
348 static int hpet_range(struct vcpu *v, unsigned long addr)
349 {
350 return ((addr >= HPET_BASE_ADDRESS) &&
351 (addr < (HPET_BASE_ADDRESS + HPET_MMAP_SIZE)));
352 }
354 struct hvm_mmio_handler hpet_mmio_handler = {
355 .check_handler = hpet_range,
356 .read_handler = hpet_read,
357 .write_handler = hpet_write
358 };
360 static void hpet_route_interrupt(HPETState *h, unsigned int tn)
361 {
362 unsigned int tn_int_route = timer_int_route(h, tn);
363 struct domain *d = h->vcpu->domain;
365 ASSERT(spin_is_locked(&h->lock));
367 if ( (tn <= 1) && (h->hpet.config & HPET_CFG_LEGACY) )
368 {
369 /* if LegacyReplacementRoute bit is set, HPET specification requires
370 timer0 be routed to IRQ0 in NON-APIC or IRQ2 in the I/O APIC,
371 timer1 be routed to IRQ8 in NON-APIC or IRQ8 in the I/O APIC. */
372 int isa_irq = (tn == 0) ? 0 : 8;
373 hvm_isa_irq_deassert(d, isa_irq);
374 hvm_isa_irq_assert(d, isa_irq);
375 return;
376 }
378 if ( !(timer_int_route_cap(h, tn) & (1U << tn_int_route)) )
379 {
380 gdprintk(XENLOG_ERR,
381 "HPET: timer%u: invalid interrupt route config\n", tn);
382 domain_crash(d);
383 return;
384 }
386 /* We only support edge-triggered interrupt now */
387 spin_lock(&d->arch.hvm_domain.irq_lock);
388 vioapic_irq_positive_edge(d, tn_int_route);
389 spin_unlock(&d->arch.hvm_domain.irq_lock);
390 }
392 static void hpet_timer_fn(void *opaque)
393 {
394 struct HPET_timer_fn_info *htfi = opaque;
395 HPETState *h = htfi->hs;
396 unsigned int tn = htfi->tn;
398 spin_lock(&h->lock);
400 if ( !hpet_enabled(h) || !timer_enabled(h, tn) )
401 {
402 spin_unlock(&h->lock);
403 return;
404 }
406 hpet_route_interrupt(h, tn);
408 if ( timer_is_periodic(h, tn) && (h->hpet.period[tn] != 0) )
409 {
410 uint64_t mc = hpet_read_maincounter(h);
411 if ( timer_is_32bit(h, tn) )
412 {
413 while ( hpet_time_after(mc, h->hpet.timers[tn].cmp) )
414 h->hpet.timers[tn].cmp = (uint32_t)(
415 h->hpet.timers[tn].cmp + h->hpet.period[tn]);
416 }
417 else
418 {
419 while ( hpet_time_after64(mc, h->hpet.timers[tn].cmp) )
420 h->hpet.timers[tn].cmp += h->hpet.period[tn];
421 }
422 set_timer(&h->timers[tn],
423 NOW() + hpet_tick_to_ns(h, h->hpet.period[tn]));
424 }
426 spin_unlock(&h->lock);
427 }
429 void hpet_migrate_timers(struct vcpu *v)
430 {
431 struct HPETState *h = &v->domain->arch.hvm_domain.pl_time.vhpet;
432 int i;
434 if ( v != h->vcpu )
435 return;
437 for ( i = 0; i < HPET_TIMER_NUM; i++ )
438 migrate_timer(&h->timers[i], v->processor);
439 }
441 static int hpet_save(struct domain *d, hvm_domain_context_t *h)
442 {
443 HPETState *hp = &d->arch.hvm_domain.pl_time.vhpet;
444 int rc;
446 spin_lock(&hp->lock);
448 /* Write the proper value into the main counter */
449 hp->hpet.mc64 = hp->mc_offset + guest_time_hpet(hp->vcpu);
451 /* Save the HPET registers */
452 rc = _hvm_init_entry(h, HVM_SAVE_CODE(HPET), 0, HVM_SAVE_LENGTH(HPET));
453 if ( rc == 0 )
454 {
455 struct hvm_hw_hpet *rec = (struct hvm_hw_hpet *)&h->data[h->cur];
456 h->cur += HVM_SAVE_LENGTH(HPET);
457 memset(rec, 0, HVM_SAVE_LENGTH(HPET));
458 #define C(x) rec->x = hp->hpet.x
459 C(capability);
460 C(config);
461 C(isr);
462 C(mc64);
463 C(timers[0].config);
464 C(timers[0].cmp);
465 C(timers[0].fsb);
466 C(timers[1].config);
467 C(timers[1].cmp);
468 C(timers[1].fsb);
469 C(timers[2].config);
470 C(timers[2].cmp);
471 C(timers[2].fsb);
472 C(period[0]);
473 C(period[1]);
474 C(period[2]);
475 #undef C
476 }
478 spin_unlock(&hp->lock);
480 return rc;
481 }
483 static int hpet_load(struct domain *d, hvm_domain_context_t *h)
484 {
485 HPETState *hp = &d->arch.hvm_domain.pl_time.vhpet;
486 struct hvm_hw_hpet *rec;
487 int i;
489 spin_lock(&hp->lock);
491 /* Reload the HPET registers */
492 if ( _hvm_check_entry(h, HVM_SAVE_CODE(HPET), HVM_SAVE_LENGTH(HPET)) )
493 {
494 spin_unlock(&hp->lock);
495 return -EINVAL;
496 }
498 rec = (struct hvm_hw_hpet *)&h->data[h->cur];
499 h->cur += HVM_SAVE_LENGTH(HPET);
501 #define C(x) hp->hpet.x = rec->x
502 C(capability);
503 C(config);
504 C(isr);
505 C(mc64);
506 C(timers[0].config);
507 C(timers[0].cmp);
508 C(timers[0].fsb);
509 C(timers[1].config);
510 C(timers[1].cmp);
511 C(timers[1].fsb);
512 C(timers[2].config);
513 C(timers[2].cmp);
514 C(timers[2].fsb);
515 C(period[0]);
516 C(period[1]);
517 C(period[2]);
518 #undef C
520 /* Recalculate the offset between the main counter and guest time */
521 hp->mc_offset = hp->hpet.mc64 - guest_time_hpet(hp->vcpu);
523 /* Restart the timers */
524 for ( i = 0; i < HPET_TIMER_NUM; i++ )
525 hpet_set_timer(hp, i);
527 spin_unlock(&hp->lock);
529 return 0;
530 }
532 HVM_REGISTER_SAVE_RESTORE(HPET, hpet_save, hpet_load, 1, HVMSR_PER_DOM);
534 void hpet_init(struct vcpu *v)
535 {
536 HPETState *h = &v->domain->arch.hvm_domain.pl_time.vhpet;
537 int i;
539 memset(h, 0, sizeof(HPETState));
541 spin_lock_init(&h->lock);
543 h->vcpu = v;
544 h->tsc_freq = ticks_per_sec(v);
546 /* 64-bit main counter; 3 timers supported; LegacyReplacementRoute. */
547 h->hpet.capability = 0x8086A201ULL;
549 /* This is the number of femptoseconds per HPET tick. */
550 /* Here we define HPET's frequency to be 1/32 of the TSC's */
551 h->hpet.capability |= ((S_TO_FS*TSC_PER_HPET_TICK/h->tsc_freq) << 32);
553 for ( i = 0; i < HPET_TIMER_NUM; i++ )
554 {
555 h->hpet.timers[i].config =
556 HPET_TN_INT_ROUTE_CAP | HPET_TN_SIZE_CAP | HPET_TN_PERIODIC_CAP;
557 h->hpet.timers[i].cmp = ~0ULL;
558 h->timer_fn_info[i].hs = h;
559 h->timer_fn_info[i].tn = i;
560 init_timer(&h->timers[i], hpet_timer_fn, &h->timer_fn_info[i],
561 v->processor);
562 }
563 }
565 void hpet_deinit(struct domain *d)
566 {
567 int i;
568 HPETState *h = &d->arch.hvm_domain.pl_time.vhpet;
570 for ( i = 0; i < HPET_TIMER_NUM; i++ )
571 kill_timer(&h->timers[i]);
572 }