ia64/xen-unstable

view xen/arch/x86/hvm/vlapic.c @ 16536:89e7031e153c

hvm: Initialize vlapic->timer_last_update.

Without the fix, before the first vlapic timer interrupt is injected,
the "vlapic->timer_last_update" in vlapic_get_tmcct() is always 0,
causing a wrong value of counter_passed.

Signed-off-by: Dexuan Cui <dexuan.cui@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Dec 05 14:01:39 2007 +0000 (2007-12-05)
parents c00f31f27de6
children 40812c9d96e7
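
To see why the uninitialised timer_last_update described above matters, here is a
minimal standalone sketch (not part of the changeset; the sketch_tmcct() helper and
all numeric values are illustrative assumptions) that mirrors the counter_passed
arithmetic from vlapic_get_tmcct() below, once with timer_last_update left at 0 and
once with it properly seeded:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define APIC_BUS_CYCLE_NS 10            /* same constant as in vlapic.c */

/* Mirrors the counter_passed/TMCCT computation in vlapic_get_tmcct(). */
static uint32_t sketch_tmcct(uint64_t now, uint64_t timer_last_update,
                             uint32_t tmict, uint64_t ticks_per_sec,
                             uint32_t timer_divisor)
{
    uint64_t counter_passed = (now - timer_last_update)
                              * 1000000000ULL / ticks_per_sec
                              / APIC_BUS_CYCLE_NS / timer_divisor;
    return tmict - counter_passed;
}

int main(void)
{
    uint64_t now = 2000000000ULL;   /* hypothetical guest time, in ticks */
    uint64_t tps = 1000000000ULL;   /* hypothetical 1 GHz guest clock */
    uint32_t tmict = 100000, divisor = 1;

    /* Bug: timer_last_update still 0, so the guest's entire uptime is
     * counted and TMCCT wraps to a bogus value. */
    printf("uninitialised: %"PRIu32"\n",
           sketch_tmcct(now, 0, tmict, tps, divisor));

    /* Fixed: timer_last_update was seeded when the timer was (re)created. */
    printf("initialised:   %"PRIu32"\n",
           sketch_tmcct(now, now - 500000, tmict, tps, divisor));

    return 0;
}

With timer_last_update left at 0, the whole of guest time since boot is treated as
elapsed timer ticks and TMCCT underflows; the changeset therefore seeds it from
pt.last_plt_gtime in vlapic_write() (APIC_TMICT case) and in lapic_rearm().
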
/*
 * vlapic.c: virtualize LAPIC for HVM vcpus.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2006 Keir Fraser, XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/xmalloc.h>
#include <xen/domain_page.h>
#include <asm/page.h>
#include <xen/event.h>
#include <xen/trace.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/current.h>
#include <asm/hvm/vmx/vmx.h>
#include <public/hvm/ioreq.h>
#include <public/hvm/params.h>

#define VLAPIC_VERSION                  0x00050014
#define VLAPIC_LVT_NUM                  6

/* The vlapic's frequency is 100 MHz, i.e. a bus cycle of 10 ns. */
#define APIC_BUS_CYCLE_NS               10

#define LVT_MASK \
    APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK

#define LINT_MASK \
    LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY |\
    APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER

static unsigned int vlapic_lvt_mask[VLAPIC_LVT_NUM] =
{
    /* LVTT */
    LVT_MASK | APIC_LVT_TIMER_PERIODIC,
    /* LVTTHMR */
    LVT_MASK | APIC_MODE_MASK,
    /* LVTPC */
    LVT_MASK | APIC_MODE_MASK,
    /* LVT0-1 */
    LINT_MASK, LINT_MASK,
    /* LVTERR */
    LVT_MASK
};

/* The following could belong in apicdef.h. */
#define APIC_SHORT_MASK                  0xc0000
#define APIC_DEST_NOSHORT                0x0
#define APIC_DEST_MASK                   0x800

#define vlapic_lvt_enabled(vlapic, lvt_type)                    \
    (!(vlapic_get_reg(vlapic, lvt_type) & APIC_LVT_MASKED))

#define vlapic_lvt_vector(vlapic, lvt_type)                     \
    (vlapic_get_reg(vlapic, lvt_type) & APIC_VECTOR_MASK)

#define vlapic_lvt_dm(vlapic, lvt_type)                         \
    (vlapic_get_reg(vlapic, lvt_type) & APIC_MODE_MASK)

#define vlapic_lvtt_period(vlapic)                              \
    (vlapic_get_reg(vlapic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC)

/*
 * Generic APIC bitmap vector update & search routines.
 */

#define VEC_POS(v) ((v) % 32)
#define REG_POS(v) (((v) / 32) * 0x10)
#define vlapic_test_and_set_vector(vec, bitmap)                 \
    test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
#define vlapic_test_and_clear_vector(vec, bitmap)               \
    test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
#define vlapic_set_vector(vec, bitmap)                          \
    set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
#define vlapic_clear_vector(vec, bitmap)                        \
    clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))

static int vlapic_find_highest_vector(void *bitmap)
{
    uint32_t *word = bitmap;
    int word_offset = MAX_VECTOR / 32;

    /* Work backwards through the bitmap (first 32-bit word in every four). */
    while ( (word_offset != 0) && (word[(--word_offset)*4] == 0) )
        continue;

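    /* fls() returns 0 for a zero word, so an empty bitmap yields -1 below. */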
    return (fls(word[word_offset*4]) - 1) + (word_offset * 32);
}

/*
 * IRR-specific bitmap update & search routines.
 */

static int vlapic_test_and_set_irr(int vector, struct vlapic *vlapic)
{
    return vlapic_test_and_set_vector(vector, &vlapic->regs->data[APIC_IRR]);
}

static void vlapic_clear_irr(int vector, struct vlapic *vlapic)
{
    vlapic_clear_vector(vector, &vlapic->regs->data[APIC_IRR]);
}

int vlapic_find_highest_irr(struct vlapic *vlapic)
{
    int result;

    result = vlapic_find_highest_vector(&vlapic->regs->data[APIC_IRR]);
    ASSERT((result == -1) || (result >= 16));

    return result;
}

int vlapic_set_irq(struct vlapic *vlapic, uint8_t vec, uint8_t trig)
{
    int ret;

    ret = !vlapic_test_and_set_irr(vec, vlapic);
    if ( trig )
        vlapic_set_vector(vec, &vlapic->regs->data[APIC_TMR]);

    /*
     * Besides setting the pending bit here, we may also need to wake up
     * the target vcpu.
     */
    return ret;
}

int vlapic_find_highest_isr(struct vlapic *vlapic)
{
    int result;

    result = vlapic_find_highest_vector(&vlapic->regs->data[APIC_ISR]);
    ASSERT((result == -1) || (result >= 16));

    return result;
}

uint32_t vlapic_get_ppr(struct vlapic *vlapic)
{
    uint32_t tpr, isrv, ppr;
    int isr;

    tpr  = vlapic_get_reg(vlapic, APIC_TASKPRI);
    isr  = vlapic_find_highest_isr(vlapic);
    isrv = (isr != -1) ? isr : 0;

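    /*
     * PPR is TPR when TPR's priority class is at least that of the highest
     * in-service vector; otherwise it is that vector's class with the
     * sub-class bits cleared.
     */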
    if ( (tpr & 0xf0) >= (isrv & 0xf0) )
        ppr = tpr & 0xff;
    else
        ppr = isrv & 0xf0;

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC_INTERRUPT,
                "vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
                vlapic, ppr, isr, isrv);

    return ppr;
}

int vlapic_match_logical_addr(struct vlapic *vlapic, uint8_t mda)
{
    int result = 0;
    uint8_t logical_id;

    logical_id = GET_APIC_LOGICAL_ID(vlapic_get_reg(vlapic, APIC_LDR));

    switch ( vlapic_get_reg(vlapic, APIC_DFR) )
    {
    case APIC_DFR_FLAT:
        if ( logical_id & mda )
            result = 1;
        break;
    case APIC_DFR_CLUSTER:
        if ( ((logical_id >> 4) == (mda >> 0x4)) && (logical_id & mda & 0xf) )
            result = 1;
        break;
    default:
        gdprintk(XENLOG_WARNING, "Bad DFR value for lapic of vcpu %d: %08x\n",
                 vlapic_vcpu(vlapic)->vcpu_id,
                 vlapic_get_reg(vlapic, APIC_DFR));
        break;
    }

    return result;
}

static int vlapic_match_dest(struct vcpu *v, struct vlapic *source,
                             int short_hand, int dest, int dest_mode)
{
    int result = 0;
    struct vlapic *target = vcpu_vlapic(v);

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "target %p, source %p, dest 0x%x, "
                "dest_mode 0x%x, short_hand 0x%x",
                target, source, dest, dest_mode, short_hand);

    switch ( short_hand )
    {
    case APIC_DEST_NOSHORT:
        if ( dest_mode == 0 )
        {
            /* Physical mode. */
            if ( (dest == 0xFF) || (dest == VLAPIC_ID(target)) )
                result = 1;
        }
        else
        {
            /* Logical mode. */
            result = vlapic_match_logical_addr(target, dest);
        }
        break;

    case APIC_DEST_SELF:
        if ( target == source )
            result = 1;
        break;

    case APIC_DEST_ALLINC:
        result = 1;
        break;

    case APIC_DEST_ALLBUT:
        if ( target != source )
            result = 1;
        break;

    default:
        gdprintk(XENLOG_WARNING, "Bad dest shorthand value %x\n", short_hand);
        break;
    }

    return result;
}

/* Add a pending IRQ into lapic. */
static int vlapic_accept_irq(struct vcpu *v, int delivery_mode,
                             int vector, int level, int trig_mode)
{
    int result = 0;
    struct vlapic *vlapic = vcpu_vlapic(v);

    switch ( delivery_mode )
    {
    case APIC_DM_FIXED:
    case APIC_DM_LOWEST:
        /* FIXME add logic for vcpu on reset */
        if ( unlikely(!vlapic_enabled(vlapic)) )
            break;

        if ( vlapic_test_and_set_irr(vector, vlapic) && trig_mode )
        {
            HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                        "level trig mode repeatedly for vector %d", vector);
            break;
        }

        if ( trig_mode )
        {
            HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                        "level trig mode for vector %d", vector);
            vlapic_set_vector(vector, &vlapic->regs->data[APIC_TMR]);
        }

        vcpu_kick(v);

        result = 1;
        break;

    case APIC_DM_REMRD:
        gdprintk(XENLOG_WARNING, "Ignoring delivery mode 3\n");
        break;

    case APIC_DM_SMI:
        gdprintk(XENLOG_WARNING, "Ignoring guest SMI\n");
        break;

    case APIC_DM_NMI:
        if ( !test_and_set_bool(v->nmi_pending) )
            vcpu_kick(v);
        break;

    case APIC_DM_INIT:
        /* No work on INIT de-assert for P4-type APIC. */
        if ( trig_mode && !(level & APIC_INT_ASSERT) )
            break;
        /* FIXME How to check the situation after vcpu reset? */
        if ( v->is_initialised )
            hvm_vcpu_reset(v);
        v->arch.hvm_vcpu.init_sipi_sipi_state =
            HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI;
        result = 1;
        break;

    case APIC_DM_STARTUP:
        if ( v->arch.hvm_vcpu.init_sipi_sipi_state ==
             HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM )
            break;

        v->arch.hvm_vcpu.init_sipi_sipi_state =
            HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM;

        if ( v->is_initialised )
        {
            gdprintk(XENLOG_ERR, "SIPI for initialized vcpu %x\n", v->vcpu_id);
            goto exit_and_crash;
        }

        if ( hvm_bringup_ap(v->vcpu_id, vector) != 0 )
            result = 0;
        break;

    default:
        gdprintk(XENLOG_ERR, "TODO: unsupported delivery mode %x\n",
                 delivery_mode);
        goto exit_and_crash;
    }

    return result;

 exit_and_crash:
    domain_crash(v->domain);
    return 0;
}

/* Used by both the ioapic and the lapic. The bitmap is indexed by vcpu_id. */
struct vlapic *apic_round_robin(
    struct domain *d, uint8_t vector, uint32_t bitmap)
{
    int next, old;
    struct vlapic *target = NULL;

    old = next = d->arch.hvm_domain.irq.round_robin_prev_vcpu;

    do {
        if ( ++next == MAX_VIRT_CPUS )
            next = 0;
        if ( (d->vcpu[next] == NULL) || !test_bit(next, &bitmap) )
            continue;
        target = vcpu_vlapic(d->vcpu[next]);
        if ( vlapic_enabled(target) )
            break;
        target = NULL;
    } while ( next != old );

    d->arch.hvm_domain.irq.round_robin_prev_vcpu = next;

    return target;
}

void vlapic_EOI_set(struct vlapic *vlapic)
{
    int vector = vlapic_find_highest_isr(vlapic);

    /* Some EOI writes may not correspond to an in-service interrupt. */
    if ( vector == -1 )
        return;

    vlapic_clear_vector(vector, &vlapic->regs->data[APIC_ISR]);

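    /*
     * For level-triggered vectors (TMR bit set), propagate the EOI to the
     * virtual IOAPIC.
     */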
    if ( vlapic_test_and_clear_vector(vector, &vlapic->regs->data[APIC_TMR]) )
        vioapic_update_EOI(vlapic_domain(vlapic), vector);
}

static void vlapic_ipi(struct vlapic *vlapic)
{
    uint32_t icr_low = vlapic_get_reg(vlapic, APIC_ICR);
    uint32_t icr_high = vlapic_get_reg(vlapic, APIC_ICR2);

    unsigned int dest = GET_APIC_DEST_FIELD(icr_high);
    unsigned int short_hand = icr_low & APIC_SHORT_MASK;
    unsigned int trig_mode = icr_low & APIC_INT_LEVELTRIG;
    unsigned int level = icr_low & APIC_INT_ASSERT;
    unsigned int dest_mode = icr_low & APIC_DEST_MASK;
    unsigned int delivery_mode = icr_low & APIC_MODE_MASK;
    unsigned int vector = icr_low & APIC_VECTOR_MASK;

    struct vlapic *target;
    struct vcpu *v;
    uint32_t lpr_map = 0;

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "icr_high 0x%x, icr_low 0x%x, "
                "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
                "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x",
                icr_high, icr_low, short_hand, dest,
                trig_mode, level, dest_mode, delivery_mode, vector);

    for_each_vcpu ( vlapic_domain(vlapic), v )
    {
        if ( vlapic_match_dest(v, vlapic, short_hand, dest, dest_mode) )
        {
            if ( delivery_mode == APIC_DM_LOWEST )
                set_bit(v->vcpu_id, &lpr_map);
            else
                vlapic_accept_irq(v, delivery_mode,
                                  vector, level, trig_mode);
        }
    }

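    /* Lowest-priority delivery: pick one vcpu from the matched set round-robin. */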
    if ( delivery_mode == APIC_DM_LOWEST )
    {
        target = apic_round_robin(vlapic_domain(v), vector, lpr_map);
        if ( target != NULL )
            vlapic_accept_irq(vlapic_vcpu(target), delivery_mode,
                              vector, level, trig_mode);
    }
}

static uint32_t vlapic_get_tmcct(struct vlapic *vlapic)
{
    struct vcpu *v = current;
    uint32_t tmcct, tmict = vlapic_get_reg(vlapic, APIC_TMICT);
    uint64_t counter_passed;

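    /*
     * Elapsed guest time since the last timer update, converted from guest
     * clock ticks to ns and then to APIC timer counts (10 ns bus cycle,
     * scaled by the configured divisor).
     */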
    counter_passed = ((hvm_get_guest_time(v) - vlapic->timer_last_update)
                      * 1000000000ULL / ticks_per_sec(v)
                      / APIC_BUS_CYCLE_NS / vlapic->hw.timer_divisor);
    tmcct = tmict - counter_passed;

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
                "timer initial count %d, timer current count %d, "
                "offset %"PRId64,
                tmict, tmcct, counter_passed);

    return tmcct;
}

static void vlapic_set_tdcr(struct vlapic *vlapic, unsigned int val)
{
    /* Only bits 0, 1 and 3 are settable; others are MBZ. */
    val &= 0xb;
    vlapic_set_reg(vlapic, APIC_TDCR, val);

    /* Update the demangled hw.timer_divisor. */
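    /* Encodings 0-6 mean divide by 2..128; encoding 7 means divide by 1. */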
    val = ((val & 3) | ((val & 8) >> 1)) + 1;
    vlapic->hw.timer_divisor = 1 << (val & 7);

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
                "timer_divisor: %d", vlapic->hw.timer_divisor);
}

static void vlapic_read_aligned(struct vlapic *vlapic, unsigned int offset,
                                unsigned int len, unsigned int *result)
{
    ASSERT((len == 4) && (offset >= 0) && (offset <= APIC_TDCR));

    switch ( offset )
    {
    case APIC_PROCPRI:
        *result = vlapic_get_ppr(vlapic);
        break;

    case APIC_TMCCT: /* Timer CCR */
        *result = vlapic_get_tmcct(vlapic);
        break;

    default:
        *result = vlapic_get_reg(vlapic, offset);
        break;
    }
}

static unsigned long vlapic_read(struct vcpu *v, unsigned long address,
                                 unsigned long len)
{
    unsigned int alignment;
    unsigned int tmp;
    unsigned long result;
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned int offset = address - vlapic_base_address(vlapic);

    if ( offset > APIC_TDCR )
        return 0;

    /* Some buggy kernels read this register one byte at a time. */
    if ( len != 4 )
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                    "read with len=0x%lx, should be 4 instead",
                    len);

    alignment = offset & 0x3;

    vlapic_read_aligned(vlapic, offset & ~0x3, 4, &tmp);
    switch ( len )
    {
    case 1:
        result = *((unsigned char *)&tmp + alignment);
        break;

    case 2:
        ASSERT( alignment != 3 );
        result = *(unsigned short *)((unsigned char *)&tmp + alignment);
        break;

    case 4:
        ASSERT( alignment == 0 );
        result = *(unsigned int *)((unsigned char *)&tmp + alignment);
        break;

    default:
        gdprintk(XENLOG_ERR, "Local APIC read with len=0x%lx, "
                 "should be 4 instead.\n", len);
        goto exit_and_crash;
    }

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "offset 0x%x with length 0x%lx, "
                "and the result is 0x%lx", offset, len, result);

    return result;

 exit_and_crash:
    domain_crash(v->domain);
    return 0;
}

void vlapic_pt_cb(struct vcpu *v, void *data)
{
    *(s_time_t *)data = hvm_get_guest_time(v);
}

static void vlapic_write(struct vcpu *v, unsigned long address,
                         unsigned long len, unsigned long val)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned int offset = address - vlapic_base_address(vlapic);

    if ( offset != 0xb0 )
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                    "offset 0x%x with length 0x%lx, and value is 0x%lx",
                    offset, len, val);

    /*
     * According to the IA32 Manual, all accesses should be 32 bits.
     * Some OSes do 8- or 16-bit accesses, however.
     */
    val &= 0xffffffff;
    if ( len != 4 )
    {
        unsigned int tmp;
        unsigned char alignment;

        gdprintk(XENLOG_INFO, "Notice: Local APIC write with len = %lx\n", len);

        alignment = offset & 0x3;
        tmp = vlapic_read(v, offset & ~0x3, 4);

        switch ( len )
        {
        case 1:
            val = (tmp & ~(0xff << (8*alignment))) |
                  ((val & 0xff) << (8*alignment));
            break;

        case 2:
            if ( alignment & 1 )
            {
                gdprintk(XENLOG_ERR, "Uneven alignment error for "
                         "2-byte vlapic access\n");
                goto exit_and_crash;
            }

            val = (tmp & ~(0xffff << (8*alignment))) |
                  ((val & 0xffff) << (8*alignment));
            break;

        default:
            gdprintk(XENLOG_ERR, "Local APIC write with len = %lx, "
                     "should be 4 instead\n", len);
        exit_and_crash:
            domain_crash(v->domain);
            return;
        }
    }

    offset &= 0xff0;

    switch ( offset )
    {
    case APIC_TASKPRI:
        vlapic_set_reg(vlapic, APIC_TASKPRI, val & 0xff);
        hvm_update_vtpr(v, (val >> 4) & 0x0f);
        break;

    case APIC_EOI:
        vlapic_EOI_set(vlapic);
        break;

    case APIC_LDR:
        vlapic_set_reg(vlapic, APIC_LDR, val & APIC_LDR_MASK);
        break;

    case APIC_DFR:
        vlapic_set_reg(vlapic, APIC_DFR, val | 0x0FFFFFFF);
        break;

    case APIC_SPIV:
        vlapic_set_reg(vlapic, APIC_SPIV, val & 0x3ff);

        if ( !(val & APIC_SPIV_APIC_ENABLED) )
        {
            int i;
            uint32_t lvt_val;

            vlapic->hw.disabled |= VLAPIC_SW_DISABLED;

            for ( i = 0; i < VLAPIC_LVT_NUM; i++ )
            {
                lvt_val = vlapic_get_reg(vlapic, APIC_LVTT + 0x10 * i);
                vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i,
                               lvt_val | APIC_LVT_MASKED);
            }
        }
        else
            vlapic->hw.disabled &= ~VLAPIC_SW_DISABLED;
        break;

    case APIC_ESR:
        /* Nothing to do. */
        break;

    case APIC_ICR:
        /* No delay here, so we always clear the pending bit. */
        vlapic_set_reg(vlapic, APIC_ICR, val & ~(1 << 12));
        vlapic_ipi(vlapic);
        break;

    case APIC_ICR2:
        vlapic_set_reg(vlapic, APIC_ICR2, val & 0xff000000);
        break;

    case APIC_LVTT: /* LVT Timer Reg */
        vlapic->pt.irq = val & APIC_VECTOR_MASK;
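        /* Fall through to the common LVT handling below. */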
    case APIC_LVTTHMR:      /* LVT Thermal Monitor */
    case APIC_LVTPC:        /* LVT Performance Counter */
    case APIC_LVT0:         /* LVT LINT0 Reg */
    case APIC_LVT1:         /* LVT Lint1 Reg */
    case APIC_LVTERR:       /* LVT Error Reg */
        if ( vlapic_sw_disabled(vlapic) )
            val |= APIC_LVT_MASKED;
        val &= vlapic_lvt_mask[(offset - APIC_LVTT) >> 4];
        vlapic_set_reg(vlapic, offset, val);
        break;

    case APIC_TMICT:
    {
        uint64_t period = (uint64_t)APIC_BUS_CYCLE_NS *
                          (uint32_t)val * vlapic->hw.timer_divisor;

        vlapic_set_reg(vlapic, APIC_TMICT, val);
        create_periodic_time(current, &vlapic->pt, period, vlapic->pt.irq,
                             !vlapic_lvtt_period(vlapic), vlapic_pt_cb,
                             &vlapic->timer_last_update);
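        /*
         * Seed timer_last_update now so that vlapic_get_tmcct() has a sane
         * base before the first timer callback fires (see the changeset
         * description above).
         */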
        vlapic->timer_last_update = vlapic->pt.last_plt_gtime;

        HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                    "bus cycle is %uns, "
                    "initial count %lu, period %"PRIu64"ns",
                    APIC_BUS_CYCLE_NS, val, period);
    }
    break;

    case APIC_TDCR:
        vlapic_set_tdcr(vlapic, val & 0xb);
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "timer divisor is 0x%x",
                    vlapic->hw.timer_divisor);
        break;

    default:
        gdprintk(XENLOG_DEBUG,
                 "Local APIC Write to read-only register 0x%x\n", offset);
        break;
    }
}

static int vlapic_range(struct vcpu *v, unsigned long addr)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned long offset = addr - vlapic_base_address(vlapic);
    return (!vlapic_hw_disabled(vlapic) && (offset < PAGE_SIZE));
}

struct hvm_mmio_handler vlapic_mmio_handler = {
    .check_handler = vlapic_range,
    .read_handler = vlapic_read,
    .write_handler = vlapic_write
};

void vlapic_msr_set(struct vlapic *vlapic, uint64_t value)
{
    if ( (vlapic->hw.apic_base_msr ^ value) & MSR_IA32_APICBASE_ENABLE )
    {
        if ( value & MSR_IA32_APICBASE_ENABLE )
        {
            vlapic_reset(vlapic);
            vlapic->hw.disabled &= ~VLAPIC_HW_DISABLED;
        }
        else
        {
            vlapic->hw.disabled |= VLAPIC_HW_DISABLED;
        }
    }

    vlapic->hw.apic_base_msr = value;

    vmx_vlapic_msr_changed(vlapic_vcpu(vlapic));

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                "apic base msr is 0x%016"PRIx64, vlapic->hw.apic_base_msr);
}

int vlapic_accept_pic_intr(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    uint32_t lvt0 = vlapic_get_reg(vlapic, APIC_LVT0);

    /*
     * Only CPU0 is wired to the 8259A. INTA cycles occur if LINT0 is set up
     * to accept ExtInts, or if the LAPIC is disabled (so LINT0 behaves as
     * INTR).
     */
    return ((v->vcpu_id == 0) &&
            (((lvt0 & (APIC_MODE_MASK|APIC_LVT_MASKED)) == APIC_DM_EXTINT) ||
             vlapic_hw_disabled(vlapic)));
}

int vlapic_has_pending_irq(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    int irr, isr;

    if ( !vlapic_enabled(vlapic) )
        return -1;

    irr = vlapic_find_highest_irr(vlapic);
    if ( irr == -1 )
        return -1;

    isr = vlapic_find_highest_isr(vlapic);
    isr = (isr != -1) ? isr : 0;
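    /*
     * Deliver only if the pending vector's priority class beats that of the
     * highest in-service vector.
     */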
    if ( (isr & 0xf0) >= (irr & 0xf0) )
        return -1;

    return irr;
}

int vlapic_ack_pending_irq(struct vcpu *v, int vector)
{
    struct vlapic *vlapic = vcpu_vlapic(v);

    vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
    vlapic_clear_irr(vector, vlapic);

    return 1;
}

/* Reset the VLAPIC back to its power-on/reset state. */
void vlapic_reset(struct vlapic *vlapic)
{
    struct vcpu *v = vlapic_vcpu(vlapic);
    int i;

    vlapic_set_reg(vlapic, APIC_ID,  (v->vcpu_id * 2) << 24);
    vlapic_set_reg(vlapic, APIC_LVR, VLAPIC_VERSION);

    for ( i = 0; i < 8; i++ )
    {
        vlapic_set_reg(vlapic, APIC_IRR + 0x10 * i, 0);
        vlapic_set_reg(vlapic, APIC_ISR + 0x10 * i, 0);
        vlapic_set_reg(vlapic, APIC_TMR + 0x10 * i, 0);
    }
    vlapic_set_reg(vlapic, APIC_ICR,     0);
    vlapic_set_reg(vlapic, APIC_ICR2,    0);
    vlapic_set_reg(vlapic, APIC_LDR,     0);
    vlapic_set_reg(vlapic, APIC_TASKPRI, 0);
    vlapic_set_reg(vlapic, APIC_TMICT,   0);
    vlapic_set_reg(vlapic, APIC_TMCCT,   0);
    vlapic_set_tdcr(vlapic, 0);

    vlapic_set_reg(vlapic, APIC_DFR, 0xffffffffU);

    for ( i = 0; i < VLAPIC_LVT_NUM; i++ )
        vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);

    vlapic_set_reg(vlapic, APIC_SPIV, 0xff);
    vlapic->hw.disabled |= VLAPIC_SW_DISABLED;
}

#ifdef HVM_DEBUG_SUSPEND
static void lapic_info(struct vlapic *s)
{
    printk("*****lapic state:*****\n");
    printk("lapic 0x%"PRIx64".\n", s->hw.apic_base_msr);
    printk("lapic 0x%x.\n", s->hw.disabled);
    printk("lapic 0x%x.\n", s->hw.timer_divisor);
}
#else
static void lapic_info(struct vlapic *s)
{
}
#endif

/* Rearm the actimer, if needed, after an HVM restore. */
static void lapic_rearm(struct vlapic *s)
{
    unsigned long tmict;

    tmict = vlapic_get_reg(s, APIC_TMICT);
    if ( tmict > 0 )
    {
        uint64_t period = (uint64_t)APIC_BUS_CYCLE_NS *
                          (uint32_t)tmict * s->hw.timer_divisor;
        uint32_t lvtt = vlapic_get_reg(s, APIC_LVTT);

        s->pt.irq = lvtt & APIC_VECTOR_MASK;
        create_periodic_time(vlapic_vcpu(s), &s->pt, period, s->pt.irq,
                             !vlapic_lvtt_period(s), vlapic_pt_cb,
                             &s->timer_last_update);
        s->timer_last_update = s->pt.last_plt_gtime;

        printk("lapic_load to rearm the actimer: "
               "bus cycle is %uns, "
               "saved tmict count %lu, period %"PRIu64"ns, irq=%"PRIu8"\n",
               APIC_BUS_CYCLE_NS, tmict, period, s->pt.irq);
    }

    lapic_info(s);
}

static int lapic_save_hidden(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;
    struct vlapic *s;

    for_each_vcpu(d, v)
    {
        s = vcpu_vlapic(v);
        lapic_info(s);

        if ( hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw) != 0 )
            return 1;
    }
    return 0;
}

static int lapic_save_regs(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;
    struct vlapic *s;

    for_each_vcpu(d, v)
    {
        s = vcpu_vlapic(v);
        if ( hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs) != 0 )
            return 1;
    }
    return 0;
}

static int lapic_load_hidden(struct domain *d, hvm_domain_context_t *h)
{
    uint16_t vcpuid;
    struct vcpu *v;
    struct vlapic *s;

    /* Which vlapic to load? */
    vcpuid = hvm_load_instance(h);
    if ( vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
        return -EINVAL;
    }
    s = vcpu_vlapic(v);

    if ( hvm_load_entry(LAPIC, h, &s->hw) != 0 )
        return -EINVAL;

    lapic_info(s);

    vmx_vlapic_msr_changed(v);

    return 0;
}

static int lapic_load_regs(struct domain *d, hvm_domain_context_t *h)
{
    uint16_t vcpuid;
    struct vcpu *v;
    struct vlapic *s;

    /* Which vlapic to load? */
    vcpuid = hvm_load_instance(h);
    if ( vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
        return -EINVAL;
    }
    s = vcpu_vlapic(v);

    if ( hvm_load_entry(LAPIC_REGS, h, s->regs) != 0 )
        return -EINVAL;

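    /* lapic_rearm() recreates the vlapic timer and reseeds timer_last_update. */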
    lapic_rearm(s);
    return 0;
}

HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden,
                          1, HVMSR_PER_VCPU);
HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs,
                          1, HVMSR_PER_VCPU);

int vlapic_init(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned int memflags = 0;

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "%d", v->vcpu_id);

#ifdef __i386__
    /* 32-bit VMX may be limited to 32-bit physical addresses. */
    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
        memflags = MEMF_bits(32);
#endif

    vlapic->regs_page = alloc_domheap_pages(NULL, 0, memflags);
    if ( vlapic->regs_page == NULL )
    {
        dprintk(XENLOG_ERR, "alloc vlapic regs error: %d/%d\n",
                v->domain->domain_id, v->vcpu_id);
        return -ENOMEM;
    }

    vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));
    if ( vlapic->regs == NULL )
    {
        dprintk(XENLOG_ERR, "map vlapic regs error: %d/%d\n",
                v->domain->domain_id, v->vcpu_id);
        return -ENOMEM;
    }

    clear_page(vlapic->regs);

    vlapic_reset(vlapic);

    vlapic->hw.apic_base_msr = (MSR_IA32_APICBASE_ENABLE |
                                APIC_DEFAULT_PHYS_BASE);
    if ( v->vcpu_id == 0 )
        vlapic->hw.apic_base_msr |= MSR_IA32_APICBASE_BSP;

    return 0;
}

void vlapic_destroy(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);

    destroy_periodic_time(&vlapic->pt);
    unmap_domain_page_global(vlapic->regs);
    free_domheap_page(vlapic->regs_page);
}

int is_lvtt(struct vcpu *v, int vector)
{
    return vcpu_vlapic(v)->pt.enabled &&
           vector == vlapic_lvt_vector(vcpu_vlapic(v), APIC_LVTT);
}

int is_lvtt_enabled(struct vcpu *v)
{
    if ( unlikely(!vlapic_enabled(vcpu_vlapic(v))) ||
         !vlapic_lvt_enabled(vcpu_vlapic(v), APIC_LVTT))
        return 0;

    return 1;
}