ia64/xen-unstable

xen/arch/x86/hvm/vlapic.c @ 16603:4553bc1087d9

hvm: Reduce vpt.c dependencies on external timer details.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Author:   Keir Fraser <keir.fraser@citrix.com>
Date:     Wed Dec 12 15:41:20 2007 +0000
Parents:  f2f7c92bf1c1
Children: 3ee37b6279b7
/*
 * vlapic.c: virtualize LAPIC for HVM vcpus.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2006 Keir Fraser, XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/xmalloc.h>
#include <xen/domain_page.h>
#include <asm/page.h>
#include <xen/event.h>
#include <xen/trace.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/current.h>
#include <asm/hvm/vmx/vmx.h>
#include <public/hvm/ioreq.h>
#include <public/hvm/params.h>

#define VLAPIC_VERSION      0x00050014
#define VLAPIC_LVT_NUM      6

/* The vlapic's bus frequency is 100 MHz, i.e. one bus cycle is 10ns. */
#define APIC_BUS_CYCLE_NS   10

#define LVT_MASK \
    APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK

#define LINT_MASK \
    LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY |\
    APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER

static unsigned int vlapic_lvt_mask[VLAPIC_LVT_NUM] =
{
    /* LVTT */
    LVT_MASK | APIC_LVT_TIMER_PERIODIC,
    /* LVTTHMR */
    LVT_MASK | APIC_MODE_MASK,
    /* LVTPC */
    LVT_MASK | APIC_MODE_MASK,
    /* LVT0-1 */
    LINT_MASK, LINT_MASK,
    /* LVTERR */
    LVT_MASK
};

/* Following could belong in apicdef.h */
#define APIC_SHORT_MASK     0xc0000
#define APIC_DEST_NOSHORT   0x0
#define APIC_DEST_MASK      0x800

#define vlapic_lvt_vector(vlapic, lvt_type) \
    (vlapic_get_reg(vlapic, lvt_type) & APIC_VECTOR_MASK)

#define vlapic_lvt_dm(vlapic, lvt_type) \
    (vlapic_get_reg(vlapic, lvt_type) & APIC_MODE_MASK)

#define vlapic_lvtt_period(vlapic) \
    (vlapic_get_reg(vlapic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC)


/*
 * Generic APIC bitmap vector update & search routines.
 */
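/*
 * The IRR, ISR and TMR are 256-bit registers, stored as eight 32-bit words,
 * each at a 16-byte-aligned offset within the register page. VEC_POS() and
 * REG_POS() map a vector number onto that layout.
 */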
#define VEC_POS(v) ((v)%32)
#define REG_POS(v) (((v)/32)* 0x10)
#define vlapic_test_and_set_vector(vec, bitmap) \
    test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
#define vlapic_test_and_clear_vector(vec, bitmap) \
    test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
#define vlapic_set_vector(vec, bitmap) \
    set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
#define vlapic_clear_vector(vec, bitmap) \
    clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
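
/* Returns the highest vector set in the bitmap, or -1 if no bit is set. */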
static int vlapic_find_highest_vector(void *bitmap)
{
    uint32_t *word = bitmap;
    int word_offset = MAX_VECTOR / 32;

    /* Work backwards through the bitmap (first 32-bit word in every four). */
    while ( (word_offset != 0) && (word[(--word_offset)*4] == 0) )
        continue;

    return (fls(word[word_offset*4]) - 1) + (word_offset * 32);
}


/*
 * IRR-specific bitmap update & search routines.
 */

static int vlapic_test_and_set_irr(int vector, struct vlapic *vlapic)
{
    return vlapic_test_and_set_vector(vector, &vlapic->regs->data[APIC_IRR]);
}

static void vlapic_clear_irr(int vector, struct vlapic *vlapic)
{
    vlapic_clear_vector(vector, &vlapic->regs->data[APIC_IRR]);
}

int vlapic_find_highest_irr(struct vlapic *vlapic)
{
    int result;

    result = vlapic_find_highest_vector(&vlapic->regs->data[APIC_IRR]);
    ASSERT((result == -1) || (result >= 16));

    return result;
}

int vlapic_set_irq(struct vlapic *vlapic, uint8_t vec, uint8_t trig)
{
    int ret;

    ret = !vlapic_test_and_set_irr(vec, vlapic);
    if ( trig )
        vlapic_set_vector(vec, &vlapic->regs->data[APIC_TMR]);

    /* Besides setting the pending bit here, the target vcpu may need waking. */
    return ret;
}

int vlapic_find_highest_isr(struct vlapic *vlapic)
{
    int result;

    result = vlapic_find_highest_vector(&vlapic->regs->data[APIC_ISR]);
    ASSERT((result == -1) || (result >= 16));

    return result;
}
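
/*
 * The processor priority (PPR) is the higher of the task priority and the
 * priority class of the highest in-service vector.
 */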
uint32_t vlapic_get_ppr(struct vlapic *vlapic)
{
    uint32_t tpr, isrv, ppr;
    int isr;

    tpr  = vlapic_get_reg(vlapic, APIC_TASKPRI);
    isr  = vlapic_find_highest_isr(vlapic);
    isrv = (isr != -1) ? isr : 0;

    if ( (tpr & 0xf0) >= (isrv & 0xf0) )
        ppr = tpr & 0xff;
    else
        ppr = isrv & 0xf0;

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC_INTERRUPT,
                "vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
                vlapic, ppr, isr, isrv);

    return ppr;
}

int vlapic_match_logical_addr(struct vlapic *vlapic, uint8_t mda)
{
    int result = 0;
    uint8_t logical_id;

    logical_id = GET_APIC_LOGICAL_ID(vlapic_get_reg(vlapic, APIC_LDR));

    switch ( vlapic_get_reg(vlapic, APIC_DFR) )
    {
    case APIC_DFR_FLAT:
        if ( logical_id & mda )
            result = 1;
        break;
    case APIC_DFR_CLUSTER:
        if ( ((logical_id >> 4) == (mda >> 0x4)) && (logical_id & mda & 0xf) )
            result = 1;
        break;
    default:
        gdprintk(XENLOG_WARNING, "Bad DFR value for lapic of vcpu %d: %08x\n",
                 vlapic_vcpu(vlapic)->vcpu_id,
                 vlapic_get_reg(vlapic, APIC_DFR));
        break;
    }

    return result;
}

static int vlapic_match_dest(struct vcpu *v, struct vlapic *source,
                             int short_hand, int dest, int dest_mode)
{
    int result = 0;
    struct vlapic *target = vcpu_vlapic(v);

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "target %p, source %p, dest 0x%x, "
                "dest_mode 0x%x, short_hand 0x%x",
                target, source, dest, dest_mode, short_hand);

    switch ( short_hand )
    {
    case APIC_DEST_NOSHORT:
        if ( dest_mode == 0 )
        {
            /* Physical mode. */
            if ( (dest == 0xFF) || (dest == VLAPIC_ID(target)) )
                result = 1;
        }
        else
        {
            /* Logical mode. */
            result = vlapic_match_logical_addr(target, dest);
        }
        break;

    case APIC_DEST_SELF:
        if ( target == source )
            result = 1;
        break;

    case APIC_DEST_ALLINC:
        result = 1;
        break;

    case APIC_DEST_ALLBUT:
        if ( target != source )
            result = 1;
        break;

    default:
        gdprintk(XENLOG_WARNING, "Bad dest shorthand value %x\n", short_hand);
        break;
    }

    return result;
}
/* Add a pending IRQ into lapic. */
static int vlapic_accept_irq(struct vcpu *v, int delivery_mode,
                             int vector, int level, int trig_mode)
{
    int result = 0;
    struct vlapic *vlapic = vcpu_vlapic(v);

    switch ( delivery_mode )
    {
    case APIC_DM_FIXED:
    case APIC_DM_LOWEST:
        /* FIXME add logic for vcpu on reset */
        if ( unlikely(!vlapic_enabled(vlapic)) )
            break;

        if ( vlapic_test_and_set_irr(vector, vlapic) && trig_mode )
        {
            HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                        "level trig mode repeatedly for vector %d", vector);
            break;
        }

        if ( trig_mode )
        {
            HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                        "level trig mode for vector %d", vector);
            vlapic_set_vector(vector, &vlapic->regs->data[APIC_TMR]);
        }

        vcpu_kick(v);

        result = 1;
        break;

    case APIC_DM_REMRD:
        gdprintk(XENLOG_WARNING, "Ignoring delivery mode 3\n");
        break;

    case APIC_DM_SMI:
        gdprintk(XENLOG_WARNING, "Ignoring guest SMI\n");
        break;

    case APIC_DM_NMI:
        if ( !test_and_set_bool(v->nmi_pending) )
            vcpu_kick(v);
        break;

    case APIC_DM_INIT:
        /* No work on INIT de-assert for P4-type APIC. */
        if ( trig_mode && !(level & APIC_INT_ASSERT) )
            break;
        /* FIXME How to check the situation after vcpu reset? */
        if ( v->is_initialised )
            hvm_vcpu_reset(v);
        v->arch.hvm_vcpu.init_sipi_sipi_state =
            HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI;
        result = 1;
        break;

    case APIC_DM_STARTUP:
        if ( v->arch.hvm_vcpu.init_sipi_sipi_state ==
             HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM )
            break;

        v->arch.hvm_vcpu.init_sipi_sipi_state =
            HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM;

        if ( v->is_initialised )
        {
            gdprintk(XENLOG_ERR, "SIPI for initialized vcpu %x\n", v->vcpu_id);
            goto exit_and_crash;
        }

        if ( hvm_bringup_ap(v->vcpu_id, vector) != 0 )
            result = 0;
        break;

    default:
        gdprintk(XENLOG_ERR, "TODO: unsupported delivery mode %x\n",
                 delivery_mode);
        goto exit_and_crash;
    }

    return result;

 exit_and_crash:
    domain_crash(v->domain);
    return 0;
}
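
/*
 * Lowest-priority delivery is approximated by a round-robin choice among
 * the enabled vlapics named in the destination bitmap.
 */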
/* This function is used by both ioapic and lapic. The bitmap is indexed by vcpu_id. */
struct vlapic *apic_round_robin(
    struct domain *d, uint8_t vector, uint32_t bitmap)
{
    int next, old;
    struct vlapic *target = NULL;

    old = next = d->arch.hvm_domain.irq.round_robin_prev_vcpu;

    do {
        if ( ++next == MAX_VIRT_CPUS )
            next = 0;
        if ( (d->vcpu[next] == NULL) || !test_bit(next, &bitmap) )
            continue;
        target = vcpu_vlapic(d->vcpu[next]);
        if ( vlapic_enabled(target) )
            break;
        target = NULL;
    } while ( next != old );

    d->arch.hvm_domain.irq.round_robin_prev_vcpu = next;

    return target;
}

void vlapic_EOI_set(struct vlapic *vlapic)
{
    int vector = vlapic_find_highest_isr(vlapic);

    /* Some EOI writes may not correspond to an in-service interrupt. */
    if ( vector == -1 )
        return;

    vlapic_clear_vector(vector, &vlapic->regs->data[APIC_ISR]);

    if ( vlapic_test_and_clear_vector(vector, &vlapic->regs->data[APIC_TMR]) )
        vioapic_update_EOI(vlapic_domain(vlapic), vector);
}

static void vlapic_ipi(struct vlapic *vlapic)
{
    uint32_t icr_low = vlapic_get_reg(vlapic, APIC_ICR);
    uint32_t icr_high = vlapic_get_reg(vlapic, APIC_ICR2);

    unsigned int dest = GET_APIC_DEST_FIELD(icr_high);
    unsigned int short_hand = icr_low & APIC_SHORT_MASK;
    unsigned int trig_mode = icr_low & APIC_INT_LEVELTRIG;
    unsigned int level = icr_low & APIC_INT_ASSERT;
    unsigned int dest_mode = icr_low & APIC_DEST_MASK;
    unsigned int delivery_mode = icr_low & APIC_MODE_MASK;
    unsigned int vector = icr_low & APIC_VECTOR_MASK;

    struct vlapic *target;
    struct vcpu *v;
    uint32_t lpr_map = 0;

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "icr_high 0x%x, icr_low 0x%x, "
                "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
                "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x",
                icr_high, icr_low, short_hand, dest,
                trig_mode, level, dest_mode, delivery_mode, vector);

    for_each_vcpu ( vlapic_domain(vlapic), v )
    {
        if ( vlapic_match_dest(v, vlapic, short_hand, dest, dest_mode) )
        {
            if ( delivery_mode == APIC_DM_LOWEST )
                __set_bit(v->vcpu_id, &lpr_map);
            else
                vlapic_accept_irq(v, delivery_mode,
                                  vector, level, trig_mode);
        }
    }

    if ( delivery_mode == APIC_DM_LOWEST )
    {
        target = apic_round_robin(vlapic_domain(v), vector, lpr_map);
        if ( target != NULL )
            vlapic_accept_irq(vlapic_vcpu(target), delivery_mode,
                              vector, level, trig_mode);
    }
}
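
/*
 * The current count (TMCCT) is derived from the initial count and the guest
 * time elapsed since the timer was last armed, converted to APIC bus cycles
 * and scaled by the divide configuration.
 */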
static uint32_t vlapic_get_tmcct(struct vlapic *vlapic)
{
    struct vcpu *v = current;
    uint32_t tmcct, tmict = vlapic_get_reg(vlapic, APIC_TMICT);
    uint64_t counter_passed;

    counter_passed = ((hvm_get_guest_time(v) - vlapic->timer_last_update)
                      * 1000000000ULL / ticks_per_sec(v)
                      / APIC_BUS_CYCLE_NS / vlapic->hw.timer_divisor);
    tmcct = tmict - counter_passed;

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
                "timer initial count %d, timer current count %d, "
                "offset %"PRId64,
                tmict, tmcct, counter_passed);

    return tmcct;
}
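
/*
 * TDCR bits 0, 1 and 3 encode the divide value: encodings 0..6 select
 * divisors 2, 4, 8, ..., 128, while 7 means divide by 1.
 */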
static void vlapic_set_tdcr(struct vlapic *vlapic, unsigned int val)
{
    /* Only bits 0, 1 and 3 are settable; others are MBZ. */
    val &= 0xb;
    vlapic_set_reg(vlapic, APIC_TDCR, val);

    /* Update the demangled hw.timer_divisor. */
    val = ((val & 3) | ((val & 8) >> 1)) + 1;
    vlapic->hw.timer_divisor = 1 << (val & 7);

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
                "timer_divisor: %d", vlapic->hw.timer_divisor);
}

static void vlapic_read_aligned(struct vlapic *vlapic, unsigned int offset,
                                unsigned int len, unsigned int *result)
{
    ASSERT((len == 4) && (offset >= 0) && (offset <= APIC_TDCR));

    switch ( offset )
    {
    case APIC_PROCPRI:
        *result = vlapic_get_ppr(vlapic);
        break;

    case APIC_TMCCT: /* Timer CCR */
        *result = vlapic_get_tmcct(vlapic);
        break;

    default:
        *result = vlapic_get_reg(vlapic, offset);
        break;
    }
}

static unsigned long vlapic_read(struct vcpu *v, unsigned long address,
                                 unsigned long len)
{
    unsigned int alignment;
    unsigned int tmp;
    unsigned long result;
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned int offset = address - vlapic_base_address(vlapic);

    if ( offset > APIC_TDCR )
        return 0;

    /* Some buggy kernels read these registers with byte accesses. */
    if ( len != 4 )
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                    "read with len=0x%lx, should be 4 instead",
                    len);

    alignment = offset & 0x3;

    vlapic_read_aligned(vlapic, offset & ~0x3, 4, &tmp);
    switch ( len )
    {
    case 1:
        result = *((unsigned char *)&tmp + alignment);
        break;

    case 2:
        ASSERT( alignment != 3 );
        result = *(unsigned short *)((unsigned char *)&tmp + alignment);
        break;

    case 4:
        ASSERT( alignment == 0 );
        result = *(unsigned int *)((unsigned char *)&tmp + alignment);
        break;

    default:
        gdprintk(XENLOG_ERR, "Local APIC read with len=0x%lx, "
                 "should be 4 instead.\n", len);
        goto exit_and_crash;
    }

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "offset 0x%x with length 0x%lx, "
                "and the result is 0x%lx", offset, len, result);

    return result;

 exit_and_crash:
    domain_crash(v->domain);
    return 0;
}
void vlapic_pt_cb(struct vcpu *v, void *data)
{
    *(s_time_t *)data = hvm_get_guest_time(v);
}

static void vlapic_write(struct vcpu *v, unsigned long address,
                         unsigned long len, unsigned long val)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned int offset = address - vlapic_base_address(vlapic);

    if ( offset != 0xb0 )
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                    "offset 0x%x with length 0x%lx, and value is 0x%lx",
                    offset, len, val);

    /*
     * According to the IA32 Manual, all accesses should be 32 bits.
     * Some OSes do 8- or 16-bit accesses, however.
     */
    val &= 0xffffffff;
    if ( len != 4 )
    {
        unsigned int tmp;
        unsigned char alignment;

        gdprintk(XENLOG_INFO, "Notice: Local APIC write with len = %lx\n",len);

        alignment = offset & 0x3;
        tmp = vlapic_read(v, offset & ~0x3, 4);

        switch ( len )
        {
        case 1:
            val = (tmp & ~(0xff << (8*alignment))) |
                  ((val & 0xff) << (8*alignment));
            break;

        case 2:
            if ( alignment & 1 )
            {
                gdprintk(XENLOG_ERR, "Uneven alignment error for "
                         "2-byte vlapic access\n");
                goto exit_and_crash;
            }

            val = (tmp & ~(0xffff << (8*alignment))) |
                  ((val & 0xffff) << (8*alignment));
            break;

        default:
            gdprintk(XENLOG_ERR, "Local APIC write with len = %lx, "
                     "should be 4 instead\n", len);
        exit_and_crash:
            domain_crash(v->domain);
            return;
        }
    }

    offset &= 0xff0;

    switch ( offset )
    {
    case APIC_TASKPRI:
        vlapic_set_reg(vlapic, APIC_TASKPRI, val & 0xff);
        hvm_update_vtpr(v, (val >> 4) & 0x0f);
        break;

    case APIC_EOI:
        vlapic_EOI_set(vlapic);
        break;

    case APIC_LDR:
        vlapic_set_reg(vlapic, APIC_LDR, val & APIC_LDR_MASK);
        break;

    case APIC_DFR:
        vlapic_set_reg(vlapic, APIC_DFR, val | 0x0FFFFFFF);
        break;

    case APIC_SPIV:
        vlapic_set_reg(vlapic, APIC_SPIV, val & 0x3ff);

        if ( !(val & APIC_SPIV_APIC_ENABLED) )
        {
            int i;
            uint32_t lvt_val;

            vlapic->hw.disabled |= VLAPIC_SW_DISABLED;

            for ( i = 0; i < VLAPIC_LVT_NUM; i++ )
            {
                lvt_val = vlapic_get_reg(vlapic, APIC_LVTT + 0x10 * i);
                vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i,
                               lvt_val | APIC_LVT_MASKED);
            }
        }
        else
            vlapic->hw.disabled &= ~VLAPIC_SW_DISABLED;
        break;

    case APIC_ESR:
        /* Nothing to do. */
        break;

    case APIC_ICR:
        /* No delay here, so we always clear the pending bit. */
        vlapic_set_reg(vlapic, APIC_ICR, val & ~(1 << 12));
        vlapic_ipi(vlapic);
        break;

    case APIC_ICR2:
        vlapic_set_reg(vlapic, APIC_ICR2, val & 0xff000000);
        break;

    case APIC_LVTT: /* LVT Timer Reg */
        vlapic->pt.irq = val & APIC_VECTOR_MASK;
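        /* Fall through: apply the LVT mask and store the register. */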
    case APIC_LVTTHMR: /* LVT Thermal Monitor */
    case APIC_LVTPC:   /* LVT Performance Counter */
    case APIC_LVT0:    /* LVT LINT0 Reg */
    case APIC_LVT1:    /* LVT Lint1 Reg */
    case APIC_LVTERR:  /* LVT Error Reg */
        if ( vlapic_sw_disabled(vlapic) )
            val |= APIC_LVT_MASKED;
        val &= vlapic_lvt_mask[(offset - APIC_LVTT) >> 4];
        vlapic_set_reg(vlapic, offset, val);
        break;

    case APIC_TMICT:
    {
        uint64_t period = (uint64_t)APIC_BUS_CYCLE_NS *
                          (uint32_t)val * vlapic->hw.timer_divisor;

        vlapic_set_reg(vlapic, APIC_TMICT, val);
        create_periodic_time(current, &vlapic->pt, period, vlapic->pt.irq,
                             !vlapic_lvtt_period(vlapic), vlapic_pt_cb,
                             &vlapic->timer_last_update);
        vlapic->timer_last_update = vlapic->pt.last_plt_gtime;

        HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                    "bus cycle is %uns, "
                    "initial count %lu, period %"PRIu64"ns",
                    APIC_BUS_CYCLE_NS, val, period);
    }
    break;

    case APIC_TDCR:
        vlapic_set_tdcr(vlapic, val & 0xb);
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "timer divisor is 0x%x",
                    vlapic->hw.timer_divisor);
        break;

    default:
        gdprintk(XENLOG_DEBUG,
                 "Local APIC Write to read-only register 0x%x\n", offset);
        break;
    }
}

static int vlapic_range(struct vcpu *v, unsigned long addr)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned long offset  = addr - vlapic_base_address(vlapic);
    return (!vlapic_hw_disabled(vlapic) && (offset < PAGE_SIZE));
}

struct hvm_mmio_handler vlapic_mmio_handler = {
    .check_handler = vlapic_range,
    .read_handler = vlapic_read,
    .write_handler = vlapic_write
};
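
/*
 * Handle guest writes to the IA32_APIC_BASE MSR. Toggling the global enable
 * bit hardware-disables the vlapic, or resets and re-enables it.
 */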
void vlapic_msr_set(struct vlapic *vlapic, uint64_t value)
{
    if ( (vlapic->hw.apic_base_msr ^ value) & MSR_IA32_APICBASE_ENABLE )
    {
        if ( value & MSR_IA32_APICBASE_ENABLE )
        {
            vlapic_reset(vlapic);
            vlapic->hw.disabled &= ~VLAPIC_HW_DISABLED;
        }
        else
        {
            vlapic->hw.disabled |= VLAPIC_HW_DISABLED;
        }
    }

    vlapic->hw.apic_base_msr = value;

    vmx_vlapic_msr_changed(vlapic_vcpu(vlapic));

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                "apic base msr is 0x%016"PRIx64, vlapic->hw.apic_base_msr);
}

int vlapic_accept_pic_intr(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    uint32_t lvt0 = vlapic_get_reg(vlapic, APIC_LVT0);

    /*
     * Only CPU0 is wired to the 8259A. INTA cycles occur if LINT0 is set up
     * to accept ExtInts, or if the LAPIC is disabled (so LINT0 behaves as INTR).
     */
    return ((v->vcpu_id == 0) &&
            (((lvt0 & (APIC_MODE_MASK|APIC_LVT_MASKED)) == APIC_DM_EXTINT) ||
             vlapic_hw_disabled(vlapic)));
}
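
/*
 * An IRR vector is only reported as pending if its priority class is
 * strictly higher than that of the highest vector currently in service.
 */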
int vlapic_has_pending_irq(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    int irr, isr;

    if ( !vlapic_enabled(vlapic) )
        return -1;

    irr = vlapic_find_highest_irr(vlapic);
    if ( irr == -1 )
        return -1;

    isr = vlapic_find_highest_isr(vlapic);
    isr = (isr != -1) ? isr : 0;
    if ( (isr & 0xf0) >= (irr & 0xf0) )
        return -1;

    return irr;
}

int vlapic_ack_pending_irq(struct vcpu *v, int vector)
{
    struct vlapic *vlapic = vcpu_vlapic(v);

    vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
    vlapic_clear_irr(vector, vlapic);

    return 1;
}

/* Reset the VLAPIC back to its power-on/reset state. */
void vlapic_reset(struct vlapic *vlapic)
{
    struct vcpu *v = vlapic_vcpu(vlapic);
    int i;

    vlapic_set_reg(vlapic, APIC_ID, (v->vcpu_id * 2) << 24);
    vlapic_set_reg(vlapic, APIC_LVR, VLAPIC_VERSION);

    for ( i = 0; i < 8; i++ )
    {
        vlapic_set_reg(vlapic, APIC_IRR + 0x10 * i, 0);
        vlapic_set_reg(vlapic, APIC_ISR + 0x10 * i, 0);
        vlapic_set_reg(vlapic, APIC_TMR + 0x10 * i, 0);
    }
    vlapic_set_reg(vlapic, APIC_ICR, 0);
    vlapic_set_reg(vlapic, APIC_ICR2, 0);
    vlapic_set_reg(vlapic, APIC_LDR, 0);
    vlapic_set_reg(vlapic, APIC_TASKPRI, 0);
    vlapic_set_reg(vlapic, APIC_TMICT, 0);
    vlapic_set_reg(vlapic, APIC_TMCCT, 0);
    vlapic_set_tdcr(vlapic, 0);

    vlapic_set_reg(vlapic, APIC_DFR, 0xffffffffU);

    for ( i = 0; i < VLAPIC_LVT_NUM; i++ )
        vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);

    vlapic_set_reg(vlapic, APIC_SPIV, 0xff);
    vlapic->hw.disabled |= VLAPIC_SW_DISABLED;
}

#ifdef HVM_DEBUG_SUSPEND
static void lapic_info(struct vlapic *s)
{
    printk("*****lapic state:*****\n");
    printk("lapic 0x%"PRIx64".\n", s->hw.apic_base_msr);
    printk("lapic 0x%x.\n", s->hw.disabled);
    printk("lapic 0x%x.\n", s->hw.timer_divisor);
}
#else
static void lapic_info(struct vlapic *s)
{
}
#endif

/* Re-arm the APIC timer if needed, after an HVM restore. */
static void lapic_rearm(struct vlapic *s)
{
    unsigned long tmict;

    tmict = vlapic_get_reg(s, APIC_TMICT);
    if ( tmict > 0 )
    {
        uint64_t period = (uint64_t)APIC_BUS_CYCLE_NS *
                          (uint32_t)tmict * s->hw.timer_divisor;
        uint32_t lvtt = vlapic_get_reg(s, APIC_LVTT);

        s->pt.irq = lvtt & APIC_VECTOR_MASK;
        create_periodic_time(vlapic_vcpu(s), &s->pt, period, s->pt.irq,
                             !vlapic_lvtt_period(s), vlapic_pt_cb,
                             &s->timer_last_update);
        s->timer_last_update = s->pt.last_plt_gtime;

        printk("lapic_load to rearm the actimer:"
               "bus cycle is %uns, "
               "saved tmict count %lu, period %"PRIu64"ns, irq=%"PRIu8"\n",
               APIC_BUS_CYCLE_NS, tmict, period, s->pt.irq);
    }

    lapic_info(s);
}
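
/*
 * Save/restore: the "hidden" state (APIC base MSR, enable flags, timer
 * divisor) and the register page are saved as separate per-vcpu records.
 */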
static int lapic_save_hidden(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;
    struct vlapic *s;

    for_each_vcpu(d, v)
    {
        s = vcpu_vlapic(v);
        lapic_info(s);

        if ( hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw) != 0 )
            return 1;
    }
    return 0;
}

static int lapic_save_regs(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;
    struct vlapic *s;

    for_each_vcpu(d, v)
    {
        s = vcpu_vlapic(v);
        if ( hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs) != 0 )
            return 1;
    }
    return 0;
}

static int lapic_load_hidden(struct domain *d, hvm_domain_context_t *h)
{
    uint16_t vcpuid;
    struct vcpu *v;
    struct vlapic *s;

    /* Which vlapic to load? */
    vcpuid = hvm_load_instance(h);
    if ( vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
        return -EINVAL;
    }
    s = vcpu_vlapic(v);

    if ( hvm_load_entry(LAPIC, h, &s->hw) != 0 )
        return -EINVAL;

    lapic_info(s);

    vmx_vlapic_msr_changed(v);

    return 0;
}

static int lapic_load_regs(struct domain *d, hvm_domain_context_t *h)
{
    uint16_t vcpuid;
    struct vcpu *v;
    struct vlapic *s;

    /* Which vlapic to load? */
    vcpuid = hvm_load_instance(h);
    if ( vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
        return -EINVAL;
    }
    s = vcpu_vlapic(v);

    if ( hvm_load_entry(LAPIC_REGS, h, s->regs) != 0 )
        return -EINVAL;

    lapic_rearm(s);
    return 0;
}

HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden,
                          1, HVMSR_PER_VCPU);
HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs,
                          1, HVMSR_PER_VCPU);

int vlapic_init(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned int memflags = 0;

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "%d", v->vcpu_id);

    vlapic->pt.source = PTSRC_lapic;

#ifdef __i386__
    /* 32-bit VMX may be limited to 32-bit physical addresses. */
    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
        memflags = MEMF_bits(32);
#endif

    vlapic->regs_page = alloc_domheap_pages(NULL, 0, memflags);
    if ( vlapic->regs_page == NULL )
    {
        dprintk(XENLOG_ERR, "alloc vlapic regs error: %d/%d\n",
                v->domain->domain_id, v->vcpu_id);
        return -ENOMEM;
    }

    vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));
    if ( vlapic->regs == NULL )
    {
        dprintk(XENLOG_ERR, "map vlapic regs error: %d/%d\n",
                v->domain->domain_id, v->vcpu_id);
        return -ENOMEM;
    }

    clear_page(vlapic->regs);

    vlapic_reset(vlapic);

    vlapic->hw.apic_base_msr = (MSR_IA32_APICBASE_ENABLE |
                                APIC_DEFAULT_PHYS_BASE);
    if ( v->vcpu_id == 0 )
        vlapic->hw.apic_base_msr |= MSR_IA32_APICBASE_BSP;

    return 0;
}

void vlapic_destroy(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);

    destroy_periodic_time(&vlapic->pt);
    unmap_domain_page_global(vlapic->regs);
    free_domheap_page(vlapic->regs_page);
}