ia64/xen-unstable

view xen/arch/x86/hvm/vlapic.c @ 11075:f393ced88d14

[HVM] Fix some IOAPIC and LAPIC device model bugs.

Fix some boundary checking errors.
Fix the confusion between the variables 'level' and 'trig_mode'.
Also fix some other misc mistakes and do some cleanup.

Signed-off-by: Dexuan Cui <dexuan.cui@intel.com>
author kfraser@localhost.localdomain
date Thu Aug 10 10:47:37 2006 +0100 (2006-08-10)
parents 415614d3a1ee
children d20e1835c24b
/*
 * vlapic.c: virtualize LAPIC for HVM vcpus.
 *
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/xmalloc.h>
#include <asm/shadow.h>
#include <asm/page.h>
#include <xen/event.h>
#include <xen/trace.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>

#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/current.h>
#include <public/hvm/ioreq.h>
#include <public/hvm/params.h>

/* XXX remove this definition after GFW enabled */
#define VLAPIC_NO_BIOS

extern u32 get_apic_bus_cycle(void);

#define APIC_BUS_CYCLE_NS (((s_time_t)get_apic_bus_cycle()) / 1000)

static unsigned int vlapic_lvt_mask[VLAPIC_LVT_NUM] =
{
    /* LVTT */
    LVT_MASK | APIC_LVT_TIMER_PERIODIC,
    /* LVTTHMR */
    LVT_MASK | APIC_MODE_MASK,
    /* LVTPC */
    LVT_MASK | APIC_MODE_MASK,
    /* LVT0-1 */
    LINT_MASK, LINT_MASK,
    /* LVTERR */
    LVT_MASK
};

int hvm_apic_support(struct domain *d)
{
    return d->arch.hvm_domain.params[HVM_PARAM_APIC_ENABLED];
}

int vlapic_find_highest_irr(struct vlapic *vlapic)
{
    int result;

    result = find_highest_bit((unsigned long *)(vlapic->regs + APIC_IRR),
                              MAX_VECTOR);

    ASSERT( result == -1 || result >= 16 );

    return result;
}

s_time_t get_apictime_scheduled(struct vcpu *v)
{
    struct vlapic *vlapic = VLAPIC(v);

    if ( !hvm_apic_support(v->domain) ||
         !vlapic_lvt_enabled(vlapic, APIC_LVTT) )
        return -1;

    return vlapic->vlapic_timer.expires;
}

int vlapic_find_highest_isr(struct vlapic *vlapic)
{
    int result;

    result = find_highest_bit((unsigned long *)(vlapic->regs + APIC_ISR),
                              MAX_VECTOR);

    ASSERT( result == -1 || result >= 16 );

    return result;
}
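
/*
 * The processor priority (PPR) is the higher of the task priority (TPR) and
 * the priority class of the highest vector currently in service (ISRV); when
 * the in-service class wins, the low four bits of PPR are cleared.
 */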
uint32_t vlapic_update_ppr(struct vlapic *vlapic)
{
    uint32_t tpr, isrv, ppr;
    int isr;

    tpr = vlapic_get_reg(vlapic, APIC_TASKPRI);

    isr = vlapic_find_highest_isr(vlapic);

    if ( isr != -1 )
        isrv = (isr >> 4) & 0xf;
    else
        isrv = 0;

    if ( (tpr >> 4) >= isrv )
        ppr = tpr & 0xff;
    else
        ppr = isrv << 4;  /* low 4 bits of PPR have to be cleared */

    vlapic_set_reg(vlapic, APIC_PROCPRI, ppr);

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC_INTERRUPT,
                "vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x.",
                vlapic, ppr, isr, isrv);

    return ppr;
}
/* This is only for fixed delivery mode. */
static int vlapic_match_dest(struct vcpu *v, struct vlapic *source,
                             int short_hand, int dest, int dest_mode,
                             int delivery_mode)
{
    int result = 0;
    struct vlapic *target = VLAPIC(v);

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "target %p, source %p, dest 0x%x, "
                "dest_mode 0x%x, short_hand 0x%x, delivery_mode 0x%x.",
                target, source, dest, dest_mode, short_hand, delivery_mode);

    if ( unlikely(target == NULL) &&
         ((delivery_mode != APIC_DM_INIT) &&
          (delivery_mode != APIC_DM_STARTUP) &&
          (delivery_mode != APIC_DM_NMI)) )
    {
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "uninitialized target vcpu %p, "
                    "delivery_mode 0x%x, dest 0x%x.\n", v, delivery_mode, dest);
        return result;
    }

    switch ( short_hand ) {
    case APIC_DEST_NOSHORT:             /* no shorthand */
        if ( !dest_mode )               /* Physical */
        {
            result = ((target != NULL) ?
                      GET_APIC_ID(vlapic_get_reg(target, APIC_ID)) :
                      v->vcpu_id) == dest;
        }
        else                            /* Logical */
        {
            uint32_t ldr;
            if ( target == NULL )
                break;
            ldr = vlapic_get_reg(target, APIC_LDR);

            /* Flat mode */
            if ( vlapic_get_reg(target, APIC_DFR) == APIC_DFR_FLAT )
            {
                result = GET_APIC_LOGICAL_ID(ldr) & dest;
            }
            else
            {
                if ( (delivery_mode == APIC_DM_LOWEST) &&
                     (dest == 0xff) )
                {
                    /* What shall we do now? */
                    printk("Broadcast IPI with lowest priority "
                           "delivery mode\n");
                    domain_crash_synchronous();
                }
                result = (GET_APIC_LOGICAL_ID(ldr) == (dest & 0xf)) ?
                         (GET_APIC_LOGICAL_ID(ldr) >> 4) & (dest >> 4) : 0;
            }
        }
        break;

    case APIC_DEST_SELF:
        if ( target == source )
            result = 1;
        break;

    case APIC_DEST_ALLINC:
        result = 1;
        break;

    case APIC_DEST_ALLBUT:
        if ( target != source )
            result = 1;
        break;

    default:
        break;
    }

    return result;
}
/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int vlapic_accept_irq(struct vcpu *v, int delivery_mode,
                             int vector, int level, int trig_mode)
{
    int result = 0;
    struct vlapic *vlapic = VLAPIC(v);

    switch ( delivery_mode ) {
    case APIC_DM_FIXED:
    case APIC_DM_LOWEST:
        /* FIXME add logic for vcpu on reset */
        if ( unlikely(vlapic == NULL || !vlapic_enabled(vlapic)) )
            break;

        if ( test_and_set_bit(vector, vlapic->regs + APIC_IRR) && trig_mode )
        {
            HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                        "level trig mode repeatedly for vector %d\n", vector);
            break;
        }

        if ( trig_mode )
        {
            HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                        "level trig mode for vector %d\n", vector);
            set_bit(vector, vlapic->regs + APIC_TMR);
        }

        hvm_prod_vcpu(v);

        result = 1;
        break;

    case APIC_DM_REMRD:
        printk("Ignore deliver mode 3 in vlapic_accept_irq\n");
        break;

    case APIC_DM_SMI:
    case APIC_DM_NMI:
        /* Fixme */
        printk("TODO: for guest SMI/NMI\n");
        break;

    case APIC_DM_INIT:
        if ( trig_mode && !(level & APIC_INT_ASSERT) )      /* Deassert */
            printk("This hvm_vlapic is for P4, no work for De-assert init\n");
        else
        {
            /* FIXME How to check the situation after vcpu reset? */
            if ( test_and_clear_bit(_VCPUF_initialised, &v->vcpu_flags) )
            {
                printk("Reset hvm vcpu not supported yet\n");
                domain_crash_synchronous();
            }
            v->arch.hvm_vcpu.init_sipi_sipi_state =
                HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI;
            result = 1;
        }
        break;

    case APIC_DM_STARTUP:
        if ( v->arch.hvm_vcpu.init_sipi_sipi_state ==
             HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM )
            break;

        v->arch.hvm_vcpu.init_sipi_sipi_state =
            HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM;

        if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
        {
            printk("SIPI for initialized vcpu vcpuid %x\n", v->vcpu_id);
            domain_crash_synchronous();
        }

        if ( hvm_bringup_ap(v->vcpu_id, vector) != 0 )
            result = 0;
        break;

    default:
        printk("TODO: not support interrupt type %x\n", delivery_mode);
        domain_crash_synchronous();
        break;
    }

    return result;
}
/*
 * This function is used by both the ioapic and the local APIC.
 * The bitmap is indexed by vcpu_id.
 */
struct vlapic *apic_round_robin(struct domain *d,
                                uint8_t dest_mode,
                                uint8_t vector,
                                uint32_t bitmap)
{
    int next, old;
    struct vlapic *target = NULL;

    if ( dest_mode == 0 )       /* Physical mode */
    {
        printk("<apic_round_robin> lowest priority for physical mode.\n");
        return NULL;
    }

    if ( !bitmap )
    {
        printk("<apic_round_robin> no bit set in bitmap.\n");
        return NULL;
    }

    spin_lock(&d->arch.hvm_domain.round_robin_lock);

    old = next = d->arch.hvm_domain.round_info[vector];

    /* the vcpu array is arranged according to vcpu_id */
    do {
        if ( ++next == MAX_VIRT_CPUS )
            next = 0;
        if ( d->vcpu[next] == NULL ||
             !test_bit(_VCPUF_initialised, &d->vcpu[next]->vcpu_flags) )
            continue;

        if ( test_bit(next, &bitmap) )
        {
            target = d->vcpu[next]->arch.hvm_vcpu.vlapic;

            if ( target == NULL || !vlapic_enabled(target) )
            {
                printk("warning: target round robin local apic disabled\n");
                /* XXX should we domain crash?? Or should we return NULL */
            }
            break;
        }
    } while ( next != old );

    d->arch.hvm_domain.round_info[vector] = next;
    spin_unlock(&d->arch.hvm_domain.round_robin_lock);

    return target;
}
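
/*
 * An EOI retires the highest-priority in-service vector; if that vector was
 * level-triggered (recorded in TMR), the EOI is also forwarded to the
 * virtual IOAPIC.
 */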
void vlapic_EOI_set(struct vlapic *vlapic)
{
    int vector = vlapic_find_highest_isr(vlapic);

    /* Not every EOI write has a corresponding ISR bit set; one example is
     * when the kernel checks the timer in setup_IO_APIC. */
    if ( vector == -1 )
        return;

    clear_bit(vector, vlapic->regs + APIC_ISR);
    vlapic_update_ppr(vlapic);

    if ( test_and_clear_bit(vector, vlapic->regs + APIC_TMR) )
        ioapic_update_EOI(vlapic->domain, vector);
}
static int vlapic_check_vector(struct vlapic *vlapic,
                               uint32_t dm, uint32_t vector)
{
    if ( (dm == APIC_DM_FIXED) && (vector < 16) )
    {
        vlapic->err_status |= 0x40;
        vlapic_accept_irq(vlapic->vcpu, APIC_DM_FIXED,
                          vlapic_lvt_vector(vlapic, APIC_LVTERR), 0, 0);
        printk("<vlapic_check_vector>: check failed "
               " dm %x vector %x\n", dm, vector);
        return 0;
    }
    return 1;
}
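
/*
 * Decode the ICR and deliver the IPI to every local APIC that matches the
 * destination; lowest-priority IPIs are collected into a vcpu bitmap and
 * arbitrated by apic_round_robin() afterwards.
 */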
static void vlapic_ipi(struct vlapic *vlapic)
{
    uint32_t icr_low  = vlapic_get_reg(vlapic, APIC_ICR);
    uint32_t icr_high = vlapic_get_reg(vlapic, APIC_ICR2);

    unsigned int dest          = GET_APIC_DEST_FIELD(icr_high);
    unsigned int short_hand    = icr_low & APIC_SHORT_MASK;
    unsigned int trig_mode     = icr_low & APIC_INT_LEVELTRIG;
    unsigned int level         = icr_low & APIC_INT_ASSERT;
    unsigned int dest_mode     = icr_low & APIC_DEST_MASK;
    unsigned int delivery_mode = icr_low & APIC_MODE_MASK;
    unsigned int vector        = icr_low & APIC_VECTOR_MASK;

    struct vlapic *target;
    struct vcpu *v = NULL;
    uint32_t lpr_map = 0;

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "icr_high 0x%x, icr_low 0x%x, "
                "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
                "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x.",
                icr_high, icr_low, short_hand, dest,
                trig_mode, level, dest_mode, delivery_mode, vector);

    for_each_vcpu ( vlapic->domain, v )
    {
        if ( vlapic_match_dest(v, vlapic, short_hand,
                               dest, dest_mode, delivery_mode) )
        {
            if ( delivery_mode == APIC_DM_LOWEST )
                set_bit(v->vcpu_id, &lpr_map);
            else
                vlapic_accept_irq(v, delivery_mode,
                                  vector, level, trig_mode);
        }
    }

    if ( delivery_mode == APIC_DM_LOWEST )
    {
        v = vlapic->vcpu;
        target = apic_round_robin(v->domain, dest_mode, vector, lpr_map);

        if ( target )
            vlapic_accept_irq(target->vcpu, delivery_mode,
                              vector, level, trig_mode);
    }
}
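
/*
 * Derive the timer's current count from the time elapsed since the last
 * update: one count corresponds to APIC_BUS_CYCLE_NS * timer_divide_count
 * nanoseconds, and a periodic timer wraps modulo the initial count.
 */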
static uint32_t vlapic_get_tmcct(struct vlapic *vlapic)
{
    uint32_t counter_passed;
    s_time_t passed, now = NOW();
    uint32_t tmcct = vlapic_get_reg(vlapic, APIC_TMCCT);

    ASSERT(vlapic != NULL);

    if ( unlikely(now <= vlapic->timer_last_update) )
    {
        passed = ~0x0LL - vlapic->timer_last_update + now;
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "time elapsed.");
    }
    else
        passed = now - vlapic->timer_last_update;

    counter_passed = passed /
                     (APIC_BUS_CYCLE_NS * vlapic->timer_divide_count);

    tmcct -= counter_passed;

    if ( tmcct <= 0 )
    {
        if ( unlikely(!vlapic_lvtt_period(vlapic)) )
        {
            tmcct = 0;
            /* FIXME: should we add interrupt here? */
        }
        else
        {
            do {
                tmcct += vlapic_get_reg(vlapic, APIC_TMICT);
            } while ( tmcct <= 0 );
        }
    }

    vlapic->timer_last_update = now;
    vlapic_set_reg(vlapic, APIC_TMCCT, tmcct);

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
                "timer initial count 0x%x, timer current count 0x%x, "
                "update 0x%016"PRIx64", now 0x%016"PRIx64", offset 0x%x.",
                vlapic_get_reg(vlapic, APIC_TMICT),
                vlapic_get_reg(vlapic, APIC_TMCCT),
                vlapic->timer_last_update, now, counter_passed);

    return tmcct;
}
static void vlapic_read_aligned(struct vlapic *vlapic, unsigned int offset,
                                unsigned int len, unsigned int *result)
{
    ASSERT(len == 4 && offset > 0 && offset <= APIC_TDCR);

    *result = 0;

    switch ( offset ) {
    case APIC_ARBPRI:
        printk("access local APIC ARBPRI register which is for P6\n");
        break;

    case APIC_TMCCT:        /* Timer CCR */
        *result = vlapic_get_tmcct(vlapic);
        break;

    case APIC_ESR:
        vlapic->err_write_count = 0;
        *result = vlapic_get_reg(vlapic, offset);
        break;

    default:
        *result = vlapic_get_reg(vlapic, offset);
        break;
    }
}
static unsigned long vlapic_read(struct vcpu *v, unsigned long address,
                                 unsigned long len)
{
    unsigned int alignment;
    unsigned int tmp;
    unsigned long result;
    struct vlapic *vlapic = VLAPIC(v);
    unsigned int offset = address - vlapic->base_address;

    if ( offset > APIC_TDCR )
        return 0;

    /* Some buggy kernels read the LAPIC with byte accesses. */
    if ( len != 4 )
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                    "read with len=0x%lx, should be 4 instead.\n",
                    len);

    alignment = offset & 0x3;

    vlapic_read_aligned(vlapic, offset & ~0x3, 4, &tmp);

    switch ( len ) {
    case 1:
        result = *((unsigned char *)&tmp + alignment);
        break;

    case 2:
        ASSERT( alignment != 3 );
        result = *(unsigned short *)((unsigned char *)&tmp + alignment);
        break;

    case 4:
        ASSERT( alignment == 0 );
        result = *(unsigned int *)((unsigned char *)&tmp + alignment);
        break;

    default:
        printk("Local APIC read with len=0x%lx, should be 4 instead.\n", len);
        domain_crash_synchronous();
        break;
    }

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "offset 0x%x with length 0x%lx, "
                "and the result is 0x%lx.", offset, len, result);

    return result;
}
static void vlapic_write(struct vcpu *v, unsigned long address,
                         unsigned long len, unsigned long val)
{
    struct vlapic *vlapic = VLAPIC(v);
    unsigned int offset = address - vlapic->base_address;

    if ( offset != 0xb0 )
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                    "offset 0x%x with length 0x%lx, and value is 0x%lx.",
                    offset, len, val);

    /*
     * According to the IA-32 Manual, all registers should be accessed with
     * 32-bit alignment.
     */
    if ( len != 4 )
    {
        unsigned int tmp;
        unsigned char alignment;

        /* Some kernels will access with byte/word alignment. */
        printk("Notice: Local APIC write with len = %lx\n", len);
        alignment = offset & 0x3;
        tmp = vlapic_read(v, offset & ~0x3, 4);
        switch ( len ) {
        case 1:
            /* XXX the saddr is a tmp variable from the caller, so it should
             * be ok, but we should still change the following references to
             * val into a local variable later. */
            val = (tmp & ~(0xff << (8*alignment))) |
                  ((val & 0xff) << (8*alignment));
            break;

        case 2:
            if ( alignment != 0x0 && alignment != 0x2 )
            {
                printk("alignment error for vlapic with len == 2\n");
                domain_crash_synchronous();
            }

            val = (tmp & ~(0xffff << (8*alignment))) |
                  ((val & 0xffff) << (8*alignment));
            break;

        case 3:
            /* will it happen? */
            printk("vlapic_write with len = 3 !!!\n");
            domain_crash_synchronous();
            break;

        default:
            printk("Local APIC write with len = %lx, should be 4 instead\n", len);
            domain_crash_synchronous();
            break;
        }
    }

    offset &= 0xff0;

    switch ( offset ) {
    case APIC_ID:                       /* Local APIC ID */
        vlapic_set_reg(vlapic, APIC_ID, val);
        break;

    case APIC_TASKPRI:
        vlapic_set_reg(vlapic, APIC_TASKPRI, val & 0xff);
        vlapic_update_ppr(vlapic);
        break;

    case APIC_EOI:
        vlapic_EOI_set(vlapic);
        break;

    case APIC_LDR:
        vlapic_set_reg(vlapic, APIC_LDR, val & APIC_LDR_MASK);
        break;

    case APIC_DFR:
        vlapic_set_reg(vlapic, APIC_DFR, val | 0x0FFFFFFF);
        break;

    case APIC_SPIV:
        vlapic_set_reg(vlapic, APIC_SPIV, val & 0x3ff);

        if ( !(val & APIC_SPIV_APIC_ENABLED) )
        {
            int i;
            uint32_t lvt_val;

            vlapic->status |= VLAPIC_SOFTWARE_DISABLE_MASK;

            for ( i = 0; i < VLAPIC_LVT_NUM; i++ )
            {
                lvt_val = vlapic_get_reg(vlapic, APIC_LVTT + 0x10 * i);
                vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i,
                               lvt_val | APIC_LVT_MASKED);
            }

            if ( (vlapic_get_reg(vlapic, APIC_LVT0) & APIC_MODE_MASK)
                 == APIC_DM_EXTINT )
                clear_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status);
        }
        else
        {
            vlapic->status &= ~VLAPIC_SOFTWARE_DISABLE_MASK;
            if ( (vlapic_get_reg(vlapic, APIC_LVT0) & APIC_MODE_MASK)
                 == APIC_DM_EXTINT )
                set_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status);
        }
        break;

    case APIC_ESR:
        vlapic->err_write_count = !vlapic->err_write_count;
        if ( !vlapic->err_write_count )
            vlapic->err_status = 0;
        break;

    case APIC_ICR:
        /* No delay here, so we always clear the pending bit. */
        vlapic_set_reg(vlapic, APIC_ICR, val & ~(1 << 12));
        vlapic_ipi(vlapic);
        break;

    case APIC_ICR2:
        vlapic_set_reg(vlapic, APIC_ICR2, val & 0xff000000);
        break;

    case APIC_LVTT:         /* LVT Timer Reg */
    case APIC_LVTTHMR:      /* LVT Thermal Monitor */
    case APIC_LVTPC:        /* LVT Performance Counter */
    case APIC_LVT0:         /* LVT LINT0 Reg */
    case APIC_LVT1:         /* LVT LINT1 Reg */
    case APIC_LVTERR:       /* LVT Error Reg */
    {
        if ( vlapic->status & VLAPIC_SOFTWARE_DISABLE_MASK )
            val |= APIC_LVT_MASKED;

        val &= vlapic_lvt_mask[(offset - APIC_LVTT) >> 4];

        vlapic_set_reg(vlapic, offset, val);

        /* On real hardware, writing a vector below 0x20 raises an error. */
        if ( !(val & APIC_LVT_MASKED) )
            vlapic_check_vector(vlapic, vlapic_lvt_dm(vlapic, offset),
                                vlapic_lvt_vector(vlapic, offset));

        if ( !vlapic->vcpu_id && (offset == APIC_LVT0) )
        {
            if ( (val & APIC_MODE_MASK) == APIC_DM_EXTINT )
            {
                if ( val & APIC_LVT_MASKED )
                    clear_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status);
                else
                    set_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status);
            }
            else
                clear_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status);
        }
    }
        break;

    case APIC_TMICT:
    {
        s_time_t now = NOW(), offset;

        stop_timer(&vlapic->vlapic_timer);

        vlapic_set_reg(vlapic, APIC_TMICT, val);
        vlapic_set_reg(vlapic, APIC_TMCCT, val);
        vlapic->timer_last_update = now;

        offset = APIC_BUS_CYCLE_NS *
                 vlapic->timer_divide_count * val;

        set_timer(&vlapic->vlapic_timer, now + offset);

        HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                    "bus cycle is %"PRId64"ns, now 0x%016"PRIx64", "
                    "timer initial count 0x%x, offset 0x%016"PRIx64", "
                    "expire @ 0x%016"PRIx64".",
                    APIC_BUS_CYCLE_NS, now,
                    vlapic_get_reg(vlapic, APIC_TMICT),
                    offset, now + offset);
    }
        break;

    case APIC_TDCR:
    {
        unsigned int tmp1, tmp2;

        tmp1 = val & 0xf;
        tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
        vlapic->timer_divide_count = 0x1 << (tmp2 & 0x7);

        vlapic_set_reg(vlapic, APIC_TDCR, val);

        HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "timer divide count is 0x%x",
                    vlapic->timer_divide_count);
    }
        break;

    default:
        printk("Local APIC Write to read-only register\n");
        break;
    }
}
static int vlapic_range(struct vcpu *v, unsigned long addr)
{
    struct vlapic *vlapic = VLAPIC(v);

    if ( vlapic_global_enabled(vlapic) &&
         (addr >= vlapic->base_address) &&
         (addr < vlapic->base_address + VLOCAL_APIC_MEM_LENGTH) )
        return 1;

    return 0;
}

struct hvm_mmio_handler vlapic_mmio_handler = {
    .check_handler = vlapic_range,
    .read_handler  = vlapic_read,
    .write_handler = vlapic_write
};
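
/*
 * Handle a guest write to the IA32_APICBASE MSR: relocate the MMIO window
 * and track the global enable bit.  Only the boot vcpu keeps the BSP flag.
 */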
void vlapic_msr_set(struct vlapic *vlapic, uint64_t value)
{
    /* When the apic is disabled. */
    if ( vlapic == NULL )
        return;

    if ( vlapic->vcpu_id )
        value &= ~MSR_IA32_APICBASE_BSP;

    vlapic->apic_base_msr = value;
    vlapic->base_address = vlapic->apic_base_msr &
                           MSR_IA32_APICBASE_BASE;

    /* With FSB-delivered interrupts we can restart APIC functionality. */
    if ( !(value & MSR_IA32_APICBASE_ENABLE) )
        set_bit(_VLAPIC_GLOB_DISABLE, &vlapic->status);
    else
        clear_bit(_VLAPIC_GLOB_DISABLE, &vlapic->status);

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                "apic base msr is 0x%016"PRIx64", and base address is 0x%lx.",
                vlapic->apic_base_msr, vlapic->base_address);
}
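
/*
 * LVTT timer callback: raise the timer vector in IRR and, for a periodic
 * timer, reload TMCCT and re-arm the Xen timer for the next period.
 */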
void vlapic_timer_fn(void *data)
{
    struct vlapic *vlapic = data;
    struct vcpu *v;
    uint32_t timer_vector;
    s_time_t now;

    if ( unlikely(!vlapic_enabled(vlapic) ||
                  !vlapic_lvt_enabled(vlapic, APIC_LVTT)) )
        return;

    v = vlapic->vcpu;
    timer_vector = vlapic_lvt_vector(vlapic, APIC_LVTT);
    now = NOW();

    vlapic->timer_last_update = now;

    if ( test_and_set_bit(timer_vector, vlapic->regs + APIC_IRR) )
        vlapic->intr_pending_count[timer_vector]++;

    if ( vlapic_lvtt_period(vlapic) )
    {
        s_time_t offset;
        uint32_t tmict = vlapic_get_reg(vlapic, APIC_TMICT);

        vlapic_set_reg(vlapic, APIC_TMCCT, tmict);

        offset = APIC_BUS_CYCLE_NS *
                 vlapic->timer_divide_count * tmict;

        set_timer(&vlapic->vlapic_timer, now + offset);
    }
    else
        vlapic_set_reg(vlapic, APIC_TMCCT, 0);

#if 0
    if ( test_bit(_VCPUF_running, &v->vcpu_flags) )
    {
        /* TODO: add guest time handling here */
    }
#endif

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
                "now 0x%016"PRIx64", expire @ 0x%016"PRIx64", "
                "timer initial count 0x%x, timer current count 0x%x.",
                now, vlapic->vlapic_timer.expires,
                vlapic_get_reg(vlapic, APIC_TMICT),
                vlapic_get_reg(vlapic, APIC_TMCCT));
}
#if 0
static int
vlapic_check_direct_intr(struct vcpu *v, int *mode)
{
    struct vlapic *vlapic = VLAPIC(v);
    int type;

    type = fls(vlapic->direct_intr.deliver_mode) - 1;
    if ( type == -1 )
        return -1;

    *mode = type;
    return 0;
}
#endif

int vlapic_accept_pic_intr(struct vcpu *v)
{
    struct vlapic *vlapic = VLAPIC(v);

    return vlapic ? test_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status) : 1;
}
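
/*
 * Return the highest pending IRR vector if its priority class outranks the
 * current processor priority, or -1 if nothing is deliverable.  Vectors
 * below 0x10 are converted into an APIC error interrupt.
 */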
int cpu_get_apic_interrupt(struct vcpu *v, int *mode)
{
    struct vlapic *vlapic = VLAPIC(v);

    if ( vlapic && vlapic_enabled(vlapic) )
    {
        int highest_irr = vlapic_find_highest_irr(vlapic);

        if ( highest_irr != -1 &&
             ( (highest_irr & 0xF0) > vlapic_get_reg(vlapic, APIC_PROCPRI) ) )
        {
            if ( highest_irr < 0x10 )
            {
                uint32_t err_vector;

                vlapic->err_status |= 0x20;
                err_vector = vlapic_lvt_vector(vlapic, APIC_LVTERR);

                HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                            "Sending an illegal vector 0x%x.", highest_irr);

                set_bit(err_vector, vlapic->regs + APIC_IRR);
                highest_irr = err_vector;
            }

            *mode = APIC_DM_FIXED;
            return highest_irr;
        }
    }
    return -1;
}

int cpu_has_apic_interrupt(struct vcpu *v)
{
    struct vlapic *vlapic = VLAPIC(v);

    if ( vlapic && vlapic_enabled(vlapic) )
    {
        int highest_irr = vlapic_find_highest_irr(vlapic);

        if ( highest_irr != -1 &&
             ( (highest_irr & 0xF0) > vlapic_get_reg(vlapic, APIC_PROCPRI) ) )
            return 1;
    }
    return 0;
}
void vlapic_post_injection(struct vcpu *v, int vector, int deliver_mode)
{
    struct vlapic *vlapic = VLAPIC(v);

    if ( unlikely(vlapic == NULL) )
        return;

    switch ( deliver_mode ) {
    case APIC_DM_FIXED:
    case APIC_DM_LOWEST:
        set_bit(vector, vlapic->regs + APIC_ISR);
        clear_bit(vector, vlapic->regs + APIC_IRR);
        vlapic_update_ppr(vlapic);

        if ( vector == vlapic_lvt_vector(vlapic, APIC_LVTT) )
        {
            vlapic->intr_pending_count[vector]--;
            if ( vlapic->intr_pending_count[vector] > 0 )
                test_and_set_bit(vector, vlapic->regs + APIC_IRR);
        }
        break;

    /* XXX deal with these later */
    case APIC_DM_REMRD:
        printk("Ignore deliver mode 3 in vlapic_post_injection\n");
        break;

    case APIC_DM_SMI:
    case APIC_DM_NMI:
    case APIC_DM_INIT:
    case APIC_DM_STARTUP:
        vlapic->direct_intr.deliver_mode &= (1 << (deliver_mode >> 8));
        break;

    default:
        printk("<vlapic_post_injection> invalid deliver mode\n");
        break;
    }
}
static int vlapic_reset(struct vlapic *vlapic)
{
    struct vcpu *v;
    int i;

    ASSERT( vlapic != NULL );

    v = vlapic->vcpu;

    ASSERT( v != NULL );

    vlapic->domain = v->domain;

    vlapic->vcpu_id = v->vcpu_id;

    vlapic_set_reg(vlapic, APIC_ID, v->vcpu_id << 24);

    vlapic_set_reg(vlapic, APIC_LVR, VLAPIC_VERSION);

    for ( i = 0; i < VLAPIC_LVT_NUM; i++ )
        vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);

    vlapic_set_reg(vlapic, APIC_DFR, 0xffffffffU);

    vlapic_set_reg(vlapic, APIC_SPIV, 0xff);

    vlapic->apic_base_msr = MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;

    if ( v->vcpu_id == 0 )
        vlapic->apic_base_msr |= MSR_IA32_APICBASE_BSP;

    vlapic->base_address = vlapic->apic_base_msr &
                           MSR_IA32_APICBASE_BASE;

    hvm_vioapic_add_lapic(vlapic, v);

    init_timer(&vlapic->vlapic_timer,
               vlapic_timer_fn, vlapic, v->processor);

#ifdef VLAPIC_NO_BIOS
    /*
     * XXX According to the MP specification, the BIOS will enable LVT0/1;
     * remove this after the BIOS is enabled.
     */
    if ( !v->vcpu_id )
    {
        vlapic_set_reg(vlapic, APIC_LVT0, APIC_MODE_EXTINT << 8);
        vlapic_set_reg(vlapic, APIC_LVT1, APIC_MODE_NMI << 8);
        set_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status);
    }
#endif

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                "vcpu=%p, id=%d, vlapic_apic_base_msr=0x%016"PRIx64", "
                "base_address=0x%0lx.",
                v, GET_APIC_ID(vlapic_get_reg(vlapic, APIC_ID)),
                vlapic->apic_base_msr, vlapic->base_address);

    return 1;
}

int vlapic_init(struct vcpu *v)
{
    struct vlapic *vlapic = NULL;

    ASSERT( v != NULL );

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "vlapic_init %d", v->vcpu_id);

    vlapic = xmalloc_bytes(sizeof(struct vlapic));
    if ( vlapic == NULL )
    {
        printk("malloc vlapic error for vcpu %x\n", v->vcpu_id);
        return -ENOMEM;
    }

    memset(vlapic, 0, sizeof(struct vlapic));

    vlapic->regs_page = alloc_domheap_page(NULL);
    if ( vlapic->regs_page == NULL )
    {
        printk("malloc vlapic regs error for vcpu %x\n", v->vcpu_id);
        xfree(vlapic);
        return -ENOMEM;
    }

    vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));

    memset(vlapic->regs, 0, PAGE_SIZE);

    VLAPIC(v) = vlapic;

    vlapic->vcpu = v;

    vlapic_reset(vlapic);

    return 0;
}