ia64/xen-unstable

xen/arch/ia64/vmx/vlsapic.c @ 8918:c18c63f87b7d

[IA64] VTI: Fix two bugs

1. vmx_check_pending_irq should not be called from vmx_vcpu_set_eoi,
because vmx_vcpu_increment_iip is called after vmx_vcpu_set_eoi; if the
pending interrupt were injected there, the subsequent iip increment would
skip the first instruction of the guest interrupt handler (see the sketch
below).
2. Remove the code segment that was used to send events to a VTI domain
when dom0 was a VTI domain. It is no longer needed, and it caused a VTI
domain on an SMP host to complain "Unexpected interrupt ..." .
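
A minimal sketch of the ordering issue behind fix 1 (not code from this
changeset; the outer function name and exact signatures are assumptions used
only for illustration, and the real fix is visible in guest_write_eoi()
further down, which now only flags irq_new_pending):

/* Hypothetical outline of the EOI emulation path (assumed names). */
static void emulate_guest_eoi(VCPU *vcpu, u64 val)
{
    vmx_vcpu_set_eoi(vcpu, val);    /* emulate the EOI write; assumed to end
                                     * up in guest_write_eoi() below         */
    /* If vmx_check_pending_irq() ran here and reflected a pending interrupt,
     * iip would already point at the guest's interrupt handler ...          */
    vmx_vcpu_increment_iip(vcpu);   /* ... and this increment would then skip
                                     * the handler's first instruction.       */
    /* Hence guest_write_eoi() only sets vcpu->arch.irq_new_pending = 1, and
     * vmx_check_pending_irq() runs later, after iip has been advanced.       */
}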

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Fri Feb 24 13:38:26 2006 -0700 (2006-02-24)
parents 0f59ace5442c
children a693ccb4d581

/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vlsapic.c: virtual lsapic model including ITC timer.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */

#include <linux/sched.h>
#include <public/arch-ia64.h>
#include <asm/ia64_int.h>
#include <asm/vcpu.h>
#include <asm/regionreg.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/vmx_vcpu.h>
#include <asm/regs.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>
#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
#include <asm/vmx_platform.h>
#include <asm/hvm/vioapic.h>

//u64  fire_itc;
//u64  fire_itc2;
//u64  fire_itm;
//u64  fire_itm2;

extern void vmx_reflect_interruption(UINT64 ifa, UINT64 isr, UINT64 iim,
                                     UINT64 vector, REGS *regs);

/*
 * Update the cached last_itc (the last ITC value the guest has observed).
 */
static void update_last_itc(vtime_t *vtm, uint64_t cur_itc)
{
    vtm->last_itc = cur_itc;
}

/*
 * ITC value as seen by the guest (host ITC + offset + drift).
 */
static uint64_t now_itc(vtime_t *vtm)
{
    uint64_t guest_itc = vtm->vtm_offset + ia64_get_itc();

    if ( vtm->vtm_local_drift ) {
//        guest_itc -= vtm->vtm_local_drift;
    }
    if ( (long)(guest_itc - vtm->last_itc) > 0 ) {
        return guest_itc;
    }
    else {
        /* guest ITC went backward (e.g. after an LP switch); never let the
         * guest observe the ITC decreasing. */
        return vtm->last_itc;
    }
}

/*
 * Reset the virtual interval timer state (offset, drift, ITM, ITV).
 */
static void vtm_reset(VCPU *vcpu)
{
    uint64_t    cur_itc;
    vtime_t     *vtm;

    vtm = &(vcpu->arch.arch_vmx.vtm);
    vtm->vtm_offset = 0;
    vtm->vtm_local_drift = 0;
    VCPU(vcpu, itm) = 0;
    VCPU(vcpu, itv) = 0x10000;      /* mask bit (bit 16) set */
    cur_itc = ia64_get_itc();
    vtm->last_itc = vtm->vtm_offset + cur_itc;
}

/* callback function when vtm_timer expires */
static void vtm_timer_fn(void *data)
{
    vtime_t *vtm;
    VCPU    *vcpu = data;
    u64     cur_itc, vitm;
    UINT64  vec;

    vec = VCPU(vcpu, itv) & 0xff;
    vmx_vcpu_pend_interrupt(vcpu, vec);

    vtm = &(vcpu->arch.arch_vmx.vtm);
    cur_itc = now_itc(vtm);
    vitm = VCPU(vcpu, itm);
//    fire_itc2 = cur_itc;
//    fire_itm2 = vitm;
    update_last_itc(vtm, cur_itc);      // pseudo read to update vITC
}

void vtm_init(VCPU *vcpu)
{
    vtime_t     *vtm;
    uint64_t    itc_freq;

    vtm = &(vcpu->arch.arch_vmx.vtm);

    itc_freq = local_cpu_data->itc_freq;
    vtm->cfg_max_jump = itc_freq * MAX_JUMP_STEP / 1000;
    vtm->cfg_min_grun = itc_freq * MIN_GUEST_RUNNING_TIME / 1000;
    init_timer(&vtm->vtm_timer, vtm_timer_fn, vcpu, 0);
    vtm_reset(vcpu);
}

/*
 * Action when the guest reads the ITC.
 */
uint64_t vtm_get_itc(VCPU *vcpu)
{
    uint64_t    guest_itc, spsr;
    vtime_t     *vtm;

    vtm = &(vcpu->arch.arch_vmx.vtm);
    // FIXME: should use local_irq_disable & local_irq_enable ??
    local_irq_save(spsr);
    guest_itc = now_itc(vtm);
//    update_last_itc(vtm, guest_itc);

    local_irq_restore(spsr);
    return guest_itc;
}

void vtm_set_itc(VCPU *vcpu, uint64_t new_itc)
{
    uint64_t    spsr;
    vtime_t     *vtm;

    vtm = &(vcpu->arch.arch_vmx.vtm);
    local_irq_save(spsr);
    vtm->vtm_offset = new_itc - ia64_get_itc();
    vtm->last_itc = new_itc;
    vtm_interruption_update(vcpu, vtm);
    local_irq_restore(spsr);
}

void vtm_set_itv(VCPU *vcpu)
{
    uint64_t    spsr, itv;
    vtime_t     *vtm;

    vtm = &(vcpu->arch.arch_vmx.vtm);
    local_irq_save(spsr);
    itv = VCPU(vcpu, itv);
    if ( ITV_IRQ_MASK(itv) )
        stop_timer(&vtm->vtm_timer);
    vtm_interruption_update(vcpu, vtm);
    local_irq_restore(spsr);
}

/*
 * Update the pending timer interrupt or (re)arm the vtm timer to fire later.
 * The vtm_timer must already have been stopped if ITV is masked
 * (see vtm_set_itv), and interrupts must be disabled by the caller.
 */
extern u64 cycle_to_ns(u64 cycle);
#define TIMER_SLOP (50*1000) /* ns */  /* copied from timer.c */
void vtm_interruption_update(VCPU *vcpu, vtime_t *vtm)
{
    uint64_t    cur_itc, vitm, vitv;
    uint64_t    expires;
    long        diff_now, diff_last;
    uint64_t    spsr;

    vitv = VCPU(vcpu, itv);
    if ( ITV_IRQ_MASK(vitv) ) {
        return;
    }

    vitm = VCPU(vcpu, itm);
    local_irq_save(spsr);
    cur_itc = now_itc(vtm);
    diff_last = vtm->last_itc - vitm;
    diff_now = cur_itc - vitm;
    update_last_itc(vtm, cur_itc);

    if ( diff_last >= 0 ) {
        // interrupt already fired.
        stop_timer(&vtm->vtm_timer);
    }
    else if ( diff_now >= 0 ) {
        // ITM has just been reached: the ITV interrupt fires now.
        vmx_vcpu_pend_interrupt(vcpu, vitv & 0xff);
    }
    /* Both last_itc and cur_itc are below ITM: wait for the fire condition. */
    else {
        expires = NOW() + cycle_to_ns(0 - diff_now) + TIMER_SLOP;
        set_timer(&vtm->vtm_timer, expires);
    }
    local_irq_restore(spsr);
}

/*
 * Action for vtm when the domain is scheduled out.
 * Remove the timer for vtm.
 */
void vtm_domain_out(VCPU *vcpu)
{
    if ( !is_idle_domain(vcpu->domain) )
        stop_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer);
}

/*
 * Action for vtm when the domain is scheduled in.
 * Fire the vtm IRQ or add the timer for vtm.
 */
void vtm_domain_in(VCPU *vcpu)
{
    vtime_t *vtm;

    if ( !is_idle_domain(vcpu->domain) ) {
        vtm = &(vcpu->arch.arch_vmx.vtm);
        vtm_interruption_update(vcpu, vtm);
    }
}

/*
 * Next: the virtual LSAPIC model.
 */

#define NMI_VECTOR      2
#define ExtINT_VECTOR   0
#define NULL_VECTOR     -1

static void update_vhpi(VCPU *vcpu, int vec)
{
    u64 vhpi;

    if ( vec == NULL_VECTOR ) {
        vhpi = 0;
    }
    else if ( vec == NMI_VECTOR ) {     // NMI
        vhpi = 32;
    }
    else if ( vec == ExtINT_VECTOR ) {  // ExtINT
        vhpi = 16;
    }
    else {
        vhpi = vec / 16;
    }

    VCPU(vcpu, vhpi) = vhpi;
    // TODO: Add support for XENO
    if ( VCPU(vcpu, vac).a_int ) {
        ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
                      (uint64_t)&(vcpu->arch.privregs), 0, 0, 0, 0, 0, 0);
    }
}

#ifdef V_IOSAPIC_READY
/* Assist to check virtual interrupt lines */
void vmx_virq_line_assist(struct vcpu *v)
{
    global_iodata_t *spg = &get_sp(v->domain)->sp_global;
    uint16_t *virq_line, irqs;

    virq_line = &spg->pic_irr;
    if ( *virq_line ) {
        do {
            irqs = *(volatile uint16_t *)virq_line;
        } while ( (uint16_t)cmpxchg(virq_line, irqs, 0) != irqs );
        hvm_vioapic_do_irqs(v->domain, irqs);
    }

    virq_line = &spg->pic_clear_irr;
    if ( *virq_line ) {
        do {
            irqs = *(volatile uint16_t *)virq_line;
        } while ( (uint16_t)cmpxchg(virq_line, irqs, 0) != irqs );
        hvm_vioapic_do_irqs_clear(v->domain, irqs);
    }
}

void vmx_virq_line_init(struct domain *d)
{
    global_iodata_t *spg = &get_sp(d)->sp_global;

    spg->pic_elcr = 0xdef8;     /* Level/Edge trigger mode */
    spg->pic_irr = 0;
    spg->pic_last_irr = 0;
    spg->pic_clear_irr = 0;
}

int ioapic_match_logical_addr(hvm_vioapic_t *s, int number, uint16_t dest)
{
    return (VLAPIC_ID(s->lapic_info[number]) == dest);
}

struct vlapic *apic_round_robin(struct domain *d,
                                uint8_t dest_mode,
                                uint8_t vector,
                                uint32_t bitmap)
{
    uint8_t bit;
    hvm_vioapic_t *s;

    if ( !bitmap ) {
        printk("<apic_round_robin> no bit on bitmap\n");
        return NULL;
    }

    s = &d->arch.vmx_platform.vioapic;
    for ( bit = 0; bit < s->lapic_count; bit++ ) {
        if ( bitmap & (1 << bit) )
            return s->lapic_info[bit];
    }

    return NULL;
}
#endif

void vlsapic_reset(VCPU *vcpu)
{
    int i;

    VCPU(vcpu, lid) = ia64_getreg(_IA64_REG_CR_LID);
    VCPU(vcpu, ivr) = 0;
    VCPU(vcpu, tpr) = 0x10000;
    VCPU(vcpu, eoi) = 0;
    VCPU(vcpu, irr[0]) = 0;
    VCPU(vcpu, irr[1]) = 0;
    VCPU(vcpu, irr[2]) = 0;
    VCPU(vcpu, irr[3]) = 0;
    VCPU(vcpu, pmv) = 0x10000;
    VCPU(vcpu, cmcv) = 0x10000;
    VCPU(vcpu, lrr0) = 0x10000;     // default reset value?
    VCPU(vcpu, lrr1) = 0x10000;     // default reset value?
    update_vhpi(vcpu, NULL_VECTOR);
    for ( i = 0; i < 4; i++ ) {
        VLSAPIC_INSVC(vcpu, i) = 0;
    }

#ifdef V_IOSAPIC_READY
    vcpu->arch.arch_vmx.vlapic.vcpu = vcpu;
    hvm_vioapic_add_lapic(&vcpu->arch.arch_vmx.vlapic, vcpu);
#endif
    DPRINTK("VLSAPIC inservice base=%p\n", &VLSAPIC_INSVC(vcpu, 0));
}

/*
 * Find the highest set bit in a 4-word (256-bit) bitmap.
 *
 * Return 0-255: the highest set bit.
 *   NULL_VECTOR (-1): no bit is set.
 */
static __inline__ int highest_bits(uint64_t *dat)
{
    uint64_t  bits, bitnum;
    int i;

    /* loop over all 256 bits */
    for ( i = 3; i >= 0; i-- ) {
        bits = dat[i];
        if ( bits ) {
            bitnum = ia64_fls(bits);
            return i * 64 + bitnum;
        }
    }
    return NULL_VECTOR;
}

/*
 * Return 0-255 for a pending irq.
 * NULL_VECTOR: when nothing is pending.
 */
static int highest_pending_irq(VCPU *vcpu)
{
    if ( VCPU(vcpu, irr[0]) & (1UL << NMI_VECTOR) ) return NMI_VECTOR;
    if ( VCPU(vcpu, irr[0]) & (1UL << ExtINT_VECTOR) ) return ExtINT_VECTOR;
    return highest_bits(&VCPU(vcpu, irr[0]));
}

static int highest_inservice_irq(VCPU *vcpu)
{
    if ( VLSAPIC_INSVC(vcpu, 0) & (1UL << NMI_VECTOR) ) return NMI_VECTOR;
    if ( VLSAPIC_INSVC(vcpu, 0) & (1UL << ExtINT_VECTOR) ) return ExtINT_VECTOR;
    return highest_bits(&(VLSAPIC_INSVC(vcpu, 0)));
}

/*
 * Is the pending irq of higher priority than the in-service one?
 */
static int is_higher_irq(int pending, int inservice)
{
    return ( (pending >> 4) > (inservice >> 4) ||
             ((pending != NULL_VECTOR) && (inservice == NULL_VECTOR)) );
}

static int is_higher_class(int pending, int mic)
{
    return ( (pending >> 4) > mic );
}

static int is_invalid_irq(int vec)
{
    return (vec == 1 || (vec <= 14 && vec >= 3));
}

#define IRQ_NO_MASKED         0
#define IRQ_MASKED_BY_VTPR    1
#define IRQ_MASKED_BY_INSVC   2     // masked by an in-service IRQ

/* See Table 5-8 in SDM vol2 for the definition */
static int
_xirq_masked(VCPU *vcpu, int h_pending, int h_inservice)
{
    tpr_t    vtpr;
    uint64_t mmi;

    vtpr.val = VCPU(vcpu, tpr);

    if ( h_inservice == NMI_VECTOR ) {
        return IRQ_MASKED_BY_INSVC;
    }
    if ( h_pending == NMI_VECTOR ) {
        // Non-Maskable Interrupt
        return IRQ_NO_MASKED;
    }
    if ( h_inservice == ExtINT_VECTOR ) {
        return IRQ_MASKED_BY_INSVC;
    }
    mmi = vtpr.mmi;
    if ( h_pending == ExtINT_VECTOR ) {
        if ( mmi ) {
            // mask all external IRQs
            return IRQ_MASKED_BY_VTPR;
        }
        else {
            return IRQ_NO_MASKED;
        }
    }

    if ( is_higher_irq(h_pending, h_inservice) ) {
        if ( !mmi && is_higher_class(h_pending, vtpr.mic) ) {
            return IRQ_NO_MASKED;
        }
        else {
            return IRQ_MASKED_BY_VTPR;
        }
    }
    else {
        return IRQ_MASKED_BY_INSVC;
    }
}

static int irq_masked(VCPU *vcpu, int h_pending, int h_inservice)
{
    int mask;

    mask = _xirq_masked(vcpu, h_pending, h_inservice);
    return mask;
}

/*
 * May be called from a virtualization fault or a
 * nested host interrupt.
 */
int vmx_vcpu_pend_interrupt(VCPU *vcpu, uint8_t vector)
{
    uint64_t    spsr;
    int         ret;

    if ( vector & ~0xff ) {
        DPRINTK("vmx_vcpu_pend_interrupt: bad vector\n");
        return -1;
    }
    local_irq_save(spsr);
    ret = test_and_set_bit(vector, &VCPU(vcpu, irr[0]));
    local_irq_restore(spsr);
    vcpu->arch.irq_new_pending = 1;
    return ret;
}

/*
 * Add a batch of pending interrupts.
 * The interrupt sources are contained in pend_irr[0-3],
 * with each bit standing for one interrupt.
 */
void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu, UINT64 *pend_irr)
{
    uint64_t    spsr;
    int         i;

    local_irq_save(spsr);
    for ( i = 0; i < 4; i++ ) {
        VCPU(vcpu, irr[i]) |= pend_irr[i];
    }
    local_irq_restore(spsr);
    vcpu->arch.irq_new_pending = 1;
}

/*
 * If the new pending interrupt is enabled and not masked, we directly inject
 * it into the guest. Otherwise, we set the VHPI if vac.a_int=1 so that when
 * the interrupt becomes unmasked, it gets injected.
 * RETURN:
 *    TRUE:  Interrupt is injected.
 *    FALSE: Not injected, but it may be in the VHPI when vac.a_int=1
 *
 * Optimization: We defer setting the VHPI until the EOI time, if a higher
 *               priority interrupt is in-service. The idea is to reduce the
 *               number of unnecessary calls to inject_vhpi.
 */
int vmx_check_pending_irq(VCPU *vcpu)
{
    uint64_t  spsr, mask;
    int       h_pending, h_inservice;
    int       injected = 0;
    uint64_t  isr;
    IA64_PSR  vpsr;
    REGS      *regs = vcpu_regs(vcpu);

    local_irq_save(spsr);
    h_pending = highest_pending_irq(vcpu);
    if ( h_pending == NULL_VECTOR ) goto chk_irq_exit;
    h_inservice = highest_inservice_irq(vcpu);

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    mask = irq_masked(vcpu, h_pending, h_inservice);
    if ( vpsr.i && IRQ_NO_MASKED == mask ) {
        isr = vpsr.val & IA64_PSR_RI;
        if ( !vpsr.ic )
            panic("Interrupt when IC=0\n");
        vmx_reflect_interruption(0, isr, 0, 12, regs);  // EXT IRQ
        injected = 1;
    }
    else if ( mask == IRQ_MASKED_BY_INSVC ) {
        // can't inject the VHPI
//        DPRINTK("IRQ masked by a higher in-service IRQ\n");
    }
    else {
        // masked by vpsr.i or vtpr.
        update_vhpi(vcpu, h_pending);
    }

chk_irq_exit:
    local_irq_restore(spsr);
    return injected;
}

/*
 * Only called from the virtualization fault path.
 */
void guest_write_eoi(VCPU *vcpu)
{
    int         vec;
    uint64_t    spsr;

    vec = highest_inservice_irq(vcpu);
    if ( vec == NULL_VECTOR ) panic("Wrong vector to EOI\n");
    local_irq_save(spsr);
    VLSAPIC_INSVC(vcpu, vec >> 6) &= ~(1UL << (vec & 63));
    local_irq_restore(spsr);
    VCPU(vcpu, eoi) = 0;    // overwrite the data
    /* Do not call vmx_check_pending_irq() here; only flag the new pending
     * state, so the check runs after the emulated EOI's iip increment. */
    vcpu->arch.irq_new_pending = 1;
//    vmx_check_pending_irq(vcpu);
}

uint64_t guest_read_vivr(VCPU *vcpu)
{
    int         vec, h_inservice;
    uint64_t    spsr;

    local_irq_save(spsr);
    vec = highest_pending_irq(vcpu);
    h_inservice = highest_inservice_irq(vcpu);
    if ( vec == NULL_VECTOR ||
         irq_masked(vcpu, vec, h_inservice) != IRQ_NO_MASKED ) {
        local_irq_restore(spsr);
        return IA64_SPURIOUS_INT_VECTOR;
    }

    VLSAPIC_INSVC(vcpu, vec >> 6) |= (1UL << (vec & 63));
    VCPU(vcpu, irr[vec >> 6]) &= ~(1UL << (vec & 63));
    update_vhpi(vcpu, NULL_VECTOR);     // clear the VHPI till EOI or IRR write
    local_irq_restore(spsr);
    return (uint64_t)vec;
}

static void generate_exirq(VCPU *vcpu)
{
    IA64_PSR    vpsr;
    uint64_t    isr;
    REGS        *regs = vcpu_regs(vcpu);

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    update_vhpi(vcpu, NULL_VECTOR);
    isr = vpsr.val & IA64_PSR_RI;
    if ( !vpsr.ic )
        panic("Interrupt when IC=0\n");
    vmx_reflect_interruption(0, isr, 0, 12, regs);  // EXT IRQ
}

void vhpi_detection(VCPU *vcpu)
{
    uint64_t    threshold, vhpi;
    tpr_t       vtpr;
    IA64_PSR    vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    vtpr.val = VCPU(vcpu, tpr);

    /* psr.i, tpr.mmi and tpr.mic together form the masking threshold
     * that the highest pending interrupt (vhpi) is compared against. */
    threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
    vhpi = VCPU(vcpu, vhpi);
    if ( vhpi > threshold ) {
        // interrupt activated
        generate_exirq(vcpu);
    }
}

void vmx_vexirq(VCPU *vcpu)
{
    static uint64_t vexirq_count = 0;

    vexirq_count++;
    printk("Virtual ex-irq %ld\n", vexirq_count);
    generate_exirq(vcpu);
}