ia64/xen-unstable

xen/arch/ia64/vmx/vlsapic.c @ 10695:6703fed8870f

[IA64] enable acceleration of external interrupt

This patch enables acceleration of external interrupts,
as described in the VT-i spec.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Wed Jul 12 13:20:15 2006 -0600 (2006-07-12)
parents 8ad37880564d
children 4834d1e8f26e
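
Reading aid (not part of the changeset): the acceleration is gated on the guest's VAC a_int bit. When it is set, update_vhpi() below hands the newly computed highest-pending vector to the VT-i assist (PAL_VPS_SET_PENDING_INTERRUPT) so it can be injected without another pass through the hypervisor's reflection path; otherwise the value is merely recorded and delivery waits for the software checks (vmx_check_pending_irq()/vhpi_detection()). A minimal sketch of that gate, using only names that appear in this file:

    VCPU(vcpu, vhpi) = vhpi;            /* publish the highest pending vector */
    if ( VCPU(vcpu, vac).a_int )        /* guest runs with interrupt acceleration */
        ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
                      (uint64_t)&(vcpu->arch.privregs), 0, 0, 0, 0, 0, 0);
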
/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vlsapic.c: virtual lsapic model including ITC timer.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */
#include <linux/sched.h>
#include <public/arch-ia64.h>
#include <asm/ia64_int.h>
#include <asm/vcpu.h>
#include <asm/regionreg.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/vmx_vcpu.h>
#include <asm/regs.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>
#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
#include <asm/vmx_platform.h>
#include <asm/hvm/vioapic.h>
#include <asm/linux/jiffies.h>

//u64  fire_itc;
//u64  fire_itc2;
//u64  fire_itm;
//u64  fire_itm2;
/*
 * Update the cached last_itc (the floor used to keep the guest ITC monotonic).
 */
extern void vmx_reflect_interruption(UINT64 ifa, UINT64 isr, UINT64 iim,
                                     UINT64 vector, REGS *regs);

static void update_last_itc(vtime_t *vtm, uint64_t cur_itc)
{
    vtm->last_itc = cur_itc;
}
/*
 * ITC value as seen by the guest (host + offset + drift).
 */
static uint64_t now_itc(vtime_t *vtm)
{
    uint64_t guest_itc = vtm->vtm_offset + ia64_get_itc();

    if ( vtm->vtm_local_drift ) {
//        guest_itc -= vtm->vtm_local_drift;
    }
    if ( (long)(guest_itc - vtm->last_itc) > 0 ) {
        return guest_itc;
    }
    else {
        /* guest ITC went backward after a switch to another logical processor */
        return vtm->last_itc;
    }
}
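
/*
 * Worked example (illustrative numbers only, not from the source): if the
 * host ITC reads 1,000,000 and vtm_offset is -200,000, the guest sees
 * 800,000.  If the VCPU then migrates to a logical processor whose ITC
 * reads 950,000, the raw guest value would be 750,000, i.e. less than the
 * last value handed out; now_itc() clamps to last_itc so the guest ITC
 * never appears to move backward.
 */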
/*
 * Reset the interval timer components.
 */
static void vtm_reset(VCPU *vcpu)
{
    uint64_t    cur_itc;
    vtime_t     *vtm;

    vtm = &(vcpu->arch.arch_vmx.vtm);
    vtm->vtm_offset = 0;
    vtm->vtm_local_drift = 0;
    VCPU(vcpu, itm) = 0;
    VCPU(vcpu, itv) = 0x10000;
    cur_itc = ia64_get_itc();
    vtm->last_itc = vtm->vtm_offset + cur_itc;
}

/* callback function when vtm_timer expires */
static void vtm_timer_fn(void *data)
{
    vtime_t *vtm;
    VCPU    *vcpu = data;
    u64     cur_itc, vitv;

    vitv = VCPU(vcpu, itv);
    if ( !ITV_IRQ_MASK(vitv) ) {
        vmx_vcpu_pend_interrupt(vcpu, vitv & 0xff);
        vcpu_unblock(vcpu);
    }
    vtm = &(vcpu->arch.arch_vmx.vtm);
    cur_itc = now_itc(vtm);
//    vitm = VCPU(vcpu, itm);
//    fire_itc2 = cur_itc;
//    fire_itm2 = vitm;
    update_last_itc(vtm, cur_itc);  // pseudo read to update vITC
}

void vtm_init(VCPU *vcpu)
{
    vtime_t     *vtm;
    uint64_t    itc_freq;

    vtm = &(vcpu->arch.arch_vmx.vtm);

    itc_freq = local_cpu_data->itc_freq;
    vtm->cfg_max_jump = itc_freq * MAX_JUMP_STEP / 1000;
    vtm->cfg_min_grun = itc_freq * MIN_GUEST_RUNNING_TIME / 1000;
    init_timer(&vtm->vtm_timer, vtm_timer_fn, vcpu, vcpu->processor);
    vtm_reset(vcpu);
}
/*
 * Action when the guest reads ITC.
 */
uint64_t vtm_get_itc(VCPU *vcpu)
{
    uint64_t    guest_itc;
    vtime_t     *vtm;

    vtm = &(vcpu->arch.arch_vmx.vtm);
    guest_itc = now_itc(vtm);
    return guest_itc;
}


void vtm_set_itc(VCPU *vcpu, uint64_t new_itc)
{
    uint64_t    vitm, vitv;
    vtime_t     *vtm;

    vitm = VCPU(vcpu, itm);
    vitv = VCPU(vcpu, itv);
    vtm = &(vcpu->arch.arch_vmx.vtm);
    if (vcpu->vcpu_id == 0) {
        /* VCPU0 owns the offset; derive it from the requested guest ITC */
        vtm->vtm_offset = new_itc - ia64_get_itc();
        vtm->last_itc = new_itc;
    }
    else {
        /* other VCPUs inherit VCPU0's offset so all guest ITCs stay in sync */
        vtm->vtm_offset = vcpu->domain->vcpu[0]->arch.arch_vmx.vtm.vtm_offset;
        new_itc = vtm->vtm_offset + ia64_get_itc();
        vtm->last_itc = new_itc;
    }
    if (vitm < new_itc) {
        /* the programmed match value is already in the past: drop the
         * pending timer interrupt and stop the host timer */
        clear_bit(ITV_VECTOR(vitv), &VCPU(vcpu, irr[0]));
        stop_timer(&vtm->vtm_timer);
    }
}
#define TIMER_SLOP (50*1000) /* ns */  /* copied from timer.c */

void vtm_set_itm(VCPU *vcpu, uint64_t val)
{
    vtime_t  *vtm;
    uint64_t vitv, cur_itc, expires;

    vitv = VCPU(vcpu, itv);
    vtm = &(vcpu->arch.arch_vmx.vtm);
    // TODO: need to handle VHPI in the future
    clear_bit(ITV_VECTOR(vitv), &VCPU(vcpu, irr[0]));
    VCPU(vcpu, itm) = val;
    cur_itc = now_itc(vtm);
    if (time_before(val, cur_itc))
        val = cur_itc;
    if (val > vtm->last_itc) {
        expires = NOW() + cycle_to_ns(val - cur_itc) + TIMER_SLOP;
        set_timer(&vtm->vtm_timer, expires);
    } else {
        stop_timer(&vtm->vtm_timer);
    }
}
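
/*
 * Worked example (illustrative numbers, assuming a 1 GHz ITC so one cycle
 * is 1 ns): if the guest writes an ITM value 2,000,000 cycles ahead of the
 * current guest ITC, the host timer is armed for roughly
 *   NOW() + cycle_to_ns(2,000,000) + TIMER_SLOP = NOW() + 2 ms + 50 us,
 * after which vtm_timer_fn() pends the ITV vector.  A match value at or
 * below last_itc means the interrupt has effectively already fired, so the
 * timer is stopped instead.
 */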
void vtm_set_itv(VCPU *vcpu, uint64_t val)
{
    uint64_t olditv;

    olditv = VCPU(vcpu, itv);
    VCPU(vcpu, itv) = val;
    if (ITV_IRQ_MASK(val)) {
        /* newly masked: drop any interrupt pending on the old vector */
        clear_bit(ITV_VECTOR(olditv), &VCPU(vcpu, irr[0]));
    } else if (ITV_VECTOR(olditv) != ITV_VECTOR(val)) {
        /* vector changed: move a pending timer interrupt to the new vector */
        if (test_and_clear_bit(ITV_VECTOR(olditv), &VCPU(vcpu, irr[0])))
            set_bit(ITV_VECTOR(val), &VCPU(vcpu, irr[0]));
    }
}
/*
 * Update the pending interrupt or arm the vtm timer to fire later.
 * At this point vtm_timer should be removed if itv is masked.
 */
/* Interrupts must be disabled at this point */
/*
void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm)
{
    uint64_t    cur_itc, vitm, vitv;
    uint64_t    expires;
    long        diff_now, diff_last;
    uint64_t    spsr;

    vitv = VCPU(vcpu, itv);
    if ( ITV_IRQ_MASK(vitv) ) {
        return;
    }

    vitm = VCPU(vcpu, itm);
    local_irq_save(spsr);
    cur_itc = now_itc(vtm);
    diff_last = vtm->last_itc - vitm;
    diff_now = cur_itc - vitm;

    if ( diff_last >= 0 ) {
        // interrupt already fired.
        stop_timer(&vtm->vtm_timer);
    }
    else if ( diff_now >= 0 ) {
        // ITV is fired.
        vmx_vcpu_pend_interrupt(vcpu, vitv&0xff);
    }
*/
/* Both last_itc & cur_itc < itm, wait for fire condition */
/*    else {
        expires = NOW() + cycle_to_ns(0-diff_now) + TIMER_SLOP;
        set_timer(&vtm->vtm_timer, expires);
    }
    local_irq_restore(spsr);
}
*/
/*
 * Action for vtm when the domain is scheduled out.
 * Remove the timer for vtm.
 */
/*
void vtm_domain_out(VCPU *vcpu)
{
    if(!is_idle_domain(vcpu->domain))
        stop_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer);
}
*/
/*
 * Action for vtm when the domain is scheduled in.
 * Fire vtm IRQ or add the timer for vtm.
 */
/*
void vtm_domain_in(VCPU *vcpu)
{
    vtime_t     *vtm;

    if(!is_idle_domain(vcpu->domain)) {
        vtm = &(vcpu->arch.arch_vmx.vtm);
        vtm_interruption_update(vcpu, vtm);
    }
}
*/
/*
 * Next comes the vLSAPIC model.
 */

#define  NMI_VECTOR         2
#define  ExtINT_VECTOR      0
#define  NULL_VECTOR        -1

static void update_vhpi(VCPU *vcpu, int vec)
{
    u64     vhpi;

    if ( vec == NULL_VECTOR ) {
        vhpi = 0;
    }
    else if ( vec == NMI_VECTOR ) {      // NMI
        vhpi = 32;
    }
    else if ( vec == ExtINT_VECTOR ) {   // ExtINT
        vhpi = 16;
    }
    else {
        vhpi = vec >> 4;
    }

    VCPU(vcpu, vhpi) = vhpi;
    // TODO: Add support for XENO
    if ( VCPU(vcpu, vac).a_int ) {
        ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
                      (uint64_t)&(vcpu->arch.privregs), 0, 0, 0, 0, 0, 0);
    }
}
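
/*
 * Worked example of the vector-to-VHPI mapping above: vector 0x59 maps to
 * VHPI class 0x59 >> 4 = 5; NULL_VECTOR publishes 0 (nothing pending), NMI
 * publishes 32 and ExtINT publishes 16, matching the priority encoding that
 * vhpi_detection() later compares against the TPR-derived threshold.  When
 * the guest's VAC a_int bit is set, the new value is also handed to the
 * VT-i assist via PAL_VPS_SET_PENDING_INTERRUPT (the "acceleration" this
 * changeset enables).
 */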
#ifdef V_IOSAPIC_READY
/* Assist to check virtual interrupt lines */
void vmx_virq_line_assist(struct vcpu *v)
{
    global_iodata_t *spg = &get_sp(v->domain)->sp_global;
    uint16_t *virq_line, irqs;

    virq_line = &spg->pic_irr;
    if (*virq_line) {
        do {
            irqs = *(volatile uint16_t*)virq_line;
        } while ((uint16_t)cmpxchg(virq_line, irqs, 0) != irqs);
        hvm_vioapic_do_irqs(v->domain, irqs);
    }

    virq_line = &spg->pic_clear_irr;
    if (*virq_line) {
        do {
            irqs = *(volatile uint16_t*)virq_line;
        } while ((uint16_t)cmpxchg(virq_line, irqs, 0) != irqs);
        hvm_vioapic_do_irqs_clear(v->domain, irqs);
    }
}

void vmx_virq_line_init(struct domain *d)
{
    global_iodata_t *spg = &get_sp(d)->sp_global;

    spg->pic_elcr = 0xdef8;     /* Level/Edge trigger mode */
    spg->pic_irr = 0;
    spg->pic_last_irr = 0;
    spg->pic_clear_irr = 0;
}

int ioapic_match_logical_addr(hvm_vioapic_t *s, int number, uint16_t dest)
{
    return (VLAPIC_ID(s->lapic_info[number]) == dest);
}

struct vlapic* apic_round_robin(struct domain *d,
                                uint8_t dest_mode,
                                uint8_t vector,
                                uint32_t bitmap)
{
    uint8_t bit;
    hvm_vioapic_t *s;

    if (!bitmap) {
        printk("<apic_round_robin> no bit on bitmap\n");
        return NULL;
    }

    s = &d->arch.vmx_platform.vioapic;
    for (bit = 0; bit < s->lapic_count; bit++) {
        if (bitmap & (1 << bit))
            return s->lapic_info[bit];
    }

    return NULL;
}
#endif
void vlsapic_reset(VCPU *vcpu)
{
    int     i;

    VCPU(vcpu, lid) = VCPU_LID(vcpu);
    VCPU(vcpu, ivr) = 0;
    VCPU(vcpu, tpr) = 0x10000;
    VCPU(vcpu, eoi) = 0;
    VCPU(vcpu, irr[0]) = 0;
    VCPU(vcpu, irr[1]) = 0;
    VCPU(vcpu, irr[2]) = 0;
    VCPU(vcpu, irr[3]) = 0;
    VCPU(vcpu, pmv) = 0x10000;
    VCPU(vcpu, cmcv) = 0x10000;
    VCPU(vcpu, lrr0) = 0x10000;   // default reset value?
    VCPU(vcpu, lrr1) = 0x10000;   // default reset value?
    update_vhpi(vcpu, NULL_VECTOR);
    for ( i = 0; i < 4; i++ ) {
        VLSAPIC_INSVC(vcpu, i) = 0;
    }

#ifdef V_IOSAPIC_READY
    vcpu->arch.arch_vmx.vlapic.vcpu = vcpu;
    hvm_vioapic_add_lapic(&vcpu->arch.arch_vmx.vlapic, vcpu);
#endif
    DPRINTK("VLSAPIC inservice base=%p\n", &VLSAPIC_INSVC(vcpu, 0) );
}
/*
 * Find the highest set bit in 4 words (256 bits in all).
 *
 * return 0-255       : the highest set bit.
 *        NULL_VECTOR : no bit set (-1).
 */
static __inline__ int highest_bits(uint64_t *dat)
{
    uint64_t  bits, bitnum;
    int i;

    /* loop over all 256 bits */
    for ( i = 3; i >= 0; i-- ) {
        bits = dat[i];
        if ( bits ) {
            bitnum = ia64_fls(bits);
            return i * 64 + bitnum;
        }
    }
    return NULL_VECTOR;
}
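
/*
 * Worked example: if irr[2] has bit 5 set and every higher bit is clear,
 * the scan starts at word 3, falls through to word 2, ia64_fls() returns 5,
 * and the result is 2 * 64 + 5 = vector 133.  Scanning from word 3 down to
 * word 0 is what makes the result the highest pending vector.
 */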
/*
 * Return 0-255 for a pending irq.
 *        NULL_VECTOR when nothing is pending.
 */
static int highest_pending_irq(VCPU *vcpu)
{
    if ( VCPU(vcpu, irr[0]) & (1UL<<NMI_VECTOR) ) return NMI_VECTOR;
    if ( VCPU(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR) ) return ExtINT_VECTOR;
    return highest_bits(&VCPU(vcpu, irr[0]));
}

static int highest_inservice_irq(VCPU *vcpu)
{
    if ( VLSAPIC_INSVC(vcpu, 0) & (1UL<<NMI_VECTOR) ) return NMI_VECTOR;
    if ( VLSAPIC_INSVC(vcpu, 0) & (1UL<<ExtINT_VECTOR) ) return ExtINT_VECTOR;
    return highest_bits(&(VLSAPIC_INSVC(vcpu, 0)));
}

/*
 * Is the pending irq of higher priority than the in-service one?
 */
static int is_higher_irq(int pending, int inservice)
{
    return ( (pending > inservice) ||
             ((pending != NULL_VECTOR) && (inservice == NULL_VECTOR)) );
}

static int is_higher_class(int pending, int mic)
{
    return ( (pending >> 4) > mic );
}

#if 0
static int is_invalid_irq(int vec)
{
    return (vec == 1 || ((vec <= 14 && vec >= 3)));
}
#endif  /* kept under #if 0: currently unused */
#define   IRQ_NO_MASKED         0
#define   IRQ_MASKED_BY_VTPR    1
#define   IRQ_MASKED_BY_INSVC   2   // masked by an in-service IRQ

/* See Table 5-8 in SDM vol2 for the definition */
static int
_xirq_masked(VCPU *vcpu, int h_pending, int h_inservice)
{
    tpr_t    vtpr;

    vtpr.val = VCPU(vcpu, tpr);

    if ( h_inservice == NMI_VECTOR ) {
        return IRQ_MASKED_BY_INSVC;
    }
    if ( h_pending == NMI_VECTOR ) {
        // Non Maskable Interrupt
        return IRQ_NO_MASKED;
    }
    if ( h_inservice == ExtINT_VECTOR ) {
        return IRQ_MASKED_BY_INSVC;
    }

    if ( h_pending == ExtINT_VECTOR ) {
        if ( vtpr.mmi ) {
            // mask all external IRQs
            return IRQ_MASKED_BY_VTPR;
        }
        else {
            return IRQ_NO_MASKED;
        }
    }

    if ( is_higher_irq(h_pending, h_inservice) ) {
        if ( is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)) ) {
            return IRQ_NO_MASKED;
        }
        else {
            return IRQ_MASKED_BY_VTPR;
        }
    }
    else {
        return IRQ_MASKED_BY_INSVC;
    }
}
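
/*
 * Worked example (illustrative vectors): with pending vector 0x59 and no
 * interrupt in service, the pending class is 0x59 >> 4 = 5.  If vtpr.mmi is
 * clear and vtpr.mic is 4, 5 > 4 and the interrupt is deliverable
 * (IRQ_NO_MASKED); with vtpr.mic = 5 it is IRQ_MASKED_BY_VTPR.  Setting
 * vtpr.mmi adds 16 to the effective threshold, masking every maskable
 * interrupt.  An NMI in service masks everything; a pending NMI is never
 * masked.
 */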
static int irq_masked(VCPU *vcpu, int h_pending, int h_inservice)
{
    int mask;

    mask = _xirq_masked(vcpu, h_pending, h_inservice);
    return mask;
}
/*
 * May come from a virtualization fault or
 * a nested host interrupt.
 */
int vmx_vcpu_pend_interrupt(VCPU *vcpu, uint8_t vector)
{
    uint64_t    spsr;
    int         ret;

    if (vector & ~0xff) {
        DPRINTK("vmx_vcpu_pend_interrupt: bad vector\n");
        return -1;
    }
    local_irq_save(spsr);
    ret = test_and_set_bit(vector, &VCPU(vcpu, irr[0]));
    local_irq_restore(spsr);
    vcpu->arch.irq_new_pending = 1;
    return ret;
}

/*
 * Add a batch of pending interrupts.
 * The interrupt sources are contained in pend_irr[0-3],
 * with each bit standing for one interrupt.
 */
void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu, UINT64 *pend_irr)
{
    uint64_t    spsr;
    int         i;

    local_irq_save(spsr);
    for (i = 0; i < 4; i++) {
        VCPU(vcpu, irr[i]) |= pend_irr[i];
    }
    local_irq_restore(spsr);
    vcpu->arch.irq_new_pending = 1;
}

/*
 * If the new pending interrupt is enabled and not masked, we directly inject
 * it into the guest. Otherwise, we set the VHPI if vac.a_int=1 so that when
 * the interrupt becomes unmasked, it gets injected.
 * RETURN:
 *    the highest unmasked interrupt.
 *
 * Optimization: We defer setting the VHPI until the EOI time, if a higher
 *               priority interrupt is in-service. The idea is to reduce the
 *               number of unnecessary calls to inject_vhpi.
 */
int vmx_check_pending_irq(VCPU *vcpu)
{
    uint64_t  spsr, mask;
    int       h_pending, h_inservice;
    uint64_t  isr;
    IA64_PSR  vpsr;
    REGS      *regs = vcpu_regs(vcpu);

    local_irq_save(spsr);
    h_pending = highest_pending_irq(vcpu);
    if ( h_pending == NULL_VECTOR ) {
        h_pending = SPURIOUS_VECTOR;
        goto chk_irq_exit;
    }
    h_inservice = highest_inservice_irq(vcpu);

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    mask = irq_masked(vcpu, h_pending, h_inservice);
    if ( vpsr.i && IRQ_NO_MASKED == mask ) {
        isr = vpsr.val & IA64_PSR_RI;
        if ( !vpsr.ic )
            panic_domain(regs, "Interrupt when IC=0\n");
        if (VCPU(vcpu, vhpi))
            update_vhpi(vcpu, NULL_VECTOR);
        vmx_reflect_interruption(0, isr, 0, 12, regs);   // EXT IRQ
    }
    else if ( mask == IRQ_MASKED_BY_INSVC ) {
        // can't inject the VHPI
        // DPRINTK("IRQ masked by higher inservice\n");
    }
    else {
        // masked by vpsr.i or vtpr.
        update_vhpi(vcpu, h_pending);
    }

chk_irq_exit:
    local_irq_restore(spsr);
    return h_pending;
}
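
/*
 * Summary of the three outcomes above: (1) psr.i is set and nothing masks
 * the vector -> reflect an external-interrupt fault (vector 12) into the
 * guest immediately; (2) masked by a higher in-service interrupt -> do
 * nothing, the VHPI update is deferred until the EOI; (3) masked only by
 * psr.i or the TPR -> publish the vector through update_vhpi() so the
 * accelerated path can inject it as soon as the guest unmasks interrupts.
 */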
/*
 * Only called from the virtualization fault path.
 */
void guest_write_eoi(VCPU *vcpu)
{
    int       vec;
    uint64_t  spsr;

    vec = highest_inservice_irq(vcpu);
    if ( vec == NULL_VECTOR )
        panic_domain(vcpu_regs(vcpu), "Wrong vector to EOI\n");
    local_irq_save(spsr);
    VLSAPIC_INSVC(vcpu, vec>>6) &= ~(1UL << (vec&63));
    local_irq_restore(spsr);
    VCPU(vcpu, eoi) = 0;    // overwrite the data
    vcpu->arch.irq_new_pending = 1;
//    vmx_check_pending_irq(vcpu);
}
int is_unmasked_irq(VCPU *vcpu)
{
    int h_pending, h_inservice;

    h_pending = highest_pending_irq(vcpu);
    h_inservice = highest_inservice_irq(vcpu);
    if ( h_pending == NULL_VECTOR ||
         irq_masked(vcpu, h_pending, h_inservice) != IRQ_NO_MASKED ) {
        return 0;
    }
    else
        return 1;
}
uint64_t guest_read_vivr(VCPU *vcpu)
{
    int       vec, h_inservice;
    uint64_t  spsr;

    local_irq_save(spsr);
    vec = highest_pending_irq(vcpu);
    h_inservice = highest_inservice_irq(vcpu);
    if ( vec == NULL_VECTOR ||
         irq_masked(vcpu, vec, h_inservice) != IRQ_NO_MASKED ) {
        local_irq_restore(spsr);
        return IA64_SPURIOUS_INT_VECTOR;
    }

    VLSAPIC_INSVC(vcpu, vec>>6) |= (1UL << (vec&63));
    VCPU(vcpu, irr[vec>>6]) &= ~(1UL << (vec&63));
    if (VCPU(vcpu, vhpi))
        update_vhpi(vcpu, NULL_VECTOR);   // clear VHPI till EOI or IRR write
    local_irq_restore(spsr);
    return (uint64_t)vec;
}
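
/*
 * Worked example of an IVR read: with vector 133 pending and deliverable,
 * the read sets bit 5 of in-service word 2 (133 >> 6 = 2, 133 & 63 = 5),
 * clears the same bit in irr[2], clears the VHPI until the matching EOI or
 * a new IRR write, and returns 133 to the guest.  A masked or absent
 * interrupt yields IA64_SPURIOUS_INT_VECTOR instead.
 */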
static void generate_exirq(VCPU *vcpu)
{
    IA64_PSR    vpsr;
    uint64_t    isr;
    REGS        *regs = vcpu_regs(vcpu);

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    update_vhpi(vcpu, NULL_VECTOR);
    isr = vpsr.val & IA64_PSR_RI;
    if ( !vpsr.ic )
        panic_domain(regs, "Interrupt when IC=0\n");
    vmx_reflect_interruption(0, isr, 0, 12, regs);   // EXT IRQ
}
void vhpi_detection(VCPU *vcpu)
{
    uint64_t    threshold, vhpi;
    tpr_t       vtpr;
    IA64_PSR    vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    vtpr.val = VCPU(vcpu, tpr);

    threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
    vhpi = VCPU(vcpu, vhpi);
    if ( vhpi > threshold ) {
        // interrupt activated
        generate_exirq(vcpu);
    }
}
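
/*
 * Worked example of the threshold: with psr.i = 1, tpr.mmi = 0 and
 * tpr.mic = 4 the threshold is 4, so a published VHPI of 5 (e.g. pending
 * vector 0x59) triggers generate_exirq(), while a VHPI of 4 or less does
 * not.  Clearing psr.i adds 32 to the threshold, so no VHPI value (at most
 * 32, for an NMI) can exceed it and nothing is delivered through this path
 * until interrupts are re-enabled.
 */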
void vmx_vexirq(VCPU *vcpu)
{
    static uint64_t  vexirq_count = 0;

    vexirq_count++;
    printk("Virtual ex-irq %ld\n", vexirq_count);
    generate_exirq(vcpu);
}