ia64/xen-unstable

view xen/arch/ia64/vmx/vlsapic.c @ 18501:3d96f88fb220

[IA64] fix INIT injection.

The xm trigger command sometimes causes an unexpected domain panic.
There are several symptoms:
* Guest nested fault (INIT handler runs with vpsr.cpl != 0)
* Interrupt when IC=0
* Unexpected virtual <--> physical mode transition

Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
Signed-off-by: Kazuhiro Suzuki <kaz@jp.fujitsu.com>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Thu Sep 18 17:54:15 2008 +0900 (2008-09-18)
parents 408fcc50fd35
children 5839491bbf20
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vlsapic.c: virtual lsapic model including ITC timer.
 * Copyright (c) 2005, Intel Corporation.
 *
 * Copyright (c) 2007, Isaku Yamahata <yamahata at valinux co jp>
 *                     VA Linux Systems Japan K.K.
 *                     save/restore support
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */

#include <linux/sched.h>
#include <public/xen.h>
#include <asm/ia64_int.h>
#include <asm/vcpu.h>
#include <asm/regionreg.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/vmx_vcpu.h>
#include <asm/regs.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>
#include <asm/vmx_vpd.h>
#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
#include <asm/vmx_platform.h>
#include <asm/viosapic.h>
#include <asm/vlsapic.h>
#include <asm/vmx_phy_mode.h>
#include <asm/linux/jiffies.h>
#include <xen/domain.h>
#include <asm/hvm/support.h>
#include <public/hvm/save.h>
#include <public/arch-ia64/hvm/memmap.h>

#ifdef IPI_DEBUG
#define IPI_DPRINTK(x...) printk(x)
#else
#define IPI_DPRINTK(x...)
#endif

//u64 fire_itc;
//u64 fire_itc2;
//u64 fire_itm;
//u64 fire_itm2;
/*
 * Update the checked last_itc.
 */

extern void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim,
                                     u64 vector, REGS *regs);
static void update_last_itc(vtime_t *vtm, uint64_t cur_itc)
{
    vtm->last_itc = cur_itc;
}

/*
 * Next for vLSapic
 */

#define NMI_VECTOR      2
#define ExtINT_VECTOR   0
#define NULL_VECTOR     -1
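
/*
 * Record the highest pending interrupt in the VPD vhpi field (NMI and
 * ExtINT use the dedicated encodings below, other vectors store their
 * priority class) and, if vac.a_int is set, notify PAL through
 * PAL_VPS_SET_PENDING_INTERRUPT.
 */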
static void update_vhpi(VCPU *vcpu, int vec)
{
    u64 vhpi;

    if (vec == NULL_VECTOR)
        vhpi = 0;
    else if (vec == NMI_VECTOR)
        vhpi = 32;
    else if (vec == ExtINT_VECTOR)
        vhpi = 16;
    else
        vhpi = vec >> 4;

    VCPU(vcpu, vhpi) = vhpi;
    // TODO: Add support for XENO
    if (VCPU(vcpu, vac).a_int) {
        vmx_vpd_pin(vcpu);
        ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
                      (uint64_t)vcpu->arch.privregs, 0, 0, 0, 0, 0, 0);
        vmx_vpd_unpin(vcpu);
    }
}


/*
 * May come from virtualization fault or
 * nested host interrupt.
 */
static int vmx_vcpu_unpend_interrupt(VCPU *vcpu, uint8_t vector)
{
    int ret;

    if (vector & ~0xff) {
        dprintk(XENLOG_WARNING, "vmx_vcpu_pend_interrupt: bad vector\n");
        return -1;
    }

    ret = test_and_clear_bit(vector, &VCPU(vcpu, irr[0]));

    if (ret) {
        vcpu->arch.irq_new_pending = 1;
        wmb();
    }

    return ret;
}

/*
 * ITC value as seen by the guest (host + offset + drift).
 */
static uint64_t now_itc(vtime_t *vtm)
{
    uint64_t guest_itc = vtm->vtm_offset + ia64_get_itc();

    if (guest_itc >= vtm->last_itc)
        return guest_itc;
    else
        /* guest ITC went backward due to LP switch */
        return vtm->last_itc;
}

/*
 * Interval time components reset.
 */
static void vtm_reset(VCPU *vcpu)
{
    int i;
    u64 vtm_offset;
    VCPU *v;
    struct domain *d = vcpu->domain;
    vtime_t *vtm = &VMX(vcpu, vtm);

    if (vcpu->vcpu_id == 0) {
        vtm_offset = 0UL - ia64_get_itc();
        for (i = MAX_VIRT_CPUS - 1; i >= 0; i--) {
            if ((v = d->vcpu[i]) != NULL) {
                VMX(v, vtm).vtm_offset = vtm_offset;
                VMX(v, vtm).last_itc = 0;
            }
        }
    }
    vtm->vtm_local_drift = 0;
    VCPU(vcpu, itm) = 0;
    VCPU(vcpu, itv) = 0x10000;
    vtm->last_itc = 0;
}

/* callback function when vtm_timer expires */
static void vtm_timer_fn(void *data)
{
    VCPU *vcpu = data;
    vtime_t *vtm = &VMX(vcpu, vtm);
    u64 vitv;

    vitv = VCPU(vcpu, itv);
    if (!ITV_IRQ_MASK(vitv)) {
        vmx_vcpu_pend_interrupt(vcpu, ITV_VECTOR(vitv));
        vcpu_unblock(vcpu);
    } else
        vtm->pending = 1;

    /*
     * "+ 1" is for fixing oops message at timer_interrupt() on VTI guest.
     * If oops checking condition changed to timer_after_eq() on VTI guest,
     * this parameter should be erased.
     */
    update_last_itc(vtm, VCPU(vcpu, itm) + 1);  // update vITC
}

void vtm_init(VCPU *vcpu)
{
    vtime_t *vtm;
    uint64_t itc_freq;

    vtm = &VMX(vcpu, vtm);

    itc_freq = local_cpu_data->itc_freq;
    vtm->cfg_max_jump = itc_freq * MAX_JUMP_STEP / 1000;
    vtm->cfg_min_grun = itc_freq * MIN_GUEST_RUNNING_TIME / 1000;
    init_timer(&vtm->vtm_timer, vtm_timer_fn, vcpu, vcpu->processor);
    vtm_reset(vcpu);
}

/*
 * Action when guest read ITC.
 */
uint64_t vtm_get_itc(VCPU *vcpu)
{
    uint64_t guest_itc;
    vtime_t *vtm = &VMX(vcpu, vtm);

    guest_itc = now_itc(vtm);
    return guest_itc;
}


void vtm_set_itc(VCPU *vcpu, uint64_t new_itc)
{
    int i;
    uint64_t vitm, vtm_offset;
    vtime_t *vtm;
    VCPU *v;
    struct domain *d = vcpu->domain;

    vitm = VCPU(vcpu, itm);
    vtm = &VMX(vcpu, vtm);
    if (vcpu->vcpu_id == 0) {
        vtm_offset = new_itc - ia64_get_itc();
        for (i = MAX_VIRT_CPUS - 1; i >= 0; i--) {
            if ((v = d->vcpu[i]) != NULL) {
                VMX(v, vtm).vtm_offset = vtm_offset;
                VMX(v, vtm).last_itc = 0;
            }
        }
    }
    vtm->last_itc = 0;
    if (vitm <= new_itc)
        stop_timer(&vtm->vtm_timer);
    else
        vtm_set_itm(vcpu, vitm);
}


#define TIMER_SLOP (50*1000) /* ns */  /* copy from timer.c */
extern u64 cycle_to_ns(u64 cyle);
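
/*
 * Guest write to cr.itm: record the new match value; if it is still ahead
 * of the last observed guest ITC, (re)program the host vtm_timer to fire
 * at that point, otherwise just stop the timer.
 */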
void vtm_set_itm(VCPU *vcpu, uint64_t val)
{
    vtime_t *vtm;
    uint64_t vitv, cur_itc, expires;

    vitv = VCPU(vcpu, itv);
    vtm = &VMX(vcpu, vtm);
    VCPU(vcpu, itm) = val;
    if (val > vtm->last_itc) {
        cur_itc = now_itc(vtm);
        if (time_before(val, cur_itc))
            val = cur_itc;
        expires = NOW() + cycle_to_ns(val - cur_itc) + TIMER_SLOP;
        vmx_vcpu_unpend_interrupt(vcpu, ITV_VECTOR(vitv));
        set_timer(&vtm->vtm_timer, expires);
    } else {
        stop_timer(&vtm->vtm_timer);
    }
}


void vtm_set_itv(VCPU *vcpu, uint64_t val)
{
    vtime_t *vtm = &VMX(vcpu, vtm);

    VCPU(vcpu, itv) = val;

    if (!ITV_IRQ_MASK(val) && vtm->pending) {
        vmx_vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
        vtm->pending = 0;
    }
}


void vlsapic_reset(VCPU *vcpu)
{
    int i;

    VCPU(vcpu, lid) = VCPU_LID(vcpu);
    VCPU(vcpu, ivr) = 0;
    VCPU(vcpu, tpr) = 0x10000;
    VCPU(vcpu, eoi) = 0;
    VCPU(vcpu, irr[0]) = 0;
    VCPU(vcpu, irr[1]) = 0;
    VCPU(vcpu, irr[2]) = 0;
    VCPU(vcpu, irr[3]) = 0;
    VCPU(vcpu, pmv) = 0x10000;
    VCPU(vcpu, cmcv) = 0x10000;
    VCPU(vcpu, lrr0) = 0x10000;   // default reset value?
    VCPU(vcpu, lrr1) = 0x10000;   // default reset value?
    update_vhpi(vcpu, NULL_VECTOR);
    VLSAPIC_XTP(vcpu) = 0x80;   // disabled
    for ( i=0; i<4; i++) {
        VLSAPIC_INSVC(vcpu,i) = 0;
    }

    dprintk(XENLOG_INFO, "VLSAPIC inservice base=%p\n", &VLSAPIC_INSVC(vcpu,0) );
}

/*
 * Find highest signaled bits in 4 words (long).
 *
 * return 0-255: highest bits.
 *        -1   : Not found.
 */
static __inline__ int highest_bits(uint64_t *dat)
{
    uint64_t bits, bitnum;
    int i;

    /* loop for all 256 bits */
    for ( i=3; i >= 0 ; i -- ) {
        bits = dat[i];
        if ( bits ) {
            bitnum = ia64_fls(bits);
            return i*64+bitnum;
        }
    }
    return NULL_VECTOR;
}

/*
 * Return 0-255 for pending irq.
 *        NULL_VECTOR: when no pending.
 */
static int highest_pending_irq(VCPU *vcpu)
{
    if ( VCPU(vcpu, irr[0]) & (1UL<<NMI_VECTOR) ) return NMI_VECTOR;
    if ( VCPU(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR) ) return ExtINT_VECTOR;
    return highest_bits(&VCPU(vcpu, irr[0]));
}

static int highest_inservice_irq(VCPU *vcpu)
{
    if ( VLSAPIC_INSVC(vcpu, 0) & (1UL<<NMI_VECTOR) ) return NMI_VECTOR;
    if ( VLSAPIC_INSVC(vcpu, 0) & (1UL<<ExtINT_VECTOR) ) return ExtINT_VECTOR;
    return highest_bits(&(VLSAPIC_INSVC(vcpu, 0)));
}

/*
 * The pending irq is higher than the inservice one.
 */
static int is_higher_irq(int pending, int inservice)
{
    return ( (pending > inservice) ||
             ((pending != NULL_VECTOR) && (inservice == NULL_VECTOR)) );
}

static int is_higher_class(int pending, int mic)
{
    return ( (pending >> 4) > mic );
}

#define IRQ_NO_MASKED         0
#define IRQ_MASKED_BY_VTPR    1
#define IRQ_MASKED_BY_INSVC   2   // masked by inservice IRQ

/* See Table 5-8 in SDM vol2 for the definition */
static int
_xirq_masked(VCPU *vcpu, int h_pending, int h_inservice)
{
    tpr_t vtpr;

    vtpr.val = VCPU(vcpu, tpr);

    if ( h_inservice == NMI_VECTOR ) {
        return IRQ_MASKED_BY_INSVC;
    }
    if ( h_pending == NMI_VECTOR ) {
        // Non Maskable Interrupt
        return IRQ_NO_MASKED;
    }
    if ( h_inservice == ExtINT_VECTOR ) {
        return IRQ_MASKED_BY_INSVC;
    }

    if ( h_pending == ExtINT_VECTOR ) {
        if ( vtpr.mmi ) {
            // mask all external IRQ
            return IRQ_MASKED_BY_VTPR;
        }
        else {
            return IRQ_NO_MASKED;
        }
    }

    if ( is_higher_irq(h_pending, h_inservice) ) {
        if ( is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)) ) {
            return IRQ_NO_MASKED;
        }
        else {
            return IRQ_MASKED_BY_VTPR;
        }
    }
    else {
        return IRQ_MASKED_BY_INSVC;
    }
}

static int irq_masked(VCPU *vcpu, int h_pending, int h_inservice)
{
    int mask;

    mask = _xirq_masked(vcpu, h_pending, h_inservice);
    return mask;
}


/*
 * May come from virtualization fault or
 * nested host interrupt.
 */
int vmx_vcpu_pend_interrupt(VCPU *vcpu, uint8_t vector)
{
    int ret;

    if (vector & ~0xff) {
        gdprintk(XENLOG_INFO, "vmx_vcpu_pend_interrupt: bad vector\n");
        return -1;
    }
    ret = test_and_set_bit(vector, &VCPU(vcpu, irr[0]));

    if (!ret) {
        vcpu->arch.irq_new_pending = 1;
        wmb();
    }

    return ret;
}


/*
 * Add a batch of pending interrupts.
 * The interrupt sources are contained in pend_irr[0-3], with
 * each bit standing for one interrupt.
 */
void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu, u64 *pend_irr)
{
    uint64_t spsr;
    int i;

    local_irq_save(spsr);
    for (i=0 ; i<4; i++ ) {
        VCPU(vcpu, irr[i]) |= pend_irr[i];
    }
    local_irq_restore(spsr);
    vcpu->arch.irq_new_pending = 1;
    wmb();
}

/*
 * If the new pending interrupt is enabled and not masked, we directly inject
 * it into the guest. Otherwise, we set the VHPI if vac.a_int=1 so that when
 * the interrupt becomes unmasked, it gets injected.
 * RETURN:
 *    the highest unmasked interrupt.
 *
 * Optimization: We defer setting the VHPI until the EOI time, if a higher
 *               priority interrupt is in-service. The idea is to reduce the
 *               number of unnecessary calls to inject_vhpi.
 */
int vmx_check_pending_irq(VCPU *vcpu)
{
    int mask, h_pending, h_inservice;
    uint64_t isr;
    IA64_PSR vpsr;
    REGS *regs = vcpu_regs(vcpu);

    h_pending = highest_pending_irq(vcpu);
    if ( h_pending == NULL_VECTOR ) {
        update_vhpi(vcpu, NULL_VECTOR);
        h_pending = SPURIOUS_VECTOR;
        goto chk_irq_exit;
    }
    h_inservice = highest_inservice_irq(vcpu);

    vpsr.val = VCPU(vcpu, vpsr);
    mask = irq_masked(vcpu, h_pending, h_inservice);
    if ( vpsr.i && IRQ_NO_MASKED == mask ) {
        isr = vpsr.val & IA64_PSR_RI;
        if ( !vpsr.ic )
            panic_domain(regs, "Interrupt when IC=0\n");
        update_vhpi(vcpu, h_pending);
        vmx_reflect_interruption(0, isr, 0, 12, regs);  // EXT IRQ
    } else if (mask == IRQ_MASKED_BY_INSVC) {
        if (VCPU(vcpu, vhpi))
            update_vhpi(vcpu, NULL_VECTOR);
    }
    else {
        // masked by vpsr.i or vtpr.
        update_vhpi(vcpu, h_pending);
    }

chk_irq_exit:
    return h_pending;
}

/*
 * Set an INIT interruption request to vcpu[0] of the target domain.
 * The INIT interruption is injected into each vcpu by the guest firmware.
 */
void vmx_pend_pal_init(struct domain *d)
{
    VCPU *vcpu;

    vcpu = d->vcpu[0];
    vcpu->arch.arch_vmx.pal_init_pending = 1;
}

/*
 * Only coming from virtualization fault.
 */
void guest_write_eoi(VCPU *vcpu)
{
    int vec;

    vec = highest_inservice_irq(vcpu);
    if (vec == NULL_VECTOR) {
        gdprintk(XENLOG_WARNING, "vcpu(%d): Wrong vector to EOI\n",
                 vcpu->vcpu_id);
        return;
    }
    VLSAPIC_INSVC(vcpu, vec >> 6) &= ~(1UL << (vec & 63));
    VCPU(vcpu, eoi) = 0;    // overwrite the data
    vcpu->arch.irq_new_pending = 1;
    wmb();
}

int is_unmasked_irq(VCPU *vcpu)
{
    int h_pending, h_inservice;

    h_pending = highest_pending_irq(vcpu);
    h_inservice = highest_inservice_irq(vcpu);
    if ( h_pending == NULL_VECTOR ||
         irq_masked(vcpu, h_pending, h_inservice) != IRQ_NO_MASKED ) {
        return 0;
    }
    else
        return 1;
}
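
/*
 * Guest read of cr.ivr: move the highest pending, unmasked vector from the
 * IRR to the in-service set and return it; otherwise return the spurious
 * vector, recording the masked vector in the VHPI when it is only masked
 * by the TPR.
 */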
uint64_t guest_read_vivr(VCPU *vcpu)
{
    int vec, h_inservice, mask;

    vec = highest_pending_irq(vcpu);
    h_inservice = highest_inservice_irq(vcpu);
    mask = irq_masked(vcpu, vec, h_inservice);
    if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
        if (VCPU(vcpu, vhpi))
            update_vhpi(vcpu, NULL_VECTOR);
        return IA64_SPURIOUS_INT_VECTOR;
    }
    if (mask == IRQ_MASKED_BY_VTPR) {
        update_vhpi(vcpu, vec);
        return IA64_SPURIOUS_INT_VECTOR;
    }
    VLSAPIC_INSVC(vcpu, vec >> 6) |= (1UL << (vec & 63));
    vmx_vcpu_unpend_interrupt(vcpu, vec);
    return (uint64_t)vec;
}

static void generate_exirq(VCPU *vcpu)
{
    IA64_PSR vpsr;
    uint64_t isr;
    REGS *regs = vcpu_regs(vcpu);

    vpsr.val = VCPU(vcpu, vpsr);
    isr = vpsr.val & IA64_PSR_RI;
    if ( !vpsr.ic )
        panic_domain(regs, "Interrupt when IC=0\n");
    vmx_reflect_interruption(0, isr, 0, 12, regs);  // EXT IRQ
}
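
/*
 * Compare the recorded VHPI against the threshold formed by psr.i and the
 * TPR (mmi/mic) fields and inject an external interrupt if it is now
 * deliverable.
 */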
void vhpi_detection(VCPU *vcpu)
{
    uint64_t threshold, vhpi;
    tpr_t vtpr;
    IA64_PSR vpsr;

    vpsr.val = VCPU(vcpu, vpsr);
    vtpr.val = VCPU(vcpu, tpr);

    threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
    vhpi = VCPU(vcpu, vhpi);
    if ( vhpi > threshold ) {
        // interrupt activated
        generate_exirq (vcpu);
    }
}

void vmx_vexirq(VCPU *vcpu)
{
    generate_exirq (vcpu);
}

struct vcpu *lid_to_vcpu(struct domain *d, uint16_t dest)
{
    int id = dest >> 8;

    /* Fast look: assume EID=0 ID=vcpu_id.  */
    if ((dest & 0xff) == 0 && id < MAX_VIRT_CPUS)
        return d->vcpu[id];
    return NULL;
}

/*
 * To inject INIT to guest, we must set the PAL_INIT entry
 * and set psr to switch to physical mode
 */
#define PSR_SET_BITS (IA64_PSR_DT | IA64_PSR_IT | IA64_PSR_RT |     \
                      IA64_PSR_IC | IA64_PSR_RI | IA64_PSR_I | IA64_PSR_CPL)
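
/*
 * Clearing DT/IT/RT switches the vcpu to physical mode, clearing CPL makes
 * the INIT handler run at privilege level 0, and clearing PSR.i together
 * with PSR.ic prevents an external interrupt from being injected while
 * interruption collection is off (the symptoms listed in the changeset
 * description).
 */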
static void vmx_inject_guest_pal_init(VCPU *vcpu)
{
    REGS *regs = vcpu_regs(vcpu);
    uint64_t psr = vmx_vcpu_get_psr(vcpu);

    regs->cr_iip = PAL_INIT_ENTRY;

    psr = psr & ~PSR_SET_BITS;
    vmx_vcpu_set_psr(vcpu, psr);
}


/*
 * Deliver an interrupt to a vcpu. (Only U-VP is supported now)
 *  dm:     SAPIC delivery mode.
 *  vector: interrupt vector to deliver.
 */
static int vcpu_deliver_int(VCPU *vcpu, uint64_t dm, uint64_t vector)
{
    int running = vcpu->is_running;

    IPI_DPRINTK("deliver_int %lx %lx\n", dm, vector);

    switch (dm) {
    case SAPIC_FIXED:     // INT
        vmx_vcpu_pend_interrupt(vcpu, vector);
        break;
    case SAPIC_LOWEST_PRIORITY:
    {
        struct vcpu *lowest = vcpu_viosapic(vcpu)->lowest_vcpu;

        if (lowest == NULL)
            lowest = vcpu;
        vmx_vcpu_pend_interrupt(lowest, vector);
        break;
    }
    case SAPIC_PMI:
        // TODO -- inject guest PMI
        panic_domain(NULL, "Inject guest PMI!\n");
        break;
    case SAPIC_NMI:
        vmx_vcpu_pend_interrupt(vcpu, 2);
        break;
    case SAPIC_INIT:
        vmx_inject_guest_pal_init(vcpu);
        break;
    case SAPIC_EXTINT:     // ExtINT
        vmx_vcpu_pend_interrupt(vcpu, 0);
        break;
    default:
        return -EINVAL;
    }

    /* Kick vcpu.  */
    vcpu_unblock(vcpu);
    if (running)
        smp_send_event_check_cpu(vcpu->processor);

    return 0;
}

int vlsapic_deliver_int(struct domain *d,
                        uint16_t dest, uint64_t dm, uint64_t vector)
{
    VCPU *vcpu;

    vcpu = lid_to_vcpu(d, dest);
    if (vcpu == NULL)
        return -ESRCH;

    if (!vcpu->is_initialised || test_bit(_VPF_down, &vcpu->pause_flags))
        return -ENOEXEC;

    return vcpu_deliver_int (vcpu, dm, vector);
}

/*
 * Deliver the INIT interruption to guest.
 */
void deliver_pal_init(VCPU *vcpu)
{
    vcpu_deliver_int(vcpu, SAPIC_INIT, 0);
}

/*
 * execute write IPI op.
 */
static void vlsapic_write_ipi(VCPU *vcpu, uint64_t addr, uint64_t value)
{
    VCPU *targ;
    struct domain *d = vcpu->domain;

    targ = lid_to_vcpu(vcpu->domain,
                       (((ipi_a_t)addr).id << 8) | ((ipi_a_t)addr).eid);
    if (targ == NULL)
        panic_domain(NULL, "Unknown IPI cpu\n");

    if (!targ->is_initialised ||
        test_bit(_VPF_down, &targ->pause_flags)) {

        struct pt_regs *targ_regs = vcpu_regs(targ);

        if (arch_set_info_guest(targ, NULL) != 0) {
            printk("arch_boot_vcpu: failure\n");
            return;
        }
        /* First or next rendez-vous: set registers.  */
        vcpu_init_regs(targ);
        targ_regs->cr_iip = d->arch.sal_data->boot_rdv_ip;
        targ_regs->r1 = d->arch.sal_data->boot_rdv_r1;

        if (test_and_clear_bit(_VPF_down, &targ->pause_flags)) {
            vcpu_wake(targ);
            printk(XENLOG_DEBUG "arch_boot_vcpu: vcpu %d awaken %016lx!\n",
                   targ->vcpu_id, targ_regs->cr_iip);
        } else {
            printk("arch_boot_vcpu: huh, already awake!");
        }
    } else {
        if (((ipi_d_t)value).dm == SAPIC_LOWEST_PRIORITY ||
            vcpu_deliver_int(targ, ((ipi_d_t)value).dm,
                             ((ipi_d_t)value).vector) < 0)
            panic_domain(NULL, "Deliver reserved interrupt!\n");
    }
    return;
}


unsigned long vlsapic_read(struct vcpu *v,
                           unsigned long addr,
                           unsigned long length)
{
    uint64_t result = 0;

    addr &= (PIB_SIZE - 1);

    switch (addr) {
    case PIB_OFST_INTA:
        if (length == 1) // 1 byte load
            ; // There is no i8259, there is no INTA access
        else
            panic_domain(NULL, "Undefined read on PIB INTA\n");

        break;
    case PIB_OFST_XTP:
        if (length == 1) {
            result = VLSAPIC_XTP(v);
            // printk("read xtp %lx\n", result);
        } else {
            panic_domain(NULL, "Undefined read on PIB XTP\n");
        }
        break;
    default:
        if (PIB_LOW_HALF(addr)) {  // lower half
            if (length != 8 )
                panic_domain(NULL, "Undefined IPI-LHF read!\n");
            else
                IPI_DPRINTK("IPI-LHF read %lx\n", pib_off);
        } else {  // upper half
            IPI_DPRINTK("IPI-UHF read %lx\n", addr);
        }
        break;
    }
    return result;
}
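
/*
 * Update this vcpu's XTP and recompute the lowest-priority vcpu used for
 * SAPIC_LOWEST_PRIORITY delivery; an XTP value with bit 7 set (0x80)
 * means interrupt acceptance is disabled on that vcpu.
 */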
static void vlsapic_write_xtp(struct vcpu *v, uint8_t val)
{
    struct viosapic *viosapic;
    struct vcpu *lvcpu, *vcpu;
    viosapic = vcpu_viosapic(v);

    spin_lock(&viosapic->lock);
    lvcpu = viosapic->lowest_vcpu;
    VLSAPIC_XTP(v) = val;

    for_each_vcpu(v->domain, vcpu) {
        if (VLSAPIC_XTP(lvcpu) > VLSAPIC_XTP(vcpu))
            lvcpu = vcpu;
    }

    if (VLSAPIC_XTP(lvcpu) & 0x80)  // Disabled
        lvcpu = NULL;

    viosapic->lowest_vcpu = lvcpu;
    spin_unlock(&viosapic->lock);
}

void vlsapic_write(struct vcpu *v,
                   unsigned long addr,
                   unsigned long length,
                   unsigned long val)
{
    addr &= (PIB_SIZE - 1);

    switch (addr) {
    case PIB_OFST_INTA:
        panic_domain(NULL, "Undefined write on PIB INTA\n");
        break;
    case PIB_OFST_XTP:
        if (length == 1) {
            // printk("write xtp %lx\n", val);
            vlsapic_write_xtp(v, val);
        } else {
            panic_domain(NULL, "Undefined write on PIB XTP\n");
        }
        break;
    default:
        if (PIB_LOW_HALF(addr)) {  // lower half
            if (length != 8)
                panic_domain(NULL, "Undefined IPI-LHF write with size %ld!\n",
                             length);
            else
                vlsapic_write_ipi(v, addr, val);
        }
        else {  // upper half
            // printk("IPI-UHF write %lx\n", addr);
            panic_domain(NULL, "No support for SM-VP yet\n");
        }
        break;
    }
}
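
/*
 * HVM save/restore handlers for the vLSAPIC state: in-service bits, VHPI,
 * XTP and any pending PAL INIT request, one record per vcpu.
 */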
static int vlsapic_save(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;

    for_each_vcpu(d, v) {
        struct hvm_hw_ia64_vlsapic vlsapic;
        int i;

        if (test_bit(_VPF_down, &v->pause_flags))
            continue;

        memset(&vlsapic, 0, sizeof(vlsapic));
        for (i = 0; i < 4; i++)
            vlsapic.insvc[i] = VLSAPIC_INSVC(v, i);

        vlsapic.vhpi = VCPU(v, vhpi);
        vlsapic.xtp = VLSAPIC_XTP(v);
        vlsapic.pal_init_pending = v->arch.arch_vmx.pal_init_pending;

        if (hvm_save_entry(VLSAPIC, v->vcpu_id, h, &vlsapic))
            return -EINVAL;
    }

    return 0;
}

static int vlsapic_load(struct domain *d, hvm_domain_context_t *h)
{
    uint16_t vcpuid;
    struct vcpu *v;
    struct hvm_hw_ia64_vlsapic vlsapic;
    int i;

    vcpuid = hvm_load_instance(h);
    if (vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL) {
        gdprintk(XENLOG_ERR,
                 "%s: domain has no vlsapic %u\n", __func__, vcpuid);
        return -EINVAL;
    }

    if (hvm_load_entry(VLSAPIC, h, &vlsapic) != 0)
        return -EINVAL;

    for (i = 0; i < 4; i++)
        VLSAPIC_INSVC(v, i) = vlsapic.insvc[i];

    VCPU(v, vhpi) = vlsapic.vhpi;
    VLSAPIC_XTP(v) = vlsapic.xtp;
    v->arch.arch_vmx.pal_init_pending = vlsapic.pal_init_pending;
    v->arch.irq_new_pending = 1; /* to force checking irq */

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(VLSAPIC, vlsapic_save, vlsapic_load,
                          1, HVMSR_PER_VCPU);
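
/*
 * HVM save/restore handlers for the virtual ITC/ITM state; the vtm_timer
 * is stopped while the snapshot is taken or restored and re-armed through
 * vtm_set_itm()/vtm_set_itc().
 */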
static int vtime_save(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;

    for_each_vcpu(d, v) {
        vtime_t *vtm = &VMX(v, vtm);
        struct hvm_hw_ia64_vtime vtime;

        if (test_bit(_VPF_down, &v->pause_flags))
            continue;

        stop_timer(&vtm->vtm_timer); //XXX should wait for callback not running.

        memset(&vtime, 0, sizeof(vtime));
        vtime.itc = now_itc(vtm);
        vtime.itm = VCPU(v, itm);
        vtime.last_itc = vtm->last_itc;
        vtime.pending = vtm->pending;

        vtm_set_itm(v, vtime.itm); // this may start timer.

        if (hvm_save_entry(VTIME, v->vcpu_id, h, &vtime))
            return -EINVAL;
    }

    return 0;
}

static int vtime_load(struct domain *d, hvm_domain_context_t *h)
{
    uint16_t vcpuid;
    struct vcpu *v;
    struct hvm_hw_ia64_vtime vtime;
    vtime_t *vtm;

    vcpuid = hvm_load_instance(h);
    if (vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL) {
        gdprintk(XENLOG_ERR,
                 "%s: domain has no vtime %u\n", __func__, vcpuid);
        return -EINVAL;
    }

    if (hvm_load_entry(VTIME, h, &vtime) != 0)
        return -EINVAL;

    vtm = &VMX(v, vtm);
    stop_timer(&vtm->vtm_timer); //XXX should wait for callback not running.

    vtm->last_itc = vtime.last_itc;
    vtm->pending = vtime.pending;

    migrate_timer(&vtm->vtm_timer, v->processor);
    vtm_set_itm(v, vtime.itm);
    vtm_set_itc(v, vtime.itc); // This may start timer.

    if (test_and_clear_bit(_VPF_down, &v->pause_flags))
        vcpu_wake(v);

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(VTIME, vtime_save, vtime_load, 1, HVMSR_PER_VCPU);