ia64/xen-unstable

view xen/arch/ia64/vmx/vlsapic.c @ 13958:b0aeca575dfb

[IA64] vcpu hot-plug/remove for VTi

Return to SAL is added for VTi by introducing a new SAL (OEM-defined) function.
Using this patch I was able to hot-add/hot-remove vcpus under Linux
(virtualized hot-plug, in fact).

Signed-off-by: Tristan Gingold <tgingold@free.fr>
author awilliam@xenbuild2.aw
date Fri Feb 16 15:49:05 2007 -0700 (2007-02-16)
parents b4df7de0cbf7
children 779d21cf58e7

/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vlsapic.c: virtual lsapic model including ITC timer.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */

#include <linux/sched.h>
#include <public/xen.h>
#include <asm/ia64_int.h>
#include <asm/vcpu.h>
#include <asm/regionreg.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/vmx_vcpu.h>
#include <asm/regs.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>
#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
#include <asm/vmx_platform.h>
#include <asm/viosapic.h>
#include <asm/vlsapic.h>
#include <asm/linux/jiffies.h>
#include <xen/domain.h>

#ifdef IPI_DEBUG
#define IPI_DPRINTK(x...) printk(x)
#else
#define IPI_DPRINTK(x...)
#endif

//u64 fire_itc;
//u64 fire_itc2;
//u64 fire_itm;
//u64 fire_itm2;
/*
 * Update the checked last_itc.
 */

extern void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim,
                                     u64 vector, REGS *regs);
static void update_last_itc(vtime_t *vtm, uint64_t cur_itc)
{
    vtm->last_itc = cur_itc;
}

/*
 * Next for vLSapic
 */

#define NMI_VECTOR      2
#define ExtINT_VECTOR   0
#define NULL_VECTOR     -1
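
/*
 * Update the guest-visible VHPI from the given vector and, when interrupt
 * acceleration is enabled (vac.a_int), notify the PAL VPS layer of the
 * pending interrupt.
 */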
static void update_vhpi(VCPU *vcpu, int vec)
{
    u64 vhpi;

    if (vec == NULL_VECTOR)
        vhpi = 0;
    else if (vec == NMI_VECTOR)
        vhpi = 32;
    else if (vec == ExtINT_VECTOR)
        vhpi = 16;
    else
        vhpi = vec >> 4;

    VCPU(vcpu, vhpi) = vhpi;
    // TODO: Add support for XENO
    if (VCPU(vcpu, vac).a_int)
        ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
                      (uint64_t)vcpu->arch.privregs, 0, 0, 0, 0, 0, 0);
}

/*
 * May come from virtualization fault or
 * nested host interrupt.
 */
static int vmx_vcpu_unpend_interrupt(VCPU *vcpu, uint8_t vector)
{
    uint64_t spsr;
    int ret;

    if (vector & ~0xff) {
        dprintk(XENLOG_WARNING, "vmx_vcpu_unpend_interrupt: bad vector\n");
        return -1;
    }

    local_irq_save(spsr);
    ret = test_and_clear_bit(vector, &VCPU(vcpu, irr[0]));
    local_irq_restore(spsr);

    if (ret) {
        vcpu->arch.irq_new_pending = 1;
        wmb();
    }

    return ret;
}

/*
 * ITC value as seen by the guest (host ITC + offset + drift).
 */
static uint64_t now_itc(vtime_t *vtm)
{
    uint64_t guest_itc = vtm->vtm_offset + ia64_get_itc();

    if (guest_itc >= vtm->last_itc)
        return guest_itc;
    else
        /* guest ITC went backward due to LP switch */
        return vtm->last_itc;
}

/*
 * Interval timer components reset.
 */
static void vtm_reset(VCPU *vcpu)
{
    int i;
    u64 vtm_offset;
    VCPU *v;
    struct domain *d = vcpu->domain;
    vtime_t *vtm = &VMX(vcpu, vtm);

    if (vcpu->vcpu_id == 0) {
        vtm_offset = 0UL - ia64_get_itc();
        for (i = MAX_VIRT_CPUS - 1; i >= 0; i--) {
            if ((v = d->vcpu[i]) != NULL) {
                VMX(v, vtm).vtm_offset = vtm_offset;
                VMX(v, vtm).last_itc = 0;
            }
        }
    }
    vtm->vtm_local_drift = 0;
    VCPU(vcpu, itm) = 0;
    VCPU(vcpu, itv) = 0x10000;
    vtm->last_itc = 0;
}

/* callback function when vtm_timer expires */
static void vtm_timer_fn(void *data)
{
    VCPU *vcpu = data;
    vtime_t *vtm = &VMX(vcpu, vtm);
    u64 vitv;

    vitv = VCPU(vcpu, itv);
    if (!ITV_IRQ_MASK(vitv)) {
        vmx_vcpu_pend_interrupt(vcpu, ITV_VECTOR(vitv));
        vcpu_unblock(vcpu);
    } else
        vtm->pending = 1;

    /*
     * "+ 1" is for fixing an oops message at timer_interrupt() in the VTi
     * guest.  If the oops-check condition there is changed to
     * time_after_eq(), this adjustment can be dropped.
     */
    update_last_itc(vtm, VCPU(vcpu, itm) + 1);  // update vITC
}
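
/*
 * One-time initialization of the per-vcpu virtual timer: derive the jump and
 * minimum-run thresholds from the host ITC frequency, set up the Xen timer
 * used to emulate ITM expiry, and reset the timer state.
 */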
void vtm_init(VCPU *vcpu)
{
    vtime_t *vtm;
    uint64_t itc_freq;

    vtm = &VMX(vcpu, vtm);

    itc_freq = local_cpu_data->itc_freq;
    vtm->cfg_max_jump = itc_freq * MAX_JUMP_STEP / 1000;
    vtm->cfg_min_grun = itc_freq * MIN_GUEST_RUNNING_TIME / 1000;
    init_timer(&vtm->vtm_timer, vtm_timer_fn, vcpu, vcpu->processor);
    vtm_reset(vcpu);
}

/*
 * Action when the guest reads ITC.
 */
uint64_t vtm_get_itc(VCPU *vcpu)
{
    uint64_t guest_itc;
    vtime_t *vtm = &VMX(vcpu, vtm);

    guest_itc = now_itc(vtm);
    return guest_itc;
}
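
/*
 * Guest write to the ITC.  A write on vcpu 0 recomputes the ITC offset for
 * every vcpu of the domain.  If the programmed ITM is already in the past
 * relative to the new ITC, the timer is stopped; otherwise it is re-armed
 * via vtm_set_itm().
 */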
void vtm_set_itc(VCPU *vcpu, uint64_t new_itc)
{
    int i;
    uint64_t vitm, vtm_offset;
    vtime_t *vtm;
    VCPU *v;
    struct domain *d = vcpu->domain;

    vitm = VCPU(vcpu, itm);
    vtm = &VMX(vcpu, vtm);
    if (vcpu->vcpu_id == 0) {
        vtm_offset = new_itc - ia64_get_itc();
        for (i = MAX_VIRT_CPUS - 1; i >= 0; i--) {
            if ((v = d->vcpu[i]) != NULL) {
                VMX(v, vtm).vtm_offset = vtm_offset;
                VMX(v, vtm).last_itc = 0;
            }
        }
    }
    vtm->last_itc = 0;
    if (vitm <= new_itc)
        stop_timer(&vtm->vtm_timer);
    else
        vtm_set_itm(vcpu, vitm);
}

#define TIMER_SLOP (50*1000) /* ns */  /* copy from timer.c */
extern u64 cycle_to_ns(u64 cycle);
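
/*
 * Guest write to cr.itm.  If the new match value is beyond the last observed
 * guest ITC, any stale pending timer interrupt is cleared and the Xen timer
 * is armed to expire when the guest ITC reaches it; otherwise the timer is
 * stopped.
 */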
void vtm_set_itm(VCPU *vcpu, uint64_t val)
{
    vtime_t *vtm;
    uint64_t vitv, cur_itc, expires;

    vitv = VCPU(vcpu, itv);
    vtm = &VMX(vcpu, vtm);
    VCPU(vcpu, itm) = val;
    if (val > vtm->last_itc) {
        cur_itc = now_itc(vtm);
        if (time_before(val, cur_itc))
            val = cur_itc;
        expires = NOW() + cycle_to_ns(val - cur_itc) + TIMER_SLOP;
        vmx_vcpu_unpend_interrupt(vcpu, ITV_VECTOR(vitv));
        set_timer(&vtm->vtm_timer, expires);
    } else {
        stop_timer(&vtm->vtm_timer);
    }
}
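
/*
 * Guest write to cr.itv.  If the write unmasks the timer vector while a
 * timer interrupt is pending, deliver it now.
 */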
void vtm_set_itv(VCPU *vcpu, uint64_t val)
{
    vtime_t *vtm = &VMX(vcpu, vtm);

    VCPU(vcpu, itv) = val;

    if (!ITV_IRQ_MASK(val) && vtm->pending) {
        vmx_vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
        vtm->pending = 0;
    }
}
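
/*
 * Reset the virtual LSAPIC to its power-on state: clear IRR and in-service
 * bits, mask PMV/CMCV/LRR0/LRR1, and disable the XTP.
 */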
void vlsapic_reset(VCPU *vcpu)
{
    int i;

    VCPU(vcpu, lid) = VCPU_LID(vcpu);
    VCPU(vcpu, ivr) = 0;
    VCPU(vcpu, tpr) = 0x10000;
    VCPU(vcpu, eoi) = 0;
    VCPU(vcpu, irr[0]) = 0;
    VCPU(vcpu, irr[1]) = 0;
    VCPU(vcpu, irr[2]) = 0;
    VCPU(vcpu, irr[3]) = 0;
    VCPU(vcpu, pmv) = 0x10000;
    VCPU(vcpu, cmcv) = 0x10000;
    VCPU(vcpu, lrr0) = 0x10000;   // default reset value?
    VCPU(vcpu, lrr1) = 0x10000;   // default reset value?
    update_vhpi(vcpu, NULL_VECTOR);
    VLSAPIC_XTP(vcpu) = 0x80;     // disabled
    for (i = 0; i < 4; i++) {
        VLSAPIC_INSVC(vcpu, i) = 0;
    }

    dprintk(XENLOG_INFO, "VLSAPIC inservice base=%p\n", &VLSAPIC_INSVC(vcpu, 0));
}

/*
 * Find the highest set bit in 4 words (256 bits in total).
 *
 * return 0-255: the highest set bit.
 *        -1 (NULL_VECTOR): no bit is set.
 */
static __inline__ int highest_bits(uint64_t *dat)
{
    uint64_t bits, bitnum;
    int i;

    /* loop for all 256 bits */
    for (i = 3; i >= 0; i--) {
        bits = dat[i];
        if (bits) {
            bitnum = ia64_fls(bits);
            return i * 64 + bitnum;
        }
    }
    return NULL_VECTOR;
}

/*
 * Return 0-255 for a pending irq.
 * NULL_VECTOR: when none is pending.
 */
static int highest_pending_irq(VCPU *vcpu)
{
    if ( VCPU(vcpu, irr[0]) & (1UL << NMI_VECTOR) ) return NMI_VECTOR;
    if ( VCPU(vcpu, irr[0]) & (1UL << ExtINT_VECTOR) ) return ExtINT_VECTOR;
    return highest_bits(&VCPU(vcpu, irr[0]));
}

static int highest_inservice_irq(VCPU *vcpu)
{
    if ( VLSAPIC_INSVC(vcpu, 0) & (1UL << NMI_VECTOR) ) return NMI_VECTOR;
    if ( VLSAPIC_INSVC(vcpu, 0) & (1UL << ExtINT_VECTOR) ) return ExtINT_VECTOR;
    return highest_bits(&(VLSAPIC_INSVC(vcpu, 0)));
}

/*
 * Check whether the pending irq is higher than the in-service one.
 */
static int is_higher_irq(int pending, int inservice)
{
    return ( (pending > inservice) ||
             ((pending != NULL_VECTOR) && (inservice == NULL_VECTOR)) );
}

static int is_higher_class(int pending, int mic)
{
    return ( (pending >> 4) > mic );
}

#define IRQ_NO_MASKED         0
#define IRQ_MASKED_BY_VTPR    1
#define IRQ_MASKED_BY_INSVC   2   // masked by inservice IRQ

/* See Table 5-8 in SDM vol2 for the definition */
static int
_xirq_masked(VCPU *vcpu, int h_pending, int h_inservice)
{
    tpr_t vtpr;

    vtpr.val = VCPU(vcpu, tpr);

    if ( h_inservice == NMI_VECTOR ) {
        return IRQ_MASKED_BY_INSVC;
    }
    if ( h_pending == NMI_VECTOR ) {
        // Non Maskable Interrupt
        return IRQ_NO_MASKED;
    }
    if ( h_inservice == ExtINT_VECTOR ) {
        return IRQ_MASKED_BY_INSVC;
    }

    if ( h_pending == ExtINT_VECTOR ) {
        if ( vtpr.mmi ) {
            // mask all external IRQ
            return IRQ_MASKED_BY_VTPR;
        }
        else {
            return IRQ_NO_MASKED;
        }
    }

    if ( is_higher_irq(h_pending, h_inservice) ) {
        if ( is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)) ) {
            return IRQ_NO_MASKED;
        }
        else {
            return IRQ_MASKED_BY_VTPR;
        }
    }
    else {
        return IRQ_MASKED_BY_INSVC;
    }
}

static int irq_masked(VCPU *vcpu, int h_pending, int h_inservice)
{
    int mask;

    mask = _xirq_masked(vcpu, h_pending, h_inservice);
    return mask;
}

/*
 * May come from virtualization fault or
 * nested host interrupt.
 */
int vmx_vcpu_pend_interrupt(VCPU *vcpu, uint8_t vector)
{
    uint64_t spsr;
    int ret;

    if (vector & ~0xff) {
        gdprintk(XENLOG_INFO, "vmx_vcpu_pend_interrupt: bad vector\n");
        return -1;
    }
    local_irq_save(spsr);
    ret = test_and_set_bit(vector, &VCPU(vcpu, irr[0]));
    local_irq_restore(spsr);

    if (!ret) {
        vcpu->arch.irq_new_pending = 1;
        wmb();
    }

    return ret;
}

/*
 * Add a batch of pending interrupts.
 * The interrupt sources are contained in pend_irr[0-3], with
 * each bit standing for one interrupt.
 */
void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu, u64 *pend_irr)
{
    uint64_t spsr;
    int i;

    local_irq_save(spsr);
    for (i = 0; i < 4; i++) {
        VCPU(vcpu, irr[i]) |= pend_irr[i];
    }
    local_irq_restore(spsr);
    vcpu->arch.irq_new_pending = 1;
    wmb();
}

/*
 * If the new pending interrupt is enabled and not masked, we directly inject
 * it into the guest. Otherwise, we set the VHPI if vac.a_int=1 so that when
 * the interrupt becomes unmasked, it gets injected.
 * RETURN:
 *    the highest unmasked interrupt.
 *
 * Optimization: We defer setting the VHPI until the EOI time, if a higher
 *               priority interrupt is in-service. The idea is to reduce the
 *               number of unnecessary calls to inject_vhpi.
 */
int vmx_check_pending_irq(VCPU *vcpu)
{
    int mask, h_pending, h_inservice;
    uint64_t isr;
    IA64_PSR vpsr;
    REGS *regs = vcpu_regs(vcpu);

    h_pending = highest_pending_irq(vcpu);
    if ( h_pending == NULL_VECTOR ) {
        update_vhpi(vcpu, NULL_VECTOR);
        h_pending = SPURIOUS_VECTOR;
        goto chk_irq_exit;
    }
    h_inservice = highest_inservice_irq(vcpu);

    vpsr.val = VCPU(vcpu, vpsr);
    mask = irq_masked(vcpu, h_pending, h_inservice);
    if ( vpsr.i && IRQ_NO_MASKED == mask ) {
        isr = vpsr.val & IA64_PSR_RI;
        if ( !vpsr.ic )
            panic_domain(regs, "Interrupt when IC=0\n");
        update_vhpi(vcpu, h_pending);
        vmx_reflect_interruption(0, isr, 0, 12, regs);   // EXT IRQ
    } else if (mask == IRQ_MASKED_BY_INSVC) {
        if (VCPU(vcpu, vhpi))
            update_vhpi(vcpu, NULL_VECTOR);
    }
    else {
        // masked by vpsr.i or vtpr.
        update_vhpi(vcpu, h_pending);
    }

chk_irq_exit:
    return h_pending;
}

/*
 * Only coming from virtualization fault.
 */
void guest_write_eoi(VCPU *vcpu)
{
    int vec;

    vec = highest_inservice_irq(vcpu);
    if ( vec == NULL_VECTOR )
        panic_domain(vcpu_regs(vcpu), "Wrong vector to EOI\n");
    VLSAPIC_INSVC(vcpu, vec >> 6) &= ~(1UL << (vec & 63));
    VCPU(vcpu, eoi) = 0;    // overwrite the data
    vcpu->arch.irq_new_pending = 1;
    wmb();
}
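
/*
 * Return 1 if the highest pending interrupt is deliverable right now
 * (i.e. not masked by the virtual TPR or an in-service interrupt),
 * 0 otherwise.
 */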
int is_unmasked_irq(VCPU *vcpu)
{
    int h_pending, h_inservice;

    h_pending = highest_pending_irq(vcpu);
    h_inservice = highest_inservice_irq(vcpu);
    if ( h_pending == NULL_VECTOR ||
         irq_masked(vcpu, h_pending, h_inservice) != IRQ_NO_MASKED ) {
        return 0;
    }
    else
        return 1;
}
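
/*
 * Guest read of cr.ivr: return the highest pending, unmasked vector and mark
 * it in-service, or the spurious vector when nothing is deliverable.  If the
 * highest pending vector is masked only by the TPR, the VHPI is refreshed
 * with it before returning spurious.
 */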
uint64_t guest_read_vivr(VCPU *vcpu)
{
    int vec, h_inservice, mask;
    vec = highest_pending_irq(vcpu);
    h_inservice = highest_inservice_irq(vcpu);
    mask = irq_masked(vcpu, vec, h_inservice);
    if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
        if (VCPU(vcpu, vhpi))
            update_vhpi(vcpu, NULL_VECTOR);
        return IA64_SPURIOUS_INT_VECTOR;
    }
    if (mask == IRQ_MASKED_BY_VTPR) {
        update_vhpi(vcpu, vec);
        return IA64_SPURIOUS_INT_VECTOR;
    }
    VLSAPIC_INSVC(vcpu, vec >> 6) |= (1UL << (vec & 63));
    vmx_vcpu_unpend_interrupt(vcpu, vec);
    return (uint64_t)vec;
}
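
/*
 * Reflect an external interrupt into the guest through its IVT.
 */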
static void generate_exirq(VCPU *vcpu)
{
    IA64_PSR vpsr;
    uint64_t isr;
    REGS *regs = vcpu_regs(vcpu);
    vpsr.val = VCPU(vcpu, vpsr);
    isr = vpsr.val & IA64_PSR_RI;
    if ( !vpsr.ic )
        panic_domain(regs, "Interrupt when IC=0\n");
    vmx_reflect_interruption(0, isr, 0, 12, regs);   // EXT IRQ
}
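
/*
 * Compare the current VHPI against the masking threshold formed from
 * vpsr.i, vtpr.mmi and vtpr.mic; if the VHPI exceeds it, inject an
 * external interrupt into the guest.
 */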
void vhpi_detection(VCPU *vcpu)
{
    uint64_t threshold, vhpi;
    tpr_t vtpr;
    IA64_PSR vpsr;
    vpsr.val = VCPU(vcpu, vpsr);
    vtpr.val = VCPU(vcpu, tpr);

    threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
    vhpi = VCPU(vcpu, vhpi);
    if ( vhpi > threshold ) {
        // interrupt activated
        generate_exirq(vcpu);
    }
}

void vmx_vexirq(VCPU *vcpu)
{
    generate_exirq(vcpu);
}
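
/*
 * Look up the vcpu whose LID (id/eid) matches the 16-bit destination taken
 * from an IPI; return NULL if no vcpu matches.
 */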
struct vcpu *vlsapic_lid_to_vcpu(struct domain *d, uint16_t dest)
{
    struct vcpu *v;
    for_each_vcpu ( d, v ) {
        if ( (v->arch.privregs->lid >> 16) == dest )
            return v;
    }
    return NULL;
}

/*
 * To inject INIT into the guest, we must set the IP to the PAL_INIT entry
 * and clear psr bits to switch to physical mode.
 */
#define PAL_INIT_ENTRY 0x80000000ffffffa0
#define PSR_SET_BITS (IA64_PSR_DT | IA64_PSR_IT | IA64_PSR_RT | \
                      IA64_PSR_IC | IA64_PSR_RI)

static void vmx_inject_guest_pal_init(VCPU *vcpu)
{
    REGS *regs = vcpu_regs(vcpu);
    uint64_t psr = vmx_vcpu_get_psr(vcpu);

    regs->cr_iip = PAL_INIT_ENTRY;

    psr = psr & ~PSR_SET_BITS;
    vmx_vcpu_set_psr(vcpu, psr);
}

/*
 * Deliver an IPI message.  (Only U-VP is supported now)
 *  dm:     delivery mode.
 *  vector: interrupt vector.
 */
static void vlsapic_deliver_ipi(VCPU *vcpu, uint64_t dm, uint64_t vector)
{
    IPI_DPRINTK("deliver_ipi %lx %lx\n", dm, vector);

    switch (dm) {
    case SAPIC_FIXED:       // INT
        vmx_vcpu_pend_interrupt(vcpu, vector);
        break;
    case SAPIC_PMI:
        // TODO -- inject guest PMI
        panic_domain(NULL, "Inject guest PMI!\n");
        break;
    case SAPIC_NMI:
        vmx_vcpu_pend_interrupt(vcpu, 2);
        break;
    case SAPIC_INIT:
        vmx_inject_guest_pal_init(vcpu);
        break;
    case SAPIC_EXTINT:      // ExtINT
        vmx_vcpu_pend_interrupt(vcpu, 0);
        break;
    default:
        panic_domain(NULL, "Deliver reserved IPI!\n");
        break;
    }
}

/*
 * TODO: Use hash table for the lookup.
 */
static inline VCPU *lid_to_vcpu(struct domain *d, uint8_t id, uint8_t eid)
{
    VCPU *v;
    LID lid;

    for_each_vcpu(d, v) {
        lid.val = VCPU_LID(v);
        if (lid.id == id && lid.eid == eid)
            return v;
    }
    return NULL;
}

/*
 * Execute a write-IPI operation.
 */
static void vlsapic_write_ipi(VCPU *vcpu, uint64_t addr, uint64_t value)
{
    VCPU *targ;
    struct domain *d = vcpu->domain;

    targ = lid_to_vcpu(vcpu->domain, ((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
    if (targ == NULL)
        panic_domain(NULL, "Unknown IPI cpu\n");

    if (!test_bit(_VCPUF_initialised, &targ->vcpu_flags) ||
        test_bit(_VCPUF_down, &targ->vcpu_flags)) {

        struct pt_regs *targ_regs = vcpu_regs(targ);
        struct vcpu_guest_context c;

        memset(&c, 0, sizeof(c));

        if (arch_set_info_guest(targ, &c) != 0) {
            printk("arch_boot_vcpu: failure\n");
            return;
        }
        /* First or next rendez-vous: set registers. */
        vcpu_init_regs(targ);
        targ_regs->cr_iip = d->arch.sal_data->boot_rdv_ip;
        targ_regs->r1 = d->arch.sal_data->boot_rdv_r1;

        if (test_and_clear_bit(_VCPUF_down, &targ->vcpu_flags)) {
            vcpu_wake(targ);
            printk("arch_boot_vcpu: vcpu %d awaken %016lx!\n",
                   targ->vcpu_id, targ_regs->cr_iip);
        } else {
            printk("arch_boot_vcpu: huh, already awake!\n");
        }
    } else {
        int running = test_bit(_VCPUF_running, &targ->vcpu_flags);
        vlsapic_deliver_ipi(targ, ((ipi_d_t)value).dm,
                            ((ipi_d_t)value).vector);
        vcpu_unblock(targ);
        if (running)
            smp_send_event_check_cpu(targ->processor);
    }
    return;
}
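
/*
 * Handle a load from the processor interrupt block (PIB).  Only the XTP byte
 * returns live data; INTA and IPI-space reads are accepted (after size
 * checks) but return 0.
 */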
unsigned long vlsapic_read(struct vcpu *v,
                           unsigned long addr,
                           unsigned long length)
{
    uint64_t result = 0;

    addr &= (PIB_SIZE - 1);

    switch (addr) {
    case PIB_OFST_INTA:
        if (length == 1)    // 1 byte load
            ;               // There is no i8259, so there is no INTA access
        else
            panic_domain(NULL, "Undefined read on PIB INTA\n");

        break;
    case PIB_OFST_XTP:
        if (length == 1) {
            result = VLSAPIC_XTP(v);
            // printk("read xtp %lx\n", result);
        } else {
            panic_domain(NULL, "Undefined read on PIB XTP\n");
        }
        break;
    default:
        if (PIB_LOW_HALF(addr)) {   // lower half
            if (length != 8)
                panic_domain(NULL, "Undefined IPI-LHF read!\n");
            else
                IPI_DPRINTK("IPI-LHF read %lx\n", addr);
        } else {                    // upper half
            IPI_DPRINTK("IPI-UHF read %lx\n", addr);
        }
        break;
    }
    return result;
}
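
/*
 * Record the new XTP value for this vcpu and recompute the vcpu with the
 * lowest XTP in the domain, which the virtual IOSAPIC uses as its
 * lowest-priority delivery target; NULL when even that XTP is disabled
 * (bit 0x80 set).
 */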
static void vlsapic_write_xtp(struct vcpu *v, uint8_t val)
{
    struct viosapic *viosapic;
    struct vcpu *lvcpu, *vcpu;
    viosapic = vcpu_viosapic(v);
    lvcpu = viosapic->lowest_vcpu;
    VLSAPIC_XTP(v) = val;

    for_each_vcpu(v->domain, vcpu) {
        if (VLSAPIC_XTP(lvcpu) > VLSAPIC_XTP(vcpu))
            lvcpu = vcpu;
    }

    if (VLSAPIC_XTP(lvcpu) & 0x80)    // Disabled
        lvcpu = NULL;

    viosapic->lowest_vcpu = lvcpu;
}
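
/*
 * Handle a store to the processor interrupt block (PIB): single-byte XTP
 * writes and 8-byte IPI writes to the lower half are supported; anything
 * else panics the domain.
 */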
void vlsapic_write(struct vcpu *v,
                   unsigned long addr,
                   unsigned long length,
                   unsigned long val)
{
    addr &= (PIB_SIZE - 1);

    switch (addr) {
    case PIB_OFST_INTA:
        panic_domain(NULL, "Undefined write on PIB INTA\n");
        break;
    case PIB_OFST_XTP:
        if (length == 1) {
            // printk("write xtp %lx\n", val);
            vlsapic_write_xtp(v, val);
        } else {
            panic_domain(NULL, "Undefined write on PIB XTP\n");
        }
        break;
    default:
        if (PIB_LOW_HALF(addr)) {   // lower half
            if (length != 8)
                panic_domain(NULL, "Undefined IPI-LHF write with size %ld!\n",
                             length);
            else
                vlsapic_write_ipi(v, addr, val);
        }
        else {                      // upper half
            // printk("IPI-UHF write %lx\n", addr);
            panic_domain(NULL, "No support for SM-VP yet\n");
        }
        break;
    }
}