ia64/xen-unstable

changeset 12798:3bd97b4fe77d

[IA64] Implement irq redirection of IOSAPIC

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild2.aw
date Thu Dec 07 05:34:07 2006 -0700 (2006-12-07)
parents 4f1a3ae07dbc
children 74de984434c9
files xen/arch/ia64/vmx/mmio.c xen/arch/ia64/vmx/viosapic.c xen/arch/ia64/vmx/vlsapic.c xen/include/asm-ia64/viosapic.h xen/include/asm-ia64/vlsapic.h xen/include/asm-ia64/vmx_platform.h xen/include/asm-ia64/vmx_vpd.h
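
At a high level, the patch moves the PIB (processor interrupt block) emulation out of mmio.c into vlsapic.c behind two new entry points, vlsapic_read() and vlsapic_write(), makes the XTP register a per-vcpu field (arch_vmx_struct.xtp, accessed through VLSAPIC_XTP()), and lets the virtual IOSAPIC redirect lowest-priority interrupts to the vcpu whose XTP value is currently lowest (viosapic->lowest_vcpu). The fragment below is only an illustrative sketch of that redirection rule, modelled on vlsapic_write_xtp() in the diff; pick_redirection_target() is a made-up name and error handling is omitted.

    /*
     * Sketch only: pick the vcpu that redirectable (lowest-priority)
     * interrupts should be steered to, following the same rule as
     * vlsapic_write_xtp() in this patch.
     */
    static struct vcpu *pick_redirection_target(struct domain *d)
    {
        struct vcpu *v, *lowest = d->vcpu[0];

        for_each_vcpu(d, v) {
            /* a smaller XTP value means "more willing to take interrupts" */
            if (VLSAPIC_XTP(v) < VLSAPIC_XTP(lowest))
                lowest = v;
        }

        if (VLSAPIC_XTP(lowest) & 0x80)   /* 0x80: interrupts disabled */
            return NULL;                  /* caller falls back to the LID target */

        return lowest;
    }
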
line diff
     1.1 --- a/xen/arch/ia64/vmx/mmio.c	Thu Dec 07 04:15:54 2006 -0700
     1.2 +++ b/xen/arch/ia64/vmx/mmio.c	Thu Dec 07 05:34:07 2006 -0700
     1.3 @@ -23,7 +23,6 @@
     1.4  
     1.5  #include <linux/sched.h>
     1.6  #include <xen/mm.h>
     1.7 -#include <asm/tlb.h>
     1.8  #include <asm/vmx_mm_def.h>
     1.9  #include <asm/gcc_intrin.h>
    1.10  #include <linux/interrupt.h>
    1.11 @@ -37,22 +36,7 @@
    1.12  #include <linux/event.h>
    1.13  #include <xen/domain.h>
    1.14  #include <asm/viosapic.h>
    1.15 -
    1.16 -/*
    1.17 -struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base)
    1.18 -{
    1.19 -    int     i;
    1.20 -    for (i=0; mio_base[i].iot != NOT_IO; i++ ) {
    1.21 -        if ( gpa >= mio_base[i].start && gpa <= mio_base[i].end )
    1.22 -            return &mio_base[i];
    1.23 -    }
    1.24 -    return NULL;
    1.25 -}
    1.26 -*/
    1.27 -
    1.28 -#define	PIB_LOW_HALF(ofst)	!(ofst&(1<<20))
    1.29 -#define PIB_OFST_INTA           0x1E0000
    1.30 -#define PIB_OFST_XTP            0x1E0008
    1.31 +#include <asm/vlsapic.h>
    1.32  
    1.33  #define HVM_BUFFERED_IO_RANGE_NR 1
    1.34  
    1.35 @@ -118,87 +102,6 @@ int hvm_buffered_io_intercept(ioreq_t *p
    1.36      return 1;
    1.37  }
    1.38  
    1.39 -static void write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value);
    1.40 -
    1.41 -static void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma)
    1.42 -{
    1.43 -    switch (pib_off) {
    1.44 -    case PIB_OFST_INTA:
    1.45 -        panic_domain(NULL,"Undefined write on PIB INTA\n");
    1.46 -        break;
    1.47 -    case PIB_OFST_XTP:
    1.48 -        if ( s == 1 && ma == 4 /* UC */) {
    1.49 -            vmx_vcpu_get_plat(vcpu)->xtp = *(uint8_t *)src;
    1.50 -        }
    1.51 -        else {
    1.52 -            panic_domain(NULL,"Undefined write on PIB XTP\n");
    1.53 -        }
    1.54 -        break;
    1.55 -    default:
    1.56 -        if ( PIB_LOW_HALF(pib_off) ) {   // lower half
    1.57 -            if ( s != 8 || ma != 0x4 /* UC */ ) {
    1.58 -                panic_domain
    1.59 -		  (NULL,"Undefined IPI-LHF write with s %ld, ma %d!\n", s, ma);
    1.60 -            }
    1.61 -            else {
    1.62 -                write_ipi(vcpu, pib_off, *(uint64_t *)src);
    1.63 -                // TODO for SM-VP
    1.64 -            }
    1.65 -        }
    1.66 -        else {      // upper half
    1.67 -            printk("IPI-UHF write %lx\n",pib_off);
    1.68 -            panic_domain(NULL,"Not support yet for SM-VP\n");
    1.69 -        }
    1.70 -        break;
    1.71 -    }
    1.72 -}
    1.73 -
    1.74 -static void pib_read(VCPU *vcpu, uint64_t pib_off, void *dest, size_t s, int ma)
    1.75 -{
    1.76 -    switch (pib_off) {
    1.77 -    case PIB_OFST_INTA:
    1.78 -        // todo --- emit on processor system bus.
    1.79 -        if ( s == 1 && ma == 4) { // 1 byte load
    1.80 -            // TODO: INTA read from IOSAPIC
    1.81 -        }
    1.82 -        else {
    1.83 -            panic_domain(NULL,"Undefined read on PIB INTA\n");
    1.84 -        }
    1.85 -        break;
    1.86 -    case PIB_OFST_XTP:
    1.87 -        if ( s == 1 && ma == 4) {
    1.88 -            *((uint8_t*)dest) = vmx_vcpu_get_plat(vcpu)->xtp;
    1.89 -        }
    1.90 -        else {
    1.91 -            panic_domain(NULL,"Undefined read on PIB XTP\n");
    1.92 -        }
    1.93 -        break;
    1.94 -    default:
    1.95 -        if ( PIB_LOW_HALF(pib_off) ) {   // lower half
    1.96 -            if ( s != 8 || ma != 4 ) {
    1.97 -                panic_domain(NULL,"Undefined IPI-LHF read!\n");
    1.98 -            }
    1.99 -            else {
   1.100 -#ifdef  IPI_DEBUG
   1.101 -                printk("IPI-LHF read %lx\n",pib_off);
   1.102 -#endif
   1.103 -                *(uint64_t *)dest = 0;  // TODO for SM-VP
   1.104 -            }
   1.105 -        }
   1.106 -        else {      // upper half
   1.107 -            if ( s != 1 || ma != 4 ) {
   1.108 -                panic_domain(NULL,"Undefined PIB-UHF read!\n");
   1.109 -            }
   1.110 -            else {
   1.111 -#ifdef  IPI_DEBUG
   1.112 -                printk("IPI-UHF read %lx\n",pib_off);
   1.113 -#endif
   1.114 -                *(uint8_t *)dest = 0;   // TODO for SM-VP
   1.115 -            }
   1.116 -        }
   1.117 -        break;
   1.118 -    }
   1.119 -}
   1.120  
   1.121  static void low_mmio_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
   1.122  {
   1.123 @@ -283,11 +186,14 @@ static void mmio_access(VCPU *vcpu, u64 
   1.124  
   1.125      perfc_incra(vmx_mmio_access, iot >> 56);
   1.126      switch (iot) {
   1.127 -    case GPFN_PIB:
   1.128 -        if(!dir)
   1.129 -            pib_write(vcpu, dest, src_pa - v_plat->pib_base, s, ma);
    1.130 +    case GPFN_PIB:
   1.131 +        if (ma != 4)
   1.132 +            panic_domain(NULL, "Access PIB not with UC attribute\n");
   1.133 +
   1.134 +        if (!dir)
   1.135 +            vlsapic_write(vcpu, src_pa, s, *dest);
   1.136          else
   1.137 -            pib_read(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
   1.138 +            *dest = vlsapic_read(vcpu, src_pa, s);
   1.139          break;
   1.140      case GPFN_GFW:
   1.141          break;
   1.142 @@ -312,195 +218,6 @@ static void mmio_access(VCPU *vcpu, u64 
   1.143  }
   1.144  
   1.145  /*
   1.146 - * Read or write data in guest virtual address mode.
   1.147 - */
   1.148 -/*
   1.149 -void
   1.150 -memwrite_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s)
   1.151 -{
   1.152 -    uint64_t pa;
   1.153 -
   1.154 -    if (!vtlb->nomap)
   1.155 -        panic("Normal memory write shouldn't go to this point!");
   1.156 -    pa = PPN_2_PA(vtlb->ppn);
   1.157 -    pa += POFFSET((u64)dest, vtlb->ps);
   1.158 -    mmio_write (vcpu, src, pa, s, vtlb->ma);
   1.159 -}
   1.160 -
   1.161 -
   1.162 -void
   1.163 -memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s)
   1.164 -{
   1.165 -    uint64_t pa = (uint64_t)dest;
   1.166 -    int    ma;
   1.167 -
   1.168 -    if ( pa & (1UL <<63) ) {
   1.169 -        // UC
   1.170 -        ma = 4;
   1.171 -        pa <<=1;
   1.172 -        pa >>=1;
   1.173 -    }
   1.174 -    else {
   1.175 -        // WBL
   1.176 -        ma = 0;     // using WB for WBL
   1.177 -    }
   1.178 -    mmio_write (vcpu, src, pa, s, ma);
   1.179 -}
   1.180 -
   1.181 -void
   1.182 -memread_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s)
   1.183 -{
   1.184 -    uint64_t pa;
   1.185 -
   1.186 -    if (!vtlb->nomap)
   1.187 -        panic_domain(NULL,"Normal memory write shouldn't go to this point!");
   1.188 -    pa = PPN_2_PA(vtlb->ppn);
   1.189 -    pa += POFFSET((u64)src, vtlb->ps);
   1.190 -
   1.191 -    mmio_read(vcpu, pa, dest, s, vtlb->ma);
   1.192 -}
   1.193 -
   1.194 -void
   1.195 -memread_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s)
   1.196 -{
   1.197 -    uint64_t pa = (uint64_t)src;
   1.198 -    int    ma;
   1.199 -
   1.200 -    if ( pa & (1UL <<63) ) {
   1.201 -        // UC
   1.202 -        ma = 4;
   1.203 -        pa <<=1;
   1.204 -        pa >>=1;
   1.205 -    }
   1.206 -    else {
   1.207 -        // WBL
   1.208 -        ma = 0;     // using WB for WBL
   1.209 -    }
   1.210 -    mmio_read(vcpu, pa, dest, s, ma);
   1.211 -}
   1.212 -*/
   1.213 -
   1.214 -/*
   1.215 - * To inject INIT to guest, we must set the PAL_INIT entry 
   1.216 - * and set psr to switch to physical mode
   1.217 - */
   1.218 -#define PAL_INIT_ENTRY 0x80000000ffffffa0
   1.219 -#define PSR_SET_BITS (IA64_PSR_DT | IA64_PSR_IT | IA64_PSR_RT |	\
   1.220 -                      IA64_PSR_IC | IA64_PSR_RI)
   1.221 -
   1.222 -static void vmx_inject_guest_pal_init(VCPU *vcpu)
   1.223 -{
   1.224 -    REGS *regs = vcpu_regs(vcpu);
   1.225 -    uint64_t psr = vmx_vcpu_get_psr(vcpu);
   1.226 -
   1.227 -    regs->cr_iip = PAL_INIT_ENTRY;
   1.228 -
   1.229 -    psr = psr & (~PSR_SET_BITS);
   1.230 -    vmx_vcpu_set_psr(vcpu,psr);
   1.231 -}
   1.232 -
   1.233 -/*
   1.234 - * Deliver IPI message. (Only U-VP is supported now)
   1.235 - *  offset: address offset to IPI space.
   1.236 - *  value:  deliver value.
   1.237 - */
   1.238 -static void deliver_ipi (VCPU *vcpu, uint64_t dm, uint64_t vector)
   1.239 -{
   1.240 -#ifdef  IPI_DEBUG
   1.241 -  printk ("deliver_ipi %lx %lx\n",dm,vector);
   1.242 -#endif
   1.243 -    switch ( dm ) {
   1.244 -    case 0:     // INT
   1.245 -        vmx_vcpu_pend_interrupt (vcpu, vector);
   1.246 -        break;
   1.247 -    case 2:     // PMI
   1.248 -        // TODO -- inject guest PMI
   1.249 -        panic_domain (NULL, "Inject guest PMI!\n");
   1.250 -        break;
   1.251 -    case 4:     // NMI
   1.252 -        vmx_vcpu_pend_interrupt (vcpu, 2);
   1.253 -        break;
   1.254 -    case 5:     // INIT
   1.255 -        vmx_inject_guest_pal_init(vcpu);
   1.256 -        break;
   1.257 -    case 7:     // ExtINT
   1.258 -        vmx_vcpu_pend_interrupt (vcpu, 0);
   1.259 -        break;
   1.260 -    case 1:
   1.261 -    case 3:
   1.262 -    case 6:
   1.263 -    default:
   1.264 -        panic_domain (NULL, "Deliver reserved IPI!\n");
   1.265 -        break;
   1.266 -    }
   1.267 -}
   1.268 -
   1.269 -/*
   1.270 - * TODO: Use hash table for the lookup.
   1.271 - */
   1.272 -static inline VCPU *lid_2_vcpu (struct domain *d, u64 id, u64 eid)
   1.273 -{
   1.274 -    int   i;
   1.275 -    VCPU  *vcpu;
   1.276 -    LID   lid;
   1.277 -    for (i=0; i<MAX_VIRT_CPUS; i++) {
   1.278 -        vcpu = d->vcpu[i];
   1.279 -        if (!vcpu)
   1.280 -            continue;
   1.281 -        lid.val = VCPU_LID(vcpu);
   1.282 -        if ( lid.id == id && lid.eid == eid )
   1.283 -            return vcpu;
   1.284 -    }
   1.285 -    return NULL;
   1.286 -}
   1.287 -
   1.288 -/*
   1.289 - * execute write IPI op.
   1.290 - */
   1.291 -static void write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value)
   1.292 -{
   1.293 -    VCPU   *targ;
   1.294 -    struct domain *d=vcpu->domain; 
   1.295 -    targ = lid_2_vcpu(vcpu->domain, 
   1.296 -           ((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
   1.297 -    if ( targ == NULL ) panic_domain (NULL,"Unknown IPI cpu\n");
   1.298 -
   1.299 -    if (!test_bit(_VCPUF_initialised, &targ->vcpu_flags)) {
   1.300 -        struct pt_regs *targ_regs = vcpu_regs (targ);
   1.301 -        struct vcpu_guest_context c;
   1.302 -
   1.303 -        memset (&c, 0, sizeof (c));
   1.304 -
   1.305 -        if (arch_set_info_guest (targ, &c) != 0) {
   1.306 -            printk ("arch_boot_vcpu: failure\n");
   1.307 -            return;
   1.308 -        }
   1.309 -        /* First or next rendez-vous: set registers.  */
   1.310 -        vcpu_init_regs (targ);
   1.311 -        targ_regs->cr_iip = d->arch.sal_data->boot_rdv_ip;
   1.312 -        targ_regs->r1 = d->arch.sal_data->boot_rdv_r1;
   1.313 -
   1.314 -        if (test_and_clear_bit(_VCPUF_down,&targ->vcpu_flags)) {
   1.315 -            vcpu_wake(targ);
   1.316 -            printk ("arch_boot_vcpu: vcpu %d awaken %016lx!\n",
   1.317 -                    targ->vcpu_id, targ_regs->cr_iip);
   1.318 -        }
   1.319 -        else
   1.320 -            printk ("arch_boot_vcpu: huu, already awaken!");
   1.321 -    }
   1.322 -    else {
   1.323 -        int running = test_bit(_VCPUF_running,&targ->vcpu_flags);
   1.324 -        deliver_ipi (targ, ((ipi_d_t)value).dm, 
   1.325 -                    ((ipi_d_t)value).vector);
   1.326 -        vcpu_unblock(targ);
   1.327 -        if (running)
   1.328 -            smp_send_event_check_cpu(targ->processor);
   1.329 -    }
   1.330 -    return;
   1.331 -}
   1.332 -
   1.333 -
   1.334 -/*
   1.335     dir 1: read 0:write
   1.336      inst_type 0:integer 1:floating point
   1.337   */
     2.1 --- a/xen/arch/ia64/vmx/viosapic.c	Thu Dec 07 04:15:54 2006 -0700
     2.2 +++ b/xen/arch/ia64/vmx/viosapic.c	Thu Dec 07 05:34:07 2006 -0700
     2.3 @@ -31,7 +31,6 @@
     2.4  #include <xen/xmalloc.h>
     2.5  #include <xen/lib.h>
     2.6  #include <xen/errno.h>
     2.7 -#include <xen/sched.h>
     2.8  #include <public/hvm/ioreq.h>
     2.9  #include <asm/vlsapic.h>
    2.10  #include <asm/viosapic.h>
    2.11 @@ -47,11 +46,18 @@ static void viosapic_deliver(struct vios
    2.12  
    2.13      switch ( delivery_mode )
    2.14      {
    2.15 -    // don't support interrupt direct currently
    2.16      case SAPIC_FIXED:
    2.17 +    {
    2.18 +        v = vlsapic_lid_to_vcpu(viosapic_domain(viosapic), dest);
    2.19 +        vlsapic_set_irq(v, vector);
    2.20 +        vcpu_kick(v);
    2.21 +        break;
    2.22 +    }
    2.23      case SAPIC_LOWEST_PRIORITY:
    2.24      {
    2.25          v = vlsapic_lid_to_vcpu(viosapic_domain(viosapic), dest);
    2.26 +        if (viosapic->lowest_vcpu)
    2.27 +            v = viosapic->lowest_vcpu;
    2.28          vlsapic_set_irq(v, vector);
    2.29          vcpu_kick(v);
    2.30          break;
    2.31 @@ -72,10 +78,10 @@ static int iosapic_get_highest_irq(struc
    2.32  {
    2.33      uint64_t irqs = viosapic->irr & ~viosapic->isr ;
    2.34     
    2.35 -    if (irqs >> 32)
    2.36 -        return (fls(irqs >> 32) - 1 + 32);
    2.37 -    else
    2.38 -        return fls(irqs) - 1;
    2.39 +    if (irqs)
    2.40 +        return ia64_fls(irqs);
    2.41 +
    2.42 +    return -1;
    2.43  }
    2.44  
    2.45  
    2.46 @@ -327,5 +333,7 @@ void viosapic_init(struct domain *d)
    2.47  
    2.48      viosapic_reset(viosapic);
    2.49  
    2.50 +    viosapic->lowest_vcpu = NULL;
    2.51 +    
    2.52      viosapic->base_address = VIOSAPIC_DEFAULT_BASE_ADDRESS;
    2.53  }
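
The viosapic.c hunks above make two functional changes: SAPIC_FIXED delivery is now handled in its own case (previously it fell through to lowest-priority), and lowest-priority delivery prefers viosapic->lowest_vcpu when one has been elected via XTP. The highest pending interrupt is also computed in one step with ia64_fls() over the 64-bit irr & ~isr word, returning -1 when nothing is pending. The helper below is a portable sketch of that scan, assuming ia64_fls() returns the index (0-63) of the most significant set bit of a non-zero value; highest_bit64() and get_highest_pending() are illustrative names, not code from the tree.

    #include <stdint.h>   /* xen/types.h in-tree */

    /* Portable stand-in for ia64_fls(): index of the highest set bit. */
    static inline int highest_bit64(uint64_t x)
    {
        int n = -1;

        while (x) {
            x >>= 1;
            n++;
        }
        return n;
    }

    static int get_highest_pending(uint64_t irr, uint64_t isr)
    {
        uint64_t irqs = irr & ~isr;    /* requested but not yet in service */

        return irqs ? highest_bit64(irqs) : -1;
    }
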
     3.1 --- a/xen/arch/ia64/vmx/vlsapic.c	Thu Dec 07 04:15:54 2006 -0700
     3.2 +++ b/xen/arch/ia64/vmx/vlsapic.c	Thu Dec 07 05:34:07 2006 -0700
     3.3 @@ -25,11 +25,9 @@
     3.4  #include <asm/ia64_int.h>
     3.5  #include <asm/vcpu.h>
     3.6  #include <asm/regionreg.h>
     3.7 -#include <asm/tlb.h>
     3.8  #include <asm/processor.h>
     3.9  #include <asm/delay.h>
    3.10  #include <asm/vmx_vcpu.h>
    3.11 -#include <asm/vmx_vcpu.h>
    3.12  #include <asm/regs.h>
    3.13  #include <asm/gcc_intrin.h>
    3.14  #include <asm/vmx_mm_def.h>
    3.15 @@ -39,7 +37,15 @@
    3.16  #include <asm/kregs.h>
    3.17  #include <asm/vmx_platform.h>
    3.18  #include <asm/viosapic.h>
    3.19 +#include <asm/vlsapic.h>
    3.20  #include <asm/linux/jiffies.h>
    3.21 +#include <xen/domain.h>
    3.22 +
    3.23 +#ifdef IPI_DEBUG
    3.24 +#define IPI_DPRINTK(x...) printk(x)
    3.25 +#else
    3.26 +#define IPI_DPRINTK(x...)
    3.27 +#endif
    3.28  
    3.29  //u64  fire_itc;
    3.30  //u64  fire_itc2;
    3.31 @@ -116,16 +122,13 @@ static int vmx_vcpu_unpend_interrupt(VCP
    3.32   */
    3.33  static uint64_t now_itc(vtime_t *vtm)
    3.34  {
    3.35 -        uint64_t guest_itc=vtm->vtm_offset+ia64_get_itc();
    3.36 -        
    3.37 -        if ( vtm->vtm_local_drift ) {
    3.38 -//          guest_itc -= vtm->vtm_local_drift;
    3.39 -        }       
    3.40 -        if (guest_itc >= vtm->last_itc)
    3.41 -            return guest_itc;
    3.42 -        else
    3.43 -            /* guest ITC backwarded due after LP switch */
    3.44 -            return vtm->last_itc;
    3.45 +    uint64_t guest_itc = vtm->vtm_offset + ia64_get_itc();
    3.46 +
    3.47 +    if (guest_itc >= vtm->last_itc)
    3.48 +        return guest_itc;
    3.49 +    else
    3.50 +        /* guest ITC went backward due to LP switch */
    3.51 +        return vtm->last_itc;
    3.52  }
    3.53  
    3.54  /*
    3.55 @@ -175,7 +178,7 @@ void vtm_init(VCPU *vcpu)
    3.56  {
    3.57      vtime_t     *vtm;
    3.58      uint64_t    itc_freq;
    3.59 -    
    3.60 +
    3.61      vtm = &VMX(vcpu, vtm);
    3.62  
    3.63      itc_freq = local_cpu_data->itc_freq;
    3.64 @@ -263,76 +266,6 @@ void vtm_set_itv(VCPU *vcpu, uint64_t va
    3.65  }
    3.66  
    3.67  
    3.68 -/*
    3.69 - * Update interrupt or hook the vtm timer for fire
    3.70 - * At this point vtm_timer should be removed if itv is masked.
    3.71 - */
    3.72 -/* Interrupt must be disabled at this point */
    3.73 -/*
    3.74 -void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm)
    3.75 -{
    3.76 -    uint64_t    cur_itc,vitm,vitv;
    3.77 -    uint64_t    expires;
    3.78 -    long        diff_now, diff_last;
    3.79 -    uint64_t    spsr;
    3.80 -    
    3.81 -    vitv = VCPU(vcpu, itv);
    3.82 -    if ( ITV_IRQ_MASK(vitv) ) {
    3.83 -        return;
    3.84 -    }
    3.85 -    
    3.86 -    vitm =VCPU(vcpu, itm);
    3.87 -    local_irq_save(spsr);
    3.88 -    cur_itc =now_itc(vtm);
    3.89 -    diff_last = vtm->last_itc - vitm;
    3.90 -    diff_now = cur_itc - vitm;
    3.91 -
    3.92 -    if ( diff_last >= 0 ) {
    3.93 -        // interrupt already fired.
    3.94 -        stop_timer(&vtm->vtm_timer);
    3.95 -    }
    3.96 -    else if ( diff_now >= 0 ) {
    3.97 -        // ITV is fired.
    3.98 -        vmx_vcpu_pend_interrupt(vcpu, vitv&0xff);
    3.99 -    }
   3.100 -*/
   3.101 -    /* Both last_itc & cur_itc < itm, wait for fire condition */
   3.102 -/*    else {
   3.103 -        expires = NOW() + cycle_to_ns(0-diff_now) + TIMER_SLOP;
   3.104 -        set_timer(&vtm->vtm_timer, expires);
   3.105 -    }
   3.106 -    local_irq_restore(spsr);
   3.107 -}
   3.108 - */
   3.109 -
   3.110 -/*
   3.111 - * Action for vtm when the domain is scheduled out.
   3.112 - * Remove the timer for vtm.
   3.113 - */
   3.114 -/*
   3.115 -void vtm_domain_out(VCPU *vcpu)
   3.116 -{
   3.117 -    if(!is_idle_domain(vcpu->domain))
   3.118 -	stop_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer);
   3.119 -}
   3.120 - */
   3.121 -/*
   3.122 - * Action for vtm when the domain is scheduled in.
   3.123 - * Fire vtm IRQ or add the timer for vtm.
   3.124 - */
   3.125 -/*
   3.126 -void vtm_domain_in(VCPU *vcpu)
   3.127 -{
   3.128 -    vtime_t     *vtm;
   3.129 -
   3.130 -    if(!is_idle_domain(vcpu->domain)) {
   3.131 -	vtm=&(vcpu->arch.arch_vmx.vtm);
   3.132 -	vtm_interruption_update(vcpu, vtm);
   3.133 -    }
   3.134 -}
   3.135 - */
   3.136 -
   3.137 -
   3.138  void vlsapic_reset(VCPU *vcpu)
   3.139  {
   3.140      int     i;
   3.141 @@ -350,6 +283,7 @@ void vlsapic_reset(VCPU *vcpu)
   3.142      VCPU(vcpu, lrr0) = 0x10000;   // default reset value?
   3.143      VCPU(vcpu, lrr1) = 0x10000;   // default reset value?
   3.144      update_vhpi(vcpu, NULL_VECTOR);
   3.145 +    VLSAPIC_XTP(vcpu) = 0x80;   // disabled
   3.146      for ( i=0; i<4; i++) {
   3.147          VLSAPIC_INSVC(vcpu,i) = 0;
   3.148      }
   3.149 @@ -367,7 +301,7 @@ static __inline__ int highest_bits(uint6
   3.150  {
   3.151      uint64_t  bits, bitnum;
   3.152      int i;
   3.153 -    
   3.154 +
   3.155      /* loop for all 256 bits */
   3.156      for ( i=3; i >= 0 ; i -- ) {
   3.157          bits = dat[i];
   3.158 @@ -411,12 +345,6 @@ static int is_higher_class(int pending, 
   3.159  {
   3.160      return ( (pending >> 4) > mic );
   3.161  }
   3.162 -#if 0
   3.163 -static int is_invalid_irq(int vec)
   3.164 -{
   3.165 -    return (vec == 1 || ((vec <= 14 && vec >= 3)));
   3.166 -}
   3.167 -#endif //shadow it due to no use currently
   3.168  
   3.169  #define   IRQ_NO_MASKED         0
   3.170  #define   IRQ_MASKED_BY_VTPR    1
   3.171 @@ -427,7 +355,7 @@ static int
   3.172  _xirq_masked(VCPU *vcpu, int h_pending, int h_inservice)
   3.173  {
   3.174      tpr_t    vtpr;
   3.175 -    
   3.176 +
   3.177      vtpr.val = VCPU(vcpu, tpr);
   3.178  
   3.179      if ( h_inservice == NMI_VECTOR ) {
   3.180 @@ -467,7 +395,7 @@ static int
   3.181  static int irq_masked(VCPU *vcpu, int h_pending, int h_inservice)
   3.182  {
   3.183      int mask;
   3.184 -    
   3.185 +
   3.186      mask = _xirq_masked(vcpu, h_pending, h_inservice);
   3.187      return mask;
   3.188  }
   3.189 @@ -655,4 +583,213 @@ struct vcpu * vlsapic_lid_to_vcpu(struct
   3.190              return v;
   3.191      }
   3.192      return NULL;
   3.193 -}                                     
   3.194 +}
   3.195 +
   3.196 +
   3.197 +/*
   3.198 + * To inject INIT to guest, we must set the PAL_INIT entry 
   3.199 + * and set psr to switch to physical mode
   3.200 + */
   3.201 +#define PAL_INIT_ENTRY 0x80000000ffffffa0
   3.202 +#define PSR_SET_BITS (IA64_PSR_DT | IA64_PSR_IT | IA64_PSR_RT | \
   3.203 +                      IA64_PSR_IC | IA64_PSR_RI)
   3.204 +
   3.205 +static void vmx_inject_guest_pal_init(VCPU *vcpu)
   3.206 +{
   3.207 +    REGS *regs = vcpu_regs(vcpu);
   3.208 +    uint64_t psr = vmx_vcpu_get_psr(vcpu);
   3.209 +
   3.210 +    regs->cr_iip = PAL_INIT_ENTRY;
   3.211 +
   3.212 +    psr = psr & ~PSR_SET_BITS;
   3.213 +    vmx_vcpu_set_psr(vcpu, psr);
   3.214 +}
   3.215 +
   3.216 +
   3.217 +/*
   3.218 + * Deliver IPI message. (Only U-VP is supported now)
   3.219 + *  offset: address offset to IPI space.
   3.220 + *  value:  deliver value.
   3.221 + */
   3.222 +static void vlsapic_deliver_ipi(VCPU *vcpu, uint64_t dm, uint64_t vector)
   3.223 +{
   3.224 +    IPI_DPRINTK("deliver_ipi %lx %lx\n", dm, vector);
   3.225 +
   3.226 +    switch (dm) {
   3.227 +    case SAPIC_FIXED:     // INT
   3.228 +        vmx_vcpu_pend_interrupt(vcpu, vector);
   3.229 +        break;
   3.230 +    case SAPIC_PMI:
   3.231 +        // TODO -- inject guest PMI
   3.232 +        panic_domain(NULL, "Inject guest PMI!\n");
   3.233 +        break;
   3.234 +    case SAPIC_NMI:
   3.235 +        vmx_vcpu_pend_interrupt(vcpu, 2);
   3.236 +        break;
   3.237 +    case SAPIC_INIT:
   3.238 +        vmx_inject_guest_pal_init(vcpu);
   3.239 +        break;
   3.240 +    case SAPIC_EXTINT:     // ExtINT
   3.241 +        vmx_vcpu_pend_interrupt(vcpu, 0);
   3.242 +        break;
   3.243 +    default:
   3.244 +        panic_domain(NULL, "Deliver reserved IPI!\n");
   3.245 +        break;
   3.246 +    }
   3.247 +}
   3.248 +
   3.249 +/*
   3.250 + * TODO: Use hash table for the lookup.
   3.251 + */
   3.252 +static inline VCPU *lid_to_vcpu(struct domain *d, uint8_t id, uint8_t eid)
   3.253 +{
   3.254 +    VCPU  *v;
   3.255 +    LID   lid; 
   3.256 +
   3.257 +    for_each_vcpu(d, v) {
   3.258 +        lid.val = VCPU_LID(v);
   3.259 +        if (lid.id == id && lid.eid == eid)
   3.260 +            return v;
   3.261 +    }
   3.262 +    return NULL;
   3.263 +}
   3.264 +
   3.265 +
   3.266 +/*
   3.267 + * execute write IPI op.
   3.268 + */
   3.269 +static void vlsapic_write_ipi(VCPU *vcpu, uint64_t addr, uint64_t value)
   3.270 +{
   3.271 +    VCPU   *targ;
   3.272 +    struct domain *d = vcpu->domain; 
   3.273 +
   3.274 +    targ = lid_to_vcpu(vcpu->domain, ((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
   3.275 +    if (targ == NULL)
   3.276 +        panic_domain(NULL, "Unknown IPI cpu\n");
   3.277 +
   3.278 +    if (!test_bit(_VCPUF_initialised, &targ->vcpu_flags)) {
   3.279 +        struct pt_regs *targ_regs = vcpu_regs(targ);
   3.280 +        struct vcpu_guest_context c;
   3.281 +
   3.282 +        memset (&c, 0, sizeof(c));
   3.283 +
   3.284 +        if (arch_set_info_guest(targ, &c) != 0) {
   3.285 +            printk("arch_boot_vcpu: failure\n");
   3.286 +            return;
   3.287 +        }
   3.288 +        /* First or next rendez-vous: set registers. */
   3.289 +        vcpu_init_regs(targ);
   3.290 +        targ_regs->cr_iip = d->arch.sal_data->boot_rdv_ip;
   3.291 +        targ_regs->r1 = d->arch.sal_data->boot_rdv_r1;
   3.292 +
   3.293 +        if (test_and_clear_bit(_VCPUF_down,&targ->vcpu_flags)) {
   3.294 +            vcpu_wake(targ);
   3.295 +            printk("arch_boot_vcpu: vcpu %d awaken %016lx!\n",
   3.296 +                   targ->vcpu_id, targ_regs->cr_iip);
   3.297 +        } else {
   3.298 +            printk("arch_boot_vcpu: huh, already awake!");
   3.299 +        }
   3.300 +    } else {
   3.301 +        int running = test_bit(_VCPUF_running, &targ->vcpu_flags);
   3.302 +        vlsapic_deliver_ipi(targ, ((ipi_d_t)value).dm, 
   3.303 +                            ((ipi_d_t)value).vector);
   3.304 +        vcpu_unblock(targ);
   3.305 +        if (running)
   3.306 +            smp_send_event_check_cpu(targ->processor);
   3.307 +    }
   3.308 +    return;
   3.309 +}
   3.310 +
   3.311 +
   3.312 +unsigned long vlsapic_read(struct vcpu *v,
   3.313 +                           unsigned long addr,
   3.314 +                           unsigned long length)
   3.315 +{
   3.316 +    uint64_t result = 0;
   3.317 +
   3.318 +    addr &= (PIB_SIZE - 1);
   3.319 +
   3.320 +    switch (addr) {
   3.321 +    case PIB_OFST_INTA:
   3.322 +        if (length == 1) // 1 byte load
   3.323 +            ; // There is no i8259, there is no INTA access
   3.324 +        else
   3.325 +            panic_domain(NULL,"Undefined read on PIB INTA\n");
   3.326 +
   3.327 +        break;
   3.328 +    case PIB_OFST_XTP:
   3.329 +        if (length == 1) {
   3.330 +            result = VLSAPIC_XTP(v);
   3.331 +            // printk("read xtp %lx\n", result);
   3.332 +        } else {
   3.333 +            panic_domain(NULL, "Undefined read on PIB XTP\n");
   3.334 +        }
   3.335 +        break;
   3.336 +    default:
   3.337 +        if (PIB_LOW_HALF(addr)) {  // lower half
    3.338 +            if (length != 8)
   3.339 +                panic_domain(NULL, "Undefined IPI-LHF read!\n");
   3.340 +            else
    3.341 +                IPI_DPRINTK("IPI-LHF read %lx\n", addr);
   3.342 +        } else {  // upper half
   3.343 +            IPI_DPRINTK("IPI-UHF read %lx\n", addr);
   3.344 +        }
   3.345 +        break;
   3.346 +    }
   3.347 +    return result;
   3.348 +}
   3.349 +
   3.350 +static void vlsapic_write_xtp(struct vcpu *v, uint8_t val)
   3.351 +{
   3.352 +    struct viosapic * viosapic;
   3.353 +    struct vcpu *lvcpu, *vcpu;
   3.354 +    viosapic = vcpu_viosapic(v); 
   3.355 +    lvcpu = viosapic->lowest_vcpu;
   3.356 +    VLSAPIC_XTP(v) = val;
   3.357 +    
   3.358 +    for_each_vcpu(v->domain, vcpu) {
   3.359 +        if (VLSAPIC_XTP(lvcpu) > VLSAPIC_XTP(vcpu))
   3.360 +            lvcpu = vcpu;
   3.361 +    }
   3.362 +        
   3.363 +    if (VLSAPIC_XTP(lvcpu) & 0x80)  // Disabled
   3.364 +        lvcpu = NULL;
   3.365 +
   3.366 +    viosapic->lowest_vcpu = lvcpu;
   3.367 +}
   3.368 +
   3.369 +void vlsapic_write(struct vcpu *v,
   3.370 +                      unsigned long addr,
   3.371 +                      unsigned long length,
   3.372 +                      unsigned long val)
   3.373 +{
   3.374 +    addr &= (PIB_SIZE - 1);
   3.375 +
   3.376 +    switch (addr) {
   3.377 +    case PIB_OFST_INTA:
   3.378 +        panic_domain(NULL, "Undefined write on PIB INTA\n");
   3.379 +        break;
   3.380 +    case PIB_OFST_XTP:
   3.381 +        if (length == 1) {
   3.382 +            // printk("write xtp %lx\n", val);
   3.383 +            vlsapic_write_xtp(v, val);
   3.384 +        } else {
   3.385 +            panic_domain(NULL, "Undefined write on PIB XTP\n");
   3.386 +        }
   3.387 +        break;
   3.388 +    default:
   3.389 +        if (PIB_LOW_HALF(addr)) {   // lower half
   3.390 +            if (length != 8)
   3.391 +                panic_domain(NULL, "Undefined IPI-LHF write with size %ld!\n",
   3.392 +                             length);
   3.393 +            else
   3.394 +                vlsapic_write_ipi(v, addr, val);
   3.395 +        }
   3.396 +        else {   // upper half
   3.397 +            // printk("IPI-UHF write %lx\n",addr);
   3.398 +            panic_domain(NULL, "No support for SM-VP yet\n");
   3.399 +        }
   3.400 +        break;
   3.401 +    }
   3.402 +}
   3.403 +
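
All PIB accesses now funnel through vlsapic_read()/vlsapic_write() above, which first mask the guest physical address down to an offset within the PIB window and then decode it: offsets with bit 20 clear form the per-processor IPI area (written with 8-byte UC stores), while the upper half holds INTA at 0x1E0000 and XTP at 0x1E0008 (the PIB_* macros are added to vlsapic.h below). The fragment that follows is a sketch of that decode only; classify_pib_offset() is an invented name and PIB_SIZE is assumed to be the power-of-two size of the PIB window as defined elsewhere in the tree.

    /* Sketch of the offset decode performed by vlsapic_read()/vlsapic_write(). */
    enum pib_area { PIB_AREA_IPI, PIB_AREA_INTA, PIB_AREA_XTP, PIB_AREA_OTHER };

    static enum pib_area classify_pib_offset(unsigned long gpa)
    {
        unsigned long ofst = gpa & (PIB_SIZE - 1);   /* offset inside the PIB */

        if (PIB_LOW_HALF(ofst))          /* bit 20 clear: IPI space */
            return PIB_AREA_IPI;
        if (ofst == PIB_OFST_INTA)       /* 0x1E0000 */
            return PIB_AREA_INTA;
        if (ofst == PIB_OFST_XTP)        /* 0x1E0008 */
            return PIB_AREA_XTP;
        return PIB_AREA_OTHER;           /* other upper-half offsets */
    }
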
     4.1 --- a/xen/include/asm-ia64/viosapic.h	Thu Dec 07 04:15:54 2006 -0700
     4.2 +++ b/xen/include/asm-ia64/viosapic.h	Thu Dec 07 05:34:07 2006 -0700
     4.3 @@ -78,6 +78,7 @@ struct viosapic {
     4.4      uint64_t isr;     /* This is used for level trigger */
     4.5      uint32_t ioregsel;
     4.6      spinlock_t lock;
     4.7 +    struct vcpu * lowest_vcpu;
     4.8      uint64_t base_address;
     4.9      union viosapic_rte redirtbl[VIOSAPIC_NUM_PINS];
    4.10  };
     5.1 --- a/xen/include/asm-ia64/vlsapic.h	Thu Dec 07 04:15:54 2006 -0700
     5.2 +++ b/xen/include/asm-ia64/vlsapic.h	Thu Dec 07 05:34:07 2006 -0700
     5.3 @@ -50,11 +50,20 @@
     5.4  #define SAPIC_LEVEL            1
     5.5  
     5.6  /*
     5.7 + * LSAPIC OFFSET
     5.8 + */
     5.9 +#define PIB_LOW_HALF(ofst)     !(ofst & (1 << 20))
    5.10 +#define PIB_OFST_INTA          0x1E0000
    5.11 +#define PIB_OFST_XTP           0x1E0008
    5.12 +
    5.13 +/*
    5.14   *Mask bit
    5.15   */
    5.16  #define SAPIC_MASK_SHIFT       16
    5.17  #define SAPIC_MASK             (1 << SAPIC_MASK_SHIFT)
    5.18  
    5.19 +#define VLSAPIC_XTP(_v)        VMX(_v, xtp)
    5.20 +
    5.21  extern void vtm_init(struct vcpu *vcpu);
    5.22  extern void vtm_set_itc(struct  vcpu *vcpu, uint64_t new_itc);
    5.23  extern void vtm_set_itm(struct vcpu *vcpu, uint64_t val);
    5.24 @@ -63,5 +72,7 @@ extern void vmx_vexirq(struct vcpu  *vcp
    5.25  extern void vhpi_detection(struct vcpu *vcpu);
    5.26  extern int vmx_vcpu_pend_interrupt(VCPU * vcpu, uint8_t vector);
    5.27  extern struct vcpu * vlsapic_lid_to_vcpu(struct domain *d, uint16_t dest);
    5.28 +extern uint64_t vlsapic_read(struct vcpu *v, uint64_t addr, uint64_t s);
    5.29 +extern void vlsapic_write(struct vcpu *v, uint64_t addr, uint64_t s, uint64_t val);
    5.30  #define vlsapic_set_irq vmx_vcpu_pend_interrupt
    5.31  #endif
     6.1 --- a/xen/include/asm-ia64/vmx_platform.h	Thu Dec 07 04:15:54 2006 -0700
     6.2 +++ b/xen/include/asm-ia64/vmx_platform.h	Thu Dec 07 05:34:07 2006 -0700
     6.3 @@ -28,7 +28,6 @@ typedef struct virtual_platform_def {
     6.4      spinlock_t             buffered_io_lock;
     6.5      unsigned long       shared_page_va;
     6.6      unsigned long       pib_base;
     6.7 -    unsigned char       xtp;
     6.8      unsigned long       params[HVM_NR_PARAMS];
     6.9      struct mmio_list    *mmio;
    6.10      /* One IOSAPIC now... */
     7.1 --- a/xen/include/asm-ia64/vmx_vpd.h	Thu Dec 07 04:15:54 2006 -0700
     7.2 +++ b/xen/include/asm-ia64/vmx_vpd.h	Thu Dec 07 05:34:07 2006 -0700
     7.3 @@ -96,9 +96,9 @@ struct arch_vmx_struct {
     7.4  //    unsigned long   rfi_iip;
     7.5  //    unsigned long   rfi_ipsr;
     7.6  //    unsigned long   rfi_ifs;
     7.7 -//	unsigned long	in_service[4];	// vLsapic inservice IRQ bits
     7.8      unsigned long   flags;
     7.9      unsigned long   xen_port;
    7.10 +    unsigned char   xtp;
    7.11  #ifdef VTI_DEBUG
    7.12      unsigned long  ivt_current;
    7.13      struct ivt_debug ivt_debug[IVT_DEBUG_MAX];