ia64/xen-unstable

changeset 5950:7e74ac6fdea9

Final set of HV changes to support multiple domains on VTI.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
Signed-off-by: Eddie Dong <eddie.dong@intel.com>
Signed-off-by: Fred Yang <fred.yang@intel.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author fred@xuni-t01.sc.intel.com
date Tue Aug 02 02:11:41 2005 -0800 (2005-08-02)
parents b6803bdaa95a
children bd77de43ed4a
files xen/arch/ia64/domain.c xen/arch/ia64/mmio.c xen/arch/ia64/vlsapic.c xen/arch/ia64/vmx_ivt.S xen/arch/ia64/vmx_support.c xen/arch/ia64/vmx_vcpu.c xen/arch/ia64/vmx_virt.c xen/arch/ia64/vtlb.c xen/include/asm-ia64/domain.h xen/include/asm-ia64/ia64_int.h xen/include/asm-ia64/privop.h xen/include/asm-ia64/vmx.h xen/include/asm-ia64/vmx_uaccess.h xen/include/asm-ia64/vmx_vcpu.h xen/include/asm-ia64/vmx_vpd.h
line diff
     1.1 --- a/xen/arch/ia64/domain.c	Tue Aug 02 02:09:24 2005 -0800
     1.2 +++ b/xen/arch/ia64/domain.c	Tue Aug 02 02:11:41 2005 -0800
     1.3 @@ -348,6 +348,7 @@ int arch_set_info_guest(
     1.4      struct domain *d = v->domain;
     1.5      int i, rc, ret;
     1.6      unsigned long progress = 0;
     1.7 +    shared_iopage_t *sp;
     1.8  
     1.9      if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
    1.10          return 0;
    1.11 @@ -373,8 +374,17 @@ int arch_set_info_guest(
    1.12      /* FIXME: only supports a contiguous PMT table so far */
    1.13      d->arch.pmt = __va(c->pt_base);
    1.14      d->arch.max_pfn = c->pt_max_pfn;
    1.15 -    v->arch.arch_vmx.vmx_platform.shared_page_va = __va(c->share_io_pg);
    1.16 -    memset((char *)__va(c->share_io_pg),0,PAGE_SIZE);
    1.17 +    d->arch.vmx_platform.shared_page_va = __va(c->share_io_pg);
    1.18 +    sp = get_sp(d);
    1.19 +    memset((char *)sp,0,PAGE_SIZE);
    1.20 +    /* FIXME: temp due to old CP */
    1.21 +    sp->sp_global.eport = 2;
    1.22 +#ifdef V_IOSAPIC_READY
    1.23 +    sp->vcpu_number = 1;
    1.24 +#endif
    1.25 +    /* TEMP */
    1.26 +    d->arch.vmx_platform.pib_base = 0xfee00000UL;
    1.27 +    
    1.28  
    1.29      if (c->flags & VGCF_VMX_GUEST) {
    1.30  	if (!vmx_enabled)
    1.31 @@ -393,7 +403,7 @@ int arch_set_info_guest(
    1.32      if (v == d->vcpu[0]) {
    1.33  	memset(&d->shared_info->evtchn_mask[0], 0xff,
    1.34  		sizeof(d->shared_info->evtchn_mask));
    1.35 -	clear_bit(IOPACKET_PORT, &d->shared_info->evtchn_mask[0]);
    1.36 +	clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);
    1.37      }
    1.38      /* Setup domain context. Actually IA-64 is a bit different with
    1.39       * x86, with almost all system resources better managed by HV
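The arch_set_info_guest() hunk above moves the shared I/O page from per-vcpu state (arch_vmx.vmx_platform) to the domain, zeroes it, and seeds two stopgaps flagged in the patch itself: eport = 2 keeps the old fixed event-channel port until the control panel supplies a real one, and pib_base is hardwired to 0xfee00000. A minimal sketch of that bootstrap, with simplified stand-in types for shared_iopage_t and the platform struct (the PAGE_SIZE value and field layout here are assumptions for illustration only):

    #include <stdint.h>
    #include <string.h>

    #define PAGE_SIZE 16384                /* assumed page size, sketch only */

    typedef struct {
        struct { int eport; } sp_global;   /* event channel the DM listens on */
        int vcpu_number;
    } shared_iopage_t;

    struct vmx_platform {
        void     *shared_page_va;          /* one page per domain, not per vcpu */
        uint64_t  pib_base;
    };

    static void init_vmx_platform(struct vmx_platform *plat, void *page_va)
    {
        plat->shared_page_va = page_va;
        memset(page_va, 0, PAGE_SIZE);
        ((shared_iopage_t *)page_va)->sp_global.eport = 2;  /* temp, old CP */
        plat->pib_base = 0xfee00000UL;                      /* temp PIB base */
    }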
     2.1 --- a/xen/arch/ia64/mmio.c	Tue Aug 02 02:09:24 2005 -0800
     2.2 +++ b/xen/arch/ia64/mmio.c	Tue Aug 02 02:11:41 2005 -0800
     2.3 @@ -66,7 +66,7 @@ static void pib_write(VCPU *vcpu, void *
     2.4      default:
     2.5          if ( PIB_LOW_HALF(pib_off) ) {   // lower half
     2.6              if ( s != 8 || ma != 0x4 /* UC */ ) {
     2.7 -                panic("Undefined IPI-LHF write!\n");
     2.8 +                panic("Undefined IPI-LHF write with s %d, ma %d!\n", s, ma);
     2.9              }
    2.10              else {
    2.11                  write_ipi(vcpu, pib_off, *(uint64_t *)src);
    2.12 @@ -135,13 +135,13 @@ static void low_mmio_access(VCPU *vcpu, 
    2.13      ioreq_t *p;
    2.14      unsigned long addr;
    2.15  
    2.16 -    vio = (vcpu_iodata_t *) v->arch.arch_vmx.vmx_platform.shared_page_va;
    2.17 +    vio = get_vio(v->domain, v->vcpu_id);
    2.18      if (vio == 0) {
    2.19          panic("bad shared page: %lx", (unsigned long)vio);
    2.20      }
    2.21      p = &vio->vp_ioreq;
    2.22      p->addr = pa;
    2.23 -    p->size = 1<<s;
    2.24 +    p->size = s;
    2.25      p->count = 1;
    2.26      p->dir = dir;
    2.27      if(dir==IOREQ_WRITE)     //write;
    2.28 @@ -152,9 +152,9 @@ static void low_mmio_access(VCPU *vcpu, 
    2.29  
    2.30      set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
    2.31      p->state = STATE_IOREQ_READY;
    2.32 -    evtchn_send(IOPACKET_PORT);
    2.33 +    evtchn_send(iopacket_port(v->domain));
    2.34      vmx_wait_io();
    2.35 -    if(dir){ //read
    2.36 +    if(dir==IOREQ_READ){ //read
    2.37          *val=p->u.data;
    2.38      }
    2.39      return;
    2.40 @@ -168,13 +168,13 @@ static void legacy_io_access(VCPU *vcpu,
    2.41      ioreq_t *p;
    2.42      unsigned long addr;
    2.43  
    2.44 -    vio = (vcpu_iodata_t *) v->arch.arch_vmx.vmx_platform.shared_page_va;
    2.45 +    vio = get_vio(v->domain, v->vcpu_id);
    2.46      if (vio == 0) {
    2.47          panic("bad shared page: %lx", (unsigned long)vio);
    2.48      }
    2.49      p = &vio->vp_ioreq;
    2.50      p->addr = TO_LEGACY_IO(pa&0x3ffffffUL);
    2.51 -    p->size = 1<<s;
    2.52 +    p->size = s;
    2.53      p->count = 1;
    2.54      p->dir = dir;
    2.55      if(dir==IOREQ_WRITE)     //write;
    2.56 @@ -185,11 +185,20 @@ static void legacy_io_access(VCPU *vcpu,
    2.57  
    2.58      set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
    2.59      p->state = STATE_IOREQ_READY;
    2.60 -    evtchn_send(IOPACKET_PORT);
    2.61 +    evtchn_send(iopacket_port(v->domain));
    2.62 +
    2.63      vmx_wait_io();
    2.64 -    if(dir){ //read
    2.65 +    if(dir==IOREQ_READ){ //read
    2.66          *val=p->u.data;
    2.67      }
    2.68 +#ifdef DEBUG_PCI
    2.69 +    if(dir==IOREQ_WRITE) {
    2.70 +        if(p->addr == 0xcf8UL)
    2.71 +            printk("Write 0xcf8, with val [0x%lx]\n", p->u.data);
    2.72 +    }
    2.73 +    else if(p->addr == 0xcfcUL)
    2.74 +        printk("Read 0xcfc, with val [0x%lx]\n", p->u.data);
    2.75 +#endif //DEBUG_PCI
    2.76      return;
    2.77  }
    2.78  
    2.79 @@ -204,12 +213,13 @@ static void mmio_access(VCPU *vcpu, u64 
    2.80      switch (iot) {
    2.81      case GPFN_PIB:
    2.82          if(!dir)
    2.83 -            pib_write(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
    2.84 +            pib_write(vcpu, dest, src_pa - v_plat->pib_base, s, ma);
    2.85          else
    2.86              pib_read(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
    2.87          break;
    2.88      case GPFN_GFW:
    2.89          break;
    2.90 +    case GPFN_IOSAPIC:
    2.91      case GPFN_FRAME_BUFFER:
    2.92      case GPFN_LOW_MMIO:
    2.93          low_mmio_access(vcpu, src_pa, dest, s, dir);
    2.94 @@ -217,7 +227,6 @@ static void mmio_access(VCPU *vcpu, u64 
    2.95      case GPFN_LEGACY_IO:
    2.96          legacy_io_access(vcpu, src_pa, dest, s, dir);
    2.97          break;
    2.98 -    case GPFN_IOSAPIC:
    2.99      default:
   2.100          panic("Bad I/O access\n");
   2.101          break;
   2.102 @@ -342,6 +351,8 @@ static inline VCPU *lid_2_vcpu (struct d
   2.103  	LID	  lid;
   2.104  	for (i=0; i<MAX_VIRT_CPUS; i++) {
   2.105  		vcpu = d->vcpu[i];
   2.106 + 		if (!vcpu)
   2.107 + 			continue;
   2.108  		lid.val = VPD_CR(vcpu, lid);
   2.109  		if ( lid.id == id && lid.eid == eid ) {
   2.110  		    return vcpu;
   2.111 @@ -379,15 +390,16 @@ static int write_ipi (VCPU *vcpu, uint64
   2.112      inst_type 0:integer 1:floating point
   2.113   */
   2.114  extern IA64_BUNDLE __vmx_get_domain_bundle(u64 iip);
   2.115 -
   2.116 +#define SL_INTEGER  0        // store/load integer
   2.117 +#define SL_FLOATING    1       // store/load floating
   2.118  
   2.119  void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
   2.120  {
   2.121      REGS *regs;
   2.122      IA64_BUNDLE bundle;
   2.123 -    int slot, dir, inst_type=0;
   2.124 +    int slot, dir, inst_type;
   2.125      size_t size;
   2.126 -    u64 data, value, slot1a, slot1b;
   2.127 +    u64 data, value,post_update, slot1a, slot1b, temp;
   2.128      INST64 inst;
   2.129      regs=vcpu_regs(vcpu);
   2.130      bundle = __vmx_get_domain_bundle(regs->cr_iip);
   2.131 @@ -400,28 +412,70 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
   2.132      }
   2.133      else if (slot == 2) inst.inst = bundle.slot2;
   2.134  
   2.135 +
   2.136 +    // Integer Load/Store
   2.137      if(inst.M1.major==4&&inst.M1.m==0&&inst.M1.x==0){
   2.138 -        inst_type=0;  //fp
   2.139 +        inst_type = SL_INTEGER;  //
   2.140          size=(inst.M1.x6&0x3);
   2.141          if((inst.M1.x6>>2)>0xb){      // write
   2.142 -            vmx_vcpu_get_gr(vcpu,inst.M4.r2,&data);
   2.143              dir=IOREQ_WRITE;     //write
   2.144 +            vmx_vcpu_get_gr(vcpu,inst.M4.r2,&data);
   2.145          }else if((inst.M1.x6>>2)<0xb){   //  read
   2.146 -            vmx_vcpu_get_gr(vcpu,inst.M1.r1,&value);
   2.147              dir=IOREQ_READ;
   2.148 -        }else{
   2.149 -            printf("This memory access instruction can't be emulated one : %lx\n",inst.inst);
   2.150 -            while(1);
   2.151 +            vmx_vcpu_get_gr(vcpu,inst.M1.r1,&value);
   2.152          }
   2.153 -    }else if(inst.M6.major==6&&inst.M6.m==0&&inst.M6.x==0&&inst.M6.x6==3){
   2.154 -        inst_type=1;  //fp
   2.155 -        dir=IOREQ_READ;
   2.156 -        size=3;     //ldfd
   2.157 -    }else{
   2.158 +    }
   2.159 +    // Integer Load + Reg update
   2.160 +    else if(inst.M2.major==4&&inst.M2.m==1&&inst.M2.x==0){
   2.161 +        inst_type = SL_INTEGER;
   2.162 +        dir = IOREQ_READ;     //read
   2.163 +        size = (inst.M2.x6&0x3);
   2.164 +        vmx_vcpu_get_gr(vcpu,inst.M2.r1,&value);
   2.165 +        vmx_vcpu_get_gr(vcpu,inst.M2.r3,&temp);
   2.166 +        vmx_vcpu_get_gr(vcpu,inst.M2.r2,&post_update);
   2.167 +        temp += post_update;
   2.168 +        vmx_vcpu_set_gr(vcpu,inst.M2.r3,temp,0);
   2.169 +    }
   2.170 +    // Integer Load/Store + Imm update
   2.171 +    else if(inst.M3.major==5){
   2.172 +        inst_type = SL_INTEGER;  //
   2.173 +        size=(inst.M3.x6&0x3);
   2.174 +        if((inst.M5.x6>>2)>0xb){      // write
   2.175 +            dir=IOREQ_WRITE;     //write
   2.176 +            vmx_vcpu_get_gr(vcpu,inst.M5.r2,&data);
   2.177 +            vmx_vcpu_get_gr(vcpu,inst.M5.r3,&temp);
   2.178 +            post_update = (inst.M5.i<<7)+inst.M5.imm7;
   2.179 +            if(inst.M5.s)
   2.180 +                temp -= post_update;
   2.181 +            else
   2.182 +                temp += post_update;
   2.183 +            vmx_vcpu_set_gr(vcpu,inst.M5.r3,temp,0);
   2.184 +
   2.185 +        }else if((inst.M3.x6>>2)<0xb){   //  read
   2.186 +            dir=IOREQ_READ;
   2.187 +            vmx_vcpu_get_gr(vcpu,inst.M3.r1,&value);
   2.188 +            vmx_vcpu_get_gr(vcpu,inst.M3.r3,&temp);
   2.189 +            post_update = (inst.M3.i<<7)+inst.M3.imm7;
   2.190 +            if(inst.M3.s)
   2.191 +                temp -= post_update;
   2.192 +            else
   2.193 +                temp += post_update;
   2.194 +            vmx_vcpu_set_gr(vcpu,inst.M3.r3,temp,0);
   2.195 +
   2.196 +        }
   2.197 +    }
   2.198 +    // Floating-point Load/Store
   2.199 +//    else if(inst.M6.major==6&&inst.M6.m==0&&inst.M6.x==0&&inst.M6.x6==3){
   2.200 +//        inst_type=SL_FLOATING;  //fp
   2.201 +//        dir=IOREQ_READ;
   2.202 +//        size=3;     //ldfd
   2.203 +//    }
   2.204 +    else{
   2.205          printf("This memory access instruction can't be emulated two: %lx\n ",inst.inst);
   2.206          while(1);
   2.207      }
   2.208  
   2.209 +    size = 1 << size;
   2.210      if(dir==IOREQ_WRITE){
   2.211          mmio_access(vcpu, padr, &data, size, ma, dir);
   2.212      }else{
   2.213 @@ -433,7 +487,7 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
   2.214          else if(size==2)
   2.215              data = (value & 0xffffffff00000000U) | (data & 0xffffffffU);
   2.216  
   2.217 -        if(inst_type==0){       //gp
   2.218 +        if(inst_type==SL_INTEGER){       //gp
   2.219              vmx_vcpu_set_gr(vcpu,inst.M1.r1,data,0);
   2.220          }else{
   2.221              panic("Don't support ldfd now !");
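The rewritten emulate_io_inst() above adds the integer load/store forms with post-update addressing: M2 advances the base register r3 by another register after the access, while M3/M5 add or subtract an 8-bit immediate assembled from the i and imm7 bit fields, with s selecting the sign. A standalone sketch of the immediate form, assuming the INST64_M3/INST64_M5 field layout from privop.h (the helper name is hypothetical; the patch inlines this logic at both call sites):

    #include <stdint.h>

    /* Returns the updated value of base register r3 for an M3/M5-form
     * access: magnitude is the 8-bit concatenation i:imm7, sign is s. */
    static uint64_t post_update_base(uint64_t r3, uint64_t i, uint64_t imm7,
                                     uint64_t s)
    {
        uint64_t post_update = (i << 7) + imm7;
        return s ? r3 - post_update : r3 + post_update;
    }

Note also that size now stays in its log2 encoding through the decode and is expanded once with size = 1 << size, which is why the low_mmio_access()/legacy_io_access() hunks change p->size from 1<<s to s: the caller already passes a byte count.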
     3.1 --- a/xen/arch/ia64/vlsapic.c	Tue Aug 02 02:09:24 2005 -0800
     3.2 +++ b/xen/arch/ia64/vlsapic.c	Tue Aug 02 02:11:41 2005 -0800
     3.3 @@ -38,6 +38,14 @@
     3.4  #include <asm/vmx_pal_vsa.h>
     3.5  #include <asm/kregs.h>
     3.6  
     3.7 +#define  SHARED_VLAPIC_INF
     3.8 +#ifdef V_IOSAPIC_READY
     3.9 +static inline vl_apic_info* get_psapic(VCPU *vcpu)
    3.10 +{
    3.11 +    shared_iopage_t  *sp = get_sp(vcpu->domain);
    3.12 +    return &(sp->vcpu_iodata[vcpu->vcpu_id].apic_intr);
    3.13 +}
    3.14 +#endif
    3.15  //u64  fire_itc;
    3.16  //u64  fire_itc2;
    3.17  //u64  fire_itm;
    3.18 @@ -216,7 +224,8 @@ void vtm_interruption_update(VCPU *vcpu,
    3.19   */
    3.20  void vtm_domain_out(VCPU *vcpu)
    3.21  {
    3.22 -    rem_ac_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer);
    3.23 +    if(!is_idle_task(vcpu->domain))
    3.24 +	rem_ac_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer);
    3.25  }
    3.26  
    3.27  /*
    3.28 @@ -226,9 +235,11 @@ void vtm_domain_out(VCPU *vcpu)
    3.29  void vtm_domain_in(VCPU *vcpu)
    3.30  {
    3.31      vtime_t     *vtm;
    3.32 -    
    3.33 -    vtm=&(vcpu->arch.arch_vmx.vtm);
    3.34 -    vtm_interruption_update(vcpu, vtm);
    3.35 +
    3.36 +    if(!is_idle_task(vcpu->domain)) {
    3.37 +	vtm=&(vcpu->arch.arch_vmx.vtm);
    3.38 +	vtm_interruption_update(vcpu, vtm);
    3.39 +    }
    3.40  }
    3.41  
    3.42  /*
    3.43 @@ -262,10 +273,50 @@ static void update_vhpi(VCPU *vcpu, int 
    3.44      }
    3.45  }
    3.46  
    3.47 +#ifdef V_IOSAPIC_READY
    3.48 +void vlapic_update_shared_info(VCPU *vcpu)
    3.49 +{
    3.50 +    //int	i;
    3.51 +    
    3.52 +    vl_apic_info *ps;
    3.53 +
    3.54 +    if (vcpu->domain == dom0)
    3.55 +	return;
    3.56 +
    3.57 +    ps = get_psapic(vcpu);
    3.58 +    ps->vl_lapic_id = ((VPD_CR(vcpu, lid) >> 16) & 0xffff) << 16; 
    3.59 +    printf("vl_lapic_id = %x\n", ps->vl_lapic_id);
    3.60 +    ps->vl_apr = 0;
    3.61 +    // skip ps->vl_logical_dest && ps->vl_dest_format
    3.62 +    // IPF supports physical destination mode only
    3.63 +    ps->vl_arb_id = 0;
    3.64 +    /*
    3.65 +    for ( i=0; i<4; i++ ) {
    3.66 +    	ps->tmr[i] = 0;		// edge trigger 
    3.67 +    }
    3.68 +    */
    3.69 +}
    3.70 +
    3.71 +void vlapic_update_ext_irq(VCPU *vcpu)
    3.72 +{
    3.73 +    int  vec;
    3.74 +    
    3.75 +    vl_apic_info *ps = get_psapic(vcpu);
    3.76 +    while ( (vec = highest_bits(ps->irr)) != NULL_VECTOR ) {
    3.77 +    	clear_bit (vec, ps->irr);
    3.78 +        vmx_vcpu_pend_interrupt(vcpu, vec);
    3.79 +    }
    3.80 +}
    3.81 +#endif
    3.82 +
    3.83  void vlsapic_reset(VCPU *vcpu)
    3.84  {
    3.85      int     i;
    3.86 -    VPD_CR(vcpu, lid) = 0;
    3.87 +#ifdef V_IOSAPIC_READY
    3.88 +    vl_apic_info  *psapic;	// shared lapic inf.
    3.89 +#endif
    3.90 +    
    3.91 +    VPD_CR(vcpu, lid) = ia64_getreg(_IA64_REG_CR_LID);
    3.92      VPD_CR(vcpu, ivr) = 0;
    3.93      VPD_CR(vcpu,tpr) = 0x10000;
    3.94      VPD_CR(vcpu, eoi) = 0;
    3.95 @@ -281,6 +332,10 @@ void vlsapic_reset(VCPU *vcpu)
    3.96      for ( i=0; i<4; i++) {
    3.97          VLSAPIC_INSVC(vcpu,i) = 0;
    3.98      }
    3.99 +#ifdef V_IOSAPIC_READY
   3.100 +    vlapic_update_shared_info(vcpu);
   3.101 +    //vlapic_update_shared_irr(vcpu);
   3.102 +#endif
   3.103      DPRINTK("VLSAPIC inservice base=%lp\n", &VLSAPIC_INSVC(vcpu,0) );
   3.104  }
   3.105  
   3.106 @@ -414,6 +469,7 @@ void vmx_vcpu_pend_interrupt(VCPU *vcpu,
   3.107      }
   3.108      local_irq_save(spsr);
   3.109      VPD_CR(vcpu,irr[vector>>6]) |= 1UL<<(vector&63);
   3.110 +    //vlapic_update_shared_irr(vcpu);
   3.111      local_irq_restore(spsr);
   3.112      vcpu->arch.irq_new_pending = 1;
   3.113  }
   3.114 @@ -432,6 +488,7 @@ void vmx_vcpu_pend_batch_interrupt(VCPU 
   3.115      for (i=0 ; i<4; i++ ) {
   3.116          VPD_CR(vcpu,irr[i]) |= pend_irr[i];
   3.117      }
   3.118 +    //vlapic_update_shared_irr(vcpu);
   3.119      local_irq_restore(spsr);
   3.120      vcpu->arch.irq_new_pending = 1;
   3.121  }
   3.122 @@ -518,6 +575,7 @@ uint64_t guest_read_vivr(VCPU *vcpu)
   3.123      VLSAPIC_INSVC(vcpu,vec>>6) |= (1UL <<(vec&63));
   3.124      VPD_CR(vcpu, irr[vec>>6]) &= ~(1UL <<(vec&63));
   3.125      update_vhpi(vcpu, NULL_VECTOR);     // clear VHPI till EOI or IRR write
   3.126 +    //vlapic_update_shared_irr(vcpu);
   3.127      local_irq_restore(spsr);
   3.128      return (uint64_t)vec;
   3.129  }
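Under V_IOSAPIC_READY, vlsapic.c now mirrors virtual-LAPIC state into the page shared with the device model: vlapic_update_shared_info() exports an id derived from the virtual LID register, and vlapic_update_ext_irq() drains device-model interrupts from the shared IRR into the vLSAPIC. On IPF the LID control register carries eid in bits 16..23 and id in bits 24..31, and the code keeps that 16-bit field in its original position. A sketch of just the id computation (the standalone helper name is hypothetical):

    #include <stdint.h>

    static uint32_t vl_lapic_id_from_lid(uint64_t lid)
    {
        /* keep id|eid (bits 16..31), clear everything else */
        return (uint32_t)(((lid >> 16) & 0xffff) << 16);
    }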
     4.1 --- a/xen/arch/ia64/vmx_ivt.S	Tue Aug 02 02:09:24 2005 -0800
     4.2 +++ b/xen/arch/ia64/vmx_ivt.S	Tue Aug 02 02:11:41 2005 -0800
     4.3 @@ -560,6 +560,21 @@ END(vmx_virtual_exirq)
     4.4  	VMX_DBG_FAULT(19)
     4.5  	VMX_FAULT(19)
     4.6  
     4.7 +    .org vmx_ia64_ivt+0x5000
     4.8 +/////////////////////////////////////////////////////////////////////////////////////////
     4.9 +// 0x5000 Entry 20 (size 16 bundles) Page Not Present
    4.10 +ENTRY(vmx_page_not_present)
    4.11 +	VMX_REFLECT(20)
    4.12 +END(vmx_page_not_present)
    4.13 +
    4.14 +    .org vmx_ia64_ivt+0x5100
    4.15 +/////////////////////////////////////////////////////////////////////////////////////////
    4.16 +// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
    4.17 +ENTRY(vmx_key_permission)
    4.18 +	VMX_REFLECT(21)
    4.19 +END(vmx_key_permission)
    4.20 +
    4.21 +    .org vmx_ia64_ivt+0x5200
    4.22  /////////////////////////////////////////////////////////////////////////////////////////
    4.23  // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
    4.24  ENTRY(vmx_iaccess_rights)
     5.1 --- a/xen/arch/ia64/vmx_support.c	Tue Aug 02 02:09:24 2005 -0800
     5.2 +++ b/xen/arch/ia64/vmx_support.c	Tue Aug 02 02:11:41 2005 -0800
     5.3 @@ -37,18 +37,19 @@ void vmx_wait_io(void)
     5.4      struct vcpu *v = current;
     5.5      struct domain *d = v->domain;
     5.6      extern void do_block();
     5.7 +    int port = iopacket_port(d);
     5.8  
     5.9      do {
    5.10 -	if (!test_bit(IOPACKET_PORT,
    5.11 +	if (!test_bit(port,
    5.12  		&d->shared_info->evtchn_pending[0]))
    5.13  	    do_block();
    5.14  
    5.15  	/* Unblocked when some event is coming. Clear pending indication
    5.16  	 * immediately if deciding to go for io assist
    5.17  	  */
    5.18 -	if (test_and_clear_bit(IOPACKET_PORT,
    5.19 +	if (test_and_clear_bit(port,
    5.20  		&d->shared_info->evtchn_pending[0])) {
    5.21 -	    clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
    5.22 +	    clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
    5.23  	    clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
    5.24  	    vmx_io_assist(v);
    5.25  	}
    5.26 @@ -66,7 +67,7 @@ void vmx_wait_io(void)
    5.27  	     * nothing is lost. Next loop will check I/O channel to fix this
    5.28  	     * window.
    5.29  	     */
    5.30 -	    clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
    5.31 +	    clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
    5.32  	}
    5.33  	else
    5.34  	    break;
    5.35 @@ -88,7 +89,7 @@ void vmx_io_assist(struct vcpu *v)
    5.36       * This shared page contains I/O request between emulation code
    5.37       * and device model.
    5.38       */
    5.39 -    vio = (vcpu_iodata_t *)v->arch.arch_vmx.vmx_platform.shared_page_va;
    5.40 +    vio = get_vio(v->domain, v->vcpu_id);
    5.41      if (!vio)
    5.42  	panic("Corruption: bad shared page: %lx\n", (unsigned long)vio);
    5.43  
    5.44 @@ -127,6 +128,7 @@ void vmx_intr_assist(struct vcpu *v)
    5.45      struct domain *d = v->domain;
    5.46      extern void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu,
    5.47  					unsigned long *pend_irr);
    5.48 +    int port = iopacket_port(d);
    5.49  
    5.50      /* I/O emulation is atomic, so it's impossible to see execution flow
    5.51       * out of vmx_wait_io, when guest is still waiting for response.
    5.52 @@ -135,10 +137,10 @@ void vmx_intr_assist(struct vcpu *v)
    5.53  	panic("!!!Bad resume to guest before I/O emulation is done.\n");
    5.54  
    5.55      /* Clear indicator specific to interrupt delivered from DM */
    5.56 -    if (test_and_clear_bit(IOPACKET_PORT,
    5.57 +    if (test_and_clear_bit(port,
    5.58  		&d->shared_info->evtchn_pending[0])) {
    5.59 -	if (!d->shared_info->evtchn_pending[IOPACKET_PORT >> 5])
    5.60 -	    clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
    5.61 +	if (!d->shared_info->evtchn_pending[port >> 5])
    5.62 +	    clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
    5.63  
    5.64  	if (!v->vcpu_info->evtchn_pending_sel)
    5.65  	    clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
    5.66 @@ -149,11 +151,14 @@ void vmx_intr_assist(struct vcpu *v)
    5.67       * shares same event channel as I/O emulation, with corresponding
    5.68       * indicator possibly cleared when vmx_wait_io().
    5.69       */
    5.70 -    vio = (vcpu_iodata_t *)v->arch.arch_vmx.vmx_platform.shared_page_va;
    5.71 +    vio = get_vio(v->domain, v->vcpu_id);
    5.72      if (!vio)
    5.73  	panic("Corruption: bad shared page: %lx\n", (unsigned long)vio);
    5.74  
    5.75 -    vmx_vcpu_pend_batch_interrupt(v, &vio->vp_intr[0]); 
    5.76 -    memset(&vio->vp_intr[0], 0, sizeof(vio->vp_intr));
    5.77 +#ifdef V_IOSAPIC_READY
    5.78 +    vlapic_update_ext_irq(v);
    5.79 +#else
    5.80 +    panic("IOSAPIC model is missing in qemu\n");
    5.81 +#endif
    5.82      return;
    5.83  }
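vmx_wait_io() and vmx_intr_assist() above swap the fixed IOPACKET_PORT for the per-domain iopacket_port(d), but keep the same two-level pending scheme: evtchn_pending[] holds one bit per port, and each bit of the per-vcpu selector covers a 32-port word, which is why the selector index is port>>5. A simplified, self-contained model of the clearing step (struct and names are stand-ins, not Xen's real shared_info layout):

    #include <stdint.h>

    #define PORTS_PER_WORD 32   /* matches the port>>5 indexing above */

    struct evtchn_model {
        uint32_t pending[32];   /* one bit per event-channel port */
        uint32_t pending_sel;   /* one bit per 32-port word */
    };

    static void clear_port(struct evtchn_model *e, int port)
    {
        e->pending[port / PORTS_PER_WORD] &= ~(1u << (port % PORTS_PER_WORD));
        /* drop the selector bit only when the whole word is empty,
         * as vmx_intr_assist() does */
        if (e->pending[port / PORTS_PER_WORD] == 0)
            e->pending_sel &= ~(1u << (port / PORTS_PER_WORD));
    }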
     6.1 --- a/xen/arch/ia64/vmx_vcpu.c	Tue Aug 02 02:09:24 2005 -0800
     6.2 +++ b/xen/arch/ia64/vmx_vcpu.c	Tue Aug 02 02:11:41 2005 -0800
     6.3 @@ -23,7 +23,7 @@
     6.4   *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
     6.5   */
     6.6  
     6.7 -#include <linux/sched.h>
     6.8 +#include <xen/sched.h>
     6.9  #include <public/arch-ia64.h>
    6.10  #include <asm/ia64_int.h>
    6.11  #include <asm/vmx_vcpu.h>
    6.12 @@ -201,7 +201,7 @@ vmx_vcpu_get_vtlb(VCPU *vcpu)
    6.13  struct virutal_platform_def *
    6.14  vmx_vcpu_get_plat(VCPU *vcpu)
    6.15  {
    6.16 -    return &(vcpu->arch.arch_vmx.vmx_platform);
    6.17 +    return &(vcpu->domain->arch.vmx_platform);
    6.18  }
    6.19  
    6.20  
    6.21 @@ -375,7 +375,7 @@ IA64FAULT
    6.22  vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val)
    6.23  {
    6.24      REGS *regs=vcpu_regs(vcpu);
    6.25 -    u64 nat;
    6.26 +    int nat;
    6.27      //TODO, Eddie
    6.28      if (!regs) return 0;
    6.29      if (reg >= 16 && reg < 32) {
     7.1 --- a/xen/arch/ia64/vmx_virt.c	Tue Aug 02 02:09:24 2005 -0800
     7.2 +++ b/xen/arch/ia64/vmx_virt.c	Tue Aug 02 02:11:41 2005 -0800
     7.3 @@ -1193,7 +1193,8 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu,
     7.4          case 23:return vmx_vcpu_set_ifs(vcpu,r2);
     7.5          case 24:return vmx_vcpu_set_iim(vcpu,r2);
     7.6          case 25:return vmx_vcpu_set_iha(vcpu,r2);
     7.7 -        case 64:return vmx_vcpu_set_lid(vcpu,r2);
     7.8 +        case 64:printk("SET LID to 0x%lx\n", r2);
     7.9 +		return vmx_vcpu_set_lid(vcpu,r2);
    7.10          case 65:return IA64_NO_FAULT;
    7.11          case 66:return vmx_vcpu_set_tpr(vcpu,r2);
    7.12          case 67:return vmx_vcpu_set_eoi(vcpu,r2);
    7.13 @@ -1253,9 +1254,9 @@ IA64FAULT vmx_emul_mov_from_cr(VCPU *vcp
    7.14          case 23:return cr_get(ifs);
    7.15          case 24:return cr_get(iim);
    7.16          case 25:return cr_get(iha);
    7.17 -	case 64:val = ia64_getreg(_IA64_REG_CR_LID);
    7.18 -	     return vmx_vcpu_set_gr(vcpu,tgt,val,0);
    7.19 -//        case 64:return cr_get(lid);
    7.20 +//	case 64:val = ia64_getreg(_IA64_REG_CR_LID);
    7.21 +//	     return vmx_vcpu_set_gr(vcpu,tgt,val,0);
    7.22 +        case 64:return cr_get(lid);
    7.23          case 65:
    7.24               vmx_vcpu_get_ivr(vcpu,&val);
    7.25               return vmx_vcpu_set_gr(vcpu,tgt,val,0);
     8.1 --- a/xen/arch/ia64/vtlb.c	Tue Aug 02 02:09:24 2005 -0800
     8.2 +++ b/xen/arch/ia64/vtlb.c	Tue Aug 02 02:11:41 2005 -0800
     8.3 @@ -23,6 +23,7 @@
     8.4  
     8.5  #include <linux/sched.h>
     8.6  #include <asm/tlb.h>
     8.7 +#include <asm/mm.h>
     8.8  #include <asm/vmx_mm_def.h>
     8.9  #include <asm/gcc_intrin.h>
    8.10  #include <xen/interrupt.h>
    8.11 @@ -359,7 +360,10 @@ thash_data_t *__alloc_chain(thash_cb_t *
    8.12  void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
    8.13  {
    8.14      thash_data_t    *hash_table, *cch;
    8.15 +    int flag;
    8.16      rr_t  vrr;
    8.17 +    u64 gppn;
    8.18 +    u64 ppns, ppne;
    8.19      
    8.20      hash_table = (hcb->hash_func)(hcb->pta,
    8.21                          va, entry->rid, entry->ps);
    8.22 @@ -375,7 +379,18 @@ void vtlb_insert(thash_cb_t *hcb, thash_
    8.23          *hash_table = *entry;
    8.24          hash_table->next = cch;
    8.25      }
    8.26 -    thash_insert (hcb->ts->vhpt, entry, va);
    8.27 +    if(hcb->vcpu->domain->domain_id==0){
    8.28 +       thash_insert(hcb->ts->vhpt, entry, va);
    8.29 +        return;
    8.30 +    }
    8.31 +    flag = 1;
    8.32 +    gppn = (POFFSET(va,entry->ps)|PAGEALIGN((entry->ppn<<12),entry->ps))>>PAGE_SHIFT;
    8.33 +    ppns = PAGEALIGN((entry->ppn<<12),entry->ps);
    8.34 +    ppne = ppns + PSIZE(entry->ps);
    8.35 +    if(((ppns<=0xa0000)&&(ppne>0xa0000))||((ppne>0xc0000)&&(ppns<=0xc0000)))
    8.36 +        flag = 0;
    8.37 +    if((__gpfn_is_mem(hcb->vcpu->domain, gppn)&&flag))
    8.38 +       thash_insert(hcb->ts->vhpt, entry, va);
    8.39      return ;
    8.40  }
    8.41  
    8.42 @@ -427,18 +442,22 @@ static void rem_thash(thash_cb_t *hcb, t
    8.43      thash_data_t    *hash_table, *p, *q;
    8.44      thash_internal_t *priv = &hcb->priv;
    8.45      int idx;
    8.46 -    
    8.47 +
    8.48      hash_table = priv->hash_base;
    8.49      if ( hash_table == entry ) {
    8.50 -        __rem_hash_head (hcb, entry);
    8.51 +//        if ( PURGABLE_ENTRY(hcb, entry) ) {
    8.52 +            __rem_hash_head (hcb, entry);
    8.53 +//        }
    8.54          return ;
    8.55      }
    8.56      // remove from collision chain
    8.57      p = hash_table;
    8.58      for ( q=p->next; q; q = p->next ) {
    8.59 -        if ( q == entry ) {
    8.60 -            p->next = q->next;
    8.61 -            __rem_chain(hcb, entry);
    8.62 +        if ( q == entry ){
    8.63 +//            if ( PURGABLE_ENTRY(hcb,q ) ) {
    8.64 +                p->next = q->next;
    8.65 +                __rem_chain(hcb, entry);
    8.66 +//            }
    8.67              return ;
    8.68          }
    8.69          p = q;
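The vtlb_insert() change above stops mirroring guest TLB entries into the machine VHPT for non-dom0 domains unless the backing frame is real guest memory (__gpfn_is_mem) and the mapped range stays clear of the legacy VGA hole boundaries at 0xa0000 and 0xc0000; frames in that hole are emulated, so hardware VHPT walks must keep faulting into the hypervisor. The straddle test in isolation, as a hypothetical standalone predicate:

    #include <stdint.h>

    /* True when [ppns, ppne) crosses 0xa0000 or 0xc0000. */
    static int straddles_vga_hole(uint64_t ppns, uint64_t ppne)
    {
        return (ppns <= 0xa0000 && ppne > 0xa0000) ||
               (ppns <= 0xc0000 && ppne > 0xc0000);
    }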
     9.1 --- a/xen/include/asm-ia64/domain.h	Tue Aug 02 02:09:24 2005 -0800
     9.2 +++ b/xen/include/asm-ia64/domain.h	Tue Aug 02 02:11:41 2005 -0800
     9.3 @@ -8,6 +8,7 @@
     9.4  #include <asm/vmmu.h>
     9.5  #include <asm/regionreg.h>
     9.6  #include <public/arch-ia64.h>
     9.7 +#include <asm/vmx_platform.h>
     9.8  #endif // CONFIG_VTI
     9.9  #include <xen/list.h>
    9.10  
    9.11 @@ -42,6 +43,7 @@ struct arch_domain {
    9.12       * max_pages in domain struct, which indicates maximum memory size
    9.13       */
    9.14      unsigned long max_pfn;
    9.15 +    struct virutal_platform_def     vmx_platform;
    9.16  #endif  //CONFIG_VTI
    9.17      u64 xen_vastart;
    9.18      u64 xen_vaend;
    10.1 --- a/xen/include/asm-ia64/ia64_int.h	Tue Aug 02 02:09:24 2005 -0800
    10.2 +++ b/xen/include/asm-ia64/ia64_int.h	Tue Aug 02 02:11:41 2005 -0800
    10.3 @@ -37,7 +37,9 @@
    10.4  #define	IA64_RFI_IN_PROGRESS	0x0002
    10.5  #define IA64_RETRY              0x0003
    10.6  #ifdef  CONFIG_VTI
    10.7 -#define IA64_FAULT		0x0002
    10.8 +#undef  IA64_NO_FAULT
    10.9 +#define	IA64_NO_FAULT		0x0000
   10.10 +#define IA64_FAULT		0x0001
   10.11  #endif      //CONFIG_VTI
   10.12  #define IA64_FORCED_IFA         0x0004
   10.13  #define	IA64_ILLOP_FAULT	(IA64_GENEX_VECTOR | 0x00)
    11.1 --- a/xen/include/asm-ia64/privop.h	Tue Aug 02 02:09:24 2005 -0800
    11.2 +++ b/xen/include/asm-ia64/privop.h	Tue Aug 02 02:11:41 2005 -0800
    11.3 @@ -138,14 +138,32 @@ typedef union U_INST64_M47 {
    11.4      IA64_INST inst;
    11.5      struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; };
    11.6  } INST64_M47;
    11.7 +
    11.8  typedef union U_INST64_M1{
    11.9      IA64_INST inst;
   11.10      struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
   11.11  } INST64_M1;
   11.12 +
   11.13 +typedef union U_INST64_M2{
   11.14 +    IA64_INST inst;
   11.15 +    struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
   11.16 +} INST64_M2;
   11.17 +
   11.18 +typedef union U_INST64_M3{
   11.19 +    IA64_INST inst;
   11.20 +    struct { unsigned long qp:6, r1:7, imm7:7, r3:7, i:1, hint:2, x6:6, s:1, major:4; };
   11.21 +} INST64_M3;
   11.22 +
   11.23  typedef union U_INST64_M4 {
   11.24      IA64_INST inst;
   11.25      struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
   11.26  } INST64_M4;
   11.27 +
   11.28 +typedef union U_INST64_M5 {
   11.29 +    IA64_INST inst;
   11.30 +    struct { unsigned long qp:6, imm7:7, r2:7, r3:7, i:1, hint:2, x6:6, s:1, major:4; };
   11.31 +} INST64_M5;
   11.32 +
   11.33  typedef union U_INST64_M6 {
   11.34      IA64_INST inst;
   11.35      struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
   11.36 @@ -166,7 +184,10 @@ typedef union U_INST64 {
   11.37      INST64_I28 I28;	// mov from ar (I unit)
   11.38  #ifdef CONFIG_VTI
   11.39      INST64_M1  M1;  // ld integer
   11.40 +    INST64_M2  M2;
   11.41 +    INST64_M3  M3;
   11.42      INST64_M4  M4;  // st integer
   11.43 +    INST64_M5  M5;
   11.44      INST64_M6  M6;  // ldfd floating pointer
   11.45  #endif // CONFIG_VTI
   11.46      INST64_M28 M28;	// purge translation cache entry
    12.1 --- a/xen/include/asm-ia64/vmx.h	Tue Aug 02 02:09:24 2005 -0800
    12.2 +++ b/xen/include/asm-ia64/vmx.h	Tue Aug 02 02:11:41 2005 -0800
    12.3 @@ -23,6 +23,7 @@
    12.4  #define _ASM_IA64_VT_H
    12.5  
    12.6  #define RR7_SWITCH_SHIFT	12	/* 4k enough */
    12.7 +#include <public/io/ioreq.h>
    12.8  
    12.9  extern void identify_vmx_feature(void);
   12.10  extern unsigned int vmx_enabled;
   12.11 @@ -38,4 +39,19 @@ extern void vmx_change_double_mapping(st
   12.12  
   12.13  extern void vmx_wait_io(void);
   12.14  extern void vmx_io_assist(struct vcpu *v);
   12.15 +
   12.16 +static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
   12.17 +{
   12.18 +    return &((shared_iopage_t *)d->arch.vmx_platform.shared_page_va)->vcpu_iodata[cpu];
   12.19 +}
   12.20 +
   12.21 +static inline int iopacket_port(struct domain *d)
   12.22 +{
   12.23 +    return ((shared_iopage_t *)d->arch.vmx_platform.shared_page_va)->sp_global.eport;
   12.24 +}
   12.25 +
   12.26 +static inline shared_iopage_t *get_sp(struct domain *d)
   12.27 +{
   12.28 +    return (shared_iopage_t *)d->arch.vmx_platform.shared_page_va;
   12.29 +}
   12.30  #endif /* _ASM_IA64_VT_H */
    13.1 --- a/xen/include/asm-ia64/vmx_uaccess.h	Tue Aug 02 02:09:24 2005 -0800
    13.2 +++ b/xen/include/asm-ia64/vmx_uaccess.h	Tue Aug 02 02:11:41 2005 -0800
    13.3 @@ -40,6 +40,8 @@
    13.4   */
    13.5  asm (".section \"__ex_table\", \"a\"\n\t.previous");
    13.6  
    13.7 +/* VT-i reserves bit 60 for the VMM; guest addresses have bit 60 = bit 59 */
    13.8 +#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
    13.9  /* For back compatibility */
   13.10  #define __access_ok(addr, size, segment)	1
   13.11  #define access_ok(addr, size, segment)	__access_ok((addr), (size), (segment))
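IS_VMM_ADDRESS above encodes the VT-i convention that the VMM claims virtual-address bit 60, so a legitimate guest address must have bit 60 equal to bit 59; XORing the two shifted copies isolates exactly that disagreement. A self-contained check of the macro's behavior (test values chosen for illustration):

    #include <assert.h>

    #define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)

    int main(void)
    {
        assert(!IS_VMM_ADDRESS(0x0000000000000000ULL));  /* bits 60,59 = 0,0 */
        assert(!IS_VMM_ADDRESS(0x1800000000000000ULL));  /* bits 60,59 = 1,1 */
        assert( IS_VMM_ADDRESS(0x1000000000000000ULL));  /* bits 60,59 = 1,0 */
        assert( IS_VMM_ADDRESS(0x0800000000000000ULL));  /* bits 60,59 = 0,1 */
        return 0;
    }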
    14.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Tue Aug 02 02:09:24 2005 -0800
    14.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Tue Aug 02 02:11:41 2005 -0800
    14.3 @@ -105,6 +105,10 @@ extern void vtm_set_itv(VCPU *vcpu);
    14.4  extern void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm);
    14.5  extern void vtm_domain_out(VCPU *vcpu);
    14.6  extern void vtm_domain_in(VCPU *vcpu);
    14.7 +#ifdef V_IOSAPIC_READY
    14.8 +extern void vlapic_update_ext_irq(VCPU *vcpu);
    14.9 +extern void vlapic_update_shared_info(VCPU *vcpu);
   14.10 +#endif
   14.11  extern void vlsapic_reset(VCPU *vcpu);
   14.12  extern int vmx_check_pending_irq(VCPU *vcpu);
   14.13  extern void guest_write_eoi(VCPU *vcpu);
   14.14 @@ -399,6 +403,9 @@ IA64FAULT
   14.15  vmx_vcpu_set_lid(VCPU *vcpu, u64 val)
   14.16  {
   14.17      VPD_CR(vcpu,lid)=val;
   14.18 +#ifdef V_IOSAPIC_READY
   14.19 +    vlapic_update_shared_info(vcpu);
   14.20 +#endif
   14.21      return IA64_NO_FAULT;
   14.22  }
   14.23  extern IA64FAULT vmx_vcpu_set_tpr(VCPU *vcpu, u64 val);
    15.1 --- a/xen/include/asm-ia64/vmx_vpd.h	Tue Aug 02 02:09:24 2005 -0800
    15.2 +++ b/xen/include/asm-ia64/vmx_vpd.h	Tue Aug 02 02:11:41 2005 -0800
    15.3 @@ -25,7 +25,6 @@
    15.4  #ifndef __ASSEMBLY__
    15.5  
    15.6  #include <asm/vtm.h>
    15.7 -#include <asm/vmx_platform.h>
    15.8  #include <public/arch-ia64.h>
    15.9  
   15.10  #define VPD_SHIFT	17	/* 128K requirement */
   15.11 @@ -84,7 +83,6 @@ struct arch_vmx_struct {
   15.12      unsigned long   rfi_ipsr;
   15.13      unsigned long   rfi_ifs;
   15.14  	unsigned long	in_service[4];	// vLsapic inservice IRQ bits
   15.15 -	struct virutal_platform_def     vmx_platform;
   15.16  	unsigned long   flags;
   15.17  };
   15.18  
   15.19 @@ -126,7 +124,6 @@ extern unsigned int opt_vmx_debug_level;
   15.20  
   15.21  #endif //__ASSEMBLY__
   15.22  
   15.23 -
   15.24  // VPD field offset
   15.25  #define VPD_VAC_START_OFFSET		0
   15.26  #define VPD_VDC_START_OFFSET		8