ia64/xen-unstable

changeset 9164:1abf3783975d

[IA64] Merge guest TR emulation

This patch is intended to merge guest TR emulation on both VTI domains
and para-domains.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Fri Mar 10 08:52:12 2006 -0700 (2006-03-10)
parents 551f7935f79a
children 056109e43947
files xen/arch/ia64/vmx/vmmu.c xen/arch/ia64/vmx/vmx_hypercall.c xen/arch/ia64/vmx/vmx_init.c xen/arch/ia64/vmx/vmx_irq_ia64.c xen/arch/ia64/vmx/vmx_phy_mode.c xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/vmx/vmx_vcpu.c xen/arch/ia64/vmx/vmx_virt.c xen/arch/ia64/vmx/vtlb.c xen/arch/ia64/xen/irq.c xen/arch/ia64/xen/process.c xen/arch/ia64/xen/vcpu.c xen/include/asm-ia64/vcpu.h xen/include/asm-ia64/vmmu.h xen/include/asm-ia64/vmx_platform.h xen/include/asm-ia64/vmx_vcpu.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/vmmu.c	Fri Mar 10 08:25:54 2006 -0700
     1.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Fri Mar 10 08:52:12 2006 -0700
     1.3 @@ -31,6 +31,7 @@
     1.4  #include <asm/hw_irq.h>
     1.5  #include <asm/vmx_pal_vsa.h>
     1.6  #include <asm/kregs.h>
     1.7 +#include <asm/vcpu.h>
     1.8  #include <xen/irq.h>
     1.9  
    1.10  /*
    1.11 @@ -68,14 +69,14 @@ u64 get_mfn(struct domain *d, u64 gpfn)
    1.12  /*
    1.13   * The VRN bits of va stand for which rr to get.
    1.14   */
    1.15 -ia64_rr vmmu_get_rr(VCPU *vcpu, u64 va)
    1.16 -{
    1.17 -    ia64_rr   vrr;
    1.18 -    vmx_vcpu_get_rr(vcpu, va, &vrr.rrval);
    1.19 -    return vrr;
    1.20 -}
    1.21 +//ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va)
    1.22 +//{
    1.23 +//    ia64_rr   vrr;
    1.24 +//    vcpu_get_rr(vcpu, va, &vrr.rrval);
    1.25 +//    return vrr;
    1.26 +//}
    1.27  
    1.28 -
    1.29 +/*
    1.30  void recycle_message(thash_cb_t *hcb, u64 para)
    1.31  {
    1.32      if(hcb->ht == THASH_VHPT)
    1.33 @@ -84,7 +85,7 @@ void recycle_message(thash_cb_t *hcb, u6
    1.34      }
    1.35      printk("hcb=%p recycled with %lx\n",hcb,para);
    1.36  }
    1.37 -
    1.38 + */
    1.39  
    1.40  /*
    1.41   * Purge all guest TCs in logical processor.
    1.42 @@ -102,7 +103,6 @@ purge_machine_tc_by_domid(domid_t domid)
    1.43      u32 stride1,stride2;
    1.44      u32 i,j;
    1.45      u64 psr;
    1.46 -    
    1.47  
    1.48      result = ia64_pal_call_static(PAL_PTCE_INFO,0,0,0, 0);
    1.49      if ( result.status != 0 ) {
    1.50 @@ -113,7 +113,7 @@ purge_machine_tc_by_domid(domid_t domid)
    1.51      count2 = LOW_32BITS (result.v1);
    1.52      stride1 = HIGH_32BITS(result.v2);
    1.53      stride2 = LOW_32BITS (result.v2);
    1.54 -    
    1.55 +
    1.56      local_irq_save(psr);
    1.57      for (i=0; i<count1; i++) {
    1.58          for (j=0; j<count2; j++) {
    1.59 @@ -133,24 +133,10 @@ static thash_cb_t *init_domain_vhpt(stru
    1.60  //    struct page_info *page;
    1.61      thash_cb_t  *vhpt;
    1.62      PTA pta_value;
    1.63 -/*
    1.64 -    page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
    1.65 -    if ( page == NULL ) {
    1.66 -        panic("No enough contiguous memory for init_domain_mm\n");
    1.67 -    }
    1.68 -    vbase = page_to_virt(page);
    1.69 -    printk("Allocate domain vhpt at 0x%lx\n", (u64)vbase);
    1.70 -    memset(vbase, 0, VCPU_VHPT_SIZE);
    1.71 - */
    1.72 -//    vcur = (void*)((u64)vbase + VCPU_VHPT_SIZE);
    1.73      vcur -= sizeof (thash_cb_t);
    1.74      vhpt = vcur;
    1.75      vhpt->ht = THASH_VHPT;
    1.76      vhpt->vcpu = d;
    1.77 -//    vhpt->hash_func = machine_thash;
    1.78 -//    vcur -= sizeof (vhpt_special);
    1.79 -//    vs = vcur;
    1.80 -
    1.81      /* Setup guest pta */
    1.82      pta_value.val = 0;
    1.83      pta_value.ve = 1;
    1.84 @@ -159,14 +145,10 @@ static thash_cb_t *init_domain_vhpt(stru
    1.85      pta_value.base = ((u64)vbase) >> PTA_BASE_SHIFT;
    1.86      d->arch.arch_vmx.mpta = pta_value.val;
    1.87  
    1.88 -//    vhpt->vs = vs;
    1.89 -//    vhpt->vs->get_mfn = __gpfn_to_mfn_foreign;
    1.90 -//    vhpt->vs->tag_func = machine_ttag;
    1.91      vhpt->hash = vbase;
    1.92      vhpt->hash_sz = VCPU_VHPT_SIZE/2;
    1.93      vhpt->cch_buf = (void *)(vbase + vhpt->hash_sz);
    1.94      vhpt->cch_sz = (u64)vcur - (u64)vhpt->cch_buf;
    1.95 -//    vhpt->recycle_notifier = recycle_message;
    1.96      thash_init(vhpt,VCPU_VHPT_SHIFT-1);
    1.97      return vhpt;
    1.98  }
    1.99 @@ -177,9 +159,8 @@ thash_cb_t *init_domain_tlb(struct vcpu 
   1.100  {
   1.101      struct page_info *page;
   1.102      void    *vbase, *vhptbase, *vcur;
   1.103 -    tlb_special_t  *ts;
   1.104      thash_cb_t  *tlb;
   1.105 -    
   1.106 +
   1.107      page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
   1.108      if ( page == NULL ) {
   1.109          panic("No enough contiguous memory for init_domain_mm\n");
   1.110 @@ -193,10 +174,7 @@ thash_cb_t *init_domain_tlb(struct vcpu 
   1.111      tlb = vcur;
   1.112      tlb->ht = THASH_TLB;
   1.113      tlb->vcpu = d;
   1.114 -    vcur -= sizeof (tlb_special_t);
   1.115 -    ts = vcur;
   1.116 -    tlb->ts = ts;
   1.117 -    tlb->ts->vhpt = init_domain_vhpt(d,vhptbase,vbase);
   1.118 +    tlb->vhpt = init_domain_vhpt(d,vhptbase,vbase);
   1.119  //    tlb->hash_func = machine_thash;
   1.120      tlb->hash = vbase;
   1.121      tlb->hash_sz = VCPU_VTLB_SIZE/2;
   1.122 @@ -207,27 +185,6 @@ thash_cb_t *init_domain_tlb(struct vcpu 
   1.123      return tlb;
   1.124  }
   1.125  
   1.126 -/* Allocate physical to machine mapping table for domN
   1.127 - * FIXME: Later this interface may be removed, if that table is provided
   1.128 - * by control panel. Dom0 has gpfn identical to mfn, which doesn't need
   1.129 - * this interface at all.
   1.130 - */
   1.131 -#if 0
   1.132 -void
   1.133 -alloc_pmt(struct domain *d)
   1.134 -{
   1.135 -    struct page_info *page;
   1.136 -
   1.137 -    /* Only called once */
   1.138 -    ASSERT(d->arch.pmt);
   1.139 -
   1.140 -    page = alloc_domheap_pages(NULL, get_order(d->max_pages), 0);
   1.141 -    ASSERT(page);
   1.142 -
   1.143 -    d->arch.pmt = page_to_virt(page);
   1.144 -    memset(d->arch.pmt, 0x55, d->max_pages * 8);
   1.145 -}
   1.146 -#endif
   1.147  /*
   1.148   * Insert guest TLB to machine TLB.
   1.149   *  data:   In TLB format
   1.150 @@ -240,7 +197,6 @@ void machine_tlb_insert(struct vcpu *d, 
   1.151      unsigned long mtlb_ppn;
   1.152      mtlb.ifa = tlb->vadr;
   1.153      mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
   1.154 -    //vmx_vcpu_get_rr(d, mtlb.ifa, &vrr.value);
   1.155      mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
   1.156      mtlb.ppn = get_mfn(d->domain,tlb->ppn);
   1.157      mtlb_ppn=mtlb.ppn;
   1.158 @@ -311,7 +267,7 @@ int vhpt_enabled(VCPU *vcpu, uint64_t va
   1.159      IA64_PSR  vpsr; 
   1.160  
   1.161      vpsr.val = vmx_vcpu_get_psr(vcpu);
   1.162 -    vrr = vmx_vcpu_rr(vcpu, vadr);
   1.163 +    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
   1.164      vmx_vcpu_get_pta(vcpu,&vpta.val);
   1.165  
   1.166      if ( vrr.ve & vpta.ve ) {
   1.167 @@ -355,21 +311,18 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
   1.168      u64     *vpa;
   1.169      thash_data_t    *tlb;
   1.170      thash_cb_t *hcb;
   1.171 -    ia64_rr vrr;
   1.172      u64     mfn;
   1.173  
   1.174      if ( !(VCPU(vcpu, vpsr) & IA64_PSR_IT) ) {   // I-side physical mode
   1.175          gpip = gip;
   1.176      }
   1.177      else {
   1.178 -        vmx_vcpu_get_rr(vcpu, gip, &vrr.rrval);
   1.179 -	hcb = vmx_vcpu_get_vtlb(vcpu);
   1.180 -        tlb = vtlb_lookup_ex (hcb, vrr.rid, gip, ISIDE_TLB );
   1.181 -        if( tlb == NULL )
   1.182 -             tlb = vtlb_lookup_ex (hcb,
   1.183 -                vrr.rid, gip, DSIDE_TLB );
   1.184 -        if (tlb) 
   1.185 -	        gpip = (tlb->ppn << 12) | ( gip & (PSIZE(tlb->ps)-1) );
   1.186 +	    hcb = vmx_vcpu_get_vtlb(vcpu);
   1.187 +        tlb = vtlb_lookup(hcb, gip, ISIDE_TLB);
   1.188 +//        if( tlb == NULL )
   1.189 +//             tlb = vtlb_lookup(hcb, gip, DSIDE_TLB );
   1.190 +        if (tlb)
   1.191 +	        gpip = (tlb->ppn >>(tlb->ps-12)<<tlb->ps) | ( gip & (PSIZE(tlb->ps)-1) );
   1.192      }
   1.193      if( gpip){
   1.194  	 mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
   1.195 @@ -388,236 +341,146 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
   1.196  
   1.197  IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
   1.198  {
   1.199 -
   1.200 -    thash_data_t data, *ovl;
   1.201 +    int slot;
   1.202 +    u64 ps, va;
   1.203      thash_cb_t  *hcb;
   1.204 -    search_section_t sections;
   1.205 -    ia64_rr vrr;
   1.206  
   1.207 -    hcb = vmx_vcpu_get_vtlb(vcpu);
   1.208 -    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
   1.209 -    data.itir=itir;
   1.210 -    data.vadr=PAGEALIGN(ifa,data.ps);
   1.211 -    data.tc = 1;
   1.212 -    data.cl=ISIDE_TLB;
   1.213 -    vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
   1.214 -    data.rid = vrr.rid;
   1.215 -    
   1.216 -    sections.tr = 1;
   1.217 -    sections.tc = 0;
   1.218 -
   1.219 -    ovl = vtr_find_overlap(hcb, &data, ISIDE_TLB);
   1.220 -    while (ovl) {
   1.221 +    ps = itir_ps(itir);
   1.222 +    va = PAGEALIGN(ifa, ps);
   1.223 +    slot = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
   1.224 +    if (slot >=0) {
   1.225          // generate MCA.
   1.226          panic("Tlb conflict!!");
   1.227          return IA64_FAULT;
   1.228      }
   1.229 -    thash_purge_and_insert(hcb, &data, ifa);
   1.230 +    hcb = vmx_vcpu_get_vtlb(vcpu);
   1.231 +    thash_purge_and_insert(hcb, pte, itir, ifa);
   1.232      return IA64_NO_FAULT;
   1.233  }
   1.234  
   1.235 +IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
   1.236 +{
   1.237 +    int slot;
   1.238 +    u64 ps, va, gpfn;
   1.239 +    thash_cb_t  *hcb;
   1.240 +
   1.241 +    ps = itir_ps(itir);
   1.242 +    va = PAGEALIGN(ifa, ps);
   1.243 +    slot = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
   1.244 +    if (slot >=0) {
   1.245 +        // generate MCA.
   1.246 +        panic("Tlb conflict!!");
   1.247 +        return IA64_FAULT;
   1.248 +    }
   1.249 +    hcb = vmx_vcpu_get_vtlb(vcpu);
   1.250 +    gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
   1.251 +    if(__gpfn_is_io(vcpu->domain,gpfn))
   1.252 +        pte |= VTLB_PTE_IO;
   1.253 +    thash_purge_and_insert(hcb, pte, itir, ifa);
   1.254 +    return IA64_NO_FAULT;
   1.255 +
   1.256 +}
   1.257 +
   1.258  
   1.259  
   1.260  
   1.261 -IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
   1.262 +IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
   1.263  {
   1.264 -
   1.265 -    thash_data_t data, *ovl;
   1.266 +    int index;
   1.267 +    u64 ps, va, rid;
   1.268      thash_cb_t  *hcb;
   1.269 -    search_section_t sections;
   1.270 -    ia64_rr vrr;
   1.271 -
   1.272 -    hcb = vmx_vcpu_get_vtlb(vcpu);
   1.273 -    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
   1.274 -    data.itir=itir;
   1.275 -    data.vadr=PAGEALIGN(ifa,data.ps);
   1.276 -    data.tc = 1;
   1.277 -    data.cl=DSIDE_TLB;
   1.278 -    vmx_vcpu_get_rr(vcpu, ifa,(UINT64 *)&vrr);
   1.279 -    data.rid = vrr.rid;
   1.280 -    sections.tr = 1;
   1.281 -    sections.tc = 0;
   1.282 -
   1.283 -    ovl = vtr_find_overlap(hcb, &data, DSIDE_TLB);
   1.284 -    if (ovl) {
   1.285 -          // generate MCA.
   1.286 -        panic("Tlb conflict!!");
   1.287 -        return IA64_FAULT;
   1.288 -    }
   1.289 -    thash_purge_and_insert(hcb, &data, ifa);
   1.290 -    return IA64_NO_FAULT;
   1.291 -}
   1.292 -
   1.293 -/*
   1.294 - * Return TRUE/FALSE for success of lock operation
   1.295 - */
   1.296 -
   1.297 -/*
   1.298 -int vmx_lock_guest_dtc (VCPU *vcpu, UINT64 va, int lock)
   1.299 -{
   1.300  
   1.301 -    thash_cb_t  *hcb;
   1.302 -    ia64_rr vrr;
   1.303 -    u64	  preferred_size;
   1.304 -
   1.305 -    vmx_vcpu_get_rr(vcpu, va, &vrr);
   1.306 -    hcb = vmx_vcpu_get_vtlb(vcpu);
   1.307 -    va = PAGEALIGN(va,vrr.ps);
   1.308 -    preferred_size = PSIZE(vrr.ps);
   1.309 -    return thash_lock_tc(hcb, va, preferred_size, vrr.rid, DSIDE_TLB, lock);
   1.310 -}
   1.311 - */
   1.312 -
   1.313 -
   1.314 -
   1.315 -IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
   1.316 -{
   1.317 -
   1.318 -    thash_data_t data, *ovl;
   1.319 -    thash_cb_t  *hcb;
   1.320 -    search_section_t sections;
   1.321 -    ia64_rr vrr;
   1.322 -    /* u64 mfn,psr; */
   1.323 -
   1.324 -    hcb = vmx_vcpu_get_vtlb(vcpu);
   1.325 -    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
   1.326 -    data.itir=itir;
   1.327 -    data.vadr=PAGEALIGN(ifa,data.ps);
   1.328 -    data.tc = 0;
   1.329 -    data.cl=ISIDE_TLB;
   1.330 -    vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
   1.331 -    data.rid = vrr.rid;
   1.332 -    sections.tr = 1;
   1.333 -    sections.tc = 0;
   1.334 -
   1.335 -
   1.336 -    ovl = vtr_find_overlap(hcb, &data, ISIDE_TLB);
   1.337 -    if (ovl) {
   1.338 +    ps = itir_ps(itir);
   1.339 +    va = PAGEALIGN(ifa, ps);
   1.340 +    index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
   1.341 +    if (index >=0) {
   1.342          // generate MCA.
   1.343          panic("Tlb conflict!!");
   1.344          return IA64_FAULT;
   1.345      }
   1.346 -    sections.tr = 0;
   1.347 -    sections.tc = 1;
   1.348 -    thash_purge_entries(hcb, &data, sections);
   1.349 -/*    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
   1.350 -        data.contiguous=1;
   1.351 -    }
   1.352 - */
   1.353 -    thash_tr_insert(hcb, &data, ifa, idx);
   1.354 -/*
   1.355 -    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
   1.356 -        mfn = __gpfn_to_mfn_foreign(vcpu->domain,arch_to_xen_ppn(data.ppn));
   1.357 -        data.page_flags=pte&~PAGE_FLAGS_RV_MASK;
   1.358 -        data.ppn = xen_to_arch_ppn(mfn);
   1.359 -        psr = ia64_clear_ic();
   1.360 -        ia64_itr(0x1, IA64_ITR_GUEST_KERNEL, data.vadr, data.page_flags, data.ps);
   1.361 -        ia64_set_psr(psr);      // restore psr
   1.362 -        ia64_srlz_i();
   1.363 -//        return IA64_NO_FAULT;
   1.364 -    }
   1.365 -*/
   1.366 +    hcb = vmx_vcpu_get_vtlb(vcpu);
   1.367 +    thash_purge_entries(hcb, va, ps);
   1.368 +    vcpu_get_rr(vcpu, va, &rid);
   1.369 +    rid = rid& RR_RID_MASK;
   1.370 +    vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.itrs[slot], pte, itir, va, rid);
   1.371 +    vcpu_quick_region_set(PSCBX(vcpu,itr_regions),va);
   1.372      return IA64_NO_FAULT;
   1.373  }
   1.374  
   1.375 -IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
   1.376 -{
   1.377 -
   1.378 -    thash_data_t data, *ovl;
   1.379 -    thash_cb_t  *hcb;
   1.380 -    search_section_t sections;
   1.381 -    ia64_rr    vrr;
   1.382 -    /* u64 mfn,psr; */
   1.383  
   1.384 -    hcb = vmx_vcpu_get_vtlb(vcpu);
   1.385 -    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
   1.386 -    data.itir=itir;
   1.387 -    data.vadr=PAGEALIGN(ifa,data.ps);
   1.388 -    data.tc = 0;
   1.389 -    data.cl=DSIDE_TLB;
   1.390 -    vmx_vcpu_get_rr(vcpu, ifa,(UINT64 *)&vrr);
   1.391 -    data.rid = vrr.rid;
   1.392 -    sections.tr = 1;
   1.393 -    sections.tc = 0;
   1.394 +IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
   1.395 +{
   1.396 +    int index;
   1.397 +    u64 ps, va, gpfn, rid;
   1.398 +    thash_cb_t  *hcb;
   1.399  
   1.400 -    ovl = vtr_find_overlap(hcb, &data, DSIDE_TLB);
   1.401 -    while (ovl) {
   1.402 +    ps = itir_ps(itir);
   1.403 +    va = PAGEALIGN(ifa, ps);
   1.404 +    index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
   1.405 +    if (index>=0) {
   1.406          // generate MCA.
   1.407          panic("Tlb conflict!!");
   1.408          return IA64_FAULT;
   1.409      }
   1.410 -    sections.tr = 0;
   1.411 -    sections.tc = 1;
   1.412 -    thash_purge_entries(hcb, &data, sections);
   1.413 -/*
   1.414 -    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
   1.415 -        data.contiguous=1;
   1.416 -    }
   1.417 - */
   1.418 -    thash_tr_insert(hcb, &data, ifa, idx);
   1.419 -/*
   1.420 -    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
   1.421 -        mfn = __gpfn_to_mfn_foreign(vcpu->domain,arch_to_xen_ppn(data.ppn));
   1.422 -        data.page_flags=pte&~PAGE_FLAGS_RV_MASK;
   1.423 -        data.ppn = xen_to_arch_ppn(mfn);
   1.424 -        psr = ia64_clear_ic();
   1.425 -        ia64_itr(0x2,IA64_DTR_GUEST_KERNEL , data.vadr, data.page_flags, data.ps);
   1.426 -        ia64_set_psr(psr);      // restore psr
   1.427 -        ia64_srlz_i();
   1.428 -//        return IA64_NO_FAULT;
   1.429 -    }
   1.430 -*/
   1.431 -
   1.432 +    hcb = vmx_vcpu_get_vtlb(vcpu);
   1.433 +    thash_purge_entries(hcb, va, ps);
   1.434 +    gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
   1.435 +    if(__gpfn_is_io(vcpu->domain,gpfn))
   1.436 +        pte |= VTLB_PTE_IO;
   1.437 +    vcpu_get_rr(vcpu, va, &rid);
   1.438 +    rid = rid& RR_RID_MASK;
   1.439 +    vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.dtrs[slot], pte, itir, va, rid);
   1.440 +    vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),va);
   1.441      return IA64_NO_FAULT;
   1.442  }
   1.443  
   1.444  
   1.445  
   1.446 -IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 ps)
   1.447 +IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 ifa,UINT64 ps)
   1.448  {
   1.449 +    int index;
   1.450 +    u64 va;
   1.451      thash_cb_t  *hcb;
   1.452 -    ia64_rr rr;
   1.453 -    search_section_t sections;
   1.454  
   1.455 +    va = PAGEALIGN(ifa, ps);
   1.456 +    index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
   1.457 +    if (index>=0) {
   1.458 +        vcpu->arch.dtrs[index].p=0;
   1.459 +        index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
   1.460 +    }
   1.461      hcb = vmx_vcpu_get_vtlb(vcpu);
   1.462 -    rr=vmx_vcpu_rr(vcpu,vadr);
   1.463 -    sections.tr = 1;
   1.464 -    sections.tc = 1;
   1.465 -    thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,DSIDE_TLB);
   1.466 +    thash_purge_entries(hcb, va, ps);
   1.467      return IA64_NO_FAULT;
   1.468  }
   1.469  
   1.470 -IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 ps)
   1.471 +IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 ifa,UINT64 ps)
   1.472  {
   1.473 +    int index;
   1.474 +    u64 va;
   1.475      thash_cb_t  *hcb;
   1.476 -    ia64_rr rr;
   1.477 -    search_section_t sections;
   1.478 +
   1.479 +    va = PAGEALIGN(ifa, ps);
   1.480 +    index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
   1.481 +    if (index>=0) {
   1.482 +        vcpu->arch.itrs[index].p=0;
   1.483 +        index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
   1.484 +    }
   1.485      hcb = vmx_vcpu_get_vtlb(vcpu);
   1.486 -    rr=vmx_vcpu_rr(vcpu,vadr);
   1.487 -    sections.tr = 1;
   1.488 -    sections.tc = 1;
   1.489 -    thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,ISIDE_TLB);
   1.490 +    thash_purge_entries(hcb, va, ps);
   1.491      return IA64_NO_FAULT;
   1.492  }
   1.493  
   1.494 -IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps)
   1.495 +IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 va, UINT64 ps)
   1.496  {
   1.497      thash_cb_t  *hcb;
   1.498 -    ia64_rr vrr;
   1.499 -    search_section_t sections;
   1.500 +    va = PAGEALIGN(va, ps);
   1.501      hcb = vmx_vcpu_get_vtlb(vcpu);
   1.502 -    vrr=vmx_vcpu_rr(vcpu,vadr);
   1.503 -    sections.tr = 0;
   1.504 -    sections.tc = 1;
   1.505 -    vadr = PAGEALIGN(vadr, ps);
   1.506 -
   1.507 -    thash_purge_entries_ex(hcb,vrr.rid,vadr,ps,sections,DSIDE_TLB);
   1.508 -    thash_purge_entries_ex(hcb,vrr.rid,vadr,ps,sections,ISIDE_TLB);
   1.509 +    thash_purge_entries(hcb, va, ps);
   1.510      return IA64_NO_FAULT;
   1.511  }
   1.512  
   1.513  
   1.514 -IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
   1.515 +IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 va)
   1.516  {
   1.517      thash_cb_t  *hcb;
   1.518      hcb = vmx_vcpu_get_vtlb(vcpu);
   1.519 @@ -625,15 +488,15 @@ IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UIN
   1.520      return IA64_NO_FAULT;
   1.521  }
   1.522  
   1.523 -IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 ps)
   1.524 +IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 va, UINT64 ps)
   1.525  {
   1.526 -    vmx_vcpu_ptc_l(vcpu, vadr, ps);
   1.527 +    vmx_vcpu_ptc_l(vcpu, va, ps);
   1.528      return IA64_ILLOP_FAULT;
   1.529  }
   1.530  
   1.531 -IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 ps)
   1.532 +IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 va,UINT64 ps)
   1.533  {
   1.534 -    vmx_vcpu_ptc_l(vcpu, vadr, ps);
   1.535 +    vmx_vcpu_ptc_l(vcpu, va, ps);
   1.536      return IA64_NO_FAULT;
   1.537  }
   1.538  
   1.539 @@ -644,7 +507,7 @@ IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UIN
   1.540      ia64_rr vrr;
   1.541      u64 vhpt_offset;
   1.542      vmx_vcpu_get_pta(vcpu, &vpta.val);
   1.543 -    vrr=vmx_vcpu_rr(vcpu, vadr);
   1.544 +    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
   1.545      if(vpta.vf){
   1.546          panic("THASH,Don't support long format VHPT");
   1.547          *pval = ia64_call_vsa(PAL_VPS_THASH,vadr,vrr.rrval,vpta.val,0,0,0,0);
   1.548 @@ -663,7 +526,7 @@ IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT
   1.549      ia64_rr vrr;
   1.550      PTA vpta;
   1.551      vmx_vcpu_get_pta(vcpu, &vpta.val);
   1.552 -    vrr=vmx_vcpu_rr(vcpu, vadr);
   1.553 +    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
   1.554      if(vpta.vf){
   1.555          panic("THASH,Don't support long format VHPT");
   1.556          *pval = ia64_call_vsa(PAL_VPS_TTAG,vadr,vrr.rrval,0,0,0,0,0);
   1.557 @@ -679,13 +542,11 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
   1.558  {
   1.559      thash_data_t *data;
   1.560      thash_cb_t  *hcb;
   1.561 -    ia64_rr vrr;
   1.562      ISR visr,pt_isr;
   1.563      REGS *regs;
   1.564      u64 vhpt_adr;
   1.565      IA64_PSR vpsr;
   1.566      hcb = vmx_vcpu_get_vtlb(vcpu);
   1.567 -    vrr=vmx_vcpu_rr(vcpu,vadr);
   1.568      regs=vcpu_regs(vcpu);
   1.569      pt_isr.val=VMX(vcpu,cr_isr);
   1.570      visr.val=0;
   1.571 @@ -696,7 +557,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
   1.572           visr.ni=1;
   1.573      }
   1.574      visr.na=1;
   1.575 -    data = vtlb_lookup_ex(hcb, vrr.rid, vadr, DSIDE_TLB);
   1.576 +    data = vtlb_lookup(hcb, vadr, DSIDE_TLB);
   1.577      if(data){
   1.578          if(data->p==0){
   1.579              visr.na=1;
   1.580 @@ -744,8 +605,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
   1.581          }
   1.582          else{
   1.583              vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
   1.584 -            vrr=vmx_vcpu_rr(vcpu,vhpt_adr);
   1.585 -            data = vtlb_lookup_ex(hcb, vrr.rid, vhpt_adr, DSIDE_TLB);
   1.586 +            data = vtlb_lookup(hcb, vhpt_adr, DSIDE_TLB);
   1.587              if(data){
   1.588                  if(vpsr.ic){
   1.589                      vcpu_set_isr(vcpu, visr.val);
   1.590 @@ -776,7 +636,6 @@ IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT6
   1.591  {
   1.592      thash_data_t *data;
   1.593      thash_cb_t  *hcb;
   1.594 -    ia64_rr rr;
   1.595      PTA vpta;
   1.596      vmx_vcpu_get_pta(vcpu, &vpta.val);
   1.597      if(vpta.vf==0 || unimplemented_gva(vcpu, vadr)){
   1.598 @@ -784,8 +643,7 @@ IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT6
   1.599          return IA64_NO_FAULT;
   1.600      }
   1.601      hcb = vmx_vcpu_get_vtlb(vcpu);
   1.602 -    rr=vmx_vcpu_rr(vcpu,vadr);
   1.603 -    data = vtlb_lookup_ex(hcb, rr.rid, vadr, DSIDE_TLB);
   1.604 +    data = vtlb_lookup(hcb, vadr, DSIDE_TLB);
   1.605      if(!data||!data->p){
   1.606          *key=1;
   1.607      }else{
   1.608 @@ -821,11 +679,9 @@ long
   1.609      unsigned long	end;	/* end of the area mapped by current entry */
   1.610      thash_data_t	*entry;
   1.611      struct vcpu *v = current;
   1.612 -    ia64_rr	vrr;
   1.613  
   1.614      vtlb = vmx_vcpu_get_vtlb(v); 
   1.615 -    vrr = vmx_vcpu_rr(v, va);
   1.616 -    entry = vtlb_lookup_ex(vtlb, vrr.rid, va, DSIDE_TLB);
   1.617 +    entry = vtlb_lookup(vtlb, va, DSIDE_TLB);
   1.618      if (entry == NULL)
   1.619  	return -EFAULT;
   1.620  
     2.1 --- a/xen/arch/ia64/vmx/vmx_hypercall.c	Fri Mar 10 08:25:54 2006 -0700
     2.2 +++ b/xen/arch/ia64/vmx/vmx_hypercall.c	Fri Mar 10 08:52:12 2006 -0700
     2.3 @@ -36,7 +36,7 @@
     2.4  #include <xen/domain.h>
     2.5  
     2.6  extern long do_sched_op(int cmd, unsigned long arg);
     2.7 -
     2.8 +extern unsigned long domain_mpa_to_imva(struct domain *,unsigned long mpaddr);
     2.9  
    2.10  void hyper_not_support(void)
    2.11  {
    2.12 @@ -126,7 +126,7 @@ void hyper_xen_version(void)
    2.13      vcpu_set_gr(vcpu, 8, ret, 0);
    2.14      vmx_vcpu_increment_iip(vcpu);
    2.15  }
    2.16 -
    2.17 +/*
    2.18  static int do_lock_page(VCPU *vcpu, u64 va, u64 lock)
    2.19  {
    2.20      ia64_rr rr;
    2.21 @@ -135,7 +135,7 @@ static int do_lock_page(VCPU *vcpu, u64 
    2.22      rr = vmx_vcpu_rr(vcpu, va);
    2.23      return thash_lock_tc(hcb, va ,1U<<rr.ps, rr.rid, DSIDE_TLB, lock);
    2.24  }
    2.25 -
    2.26 + */
    2.27  /*
    2.28   * Lock guest page in vTLB, so that it's not relinquished by recycle
    2.29   * session when HV is servicing that hypercall.
     3.1 --- a/xen/arch/ia64/vmx/vmx_init.c	Fri Mar 10 08:25:54 2006 -0700
     3.2 +++ b/xen/arch/ia64/vmx/vmx_init.c	Fri Mar 10 08:52:12 2006 -0700
     3.3 @@ -96,7 +96,7 @@ identify_vmx_feature(void)
     3.4  	if (!(vp_env_info & VP_OPCODE))
     3.5  		printk("WARNING: no opcode provided from hardware(%lx)!!!\n", vp_env_info);
     3.6  	vm_order = get_order(buffer_size);
     3.7 -	printk("vm buffer size: %ld, order: %ld\n", buffer_size, vm_order);
     3.8 +	printk("vm buffer size: %ld, order: %d\n", buffer_size, vm_order);
     3.9  
    3.10  	vmx_enabled = 1;
    3.11  no_vti:
    3.12 @@ -161,7 +161,7 @@ static vpd_t *alloc_vpd(void)
    3.13  		return NULL;
    3.14  	}
    3.15  
    3.16 -	printk("vpd base: 0x%lx, vpd size:%d\n", vpd, sizeof(vpd_t));
    3.17 +	printk("vpd base: 0x%lp, vpd size:%ld\n", vpd, sizeof(vpd_t));
    3.18  	memset(vpd, 0, VPD_SIZE);
    3.19  	/* CPUID init */
    3.20  	for (i = 0; i < 5; i++)
    3.21 @@ -234,7 +234,7 @@ vmx_load_state(struct vcpu *v)
    3.22  {
    3.23  	u64 status;
    3.24  
    3.25 -	status = ia64_pal_vp_restore(v->arch.privregs, 0);
    3.26 +	status = ia64_pal_vp_restore((u64)v->arch.privregs, 0);
    3.27  	if (status != PAL_STATUS_SUCCESS)
    3.28  		panic("Restore vp status failed\n");
    3.29  
    3.30 @@ -307,7 +307,6 @@ io_range_t io_ranges[] = {
    3.31  
    3.32  int vmx_alloc_contig_pages(struct domain *d)
    3.33  {
    3.34 -	unsigned int order;
    3.35  	unsigned long i, j, start,tmp, end, pgnr, conf_nr;
    3.36  	struct page_info *page;
    3.37  	struct vcpu *v = d->vcpu[0];
     4.1 --- a/xen/arch/ia64/vmx/vmx_irq_ia64.c	Fri Mar 10 08:25:54 2006 -0700
     4.2 +++ b/xen/arch/ia64/vmx/vmx_irq_ia64.c	Fri Mar 10 08:52:12 2006 -0700
     4.3 @@ -128,6 +128,6 @@ vmx_ia64_handle_irq (ia64_vector vector,
     4.4  	 * come through until ia64_eoi() has been done.
     4.5  	 */
     4.6  	vmx_irq_exit();
     4.7 -	if (current && wake_dom0 != dom0 ) 
     4.8 +	if (wake_dom0 && current->domain != dom0 ) 
     4.9  		vcpu_wake(dom0->vcpu[0]);
    4.10  }
     5.1 --- a/xen/arch/ia64/vmx/vmx_phy_mode.c	Fri Mar 10 08:25:54 2006 -0700
     5.2 +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c	Fri Mar 10 08:52:12 2006 -0700
     5.3 @@ -218,7 +218,7 @@ vmx_load_all_rr(VCPU *vcpu)
     5.4      extern void * pal_vaddr;
     5.5      vmx_switch_rr7(vmx_vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),(void *)vcpu->domain->shared_info,
     5.6                  (void *)vcpu->arch.privregs,
     5.7 -                ( void *)vcpu->arch.vtlb->ts->vhpt->hash, pal_vaddr );
     5.8 +                (void *)vcpu->arch.vtlb->vhpt->hash, pal_vaddr );
     5.9      ia64_set_pta(vcpu->arch.arch_vmx.mpta);
    5.10  
    5.11  	ia64_srlz_d();
    5.12 @@ -260,10 +260,10 @@ switch_to_virtual_rid(VCPU *vcpu)
    5.13  
    5.14      psr=ia64_clear_ic();
    5.15  
    5.16 -    mrr=vmx_vcpu_rr(vcpu,VRN0<<VRN_SHIFT);
    5.17 +    vcpu_get_rr(vcpu,VRN0<<VRN_SHIFT,&mrr.rrval);
    5.18      ia64_set_rr(VRN0<<VRN_SHIFT, vmx_vrrtomrr(vcpu, mrr.rrval));
    5.19      ia64_srlz_d();
    5.20 -    mrr=vmx_vcpu_rr(vcpu,VRN4<<VRN_SHIFT);
    5.21 +    vcpu_get_rr(vcpu,VRN4<<VRN_SHIFT,&mrr.rrval);
    5.22      ia64_set_rr(VRN4<<VRN_SHIFT, vmx_vrrtomrr(vcpu, mrr.rrval));
    5.23      ia64_srlz_d();
    5.24      ia64_set_psr(psr);
     6.1 --- a/xen/arch/ia64/vmx/vmx_process.c	Fri Mar 10 08:25:54 2006 -0700
     6.2 +++ b/xen/arch/ia64/vmx/vmx_process.c	Fri Mar 10 08:52:12 2006 -0700
     6.3 @@ -292,10 +292,9 @@ IA64FAULT
     6.4  vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs)
     6.5  {
     6.6      IA64_PSR vpsr;
     6.7 -    CACHE_LINE_TYPE type=ISIDE_TLB;
     6.8 +    int type=ISIDE_TLB;
     6.9      u64 vhpt_adr, gppa;
    6.10      ISR misr;
    6.11 -    ia64_rr vrr;
    6.12  //    REGS *regs;
    6.13      thash_cb_t *vtlb;
    6.14      thash_data_t *data;
    6.15 @@ -330,16 +329,17 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
    6.16          physical_tlb_miss(v, vadr, vec);
    6.17          return IA64_FAULT;
    6.18      }
    6.19 -    vrr = vmx_vcpu_rr(v, vadr);
    6.20      if(vec == 1) type = ISIDE_TLB;
    6.21      else if(vec == 2) type = DSIDE_TLB;
    6.22      else panic("wrong vec\n");
    6.23  
    6.24  //    prepare_if_physical_mode(v);
    6.25  
    6.26 -    if((data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type))!=0){
    6.27 -	gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
    6.28 -        if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
    6.29 +    if((data=vtlb_lookup(vtlb, vadr,type))!=0){
    6.30 +//	gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
    6.31 +//        if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
    6.32 +        if(v->domain!=dom0 && data->io && type==DSIDE_TLB ){
    6.33 +        	gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
    6.34              emulate_io_inst(v, gppa, data->ma);
    6.35              return IA64_FAULT;
    6.36          }
    6.37 @@ -353,7 +353,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
    6.38          }
    6.39          else{
    6.40   */
    6.41 -            thash_vhpt_insert(vtlb->ts->vhpt,data,vadr);
    6.42 +            thash_vhpt_insert(vtlb->vhpt,data->page_flags, data->itir ,vadr);
    6.43  //        }
    6.44  //	    }
    6.45      }else if(type == DSIDE_TLB){
    6.46 @@ -374,8 +374,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
    6.47              }
    6.48          } else{
    6.49              vmx_vcpu_thash(v, vadr, &vhpt_adr);
    6.50 -            vrr=vmx_vcpu_rr(v,vhpt_adr);
    6.51 -            if(vhpt_lookup(vhpt_adr) ||  vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB)){
    6.52 +            if(vhpt_lookup(vhpt_adr) ||  vtlb_lookup(vtlb, vhpt_adr, DSIDE_TLB)){
    6.53                  if(vpsr.ic){
    6.54                      vcpu_set_isr(v, misr.val);
    6.55                      dtlb_fault(v, vadr);
    6.56 @@ -417,8 +416,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
    6.57              return IA64_FAULT;
    6.58          } else{
    6.59              vmx_vcpu_thash(v, vadr, &vhpt_adr);
    6.60 -            vrr=vmx_vcpu_rr(v,vhpt_adr);
    6.61 -            if(vhpt_lookup(vhpt_adr) || vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB)){
    6.62 +            if(vhpt_lookup(vhpt_adr) || vtlb_lookup(vtlb, vhpt_adr, DSIDE_TLB)){
    6.63                  if(!vpsr.ic){
    6.64                      misr.ni=1;
    6.65                  }
     7.1 --- a/xen/arch/ia64/vmx/vmx_vcpu.c	Fri Mar 10 08:25:54 2006 -0700
     7.2 +++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Fri Mar 10 08:52:12 2006 -0700
     7.3 @@ -204,32 +204,24 @@ vmx_vcpu_get_plat(VCPU *vcpu)
     7.4  }
     7.5  
     7.6  
     7.7 -ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr)
     7.8 -{
     7.9 -        return (ia64_rr)VMX(vcpu,vrr[vadr>>61]);
    7.10 -}
    7.11 -
    7.12  
    7.13  IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
    7.14  {
    7.15      ia64_rr oldrr,newrr;
    7.16      thash_cb_t *hcb;
    7.17      extern void * pal_vaddr;
    7.18 -    oldrr=vmx_vcpu_rr(vcpu,reg);
    7.19 +    vcpu_get_rr(vcpu, reg, &oldrr.rrval);
    7.20      newrr.rrval=val;
    7.21 -#if 1
    7.22      if(oldrr.ps!=newrr.ps){
    7.23          hcb = vmx_vcpu_get_vtlb(vcpu);
    7.24          thash_purge_all(hcb);
    7.25      }
    7.26 -#endif
    7.27      VMX(vcpu,vrr[reg>>61]) = val;
    7.28 -
    7.29      switch((u64)(reg>>61)) {
    7.30      case VRN7:
    7.31 -       vmx_switch_rr7(vmx_vrrtomrr(vcpu,val),vcpu->domain->shared_info,
    7.32 +        vmx_switch_rr7(vmx_vrrtomrr(vcpu,val),vcpu->domain->shared_info,
    7.33          (void *)vcpu->arch.privregs,
    7.34 -       ( void *)vcpu->arch.vtlb->ts->vhpt->hash, pal_vaddr );
    7.35 +        (void *)vcpu->arch.vtlb->vhpt->hash, pal_vaddr );
    7.36         break;
    7.37      default:
    7.38          ia64_set_rr(reg,vmx_vrrtomrr(vcpu,val));
    7.39 @@ -275,7 +267,7 @@ check_entry(u64 va, u64 ps, char *str)
    7.40  u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa)
    7.41  {
    7.42      ia64_rr rr,rr1;
    7.43 -    rr=vmx_vcpu_rr(vcpu,ifa);
    7.44 +    vcpu_get_rr(vcpu,ifa,&rr.rrval);
    7.45      rr1.rrval=0;
    7.46      rr1.ps=rr.ps;
    7.47      rr1.rid=rr.rid;
     8.1 --- a/xen/arch/ia64/vmx/vmx_virt.c	Fri Mar 10 08:25:54 2006 -0700
     8.2 +++ b/xen/arch/ia64/vmx/vmx_virt.c	Fri Mar 10 08:52:12 2006 -0700
     8.3 @@ -572,7 +572,7 @@ IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INS
     8.4     }
     8.5  #endif // VMAL_NO_FAULT_CHECK
     8.6  
     8.7 -    return (vmx_vcpu_itr_d(vcpu,pte,itir,ifa,slot));
     8.8 +    return (vmx_vcpu_itr_d(vcpu,slot,pte,itir,ifa));
     8.9  }
    8.10  
    8.11  IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
    8.12 @@ -631,7 +631,7 @@ IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INS
    8.13     }
    8.14  #endif // VMAL_NO_FAULT_CHECK
    8.15  
    8.16 -   return (vmx_vcpu_itr_i(vcpu,pte,itir,ifa,slot));
    8.17 +   return (vmx_vcpu_itr_i(vcpu,slot,pte,itir,ifa));
    8.18  }
    8.19  
    8.20  IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
    8.21 @@ -972,7 +972,7 @@ IA64FAULT vmx_emul_mov_from_rr(VCPU *vcp
    8.22          rsv_reg_field(vcpu);
    8.23      }
    8.24  #endif  //CHECK_FAULT
    8.25 -    vmx_vcpu_get_rr(vcpu,r3,&r1);
    8.26 +    vcpu_get_rr(vcpu,r3,&r1);
    8.27      return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
    8.28  }
    8.29  
     9.1 --- a/xen/arch/ia64/vmx/vtlb.c	Fri Mar 10 08:25:54 2006 -0700
     9.2 +++ b/xen/arch/ia64/vmx/vtlb.c	Fri Mar 10 08:52:12 2006 -0700
     9.3 @@ -32,7 +32,7 @@
     9.4  #include <asm/tlbflush.h>
     9.5  #define  MAX_CCH_LENGTH     40
     9.6  
     9.7 -thash_data_t *__alloc_chain(thash_cb_t *, thash_data_t *);
     9.8 +thash_data_t *__alloc_chain(thash_cb_t *);
     9.9  
    9.10  static void cch_mem_init(thash_cb_t *hcb)
    9.11  {
    9.12 @@ -71,36 +71,25 @@ static void cch_free(thash_cb_t *hcb, th
    9.13   * Check to see if the address rid:va is translated by the TLB
    9.14   */
    9.15  
    9.16 -static int __is_tr_translated(thash_data_t *tlb, u64 rid, u64 va, CACHE_LINE_TYPE cl)
    9.17 +static inline int __is_tr_translated(thash_data_t *trp, u64 rid, u64 va)
    9.18  {
    9.19 -    u64  size;
    9.20 -    size = PSIZE(tlb->ps);
    9.21 -    if(tlb->vadr&(size-1))
    9.22 -        while(1);
    9.23 -    if ((tlb->rid == rid) && ((va-tlb->vadr)<size))
    9.24 -        return 1;
    9.25 -    else
    9.26 -        return 0;
    9.27 +    return ((trp->p) && (trp->rid == rid) && ((va-trp->vadr)<PSIZE(trp->ps)));
    9.28  }
    9.29  
    9.30  /*
    9.31   * Only for GUEST TR format.
    9.32   */
    9.33  static int
    9.34 -__is_tr_overlap(thash_cb_t *hcb,thash_data_t *entry,int rid, char cl, u64 sva, u64 eva)
    9.35 +__is_tr_overlap(thash_data_t *trp, u64 rid, u64 sva, u64 eva)
    9.36  {
    9.37 -    uint64_t size, sa1, ea1;
    9.38 +    uint64_t sa1, ea1;
    9.39  
    9.40 -//    if ( entry->invalid || entry->rid != rid || (entry->cl != cl ) ) {
    9.41 -    if ( entry->invalid || entry->rid != rid ) {
    9.42 +    if (!trp->p || trp->rid != rid ) {
    9.43          return 0;
    9.44      }
    9.45 -    size = PSIZE(entry->ps);
    9.46 -    sa1 = entry->vadr;
    9.47 -    ea1 = sa1 + size -1;
    9.48 +    sa1 = trp->vadr;
    9.49 +    ea1 = sa1 + PSIZE(trp->ps) -1;
    9.50      eva -= 1;
    9.51 -    if(sa1&(size-1))
    9.52 -        while(1);
    9.53      if ( (sva>ea1) || (sa1>eva) )
    9.54          return 0;
    9.55      else
    9.56 @@ -108,90 +97,6 @@ static int
    9.57  
    9.58  }
    9.59  
    9.60 -static void __rem_tr (thash_cb_t *hcb, thash_data_t *tr)
    9.61 -{
    9.62 -/*
    9.63 -    if ( hcb->remove_notifier ) {
    9.64 -        (hcb->remove_notifier)(hcb,tr);
    9.65 -    }
    9.66 -*/
    9.67 -    tr->invalid = 1;
    9.68 -}
    9.69 -
    9.70 -static inline void __set_tr (thash_data_t *tr, thash_data_t *data, int idx)
    9.71 -{
    9.72 -    *tr = *data;
    9.73 -    tr->tr_idx = idx;
    9.74 -}
    9.75 -
    9.76 -
    9.77 -static void __init_tr(thash_cb_t *hcb)
    9.78 -{
    9.79 -    int i;
    9.80 -    thash_data_t *tr;
    9.81 -
    9.82 -    for ( i=0, tr = &ITR(hcb,0); i<NITRS; i++ ) {
    9.83 -        tr[i].invalid = 1;
    9.84 -    }
    9.85 -    for ( i=0, tr = &DTR(hcb,0); i<NDTRS; i++ ) {
    9.86 -        tr[i].invalid = 1;
    9.87 -    }
    9.88 -}
    9.89 -
    9.90 -/*
    9.91 - * Replace TR entry.
    9.92 - */
    9.93 -static void rep_tr(thash_cb_t *hcb,thash_data_t *insert, int idx)
    9.94 -{
    9.95 -    thash_data_t *tr;
    9.96 -
    9.97 -    if ( insert->cl == ISIDE_TLB ) {
    9.98 -        tr = &ITR(hcb,idx);
    9.99 -    }
   9.100 -    else {
   9.101 -        tr = &DTR(hcb,idx);
   9.102 -    }
   9.103 -    if ( !INVALID_TR(tr) ) {
   9.104 -        __rem_tr(hcb, tr);
   9.105 -    }
   9.106 -    __set_tr (tr, insert, idx);
   9.107 -}
   9.108 -
   9.109 -/*
   9.110 - * remove TR entry.
   9.111 - */
   9.112 -/*
   9.113 -static void rem_tr(thash_cb_t *hcb,CACHE_LINE_TYPE cl, int idx)
   9.114 -{
   9.115 -    thash_data_t *tr;
   9.116 -
   9.117 -    if ( cl == ISIDE_TLB ) {
   9.118 -        tr = &ITR(hcb,idx);
   9.119 -    }
   9.120 -    else {
   9.121 -        tr = &DTR(hcb,idx);
   9.122 -    }
   9.123 -    if ( !INVALID_TR(tr) ) {
   9.124 -        __rem_tr(hcb, tr);
   9.125 -    }
   9.126 -}
   9.127 - */
   9.128 -/*
   9.129 - * Delete an thash entry in collision chain.
   9.130 - *  prev: the previous entry.
   9.131 - *  rem: the removed entry.
   9.132 - */
   9.133 -/*
   9.134 -static void __rem_chain(thash_cb_t *hcb, thash_data_t *prev, thash_data_t *rem)
   9.135 -{
   9.136 -    //prev->next = rem->next;
   9.137 -    if ( hcb->remove_notifier ) {
   9.138 -         (hcb->remove_notifier)(hcb,rem);
   9.139 -    }
   9.140 -    cch_free (hcb, rem);
   9.141 -}
   9.142 - */
   9.143 -
   9.144  /*
   9.145   * Delete an thash entry leading collision chain.
   9.146   */
   9.147 @@ -212,71 +117,37 @@ static void __rem_hash_head(thash_cb_t *
   9.148      }
   9.149  }
   9.150  
   9.151 -thash_data_t *__vtr_lookup(thash_cb_t *hcb,
   9.152 -            u64 rid, u64 va,
   9.153 -            CACHE_LINE_TYPE cl)
   9.154 +thash_data_t *__vtr_lookup(VCPU *vcpu, u64 va, int is_data)
   9.155  {
   9.156 -    thash_data_t    *tr;
   9.157 -    int   num,i;
   9.158  
   9.159 -    if ( cl == ISIDE_TLB ) {
   9.160 -        tr = &ITR(hcb,0);
   9.161 -        num = NITRS;
   9.162 +    thash_data_t  *trp;
   9.163 +    int  i;
   9.164 +    u64 rid;
   9.165 +    vcpu_get_rr(vcpu, va, &rid);
   9.166 +    rid = rid&RR_RID_MASK;;
   9.167 +    if (is_data) {
   9.168 +        if (vcpu_quick_region_check(vcpu->arch.dtr_regions,va)) {
   9.169 +            for (trp =(thash_data_t *) vcpu->arch.dtrs,i=0; i<NDTRS; i++, trp++) {
   9.170 +                if (__is_tr_translated(trp, rid, va)) {
   9.171 +                    return trp;
   9.172 +                }
   9.173 +            }
   9.174 +        }
   9.175      }
   9.176      else {
   9.177 -        tr = &DTR(hcb,0);
   9.178 -        num = NDTRS;
   9.179 -    }
   9.180 -    for ( i=0; i<num; i++ ) {
   9.181 -        if ( !INVALID_TR(&tr[i]) &&
   9.182 -            __is_tr_translated(&tr[i], rid, va, cl) )
   9.183 -            return &tr[i];
   9.184 +        if (vcpu_quick_region_check(vcpu->arch.itr_regions,va)) {
   9.185 +            for (trp =(thash_data_t *) vcpu->arch.itrs,i=0; i<NITRS; i++, trp++) {
   9.186 +                if (__is_tr_translated(trp, rid, va)) {
   9.187 +                    return trp;
   9.188 +                }
   9.189 +            }
   9.190 +        }
   9.191      }
   9.192      return NULL;
   9.193  }
   9.194  
   9.195  
   9.196  /*
   9.197 - * Find overlap VHPT entry within current collision chain
   9.198 - * base on internal priv info.
   9.199 - */
   9.200 -/*
   9.201 -static inline thash_data_t* _vhpt_next_overlap_in_chain(thash_cb_t *hcb)
   9.202 -{
   9.203 -    thash_data_t    *cch;
   9.204 -    thash_internal_t *priv = &hcb->priv;
   9.205 -
   9.206 -
   9.207 -    for (cch=priv->cur_cch; cch; cch = cch->next) {
   9.208 -        if ( priv->tag == cch->etag  ) {
   9.209 -            return cch;
   9.210 -        }
   9.211 -    }
   9.212 -    return NULL;
   9.213 -}
   9.214 -*/
   9.215 -/*
   9.216 - * Find overlap TLB/VHPT entry within current collision chain
   9.217 - * base on internal priv info.
   9.218 - */
   9.219 -/*
   9.220 -static thash_data_t *_vtlb_next_overlap_in_chain(thash_cb_t *hcb)
   9.221 -{
   9.222 -    thash_data_t    *cch;
   9.223 -    thash_internal_t *priv = &hcb->priv;
   9.224 -
   9.225 -    // Find overlap TLB entry
   9.226 -    for (cch=priv->cur_cch; cch; cch = cch->next) {
   9.227 -        if ( ( cch->tc ? priv->s_sect.tc : priv->s_sect.tr )  &&
   9.228 -            __is_translated( cch, priv->rid, priv->_curva, priv->cl)) {
   9.229 -            return cch;
   9.230 -        }
   9.231 -    }
   9.232 -    return NULL;
   9.233 -}
   9.234 - */
   9.235 -
   9.236 -/*
   9.237   * Get the machine format of VHPT entry.
   9.238   *    PARAS:
   9.239   *  1: tlb: means the tlb format hash entry converting to VHPT.
   9.240 @@ -292,24 +163,16 @@ static thash_data_t *_vtlb_next_overlap_
   9.241   *  0/1: means successful or fail.
   9.242   *
   9.243   */
   9.244 -int __tlb_to_vhpt(thash_cb_t *hcb,
   9.245 -            thash_data_t *tlb, u64 va,
   9.246 -            thash_data_t *vhpt)
   9.247 +int __tlb_to_vhpt(thash_cb_t *hcb, thash_data_t *vhpt, u64 va)
   9.248  {
   9.249      u64 padr,pte;
   9.250 -//    ia64_rr vrr;
   9.251      ASSERT ( hcb->ht == THASH_VHPT );
   9.252 -//    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
   9.253 -    padr = tlb->ppn >>(tlb->ps-ARCH_PAGE_SHIFT)<<tlb->ps;
   9.254 -    padr += va&((1UL<<tlb->ps)-1);
   9.255 +    padr = vhpt->ppn >>(vhpt->ps-ARCH_PAGE_SHIFT)<<vhpt->ps;
   9.256 +    padr += va&((1UL<<vhpt->ps)-1);
   9.257      pte=lookup_domain_mpa(current->domain,padr);
   9.258      if((pte>>56))
   9.259          return 0;
   9.260 -    // TODO with machine discontinuous address space issue.
   9.261      vhpt->etag = ia64_ttag(va);
   9.262 -    //vhpt->ti = 0;
   9.263 -    vhpt->itir = tlb->itir & ~ITIR_RV_MASK;
   9.264 -    vhpt->page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
   9.265      vhpt->ps = PAGE_SHIFT;
   9.266      vhpt->ppn = (pte&((1UL<<IA64_MAX_PHYS_BITS)-(1UL<<PAGE_SHIFT)))>>ARCH_PAGE_SHIFT;
   9.267      vhpt->next = 0;
   9.268 @@ -331,17 +194,20 @@ static void thash_remove_cch(thash_cb_t 
   9.269  
   9.270  /*  vhpt only has entries with PAGE_SIZE page size */
   9.271  
   9.272 -void thash_vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
   9.273 +void thash_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
   9.274  {
   9.275      thash_data_t   vhpt_entry, *hash_table, *cch;
   9.276 +    vhpt_entry.page_flags = pte & ~PAGE_FLAGS_RV_MASK;
   9.277 +    vhpt_entry.itir=itir;
   9.278 +
   9.279  //    ia64_rr vrr;
   9.280  
   9.281 -    if ( !__tlb_to_vhpt(hcb, entry, va, &vhpt_entry) ) {
   9.282 +    if ( !__tlb_to_vhpt(hcb, &vhpt_entry, ifa) ) {
   9.283          return;
   9.284      //panic("Can't convert to machine VHPT entry\n");
   9.285      }
   9.286  
   9.287 -    hash_table = (thash_data_t *)ia64_thash(va);
   9.288 +    hash_table = (thash_data_t *)ia64_thash(ifa);
   9.289      if( INVALID_VHPT(hash_table) ) {
   9.290          *hash_table = vhpt_entry;
   9.291          hash_table->next = 0;
   9.292 @@ -358,6 +224,7 @@ void thash_vhpt_insert(thash_cb_t *hcb, 
   9.293          }
   9.294          cch = cch->next;
   9.295      }
   9.296 +
   9.297      if(hash_table->len>=MAX_CCN_DEPTH){
   9.298      	thash_remove_cch(hcb, hash_table);
   9.299      	cch = cch_alloc(hcb);
   9.300 @@ -367,9 +234,9 @@ void thash_vhpt_insert(thash_cb_t *hcb, 
   9.301          hash_table->next = cch;
   9.302      	return;
   9.303      }
   9.304 -	
   9.305 +
   9.306      // TODO: Add collision chain length limitation.
   9.307 -     cch = __alloc_chain(hcb,entry);
   9.308 +     cch = __alloc_chain(hcb);
   9.309       if(cch == NULL){
   9.310             *hash_table = vhpt_entry;
   9.311              hash_table->next = 0;
   9.312 @@ -377,10 +244,8 @@ void thash_vhpt_insert(thash_cb_t *hcb, 
   9.313              *cch = *hash_table;
   9.314              *hash_table = vhpt_entry;
   9.315              hash_table->next = cch;
   9.316 -	    hash_table->len = cch->len + 1;
   9.317 -	    cch->len = 0;	
   9.318 -//            if(hash_table->tag==hash_table->next->tag)
   9.319 -//                while(1);
   9.320 +    	    hash_table->len = cch->len + 1;
   9.321 +    	    cch->len = 0;
   9.322  
   9.323      }
   9.324      return /*hash_table*/;
   9.325 @@ -414,7 +279,7 @@ static void vtlb_purge(thash_cb_t *hcb, 
   9.326      thash_data_t *hash_table, *prev, *next;
   9.327      u64 start, end, size, tag, rid;
   9.328      ia64_rr vrr;
   9.329 -    vrr=vmx_vcpu_rr(current, va);
   9.330 +    vcpu_get_rr(current, va, &vrr.rrval);
   9.331      rid = vrr.rid;
   9.332      size = PSIZE(ps);
   9.333      start = va & (-size);
   9.334 @@ -480,36 +345,6 @@ static void vhpt_purge(thash_cb_t *hcb, 
   9.335      }
   9.336      machine_tlb_purge(va, ps);
   9.337  }
   9.338 -/*
   9.339 - * Insert an entry to hash table. 
   9.340 - *    NOTES:
   9.341 - *  1: TLB entry may be TR, TC or Foreign Map. For TR entry,
   9.342 - *     itr[]/dtr[] need to be updated too.
   9.343 - *  2: Inserting to collision chain may trigger recycling if 
   9.344 - *     the buffer for collision chain is empty.
   9.345 - *  3: The new entry is inserted at the next of hash table.
   9.346 - *     (I.e. head of the collision chain)
   9.347 - *  4: The buffer holding the entry is allocated internally
   9.348 - *     from cch_buf or just in the hash table.
   9.349 - *  5: Return the entry in hash table or collision chain.
   9.350 - *  6: Input parameter, entry, should be in TLB format.
   9.351 - *      I.e. Has va, rid, ps...
   9.352 - *  7: This API is invoked by emulating ITC/ITR and tlb_miss.
   9.353 - *
   9.354 - */
   9.355 -
   9.356 -void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx)
   9.357 -{
   9.358 -    if ( hcb->ht != THASH_TLB || entry->tc ) {
   9.359 -        panic("wrong parameter\n");
   9.360 -    }
   9.361 -    entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
   9.362 -    entry->ppn = PAGEALIGN(entry->ppn, entry->ps-12);
   9.363 -    rep_tr(hcb, entry, idx);
   9.364 -//    thash_vhpt_insert(hcb->ts->vhpt, entry, va);
   9.365 -    return ;
   9.366 -}
   9.367 -
   9.368  
   9.369  /*
   9.370   * Recycle all collisions chain in VTLB or VHPT.
   9.371 @@ -525,8 +360,8 @@ void thash_recycle_cch(thash_cb_t *hcb)
   9.372          thash_remove_cch(hcb,hash_table);
   9.373      }
   9.374  }
   9.375 -/*
   9.376 -thash_data_t *vtlb_alloc_chain(thash_cb_t *hcb,thash_data_t *entry)
   9.377 +
   9.378 +thash_data_t *__alloc_chain(thash_cb_t *hcb)
   9.379  {
   9.380      thash_data_t *cch;
   9.381  
   9.382 @@ -537,23 +372,6 @@ thash_data_t *vtlb_alloc_chain(thash_cb_
   9.383      }
   9.384      return cch;
   9.385  }
   9.386 -*/
   9.387 -
   9.388 -thash_data_t *__alloc_chain(thash_cb_t *hcb,thash_data_t *entry)
   9.389 -{
   9.390 -    thash_data_t *cch;
   9.391 -
   9.392 -    cch = cch_alloc(hcb);
   9.393 -    if(cch == NULL){
   9.394 -        // recycle
   9.395 -//        if ( hcb->recycle_notifier ) {
   9.396 -//                hcb->recycle_notifier(hcb,(u64)entry);
   9.397 -//        }
   9.398 -        thash_recycle_cch(hcb);
   9.399 -        cch = cch_alloc(hcb);
   9.400 -    }
   9.401 -    return cch;
   9.402 -}
   9.403  
   9.404  /*
   9.405   * Insert an entry into hash TLB or VHPT.
   9.406 @@ -564,474 +382,117 @@ thash_data_t *__alloc_chain(thash_cb_t *
   9.407   *  3: The caller need to make sure the new entry will not overlap
   9.408   *     with any existed entry.
   9.409   */
   9.410 -void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
   9.411 +void vtlb_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 va)
   9.412  {
   9.413      thash_data_t    *hash_table, *cch;
   9.414      /* int flag; */
   9.415      ia64_rr vrr;
   9.416      /* u64 gppn, ppns, ppne; */
   9.417 -    u64 tag;
   9.418 -    vrr=vmx_vcpu_rr(current, va);
   9.419 -    if (vrr.ps != entry->ps) {
   9.420 +    u64 tag, ps;
   9.421 +    ps = itir_ps(itir);
   9.422 +    vcpu_get_rr(current, va, &vrr.rrval);
   9.423 +    if (vrr.ps != ps) {
   9.424  //        machine_tlb_insert(hcb->vcpu, entry);
   9.425      	panic("not preferred ps with va: 0x%lx\n", va);
   9.426      	return;
   9.427      }
   9.428 -    entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
   9.429 -    entry->ppn = PAGEALIGN(entry->ppn, entry->ps-12);
   9.430      hash_table = vsa_thash(hcb->pta, va, vrr.rrval, &tag);
   9.431 -    entry->etag = tag;
   9.432      if( INVALID_TLB(hash_table) ) {
   9.433 -        *hash_table = *entry;
   9.434 +        hash_table->page_flags = pte;
   9.435 +        hash_table->itir=itir;
   9.436 +        hash_table->etag=tag;
   9.437          hash_table->next = 0;
   9.438      }
   9.439      else if (hash_table->len>=MAX_CCN_DEPTH){
   9.440          thash_remove_cch(hcb, hash_table);
   9.441          cch = cch_alloc(hcb);
   9.442          *cch = *hash_table;
   9.443 -        *hash_table = *entry;
   9.444 +        hash_table->page_flags = pte;
   9.445 +        hash_table->itir=itir;
   9.446 +        hash_table->etag=tag;
   9.447          hash_table->len = 1;
   9.448          hash_table->next = cch;
   9.449      }
   9.450 +
   9.451      else {
   9.452          // TODO: Add collision chain length limitation.
   9.453 -        cch = __alloc_chain(hcb,entry);
   9.454 +        cch = __alloc_chain(hcb);
   9.455          if(cch == NULL){
   9.456 -            *hash_table = *entry;
   9.457 +            hash_table->page_flags = pte;
   9.458 +            hash_table->itir=itir;
   9.459 +            hash_table->etag=tag;
   9.460              hash_table->next = 0;
   9.461          }else{
   9.462              *cch = *hash_table;
   9.463 -            *hash_table = *entry;
   9.464 +            hash_table->page_flags = pte;
   9.465 +            hash_table->itir=itir;
   9.466 +            hash_table->etag=tag;
   9.467              hash_table->next = cch;
   9.468              hash_table->len = cch->len + 1;
   9.469              cch->len = 0;
   9.470          }
   9.471      }
   9.472 -#if 0
   9.473 -    if(hcb->vcpu->domain->domain_id==0){
   9.474 -       thash_insert(hcb->ts->vhpt, entry, va);
   9.475 -        return;
   9.476 -    }
   9.477 -#endif
   9.478 -/*
   9.479 -    flag = 1;
   9.480 -    gppn = (POFFSET(va,entry->ps)|PAGEALIGN((entry->ppn<<12),entry->ps))>>PAGE_SHIFT;
   9.481 -    ppns = PAGEALIGN((entry->ppn<<12),entry->ps);
   9.482 -    ppne = ppns + PSIZE(entry->ps);
   9.483 -    if(((ppns<=0xa0000)&&(ppne>0xa0000))||((ppne>0xc0000)&&(ppns<=0xc0000)))
   9.484 -        flag = 0;
   9.485 -    if((__gpfn_is_mem(hcb->vcpu->domain, gppn)&&flag))
   9.486 -       thash_insert(hcb->ts->vhpt, entry, va);
   9.487 -*/
   9.488      return ;
   9.489  }
   9.490  
   9.491  
   9.492 -/*
   9.493 -void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
   9.494 -{
   9.495 -    thash_data_t    *hash_table;
   9.496 -    ia64_rr vrr;
   9.497 -    
   9.498 -    vrr = vmx_vcpu_rr(hcb->vcpu,entry->vadr);
   9.499 -    if ( entry->ps != vrr.ps && entry->tc ) {
   9.500 -        panic("Not support for multiple page size now\n");
   9.501 -    }
   9.502 -    entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
   9.503 -    entry->ppn = PAGEALIGN(entry->ppn, entry->ps-12);
   9.504 -    (hcb->ins_hash)(hcb, entry, va);
   9.505 -    
   9.506 -}
   9.507 -*/
   9.508 -/*
   9.509 -static void rem_thash(thash_cb_t *hcb, thash_data_t *entry)
   9.510 +int vtr_find_overlap(VCPU *vcpu, u64 va, u64 ps, int is_data)
   9.511  {
   9.512 -    thash_data_t    *hash_table, *p, *q;
   9.513 -    thash_internal_t *priv = &hcb->priv;
   9.514 -    int idx;
   9.515 -
   9.516 -    hash_table = priv->hash_base;
   9.517 -    if ( hash_table == entry ) {
   9.518 -//        if ( PURGABLE_ENTRY(hcb, entry) ) {
   9.519 -            __rem_hash_head (hcb, entry);
   9.520 -//        }
   9.521 -        return ;
   9.522 -    }
   9.523 -    // remove from collision chain
   9.524 -    p = hash_table;
   9.525 -    for ( q=p->next; q; q = p->next ) {
   9.526 -        if ( q == entry ){
   9.527 -//            if ( PURGABLE_ENTRY(hcb,q ) ) {
   9.528 -                p->next = q->next;
   9.529 -                __rem_chain(hcb, entry);
   9.530 -                hash_table->len--;
   9.531 -//            }
   9.532 -            return ;
   9.533 +    thash_data_t  *trp;
   9.534 +    int  i;
   9.535 +    u64 end, rid;
   9.536 +    vcpu_get_rr(vcpu, va, &rid);
   9.537 +    rid = rid&RR_RID_MASK;;
   9.538 +    end = va + PSIZE(ps);
   9.539 +    if (is_data) {
   9.540 +        if (vcpu_quick_region_check(vcpu->arch.dtr_regions,va)) {
   9.541 +            for (trp =(thash_data_t *) vcpu->arch.dtrs,i=0; i<NDTRS; i++, trp++) {
   9.542 +                if (__is_tr_overlap(trp, rid, va, end )) {
   9.543 +                    return i;
   9.544 +                }
   9.545 +            }
   9.546          }
   9.547 -        p = q;
   9.548 -    }
   9.549 -    panic("Entry not existed or bad sequence\n");
   9.550 -}
   9.551 -*/
   9.552 -/*
   9.553 -static void rem_vtlb(thash_cb_t *hcb, thash_data_t *entry)
   9.554 -{
   9.555 -    thash_data_t    *hash_table, *p, *q;
   9.556 -    thash_internal_t *priv = &hcb->priv;
   9.557 -    int idx;
   9.558 -    
   9.559 -    if ( !entry->tc ) {
   9.560 -        return rem_tr(hcb, entry->cl, entry->tr_idx);
   9.561 -    }
   9.562 -    rem_thash(hcb, entry);
   9.563 -}    
   9.564 -*/
   9.565 -int   cch_depth=0;
   9.566 -/*
   9.567 - * Purge the collision chain starting from cch.
   9.568 - * NOTE:
   9.569 - *     For those UN-Purgable entries(FM), this function will return
   9.570 - * the head of left collision chain.
   9.571 - */
   9.572 -/*
   9.573 -static thash_data_t *thash_rem_cch(thash_cb_t *hcb, thash_data_t *cch)
   9.574 -{
   9.575 -    thash_data_t *next;
   9.576 -
   9.577 -//    if ( ++cch_depth > MAX_CCH_LENGTH ) {
   9.578 -//        printf ("cch length > MAX_CCH_LENGTH, exceed the expected length\n");
   9.579 -//        while(1);
   9.580 -//   }
   9.581 -    if ( cch -> next ) {
   9.582 -        next = thash_rem_cch(hcb, cch->next);
   9.583      }
   9.584      else {
   9.585 -        next = NULL;
   9.586 -    }
   9.587 -    if ( PURGABLE_ENTRY(hcb, cch) ) {
   9.588 -        __rem_chain(hcb, cch);
   9.589 -        return next;
   9.590 -    }
   9.591 -    else {
   9.592 -        cch->next = next;
   9.593 -        return cch;
   9.594 +        if (vcpu_quick_region_check(vcpu->arch.itr_regions,va)) {
   9.595 +            for (trp =(thash_data_t *) vcpu->arch.itrs,i=0; i<NITRS; i++, trp++) {
   9.596 +                if (__is_tr_overlap(trp, rid, va, end )) {
   9.597 +                    return i;
   9.598 +                }
   9.599 +            }
   9.600 +        }
   9.601      }
   9.602 -}
   9.603 - */
   9.604 -
   9.605 -/*
   9.606 - * Purge one hash line (include the entry in hash table).
   9.607 - * Can only be called by thash_purge_all.
   9.608 - * Input:
   9.609 - *  hash: The head of collision chain (hash table)
   9.610 - *
   9.611 - */
   9.612 -/*
   9.613 -static void thash_rem_line(thash_cb_t *hcb, thash_data_t *hash)
   9.614 -{
   9.615 -    if ( INVALID_ENTRY(hcb, hash) ) return;
   9.616 -
   9.617 -    if ( hash->next ) {
   9.618 -        cch_depth = 0;
   9.619 -        hash->next = thash_rem_cch(hcb, hash->next);
   9.620 -    }
   9.621 -    // Then hash table itself.
   9.622 -    if ( PURGABLE_ENTRY(hcb, hash) ) {
   9.623 -        __rem_hash_head(hcb, hash);
   9.624 -    }
   9.625 +    return -1;
   9.626  }
   9.627 - */
   9.628 -
   9.629 -/*
   9.630 - * Find an overlap entry in hash table and its collision chain.
   9.631 - * Refer to SDM2 4.1.1.4 for overlap definition.
   9.632 - *    PARAS:
   9.633 - *  1: in: TLB format entry, rid:ps must be same with vrr[].
   9.634 - *         va & ps identify the address space for overlap lookup
   9.635 - *  2: section can be combination of TR, TC and FM. (THASH_SECTION_XX)
   9.636 - *  3: cl means I side or D side.
   9.637 - *    RETURNS:
   9.638 - *  NULL to indicate the end of findings.
   9.639 - *    NOTES:
   9.640 - *
   9.641 - */
   9.642 -
   9.643 -/*
   9.644 -thash_data_t *thash_find_overlap(thash_cb_t *hcb,
   9.645 -            thash_data_t *in, search_section_t s_sect)
   9.646 -{
   9.647 -    return (hcb->find_overlap)(hcb, in->vadr,
   9.648 -            PSIZE(in->ps), in->rid, in->cl, s_sect);
   9.649 -}
   9.650 -*/
   9.651 -
   9.652 -/*
   9.653 -static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb,
   9.654 -        u64 va, u64 size, int rid, char cl, search_section_t s_sect)
   9.655 -{
   9.656 -    thash_data_t    *hash_table;
   9.657 -    thash_internal_t *priv = &hcb->priv;
   9.658 -    u64     tag;
   9.659 -    ia64_rr vrr;
   9.660 -
   9.661 -    priv->_curva = va & ~(size-1);
   9.662 -    priv->_eva = priv->_curva + size;
   9.663 -    priv->rid = rid;
   9.664 -    vrr = vmx_vcpu_rr(hcb->vcpu,va);
   9.665 -    priv->ps = vrr.ps;
   9.666 -    hash_table = vsa_thash(hcb->pta, priv->_curva, vrr.rrval, &tag);
   9.667 -    priv->s_sect = s_sect;
   9.668 -    priv->cl = cl;
   9.669 -    priv->_tr_idx = 0;
   9.670 -    priv->hash_base = hash_table;
   9.671 -    priv->cur_cch = hash_table;
   9.672 -    return (hcb->next_overlap)(hcb);
   9.673 -}
   9.674 -*/
   9.675  
   9.676  /*
   9.677 -static thash_data_t *vhpt_find_overlap(thash_cb_t *hcb,
   9.678 -        u64 va, u64 size, int rid, char cl, search_section_t s_sect)
   9.679 + * Purge entries in VTLB and VHPT
   9.680 + */
   9.681 +void thash_purge_entries(thash_cb_t *hcb, u64 va, u64 ps)
   9.682  {
   9.683 -    thash_data_t    *hash_table;
   9.684 -    thash_internal_t *priv = &hcb->priv;
   9.685 -    u64     tag;
   9.686 -    ia64_rr vrr;
   9.687 -
   9.688 -    priv->_curva = va & ~(size-1);
   9.689 -    priv->_eva = priv->_curva + size;
   9.690 -    priv->rid = rid;
   9.691 -    vrr = vmx_vcpu_rr(hcb->vcpu,va);
   9.692 -    priv->ps = vrr.ps;
   9.693 -    hash_table = ia64_thash(priv->_curva);
   9.694 -    tag = ia64_ttag(priv->_curva);
   9.695 -    priv->tag = tag;
   9.696 -    priv->hash_base = hash_table;
   9.697 -    priv->cur_cch = hash_table;
   9.698 -    return (hcb->next_overlap)(hcb);
   9.699 -}
   9.700 -*/
   9.701 -
   9.702 -
   9.703 -thash_data_t *vtr_find_overlap(thash_cb_t *hcb, thash_data_t *data, char cl)
   9.704 -{
   9.705 -    thash_data_t    *tr;
   9.706 -    int  i,num;
   9.707 -    u64 end;
   9.708 -
   9.709 -    if (cl == ISIDE_TLB ) {
   9.710 -        num = NITRS;
   9.711 -        tr = &ITR(hcb,0);
   9.712 -    }
   9.713 -    else {
   9.714 -        num = NDTRS;
   9.715 -        tr = &DTR(hcb,0);
   9.716 -    }
   9.717 -    end=data->vadr + PSIZE(data->ps);
   9.718 -    for (i=0; i<num; i++ ) {
   9.719 -        if ( __is_tr_overlap(hcb, &tr[i], data->rid, cl, data->vadr, end )) {
   9.720 -            return &tr[i];
   9.721 -        }
   9.722 -    }
   9.723 -    return NULL;
   9.724 +    vtlb_purge(hcb, va, ps);
   9.725 +    vhpt_purge(hcb->vhpt, va, ps);
   9.726  }
   9.727  
   9.728  
   9.729  /*
   9.730 -static thash_data_t *vtr_find_next_overlap(thash_cb_t *hcb)
   9.731 -{
   9.732 -    thash_data_t    *tr;
   9.733 -    thash_internal_t *priv = &hcb->priv;
   9.734 -    int   num;
   9.735 -
   9.736 -    if ( priv->cl == ISIDE_TLB ) {
   9.737 -        num = NITRS;
   9.738 -        tr = &ITR(hcb,0);
   9.739 -    }
   9.740 -    else {
   9.741 -        num = NDTRS;
   9.742 -        tr = &DTR(hcb,0);
   9.743 -    }
   9.744 -    for (; priv->_tr_idx < num; priv->_tr_idx ++ ) {
   9.745 -        if ( __is_tr_overlap(hcb, &tr[priv->_tr_idx],
   9.746 -                priv->rid, priv->cl,
   9.747 -                priv->_curva, priv->_eva) ) {
   9.748 -            return &tr[priv->_tr_idx++];
   9.749 -        }
   9.750 -    }
   9.751 -    return NULL;
   9.752 -}
   9.753 -*/
   9.754 -
   9.755 -/*
   9.756 - * Similar with vtlb_next_overlap but find next entry.
   9.757 - *    NOTES:
   9.758 - *  Intermediate position information is stored in hcb->priv.
   9.759 - */
   9.760 -/*
   9.761 -static thash_data_t *vtlb_next_overlap(thash_cb_t *hcb)
   9.762 -{
   9.763 -    thash_data_t    *ovl;
   9.764 -    thash_internal_t *priv = &hcb->priv;
   9.765 -    u64 addr,rr_psize,tag;
   9.766 -    ia64_rr vrr;
   9.767 -
   9.768 -    if ( priv->s_sect.tr ) {
   9.769 -        ovl = vtr_find_next_overlap (hcb);
   9.770 -        if ( ovl ) return ovl;
   9.771 -        priv->s_sect.tr = 0;
   9.772 -    }
   9.773 -    if ( priv->s_sect.v == 0 ) return NULL;
   9.774 -    vrr = vmx_vcpu_rr(hcb->vcpu,priv->_curva);
   9.775 -    rr_psize = PSIZE(vrr.ps);
   9.776 -
   9.777 -    while ( priv->_curva < priv->_eva ) {
   9.778 -        if ( !INVALID_ENTRY(hcb, priv->hash_base) ) {
   9.779 -            ovl = _vtlb_next_overlap_in_chain(hcb);
   9.780 -            if ( ovl ) {
   9.781 -                priv->cur_cch = ovl->next;
   9.782 -                return ovl;
   9.783 -            }
   9.784 -        }
   9.785 -        priv->_curva += rr_psize;
   9.786 -        priv->hash_base = vsa_thash( hcb->pta, priv->_curva, vrr.rrval, &tag);
   9.787 -        priv->cur_cch = priv->hash_base;
   9.788 -    }
   9.789 -    return NULL;
   9.790 -}
   9.791 - */
   9.792 -
   9.793 -
   9.794 -/*
   9.795 -static thash_data_t *vhpt_next_overlap(thash_cb_t *hcb)
   9.796 -{
   9.797 -    thash_data_t    *ovl;
   9.798 -    thash_internal_t *priv = &hcb->priv;
   9.799 -    u64 addr,rr_psize;
   9.800 -    ia64_rr vrr;
   9.801 -
   9.802 -    vrr = vmx_vcpu_rr(hcb->vcpu,priv->_curva);
   9.803 -    rr_psize = PSIZE(vrr.ps);
   9.804 -
   9.805 -    while ( priv->_curva < priv->_eva ) {
   9.806 -        if ( !INVALID_ENTRY(hcb, priv->hash_base) ) {
   9.807 -            ovl = _vhpt_next_overlap_in_chain(hcb);
   9.808 -            if ( ovl ) {
   9.809 -                priv->cur_cch = ovl->next;
   9.810 -                return ovl;
   9.811 -            }
   9.812 -        }
   9.813 -        priv->_curva += rr_psize;
   9.814 -        priv->hash_base = ia64_thash(priv->_curva);
   9.815 -        priv->tag = ia64_ttag(priv->_curva);
   9.816 -        priv->cur_cch = priv->hash_base;
   9.817 -    }
   9.818 -    return NULL;
   9.819 -}
   9.820 -*/
   9.821 -
   9.822 -/*
   9.823 - * Find and purge overlap entries in hash table and its collision chain.
   9.824 - *    PARAS:
   9.825 - *  1: in: TLB format entry, rid:ps must be same with vrr[].
   9.826 - *         rid, va & ps identify the address space for purge
   9.827 - *  2: section can be combination of TR, TC and FM. (thash_SECTION_XX)
   9.828 - *  3: cl means I side or D side.
   9.829 - *    NOTES:
   9.830 - *
   9.831 - */
   9.832 -void thash_purge_entries(thash_cb_t *hcb,
   9.833 -            thash_data_t *in, search_section_t p_sect)
   9.834 -{
   9.835 -    return thash_purge_entries_ex(hcb, in->rid, in->vadr,
   9.836 -            in->ps, p_sect, in->cl);
   9.837 -}
   9.838 -
   9.839 -void thash_purge_entries_ex(thash_cb_t *hcb,
   9.840 -            u64 rid, u64 va, u64 ps,
   9.841 -            search_section_t p_sect,
   9.842 -            CACHE_LINE_TYPE cl)
   9.843 -{
   9.844 -/*
   9.845 -    thash_data_t    *ovl;
   9.846 -
   9.847 -    ovl = (hcb->find_overlap)(hcb, va, PSIZE(ps), rid, cl, p_sect);
   9.848 -    while ( ovl != NULL ) {
   9.849 -        (hcb->rem_hash)(hcb, ovl);
   9.850 -        ovl = (hcb->next_overlap)(hcb);
   9.851 -    };
   9.852 - */
   9.853 -    vtlb_purge(hcb, va, ps);
   9.854 -    vhpt_purge(hcb->ts->vhpt, va, ps);
   9.855 -}
   9.856 -
   9.857 -/*
   9.858   * Purge overlap TCs and then insert the new entry to emulate itc ops.
   9.859   *    Notes: Only TC entry can purge and insert.
   9.860   */
   9.861 -void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in, u64 va)
   9.862 +void thash_purge_and_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
   9.863  {
   9.864 -    /* thash_data_t    *ovl; */
   9.865 -    search_section_t sections;
   9.866 -
   9.867 -#ifdef   XEN_DEBUGGER
   9.868 -    vrr = vmx_vcpu_rr(hcb->vcpu,in->vadr);
   9.869 -	if ( in->ps != vrr.ps || hcb->ht != THASH_TLB || !in->tc ) {
   9.870 -		panic ("Oops, wrong call for purge_and_insert\n");
   9.871 -		return;
   9.872 -	}
   9.873 -#endif
   9.874 -    in->vadr = PAGEALIGN(in->vadr,in->ps);
   9.875 -    in->ppn = PAGEALIGN(in->ppn, in->ps-12);
   9.876 -    sections.tr = 0;
   9.877 -    sections.tc = 1;
   9.878 -/*
   9.879 -    ovl = (hcb->find_overlap)(hcb, in->vadr, PSIZE(in->ps),
   9.880 -    				 in->rid, in->cl, sections);
   9.881 -    if(ovl)
   9.882 -        (hcb->rem_hash)(hcb, ovl);
   9.883 - */
   9.884 -    vtlb_purge(hcb, va, in->ps);
   9.885 -    vhpt_purge(hcb->ts->vhpt, va, in->ps);
   9.886 -#ifdef   XEN_DEBUGGER
   9.887 -    ovl = (hcb->next_overlap)(hcb);
   9.888 -    if ( ovl ) {
   9.889 -		panic ("Oops, 2+ overlaps for purge_and_insert\n");
   9.890 -		return;
   9.891 +    u64 ps, va;
   9.892 +    ps = itir_ps(itir);
   9.893 +    va = PAGEALIGN(ifa,ps);
   9.894 +    vtlb_purge(hcb, va, ps);
   9.895 +    vhpt_purge(hcb->vhpt, va, ps);
   9.896 +    if((ps!=PAGE_SHIFT)||(pte&VTLB_PTE_IO))
   9.897 +        vtlb_insert(hcb, pte, itir, va);
   9.898 +    if(!(pte&VTLB_PTE_IO)){
   9.899 +        va = PAGEALIGN(ifa,PAGE_SHIFT);
   9.900 +        thash_vhpt_insert(hcb->vhpt, pte, itir, va);
   9.901      }
   9.902 -#endif
   9.903 -    if(in->ps!=PAGE_SHIFT)
   9.904 -        vtlb_insert(hcb, in, va);
   9.905 -    thash_vhpt_insert(hcb->ts->vhpt, in, va);
   9.906  }
   9.907 -/*
   9.908 - * Purge one hash line (include the entry in hash table).
   9.909 - * Can only be called by thash_purge_all.
   9.910 - * Input:
   9.911 - *  hash: The head of collision chain (hash table)
   9.912 - *
   9.913 - */
   9.914 -/*
   9.915 -static void thash_purge_line(thash_cb_t *hcb, thash_data_t *hash)
   9.916 -{
   9.917 -    if ( INVALID_ENTRY(hcb, hash) ) return;
   9.918 -    thash_data_t *prev, *next;
   9.919 -    next=hash->next;
   9.920 -    while ( next ) {
   9.921 -        prev=next;
   9.922 -        next=next->next;
   9.923 -        cch_free(hcb, prev);
   9.924 -    }
   9.925 -    // Then hash table itself.
   9.926 -    INVALIDATE_HASH(hcb, hash);
   9.927 -}
   9.928 -*/
   9.929 -
   9.930 -
   9.931 -
   9.932 -
   9.933 -
   9.934 -
   9.935  
   9.936  
   9.937  
   9.938 @@ -1064,27 +525,12 @@ void thash_purge_all(thash_cb_t *hcb)
   9.939      }
   9.940      cch_mem_init (hcb);
   9.941  
   9.942 -    vhpt = hcb->ts->vhpt;
   9.943 +    vhpt = hcb->vhpt;
   9.944      hash_table = (thash_data_t*)((u64)vhpt->hash + vhpt->hash_sz);
   9.945      for (--hash_table;(u64)hash_table >= (u64)vhpt->hash;hash_table--) {
   9.946          INVALIDATE_VHPT_HEADER(hash_table);
   9.947      }
   9.948      cch_mem_init (vhpt);
   9.949 -    
   9.950 -/*
   9.951 -    entry = &hcb->ts->itr[0];
   9.952 -    for(i=0; i< (NITRS+NDTRS); i++){
   9.953 -        if(!INVALID_TLB(entry)){
   9.954 -            start=entry->vadr & (-PSIZE(entry->ps));
   9.955 -            end = start + PSIZE(entry->ps);
   9.956 -            while(start<end){
   9.957 -                thash_vhpt_insert(vhpt, entry, start);
   9.958 -                start += PAGE_SIZE;
   9.959 -            }
   9.960 -        }
   9.961 -        entry++;
   9.962 -    }
   9.963 -*/
   9.964      local_flush_tlb_all();
   9.965  }
   9.966  
   9.967 @@ -1096,100 +542,32 @@ void thash_purge_all(thash_cb_t *hcb)
   9.968   * INPUT:
   9.969   *  in: TLB format for both VHPT & TLB.
   9.970   */
   9.971 -thash_data_t *vtlb_lookup(thash_cb_t *hcb, 
   9.972 -            thash_data_t *in)
   9.973 -{
   9.974 -    return vtlb_lookup_ex(hcb, in->rid, in->vadr, in->cl);
   9.975 -}
   9.976  
   9.977 -thash_data_t *vtlb_lookup_ex(thash_cb_t *hcb, 
   9.978 -            u64 rid, u64 va,
   9.979 -            CACHE_LINE_TYPE cl)
   9.980 +thash_data_t *vtlb_lookup(thash_cb_t *hcb, u64 va,int is_data)
   9.981  {
   9.982      thash_data_t    *hash_table, *cch;
   9.983      u64     tag;
   9.984      ia64_rr vrr;
   9.985 -   
   9.986 +
   9.987      ASSERT ( hcb->ht == THASH_TLB );
   9.988 -    
   9.989 -    cch = __vtr_lookup(hcb, rid, va, cl);;
   9.990 +
   9.991 +    cch = __vtr_lookup(hcb->vcpu, va, is_data);;
   9.992      if ( cch ) return cch;
   9.993  
   9.994 -    vrr = vmx_vcpu_rr(hcb->vcpu,va);
   9.995 +    vcpu_get_rr(hcb->vcpu,va,&vrr.rrval);
   9.996      hash_table = vsa_thash( hcb->pta, va, vrr.rrval, &tag);
   9.997  
   9.998      if ( INVALID_ENTRY(hcb, hash_table ) )
   9.999          return NULL;
  9.1000  
  9.1001 -        
  9.1002 +
  9.1003      for (cch=hash_table; cch; cch = cch->next) {
  9.1004 -//        if ( __is_translated(cch, rid, va, cl) )
  9.1005          if(cch->etag == tag)
  9.1006              return cch;
  9.1007      }
  9.1008      return NULL;
  9.1009  }
  9.1010  
  9.1011 -/*
  9.1012 - * Lock/Unlock TC if found.
  9.1013 - *     NOTES: Only the page in prefered size can be handled.
  9.1014 - *   return:
  9.1015 - *          1: failure
  9.1016 - *          0: success
  9.1017 - */
  9.1018 -/*
  9.1019 -int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock)
  9.1020 -{
  9.1021 -	thash_data_t	*ovl;
  9.1022 -	search_section_t	sections;
  9.1023 -
  9.1024 -    sections.tr = 1;
  9.1025 -    sections.tc = 1;
  9.1026 -	ovl = (hcb->find_overlap)(hcb, va, size, rid, cl, sections);
  9.1027 -	if ( ovl ) {
  9.1028 -		if ( !ovl->tc ) {
  9.1029 -//			panic("Oops, TR for lock\n");
  9.1030 -			return 0;
  9.1031 -		}
  9.1032 -		else if ( lock ) {
  9.1033 -			if ( ovl->locked ) {
  9.1034 -				DPRINTK("Oops, already locked entry\n");
  9.1035 -			}
  9.1036 -			ovl->locked = 1;
  9.1037 -		}
  9.1038 -		else if ( !lock ) {
  9.1039 -			if ( !ovl->locked ) {
  9.1040 -				DPRINTK("Oops, already unlocked entry\n");
  9.1041 -			}
  9.1042 -			ovl->locked = 0;
  9.1043 -		}
  9.1044 -		return 0;
  9.1045 -	}
  9.1046 -	return 1;
  9.1047 -}
  9.1048 -*/
  9.1049 -
  9.1050 -/*
  9.1051 - * Notifier when TLB is deleted from hash table and its collision chain.
  9.1052 - * NOTES:
  9.1053 - *  The typical situation is that TLB remove needs to inform
  9.1054 - * VHPT to remove too.
  9.1055 - * PARAS:
  9.1056 - *  1: hcb is TLB object.
  9.1057 - *  2: The format of entry is always in TLB.
  9.1058 - *
  9.1059 - */
  9.1060 -//void tlb_remove_notifier(thash_cb_t *hcb, thash_data_t *entry)
  9.1061 -//{
  9.1062 -//    vhpt_purge(hcb->ts->vhpt,entry->vadr,entry->ps);
  9.1063 -//    thash_cb_t  *vhpt;
  9.1064 -    
  9.1065 -//    search_section_t    s_sect;
  9.1066 -    
  9.1067 -//    s_sect.v = 0;
  9.1068 -//    thash_purge_entries(hcb->ts->vhpt, entry, s_sect);
  9.1069 -//    machine_tlb_purge(entry->vadr, entry->ps);
  9.1070 -//}
  9.1071  
  9.1072  /*
  9.1073   * Initialize internal control data before service.
  9.1074 @@ -1206,28 +584,15 @@ void thash_init(thash_cb_t *hcb, u64 sz)
  9.1075      hcb->pta.size = sz;
  9.1076  //    hcb->get_rr_fn = vmmu_get_rr;
  9.1077      ASSERT ( hcb->hash_sz % sizeof(thash_data_t) == 0 );
  9.1078 -    if ( hcb->ht == THASH_TLB ) {
  9.1079 -//        hcb->remove_notifier =  NULL;	//tlb_remove_notifier;
  9.1080 -//        hcb->find_overlap = vtlb_find_overlap;
  9.1081 -//        hcb->next_overlap = vtlb_next_overlap;
  9.1082 -//        hcb->rem_hash = rem_vtlb;
  9.1083 -//        hcb->ins_hash = vtlb_insert;
  9.1084 -        __init_tr(hcb);
  9.1085 -    }
  9.1086 -    else {
  9.1087 -//        hcb->remove_notifier =  NULL;
  9.1088 -//        hcb->find_overlap = vhpt_find_overlap;
  9.1089 -//        hcb->next_overlap = vhpt_next_overlap;
  9.1090 -//        hcb->rem_hash = rem_thash;
  9.1091 -//        hcb->ins_hash = thash_vhpt_insert;
  9.1092 -    }
  9.1093      hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
  9.1094  
  9.1095      for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
  9.1096          INVALIDATE_HASH_HEADER(hcb,hash_table);
  9.1097      }
  9.1098  }
  9.1099 +
  9.1100  #ifdef  VTLB_DEBUG
  9.1101 +/*
  9.1102  static  u64 cch_length_statistics[MAX_CCH_LENGTH+1];
  9.1103  u64  sanity_check=0;
  9.1104  u64 vtlb_chain_sanity(thash_cb_t *vtlb, thash_cb_t *vhpt, thash_data_t *hash)
  9.1105 @@ -1264,7 +629,7 @@ void check_vtlb_sanity(thash_cb_t *vtlb)
  9.1106      thash_data_t  *hash, *cch;
  9.1107      thash_data_t    *ovl;
  9.1108      search_section_t s_sect;
  9.1109 -    thash_cb_t *vhpt = vtlb->ts->vhpt;
  9.1110 +    thash_cb_t *vhpt = vtlb->vhpt;
  9.1111      u64   invalid_ratio;
  9.1112   
  9.1113      if ( sanity_check == 0 ) return;
  9.1114 @@ -1403,4 +768,5 @@ void dump_vtlb(thash_cb_t *vtlb)
  9.1115      }
  9.1116      printf("End of vTLB dump\n");
  9.1117  }
  9.1118 +*/
  9.1119  #endif
    10.1 --- a/xen/arch/ia64/xen/irq.c	Fri Mar 10 08:25:54 2006 -0700
    10.2 +++ b/xen/arch/ia64/xen/irq.c	Fri Mar 10 08:52:12 2006 -0700
    10.3 @@ -1338,6 +1338,7 @@ typedef struct {
    10.4      struct domain *guest[IRQ_MAX_GUESTS];
    10.5  } irq_guest_action_t;
    10.6  
    10.7 +/*
    10.8  static void __do_IRQ_guest(int irq)
    10.9  {
   10.10      irq_desc_t         *desc = &irq_desc[irq];
   10.11 @@ -1353,7 +1354,7 @@ static void __do_IRQ_guest(int irq)
   10.12          send_guest_pirq(d, irq);
   10.13      }
   10.14  }
   10.15 -
   10.16 + */
   10.17  int pirq_guest_unmask(struct domain *d)
   10.18  {
   10.19      irq_desc_t    *desc;
    11.1 --- a/xen/arch/ia64/xen/process.c	Fri Mar 10 08:25:54 2006 -0700
    11.2 +++ b/xen/arch/ia64/xen/process.c	Fri Mar 10 08:52:12 2006 -0700
    11.3 @@ -1,3 +1,4 @@
    11.4 +
    11.5  /*
    11.6   * Miscellaneous process/domain related routines
    11.7   * 
    11.8 @@ -58,9 +59,6 @@ extern unsigned long dom0_start, dom0_si
    11.9  			IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
   11.10  			IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
   11.11  
   11.12 -#define PSCB(x,y)	VCPU(x,y)
   11.13 -#define PSCBX(x,y)	x->arch.y
   11.14 -
   11.15  #include <xen/sched-if.h>
   11.16  
   11.17  void schedule_tail(struct vcpu *prev)
    12.1 --- a/xen/arch/ia64/xen/vcpu.c	Fri Mar 10 08:25:54 2006 -0700
    12.2 +++ b/xen/arch/ia64/xen/vcpu.c	Fri Mar 10 08:52:12 2006 -0700
    12.3 @@ -36,8 +36,6 @@ typedef	union {
    12.4  
    12.5  // this def for vcpu_regs won't work if kernel stack is present
    12.6  //#define	vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs
    12.7 -#define	PSCB(x,y)	VCPU(x,y)
    12.8 -#define	PSCBX(x,y)	x->arch.y
    12.9  
   12.10  #define	TRUE	1
   12.11  #define	FALSE	0
   12.12 @@ -66,18 +64,6 @@ unsigned long tr_translate_count = 0;
   12.13  unsigned long phys_translate_count = 0;
   12.14  
   12.15  unsigned long vcpu_verbose = 0;
   12.16 -#define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)
   12.17 -
   12.18 -//#define vcpu_quick_region_check(_tr_regions,_ifa)	1
   12.19 -#define vcpu_quick_region_check(_tr_regions,_ifa)			\
   12.20 -	(_tr_regions & (1 << ((unsigned long)_ifa >> 61)))
   12.21 -#define vcpu_quick_region_set(_tr_regions,_ifa)				\
   12.22 -	do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)
   12.23 -
   12.24 -// FIXME: also need to check && (!trp->key || vcpu_pkr_match(trp->key))
   12.25 -#define vcpu_match_tr_entry(_trp,_ifa,_rid)				\
   12.26 -	((_trp->p && (_trp->rid==_rid) && (_ifa >= _trp->vadr) &&	\
   12.27 -	(_ifa < (_trp->vadr + (1L<< _trp->ps)) - 1)))
   12.28  
   12.29  /**************************************************************************
   12.30   VCPU general register access routines
   12.31 @@ -1641,8 +1627,11 @@ IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64
   12.32  
   12.33  IA64FAULT vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
   12.34  {
   12.35 -	UINT val = PSCB(vcpu,rrs)[reg>>61];
   12.36 -	*pval = val;
   12.37 +	if(VMX_DOMAIN(vcpu)){
   12.38 +		*pval = VMX(vcpu,vrr[reg>>61]);
   12.39 +	}else{
   12.40 +		*pval = PSCB(vcpu,rrs)[reg>>61];
   12.41 +	}
   12.42  	return (IA64_NO_FAULT);
   12.43  }
   12.44  
    13.1 --- a/xen/include/asm-ia64/vcpu.h	Fri Mar 10 08:25:54 2006 -0700
    13.2 +++ b/xen/include/asm-ia64/vcpu.h	Fri Mar 10 08:52:12 2006 -0700
    13.3 @@ -7,7 +7,6 @@
    13.4  //#include "thread.h"
    13.5  #include <asm/ia64_int.h>
    13.6  #include <public/arch-ia64.h>
    13.7 -
    13.8  typedef	unsigned long UINT64;
    13.9  typedef	unsigned int UINT;
   13.10  typedef	int BOOLEAN;
   13.11 @@ -16,7 +15,10 @@ typedef	struct vcpu VCPU;
   13.12  
   13.13  typedef cpu_user_regs_t REGS;
   13.14  
   13.15 -#define VCPU(_v,_x)	_v->arch.privregs->_x
   13.16 +
   13.17 +#define VCPU(_v,_x)	(_v->arch.privregs->_x)
   13.18 +#define PSCB(_v,_x) VCPU(_v,_x)
   13.19 +#define PSCBX(_v,_x) (_v->arch._x)
   13.20  
   13.21  #define PRIVOP_ADDR_COUNT
   13.22  #ifdef PRIVOP_ADDR_COUNT
   13.23 @@ -175,4 +177,18 @@ itir_mask(UINT64 itir)
   13.24      return (~((1UL << itir_ps(itir)) - 1));
   13.25  }
   13.26  
   13.27 +#define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)
   13.28 +
   13.29 +//#define vcpu_quick_region_check(_tr_regions,_ifa) 1
   13.30 +#define vcpu_quick_region_check(_tr_regions,_ifa)           \
   13.31 +    (_tr_regions & (1 << ((unsigned long)_ifa >> 61)))
   13.32 +#define vcpu_quick_region_set(_tr_regions,_ifa)             \
   13.33 +    do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)
   13.34 +
   13.35 +// FIXME: also need to check && (!trp->key || vcpu_pkr_match(trp->key))
   13.36 +#define vcpu_match_tr_entry(_trp,_ifa,_rid)             \
   13.37 +    ((_trp->p && (_trp->rid==_rid) && (_ifa >= _trp->vadr) &&   \
   13.38 +    (_ifa < (_trp->vadr + (1L<< _trp->ps)) - 1)))
   13.39 +
   13.40 +
   13.41  #endif
    14.1 --- a/xen/include/asm-ia64/vmmu.h	Fri Mar 10 08:25:54 2006 -0700
    14.2 +++ b/xen/include/asm-ia64/vmmu.h	Fri Mar 10 08:52:12 2006 -0700
    14.3 @@ -68,11 +68,14 @@ typedef union search_section {
    14.4  } search_section_t;
    14.5  
    14.6  
    14.7 -typedef enum {
    14.8 +enum {
    14.9          ISIDE_TLB=0,
   14.10          DSIDE_TLB=1
   14.11 -} CACHE_LINE_TYPE;
   14.12 -
   14.13 +};
   14.14 +#define VTLB_PTE_P_BIT      0
   14.15 +#define VTLB_PTE_IO_BIT     60
   14.16 +#define VTLB_PTE_IO         (1UL<<VTLB_PTE_IO_BIT)
   14.17 +#define VTLB_PTE_P         (1UL<<VTLB_PTE_P_BIT)
   14.18  typedef struct thash_data {
   14.19      union {
   14.20          struct {
   14.21 @@ -86,18 +89,16 @@ typedef struct thash_data {
   14.22              u64 ppn  : 38; // 12-49
   14.23              u64 rv2  :  2; // 50-51
   14.24              u64 ed   :  1; // 52
   14.25 -            u64 ig1  :  3; // 53-55
   14.26 -            u64 len  :  4; // 56-59
   14.27 -            u64 ig2  :  3; // 60-63
   14.28 +            u64 ig1  :  3; // 53-63
   14.29          };
   14.30          struct {
   14.31              u64 __rv1 : 53;	// 0-52
   14.32              u64 contiguous : 1; //53
   14.33              u64 tc : 1;     // 54 TR or TC
   14.34 -            CACHE_LINE_TYPE cl : 1; // 55 I side or D side cache line
   14.35 +            u64 cl : 1; // 55 I side or D side cache line
   14.36              // next extension to ig1, only for TLB instance
   14.37 -            u64 __ig1  :  4; // 56-59
   14.38 -            u64 locked  : 1;	// 60 entry locked or not
   14.39 +            u64 len  :  4; // 56-59
   14.40 +            u64 io  : 1;	// 60 entry is for io or not
   14.41              u64 nomap : 1;   // 61 entry cann't be inserted into machine TLB.
   14.42              u64 checked : 1; // 62 for VTLB/VHPT sanity check
   14.43              u64 invalid : 1; // 63 invalid entry
   14.44 @@ -112,12 +113,12 @@ typedef struct thash_data {
   14.45              u64 key  : 24; // 8-31
   14.46              u64 rv4  : 32; // 32-63
   14.47          };
   14.48 -        struct {
   14.49 -            u64 __rv3  : 32; // 0-31
   14.50 +//        struct {
   14.51 +//            u64 __rv3  : 32; // 0-31
   14.52              // next extension to rv4
   14.53 -            u64 rid  : 24;  // 32-55
   14.54 -            u64 __rv4  : 8; // 56-63
   14.55 -        };
   14.56 +//            u64 rid  : 24;  // 32-55
   14.57 +//            u64 __rv4  : 8; // 56-63
   14.58 +//        };
   14.59          u64 itir;
   14.60      };
   14.61      union {
   14.62 @@ -136,7 +137,8 @@ typedef struct thash_data {
   14.63      };
   14.64      union {
   14.65          struct thash_data *next;
   14.66 -        u64  tr_idx;
   14.67 +        u64  rid;  // only used in guest TR
   14.68 +//        u64  tr_idx;
   14.69      };
   14.70  } thash_data_t;
   14.71  
   14.72 @@ -152,7 +154,7 @@ typedef struct thash_data {
   14.73  
   14.74  #define INVALID_VHPT(hdata)     ((hdata)->ti)
   14.75  #define INVALID_TLB(hdata)      ((hdata)->ti)
   14.76 -#define INVALID_TR(hdata)      ((hdata)->invalid)
   14.77 +#define INVALID_TR(hdata)      (!(hdata)->p)
   14.78  #define INVALID_ENTRY(hcb, hdata)       INVALID_VHPT(hdata)
   14.79  
   14.80  /*        ((hcb)->ht==THASH_TLB ? INVALID_TLB(hdata) : INVALID_VHPT(hdata)) */
   14.81 @@ -199,18 +201,18 @@ typedef thash_data_t *(FIND_NEXT_OVL_FN)
   14.82  typedef void (REM_THASH_FN)(struct thash_cb *hcb, thash_data_t *entry);
   14.83  typedef void (INS_THASH_FN)(struct thash_cb *hcb, thash_data_t *entry, u64 va);
   14.84  
   14.85 -typedef struct tlb_special {
   14.86 -        thash_data_t     itr[NITRS];
   14.87 -        thash_data_t     dtr[NDTRS];
   14.88 -        struct thash_cb  *vhpt;
   14.89 -} tlb_special_t;
   14.90 +//typedef struct tlb_special {
   14.91 +//        thash_data_t     itr[NITRS];
   14.92 +//        thash_data_t     dtr[NDTRS];
   14.93 +//        struct thash_cb  *vhpt;
   14.94 +//} tlb_special_t;
   14.95  
   14.96  //typedef struct vhpt_cb {
   14.97          //u64     pta;    // pta value.
   14.98  //        GET_MFN_FN      *get_mfn;
   14.99  //        TTAG_FN         *tag_func;
  14.100  //} vhpt_special;
  14.101 -
  14.102 +/*
  14.103  typedef struct thash_internal {
  14.104          thash_data_t *hash_base;
  14.105          thash_data_t *cur_cch;  // head of overlap search
  14.106 @@ -227,7 +229,7 @@ typedef struct thash_internal {
  14.107          u64     _curva;         // current address to search
  14.108          u64     _eva;
  14.109  } thash_internal_t;
  14.110 -
  14.111 + */
  14.112  #define  THASH_CB_MAGIC         0x55aa00aa55aa55aaUL
  14.113  typedef struct thash_cb {
  14.114          /* THASH base information */
  14.115 @@ -243,6 +245,7 @@ typedef struct thash_cb {
  14.116          thash_cch_mem_t *cch_freelist;
  14.117          struct vcpu *vcpu;
  14.118          PTA     pta;
  14.119 +        struct thash_cb *vhpt;
  14.120          /* VTLB/VHPT common information */
  14.121  //        FIND_OVERLAP_FN *find_overlap;
  14.122  //        FIND_NEXT_OVL_FN *next_overlap;
  14.123 @@ -251,15 +254,15 @@ typedef struct thash_cb {
  14.124  //        REM_NOTIFIER_FN *remove_notifier;
  14.125          /* private information */
  14.126  //        thash_internal_t  priv;
  14.127 -        union {
  14.128 -                tlb_special_t  *ts;
  14.129 +//        union {
  14.130 +//                tlb_special_t  *ts;
  14.131  //                vhpt_special   *vs;
  14.132 -        };
  14.133 +//        };
  14.134          // Internal positon information, buffer and storage etc. TBD
  14.135  } thash_cb_t;
  14.136  
  14.137 -#define ITR(hcb,id)             ((hcb)->ts->itr[id])
  14.138 -#define DTR(hcb,id)             ((hcb)->ts->dtr[id])
  14.139 +//#define ITR(hcb,id)             ((hcb)->ts->itr[id])
  14.140 +//#define DTR(hcb,id)             ((hcb)->ts->dtr[id])
  14.141  #define INVALIDATE_HASH_HEADER(hcb,hash)    INVALIDATE_TLB_HEADER(hash)
  14.142  /*              \
  14.143  {           if ((hcb)->ht==THASH_TLB){            \
  14.144 @@ -290,10 +293,10 @@ extern void thash_init(thash_cb_t *hcb, 
  14.145   *      4: Return the entry in hash table or collision chain.
  14.146   *
  14.147   */
  14.148 -extern void thash_vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va);
  14.149 +extern void thash_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa);
  14.150  //extern void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va);
  14.151 -extern void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx);
  14.152 -extern thash_data_t *vtr_find_overlap(thash_cb_t *hcb, thash_data_t *data, char cl);
  14.153 +//extern void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx);
  14.154 +extern int vtr_find_overlap(struct vcpu *vcpu, u64 va, u64 ps, int is_data);
  14.155  extern u64 get_mfn(struct domain *d, u64 gpfn);
  14.156  /*
  14.157   * Force to delete a found entry no matter TR or foreign map for TLB.
  14.158 @@ -344,13 +347,8 @@ extern thash_data_t *thash_find_next_ove
  14.159   *    NOTES:
  14.160   *
  14.161   */
  14.162 -extern void thash_purge_entries(thash_cb_t *hcb, 
  14.163 -                        thash_data_t *in, search_section_t p_sect);
  14.164 -extern void thash_purge_entries_ex(thash_cb_t *hcb,
  14.165 -                        u64 rid, u64 va, u64 sz, 
  14.166 -                        search_section_t p_sect, 
  14.167 -                        CACHE_LINE_TYPE cl);
  14.168 -extern void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in, u64 va);
  14.169 +extern void thash_purge_entries(thash_cb_t *hcb, u64 va, u64 ps);
  14.170 +extern void thash_purge_and_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa);
  14.171  
  14.172  /*
  14.173   * Purge all TCs or VHPT entries including those in Hash table.
  14.174 @@ -363,10 +361,7 @@ extern void thash_purge_all(thash_cb_t *
  14.175   * covering this address rid:va.
  14.176   *
  14.177   */
  14.178 -extern thash_data_t *vtlb_lookup(thash_cb_t *hcb, 
  14.179 -                        thash_data_t *in);
  14.180 -extern thash_data_t *vtlb_lookup_ex(thash_cb_t *hcb, 
  14.181 -                        u64 rid, u64 va,CACHE_LINE_TYPE cl);
  14.182 +extern thash_data_t *vtlb_lookup(thash_cb_t *hcb,u64 va,int is_data);
  14.183  extern int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock);
  14.184  
  14.185  
  14.186 @@ -382,6 +377,15 @@ extern thash_data_t * vsa_thash(PTA vpta
  14.187  extern thash_data_t * vhpt_lookup(u64 va);
  14.188  extern void machine_tlb_purge(u64 va, u64 ps);
  14.189  
  14.190 +static inline void vmx_vcpu_set_tr (thash_data_t *trp, u64 pte, u64 itir, u64 va, u64 rid)
  14.191 +{
  14.192 +    trp->page_flags = pte;
  14.193 +    trp->itir = itir;
  14.194 +    trp->vadr = va;
  14.195 +    trp->rid = rid;
  14.196 +}
  14.197 +
  14.198 +
  14.199  //#define   VTLB_DEBUG
  14.200  #ifdef   VTLB_DEBUG
  14.201  extern void check_vtlb_sanity(thash_cb_t *vtlb);
    15.1 --- a/xen/include/asm-ia64/vmx_platform.h	Fri Mar 10 08:25:54 2006 -0700
    15.2 +++ b/xen/include/asm-ia64/vmx_platform.h	Fri Mar 10 08:52:12 2006 -0700
    15.3 @@ -22,7 +22,6 @@
    15.4  #include <public/xen.h>
    15.5  #include <public/arch-ia64.h>
    15.6  #include <asm/hvm/vioapic.h>
    15.7 -
    15.8  struct mmio_list;
    15.9  typedef struct virtual_platform_def {
   15.10      unsigned long       shared_page_va;
   15.11 @@ -51,9 +50,8 @@ typedef struct vlapic {
   15.12  } vlapic_t;
   15.13  
   15.14  extern uint64_t dummy_tmr[];
   15.15 -#define VCPU(_v,_x)	_v->arch.privregs->_x
   15.16 -#define VLAPIC_ID(l) (uint16_t)(VCPU((l)->vcpu, lid) >> 16)
   15.17 -#define VLAPIC_IRR(l) VCPU((l)->vcpu, irr[0])
   15.18 +#define VLAPIC_ID(l) (uint16_t)(((l)->vcpu->arch.privregs->lid) >> 16)
   15.19 +#define VLAPIC_IRR(l) ((l)->vcpu->arch.privregs->irr[0])
   15.20  struct vlapic* apic_round_robin(struct domain *d, uint8_t dest_mode, uint8_t vector, uint32_t bitmap);
   15.21  extern int vmx_vcpu_pend_interrupt(struct vcpu *vcpu, uint8_t vector);
   15.22  static inline int vlapic_set_irq(struct vlapic *t, uint8_t vec, uint8_t trig)
    16.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Fri Mar 10 08:25:54 2006 -0700
    16.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Fri Mar 10 08:52:12 2006 -0700
    16.3 @@ -66,17 +66,13 @@ extern void vmx_vcpu_set_psr_sync_mpsr(V
    16.4  extern IA64FAULT vmx_vcpu_cover(VCPU *vcpu);
    16.5  extern thash_cb_t *vmx_vcpu_get_vtlb(VCPU *vcpu);
    16.6  extern thash_cb_t *vmx_vcpu_get_vhpt(VCPU *vcpu);
    16.7 -extern ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
    16.8  extern IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val);
    16.9 -#if 0
   16.10 -extern IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
   16.11 -#endif
   16.12  extern IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
   16.13  IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
   16.14  extern IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
   16.15  extern IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
   16.16 -extern IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx);
   16.17 -extern IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx);
   16.18 +extern IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte, UINT64 itir, UINT64 ifa);
   16.19 +extern IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte, UINT64 itir, UINT64 ifa);
   16.20  extern IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 ps);
   16.21  extern IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 ps);
   16.22  extern IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps);
   16.23 @@ -347,12 +343,14 @@ IA64FAULT vmx_vcpu_get_itc(VCPU *vcpu,UI
   16.24      *val = vtm_get_itc(vcpu);
   16.25      return  IA64_NO_FAULT;
   16.26  }
   16.27 +/*
   16.28  static inline
   16.29  IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
   16.30  {
   16.31      *pval = VMX(vcpu,vrr[reg>>61]);
   16.32      return (IA64_NO_FAULT);
   16.33  }
   16.34 + */
   16.35  /**************************************************************************
   16.36   VCPU debug breakpoint register access routines
   16.37  **************************************************************************/