ia64/xen-unstable

changeset 10876:27ccf13dc3b7

[IA64] Boot Windows Server 2003: support 8K guest page size

Make the hash VTLB support the 8K page size used by Windows.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
[whitespace and masking cleanups]
Signed-off-by: Alex Williamson <alex.williamson@hp.com>
author awilliam@xenbuild.aw
date Tue Aug 01 14:44:04 2006 -0600 (2006-08-01)
parents 00dd5eb7adc1
children 4acc6d51f389
files xen/arch/ia64/vmx/vmmu.c xen/arch/ia64/vmx/vmx_phy_mode.c xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/vmx/vtlb.c xen/include/asm-ia64/linux-xen/asm/pgtable.h xen/include/asm-ia64/vmx_vcpu.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/vmmu.c	Mon Jul 31 15:14:47 2006 -0600
     1.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Tue Aug 01 14:44:04 2006 -0600
     1.3 @@ -316,7 +316,7 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
     1.4      u64     gpip=0;   // guest physical IP
     1.5      u64     *vpa;
     1.6      thash_data_t    *tlb;
     1.7 -    u64     mfn;
     1.8 +    u64     mfn, maddr;
     1.9      struct page_info* page;
    1.10  
    1.11   again:
    1.12 @@ -333,11 +333,14 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
    1.13      if( gpip){
    1.14          mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
    1.15          if( mfn == INVALID_MFN )  panic_domain(vcpu_regs(vcpu),"fetch_code: invalid memory\n");
    1.16 +        maddr = (mfn << PAGE_SHIFT) | (gpip & (PAGE_SIZE - 1));
    1.17      }else{
    1.18          tlb = vhpt_lookup(gip);
    1.19          if( tlb == NULL)
    1.20              panic_domain(vcpu_regs(vcpu),"No entry found in ITLB and DTLB\n");
    1.21          mfn = tlb->ppn >> (PAGE_SHIFT - ARCH_PAGE_SHIFT);
    1.22 +        maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
    1.23 +                (gip & (PSIZE(tlb->ps) - 1));
    1.24      }
    1.25  
    1.26      page = mfn_to_page(mfn);
    1.27 @@ -349,7 +352,7 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
    1.28          }
    1.29          goto again;
    1.30      }
    1.31 -    vpa = (u64 *)__va((mfn << PAGE_SHIFT) | (gip & (PAGE_SIZE - 1)));
    1.32 +    vpa = (u64 *)__va(maddr);
    1.33  
    1.34      *code1 = *vpa++;
    1.35      *code2 = *vpa;
    1.36 @@ -371,6 +374,7 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN
    1.37          return IA64_FAULT;
    1.38      }
    1.39  #endif //VTLB_DEBUG    
    1.40 +    pte &= ~PAGE_FLAGS_RV_MASK;
    1.41      thash_purge_and_insert(vcpu, pte, itir, ifa);
    1.42      return IA64_NO_FAULT;
    1.43  }
    1.44 @@ -390,6 +394,7 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN
    1.45          return IA64_FAULT;
    1.46      }
    1.47  #endif //VTLB_DEBUG
    1.48 +    pte &= ~PAGE_FLAGS_RV_MASK;
    1.49      gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
    1.50      if (VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain, gpfn))
    1.51          pte |= VTLB_PTE_IO;
    1.52 @@ -418,7 +423,8 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64
    1.53          return IA64_FAULT;
    1.54      }
    1.55      thash_purge_entries(vcpu, va, ps);
    1.56 -#endif    
    1.57 +#endif
    1.58 +    pte &= ~PAGE_FLAGS_RV_MASK;
    1.59      vcpu_get_rr(vcpu, va, &rid);
    1.60      rid = rid& RR_RID_MASK;
    1.61      p_itr = (thash_data_t *)&vcpu->arch.itrs[slot];
    1.62 @@ -432,8 +438,8 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
    1.63  {
    1.64  #ifdef VTLB_DEBUG
    1.65      int index;
    1.66 +#endif    
    1.67      u64 gpfn;
    1.68 -#endif    
    1.69      u64 ps, va, rid;
    1.70      thash_data_t * p_dtr;
    1.71      ps = itir_ps(itir);
    1.72 @@ -445,11 +451,12 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
    1.73          panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
    1.74          return IA64_FAULT;
    1.75      }
    1.76 +#endif   
    1.77 +    pte &= ~PAGE_FLAGS_RV_MASK;
    1.78      thash_purge_entries(vcpu, va, ps);
    1.79      gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
    1.80 -    if(VMX_DOMAIN(vcpu) && _gpfn_is_io(vcpu->domain,gpfn))
    1.81 +    if (VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain, gpfn))
    1.82          pte |= VTLB_PTE_IO;
    1.83 -#endif    
    1.84      vcpu_get_rr(vcpu, va, &rid);
    1.85      rid = rid& RR_RID_MASK;
    1.86      p_dtr = (thash_data_t *)&vcpu->arch.dtrs[slot];
     2.1 --- a/xen/arch/ia64/vmx/vmx_phy_mode.c	Mon Jul 31 15:14:47 2006 -0600
     2.2 +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c	Tue Aug 01 14:44:04 2006 -0600
     2.3 @@ -110,9 +110,14 @@ void
     2.4  physical_tlb_miss(VCPU *vcpu, u64 vadr)
     2.5  {
     2.6      u64 pte;
     2.7 +    ia64_rr rr;
     2.8 +    rr.rrval = ia64_get_rr(vadr);
     2.9      pte =  vadr& _PAGE_PPN_MASK;
    2.10 -    pte = pte | PHY_PAGE_WB;
    2.11 -    thash_purge_and_insert(vcpu, pte, (PAGE_SHIFT<<2), vadr);
    2.12 +    if (vadr >> 63)
    2.13 +        pte = pte | PHY_PAGE_UC;
    2.14 +    else
    2.15 +        pte = pte | PHY_PAGE_WB;
    2.16 +    thash_vhpt_insert(vcpu, pte, (rr.ps << 2), vadr);
    2.17      return;
    2.18  }
    2.19  
    2.20 @@ -120,19 +125,14 @@ physical_tlb_miss(VCPU *vcpu, u64 vadr)
    2.21  void
    2.22  vmx_init_all_rr(VCPU *vcpu)
    2.23  {
    2.24 -	VMX(vcpu,vrr[VRN0]) = 0x38;
    2.25 -	VMX(vcpu,vrr[VRN1]) = 0x138;
    2.26 -	VMX(vcpu,vrr[VRN2]) = 0x238;
    2.27 -	VMX(vcpu,vrr[VRN3]) = 0x338;
    2.28 -	VMX(vcpu,vrr[VRN4]) = 0x438;
    2.29 -	VMX(vcpu,vrr[VRN5]) = 0x538;
    2.30 -	VMX(vcpu,vrr[VRN6]) = 0x660;
    2.31 -	VMX(vcpu,vrr[VRN7]) = 0x760;
    2.32 -#if 0
    2.33 -	VMX(vcpu,mrr5) = vrrtomrr(vcpu, 0x38);
    2.34 -	VMX(vcpu,mrr6) = vrrtomrr(vcpu, 0x60);
    2.35 -	VMX(vcpu,mrr7) = vrrtomrr(vcpu, 0x60);
    2.36 -#endif
    2.37 +	VMX(vcpu, vrr[VRN0]) = 0x38;
    2.38 +	VMX(vcpu, vrr[VRN1]) = 0x38;
    2.39 +	VMX(vcpu, vrr[VRN2]) = 0x38;
    2.40 +	VMX(vcpu, vrr[VRN3]) = 0x38;
    2.41 +	VMX(vcpu, vrr[VRN4]) = 0x38;
    2.42 +	VMX(vcpu, vrr[VRN5]) = 0x38;
    2.43 +	VMX(vcpu, vrr[VRN6]) = 0x38;
    2.44 +	VMX(vcpu, vrr[VRN7]) = 0x738;
    2.45  }
    2.46  
    2.47  extern void * pal_vaddr;
    2.48 @@ -208,18 +208,19 @@ void
    2.49  switch_to_physical_rid(VCPU *vcpu)
    2.50  {
    2.51      UINT64 psr;
    2.52 -    ia64_rr phy_rr;
    2.53 -
    2.54 +    ia64_rr phy_rr, mrr;
    2.55  
    2.56      /* Save original virtual mode rr[0] and rr[4] */
    2.57      psr=ia64_clear_ic();
    2.58      phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0;
    2.59 -//    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
    2.60 +    mrr.rrval = ia64_get_rr(VRN0 << VRN_SHIFT);
    2.61 +    phy_rr.ps = mrr.ps;
    2.62      phy_rr.ve = 1;
    2.63      ia64_set_rr(VRN0<<VRN_SHIFT, phy_rr.rrval);
    2.64      ia64_srlz_d();
    2.65      phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4;
    2.66 -//    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
    2.67 +    mrr.rrval = ia64_get_rr(VRN4 << VRN_SHIFT);
    2.68 +    phy_rr.ps = mrr.ps;
    2.69      phy_rr.ve = 1;
    2.70      ia64_set_rr(VRN4<<VRN_SHIFT, phy_rr.rrval);
    2.71      ia64_srlz_d();
    2.72 @@ -262,6 +263,8 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
    2.73      act = mm_switch_action(old_psr, new_psr);
    2.74      switch (act) {
    2.75      case SW_V2P:
    2.76 +//        printf("V -> P mode transition: (0x%lx -> 0x%lx)\n",
    2.77 +//               old_psr.val, new_psr.val);
    2.78          vcpu->arch.old_rsc = regs->ar_rsc;
    2.79          switch_to_physical_rid(vcpu);
    2.80          /*
    2.81 @@ -272,6 +275,8 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
    2.82          vcpu->arch.mode_flags |= GUEST_IN_PHY;
    2.83          break;
    2.84      case SW_P2V:
    2.85 +//        printf("P -> V mode transition: (0x%lx -> 0x%lx)\n",
    2.86 +//               old_psr.val, new_psr.val);
    2.87          switch_to_virtual_rid(vcpu);
    2.88          /*
    2.89           * recover old mode which is saved when entering
    2.90 @@ -285,8 +290,8 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
    2.91              old_psr.val);
    2.92          break;
    2.93      case SW_NOP:
    2.94 -        printf("No action required for mode transition: (0x%lx -> 0x%lx)\n",
    2.95 -            old_psr.val, new_psr.val);
    2.96 +//        printf("No action required for mode transition: (0x%lx -> 0x%lx)\n",
    2.97 +//               old_psr.val, new_psr.val);
    2.98          break;
    2.99      default:
   2.100          /* Sanity check */
     3.1 --- a/xen/arch/ia64/vmx/vmx_process.c	Mon Jul 31 15:14:47 2006 -0600
     3.2 +++ b/xen/arch/ia64/vmx/vmx_process.c	Tue Aug 01 14:44:04 2006 -0600
     3.3 @@ -273,21 +273,24 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
     3.4  //    prepare_if_physical_mode(v);
     3.5  
     3.6      if((data=vtlb_lookup(v, vadr,type))!=0){
     3.7 -//       gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
     3.8 -//       if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
     3.9 -        if(v->domain!=dom0 && data->io && type==DSIDE_TLB ){
    3.10 -            if(data->pl >= ((regs->cr_ipsr>>IA64_PSR_CPL0_BIT)&3)){
    3.11 -                gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
    3.12 -                emulate_io_inst(v, gppa, data->ma);
    3.13 -            }else{
    3.14 -                vcpu_set_isr(v,misr.val);
    3.15 -                data_access_rights(v, vadr);
    3.16 +        if (v->domain != dom0 && type == DSIDE_TLB) {
    3.17 +            gppa = (vadr & ((1UL << data->ps) - 1)) +
    3.18 +                   (data->ppn >> (data->ps - 12) << data->ps);
    3.19 +            if (__gpfn_is_io(v->domain, gppa >> PAGE_SHIFT)) {
    3.20 +                if (data->pl >= ((regs->cr_ipsr >> IA64_PSR_CPL0_BIT) & 3))
    3.21 +                    emulate_io_inst(v, gppa, data->ma);
    3.22 +                else {
    3.23 +                    vcpu_set_isr(v, misr.val);
    3.24 +                    data_access_rights(v, vadr);
    3.25 +                }
    3.26 +                return IA64_FAULT;
    3.27              }
    3.28 -            return IA64_FAULT;
    3.29          }
    3.30 +        thash_vhpt_insert(v,data->page_flags, data->itir ,vadr);
    3.31  
    3.32 -        thash_vhpt_insert(v,data->page_flags, data->itir ,vadr);
    3.33      }else if(type == DSIDE_TLB){
    3.34 +        if (misr.sp)
    3.35 +            return vmx_handle_lds(regs);
    3.36          if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
    3.37              if(vpsr.ic){
    3.38                  vcpu_set_isr(v, misr.val);
    3.39 @@ -306,7 +309,8 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
    3.40          } else{
    3.41              vmx_vcpu_thash(v, vadr, &vhpt_adr);
    3.42              if(!guest_vhpt_lookup(vhpt_adr, &pteval)){
    3.43 -                if (pteval & _PAGE_P){
    3.44 +                if ((pteval & _PAGE_P) &&
    3.45 +                    ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST)) {
    3.46                      vcpu_get_rr(v, vadr, &rr);
    3.47                      itir = rr&(RR_RID_MASK | RR_PS_MASK);
    3.48                      thash_purge_and_insert(v, pteval, itir , vadr);
     4.1 --- a/xen/arch/ia64/vmx/vtlb.c	Mon Jul 31 15:14:47 2006 -0600
     4.2 +++ b/xen/arch/ia64/vmx/vtlb.c	Tue Aug 01 14:44:04 2006 -0600
     4.3 @@ -141,14 +141,18 @@ static void thash_recycle_cch(thash_cb_t
     4.4  
     4.5  static void vmx_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
     4.6  {
     4.7 -    u64 tag;
     4.8 +    u64 tag ,len;
     4.9 +    ia64_rr rr;
    4.10      thash_data_t *head, *cch;
    4.11      pte = pte & ~PAGE_FLAGS_RV_MASK;
    4.12 -
    4.13 +    rr.rrval = ia64_get_rr(ifa);
    4.14      head = (thash_data_t *)ia64_thash(ifa);
    4.15      tag = ia64_ttag(ifa);
    4.16      if( INVALID_VHPT(head) ) {
    4.17 +        len = head->len;
    4.18          head->page_flags = pte;
    4.19 +        head->len = len;
    4.20 +        head->itir = rr.ps << 2;
    4.21          head->etag = tag;
    4.22          return;
    4.23      }
    4.24 @@ -160,10 +164,9 @@ static void vmx_vhpt_insert(thash_cb_t *
    4.25      else{
    4.26          cch = __alloc_chain(hcb);
    4.27      }
    4.28 -    cch->page_flags=head->page_flags;
    4.29 -    cch->etag=head->etag;
    4.30 -    cch->next=head->next;
    4.31 +    *cch = *head;
    4.32      head->page_flags=pte;
    4.33 +    head->itir = rr.ps << 2;
    4.34      head->etag=tag;
    4.35      head->next = cch;
    4.36      head->len = cch->len+1;
    4.37 @@ -210,7 +213,13 @@ thash_data_t * vhpt_lookup(u64 va)
    4.38  u64 guest_vhpt_lookup(u64 iha, u64 *pte)
    4.39  {
    4.40      u64 ret;
    4.41 -    vhpt_lookup(iha);
    4.42 +    thash_data_t * data;
    4.43 +    data = vhpt_lookup(iha);
    4.44 +    if (data == NULL) {
    4.45 +        data = vtlb_lookup(current, iha, DSIDE_TLB);
    4.46 +        if (data != NULL)
    4.47 +            thash_vhpt_insert(current, data->page_flags, data->itir ,iha);
    4.48 +    }
    4.49      asm volatile ("rsm psr.ic|psr.i;;"
    4.50                    "srlz.d;;"
    4.51                    "ld8.s r9=[%1];;"
    4.52 @@ -231,10 +240,10 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
    4.53   *  purge software guest tlb
    4.54   */
    4.55  
    4.56 -static void vtlb_purge(VCPU *v, u64 va, u64 ps)
    4.57 +void vtlb_purge(VCPU *v, u64 va, u64 ps)
    4.58  {
    4.59      thash_cb_t *hcb = &v->arch.vtlb;
    4.60 -    thash_data_t *hash_table, *prev, *next;
    4.61 +    thash_data_t *cur;
    4.62      u64 start, end, size, tag, rid, def_size;
    4.63      ia64_rr vrr;
    4.64      vcpu_get_rr(v, va, &vrr.rrval);
    4.65 @@ -244,23 +253,11 @@ static void vtlb_purge(VCPU *v, u64 va, 
    4.66      end = start + size;
    4.67      def_size = PSIZE(vrr.ps);
    4.68      while(start < end){
    4.69 -        hash_table = vsa_thash(hcb->pta, start, vrr.rrval, &tag);
    4.70 -        if(!INVALID_TLB(hash_table)){
    4.71 -            if(hash_table->etag == tag){
    4.72 -                 hash_table->etag = 1UL<<63;
    4.73 -            }
    4.74 -            else{
    4.75 -                prev=hash_table;
    4.76 -                next=prev->next;
    4.77 -                while(next){
    4.78 -                    if(next->etag == tag){
    4.79 -                        next->etag = 1UL<<63;
    4.80 -                        break;
    4.81 -                    }
    4.82 -                    prev=next;
    4.83 -                    next=next->next;
    4.84 -                }
    4.85 -            }
    4.86 +        cur = vsa_thash(hcb->pta, start, vrr.rrval, &tag);
    4.87 +        while (cur) {
    4.88 +            if (cur->etag == tag)
    4.89 +                 cur->etag = 1UL << 63;
    4.90 +            cur = cur->next;
    4.91          }
    4.92          start += def_size;
    4.93      }
    4.94 @@ -274,30 +271,23 @@ static void vtlb_purge(VCPU *v, u64 va, 
    4.95  static void vhpt_purge(VCPU *v, u64 va, u64 ps)
    4.96  {
    4.97      //thash_cb_t *hcb = &v->arch.vhpt;
    4.98 -    thash_data_t *hash_table, *prev, *next;
    4.99 +    thash_data_t *cur;
   4.100      u64 start, end, size, tag;
   4.101 +    ia64_rr rr;
   4.102      size = PSIZE(ps);
   4.103      start = va & (-size);
   4.104      end = start + size;
   4.105 +    rr.rrval = ia64_get_rr(va);
   4.106 +    size = PSIZE(rr.ps);    
   4.107      while(start < end){
   4.108 -        hash_table = (thash_data_t *)ia64_thash(start);
   4.109 +        cur = (thash_data_t *)ia64_thash(start);
   4.110          tag = ia64_ttag(start);
   4.111 -        if(hash_table->etag == tag ){
   4.112 -            hash_table->etag = 1UL<<63; 
   4.113 +        while (cur) {
   4.114 +            if (cur->etag == tag)
   4.115 +                cur->etag = 1UL << 63; 
   4.116 +            cur = cur->next;
   4.117          }
   4.118 -        else{
   4.119 -            prev=hash_table;
   4.120 -            next=prev->next;
   4.121 -            while(next){
   4.122 -                if(next->etag == tag){
   4.123 -                    next->etag = 1UL<<63;
   4.124 -                    break; 
   4.125 -                }
   4.126 -                prev=next;
   4.127 -                next=next->next;
   4.128 -            }
   4.129 -        }
   4.130 -        start += PAGE_SIZE;
   4.131 +        start += size;
   4.132      }
   4.133      machine_tlb_purge(va, ps);
   4.134  }
   4.135 @@ -349,7 +339,7 @@ void vtlb_insert(thash_cb_t *hcb, u64 pt
   4.136      /* int flag; */
   4.137      ia64_rr vrr;
   4.138      /* u64 gppn, ppns, ppne; */
   4.139 -    u64 tag;
   4.140 +    u64 tag, len;
   4.141      vcpu_get_rr(current, va, &vrr.rrval);
   4.142  #ifdef VTLB_DEBUG    
   4.143      if (vrr.ps != itir_ps(itir)) {
   4.144 @@ -361,7 +351,9 @@ void vtlb_insert(thash_cb_t *hcb, u64 pt
   4.145  #endif
   4.146      hash_table = vsa_thash(hcb->pta, va, vrr.rrval, &tag);
   4.147      if( INVALID_TLB(hash_table) ) {
   4.148 +        len = hash_table->len;
   4.149          hash_table->page_flags = pte;
   4.150 +        hash_table->len = len;
   4.151          hash_table->itir=itir;
   4.152          hash_table->etag=tag;
   4.153          return;
   4.154 @@ -425,18 +417,23 @@ void thash_purge_entries(VCPU *v, u64 va
   4.155  
   4.156  u64 translate_phy_pte(VCPU *v, u64 *pte, u64 itir, u64 va)
   4.157  {
   4.158 -    u64 ps, addr;
   4.159 +    u64 ps, ps_mask, paddr, maddr;
   4.160 +//    ia64_rr rr;
   4.161      union pte_flags phy_pte;
   4.162      ps = itir_ps(itir);
   4.163 +    ps_mask = ~((1UL << ps) - 1);
   4.164      phy_pte.val = *pte;
   4.165 -    addr = *pte;
   4.166 -    addr = ((addr & _PAGE_PPN_MASK)>>ps<<ps)|(va&((1UL<<ps)-1));
   4.167 -    addr = lookup_domain_mpa(v->domain, addr, NULL);
   4.168 -    if(addr & GPFN_IO_MASK){
   4.169 +    paddr = *pte;
   4.170 +    paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
   4.171 +    maddr = lookup_domain_mpa(v->domain, paddr, NULL);
   4.172 +    if (maddr & GPFN_IO_MASK) {
   4.173          *pte |= VTLB_PTE_IO;
   4.174          return -1;
   4.175      }
   4.176 -    phy_pte.ppn = addr >> ARCH_PAGE_SHIFT;
   4.177 +//    rr.rrval = ia64_get_rr(va);
   4.178 +//    ps = rr.ps;
   4.179 +    maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | (paddr & ~PAGE_MASK);
   4.180 +    phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
   4.181      return phy_pte.val;
   4.182  }
   4.183  
   4.184 @@ -449,8 +446,13 @@ void thash_purge_and_insert(VCPU *v, u64
   4.185  {
   4.186      u64 ps;//, va;
   4.187      u64 phy_pte;
   4.188 +    ia64_rr vrr;
   4.189      ps = itir_ps(itir);
   4.190 -
   4.191 +    vcpu_get_rr(current, ifa, &vrr.rrval);
   4.192 +//    if (vrr.ps != itir_ps(itir)) {
   4.193 +//        printf("not preferred ps with va: 0x%lx vrr.ps=%d ps=%ld\n",
   4.194 +//               ifa, vrr.ps, itir_ps(itir));
   4.195 +//    }
   4.196      if(VMX_DOMAIN(v)){
   4.197          /* Ensure WB attribute if pte is related to a normal mem page,
   4.198           * which is required by vga acceleration since qemu maps shared
   4.199 @@ -460,7 +462,7 @@ void thash_purge_and_insert(VCPU *v, u64
   4.200              pte &= ~_PAGE_MA_MASK;
   4.201  
   4.202          phy_pte = translate_phy_pte(v, &pte, itir, ifa);
   4.203 -        if(ps==PAGE_SHIFT){
   4.204 +        if (vrr.ps <= PAGE_SHIFT) {
   4.205              if(!(pte&VTLB_PTE_IO)){
   4.206                  vhpt_purge(v, ifa, ps);
   4.207                  vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
     5.1 --- a/xen/include/asm-ia64/linux-xen/asm/pgtable.h	Mon Jul 31 15:14:47 2006 -0600
     5.2 +++ b/xen/include/asm-ia64/linux-xen/asm/pgtable.h	Tue Aug 01 14:44:04 2006 -0600
     5.3 @@ -38,6 +38,9 @@
     5.4  
     5.5  #define _PAGE_P			(1 << _PAGE_P_BIT)	/* page present bit */
     5.6  #define _PAGE_MA_WB		(0x0 <<  2)	/* write back memory attribute */
     5.7 +#ifdef XEN
     5.8 +#define _PAGE_MA_ST		(0x1 <<  2)	/* is reserved for software use */
     5.9 +#endif
    5.10  #define _PAGE_MA_UC		(0x4 <<  2)	/* uncacheable memory attribute */
    5.11  #define _PAGE_MA_UCE		(0x5 <<  2)	/* UC exported attribute */
    5.12  #define _PAGE_MA_WC		(0x6 <<  2)	/* write coalescing memory attribute */
     6.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Mon Jul 31 15:14:47 2006 -0600
     6.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Tue Aug 01 14:44:04 2006 -0600
     6.3 @@ -467,7 +467,8 @@ vrrtomrr(VCPU *v, unsigned long val)
     6.4  
     6.5      rr.rrval=val;
     6.6      rr.rid = rr.rid + v->arch.starting_rid;
     6.7 -    rr.ps = PAGE_SHIFT;
     6.8 +    if (rr.ps > PAGE_SHIFT)
     6.9 +        rr.ps = PAGE_SHIFT;
    6.10      rr.ve = 1;
    6.11      return  vmMangleRID(rr.rrval);
    6.12  /* Disable this rid allocation algorithm for now */
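
For orientation, here is a minimal, hypothetical sketch (not part of the changeset) of the address calculation introduced in fetch_code() above: with an 8K guest page size, the machine address has to be rebuilt from the TLB entry's own page-size field rather than from Xen's PAGE_SIZE. It assumes, as the diff does, that a VTLB entry's ppn is kept in 4K (ARCH_PAGE_SHIFT) units and that ps is the entry's page-size exponent; the helper name entry_to_maddr is invented for illustration.

#include <stdint.h>

#define ARCH_PAGE_SHIFT 12                 /* ppn granularity: 4K, as in the Xen ia64 code */
#define PSIZE(ps)       (1UL << (ps))      /* bytes mapped by a page of size 2^ps */

/* Machine address backing guest address 'gva', given a TLB entry's ppn and ps.
 * Mirrors the diff's:
 *   maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) | (gip & (PSIZE(tlb->ps) - 1)); */
static inline uint64_t entry_to_maddr(uint64_t ppn, unsigned int ps, uint64_t gva)
{
    /* Drop the sub-page bits of the 4K-granular ppn, then scale it back up
     * to a 2^ps-aligned page base (ps may be 13 for an 8K Windows page). */
    uint64_t base = (ppn >> (ps - ARCH_PAGE_SHIFT)) << ps;

    /* Add the offset within that (possibly 8K) page. */
    return base | (gva & (PSIZE(ps) - 1));
}

For a host-sized page (ps == PAGE_SHIFT) this reduces to the old (mfn << PAGE_SHIFT) | offset form that the patch replaces.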