ia64/xen-unstable

changeset 15889:b91d16ab68be

[IA64] Cleanup: remove unused declarations, add static and reindent

Signed-off-by: Tristan Gingold <tgingold@free.fr>
author Alex Williamson <alex.williamson@hp.com>
date Mon Sep 17 11:26:21 2007 -0600
parents 0902e4aae810
children 082faaa306e0
files xen/arch/ia64/vmx/vmmu.c xen/arch/ia64/vmx/vtlb.c xen/include/asm-ia64/linux-xen/asm/pgtable.h xen/include/asm-ia64/vmmu.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/vmmu.c	Mon Sep 17 11:08:46 2007 -0600
     1.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Mon Sep 17 11:26:21 2007 -0600
     1.3 @@ -66,7 +66,7 @@ custom_param("vti_vhpt_size", parse_vhpt
     1.4   * Input:
     1.5   *  d: 
     1.6   */
     1.7 -u64 get_mfn(struct domain *d, u64 gpfn)
     1.8 +static u64 get_mfn(struct domain *d, u64 gpfn)
     1.9  {
    1.10  //    struct domain *d;
    1.11      u64    xen_gppn, xen_mppn, mpfn;
    1.12 @@ -93,68 +93,6 @@ u64 get_mfn(struct domain *d, u64 gpfn)
    1.13      
    1.14  }
    1.15  
    1.16 -/*
    1.17 - * The VRN bits of va stand for which rr to get.
    1.18 - */
    1.19 -//ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va)
    1.20 -//{
    1.21 -//    ia64_rr   vrr;
    1.22 -//    vcpu_get_rr(vcpu, va, &vrr.rrval);
    1.23 -//    return vrr;
    1.24 -//}
    1.25 -
    1.26 -/*
    1.27 -void recycle_message(thash_cb_t *hcb, u64 para)
    1.28 -{
    1.29 -    if(hcb->ht == THASH_VHPT)
    1.30 -    {
    1.31 -        printk("ERROR : vhpt recycle happenning!!!\n");
    1.32 -    }
    1.33 -    printk("hcb=%p recycled with %lx\n",hcb,para);
    1.34 -}
    1.35 - */
    1.36 -
    1.37 -/*
    1.38 - * Purge all guest TCs in logical processor.
    1.39 - * Instead of purging all LP TCs, we should only purge   
    1.40 - * TCs that belong to this guest.
    1.41 - */
    1.42 -void
    1.43 -purge_machine_tc_by_domid(domid_t domid)
    1.44 -{
    1.45 -#ifndef PURGE_GUEST_TC_ONLY
    1.46 -    // purge all TCs
    1.47 -    struct ia64_pal_retval  result;
    1.48 -    u64 addr;
    1.49 -    u32 count1,count2;
    1.50 -    u32 stride1,stride2;
    1.51 -    u32 i,j;
    1.52 -    u64 psr;
    1.53 -
    1.54 -    result = ia64_pal_call_static(PAL_PTCE_INFO,0,0,0, 0);
    1.55 -    if ( result.status != 0 ) {
    1.56 -        panic ("PAL_PTCE_INFO failed\n");
    1.57 -    }
    1.58 -    addr = result.v0;
    1.59 -    count1 = HIGH_32BITS(result.v1);
    1.60 -    count2 = LOW_32BITS (result.v1);
    1.61 -    stride1 = HIGH_32BITS(result.v2);
    1.62 -    stride2 = LOW_32BITS (result.v2);
    1.63 -
    1.64 -    local_irq_save(psr);
    1.65 -    for (i=0; i<count1; i++) {
    1.66 -        for (j=0; j<count2; j++) {
    1.67 -            ia64_ptce(addr);
    1.68 -            addr += stride2;
    1.69 -        }
    1.70 -        addr += stride1;
    1.71 -    }
    1.72 -    local_irq_restore(psr);
    1.73 -#else
    1.74 -    // purge all TCs belong to this guest.
    1.75 -#endif
    1.76 -}
    1.77 -
    1.78  static int init_domain_vhpt(struct vcpu *v)
    1.79  {
    1.80      int rc;
    1.81 @@ -313,7 +251,8 @@ fetch_code(VCPU *vcpu, u64 gip, IA64_BUN
    1.82      }
    1.83      if( gpip){
    1.84          mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
    1.85 -        if( mfn == INVALID_MFN )  panic_domain(vcpu_regs(vcpu),"fetch_code: invalid memory\n");
    1.86 +        if (mfn == INVALID_MFN)
    1.87 +            panic_domain(vcpu_regs(vcpu), "fetch_code: invalid memory\n");
    1.88          maddr = (mfn << PAGE_SHIFT) | (gpip & (PAGE_SIZE - 1));
    1.89      }else{
    1.90          tlb = vhpt_lookup(gip);
     2.1 --- a/xen/arch/ia64/vmx/vtlb.c	Mon Sep 17 11:08:46 2007 -0600
     2.2 +++ b/xen/arch/ia64/vmx/vtlb.c	Mon Sep 17 11:26:21 2007 -0600
     2.3 @@ -69,9 +69,9 @@ static int
     2.4          return 0;
     2.5      }
     2.6      sa1 = trp->vadr;
     2.7 -    ea1 = sa1 + PSIZE(trp->ps) -1;
     2.8 +    ea1 = sa1 + PSIZE(trp->ps) - 1;
     2.9      eva -= 1;
    2.10 -    if ( (sva>ea1) || (sa1>eva) )
    2.11 +    if (sva > ea1 || sa1 > eva)
    2.12          return 0;
    2.13      else
    2.14          return 1;
    2.15 @@ -85,10 +85,11 @@ static thash_data_t *__vtr_lookup(VCPU *
    2.16      int  i;
    2.17      u64 rid;
    2.18      vcpu_get_rr(vcpu, va, &rid);
    2.19 -    rid = rid & RR_RID_MASK;;
    2.20 +    rid &= RR_RID_MASK;
    2.21      if (is_data) {
    2.22          if (vcpu_quick_region_check(vcpu->arch.dtr_regions,va)) {
    2.23 -            for (trp =(thash_data_t *) vcpu->arch.dtrs,i=0; i<NDTRS; i++, trp++) {
    2.24 +            trp = (thash_data_t *)vcpu->arch.dtrs;
    2.25 +            for (i = 0;  i < NDTRS; i++, trp++) {
    2.26                  if (__is_tr_translated(trp, rid, va)) {
    2.27                      return trp;
    2.28                  }
    2.29 @@ -97,7 +98,8 @@ static thash_data_t *__vtr_lookup(VCPU *
    2.30      }
    2.31      else {
    2.32          if (vcpu_quick_region_check(vcpu->arch.itr_regions,va)) {
    2.33 -            for (trp =(thash_data_t *) vcpu->arch.itrs,i=0; i<NITRS; i++, trp++) {
    2.34 +            trp = (thash_data_t *)vcpu->arch.itrs;
    2.35 +            for (i = 0; i < NITRS; i++, trp++) {
    2.36                  if (__is_tr_translated(trp, rid, va)) {
    2.37                      return trp;
    2.38                  }
    2.39 @@ -107,35 +109,34 @@ static thash_data_t *__vtr_lookup(VCPU *
    2.40      return NULL;
    2.41  }
    2.42  
    2.43 -
    2.44  static void thash_recycle_cch(thash_cb_t *hcb, thash_data_t *hash)
    2.45  {
    2.46      thash_data_t *p, *q;
    2.47 -    int i=0;
    2.48 -    
    2.49 -    p=hash;
    2.50 -    for(i=0; i < MAX_CCN_DEPTH; i++){
    2.51 -        p=p->next;
    2.52 +    int i = 0;
    2.53 +     
    2.54 +    p = hash;
    2.55 +    for (i = 0; i < MAX_CCN_DEPTH; i++) {
    2.56 +        p = p->next;
    2.57      }
    2.58 -    q=hash->next;
    2.59 -    hash->len=0;
    2.60 -    hash->next=0;
    2.61 -    p->next=hcb->cch_freelist;
    2.62 -    hcb->cch_freelist=q;
    2.63 +    q = hash->next;
    2.64 +    hash->len = 0;
    2.65 +    hash->next = 0;
    2.66 +    p->next = hcb->cch_freelist;
    2.67 +    hcb->cch_freelist = q;
    2.68  }
    2.69  
    2.70 -
    2.71 -
    2.72 -
    2.73  static void vmx_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
    2.74  {
    2.75      u64 tag;
    2.76      ia64_rr rr;
    2.77      thash_data_t *head, *cch;
    2.78 +
    2.79      pte = pte & ~PAGE_FLAGS_RV_MASK;
    2.80      rr.rrval = ia64_get_rr(ifa);
    2.81      head = (thash_data_t *)ia64_thash(ifa);
    2.82      tag = ia64_ttag(ifa);
    2.83 +
    2.84 +    /* Find a free (ie invalid) entry.  */
    2.85      cch = head;
    2.86      while (cch) {    
    2.87          if (INVALID_VHPT(cch))
    2.88 @@ -143,16 +144,16 @@ static void vmx_vhpt_insert(thash_cb_t *
    2.89          cch = cch->next;
    2.90      }
    2.91      if (cch) {
    2.92 +        /* As we insert in head, copy head.  */
    2.93          if (cch != head) {
    2.94              local_irq_disable();
    2.95              cch->page_flags = head->page_flags;
    2.96              cch->itir = head->itir;
    2.97 -            cch->etag  = head->etag;
    2.98 +            cch->etag = head->etag;
    2.99              head->ti = 1;
   2.100              local_irq_enable();
   2.101          }
   2.102 -    }
   2.103 -    else{
   2.104 +    } else {
   2.105          if (head->len >= MAX_CCN_DEPTH) {
   2.106              thash_recycle_cch(hcb, head);
   2.107              cch = cch_alloc(hcb);
   2.108 @@ -181,7 +182,7 @@ void thash_vhpt_insert(VCPU *v, u64 pte,
   2.109      ia64_rr mrr;
   2.110  
   2.111      mrr.rrval = ia64_get_rr(va);
   2.112 -    phy_pte=translate_phy_pte(v, &pte, itir, va);
   2.113 +    phy_pte = translate_phy_pte(v, &pte, itir, va);
   2.114  
   2.115      if (itir_ps(itir) >= mrr.ps) {
   2.116          vmx_vhpt_insert(vcpu_get_vhpt(v), phy_pte, itir, va);
   2.117 @@ -232,26 +233,31 @@ thash_data_t * vhpt_lookup(u64 va)
   2.118  {
   2.119      thash_data_t *hash, *head;
   2.120      u64 tag, pte, itir;
   2.121 +
   2.122      head = (thash_data_t *)ia64_thash(va);
   2.123 -    hash=head;
   2.124 +    hash = head;
   2.125      tag = ia64_ttag(va);
   2.126 -    do{
   2.127 -        if(hash->etag == tag)
   2.128 +    do {
   2.129 +        if (hash->etag == tag)
   2.130              break;
   2.131 -        hash=hash->next;
   2.132 -    }while(hash);
   2.133 -    if(hash && hash!=head){
   2.134 +        hash = hash->next;
   2.135 +    } while(hash);
   2.136 +    if (hash && hash != head) {
   2.137 +        /* Put the entry on the front of the list (ie swap hash and head).  */
   2.138          pte = hash->page_flags;
   2.139          hash->page_flags = head->page_flags;
   2.140          head->page_flags = pte;
   2.141 +
   2.142          tag = hash->etag;
   2.143          hash->etag = head->etag;
   2.144          head->etag = tag;
   2.145 +
   2.146          itir = hash->itir;
   2.147          hash->itir = head->itir;
   2.148          head->itir = itir;
   2.149 +
   2.150          head->len = hash->len;
   2.151 -        hash->len=0;
   2.152 +        hash->len = 0;
   2.153          return head;
   2.154      }
   2.155      return hash;
   2.156 @@ -368,14 +374,15 @@ void thash_recycle_cch_all(thash_cb_t *h
   2.157  {
   2.158      int num;
   2.159      thash_data_t *head;
   2.160 -    head=hcb->hash;
   2.161 +
   2.162 +    head = hcb->hash;
   2.163      num = (hcb->hash_sz/sizeof(thash_data_t));
   2.164 -    do{
   2.165 +    do {
   2.166          head->len = 0;
   2.167          head->next = 0;
   2.168          head++;
   2.169          num--;
   2.170 -    }while(num);
   2.171 +    } while(num);
   2.172      cch_mem_init(hcb);
   2.173  }
   2.174  
   2.175 @@ -409,6 +416,7 @@ void vtlb_insert(VCPU *v, u64 pte, u64 i
   2.176      /* u64 gppn, ppns, ppne; */
   2.177      u64 tag, len;
   2.178      thash_cb_t *hcb = &v->arch.vtlb;
   2.179 +
   2.180      vcpu_get_rr(v, va, &vrr.rrval);
   2.181      vrr.ps = itir_ps(itir);
   2.182      VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
   2.183 @@ -419,13 +427,13 @@ void vtlb_insert(VCPU *v, u64 pte, u64 i
   2.184              len = cch->len;
   2.185              cch->page_flags = pte;
   2.186              cch->len = len;
   2.187 -            cch->itir=itir;
   2.188 -            cch->etag=tag;
   2.189 +            cch->itir = itir;
   2.190 +            cch->etag = tag;
   2.191              return;
   2.192          }
   2.193          cch = cch->next;
   2.194      }
   2.195 -    if (hash_table->len>=MAX_CCN_DEPTH){
   2.196 +    if (hash_table->len >= MAX_CCN_DEPTH) {
   2.197          thash_recycle_cch(hcb, hash_table);
   2.198          cch = cch_alloc(hcb);
   2.199      }
   2.200 @@ -450,11 +458,12 @@ int vtr_find_overlap(VCPU *vcpu, u64 va,
   2.201      u64 end, rid;
   2.202  
   2.203      vcpu_get_rr(vcpu, va, &rid);
   2.204 -    rid = rid & RR_RID_MASK;;
   2.205 +    rid &= RR_RID_MASK;
   2.206      end = va + PSIZE(ps);
   2.207      if (is_data) {
   2.208 -        if (vcpu_quick_region_check(vcpu->arch.dtr_regions,va)) {
   2.209 -            for (trp =(thash_data_t *) vcpu->arch.dtrs,i=0; i<NDTRS; i++, trp++) {
   2.210 +        if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
   2.211 +            trp = (thash_data_t *)vcpu->arch.dtrs;
   2.212 +            for (i = 0; i < NDTRS; i++, trp++) {
   2.213                  if (__is_tr_overlap(trp, rid, va, end )) {
   2.214                      return i;
   2.215                  }
   2.216 @@ -463,7 +472,8 @@ int vtr_find_overlap(VCPU *vcpu, u64 va,
   2.217      }
   2.218      else {
   2.219          if (vcpu_quick_region_check(vcpu->arch.itr_regions,va)) {
   2.220 -            for (trp =(thash_data_t *) vcpu->arch.itrs,i=0; i<NITRS; i++, trp++) {
   2.221 +            trp = (thash_data_t *)vcpu->arch.itrs;
   2.222 +            for (i = 0; i < NITRS; i++, trp++) {
   2.223                  if (__is_tr_overlap(trp, rid, va, end )) {
   2.224                      return i;
   2.225                  }
   2.226 @@ -478,7 +488,7 @@ int vtr_find_overlap(VCPU *vcpu, u64 va,
   2.227   */
   2.228  void thash_purge_entries(VCPU *v, u64 va, u64 ps)
   2.229  {
   2.230 -    if(vcpu_quick_region_check(v->arch.tc_regions,va))
   2.231 +    if (vcpu_quick_region_check(v->arch.tc_regions, va))
   2.232          vtlb_purge(v, va, ps);
   2.233      vhpt_purge(v, va, ps);
   2.234  }
   2.235 @@ -497,6 +507,7 @@ u64 translate_phy_pte(VCPU *v, u64 *pte,
   2.236      u64 ps, ps_mask, paddr, maddr;
   2.237  //    ia64_rr rr;
   2.238      union pte_flags phy_pte;
   2.239 +
   2.240      ps = itir_ps(itir);
   2.241      ps_mask = ~((1UL << ps) - 1);
   2.242      phy_pte.val = *pte;
   2.243 @@ -536,7 +547,7 @@ int thash_purge_and_insert(VCPU *v, u64 
   2.244  
   2.245      ps = itir_ps(itir);
   2.246      mrr.rrval = ia64_get_rr(ifa);
   2.247 -    if(VMX_DOMAIN(v)){
   2.248 +    if (VMX_DOMAIN(v)) {
   2.249          phy_pte = translate_phy_pte(v, &pte, itir, ifa);
   2.250  
   2.251          if (pte & VTLB_PTE_IO)
   2.252 @@ -544,18 +555,18 @@ int thash_purge_and_insert(VCPU *v, u64 
   2.253          vtlb_purge(v, ifa, ps);
   2.254          vhpt_purge(v, ifa, ps);
   2.255          if (ps == mrr.ps) {
   2.256 -            if(!(pte&VTLB_PTE_IO)){
   2.257 +            if (!(pte & VTLB_PTE_IO)) {
   2.258                  vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
   2.259              }
   2.260              else{
   2.261                  vtlb_insert(v, pte, itir, ifa);
   2.262 -                vcpu_quick_region_set(PSCBX(v,tc_regions),ifa);
   2.263 +                vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
   2.264              }
   2.265          }
   2.266          else if (ps > mrr.ps) {
   2.267              vtlb_insert(v, pte, itir, ifa);
   2.268 -            vcpu_quick_region_set(PSCBX(v,tc_regions),ifa);
   2.269 -            if(!(pte&VTLB_PTE_IO)){
   2.270 +            vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
   2.271 +            if (!(pte & VTLB_PTE_IO)) {
   2.272                  vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
   2.273              }
   2.274          }
   2.275 @@ -572,9 +583,9 @@ int thash_purge_and_insert(VCPU *v, u64 
   2.276      }
   2.277      else{
   2.278          phy_pte = translate_phy_pte(v, &pte, itir, ifa);
   2.279 -        if(ps!=PAGE_SHIFT){
   2.280 +        if (ps != PAGE_SHIFT) {
   2.281              vtlb_insert(v, pte, itir, ifa);
   2.282 -            vcpu_quick_region_set(PSCBX(v,tc_regions),ifa);
   2.283 +            vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
   2.284          }
   2.285          machine_tlb_purge(ifa, ps);
   2.286          vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
   2.287 @@ -593,13 +604,13 @@ void thash_purge_all(VCPU *v)
   2.288      int num;
   2.289      thash_data_t *head;
   2.290      thash_cb_t  *vtlb,*vhpt;
   2.291 -    vtlb =&v->arch.vtlb;
   2.292 -    vhpt =&v->arch.vhpt;
   2.293 +    vtlb = &v->arch.vtlb;
   2.294 +    vhpt = &v->arch.vhpt;
   2.295  
   2.296      for (num = 0; num < 8; num++)
   2.297          VMX(v, psbits[num]) = 0;
   2.298      
   2.299 -    head=vtlb->hash;
   2.300 +    head = vtlb->hash;
   2.301      num = (vtlb->hash_sz/sizeof(thash_data_t));
   2.302      do{
   2.303          head->page_flags = 0;
   2.304 @@ -608,10 +619,10 @@ void thash_purge_all(VCPU *v)
   2.305          head->next = 0;
   2.306          head++;
   2.307          num--;
   2.308 -    }while(num);
   2.309 +    } while(num);
   2.310      cch_mem_init(vtlb);
   2.311      
   2.312 -    head=vhpt->hash;
   2.313 +    head = vhpt->hash;
   2.314      num = (vhpt->hash_sz/sizeof(thash_data_t));
   2.315      do{
   2.316          head->page_flags = 0;
   2.317 @@ -619,7 +630,7 @@ void thash_purge_all(VCPU *v)
   2.318          head->next = 0;
   2.319          head++;
   2.320          num--;
   2.321 -    }while(num);
   2.322 +    } while(num);
   2.323      cch_mem_init(vhpt);
   2.324      local_flush_tlb_all();
   2.325  }
   2.326 @@ -635,19 +646,19 @@ void thash_purge_all(VCPU *v)
   2.327  
   2.328  thash_data_t *vtlb_lookup(VCPU *v, u64 va,int is_data)
   2.329  {
   2.330 -    thash_data_t  *cch;
   2.331 -    u64     psbits, ps, tag;
   2.332 +    thash_data_t *cch;
   2.333 +    u64 psbits, ps, tag;
   2.334      ia64_rr vrr;
   2.335 -    thash_cb_t * hcb= &v->arch.vtlb;
   2.336 +    thash_cb_t *hcb = &v->arch.vtlb;
   2.337  
   2.338 -    cch = __vtr_lookup(v, va, is_data);;
   2.339 +    cch = __vtr_lookup(v, va, is_data);
   2.340      if (cch)
   2.341  	return cch;
   2.342  
   2.343 -    if (vcpu_quick_region_check(v->arch.tc_regions,va) == 0)
   2.344 +    if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
   2.345          return NULL;
   2.346      psbits = VMX(v, psbits[(va >> 61)]);
   2.347 -    vcpu_get_rr(v,va,&vrr.rrval);
   2.348 +    vcpu_get_rr(v, va, &vrr.rrval);
   2.349      while (psbits) {
   2.350          ps = __ffs(psbits);
   2.351          psbits &= ~(1UL << ps);
   2.352 @@ -676,16 +687,16 @@ static void thash_init(thash_cb_t *hcb, 
   2.353      hcb->pta.ve = 1;
   2.354      hcb->pta.size = sz;
   2.355      
   2.356 -    head=hcb->hash;
   2.357 +    head = hcb->hash;
   2.358      num = (hcb->hash_sz/sizeof(thash_data_t));
   2.359 -    do{
   2.360 +    do {
   2.361          head->page_flags = 0;
   2.362          head->itir = 0;
   2.363 -        head->etag = 1UL<<63;
   2.364 +        head->etag = 1UL << 63;
   2.365          head->next = 0;
   2.366          head++;
   2.367          num--;
   2.368 -    }while(num);
   2.369 +    } while(num);
   2.370  
   2.371      hcb->cch_free_idx = 0;
   2.372      hcb->cch_freelist = NULL;
     3.1 --- a/xen/include/asm-ia64/linux-xen/asm/pgtable.h	Mon Sep 17 11:08:46 2007 -0600
     3.2 +++ b/xen/include/asm-ia64/linux-xen/asm/pgtable.h	Mon Sep 17 11:26:21 2007 -0600
     3.3 @@ -73,6 +73,7 @@
     3.4  #ifdef XEN
     3.5  #define _PAGE_VIRT_D		(__IA64_UL(1) << 53)	/* Virtual dirty bit */
     3.6  #define _PAGE_PROTNONE		0
     3.7 +#define _PAGE_PL_PRIV		(CONFIG_CPL0_EMUL << 7)
     3.8  
     3.9  #ifdef CONFIG_XEN_IA64_TLB_TRACK
    3.10  #define _PAGE_TLB_TRACKING_BIT          54
     4.1 --- a/xen/include/asm-ia64/vmmu.h	Mon Sep 17 11:08:46 2007 -0600
     4.2 +++ b/xen/include/asm-ia64/vmmu.h	Mon Sep 17 11:26:21 2007 -0600
     4.3 @@ -28,7 +28,6 @@
     4.4  #define     DEFAULT_VHPT_SZ     (23) // 8M hash + 8M c-chain for VHPT
     4.5  #define     VTLB(v,_x)          (v->arch.vtlb._x)
     4.6  #define     VHPT(v,_x)          (v->arch.vhpt._x)
     4.7 -#define     _PAGE_PL_PRIV       (CONFIG_CPL0_EMUL << 7)
     4.8  
     4.9  #ifndef __ASSEMBLY__
    4.10  
    4.11 @@ -39,34 +38,6 @@
    4.12  #include <asm/regionreg.h>
    4.13  #include <asm/vmx_mm_def.h>
    4.14  #include <asm/bundle.h>
    4.15 -//#define         THASH_TLB_TR            0
    4.16 -//#define         THASH_TLB_TC            1
    4.17 -
    4.18 -
    4.19 -// bit definition of TR, TC search cmobination
    4.20 -//#define         THASH_SECTION_TR        (1<<0)
    4.21 -//#define         THASH_SECTION_TC        (1<<1)
    4.22 -
    4.23 -/*
    4.24 - * Next bit definition must be same with THASH_TLB_XX
    4.25 -#define         PTA_BASE_SHIFT          (15)
    4.26 - */
    4.27 -
    4.28 -
    4.29 -
    4.30 -
    4.31 -#define HIGH_32BITS(x)  bits(x,32,63)
    4.32 -#define LOW_32BITS(x)   bits(x,0,31)
    4.33 -
    4.34 -typedef union search_section {
    4.35 -    struct {
    4.36 -        u32 tr : 1;
    4.37 -        u32 tc : 1;
    4.38 -        u32 rsv: 30;
    4.39 -    };
    4.40 -    u32     v;
    4.41 -} search_section_t;
    4.42 -
    4.43  
    4.44  enum {
    4.45      ISIDE_TLB=0,
    4.46 @@ -169,28 +140,6 @@ static inline u64 xen_to_arch_ppn(u64 xp
    4.47      return (xppn <<(PAGE_SHIFT- ARCH_PAGE_SHIFT));
    4.48  }
    4.49  
    4.50 -typedef enum {
    4.51 -    THASH_TLB=0,
    4.52 -    THASH_VHPT
    4.53 -} THASH_TYPE;
    4.54 -
    4.55 -struct thash_cb;
    4.56 -/*
    4.57 - * Use to calculate the HASH index of thash_data_t.
    4.58 - */
    4.59 -typedef u64 *(THASH_FN)(PTA pta, u64 va);
    4.60 -typedef u64 *(TTAG_FN)(PTA pta, u64 va);
    4.61 -typedef u64 *(GET_MFN_FN)(domid_t d, u64 gpfn, u64 pages);
    4.62 -typedef void *(REM_NOTIFIER_FN)(struct thash_cb *hcb, thash_data_t *entry);
    4.63 -typedef void (RECYCLE_FN)(struct thash_cb *hc, u64 para);
    4.64 -typedef ia64_rr (GET_RR_FN)(struct vcpu *vcpu, u64 reg);
    4.65 -typedef thash_data_t *(FIND_OVERLAP_FN)(struct thash_cb *hcb, 
    4.66 -        u64 va, u64 ps, int rid, char cl, search_section_t s_sect);
    4.67 -typedef thash_data_t *(FIND_NEXT_OVL_FN)(struct thash_cb *hcb);
    4.68 -typedef void (REM_THASH_FN)(struct thash_cb *hcb, thash_data_t *entry);
    4.69 -typedef void (INS_THASH_FN)(struct thash_cb *hcb, thash_data_t *entry, u64 va);
    4.70 -
    4.71 -
    4.72  typedef struct thash_cb {
    4.73      /* THASH base information */
    4.74      thash_data_t    *hash; // hash table pointer, aligned at thash_sz.
    4.75 @@ -224,45 +173,6 @@ extern void thash_free(thash_cb_t *hcb);
    4.76  //extern void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va);
    4.77  //extern void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx);
    4.78  extern int vtr_find_overlap(struct vcpu *vcpu, u64 va, u64 ps, int is_data);
    4.79 -extern u64 get_mfn(struct domain *d, u64 gpfn);
    4.80 -/*
    4.81 - * Force to delete a found entry no matter TR or foreign map for TLB.
    4.82 - *    NOTES:
    4.83 - *      1: TLB entry may be TR, TC or Foreign Map. For TR entry,
    4.84 - *         itr[]/dtr[] need to be updated too.
    4.85 - *      2: This API must be called after thash_find_overlap() or
    4.86 - *         thash_find_next_overlap().
    4.87 - *      3: Return TRUE or FALSE
    4.88 - *
    4.89 - */
    4.90 -extern void thash_remove(thash_cb_t *hcb, thash_data_t *entry);
    4.91 -extern void thash_tr_remove(thash_cb_t *hcb, thash_data_t *entry/*, int idx*/);
    4.92 -
    4.93 -/*
    4.94 - * Find an overlap entry in hash table and its collision chain.
    4.95 - * Refer to SDM2 4.1.1.4 for overlap definition.
    4.96 - *    PARAS:
    4.97 - *      1: in: TLB format entry, rid:ps must be same with vrr[].
    4.98 - *             va & ps identify the address space for overlap lookup
    4.99 - *      2: section can be combination of TR, TC and FM. (THASH_SECTION_XX)
   4.100 - *      3: cl means I side or D side.
   4.101 - *    RETURNS:
   4.102 - *      NULL to indicate the end of findings.
   4.103 - *    NOTES:
   4.104 - *
   4.105 - */
   4.106 -extern thash_data_t *thash_find_overlap(thash_cb_t *hcb, 
   4.107 -                        thash_data_t *in, search_section_t s_sect);
   4.108 -extern thash_data_t *thash_find_overlap_ex(thash_cb_t *hcb, 
   4.109 -                u64 va, u64 ps, int rid, char cl, search_section_t s_sect);
   4.110 -
   4.111 -
   4.112 -/*
   4.113 - * Similar with thash_find_overlap but find next entry.
   4.114 - *    NOTES:
   4.115 - *      Intermediate position information is stored in hcb->priv.
   4.116 - */
   4.117 -extern thash_data_t *thash_find_next_overlap(thash_cb_t *hcb);
   4.118  
   4.119  /*
   4.120   * Find and purge overlap entries in hash table and its collision chain.
   4.121 @@ -290,7 +200,6 @@ extern void thash_purge_all(struct vcpu 
   4.122   *
   4.123   */
   4.124  extern thash_data_t *vtlb_lookup(struct vcpu *v,u64 va,int is_data);
   4.125 -extern int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock);
   4.126  
   4.127  
   4.128  #define   ITIR_RV_MASK      (((1UL<<32)-1)<<32 | 0x3)
   4.129 @@ -298,12 +207,10 @@ extern int thash_lock_tc(thash_cb_t *hcb
   4.130  #define   PAGE_FLAGS_AR_PL_MASK ((0x7UL<<9)|(0x3UL<<7))
   4.131  extern u64 machine_ttag(PTA pta, u64 va);
   4.132  extern u64 machine_thash(PTA pta, u64 va);
   4.133 -extern void purge_machine_tc_by_domid(domid_t domid);
   4.134  extern void machine_tlb_insert(struct vcpu *v, thash_data_t *tlb);
   4.135  extern ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va);
   4.136  extern int init_domain_tlb(struct vcpu *v);
   4.137  extern void free_domain_tlb(struct vcpu *v);
   4.138 -extern thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag);
   4.139  extern thash_data_t * vhpt_lookup(u64 va);
   4.140  extern void machine_tlb_purge(u64 va, u64 ps);
   4.141  extern unsigned long fetch_code(struct vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle);