direct-io.hg

changeset 11331:261b95f114a2

[IA64] VTLB optimization: Reuse invalid entry

When inserting an entry into the vtlb or vhpt, walk the collision chain and reuse the first invalid entry instead of only checking the chain head.
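
A minimal sketch of the reuse pattern in standalone C: the real code (see the diff below) walks the VHPT/VTLB collision chain and recycles the first entry marked invalid rather than always growing the chain. The entry layout, the ENTRY_INVALID flag bit, and the insert_reuse_invalid() helper here are hypothetical stand-ins for thash_data_t, INVALID_VHPT()/INVALID_TLB(), and the open-coded loops in vmx_vhpt_insert()/vtlb_insert().

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical, simplified entry; 'next' links the collision chain. */
    #define ENTRY_INVALID (1UL << 63)   /* assumed free-slot marker */

    typedef struct thash_entry {
        uint64_t page_flags;            /* pte bits; ENTRY_INVALID => slot free */
        uint64_t itir;
        uint64_t etag;
        struct thash_entry *next;
    } thash_entry_t;

    /* Reuse the first invalid entry on the chain headed at 'head'.
     * Returns 1 on success; 0 means the caller must extend or
     * recycle the chain, as the hunks below do past this point. */
    static int insert_reuse_invalid(thash_entry_t *head, uint64_t pte,
                                    uint64_t itir, uint64_t tag)
    {
        for (thash_entry_t *cch = head; cch != NULL; cch = cch->next) {
            if (cch->page_flags & ENTRY_INVALID) {
                cch->page_flags = pte;  /* overwrite the dead slot in place */
                cch->itir = itir;
                cch->etag = tag;
                return 1;
            }
        }
        return 0;
    }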

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Thu Aug 24 11:32:55 2006 -0600 (2006-08-24)
parents 3e54734e55f3
children c1261ca0d321
files xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c	Wed Aug 23 13:26:46 2006 -0600
+++ b/xen/arch/ia64/vmx/vtlb.c	Thu Aug 24 11:32:55 2006 -0600
@@ -148,13 +148,17 @@ static void vmx_vhpt_insert(thash_cb_t *
     rr.rrval = ia64_get_rr(ifa);
     head = (thash_data_t *)ia64_thash(ifa);
     tag = ia64_ttag(ifa);
-    if( INVALID_VHPT(head) ) {
-        len = head->len;
-        head->page_flags = pte;
-        head->len = len;
-        head->itir = rr.ps << 2;
-        head->etag = tag;
-        return;
+    cch = head;
+    while (cch) {
+        if (INVALID_VHPT(cch)) {
+            len = cch->len;
+            cch->page_flags = pte;
+            cch->len = len;
+            cch->itir = rr.ps << 2;
+            cch->etag = tag;
+            return;
+        }
+        cch = cch->next;
     }
 
     if(head->len>=MAX_CCN_DEPTH){
@@ -358,24 +362,20 @@ void vtlb_insert(VCPU *v, u64 pte, u64 i
     u64 tag, len;
     thash_cb_t *hcb = &v->arch.vtlb;
     vcpu_get_rr(v, va, &vrr.rrval);
-#ifdef VTLB_DEBUG
-    if (vrr.ps != itir_ps(itir)) {
-//        machine_tlb_insert(hcb->vcpu, entry);
-        panic_domain(NULL, "not preferred ps with va: 0x%lx vrr.ps=%d ps=%ld\n",
-             va, vrr.ps, itir_ps(itir));
-        return;
-    }
-#endif
     vrr.ps = itir_ps(itir);
     VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
     hash_table = vsa_thash(hcb->pta, va, vrr.rrval, &tag);
-    if( INVALID_TLB(hash_table) ) {
-        len = hash_table->len;
-        hash_table->page_flags = pte;
-        hash_table->len = len;
-        hash_table->itir=itir;
-        hash_table->etag=tag;
-        return;
+    cch = hash_table;
+    while (cch) {
+        if (INVALID_TLB(cch)) {
+            len = cch->len;
+            cch->page_flags = pte;
+            cch->len = len;
+            cch->itir=itir;
+            cch->etag=tag;
+            return;
+        }
+        cch = cch->next;
     }
     if (hash_table->len>=MAX_CCN_DEPTH){
         thash_recycle_cch(hcb, hash_table);
@@ -469,10 +469,6 @@ void thash_purge_and_insert(VCPU *v, u64
     ps = itir_ps(itir);
     vcpu_get_rr(current, ifa, &vrr.rrval);
     mrr.rrval = ia64_get_rr(ifa);
-//    if (vrr.ps != itir_ps(itir)) {
-//        printf("not preferred ps with va: 0x%lx vrr.ps=%d ps=%ld\n",
-//               ifa, vrr.ps, itir_ps(itir));
-//    }
     if(VMX_DOMAIN(v)){
         /* Ensure WB attribute if pte is related to a normal mem page,
          * which is required by vga acceleration since qemu maps shared