ia64/xen-unstable

changeset 16340:ef6415fdaf8a

[IA64] Simplify thash_purge_and_insert()

This patch simplifies thash_purge_and_insert() for readability.
PV domains never use this function.

Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
author Alex Williamson <alex.williamson@hp.com>
date Wed Nov 07 11:01:23 2007 -0700 (2007-11-07)
parents e1cc10766f15
children a1247c2df2b4
files xen/arch/ia64/vmx/vtlb.c xen/include/asm-ia64/vmmu.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/vtlb.c	Wed Nov 07 10:53:41 2007 -0700
     1.2 +++ b/xen/arch/ia64/vmx/vtlb.c	Wed Nov 07 11:01:23 2007 -0700
     1.3 @@ -413,7 +413,7 @@ static thash_data_t *__alloc_chain(thash
     1.4   *  3: The caller need to make sure the new entry will not overlap
     1.5   *     with any existed entry.
     1.6   */
     1.7 -void vtlb_insert(VCPU *v, u64 pte, u64 itir, u64 va)
     1.8 +static void vtlb_insert(VCPU *v, u64 pte, u64 itir, u64 va)
     1.9  {
    1.10      thash_data_t *hash_table, *cch;
    1.11      /* int flag; */
    1.12 @@ -422,6 +422,8 @@ void vtlb_insert(VCPU *v, u64 pte, u64 i
    1.13      u64 tag, len;
    1.14      thash_cb_t *hcb = &v->arch.vtlb;
    1.15  
    1.16 +    vcpu_quick_region_set(PSCBX(v, tc_regions), va);
    1.17 +
    1.18      vcpu_get_rr(v, va, &vrr.rrval);
    1.19      vrr.ps = itir_ps(itir);
    1.20      VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
    1.21 @@ -545,60 +547,35 @@ u64 translate_phy_pte(VCPU *v, u64 *pte,
    1.22   */
    1.23  int thash_purge_and_insert(VCPU *v, u64 pte, u64 itir, u64 ifa, int type)
    1.24  {
    1.25 -    u64 ps;//, va;
    1.26 -    u64 phy_pte;
    1.27 +    u64 ps, phy_pte, psr;
    1.28      ia64_rr mrr;
    1.29 -    int ret = 0;
    1.30  
    1.31      ps = itir_ps(itir);
    1.32      mrr.rrval = ia64_get_rr(ifa);
    1.33 -    if (VMX_DOMAIN(v)) {
    1.34 -        phy_pte = translate_phy_pte(v, &pte, itir, ifa);
    1.35 +
    1.36 +    phy_pte = translate_phy_pte(v, &pte, itir, ifa);
    1.37 +
    1.38 +    vtlb_purge(v, ifa, ps);
    1.39 +    vhpt_purge(v, ifa, ps);
    1.40 +
    1.41 +    if (pte & VTLB_PTE_IO) {
    1.42 +        vtlb_insert(v, pte, itir, ifa);
    1.43 +        return 1;
    1.44 +    }
    1.45  
    1.46 -        if (pte & VTLB_PTE_IO)
    1.47 -            ret = 1;
    1.48 -        vtlb_purge(v, ifa, ps);
    1.49 -        vhpt_purge(v, ifa, ps);
    1.50 -        if (ps == mrr.ps) {
    1.51 -            if (!(pte & VTLB_PTE_IO)) {
    1.52 -                vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
    1.53 -            }
    1.54 -            else{
    1.55 -                vtlb_insert(v, pte, itir, ifa);
    1.56 -                vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
    1.57 -            }
    1.58 -        }
    1.59 -        else if (ps > mrr.ps) {
    1.60 -            vtlb_insert(v, pte, itir, ifa);
    1.61 -            vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
    1.62 -            if (!(pte & VTLB_PTE_IO)) {
    1.63 -                vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
    1.64 -            }
    1.65 -        }
    1.66 -        else {
    1.67 -            u64 psr;
    1.68 +    if (ps != mrr.ps)
    1.69 +        vtlb_insert(v, pte, itir, ifa);
    1.70  
    1.71 -            vtlb_insert(v, pte, itir, ifa);
    1.72 -            vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
    1.73 -            if (!(pte & VTLB_PTE_IO)) {
    1.74 -                phy_pte  &= ~PAGE_FLAGS_RV_MASK;
    1.75 -                psr = ia64_clear_ic();
    1.76 -                ia64_itc(type + 1, ifa, phy_pte, IA64_ITIR_PS_KEY(ps, 0));
    1.77 -                ia64_set_psr(psr);
    1.78 -                ia64_srlz_i();
    1.79 -            }
    1.80 -        }
    1.81 +    if (ps >= mrr.ps) {
    1.82 +        vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
    1.83 +    } else { /* Subpaging */
    1.84 +        phy_pte &= ~PAGE_FLAGS_RV_MASK;
    1.85 +        psr = ia64_clear_ic();
    1.86 +        ia64_itc(type + 1, ifa, phy_pte, IA64_ITIR_PS_KEY(ps, 0));
    1.87 +        ia64_set_psr(psr);
    1.88 +        ia64_srlz_i();
    1.89      }
    1.90 -    else{
    1.91 -        phy_pte = translate_phy_pte(v, &pte, itir, ifa);
    1.92 -        if (ps != PAGE_SHIFT) {
    1.93 -            vtlb_insert(v, pte, itir, ifa);
    1.94 -            vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
    1.95 -        }
    1.96 -        machine_tlb_purge(ifa, ps);
    1.97 -        vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
    1.98 -    }
    1.99 -    return ret;
   1.100 +    return 0;
   1.101  }
   1.102  
   1.103  /*
     2.1 --- a/xen/include/asm-ia64/vmmu.h	Wed Nov 07 10:53:41 2007 -0700
     2.2 +++ b/xen/include/asm-ia64/vmmu.h	Wed Nov 07 11:01:23 2007 -0700
     2.3 @@ -216,7 +216,6 @@ extern void machine_tlb_purge(u64 va, u6
     2.4  extern unsigned long fetch_code(struct vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle);
     2.5  extern void emulate_io_inst(struct vcpu *vcpu, u64 padr, u64 ma);
     2.6  extern int vhpt_enabled(struct vcpu *vcpu, uint64_t vadr, vhpt_ref_t ref);
     2.7 -extern void vtlb_insert(struct vcpu *vcpu, u64 pte, u64 itir, u64 va);
     2.8  extern u64 translate_phy_pte(struct vcpu *v, u64 *pte, u64 itir, u64 va);
     2.9  extern void thash_vhpt_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa,
    2.10                                int type);