ia64/xen-unstable

changeset 19118:259d6ac13704

PoD: Remove on-stack arrays from p2m_pod_zero_check_superpage
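
The old implementation kept three 512-entry arrays (mfns[], types[] and map[])
on the hypervisor stack while checking a candidate 2MB region, and held up to
512 domain-page mappings live at once.  This patch replaces the arrays with a
single mfn/type pair (only the first entry matters, since every other entry
must be contiguous with it and of the same type anyway) and maps each 4k page
only for as long as it is being scanned.  The failure path now restores the
whole range with one order-9 set_p2m_entry() call instead of tracking how many
entries had been replaced (reset_max).

As a rough, illustrative estimate of the stack space this frees on x86_64
(assuming an 8-byte mfn_t, 8-byte pointers and a 4-byte p2m_type_t enum; the
snippet below is not part of the patch, and the *_approx typedefs are
stand-ins for the real Xen types):

    #include <stdio.h>

    /* Stand-in typedefs with the sizes assumed above; the real types live
     * in the Xen headers. */
    typedef unsigned long mfn_t_approx;   /* assumed 8 bytes on x86_64 */
    typedef int p2m_type_t_approx;        /* enum, assumed 4 bytes     */

    int main(void)
    {
        unsigned long bytes = (1UL << 9) * (sizeof(mfn_t_approx) +
                                            sizeof(p2m_type_t_approx) +
                                            sizeof(unsigned long *));

        /* Roughly 10 KiB that no longer lives on the hypervisor stack. */
        printf("approx. on-stack bytes removed: %lu\n", bytes);
        return 0;
    }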

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jan 29 12:48:33 2009 +0000 (2009-01-29)
parents 31798b19f25c
children aa89d1afb287
files xen/arch/x86/mm/p2m.c
line diff
     1.1 --- a/xen/arch/x86/mm/p2m.c	Thu Jan 29 12:44:31 2009 +0000
     1.2 +++ b/xen/arch/x86/mm/p2m.c	Thu Jan 29 12:48:33 2009 +0000
     1.3 @@ -713,14 +713,15 @@ p2m_pod_dump_data(struct domain *d)
     1.4  
     1.5  #define superpage_aligned(_x)  (((_x)&((1<<9)-1))==0)
     1.6  
     1.7 -/* Must be called w/ p2m lock held, page_alloc lock not held */
     1.8 +/* Search for all-zero superpages to be reclaimed as superpages for the
     1.9 + * PoD cache. Must be called w/ p2m lock held, page_alloc lock not held. */
    1.10  static int
    1.11  p2m_pod_zero_check_superpage(struct domain *d, unsigned long gfn)
    1.12  {
    1.13 -    mfn_t mfns[1<<9];
    1.14 -    p2m_type_t types[1<<9];
    1.15 -    unsigned long * map[1<<9] = { NULL };
    1.16 -    int ret=0, reset = 0, reset_max = 0;
    1.17 +    mfn_t mfn, mfn0 = _mfn(INVALID_MFN);
    1.18 +    p2m_type_t type, type0 = 0;
    1.19 +    unsigned long * map = NULL;
    1.20 +    int ret=0, reset = 0;
    1.21      int i, j;
    1.22  
    1.23      if ( !superpage_aligned(gfn) )
    1.24 @@ -730,7 +731,14 @@ p2m_pod_zero_check_superpage(struct doma
    1.25       * and aligned, and mapping them. */
    1.26      for ( i=0; i<(1<<9); i++ )
    1.27      {
    1.28 -        mfns[i] = gfn_to_mfn_query(d, gfn + i, types + i);
    1.29 +        
    1.30 +        mfn = gfn_to_mfn_query(d, gfn + i, &type);
    1.31 +
    1.32 +        if ( i == 0 )
    1.33 +        {
    1.34 +            mfn0 = mfn;
    1.35 +            type0 = type;
    1.36 +        }
    1.37  
    1.38          /* Conditions that must be met for superpage-superpage:
    1.39           * + All gfns are ram types
    1.40 @@ -739,36 +747,37 @@ p2m_pod_zero_check_superpage(struct doma
    1.41           * + The first mfn is 2-meg aligned
    1.42           * + All the other mfns are in sequence
    1.43           */
    1.44 -        if ( p2m_is_ram(types[i])
    1.45 -             && types[i] == types[0]
    1.46 -             && ( (mfn_to_page(mfns[i])->count_info & PGC_page_table) == 0 )
    1.47 -             && ( ( i == 0 && superpage_aligned(mfn_x(mfns[0])) )
    1.48 -                  || ( i != 0 && mfn_x(mfns[i]) == mfn_x(mfns[0]) + i ) ) )
    1.49 -            map[i] = map_domain_page(mfn_x(mfns[i]));
    1.50 -        else
    1.51 -            goto out_unmap;
    1.52 +        if ( !p2m_is_ram(type)
    1.53 +             || type != type0
    1.54 +             || ( (mfn_to_page(mfn)->count_info & PGC_page_table) != 0 )
    1.55 +             || !( ( i == 0 && superpage_aligned(mfn_x(mfn0)) )
    1.56 +                   || ( i != 0 && mfn_x(mfn) == (mfn_x(mfn0) + i) ) ) )
    1.57 +            goto out;
    1.58      }
    1.59  
    1.60      /* Now, do a quick check to see if it may be zero before unmapping. */
    1.61      for ( i=0; i<(1<<9); i++ )
    1.62      {
    1.63          /* Quick zero-check */
    1.64 +        map = map_domain_page(mfn_x(mfn0) + i);
    1.65 +
    1.66          for ( j=0; j<16; j++ )
    1.67 -            if( *(map[i]+j) != 0 )
    1.68 +            if( *(map+j) != 0 )
    1.69                  break;
    1.70  
    1.71 +        unmap_domain_page(map);
    1.72 +
    1.73          if ( j < 16 )
    1.74 -            goto out_unmap;
    1.75 +            goto out;
    1.76  
    1.77      }
    1.78  
    1.79      /* Try to remove the page, restoring old mapping if it fails. */
    1.80 -    reset_max = 1<<9;
    1.81      set_p2m_entry(d, gfn,
    1.82                    _mfn(POPULATE_ON_DEMAND_MFN), 9,
    1.83                    p2m_populate_on_demand);
    1.84  
    1.85 -    if ( (mfn_to_page(mfns[0])->u.inuse.type_info & PGT_count_mask) != 0 )
    1.86 +    if ( (mfn_to_page(mfn0)->u.inuse.type_info & PGT_count_mask) != 0 )
    1.87      {
    1.88          reset = 1;
    1.89          goto out_reset;
    1.90 @@ -793,36 +802,30 @@ p2m_pod_zero_check_superpage(struct doma
    1.91      /* Finally, do a full zero-check */
    1.92      for ( i=0; i < (1<<9); i++ )
    1.93      {
    1.94 -        for ( j=0; j<PAGE_SIZE/sizeof(*map[i]); j++ )
    1.95 -            if( *(map[i]+j) != 0 )
    1.96 +        map = map_domain_page(mfn_x(mfn0) + i);
    1.97 +
    1.98 +        for ( j=0; j<PAGE_SIZE/sizeof(*map); j++ )
    1.99 +            if( *(map+j) != 0 )
   1.100              {
   1.101                  reset = 1;
   1.102                  break;
   1.103              }
   1.104  
   1.105 +        unmap_domain_page(map);
   1.106 +
   1.107          if ( reset )
   1.108              goto out_reset;
   1.109      }
   1.110  
   1.111      /* Finally!  We've passed all the checks, and can add the mfn superpage
   1.112       * back on the PoD cache, and account for the new p2m PoD entries */
   1.113 -    p2m_pod_cache_add(d, mfn_to_page(mfns[0]), 9);
   1.114 +    p2m_pod_cache_add(d, mfn_to_page(mfn0), 9);
   1.115      d->arch.p2m->pod.entry_count += (1<<9);
   1.116  
   1.117  out_reset:
   1.118      if ( reset )
   1.119 -    {
   1.120 -        if (reset_max == (1<<9) )
   1.121 -            set_p2m_entry(d, gfn, mfns[0], 9, types[0]);
   1.122 -        else
   1.123 -            for ( i=0; i<reset_max; i++)
   1.124 -                set_p2m_entry(d, gfn + i, mfns[i], 0, types[i]);
   1.125 -    }
   1.126 +        set_p2m_entry(d, gfn, mfn0, 9, type0);
   1.127      
   1.128 -out_unmap:
   1.129 -    for ( i=0; i<(1<<9); i++ )
   1.130 -        if ( map[i] )
   1.131 -            unmap_domain_page(map[i]);
   1.132  out:
   1.133      return ret;
   1.134  }
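
For readers following the diff above: after the patch, the zero-scan maps each
page of the candidate superpage only for the duration of its scan.  A minimal
sketch of that pattern (not the Xen code itself; it assumes Xen's
map_domain_page()/unmap_domain_page() helpers and PAGE_SIZE, takes a raw
machine frame number where the real code uses mfn_x(mfn0), and returns a flag
where the real function jumps to its out_reset label):

    /* Sketch: scan the 512 pages of a 2MB candidate starting at mfn0 for
     * non-zero data, holding at most one domain-page mapping at a time.
     * Returns 1 if every page is zero, 0 otherwise. */
    static int superpage_is_zero(unsigned long mfn0)
    {
        unsigned long i, j;

        for ( i = 0; i < (1 << 9); i++ )
        {
            unsigned long *p = map_domain_page(mfn0 + i);
            int clean = 1;

            for ( j = 0; j < PAGE_SIZE / sizeof(*p); j++ )
                if ( p[j] != 0 )
                {
                    clean = 0;
                    break;
                }

            unmap_domain_page(p);

            if ( !clean )
                return 0;
        }

        return 1;
    }

Compared with the old approach, the scan makes 512 map/unmap calls instead of
one batched pass, but it never holds more than one mapping of the range at a
time and needs no per-page bookkeeping on the stack.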