ia64/xen-unstable

changeset 19136:4a2f93fb03eb

x86-64: fold shadow_page_info fields into page_info

... combining the list entry members of both structures and removing
the artificial 'mbz' member (shadow code must keep the real underlying
member, 'count_info', at zero for the lifetime of a page's use as a
shadow).
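
For reference, a condensed sketch of the resulting struct page_info
layout (abridged from the mm.h hunk below; only the shadow-relevant
members are shown, and the helper types are inlined per that hunk):

    /* Minimal self-contained sketch; types as in the mm.h hunk below. */
    #define __mfn_t unsigned int           /* MFN type used inside page_info */
    typedef unsigned long paddr_t;         /* 64-bit physical address on x86-64 */
    struct page_list_entry { __mfn_t next, prev; };

    struct page_info {
        union {
            /* Free shadows: freelist link; pinned shadows: pinned-list link. */
            struct page_list_entry list;
            /* Non-pinnable shadows: a higher shadow entry that points at us. */
            paddr_t up;
        };
        unsigned long count_info;          /* must stay zero while a shadow */
        union {
            /* ('inuse' and 'free' members of the real struct elided) */
            struct {
                unsigned long type:5;      /* what kind of shadow? */
                unsigned long pinned:1;    /* is the shadow pinned? */
                unsigned long count:26;    /* reference count */
                union {
                    __mfn_t back;          /* in use: GMFN we shadow */
                    unsigned int order;    /* free: order of freelist */
                };
            } sh;
        } u;
        /* (tlbflush_timestamp / next_shadow union and the rest elided) */
    };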

This also fixes a latent issue: u.inuse._domain was not explicitly
cleared before shadow pages were returned to the domain heap - it just
so happened that this member was zero in all (normal?) cases when a
shadow page ended its life, but there were neither build-time nor
run-time assertions that this would actually be the case. The bug was
exposed by a subsequent patch changing the order of fields in struct
page_info.
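
The fix amounts to clearing the owner field of every page in a chunk
before handing the chunk back (excerpt from the sh_set_allocation()
hunk in common.c below):

    /*
     * The pages were allocated anonymously, but the owner field
     * gets overwritten normally, so need to clear it here.
     */
    for ( j = 0; j < 1U << order; j++ )
        page_set_owner(&((struct page_info *)sp)[j], NULL);
    ...
    free_domheap_pages((struct page_info *)sp, order);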

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jan 30 11:08:06 2009 +0000 (2009-01-30)
parents deab3a069185
children 6fe44eb28f52
files xen/arch/x86/mm/shadow/common.c xen/arch/x86/mm/shadow/multi.c xen/arch/x86/mm/shadow/private.h xen/include/asm-x86/domain.h xen/include/asm-x86/mm.h
line diff
     1.1 --- a/xen/arch/x86/mm/shadow/common.c	Fri Jan 30 11:04:24 2009 +0000
     1.2 +++ b/xen/arch/x86/mm/shadow/common.c	Fri Jan 30 11:08:06 2009 +0000
     1.3 @@ -48,9 +48,9 @@ void shadow_domain_init(struct domain *d
     1.4      int i;
     1.5      shadow_lock_init(d);
     1.6      for ( i = 0; i <= SHADOW_MAX_ORDER; i++ )
     1.7 -        INIT_LIST_HEAD(&d->arch.paging.shadow.freelists[i]);
     1.8 +        INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.freelists[i]);
     1.9      INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.p2m_freelist);
    1.10 -    INIT_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
    1.11 +    INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
    1.12  
    1.13      /* Use shadow pagetables for log-dirty support */
    1.14      paging_log_dirty_init(d, shadow_enable_log_dirty, 
    1.15 @@ -1291,9 +1291,9 @@ static inline int space_is_available(
    1.16      for ( ; order <= shadow_max_order(d); ++order )
    1.17      {
    1.18          unsigned int n = count;
    1.19 -        const struct list_head *p;
    1.20 -
    1.21 -        list_for_each ( p, &d->arch.paging.shadow.freelists[order] )
    1.22 +        const struct shadow_page_info *sp;
    1.23 +
    1.24 +        page_list_for_each ( sp, &d->arch.paging.shadow.freelists[order] )
    1.25              if ( --n == 0 )
    1.26                  return 1;
    1.27          count = (count + 1) >> 1;
    1.28 @@ -1307,7 +1307,7 @@ static inline int space_is_available(
    1.29  static void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn)
    1.30  {
    1.31      struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
    1.32 -    switch ( sp->type )
    1.33 +    switch ( sp->u.sh.type )
    1.34      {
    1.35      case SH_type_l2_32_shadow:
    1.36          SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings, 2)(v,smfn);
    1.37 @@ -1322,7 +1322,7 @@ static void shadow_unhook_mappings(struc
    1.38          break;
    1.39  #endif
    1.40      default:
    1.41 -        SHADOW_ERROR("top-level shadow has bad type %08x\n", sp->type);
    1.42 +        SHADOW_ERROR("top-level shadow has bad type %08x\n", sp->u.sh.type);
    1.43          BUG();
    1.44      }
    1.45  }
    1.46 @@ -1334,7 +1334,7 @@ static inline void trace_shadow_prealloc
    1.47          /* Convert smfn to gfn */
    1.48          unsigned long gfn;
    1.49          ASSERT(mfn_valid(smfn));
    1.50 -        gfn = mfn_to_gfn(d, _mfn(mfn_to_shadow_page(smfn)->backpointer));
    1.51 +        gfn = mfn_to_gfn(d, _mfn(mfn_to_shadow_page(smfn)->u.sh.back));
    1.52          __trace_var(TRC_SHADOW_PREALLOC_UNPIN, 0/*!tsc*/,
    1.53                      sizeof(gfn), (unsigned char*)&gfn);
    1.54      }
    1.55 @@ -1350,8 +1350,7 @@ static void _shadow_prealloc(
    1.56      /* Need a vpcu for calling unpins; for now, since we don't have
    1.57       * per-vcpu shadows, any will do */
    1.58      struct vcpu *v, *v2;
    1.59 -    struct list_head *l, *t;
    1.60 -    struct shadow_page_info *sp;
    1.61 +    struct shadow_page_info *sp, *t;
    1.62      mfn_t smfn;
    1.63      int i;
    1.64  
    1.65 @@ -1365,9 +1364,8 @@ static void _shadow_prealloc(
    1.66  
    1.67      /* Stage one: walk the list of pinned pages, unpinning them */
    1.68      perfc_incr(shadow_prealloc_1);
    1.69 -    list_for_each_backwards_safe(l, t, &d->arch.paging.shadow.pinned_shadows)
    1.70 +    page_list_for_each_safe_reverse(sp, t, &d->arch.paging.shadow.pinned_shadows)
    1.71      {
    1.72 -        sp = list_entry(l, struct shadow_page_info, list);
    1.73          smfn = shadow_page_to_mfn(sp);
    1.74  
    1.75          /* Unpin this top-level shadow */
    1.76 @@ -1427,8 +1425,7 @@ void shadow_prealloc(struct domain *d, u
    1.77   * this domain's shadows */
    1.78  static void shadow_blow_tables(struct domain *d) 
    1.79  {
    1.80 -    struct list_head *l, *t;
    1.81 -    struct shadow_page_info *sp;
    1.82 +    struct shadow_page_info *sp, *t;
    1.83      struct vcpu *v = d->vcpu[0];
    1.84      mfn_t smfn;
    1.85      int i;
    1.86 @@ -1436,9 +1433,8 @@ static void shadow_blow_tables(struct do
    1.87      ASSERT(v != NULL);
    1.88  
    1.89      /* Pass one: unpin all pinned pages */
    1.90 -    list_for_each_backwards_safe(l,t, &d->arch.paging.shadow.pinned_shadows)
    1.91 +    page_list_for_each_safe_reverse(sp, t, &d->arch.paging.shadow.pinned_shadows)
    1.92      {
    1.93 -        sp = list_entry(l, struct shadow_page_info, list);
    1.94          smfn = shadow_page_to_mfn(sp);
    1.95          sh_unpin(v, smfn);
    1.96      }
    1.97 @@ -1515,7 +1511,7 @@ mfn_t shadow_alloc(struct domain *d,
    1.98  
    1.99      /* Find smallest order which can satisfy the request. */
   1.100      for ( i = order; i <= SHADOW_MAX_ORDER; i++ )
   1.101 -        if ( !list_empty(&d->arch.paging.shadow.freelists[i]) )
   1.102 +        if ( (sp = page_list_remove_head(&d->arch.paging.shadow.freelists[i])) )
   1.103              goto found;
   1.104      
   1.105      /* If we get here, we failed to allocate. This should never happen.
   1.106 @@ -1526,16 +1522,12 @@ mfn_t shadow_alloc(struct domain *d,
   1.107      BUG();
   1.108  
   1.109   found:
   1.110 -    sp = list_entry(d->arch.paging.shadow.freelists[i].next, 
   1.111 -                    struct shadow_page_info, list);
   1.112 -    list_del(&sp->list);
   1.113 -            
   1.114      /* We may have to halve the chunk a number of times. */
   1.115      while ( i != order )
   1.116      {
   1.117          i--;
   1.118 -        sp->order = i;
   1.119 -        list_add_tail(&sp->list, &d->arch.paging.shadow.freelists[i]);
   1.120 +        sp->u.sh.order = i;
   1.121 +        page_list_add_tail(sp, &d->arch.paging.shadow.freelists[i]);
   1.122          sp += 1 << i;
   1.123      }
   1.124      d->arch.paging.shadow.free_pages -= 1 << order;
   1.125 @@ -1557,11 +1549,11 @@ mfn_t shadow_alloc(struct domain *d,
   1.126          ASSERT(p != NULL);
   1.127          clear_page(p);
   1.128          sh_unmap_domain_page(p);
   1.129 -        INIT_LIST_HEAD(&sp[i].list);
   1.130 -        sp[i].type = shadow_type;
   1.131 -        sp[i].pinned = 0;
   1.132 -        sp[i].count = 0;
   1.133 -        sp[i].backpointer = backpointer;
   1.134 +        INIT_PAGE_LIST_ENTRY(&sp[i].list);
   1.135 +        sp[i].u.sh.type = shadow_type;
   1.136 +        sp[i].u.sh.pinned = 0;
   1.137 +        sp[i].u.sh.count = 0;
   1.138 +        sp[i].u.sh.back = backpointer;
   1.139          sp[i].next_shadow = NULL;
   1.140          perfc_incr(shadow_alloc_count);
   1.141      }
   1.142 @@ -1581,7 +1573,7 @@ void shadow_free(struct domain *d, mfn_t
   1.143      ASSERT(shadow_locked_by_me(d));
   1.144      perfc_incr(shadow_free);
   1.145  
   1.146 -    shadow_type = sp->type;
   1.147 +    shadow_type = sp->u.sh.type;
   1.148      ASSERT(shadow_type != SH_type_none);
   1.149      ASSERT(shadow_type != SH_type_p2m_table);
   1.150      order = shadow_order(shadow_type);
   1.151 @@ -1605,7 +1597,7 @@ void shadow_free(struct domain *d, mfn_t
   1.152          }
   1.153  #endif
   1.154          /* Strip out the type: this is now a free shadow page */
   1.155 -        sp[i].type = 0;
   1.156 +        sp[i].u.sh.type = 0;
   1.157          /* Remember the TLB timestamp so we will know whether to flush 
   1.158           * TLBs when we reuse the page.  Because the destructors leave the
   1.159           * contents of the pages in place, we can delay TLB flushes until
   1.160 @@ -1620,20 +1612,22 @@ void shadow_free(struct domain *d, mfn_t
   1.161          mask = 1 << order;
   1.162          if ( (mfn_x(shadow_page_to_mfn(sp)) & mask) ) {
   1.163              /* Merge with predecessor block? */
   1.164 -            if ( ((sp-mask)->type != PGT_none) || ((sp-mask)->order != order) )
   1.165 +            if ( ((sp-mask)->u.sh.type != PGT_none) ||
   1.166 +                 ((sp-mask)->u.sh.order != order) )
   1.167                  break;
   1.168 -            list_del(&(sp-mask)->list);
   1.169              sp -= mask;
   1.170 +            page_list_del(sp, &d->arch.paging.shadow.freelists[order]);
   1.171          } else {
   1.172              /* Merge with successor block? */
   1.173 -            if ( ((sp+mask)->type != PGT_none) || ((sp+mask)->order != order) )
   1.174 +            if ( ((sp+mask)->u.sh.type != PGT_none) ||
   1.175 +                 ((sp+mask)->u.sh.order != order) )
   1.176                  break;
   1.177 -            list_del(&(sp+mask)->list);
   1.178 +            page_list_del(sp + mask, &d->arch.paging.shadow.freelists[order]);
   1.179          }
   1.180      }
   1.181  
   1.182 -    sp->order = order;
   1.183 -    list_add_tail(&sp->list, &d->arch.paging.shadow.freelists[order]);
   1.184 +    sp->u.sh.order = order;
   1.185 +    page_list_add_tail(sp, &d->arch.paging.shadow.freelists[order]);
   1.186  }
   1.187  
   1.188  /* Divert some memory from the pool to be used by the p2m mapping.
   1.189 @@ -1810,23 +1804,26 @@ static unsigned int sh_set_allocation(st
   1.190              d->arch.paging.shadow.total_pages += 1 << order;
   1.191              for ( j = 0; j < 1U << order; j++ )
   1.192              {
   1.193 -                sp[j].type = 0;  
   1.194 -                sp[j].pinned = 0;
   1.195 -                sp[j].count = 0;
   1.196 -                sp[j].mbz = 0;
   1.197 +                sp[j].u.sh.type = 0;
   1.198 +                sp[j].u.sh.pinned = 0;
   1.199 +                sp[j].u.sh.count = 0;
   1.200                  sp[j].tlbflush_timestamp = 0; /* Not in any TLB */
   1.201              }
   1.202 -            sp->order = order;
   1.203 -            list_add_tail(&sp->list, &d->arch.paging.shadow.freelists[order]);
   1.204 +            sp->u.sh.order = order;
   1.205 +            page_list_add_tail(sp, &d->arch.paging.shadow.freelists[order]);
   1.206          } 
   1.207          else if ( d->arch.paging.shadow.total_pages > pages ) 
   1.208          {
   1.209              /* Need to return memory to domheap */
   1.210              _shadow_prealloc(d, order, 1);
   1.211 -            ASSERT(!list_empty(&d->arch.paging.shadow.freelists[order]));
   1.212 -            sp = list_entry(d->arch.paging.shadow.freelists[order].next,
   1.213 -                            struct shadow_page_info, list);
   1.214 -            list_del(&sp->list);
   1.215 +            sp = page_list_remove_head(&d->arch.paging.shadow.freelists[order]);
   1.216 +            ASSERT(sp);
   1.217 +            /*
   1.218 +             * The pages were allocated anonymously, but the owner field
   1.219 +             * gets overwritten normally, so need to clear it here.
   1.220 +             */
   1.221 +            for ( j = 0; j < 1U << order; j++ )
   1.222 +                page_set_owner(&((struct page_info *)sp)[j], NULL);
   1.223              d->arch.paging.shadow.free_pages -= 1 << order;
   1.224              d->arch.paging.shadow.total_pages -= 1 << order;
   1.225              free_domheap_pages((struct page_info *)sp, order);
   1.226 @@ -1886,37 +1883,38 @@ static void sh_hash_audit_bucket(struct 
   1.227      while ( sp )
   1.228      {
   1.229          /* Not a shadow? */
   1.230 -        BUG_ON( sp->mbz != 0 );
   1.231 +        BUG_ON( sp->count_info != 0 );
   1.232          /* Bogus type? */
   1.233 -        BUG_ON( sp->type == 0 ); 
   1.234 -        BUG_ON( sp->type > SH_type_max_shadow );
   1.235 +        BUG_ON( sp->u.sh.type == 0 );
   1.236 +        BUG_ON( sp->u.sh.type > SH_type_max_shadow );
   1.237          /* Wrong bucket? */
   1.238 -        BUG_ON( sh_hash(sp->backpointer, sp->type) != bucket ); 
   1.239 +        BUG_ON( sh_hash(sp->u.sh.back, sp->u.sh.type) != bucket );
   1.240          /* Duplicate entry? */
   1.241          for ( x = sp->next_shadow; x; x = x->next_shadow )
   1.242 -            BUG_ON( x->backpointer == sp->backpointer && x->type == sp->type );
   1.243 +            BUG_ON( x->u.sh.back == sp->u.sh.back &&
   1.244 +                    x->u.sh.type == sp->u.sh.type );
   1.245          /* Follow the backpointer to the guest pagetable */
   1.246 -        if ( sp->type != SH_type_fl1_32_shadow
   1.247 -             && sp->type != SH_type_fl1_pae_shadow
   1.248 -             && sp->type != SH_type_fl1_64_shadow )
   1.249 +        if ( sp->u.sh.type != SH_type_fl1_32_shadow
   1.250 +             && sp->u.sh.type != SH_type_fl1_pae_shadow
   1.251 +             && sp->u.sh.type != SH_type_fl1_64_shadow )
   1.252          {
   1.253 -            struct page_info *gpg = mfn_to_page(_mfn(sp->backpointer));
   1.254 +            struct page_info *gpg = mfn_to_page(_mfn(sp->u.sh.back));
   1.255              /* Bad shadow flags on guest page? */
   1.256 -            BUG_ON( !(gpg->shadow_flags & (1<<sp->type)) );
   1.257 +            BUG_ON( !(gpg->shadow_flags & (1<<sp->u.sh.type)) );
   1.258              /* Bad type count on guest page? */
   1.259  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
   1.260 -            if ( sp->type == SH_type_l1_32_shadow
   1.261 -                 || sp->type == SH_type_l1_pae_shadow
   1.262 -                 || sp->type == SH_type_l1_64_shadow )
   1.263 +            if ( sp->u.sh.type == SH_type_l1_32_shadow
   1.264 +                 || sp->u.sh.type == SH_type_l1_pae_shadow
   1.265 +                 || sp->u.sh.type == SH_type_l1_64_shadow )
   1.266              {
   1.267                  if ( (gpg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page
   1.268                       && (gpg->u.inuse.type_info & PGT_count_mask) != 0 )
   1.269                  {
   1.270                      if ( !page_is_out_of_sync(gpg) )
   1.271                      {
   1.272 -                        SHADOW_ERROR("MFN %#lx shadowed (by %#"PRI_mfn")"
   1.273 +                        SHADOW_ERROR("MFN %#"PRpgmfn" shadowed (by %#"PRI_mfn")"
   1.274                                       " and not OOS but has typecount %#lx\n",
   1.275 -                                     sp->backpointer, 
   1.276 +                                     sp->u.sh.back,
   1.277                                       mfn_x(shadow_page_to_mfn(sp)), 
   1.278                                       gpg->u.inuse.type_info);
   1.279                          BUG();
   1.280 @@ -1928,9 +1926,9 @@ static void sh_hash_audit_bucket(struct 
   1.281              if ( (gpg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page 
   1.282                   && (gpg->u.inuse.type_info & PGT_count_mask) != 0 )
   1.283              {
   1.284 -                SHADOW_ERROR("MFN %#lx shadowed (by %#"PRI_mfn")"
   1.285 +                SHADOW_ERROR("MFN %#"PRpgmfn" shadowed (by %#"PRI_mfn")"
   1.286                               " but has typecount %#lx\n",
   1.287 -                             sp->backpointer, mfn_x(shadow_page_to_mfn(sp)), 
   1.288 +                             sp->u.sh.back, mfn_x(shadow_page_to_mfn(sp)),
   1.289                               gpg->u.inuse.type_info);
   1.290                  BUG();
   1.291              }
   1.292 @@ -2016,7 +2014,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
   1.293      prev = NULL;
   1.294      while(sp)
   1.295      {
   1.296 -        if ( sp->backpointer == n && sp->type == t )
   1.297 +        if ( sp->u.sh.back == n && sp->u.sh.type == t )
   1.298          {
   1.299              /* Pull-to-front if 'sp' isn't already the head item */
   1.300              if ( unlikely(sp != d->arch.paging.shadow.hash_table[key]) )
   1.301 @@ -2148,12 +2146,12 @@ static void hash_foreach(struct vcpu *v,
   1.302           * deleted anything from the hash (lookups are OK, though). */
   1.303          for ( x = d->arch.paging.shadow.hash_table[i]; x; x = x->next_shadow )
   1.304          {
   1.305 -            if ( callback_mask & (1 << x->type) ) 
   1.306 +            if ( callback_mask & (1 << x->u.sh.type) )
   1.307              {
   1.308 -                ASSERT(x->type <= 15);
   1.309 -                ASSERT(callbacks[x->type] != NULL);
   1.310 -                done = callbacks[x->type](v, shadow_page_to_mfn(x), 
   1.311 -                                          callback_mfn);
   1.312 +                ASSERT(x->u.sh.type <= 15);
   1.313 +                ASSERT(callbacks[x->u.sh.type] != NULL);
   1.314 +                done = callbacks[x->u.sh.type](v, shadow_page_to_mfn(x),
   1.315 +                                               callback_mfn);
   1.316                  if ( done ) break;
   1.317              }
   1.318          }
   1.319 @@ -2171,7 +2169,7 @@ static void hash_foreach(struct vcpu *v,
   1.320  void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
   1.321  {
   1.322      struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
   1.323 -    unsigned int t = sp->type;
   1.324 +    unsigned int t = sp->u.sh.type;
   1.325  
   1.326  
   1.327      SHADOW_PRINTK("smfn=%#lx\n", mfn_x(smfn));
   1.328 @@ -2183,7 +2181,7 @@ void sh_destroy_shadow(struct vcpu *v, m
   1.329             t == SH_type_fl1_64_shadow  || 
   1.330             t == SH_type_monitor_table  || 
   1.331             (is_pv_32on64_vcpu(v) && t == SH_type_l4_64_shadow) ||
   1.332 -           (page_get_owner(mfn_to_page(_mfn(sp->backpointer))) 
   1.333 +           (page_get_owner(mfn_to_page(_mfn(sp->u.sh.back)))
   1.334              == v->domain)); 
   1.335  
   1.336      /* The down-shifts here are so that the switch statement is on nice
   1.337 @@ -2435,7 +2433,7 @@ int sh_remove_write_access(struct vcpu *
   1.338      {
   1.339          unsigned long old_count = (pg->u.inuse.type_info & PGT_count_mask);
   1.340          mfn_t last_smfn = _mfn(v->arch.paging.shadow.last_writeable_pte_smfn);
   1.341 -        int shtype = mfn_to_shadow_page(last_smfn)->type;
   1.342 +        int shtype = mfn_to_shadow_page(last_smfn)->u.sh.type;
   1.343  
   1.344          if ( callbacks[shtype] ) 
   1.345              callbacks[shtype](v, last_smfn, gmfn);
   1.346 @@ -2483,20 +2481,20 @@ int sh_remove_write_access_from_sl1p(str
   1.347      ASSERT(mfn_valid(smfn));
   1.348      ASSERT(mfn_valid(gmfn));
   1.349      
   1.350 -    if ( sp->type == SH_type_l1_32_shadow
   1.351 -         || sp->type == SH_type_fl1_32_shadow )
   1.352 +    if ( sp->u.sh.type == SH_type_l1_32_shadow
   1.353 +         || sp->u.sh.type == SH_type_fl1_32_shadow )
   1.354      {
   1.355          return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,2)
   1.356              (v, gmfn, smfn, off);
   1.357      }
   1.358  #if CONFIG_PAGING_LEVELS >= 3
   1.359 -    else if ( sp->type == SH_type_l1_pae_shadow
   1.360 -              || sp->type == SH_type_fl1_pae_shadow )
   1.361 +    else if ( sp->u.sh.type == SH_type_l1_pae_shadow
   1.362 +              || sp->u.sh.type == SH_type_fl1_pae_shadow )
   1.363          return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,3)
   1.364              (v, gmfn, smfn, off);
   1.365  #if CONFIG_PAGING_LEVELS >= 4
   1.366 -    else if ( sp->type == SH_type_l1_64_shadow
   1.367 -              || sp->type == SH_type_fl1_64_shadow )
   1.368 +    else if ( sp->u.sh.type == SH_type_l1_64_shadow
   1.369 +              || sp->u.sh.type == SH_type_fl1_64_shadow )
   1.370          return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,4)
   1.371              (v, gmfn, smfn, off);
   1.372  #endif
   1.373 @@ -2603,12 +2601,12 @@ static int sh_remove_shadow_via_pointer(
   1.374      void *vaddr;
   1.375      int rc;
   1.376  
   1.377 -    ASSERT(sp->type > 0);
   1.378 -    ASSERT(sp->type < SH_type_max_shadow);
   1.379 -    ASSERT(sp->type != SH_type_l2_32_shadow);
   1.380 -    ASSERT(sp->type != SH_type_l2_pae_shadow);
   1.381 -    ASSERT(sp->type != SH_type_l2h_pae_shadow);
   1.382 -    ASSERT(sp->type != SH_type_l4_64_shadow);
   1.383 +    ASSERT(sp->u.sh.type > 0);
   1.384 +    ASSERT(sp->u.sh.type < SH_type_max_shadow);
   1.385 +    ASSERT(sp->u.sh.type != SH_type_l2_32_shadow);
   1.386 +    ASSERT(sp->u.sh.type != SH_type_l2_pae_shadow);
   1.387 +    ASSERT(sp->u.sh.type != SH_type_l2h_pae_shadow);
   1.388 +    ASSERT(sp->u.sh.type != SH_type_l4_64_shadow);
   1.389      
   1.390      if (sp->up == 0) return 0;
   1.391      pmfn = _mfn(sp->up >> PAGE_SHIFT);
   1.392 @@ -2619,10 +2617,10 @@ static int sh_remove_shadow_via_pointer(
   1.393      ASSERT(l1e_get_pfn(*(l1_pgentry_t *)vaddr) == mfn_x(smfn));
   1.394      
   1.395      /* Is this the only reference to this shadow? */
   1.396 -    rc = (sp->count == 1) ? 1 : 0;
   1.397 +    rc = (sp->u.sh.count == 1) ? 1 : 0;
   1.398  
   1.399      /* Blank the offending entry */
   1.400 -    switch (sp->type) 
   1.401 +    switch (sp->u.sh.type)
   1.402      {
   1.403      case SH_type_l1_32_shadow:
   1.404      case SH_type_l2_32_shadow:
     2.1 --- a/xen/arch/x86/mm/shadow/multi.c	Fri Jan 30 11:04:24 2009 +0000
     2.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Fri Jan 30 11:08:06 2009 +0000
     2.3 @@ -974,12 +974,12 @@ static int shadow_set_l2e(struct vcpu *v
     2.4  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     2.5          {
     2.6              struct shadow_page_info *sp = mfn_to_shadow_page(sl1mfn);
     2.7 -            mfn_t gl1mfn = _mfn(sp->backpointer);
     2.8 +            mfn_t gl1mfn = _mfn(sp->u.sh.back);
     2.9  
    2.10              /* If the shadow is a fl1 then the backpointer contains
    2.11                 the GFN instead of the GMFN, and it's definitely not
    2.12                 OOS. */
    2.13 -            if ( (sp->type != SH_type_fl1_shadow) && mfn_valid(gl1mfn)
    2.14 +            if ( (sp->u.sh.type != SH_type_fl1_shadow) && mfn_valid(gl1mfn)
    2.15                   && mfn_is_out_of_sync(gl1mfn) )
    2.16                  sh_resync(v, gl1mfn);
    2.17          }
    2.18 @@ -1194,8 +1194,8 @@ static inline void increment_ptr_to_gues
    2.19  do {                                                                    \
    2.20      int _i;                                                             \
    2.21      shadow_l1e_t *_sp = sh_map_domain_page((_sl1mfn));                  \
    2.22 -    ASSERT(mfn_to_shadow_page(_sl1mfn)->type == SH_type_l1_shadow       \
    2.23 -           || mfn_to_shadow_page(_sl1mfn)->type == SH_type_fl1_shadow); \
    2.24 +    ASSERT(mfn_to_shadow_page(_sl1mfn)->u.sh.type == SH_type_l1_shadow  \
    2.25 +           || mfn_to_shadow_page(_sl1mfn)->u.sh.type == SH_type_fl1_shadow);\
    2.26      for ( _i = 0; _i < SHADOW_L1_PAGETABLE_ENTRIES; _i++ )              \
    2.27      {                                                                   \
    2.28          (_sl1e) = _sp + _i;                                             \
    2.29 @@ -1232,7 +1232,7 @@ do {                                    
    2.30  do {                                                                      \
    2.31      int _i, _j, __done = 0;                                               \
    2.32      int _xen = !shadow_mode_external(_dom);                               \
    2.33 -    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_32_shadow);    \
    2.34 +    ASSERT(mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2_32_shadow);\
    2.35      for ( _j = 0; _j < 4 && !__done; _j++ )                               \
    2.36      {                                                                     \
    2.37          shadow_l2e_t *_sp = sh_map_domain_page(_sl2mfn);                  \
    2.38 @@ -1260,11 +1260,11 @@ do {                                    
    2.39      int _i;                                                                \
    2.40      int _xen = !shadow_mode_external(_dom);                                \
    2.41      shadow_l2e_t *_sp = sh_map_domain_page((_sl2mfn));                     \
    2.42 -    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_pae_shadow      \
    2.43 -           || mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_pae_shadow);\
    2.44 +    ASSERT(mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2_pae_shadow \
    2.45 +           || mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow);\
    2.46      for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                 \
    2.47          if ( (!(_xen))                                                     \
    2.48 -             || mfn_to_shadow_page(_sl2mfn)->type != SH_type_l2h_pae_shadow\
    2.49 +             || mfn_to_shadow_page(_sl2mfn)->u.sh.type != SH_type_l2h_pae_shadow\
    2.50               || ((_i + (3 * SHADOW_L2_PAGETABLE_ENTRIES))                  \
    2.51                   < (HYPERVISOR_VIRT_START >> SHADOW_L2_PAGETABLE_SHIFT)) ) \
    2.52          {                                                                  \
    2.53 @@ -1285,13 +1285,13 @@ do {                                    
    2.54      int _i;                                                                 \
    2.55      int _xen = !shadow_mode_external(_dom);                                 \
    2.56      shadow_l2e_t *_sp = sh_map_domain_page((_sl2mfn));                      \
    2.57 -    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_64_shadow ||     \
    2.58 -           mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_64_shadow);     \
    2.59 +    ASSERT(mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2_64_shadow ||\
    2.60 +           mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2h_64_shadow);\
    2.61      for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                  \
    2.62      {                                                                       \
    2.63          if ( (!(_xen))                                                      \
    2.64               || !is_pv_32on64_domain(_dom)                                  \
    2.65 -             || mfn_to_shadow_page(_sl2mfn)->type != SH_type_l2h_64_shadow  \
    2.66 +             || mfn_to_shadow_page(_sl2mfn)->u.sh.type != SH_type_l2h_64_shadow\
    2.67               || (_i < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom)) )           \
    2.68          {                                                                   \
    2.69              (_sl2e) = _sp + _i;                                             \
    2.70 @@ -1313,7 +1313,7 @@ do {                                    
    2.71  do {                                                                    \
    2.72      int _i;                                                             \
    2.73      shadow_l3e_t *_sp = sh_map_domain_page((_sl3mfn));                  \
    2.74 -    ASSERT(mfn_to_shadow_page(_sl3mfn)->type == SH_type_l3_64_shadow);  \
    2.75 +    ASSERT(mfn_to_shadow_page(_sl3mfn)->u.sh.type == SH_type_l3_64_shadow);\
    2.76      for ( _i = 0; _i < SHADOW_L3_PAGETABLE_ENTRIES; _i++ )              \
    2.77      {                                                                   \
    2.78          (_sl3e) = _sp + _i;                                             \
    2.79 @@ -1331,7 +1331,7 @@ do {                                    
    2.80      shadow_l4e_t *_sp = sh_map_domain_page((_sl4mfn));                  \
    2.81      int _xen = !shadow_mode_external(_dom);                             \
    2.82      int _i;                                                             \
    2.83 -    ASSERT(mfn_to_shadow_page(_sl4mfn)->type == SH_type_l4_64_shadow);  \
    2.84 +    ASSERT(mfn_to_shadow_page(_sl4mfn)->u.sh.type == SH_type_l4_64_shadow);\
    2.85      for ( _i = 0; _i < SHADOW_L4_PAGETABLE_ENTRIES; _i++ )              \
    2.86      {                                                                   \
    2.87          if ( (!(_xen)) || is_guest_l4_slot(_dom, _i) )                  \
    2.88 @@ -1519,14 +1519,12 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
    2.89           * of them, decide that this isn't an old linux guest, and stop
    2.90           * pinning l3es.  This is not very quick but it doesn't happen
    2.91           * very often. */
    2.92 -        struct list_head *l, *t;
    2.93 -        struct shadow_page_info *sp;
    2.94 +        struct shadow_page_info *sp, *t;
    2.95          struct vcpu *v2;
    2.96          int l4count = 0, vcpus = 0;
    2.97 -        list_for_each(l, &v->domain->arch.paging.shadow.pinned_shadows)
    2.98 +        page_list_for_each(sp, &v->domain->arch.paging.shadow.pinned_shadows)
    2.99          {
   2.100 -            sp = list_entry(l, struct shadow_page_info, list);
   2.101 -            if ( sp->type == SH_type_l4_64_shadow )
   2.102 +            if ( sp->u.sh.type == SH_type_l4_64_shadow )
   2.103                  l4count++;
   2.104          }
   2.105          for_each_vcpu ( v->domain, v2 ) 
   2.106 @@ -1534,10 +1532,9 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
   2.107          if ( l4count > 2 * vcpus ) 
   2.108          {
   2.109              /* Unpin all the pinned l3 tables, and don't pin any more. */
   2.110 -            list_for_each_safe(l, t, &v->domain->arch.paging.shadow.pinned_shadows)
   2.111 +            page_list_for_each_safe(sp, t, &v->domain->arch.paging.shadow.pinned_shadows)
   2.112              {
   2.113 -                sp = list_entry(l, struct shadow_page_info, list);
   2.114 -                if ( sp->type == SH_type_l3_64_shadow )
   2.115 +                if ( sp->u.sh.type == SH_type_l3_64_shadow )
   2.116                      sh_unpin(v, shadow_page_to_mfn(sp));
   2.117              }
   2.118              v->domain->arch.paging.shadow.opt_flags &= ~SHOPT_LINUX_L3_TOPLEVEL;
   2.119 @@ -1921,7 +1918,7 @@ static shadow_l1e_t * shadow_get_and_cre
   2.120  void sh_destroy_l4_shadow(struct vcpu *v, mfn_t smfn)
   2.121  {
   2.122      shadow_l4e_t *sl4e;
   2.123 -    u32 t = mfn_to_shadow_page(smfn)->type;
   2.124 +    u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
   2.125      mfn_t gmfn, sl4mfn;
   2.126  
   2.127      SHADOW_DEBUG(DESTROY_SHADOW,
   2.128 @@ -1929,7 +1926,7 @@ void sh_destroy_l4_shadow(struct vcpu *v
   2.129      ASSERT(t == SH_type_l4_shadow);
   2.130  
   2.131      /* Record that the guest page isn't shadowed any more (in this type) */
   2.132 -    gmfn = _mfn(mfn_to_shadow_page(smfn)->backpointer);
   2.133 +    gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
   2.134      delete_shadow_status(v, gmfn, t, smfn);
   2.135      shadow_demote(v, gmfn, t);
   2.136      /* Decrement refcounts of all the old entries */
   2.137 @@ -1950,7 +1947,7 @@ void sh_destroy_l4_shadow(struct vcpu *v
   2.138  void sh_destroy_l3_shadow(struct vcpu *v, mfn_t smfn)
   2.139  {
   2.140      shadow_l3e_t *sl3e;
   2.141 -    u32 t = mfn_to_shadow_page(smfn)->type;
   2.142 +    u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
   2.143      mfn_t gmfn, sl3mfn;
   2.144  
   2.145      SHADOW_DEBUG(DESTROY_SHADOW,
   2.146 @@ -1958,7 +1955,7 @@ void sh_destroy_l3_shadow(struct vcpu *v
   2.147      ASSERT(t == SH_type_l3_shadow);
   2.148  
   2.149      /* Record that the guest page isn't shadowed any more (in this type) */
   2.150 -    gmfn = _mfn(mfn_to_shadow_page(smfn)->backpointer);
   2.151 +    gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
   2.152      delete_shadow_status(v, gmfn, t, smfn);
   2.153      shadow_demote(v, gmfn, t);
   2.154  
   2.155 @@ -1980,7 +1977,7 @@ void sh_destroy_l3_shadow(struct vcpu *v
   2.156  void sh_destroy_l2_shadow(struct vcpu *v, mfn_t smfn)
   2.157  {
   2.158      shadow_l2e_t *sl2e;
   2.159 -    u32 t = mfn_to_shadow_page(smfn)->type;
   2.160 +    u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
   2.161      mfn_t gmfn, sl2mfn;
   2.162  
   2.163      SHADOW_DEBUG(DESTROY_SHADOW,
   2.164 @@ -1993,7 +1990,7 @@ void sh_destroy_l2_shadow(struct vcpu *v
   2.165  #endif
   2.166  
   2.167      /* Record that the guest page isn't shadowed any more (in this type) */
   2.168 -    gmfn = _mfn(mfn_to_shadow_page(smfn)->backpointer);
   2.169 +    gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
   2.170      delete_shadow_status(v, gmfn, t, smfn);
   2.171      shadow_demote(v, gmfn, t);
   2.172  
   2.173 @@ -2014,7 +2011,7 @@ void sh_destroy_l1_shadow(struct vcpu *v
   2.174  {
   2.175      struct domain *d = v->domain;
   2.176      shadow_l1e_t *sl1e;
   2.177 -    u32 t = mfn_to_shadow_page(smfn)->type;
   2.178 +    u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
   2.179  
   2.180      SHADOW_DEBUG(DESTROY_SHADOW,
   2.181                    "%s(%05lx)\n", __func__, mfn_x(smfn));
   2.182 @@ -2023,12 +2020,12 @@ void sh_destroy_l1_shadow(struct vcpu *v
   2.183      /* Record that the guest page isn't shadowed any more (in this type) */
   2.184      if ( t == SH_type_fl1_shadow )
   2.185      {
   2.186 -        gfn_t gfn = _gfn(mfn_to_shadow_page(smfn)->backpointer);
   2.187 +        gfn_t gfn = _gfn(mfn_to_shadow_page(smfn)->u.sh.back);
   2.188          delete_fl1_shadow_status(v, gfn, smfn);
   2.189      }
   2.190      else 
   2.191      {
   2.192 -        mfn_t gmfn = _mfn(mfn_to_shadow_page(smfn)->backpointer);
   2.193 +        mfn_t gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
   2.194          delete_shadow_status(v, gmfn, t, smfn);
   2.195          shadow_demote(v, gmfn, t);
   2.196      }
   2.197 @@ -2054,7 +2051,7 @@ void sh_destroy_l1_shadow(struct vcpu *v
   2.198  void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
   2.199  {
   2.200      struct domain *d = v->domain;
   2.201 -    ASSERT(mfn_to_shadow_page(mmfn)->type == SH_type_monitor_table);
   2.202 +    ASSERT(mfn_to_shadow_page(mmfn)->u.sh.type == SH_type_monitor_table);
   2.203  
   2.204  #if (CONFIG_PAGING_LEVELS == 4) && (SHADOW_PAGING_LEVELS != 4)
   2.205      {
   2.206 @@ -2298,7 +2295,7 @@ static int validate_gl2e(struct vcpu *v,
   2.207  
   2.208  #if SHADOW_PAGING_LEVELS == 3
   2.209          reserved_xen_slot = 
   2.210 -            ((mfn_to_shadow_page(sl2mfn)->type == SH_type_l2h_pae_shadow) &&
   2.211 +            ((mfn_to_shadow_page(sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow) &&
   2.212               (shadow_index 
   2.213                >= (L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1))));
   2.214  #else /* SHADOW_PAGING_LEVELS == 2 */
   2.215 @@ -2352,7 +2349,7 @@ static int validate_gl1e(struct vcpu *v,
   2.216      result |= shadow_set_l1e(v, sl1p, new_sl1e, sl1mfn);
   2.217  
   2.218  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
   2.219 -    gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->backpointer);
   2.220 +    gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
   2.221      if ( mfn_valid(gl1mfn) 
   2.222           && mfn_is_out_of_sync(gl1mfn) )
   2.223      {
   2.224 @@ -2437,7 +2434,7 @@ int sh_safe_not_to_sync(struct vcpu *v, 
   2.225      
   2.226      /* Up to l2 */
   2.227      sp = mfn_to_shadow_page(smfn);
   2.228 -    if ( sp->count != 1 || !sp->up )
   2.229 +    if ( sp->u.sh.count != 1 || !sp->up )
   2.230          return 0;
   2.231      smfn = _mfn(sp->up >> PAGE_SHIFT);
   2.232      ASSERT(mfn_valid(smfn));
   2.233 @@ -2445,14 +2442,14 @@ int sh_safe_not_to_sync(struct vcpu *v, 
   2.234  #if (SHADOW_PAGING_LEVELS == 4) 
   2.235      /* up to l3 */
   2.236      sp = mfn_to_shadow_page(smfn);
   2.237 -    if ( sp->count != 1 || !sp->up )
   2.238 +    if ( sp->u.sh.count != 1 || !sp->up )
   2.239          return 0;
   2.240      smfn = _mfn(sp->up >> PAGE_SHIFT);
   2.241      ASSERT(mfn_valid(smfn));
   2.242  
   2.243      /* up to l4 */
   2.244      sp = mfn_to_shadow_page(smfn);
   2.245 -    if ( sp->count != 1 
   2.246 +    if ( sp->u.sh.count != 1
   2.247           || sh_type_is_pinnable(v, SH_type_l3_64_shadow) || !sp->up )
   2.248          return 0;
   2.249      smfn = _mfn(sp->up >> PAGE_SHIFT);
   2.250 @@ -2971,7 +2968,7 @@ static int sh_page_fault(struct vcpu *v,
   2.251                                         sizeof(sl2e)) != 0)
   2.252                       || !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT)
   2.253                       || !mfn_valid(gl1mfn = _mfn(mfn_to_shadow_page(
   2.254 -                                      shadow_l2e_get_mfn(sl2e))->backpointer))
   2.255 +                                      shadow_l2e_get_mfn(sl2e))->u.sh.back))
   2.256                       || unlikely(mfn_is_out_of_sync(gl1mfn)) )
   2.257                 {
   2.258                     /* Hit the slow path as if there had been no 
   2.259 @@ -3523,7 +3520,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
   2.260      // easier than invalidating all of the individual 4K pages).
   2.261      //
   2.262      sl1mfn = shadow_l2e_get_mfn(sl2e);
   2.263 -    if ( mfn_to_shadow_page(sl1mfn)->type
   2.264 +    if ( mfn_to_shadow_page(sl1mfn)->u.sh.type
   2.265           == SH_type_fl1_shadow )
   2.266      {
   2.267          flush_tlb_local();
   2.268 @@ -3533,7 +3530,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
   2.269  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
   2.270      /* Check to see if the SL1 is out of sync. */
   2.271      {
   2.272 -        mfn_t gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->backpointer);
   2.273 +        mfn_t gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
   2.274          struct page_info *pg = mfn_to_page(gl1mfn);
   2.275          if ( mfn_valid(gl1mfn) 
   2.276               && page_is_out_of_sync(pg) )
   2.277 @@ -3563,7 +3560,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
   2.278              }
   2.279  
   2.280              sl1mfn = shadow_l2e_get_mfn(sl2e);
   2.281 -            gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->backpointer);
   2.282 +            gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
   2.283              pg = mfn_to_page(gl1mfn);
   2.284              
   2.285              if ( likely(sh_mfn_is_a_page_table(gl1mfn)
   2.286 @@ -3968,7 +3965,7 @@ sh_set_toplevel_shadow(struct vcpu *v,
   2.287          /* Need to repin the old toplevel shadow if it's been unpinned
   2.288           * by shadow_prealloc(): in PV mode we're still running on this
   2.289           * shadow and it's not safe to free it yet. */
   2.290 -        if ( !mfn_to_shadow_page(old_smfn)->pinned && !sh_pin(v, old_smfn) )
   2.291 +        if ( !mfn_to_shadow_page(old_smfn)->u.sh.pinned && !sh_pin(v, old_smfn) )
   2.292          {
   2.293              SHADOW_ERROR("can't re-pin %#lx\n", mfn_x(old_smfn));
   2.294              domain_crash(v->domain);
   2.295 @@ -4269,9 +4266,9 @@ int sh_rm_write_access_from_sl1p(struct 
   2.296  
   2.297      sp = mfn_to_shadow_page(smfn);
   2.298  
   2.299 -    if ( sp->mbz != 0
   2.300 -         || (sp->type != SH_type_l1_shadow
   2.301 -             && sp->type != SH_type_fl1_shadow) )
   2.302 +    if ( sp->count_info != 0
   2.303 +         || (sp->u.sh.type != SH_type_l1_shadow
   2.304 +             && sp->u.sh.type != SH_type_fl1_shadow) )
   2.305          goto fail;
   2.306  
   2.307      sl1p = sh_map_domain_page(smfn);
   2.308 @@ -4410,7 +4407,7 @@ int sh_rm_mappings_from_l1(struct vcpu *
   2.309  void sh_clear_shadow_entry(struct vcpu *v, void *ep, mfn_t smfn)
   2.310  /* Blank out a single shadow entry */
   2.311  {
   2.312 -    switch ( mfn_to_shadow_page(smfn)->type )
   2.313 +    switch ( mfn_to_shadow_page(smfn)->u.sh.type )
   2.314      {
   2.315      case SH_type_l1_shadow:
   2.316          (void) shadow_set_l1e(v, ep, shadow_l1e_empty(), smfn); break;
   2.317 @@ -4443,7 +4440,7 @@ int sh_remove_l1_shadow(struct vcpu *v, 
   2.318               && (mfn_x(shadow_l2e_get_mfn(*sl2e)) == mfn_x(sl1mfn)) )
   2.319          {
   2.320              (void) shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
   2.321 -            if ( mfn_to_shadow_page(sl1mfn)->type == 0 )
   2.322 +            if ( mfn_to_shadow_page(sl1mfn)->u.sh.type == 0 )
   2.323                  /* This breaks us cleanly out of the FOREACH macro */
   2.324                  done = 1;
   2.325          }
   2.326 @@ -4466,7 +4463,7 @@ int sh_remove_l2_shadow(struct vcpu *v, 
   2.327               && (mfn_x(shadow_l3e_get_mfn(*sl3e)) == mfn_x(sl2mfn)) )
   2.328          {
   2.329              (void) shadow_set_l3e(v, sl3e, shadow_l3e_empty(), sl3mfn);
   2.330 -            if ( mfn_to_shadow_page(sl2mfn)->type == 0 )
   2.331 +            if ( mfn_to_shadow_page(sl2mfn)->u.sh.type == 0 )
   2.332                  /* This breaks us cleanly out of the FOREACH macro */
   2.333                  done = 1;
   2.334          }
   2.335 @@ -4488,7 +4485,7 @@ int sh_remove_l3_shadow(struct vcpu *v, 
   2.336               && (mfn_x(shadow_l4e_get_mfn(*sl4e)) == mfn_x(sl3mfn)) )
   2.337          {
   2.338              (void) shadow_set_l4e(v, sl4e, shadow_l4e_empty(), sl4mfn);
   2.339 -            if ( mfn_to_shadow_page(sl3mfn)->type == 0 )
   2.340 +            if ( mfn_to_shadow_page(sl3mfn)->u.sh.type == 0 )
   2.341                  /* This breaks us cleanly out of the FOREACH macro */
   2.342                  done = 1;
   2.343          }
   2.344 @@ -4890,7 +4887,7 @@ int sh_audit_l1_table(struct vcpu *v, mf
   2.345      int done = 0;
   2.346      
   2.347      /* Follow the backpointer */
   2.348 -    gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->backpointer);
   2.349 +    gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
   2.350  
   2.351  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
   2.352      /* Out-of-sync l1 shadows can contain anything: just check the OOS hash */
   2.353 @@ -4980,7 +4977,7 @@ int sh_audit_l2_table(struct vcpu *v, mf
   2.354      int done = 0;
   2.355  
   2.356      /* Follow the backpointer */
   2.357 -    gl2mfn = _mfn(mfn_to_shadow_page(sl2mfn)->backpointer);
   2.358 +    gl2mfn = _mfn(mfn_to_shadow_page(sl2mfn)->u.sh.back);
   2.359  
   2.360  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
   2.361      /* Only L1's may be out of sync. */
   2.362 @@ -5029,7 +5026,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
   2.363      int done = 0;
   2.364  
   2.365      /* Follow the backpointer */
   2.366 -    gl3mfn = _mfn(mfn_to_shadow_page(sl3mfn)->backpointer);
   2.367 +    gl3mfn = _mfn(mfn_to_shadow_page(sl3mfn)->u.sh.back);
   2.368  
   2.369  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
   2.370      /* Only L1's may be out of sync. */
   2.371 @@ -5076,7 +5073,7 @@ int sh_audit_l4_table(struct vcpu *v, mf
   2.372      int done = 0;
   2.373  
   2.374      /* Follow the backpointer */
   2.375 -    gl4mfn = _mfn(mfn_to_shadow_page(sl4mfn)->backpointer);
   2.376 +    gl4mfn = _mfn(mfn_to_shadow_page(sl4mfn)->u.sh.back);
   2.377  
   2.378  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
   2.379      /* Only L1's may be out of sync. */
     3.1 --- a/xen/arch/x86/mm/shadow/private.h	Fri Jan 30 11:04:24 2009 +0000
     3.2 +++ b/xen/arch/x86/mm/shadow/private.h	Fri Jan 30 11:08:06 2009 +0000
     3.3 @@ -220,60 +220,6 @@ extern void shadow_audit_tables(struct v
     3.4  #undef GUEST_LEVELS
     3.5  #endif /* CONFIG_PAGING_LEVELS == 4 */
     3.6  
     3.7 -/******************************************************************************
     3.8 - * Page metadata for shadow pages.
     3.9 - */
    3.10 -
    3.11 -struct shadow_page_info
    3.12 -{
    3.13 -    union {
    3.14 -        /* Ensures that shadow_page_info is same size as page_info. */
    3.15 -        struct page_info page_info;
    3.16 -
    3.17 -        struct {
    3.18 -            union {
    3.19 -                /* When in use, guest page we're a shadow of */
    3.20 -                unsigned long backpointer;
    3.21 -                /* When free, order of the freelist we're on */
    3.22 -                unsigned int order;
    3.23 -            };
    3.24 -            union {
    3.25 -                /* When in use, next shadow in this hash chain */
    3.26 -                struct shadow_page_info *next_shadow;
    3.27 -                /* When free, TLB flush time when freed */
    3.28 -                u32 tlbflush_timestamp;
    3.29 -            };
    3.30 -            struct {
    3.31 -                unsigned long mbz;     /* Must be zero: count_info is here. */
    3.32 -                unsigned long type:5;   /* What kind of shadow is this? */
    3.33 -                unsigned long pinned:1; /* Is the shadow pinned? */
    3.34 -                unsigned long count:26; /* Reference count */
    3.35 -            } __attribute__((packed));
    3.36 -            union {
    3.37 -                /* For unused shadow pages, a list of pages of this order; for 
    3.38 -                 * pinnable shadows, if pinned, a list of other pinned shadows
    3.39 -                 * (see sh_type_is_pinnable() below for the definition of 
    3.40 -                 * "pinnable" shadow types). */
    3.41 -                struct list_head list;
    3.42 -                /* For non-pinnable shadows, a higher entry that points
    3.43 -                 * at us. */
    3.44 -                paddr_t up;
    3.45 -            };
    3.46 -        };
    3.47 -    };
    3.48 -};
    3.49 -
    3.50 -/* The structure above *must* be no larger than a struct page_info
    3.51 - * from mm.h, since we'll be using the same space in the frametable. 
    3.52 - * Also, the mbz field must line up with the count_info field of normal 
    3.53 - * pages, so they cannot be successfully get_page()d. */
    3.54 -static inline void shadow_check_page_struct_offsets(void) {
    3.55 -    BUILD_BUG_ON(sizeof (struct shadow_page_info) !=
    3.56 -                 sizeof (struct page_info));
    3.57 -    BUILD_BUG_ON(offsetof(struct shadow_page_info, mbz) !=
    3.58 -                 offsetof(struct page_info, count_info));
    3.59 -};
    3.60 -
    3.61  /* Shadow type codes */
    3.62  #define SH_type_none           (0U) /* on the shadow free list */
    3.63  #define SH_type_min_shadow     (1U)
    3.64 @@ -532,13 +478,13 @@ mfn_t oos_snapshot_lookup(struct vcpu *v
    3.65  // in order to make it work with our mfn type.
    3.66  #undef mfn_to_page
    3.67  #define mfn_to_page(_m) (frame_table + mfn_x(_m))
    3.68 -#define mfn_to_shadow_page(_m) ((struct shadow_page_info *)mfn_to_page(_m))
    3.69 +#define mfn_to_shadow_page mfn_to_page
    3.70  
    3.71  // Override page_to_mfn from asm/page.h, which was #include'd above,
    3.72  // in order to make it work with our mfn type.
    3.73  #undef page_to_mfn
    3.74  #define page_to_mfn(_pg) (_mfn((_pg) - frame_table))
    3.75 -#define shadow_page_to_mfn(_spg) (page_to_mfn((struct page_info *)_spg))
    3.76 +#define shadow_page_to_mfn page_to_mfn
    3.77  
    3.78  // Override mfn_valid from asm/page.h, which was #include'd above,
    3.79  // in order to make it work with our mfn type.
    3.80 @@ -679,22 +625,22 @@ static inline int sh_get_ref(struct vcpu
    3.81  
    3.82      ASSERT(mfn_valid(smfn));
    3.83  
    3.84 -    x = sp->count;
    3.85 +    x = sp->u.sh.count;
    3.86      nx = x + 1;
    3.87  
    3.88      if ( unlikely(nx >= 1U<<26) )
    3.89      {
    3.90 -        SHADOW_PRINTK("shadow ref overflow, gmfn=%" PRtype_info " smfn=%lx\n",
    3.91 -                       sp->backpointer, mfn_x(smfn));
    3.92 +        SHADOW_PRINTK("shadow ref overflow, gmfn=%" PRpgmfn " smfn=%lx\n",
    3.93 +                       sp->u.sh.back, mfn_x(smfn));
    3.94          return 0;
    3.95      }
    3.96      
    3.97      /* Guarded by the shadow lock, so no need for atomic update */
    3.98 -    sp->count = nx;
    3.99 +    sp->u.sh.count = nx;
   3.100  
   3.101      /* We remember the first shadow entry that points to each shadow. */
   3.102      if ( entry_pa != 0 
   3.103 -         && !sh_type_is_pinnable(v, sp->type) 
   3.104 +         && !sh_type_is_pinnable(v, sp->u.sh.type)
   3.105           && sp->up == 0 ) 
   3.106          sp->up = entry_pa;
   3.107      
   3.108 @@ -710,26 +656,26 @@ static inline void sh_put_ref(struct vcp
   3.109      struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
   3.110  
   3.111      ASSERT(mfn_valid(smfn));
   3.112 -    ASSERT(sp->mbz == 0);
   3.113 +    ASSERT(sp->count_info == 0);
   3.114  
   3.115      /* If this is the entry in the up-pointer, remove it */
   3.116      if ( entry_pa != 0 
   3.117 -         && !sh_type_is_pinnable(v, sp->type) 
   3.118 +         && !sh_type_is_pinnable(v, sp->u.sh.type)
   3.119           && sp->up == entry_pa ) 
   3.120          sp->up = 0;
   3.121  
   3.122 -    x = sp->count;
   3.123 +    x = sp->u.sh.count;
   3.124      nx = x - 1;
   3.125  
   3.126      if ( unlikely(x == 0) ) 
   3.127      {
   3.128          SHADOW_ERROR("shadow ref underflow, smfn=%lx oc=%08x t=%#x\n",
   3.129 -                     mfn_x(smfn), sp->count, sp->type);
   3.130 +                     mfn_x(smfn), sp->u.sh.count, sp->u.sh.type);
   3.131          BUG();
   3.132      }
   3.133  
   3.134      /* Guarded by the shadow lock, so no need for atomic update */
   3.135 -    sp->count = nx;
   3.136 +    sp->u.sh.count = nx;
   3.137  
   3.138      if ( unlikely(nx == 0) ) 
   3.139          sh_destroy_shadow(v, smfn);
   3.140 @@ -745,22 +691,22 @@ static inline int sh_pin(struct vcpu *v,
   3.141      
   3.142      ASSERT(mfn_valid(smfn));
   3.143      sp = mfn_to_shadow_page(smfn);
   3.144 -    ASSERT(sh_type_is_pinnable(v, sp->type));
   3.145 -    if ( sp->pinned ) 
   3.146 +    ASSERT(sh_type_is_pinnable(v, sp->u.sh.type));
   3.147 +    if ( sp->u.sh.pinned )
   3.148      {
   3.149          /* Already pinned: take it out of the pinned-list so it can go 
   3.150           * at the front */
   3.151 -        list_del(&sp->list);
   3.152 +        page_list_del(sp, &v->domain->arch.paging.shadow.pinned_shadows);
   3.153      }
   3.154      else
   3.155      {
   3.156          /* Not pinned: pin it! */
   3.157          if ( !sh_get_ref(v, smfn, 0) )
   3.158              return 0;
   3.159 -        sp->pinned = 1;
   3.160 +        sp->u.sh.pinned = 1;
   3.161      }
   3.162      /* Put it at the head of the list of pinned shadows */
   3.163 -    list_add(&sp->list, &v->domain->arch.paging.shadow.pinned_shadows);
   3.164 +    page_list_add(sp, &v->domain->arch.paging.shadow.pinned_shadows);
   3.165      return 1;
   3.166  }
   3.167  
   3.168 @@ -772,11 +718,11 @@ static inline void sh_unpin(struct vcpu 
   3.169      
   3.170      ASSERT(mfn_valid(smfn));
   3.171      sp = mfn_to_shadow_page(smfn);
   3.172 -    ASSERT(sh_type_is_pinnable(v, sp->type));
   3.173 -    if ( sp->pinned )
   3.174 +    ASSERT(sh_type_is_pinnable(v, sp->u.sh.type));
   3.175 +    if ( sp->u.sh.pinned )
   3.176      {
   3.177 -        sp->pinned = 0;
   3.178 -        list_del(&sp->list);
   3.179 +        sp->u.sh.pinned = 0;
   3.180 +        page_list_del(sp, &v->domain->arch.paging.shadow.pinned_shadows);
   3.181          sp->up = 0; /* in case this stops being a pinnable type in future */
   3.182          sh_put_ref(v, smfn, 0);
   3.183      }
     4.1 --- a/xen/include/asm-x86/domain.h	Fri Jan 30 11:04:24 2009 +0000
     4.2 +++ b/xen/include/asm-x86/domain.h	Fri Jan 30 11:08:06 2009 +0000
     4.3 @@ -79,10 +79,10 @@ struct shadow_domain {
     4.4      int               locker; /* processor which holds the lock */
     4.5      const char       *locker_function; /* Func that took it */
     4.6      unsigned int      opt_flags;    /* runtime tunable optimizations on/off */
     4.7 -    struct list_head  pinned_shadows;
     4.8 +    struct page_list_head pinned_shadows;
     4.9  
    4.10      /* Memory allocation */
    4.11 -    struct list_head  freelists[SHADOW_MAX_ORDER + 1];
    4.12 +    struct page_list_head freelists[SHADOW_MAX_ORDER + 1];
    4.13      struct page_list_head p2m_freelist;
    4.14      unsigned int      total_pages;  /* number of pages allocated */
    4.15      unsigned int      free_pages;   /* number of pages on freelists */
     5.1 --- a/xen/include/asm-x86/mm.h	Fri Jan 30 11:04:24 2009 +0000
     5.2 +++ b/xen/include/asm-x86/mm.h	Fri Jan 30 11:08:06 2009 +0000
     5.3 @@ -17,19 +17,39 @@
     5.4   */
     5.5  #define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
     5.6  
     5.7 +/*
     5.8 + * This definition is solely for the use in struct page_info (and
     5.9 + * struct page_list_head), intended to allow easy adjustment once x86-64
    5.10 + * wants to support more than 16Tb.
    5.11 + * 'unsigned long' should be used for MFNs everywhere else.
    5.12 + */
    5.13 +#define __mfn_t unsigned int
    5.14 +#define PRpgmfn "08x"
    5.15 +
    5.16  #ifndef __i386__
    5.17  # undef page_list_entry
    5.18  struct page_list_entry
    5.19  {
    5.20 -    unsigned int next, prev;
    5.21 -    unsigned long _pad_for_sh_; /* until struct shadow_page_info gets updated */
    5.22 +    __mfn_t next, prev;
    5.23  };
    5.24  #endif
    5.25  
    5.26  struct page_info
    5.27 +/* Until all uses of the old type get cleaned up: */
    5.28 +#define shadow_page_info page_info
    5.29  {
    5.30 -    /* Each frame can be threaded onto a doubly-linked list. */
    5.31 -    struct page_list_entry list;
    5.32 +    union {
    5.33 +        /* Each frame can be threaded onto a doubly-linked list.
    5.34 +         *
    5.35 +         * For unused shadow pages, a list of pages of this order; for
    5.36 +         * pinnable shadows, if pinned, a list of other pinned shadows
    5.37 +         * (see sh_type_is_pinnable() below for the definition of
    5.38 +         * "pinnable" shadow types).
    5.39 +         */
    5.40 +        struct page_list_entry list;
    5.41 +        /* For non-pinnable shadows, a higher entry that points at us. */
    5.42 +        paddr_t up;
    5.43 +    };
    5.44  
    5.45      /* Reference count and various PGC_xxx flags and fields. */
    5.46      unsigned long count_info;
    5.47 @@ -45,6 +65,19 @@ struct page_info
    5.48              unsigned long type_info;
    5.49          } inuse;
    5.50  
    5.51 +        /* Page is in use as a shadow: count_info == 0. */
    5.52 +        struct {
    5.53 +            unsigned long type:5;   /* What kind of shadow is this? */
    5.54 +            unsigned long pinned:1; /* Is the shadow pinned? */
    5.55 +            unsigned long count:26; /* Reference count */
    5.56 +            union {
    5.57 +                /* When in use, GMFN of guest page we're a shadow of. */
    5.58 +                __mfn_t back;
    5.59 +                /* When free, order of the freelist we're on. */
    5.60 +                unsigned int order;
    5.61 +            };
    5.62 +        } sh;
    5.63 +
    5.64          /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
    5.65          struct {
    5.66              /* Order-size of the free chunk this page is the head of. */
    5.67 @@ -104,9 +137,14 @@ struct page_info
    5.68           * tracked for TLB-flush avoidance when a guest runs in shadow mode.
    5.69           */
    5.70          u32 shadow_flags;
    5.71 +
    5.72 +        /* When in use as a shadow, next shadow in this hash chain. */
    5.73 +        struct shadow_page_info *next_shadow;
    5.74      };
    5.75  };
    5.76  
    5.77 +#undef __mfn_t
    5.78 +
    5.79  #define PG_shift(idx)   (BITS_PER_LONG - (idx))
    5.80  #define PG_mask(x, idx) (x ## UL << PG_shift(idx))
    5.81
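
In short, the mechanical conversion applied throughout the shadow code
replaces generic list_head iteration (with its list_entry() container
lookup) by the page_list API; the pattern, abridged from the
_shadow_prealloc() hunk above, is:

    /* Before: iterate via the embedded list_head, recover the container. */
    struct list_head *l, *t;
    struct shadow_page_info *sp;
    list_for_each_backwards_safe(l, t, &d->arch.paging.shadow.pinned_shadows)
    {
        sp = list_entry(l, struct shadow_page_info, list);
        /* ... use sp ... */
    }

    /* After: iterate directly over page_info-compatible entries. */
    struct shadow_page_info *sp, *t;
    page_list_for_each_safe_reverse(sp, t, &d->arch.paging.shadow.pinned_shadows)
    {
        /* ... use sp ... */
    }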