changeset 12561:6f0d8434d23f

[XEN] Use a separate struct shadow_page_info for shadow pages
and move the definitions of shadow types etc out of public headers.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Thu Nov 23 17:40:28 2006 +0000 (2006-11-23)
parents a8d2b1393b76
children 7a38b70788a5
files xen/arch/x86/mm/shadow/common.c xen/arch/x86/mm/shadow/multi.c xen/arch/x86/mm/shadow/private.h xen/arch/x86/mm/shadow/types.h xen/include/asm-x86/mm.h xen/include/asm-x86/shadow.h
line diff
     1.1 --- a/xen/arch/x86/mm/shadow/common.c	Tue Nov 21 18:09:23 2006 -0800
     1.2 +++ b/xen/arch/x86/mm/shadow/common.c	Thu Nov 23 17:40:28 2006 +0000
     1.3 @@ -190,7 +190,7 @@ struct x86_emulate_ops shadow_emulator_o
     1.4   * involves making sure there are no writable mappings available to the guest
     1.5   * for this page.
     1.6   */
     1.7 -void shadow_promote(struct vcpu *v, mfn_t gmfn, u32 type)
     1.8 +void shadow_promote(struct vcpu *v, mfn_t gmfn, unsigned int type)
     1.9  {
    1.10      struct page_info *page = mfn_to_page(gmfn);
    1.11  
    1.12 @@ -203,8 +203,8 @@ void shadow_promote(struct vcpu *v, mfn_
    1.13      if ( !test_and_set_bit(_PGC_page_table, &page->count_info) )
    1.14          page->shadow_flags = 0;
    1.15  
    1.16 -    ASSERT(!test_bit(type >> PGC_SH_type_shift, &page->shadow_flags));
    1.17 -    set_bit(type >> PGC_SH_type_shift, &page->shadow_flags);
    1.18 +    ASSERT(!test_bit(type, &page->shadow_flags));
    1.19 +    set_bit(type, &page->shadow_flags);
    1.20  }
    1.21  
    1.22  void shadow_demote(struct vcpu *v, mfn_t gmfn, u32 type)
    1.23 @@ -212,9 +212,9 @@ void shadow_demote(struct vcpu *v, mfn_t
    1.24      struct page_info *page = mfn_to_page(gmfn);
    1.25  
    1.26      ASSERT(test_bit(_PGC_page_table, &page->count_info));
    1.27 -    ASSERT(test_bit(type >> PGC_SH_type_shift, &page->shadow_flags));
    1.28 -
    1.29 -    clear_bit(type >> PGC_SH_type_shift, &page->shadow_flags);
    1.30 +    ASSERT(test_bit(type, &page->shadow_flags));
    1.31 +
    1.32 +    clear_bit(type, &page->shadow_flags);
    1.33  
    1.34      if ( (page->shadow_flags & SHF_page_type_mask) == 0 )
    1.35      {
    1.36 @@ -352,75 +352,6 @@ shadow_validate_guest_pt_write(struct vc
    1.37  /**************************************************************************/
    1.38  /* Memory management for shadow pages. */ 
    1.39  
    1.40 -/* Meaning of the count_info field in shadow pages
    1.41 - * ----------------------------------------------
    1.42 - * 
    1.43 - * A count of all references to this page from other shadow pages and
    1.44 - * guest CR3s (a.k.a. v->arch.shadow.table).  
    1.45 - *
    1.46 - * The top bits hold the shadow type and the pinned bit.  Top-level
    1.47 - * shadows are pinned so that they don't disappear when not in a CR3
    1.48 - * somewhere.
    1.49 - *
    1.50 - * We don't need to use get|put_page for this as the updates are all
    1.51 - * protected by the shadow lock.  We can't use get|put_page for this
    1.52 - * as the size of the count on shadow pages is different from that on
    1.53 - * normal guest pages.
    1.54 - */
    1.55 -
    1.56 -/* Meaning of the type_info field in shadow pages
    1.57 - * ----------------------------------------------
    1.58 - * 
    1.59 - * type_info use depends on the shadow type (from count_info)
    1.60 - * 
    1.61 - * PGC_SH_none : This page is in the shadow free pool.  type_info holds
    1.62 - *                the chunk order for our freelist allocator.
    1.63 - *
    1.64 - * PGC_SH_l*_shadow : This page is in use as a shadow. type_info 
    1.65 - *                     holds the mfn of the guest page being shadowed,
    1.66 - *
    1.67 - * PGC_SH_fl1_*_shadow : This page is being used to shatter a superpage.
    1.68 - *                        type_info holds the gfn being shattered.
    1.69 - *
    1.70 - * PGC_SH_monitor_table : This page is part of a monitor table.
    1.71 - *                         type_info is not used.
    1.72 - */
    1.73 -
    1.74 -/* Meaning of the _domain field in shadow pages
    1.75 - * --------------------------------------------
    1.76 - *
    1.77 - * In shadow pages, this field will always have its least significant bit
    1.78 - * set.  This ensures that all attempts to get_page() will fail (as all
    1.79 - * valid pickled domain pointers have a zero for their least significant bit).
    1.80 - * Instead, the remaining upper bits are used to record the shadow generation
    1.81 - * counter when the shadow was created.
    1.82 - */
    1.83 -
    1.84 -/* Meaning of the shadow_flags field
    1.85 - * ----------------------------------
    1.86 - * 
    1.87 - * In guest pages that are shadowed, one bit for each kind of shadow they have.
    1.88 - * 
    1.89 - * In shadow pages, will be used for holding a representation of the populated
    1.90 - * entries in this shadow (either a min/max, or a bitmap, or ...)
    1.91 - *
    1.92 - * In monitor-table pages, holds the level of the particular page (to save
    1.93 - * spilling the shadow types into an extra bit by having three types of monitor
    1.94 - * page).
    1.95 - */
    1.96 -
    1.97 -/* Meaning of the list_head struct in shadow pages
    1.98 - * -----------------------------------------------
    1.99 - *
   1.100 - * In free shadow pages, this is used to hold the free-lists of chunks.
   1.101 - *
   1.102 - * In top-level shadow tables, this holds a linked-list of all top-level
   1.103 - * shadows (used for recovering memory and destroying shadows). 
   1.104 - *
   1.105 - * In lower-level shadows, this holds the physical address of a higher-level
   1.106 - * shadow entry that holds a reference to this shadow (or zero).
   1.107 - */
   1.108 -
   1.109  /* Allocating shadow pages
   1.110   * -----------------------
   1.111   *
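The comment blocks deleted above described how shadow metadata used to be packed into the generic page_info fields (count_info, type_info, _domain, shadow_flags and the list_head). After this change that state lives in a dedicated struct shadow_page_info, defined in xen/arch/x86/mm/shadow/private.h and therefore not visible in this diff. A sketch of its layout, inferred from the fields the new code touches (type, pinned, logdirty, count, mbz, backpointer, order, next_shadow, tlbflush_timestamp, list, up); the exact field widths and union groupings are assumptions:

    struct shadow_page_info
    {
        union {
            unsigned long backpointer; /* in use: guest frame we shadow */
            unsigned int order;        /* free: order of our freelist chunk */
        };
        union {
            struct shadow_page_info *next_shadow; /* in use: hash chain */
            u32 tlbflush_timestamp;               /* free: TLB flush time */
        };
        struct {
            unsigned int type:4;     /* what kind of shadow is this? */
            unsigned int pinned:1;   /* is the shadow pinned? */
            unsigned int logdirty:1; /* was it made in log-dirty mode? */
            unsigned int count:26;   /* reference count */
            u32 mbz;                 /* must be zero: aliases the owner
                                      * field of page_info, and shadow
                                      * pages have no owner */
        } __attribute__((packed));
        union {
            struct list_head list;   /* free chunks and top-level shadows */
            paddr_t up;              /* lower-level shadows: address of the
                                      * shadow entry that references us */
        };
    };

The struct is laid directly over struct page_info, so no extra memory is needed; the four-bit type field is why shadow_order() below can assert shadow_type < 16.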
   1.112 @@ -475,38 +406,32 @@ unsigned int shadow_min_acceptable_pages
   1.113          vcpu_count++;
   1.114  
   1.115      return (vcpu_count * 128);
   1.116 -}
   1.117 -
   1.118 -/* Using the type_info field to store freelist order */
   1.119 -#define SH_PFN_ORDER(_p) ((_p)->u.inuse.type_info)
   1.120 -#define SH_SET_PFN_ORDER(_p, _o)                       \
   1.121 - do { (_p)->u.inuse.type_info = (_o); } while (0)
   1.122 - 
   1.123 +} 
   1.124  
   1.125  /* Figure out the order of allocation needed for a given shadow type */
   1.126  static inline u32
   1.127 -shadow_order(u32 shadow_type) 
   1.128 +shadow_order(unsigned int shadow_type) 
   1.129  {
   1.130  #if CONFIG_PAGING_LEVELS > 2
   1.131      static const u32 type_to_order[16] = {
   1.132 -        0, /* PGC_SH_none           */
   1.133 -        1, /* PGC_SH_l1_32_shadow   */
   1.134 -        1, /* PGC_SH_fl1_32_shadow  */
   1.135 -        2, /* PGC_SH_l2_32_shadow   */
   1.136 -        0, /* PGC_SH_l1_pae_shadow  */
   1.137 -        0, /* PGC_SH_fl1_pae_shadow */
   1.138 -        0, /* PGC_SH_l2_pae_shadow  */
   1.139 -        0, /* PGC_SH_l2h_pae_shadow */
   1.140 -        0, /* PGC_SH_l1_64_shadow   */
   1.141 -        0, /* PGC_SH_fl1_64_shadow  */
   1.142 -        0, /* PGC_SH_l2_64_shadow   */
   1.143 -        0, /* PGC_SH_l3_64_shadow   */
   1.144 -        0, /* PGC_SH_l4_64_shadow   */
   1.145 -        2, /* PGC_SH_p2m_table      */
   1.146 -        0  /* PGC_SH_monitor_table  */
   1.147 +        0, /* SH_type_none           */
   1.148 +        1, /* SH_type_l1_32_shadow   */
   1.149 +        1, /* SH_type_fl1_32_shadow  */
   1.150 +        2, /* SH_type_l2_32_shadow   */
   1.151 +        0, /* SH_type_l1_pae_shadow  */
   1.152 +        0, /* SH_type_fl1_pae_shadow */
   1.153 +        0, /* SH_type_l2_pae_shadow  */
   1.154 +        0, /* SH_type_l2h_pae_shadow */
   1.155 +        0, /* SH_type_l1_64_shadow   */
   1.156 +        0, /* SH_type_fl1_64_shadow  */
   1.157 +        0, /* SH_type_l2_64_shadow   */
   1.158 +        0, /* SH_type_l3_64_shadow   */
   1.159 +        0, /* SH_type_l4_64_shadow   */
   1.160 +        2, /* SH_type_p2m_table      */
   1.161 +        0  /* SH_type_monitor_table  */
   1.162          };
   1.163 -    u32 type = (shadow_type & PGC_SH_type_mask) >> PGC_SH_type_shift;
   1.164 -    return type_to_order[type];
   1.165 +    ASSERT(shadow_type < 16);
   1.166 +    return type_to_order[shadow_type];
   1.167  #else  /* 32-bit Xen only ever shadows 32-bit guests on 32-bit shadows. */
   1.168      return 0;
   1.169  #endif
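shadow_order() can now index type_to_order[] directly because the SH_type_* values are small consecutive integers rather than the old shifted PGC_SH_* mask values. The definitions moved into shadow/private.h; a sketch of the numbering implied by the table comments above (exact spelling assumed):

    #define SH_type_none           (0U)  /* on the shadow free list */
    #define SH_type_l1_32_shadow   (1U)
    #define SH_type_fl1_32_shadow  (2U)
    #define SH_type_l2_32_shadow   (3U)
    #define SH_type_l1_pae_shadow  (4U)
    #define SH_type_fl1_pae_shadow (5U)
    #define SH_type_l2_pae_shadow  (6U)
    #define SH_type_l2h_pae_shadow (7U)
    #define SH_type_l1_64_shadow   (8U)
    #define SH_type_fl1_64_shadow  (9U)
    #define SH_type_l2_64_shadow   (10U)
    #define SH_type_l3_64_shadow   (11U)
    #define SH_type_l4_64_shadow   (12U)
    #define SH_type_max_shadow     (12U)
    #define SH_type_p2m_table      (13U) /* in use as the p2m table */
    #define SH_type_monitor_table  (14U) /* in use as a monitor table */
    #define SH_type_unused         (15U)

Everything fits in four bits, which matches the type:4 bitfield and also lets later hunks build bitmasks like 1 << SH_type_l1_32_shadow without shifting first.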
   1.170 @@ -528,10 +453,10 @@ static inline int chunk_is_available(str
   1.171   * non-Xen mappings in this top-level shadow mfn */
   1.172  void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn)
   1.173  {
   1.174 -    struct page_info *pg = mfn_to_page(smfn);
   1.175 -    switch ( (pg->count_info & PGC_SH_type_mask) >> PGC_SH_type_shift )
   1.176 +    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
   1.177 +    switch ( sp->type )
   1.178      {
   1.179 -    case PGC_SH_l2_32_shadow >> PGC_SH_type_shift:
   1.180 +    case SH_type_l2_32_shadow:
   1.181  #if CONFIG_PAGING_LEVELS == 2
   1.182          SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings,2,2)(v,smfn);
   1.183  #else
   1.184 @@ -539,20 +464,18 @@ void shadow_unhook_mappings(struct vcpu 
   1.185  #endif
   1.186          break;
   1.187  #if CONFIG_PAGING_LEVELS >= 3
   1.188 -    case PGC_SH_l2_pae_shadow >> PGC_SH_type_shift:
   1.189 -    case PGC_SH_l2h_pae_shadow >> PGC_SH_type_shift:
   1.190 +    case SH_type_l2_pae_shadow:
   1.191 +    case SH_type_l2h_pae_shadow:
   1.192          SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings,3,3)(v,smfn);
   1.193          break;
   1.194  #endif
   1.195  #if CONFIG_PAGING_LEVELS >= 4
   1.196 -    case PGC_SH_l4_64_shadow >> PGC_SH_type_shift:
   1.197 +    case SH_type_l4_64_shadow:
   1.198          SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings,4,4)(v,smfn);
   1.199          break;
   1.200  #endif
   1.201      default:
   1.202 -        SHADOW_PRINTK("top-level shadow has bad type %08lx\n", 
   1.203 -                       (unsigned long)((pg->count_info & PGC_SH_type_mask)
   1.204 -                                       >> PGC_SH_type_shift));
   1.205 +        SHADOW_PRINTK("top-level shadow has bad type %08x\n", sp->type);
   1.206          BUG();
   1.207      }
   1.208  }
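The switch now dispatches directly on sp->type to the per-mode unhook functions. SHADOW_INTERNAL_NAME picks the multi.c instantiation for a given (shadow levels, guest levels) pair; presumably a token-pasting macro of roughly this shape, shown only to make the call sites readable (assumed spelling, not part of this diff):

    /* Assumed shape: two levels of pasting so that macro arguments
     * are expanded before being glued into the function name. */
    #define SHADOW_INTERNAL_NAME_(name, sl, gl) \
        name ## __shadow_ ## sl ## _guest_ ## gl
    #define SHADOW_INTERNAL_NAME(name, sl, gl) \
        SHADOW_INTERNAL_NAME_(name, sl, gl)

Under that assumption, SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings,3,3) names the 3-level-shadow, 3-level-guest build of that function.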
   1.209 @@ -569,7 +492,7 @@ void shadow_prealloc(struct domain *d, u
   1.210       * per-vcpu shadows, any will do */
   1.211      struct vcpu *v, *v2;
   1.212      struct list_head *l, *t;
   1.213 -    struct page_info *pg;
   1.214 +    struct shadow_page_info *sp;
   1.215      cpumask_t flushmask = CPU_MASK_NONE;
   1.216      mfn_t smfn;
   1.217  
   1.218 @@ -584,8 +507,8 @@ void shadow_prealloc(struct domain *d, u
   1.219      perfc_incrc(shadow_prealloc_1);
   1.220      list_for_each_backwards_safe(l, t, &d->arch.shadow.toplevel_shadows)
   1.221      {
   1.222 -        pg = list_entry(l, struct page_info, list);
   1.223 -        smfn = page_to_mfn(pg);
   1.224 +        sp = list_entry(l, struct shadow_page_info, list);
   1.225 +        smfn = shadow_page_to_mfn(sp);
   1.226  
   1.227          /* Unpin this top-level shadow */
   1.228          sh_unpin(v, smfn);
   1.229 @@ -600,8 +523,8 @@ void shadow_prealloc(struct domain *d, u
   1.230      perfc_incrc(shadow_prealloc_2);
   1.231      list_for_each_backwards_safe(l, t, &d->arch.shadow.toplevel_shadows)
   1.232      {
   1.233 -        pg = list_entry(l, struct page_info, list);
   1.234 -        smfn = page_to_mfn(pg);
   1.235 +        sp = list_entry(l, struct shadow_page_info, list);
   1.236 +        smfn = shadow_page_to_mfn(sp);
   1.237          shadow_unhook_mappings(v, smfn);
   1.238  
   1.239          /* Remember to flush TLBs: we have removed shadow entries that 
   1.240 @@ -642,7 +565,7 @@ void shadow_prealloc(struct domain *d, u
   1.241  static void shadow_blow_tables(unsigned char c)
   1.242  {
   1.243      struct list_head *l, *t;
   1.244 -    struct page_info *pg;
   1.245 +    struct shadow_page_info *sp;
   1.246      struct domain *d;
   1.247      struct vcpu *v;
   1.248      mfn_t smfn;
   1.249 @@ -657,16 +580,16 @@ static void shadow_blow_tables(unsigned 
   1.250              /* Pass one: unpin all top-level pages */
   1.251              list_for_each_backwards_safe(l,t, &d->arch.shadow.toplevel_shadows)
   1.252              {
   1.253 -                pg = list_entry(l, struct page_info, list);
   1.254 -                smfn = page_to_mfn(pg);
   1.255 +                sp = list_entry(l, struct shadow_page_info, list);
   1.256 +                smfn = shadow_page_to_mfn(sp);
   1.257                  sh_unpin(v, smfn);
   1.258              }
   1.259  
   1.260              /* Second pass: unhook entries of in-use shadows */
   1.261              list_for_each_backwards_safe(l,t, &d->arch.shadow.toplevel_shadows)
   1.262              {
   1.263 -                pg = list_entry(l, struct page_info, list);
   1.264 -                smfn = page_to_mfn(pg);
   1.265 +                sp = list_entry(l, struct shadow_page_info, list);
   1.266 +                smfn = shadow_page_to_mfn(sp);
   1.267                  shadow_unhook_mappings(v, smfn);
   1.268              }
   1.269              
   1.270 @@ -693,7 +616,7 @@ mfn_t shadow_alloc(struct domain *d,
   1.271                      u32 shadow_type,
   1.272                      unsigned long backpointer)
   1.273  {
   1.274 -    struct page_info *pg = NULL;
   1.275 +    struct shadow_page_info *sp = NULL;
   1.276      unsigned int order = shadow_order(shadow_type);
   1.277      cpumask_t mask;
   1.278      void *p;
   1.279 @@ -701,51 +624,54 @@ mfn_t shadow_alloc(struct domain *d,
   1.280  
   1.281      ASSERT(shadow_lock_is_acquired(d));
   1.282      ASSERT(order <= SHADOW_MAX_ORDER);
   1.283 -    ASSERT(shadow_type != PGC_SH_none);
   1.284 +    ASSERT(shadow_type != SH_type_none);
   1.285      perfc_incrc(shadow_alloc);
   1.286  
   1.287      /* Find smallest order which can satisfy the request. */
   1.288      for ( i = order; i <= SHADOW_MAX_ORDER; i++ )
   1.289          if ( !list_empty(&d->arch.shadow.freelists[i]) )
   1.290          {
   1.291 -            pg = list_entry(d->arch.shadow.freelists[i].next, 
   1.292 -                            struct page_info, list);
   1.293 -            list_del(&pg->list);
   1.294 +            sp = list_entry(d->arch.shadow.freelists[i].next, 
   1.295 +                            struct shadow_page_info, list);
   1.296 +            list_del(&sp->list);
   1.297              
   1.298              /* We may have to halve the chunk a number of times. */
   1.299              while ( i != order )
   1.300              {
   1.301                  i--;
   1.302 -                SH_SET_PFN_ORDER(pg, i);
   1.303 -                list_add_tail(&pg->list, &d->arch.shadow.freelists[i]);
   1.304 -                pg += 1 << i;
   1.305 +                sp->order = i;
   1.306 +                list_add_tail(&sp->list, &d->arch.shadow.freelists[i]);
   1.307 +                sp += 1 << i;
   1.308              }
   1.309              d->arch.shadow.free_pages -= 1 << order;
   1.310  
   1.311              /* Init page info fields and clear the pages */
   1.312              for ( i = 0; i < 1<<order ; i++ ) 
   1.313              {
   1.314 -                pg[i].u.inuse.type_info = backpointer;
   1.315 -                pg[i].count_info = shadow_type;
   1.316 -                pg[i].shadow_flags = 0;
   1.317 -                INIT_LIST_HEAD(&pg[i].list);
   1.318                  /* Before we overwrite the old contents of this page, 
   1.319                   * we need to be sure that no TLB holds a pointer to it. */
   1.320                  mask = d->domain_dirty_cpumask;
   1.321 -                tlbflush_filter(mask, pg[i].tlbflush_timestamp);
   1.322 +                tlbflush_filter(mask, sp[i].tlbflush_timestamp);
   1.323                  if ( unlikely(!cpus_empty(mask)) )
   1.324                  {
   1.325                      perfc_incrc(shadow_alloc_tlbflush);
   1.326                      flush_tlb_mask(mask);
   1.327                  }
   1.328                  /* Now safe to clear the page for reuse */
   1.329 -                p = sh_map_domain_page(page_to_mfn(pg+i));
   1.330 +                p = sh_map_domain_page(shadow_page_to_mfn(sp+i));
   1.331                  ASSERT(p != NULL);
   1.332                  clear_page(p);
   1.333                  sh_unmap_domain_page(p);
   1.334 +                INIT_LIST_HEAD(&sp[i].list);
   1.335 +                sp[i].type = shadow_type;
   1.336 +                sp[i].pinned = 0;
   1.337 +                sp[i].logdirty = 0;
   1.338 +                sp[i].count = 0;
   1.339 +                sp[i].backpointer = backpointer;
   1.340 +                sp[i].next_shadow = NULL;
   1.341                  perfc_incr(shadow_alloc_count);
   1.342              }
   1.343 -            return page_to_mfn(pg);
   1.344 +            return shadow_page_to_mfn(sp);
   1.345          }
   1.346      
   1.347      /* If we get here, we failed to allocate. This should never happen.
   1.348 @@ -760,7 +686,7 @@ mfn_t shadow_alloc(struct domain *d,
   1.349  /* Return some shadow pages to the pool. */
   1.350  void shadow_free(struct domain *d, mfn_t smfn)
   1.351  {
   1.352 -    struct page_info *pg = mfn_to_page(smfn); 
   1.353 +    struct shadow_page_info *sp = mfn_to_shadow_page(smfn); 
   1.354      u32 shadow_type;
   1.355      unsigned long order;
   1.356      unsigned long mask;
   1.357 @@ -769,9 +695,9 @@ void shadow_free(struct domain *d, mfn_t
   1.358      ASSERT(shadow_lock_is_acquired(d));
   1.359      perfc_incrc(shadow_free);
   1.360  
   1.361 -    shadow_type = pg->count_info & PGC_SH_type_mask;
   1.362 -    ASSERT(shadow_type != PGC_SH_none);
   1.363 -    ASSERT(shadow_type != PGC_SH_p2m_table);
   1.364 +    shadow_type = sp->type;
   1.365 +    ASSERT(shadow_type != SH_type_none);
   1.366 +    ASSERT(shadow_type != SH_type_p2m_table);
   1.367      order = shadow_order(shadow_type);
   1.368  
   1.369      d->arch.shadow.free_pages += 1 << order;
   1.370 @@ -788,12 +714,12 @@ void shadow_free(struct domain *d, mfn_t
   1.371          }
   1.372  #endif
   1.373          /* Strip out the type: this is now a free shadow page */
   1.374 -        pg[i].count_info = 0;
   1.375 +        sp[i].type = 0;
   1.376          /* Remember the TLB timestamp so we will know whether to flush 
   1.377           * TLBs when we reuse the page.  Because the destructors leave the
   1.378           * contents of the pages in place, we can delay TLB flushes until
   1.379           * just before the allocator hands the page out again. */
   1.380 -        pg[i].tlbflush_timestamp = tlbflush_current_time();
   1.381 +        sp[i].tlbflush_timestamp = tlbflush_current_time();
   1.382          perfc_decr(shadow_alloc_count);
   1.383      }
   1.384  
   1.385 @@ -801,25 +727,23 @@ void shadow_free(struct domain *d, mfn_t
   1.386      while ( order < SHADOW_MAX_ORDER )
   1.387      {
   1.388          mask = 1 << order;
   1.389 -        if ( (mfn_x(page_to_mfn(pg)) & mask) ) {
   1.390 +        if ( (mfn_x(shadow_page_to_mfn(sp)) & mask) ) {
   1.391              /* Merge with predecessor block? */
   1.392 -            if ( (((pg-mask)->count_info & PGC_SH_type_mask) != PGT_none) 
   1.393 -                 || (SH_PFN_ORDER(pg-mask) != order) )
   1.394 +            if ( ((sp-mask)->type != PGT_none) || ((sp-mask)->order != order) )
   1.395                  break;
   1.396 -            list_del(&(pg-mask)->list);
   1.397 -            pg -= mask;
   1.398 +            list_del(&(sp-mask)->list);
   1.399 +            sp -= mask;
   1.400          } else {
   1.401              /* Merge with successor block? */
   1.402 -            if ( (((pg+mask)->count_info & PGC_SH_type_mask) != PGT_none)
   1.403 -                 || (SH_PFN_ORDER(pg+mask) != order) )
   1.404 +            if ( ((sp+mask)->type != PGT_none) || ((sp+mask)->order != order) )
   1.405                  break;
   1.406 -            list_del(&(pg+mask)->list);
   1.407 +            list_del(&(sp+mask)->list);
   1.408          }
   1.409          order++;
   1.410      }
   1.411  
   1.412 -    SH_SET_PFN_ORDER(pg, order);
   1.413 -    list_add_tail(&pg->list, &d->arch.shadow.freelists[order]);
   1.414 +    sp->order = order;
   1.415 +    list_add_tail(&sp->list, &d->arch.shadow.freelists[order]);
   1.416  }
   1.417  
   1.418  /* Divert some memory from the pool to be used by the p2m mapping.
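The merge loop in shadow_free() above is a standard binary buddy scheme keyed on MFN alignment: the buddy of a block of 2^order pages at mfn sits at mfn ^ (1 << order), and the test mfn & (1 << order) decides whether that buddy lies below (sp - mask) or above (sp + mask) the block being freed. A standalone illustration of the arithmetic (hypothetical helper, not part of the changeset):

    /* Buddy of the 2^order-page block at mfn: flip the order-th bit. */
    static inline unsigned long buddy_mfn(unsigned long mfn, unsigned int order)
    {
        return mfn ^ (1UL << order);
    }

Merging stops as soon as the neighbour is in use (nonzero type) or is free but currently split to a different order.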
   1.419 @@ -843,7 +767,7 @@ shadow_alloc_p2m_pages(struct domain *d)
   1.420           < (shadow_min_acceptable_pages(d) + (1<<SHADOW_MAX_ORDER)) )
   1.421          return 0; /* Not enough shadow memory: need to increase it first */
   1.422      
   1.423 -    pg = mfn_to_page(shadow_alloc(d, PGC_SH_p2m_table, 0));
   1.424 +    pg = mfn_to_page(shadow_alloc(d, SH_type_p2m_table, 0));
   1.425      d->arch.shadow.p2m_pages += (1<<SHADOW_MAX_ORDER);
   1.426      d->arch.shadow.total_pages -= (1<<SHADOW_MAX_ORDER);
   1.427      for (i = 0; i < (1<<SHADOW_MAX_ORDER); i++)
   1.428 @@ -1221,7 +1145,7 @@ static void shadow_p2m_teardown(struct d
   1.429          pg = list_entry(entry, struct page_info, list);
   1.430          list_del(entry);
   1.431          /* Should have just the one ref we gave it in alloc_p2m_page() */
   1.432 -        if ( (pg->count_info & PGC_SH_count_mask) != 1 )
   1.433 +        if ( (pg->count_info & PGC_count_mask) != 1 )
   1.434          {
   1.435              SHADOW_PRINTK("Odd p2m page count c=%#x t=%"PRtype_info"\n",
   1.436                             pg->count_info, pg->u.inuse.type_info);
   1.437 @@ -1256,7 +1180,7 @@ static unsigned int set_sh_allocation(st
   1.438                                         unsigned int pages,
   1.439                                         int *preempted)
   1.440  {
   1.441 -    struct page_info *pg;
   1.442 +    struct shadow_page_info *sp;
   1.443      unsigned int lower_bound;
   1.444      int j;
   1.445  
   1.446 @@ -1278,8 +1202,9 @@ static unsigned int set_sh_allocation(st
   1.447          if ( d->arch.shadow.total_pages < pages ) 
   1.448          {
   1.449              /* Need to allocate more memory from domheap */
   1.450 -            pg = alloc_domheap_pages(NULL, SHADOW_MAX_ORDER, 0); 
   1.451 -            if ( pg == NULL ) 
   1.452 +            sp = (struct shadow_page_info *)
   1.453 +                alloc_domheap_pages(NULL, SHADOW_MAX_ORDER, 0); 
   1.454 +            if ( sp == NULL ) 
   1.455              { 
   1.456                  SHADOW_PRINTK("failed to allocate shadow pages.\n");
   1.457                  return -ENOMEM;
   1.458 @@ -1288,11 +1213,15 @@ static unsigned int set_sh_allocation(st
   1.459              d->arch.shadow.total_pages += 1<<SHADOW_MAX_ORDER;
   1.460              for ( j = 0; j < 1<<SHADOW_MAX_ORDER; j++ ) 
   1.461              {
   1.462 -                pg[j].u.inuse.type_info = 0;  /* Free page */
   1.463 -                pg[j].tlbflush_timestamp = 0; /* Not in any TLB */
   1.464 +                sp[j].type = 0;  
   1.465 +                sp[j].pinned = 0;
   1.466 +                sp[j].logdirty = 0;
   1.467 +                sp[j].count = 0;
   1.468 +                sp[j].mbz = 0;
   1.469 +                sp[j].tlbflush_timestamp = 0; /* Not in any TLB */
   1.470              }
   1.471 -            SH_SET_PFN_ORDER(pg, SHADOW_MAX_ORDER);
   1.472 -            list_add_tail(&pg->list, 
   1.473 +            sp->order = SHADOW_MAX_ORDER;
   1.474 +            list_add_tail(&sp->list, 
   1.475                            &d->arch.shadow.freelists[SHADOW_MAX_ORDER]);
   1.476          } 
   1.477          else if ( d->arch.shadow.total_pages > pages ) 
   1.478 @@ -1300,12 +1229,12 @@ static unsigned int set_sh_allocation(st
   1.479              /* Need to return memory to domheap */
   1.480              shadow_prealloc(d, SHADOW_MAX_ORDER);
   1.481              ASSERT(!list_empty(&d->arch.shadow.freelists[SHADOW_MAX_ORDER]));
   1.482 -            pg = list_entry(d->arch.shadow.freelists[SHADOW_MAX_ORDER].next, 
   1.483 -                            struct page_info, list);
   1.484 -            list_del(&pg->list);
   1.485 +            sp = list_entry(d->arch.shadow.freelists[SHADOW_MAX_ORDER].next, 
   1.486 +                            struct shadow_page_info, list);
   1.487 +            list_del(&sp->list);
   1.488              d->arch.shadow.free_pages -= 1<<SHADOW_MAX_ORDER;
   1.489              d->arch.shadow.total_pages -= 1<<SHADOW_MAX_ORDER;
   1.490 -            free_domheap_pages(pg, SHADOW_MAX_ORDER);
   1.491 +            free_domheap_pages((struct page_info *)sp, SHADOW_MAX_ORDER);
   1.492          }
   1.493  
   1.494          /* Check to see if we need to yield and try again */
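Casting the result of alloc_domheap_pages() to struct shadow_page_info *, and back again for free_domheap_pages(), is only sound because shadow_page_info overlays struct page_info exactly. A compile-time size check one would want next to the struct definition (hypothetical; any check the changeset itself adds lives in private.h, outside this excerpt):

    /* Fails to compile if the overlay assumption is broken. */
    #define SHADOW_SIZE_CHECK(cond) ((void)sizeof(char[1 - 2 * !(cond)]))

    static inline void shadow_page_info_check(void)
    {
        SHADOW_SIZE_CHECK(sizeof(struct shadow_page_info)
                          == sizeof(struct page_info));
    }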
   1.495 @@ -1357,7 +1286,7 @@ static void sh_hash_audit_bucket(struct 
   1.496  /* Audit one bucket of the hash table */
   1.497  {
   1.498      struct shadow_hash_entry *e, *x;
   1.499 -    struct page_info *pg;
   1.500 +    struct shadow_page_info *sp;
   1.501  
   1.502      if ( !(SHADOW_AUDIT_ENABLE) )
   1.503          return;
   1.504 @@ -1369,7 +1298,7 @@ static void sh_hash_audit_bucket(struct 
   1.505          /* Empty link? */
   1.506          BUG_ON( e->t == 0 ); 
   1.507          /* Bogus type? */
   1.508 -        BUG_ON( e->t > (PGC_SH_max_shadow >> PGC_SH_type_shift) );
   1.509 +        BUG_ON( e->t > SH_type_max_shadow );
   1.510          /* Wrong bucket? */
   1.511          BUG_ON( sh_hash(e->n, e->t) % SHADOW_HASH_BUCKETS != bucket ); 
   1.512          /* Duplicate entry? */
   1.513 @@ -1377,17 +1306,16 @@ static void sh_hash_audit_bucket(struct 
   1.514              BUG_ON( x->n == e->n && x->t == e->t );
   1.515          /* Bogus MFN? */
   1.516          BUG_ON( !valid_mfn(e->smfn) );
   1.517 -        pg = mfn_to_page(e->smfn);
   1.518 +        sp = mfn_to_shadow_page(e->smfn);
   1.519          /* Not a shadow? */
   1.520 -        BUG_ON( page_get_owner(pg) != 0 );
   1.521 +        BUG_ON( sp->mbz != 0 );
   1.522          /* Wrong kind of shadow? */
   1.523 -        BUG_ON( (pg->count_info & PGC_SH_type_mask) >> PGC_SH_type_shift 
   1.524 -                != e->t ); 
   1.525 +        BUG_ON( sp->type != e->t ); 
   1.526          /* Bad backlink? */
   1.527 -        BUG_ON( pg->u.inuse.type_info != e->n );
   1.528 -        if ( e->t != (PGC_SH_fl1_32_shadow >> PGC_SH_type_shift)
   1.529 -             && e->t != (PGC_SH_fl1_pae_shadow >> PGC_SH_type_shift)
   1.530 -             && e->t != (PGC_SH_fl1_64_shadow >> PGC_SH_type_shift) )
   1.531 +        BUG_ON( sp->backpointer != e->n );
   1.532 +        if ( e->t != SH_type_fl1_32_shadow
   1.533 +             && e->t != SH_type_fl1_pae_shadow
   1.534 +             && e->t != SH_type_fl1_64_shadow )
   1.535          {
   1.536              struct page_info *gpg = mfn_to_page(_mfn(e->n));
   1.537              /* Bad shadow flags on guest page? */
   1.538 @@ -1752,66 +1680,66 @@ static void hash_foreach(struct vcpu *v,
   1.539  
   1.540  void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
   1.541  {
   1.542 -    struct page_info *pg = mfn_to_page(smfn);
   1.543 -    u32 t = pg->count_info & PGC_SH_type_mask;
   1.544 +    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
   1.545 +    unsigned int t = sp->type;
   1.546  
   1.547  
   1.548      SHADOW_PRINTK("smfn=%#lx\n", mfn_x(smfn));
   1.549  
   1.550      /* Double-check, if we can, that the shadowed page belongs to this
   1.551       * domain, (by following the back-pointer). */
   1.552 -    ASSERT(t == PGC_SH_fl1_32_shadow  ||  
   1.553 -           t == PGC_SH_fl1_pae_shadow ||  
   1.554 -           t == PGC_SH_fl1_64_shadow  || 
   1.555 -           t == PGC_SH_monitor_table  || 
   1.556 -           (page_get_owner(mfn_to_page(_mfn(pg->u.inuse.type_info))) 
   1.557 +    ASSERT(t == SH_type_fl1_32_shadow  ||  
   1.558 +           t == SH_type_fl1_pae_shadow ||  
   1.559 +           t == SH_type_fl1_64_shadow  || 
   1.560 +           t == SH_type_monitor_table  || 
   1.561 +           (page_get_owner(mfn_to_page(_mfn(sp->backpointer))) 
   1.562              == v->domain)); 
   1.563  
   1.564      /* The down-shifts here are so that the switch statement is on nice
   1.565       * small numbers that the compiler will enjoy */
   1.566 -    switch ( t >> PGC_SH_type_shift )
   1.567 +    switch ( t )
   1.568      {
   1.569  #if CONFIG_PAGING_LEVELS == 2
   1.570 -    case PGC_SH_l1_32_shadow >> PGC_SH_type_shift:
   1.571 -    case PGC_SH_fl1_32_shadow >> PGC_SH_type_shift:
   1.572 +    case SH_type_l1_32_shadow:
   1.573 +    case SH_type_fl1_32_shadow:
   1.574          SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 2, 2)(v, smfn); 
   1.575          break;
   1.576 -    case PGC_SH_l2_32_shadow >> PGC_SH_type_shift:
   1.577 +    case SH_type_l2_32_shadow:
   1.578          SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 2, 2)(v, smfn);
   1.579          break;
   1.580  #else /* PAE or 64bit */
   1.581 -    case PGC_SH_l1_32_shadow >> PGC_SH_type_shift:
   1.582 -    case PGC_SH_fl1_32_shadow >> PGC_SH_type_shift:
   1.583 +    case SH_type_l1_32_shadow:
   1.584 +    case SH_type_fl1_32_shadow:
   1.585          SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 3, 2)(v, smfn);
   1.586          break;
   1.587 -    case PGC_SH_l2_32_shadow >> PGC_SH_type_shift:
   1.588 +    case SH_type_l2_32_shadow:
   1.589          SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 3, 2)(v, smfn);
   1.590          break;
   1.591  #endif
   1.592  
   1.593  #if CONFIG_PAGING_LEVELS >= 3
   1.594 -    case PGC_SH_l1_pae_shadow >> PGC_SH_type_shift:
   1.595 -    case PGC_SH_fl1_pae_shadow >> PGC_SH_type_shift:
   1.596 +    case SH_type_l1_pae_shadow:
   1.597 +    case SH_type_fl1_pae_shadow:
   1.598          SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 3, 3)(v, smfn);
   1.599          break;
   1.600 -    case PGC_SH_l2_pae_shadow >> PGC_SH_type_shift:
   1.601 -    case PGC_SH_l2h_pae_shadow >> PGC_SH_type_shift:
   1.602 +    case SH_type_l2_pae_shadow:
   1.603 +    case SH_type_l2h_pae_shadow:
   1.604          SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 3, 3)(v, smfn);
   1.605          break;
   1.606  #endif
   1.607  
   1.608  #if CONFIG_PAGING_LEVELS >= 4
   1.609 -    case PGC_SH_l1_64_shadow >> PGC_SH_type_shift:
   1.610 -    case PGC_SH_fl1_64_shadow >> PGC_SH_type_shift:
   1.611 +    case SH_type_l1_64_shadow:
   1.612 +    case SH_type_fl1_64_shadow:
   1.613          SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4, 4)(v, smfn);
   1.614          break;
   1.615 -    case PGC_SH_l2_64_shadow >> PGC_SH_type_shift:
   1.616 +    case SH_type_l2_64_shadow:
   1.617          SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4, 4)(v, smfn);
   1.618          break;
   1.619 -    case PGC_SH_l3_64_shadow >> PGC_SH_type_shift:
   1.620 +    case SH_type_l3_64_shadow:
   1.621          SHADOW_INTERNAL_NAME(sh_destroy_l3_shadow, 4, 4)(v, smfn);
   1.622          break;
   1.623 -    case PGC_SH_l4_64_shadow >> PGC_SH_type_shift:
   1.624 +    case SH_type_l4_64_shadow:
   1.625          SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, 4, 4)(v, smfn);
   1.626          break;
   1.627  #endif
   1.628 @@ -1867,12 +1795,12 @@ int shadow_remove_write_access(struct vc
   1.629      };
   1.630  
   1.631      static unsigned int callback_mask = 
   1.632 -          1 << (PGC_SH_l1_32_shadow >> PGC_SH_type_shift)
   1.633 -        | 1 << (PGC_SH_fl1_32_shadow >> PGC_SH_type_shift)
   1.634 -        | 1 << (PGC_SH_l1_pae_shadow >> PGC_SH_type_shift)
   1.635 -        | 1 << (PGC_SH_fl1_pae_shadow >> PGC_SH_type_shift)
   1.636 -        | 1 << (PGC_SH_l1_64_shadow >> PGC_SH_type_shift)
   1.637 -        | 1 << (PGC_SH_fl1_64_shadow >> PGC_SH_type_shift)
   1.638 +          1 << SH_type_l1_32_shadow
   1.639 +        | 1 << SH_type_fl1_32_shadow
   1.640 +        | 1 << SH_type_l1_pae_shadow
   1.641 +        | 1 << SH_type_fl1_pae_shadow
   1.642 +        | 1 << SH_type_l1_64_shadow
   1.643 +        | 1 << SH_type_fl1_64_shadow
   1.644          ;
   1.645      struct page_info *pg = mfn_to_page(gmfn);
   1.646  
   1.647 @@ -1979,8 +1907,7 @@ int shadow_remove_write_access(struct vc
   1.648      {
   1.649          unsigned long old_count = (pg->u.inuse.type_info & PGT_count_mask);
   1.650          mfn_t last_smfn = _mfn(v->arch.shadow.last_writeable_pte_smfn);
   1.651 -        int shtype = (mfn_to_page(last_smfn)->count_info & PGC_SH_type_mask) 
   1.652 -            >> PGC_SH_type_shift;
   1.653 +        int shtype = mfn_to_shadow_page(last_smfn)->type;
   1.654  
   1.655          if ( callbacks[shtype] ) 
   1.656              callbacks[shtype](v, last_smfn, gmfn);
   1.657 @@ -2057,12 +1984,12 @@ int shadow_remove_all_mappings(struct vc
   1.658      };
   1.659  
   1.660      static unsigned int callback_mask = 
   1.661 -          1 << (PGC_SH_l1_32_shadow >> PGC_SH_type_shift)
   1.662 -        | 1 << (PGC_SH_fl1_32_shadow >> PGC_SH_type_shift)
   1.663 -        | 1 << (PGC_SH_l1_pae_shadow >> PGC_SH_type_shift)
   1.664 -        | 1 << (PGC_SH_fl1_pae_shadow >> PGC_SH_type_shift)
   1.665 -        | 1 << (PGC_SH_l1_64_shadow >> PGC_SH_type_shift)
   1.666 -        | 1 << (PGC_SH_fl1_64_shadow >> PGC_SH_type_shift)
   1.667 +          1 << SH_type_l1_32_shadow
   1.668 +        | 1 << SH_type_fl1_32_shadow
   1.669 +        | 1 << SH_type_l1_pae_shadow
   1.670 +        | 1 << SH_type_fl1_pae_shadow
   1.671 +        | 1 << SH_type_l1_64_shadow
   1.672 +        | 1 << SH_type_fl1_64_shadow
   1.673          ;
   1.674  
   1.675      perfc_incrc(shadow_mappings);
   1.676 @@ -2106,34 +2033,34 @@ static int sh_remove_shadow_via_pointer(
   1.677  /* Follow this shadow's up-pointer, if it has one, and remove the reference
   1.678   * found there.  Returns 1 if that was the only reference to this shadow */
   1.679  {
   1.680 -    struct page_info *pg = mfn_to_page(smfn);
   1.681 +    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
   1.682      mfn_t pmfn;
   1.683      void *vaddr;
   1.684      int rc;
   1.685  
   1.686 -    ASSERT((pg->count_info & PGC_SH_type_mask) > 0);
   1.687 -    ASSERT((pg->count_info & PGC_SH_type_mask) < PGC_SH_max_shadow);
   1.688 -    ASSERT((pg->count_info & PGC_SH_type_mask) != PGC_SH_l2_32_shadow);
   1.689 -    ASSERT((pg->count_info & PGC_SH_type_mask) != PGC_SH_l2_pae_shadow);
   1.690 -    ASSERT((pg->count_info & PGC_SH_type_mask) != PGC_SH_l2h_pae_shadow);
   1.691 -    ASSERT((pg->count_info & PGC_SH_type_mask) != PGC_SH_l4_64_shadow);
   1.692 +    ASSERT(sp->type > 0);
   1.693 +    ASSERT(sp->type < SH_type_max_shadow);
   1.694 +    ASSERT(sp->type != SH_type_l2_32_shadow);
   1.695 +    ASSERT(sp->type != SH_type_l2_pae_shadow);
   1.696 +    ASSERT(sp->type != SH_type_l2h_pae_shadow);
   1.697 +    ASSERT(sp->type != SH_type_l4_64_shadow);
   1.698      
   1.699 -    if (pg->up == 0) return 0;
   1.700 -    pmfn = _mfn(pg->up >> PAGE_SHIFT);
   1.701 +    if (sp->up == 0) return 0;
   1.702 +    pmfn = _mfn(sp->up >> PAGE_SHIFT);
   1.703      ASSERT(valid_mfn(pmfn));
   1.704      vaddr = sh_map_domain_page(pmfn);
   1.705      ASSERT(vaddr);
   1.706 -    vaddr += pg->up & (PAGE_SIZE-1);
   1.707 +    vaddr += sp->up & (PAGE_SIZE-1);
   1.708      ASSERT(l1e_get_pfn(*(l1_pgentry_t *)vaddr) == mfn_x(smfn));
   1.709      
   1.710      /* Is this the only reference to this shadow? */
   1.711 -    rc = ((pg->count_info & PGC_SH_count_mask) == 1) ? 1 : 0;
   1.712 +    rc = (sp->count == 1) ? 1 : 0;
   1.713  
   1.714      /* Blank the offending entry */
   1.715 -    switch ((pg->count_info & PGC_SH_type_mask)) 
   1.716 +    switch (sp->type) 
   1.717      {
   1.718 -    case PGC_SH_l1_32_shadow:
   1.719 -    case PGC_SH_l2_32_shadow:
   1.720 +    case SH_type_l1_32_shadow:
   1.721 +    case SH_type_l2_32_shadow:
   1.722  #if CONFIG_PAGING_LEVELS == 2
   1.723          SHADOW_INTERNAL_NAME(sh_clear_shadow_entry,2,2)(v, vaddr, pmfn);
   1.724  #else
   1.725 @@ -2141,16 +2068,16 @@ static int sh_remove_shadow_via_pointer(
   1.726  #endif
   1.727          break;
   1.728  #if CONFIG_PAGING_LEVELS >=3
   1.729 -    case PGC_SH_l1_pae_shadow:
   1.730 -    case PGC_SH_l2_pae_shadow:
   1.731 -    case PGC_SH_l2h_pae_shadow:
   1.732 +    case SH_type_l1_pae_shadow:
   1.733 +    case SH_type_l2_pae_shadow:
   1.734 +    case SH_type_l2h_pae_shadow:
   1.735          SHADOW_INTERNAL_NAME(sh_clear_shadow_entry,3,3)(v, vaddr, pmfn);
   1.736          break;
   1.737  #if CONFIG_PAGING_LEVELS >= 4
   1.738 -    case PGC_SH_l1_64_shadow:
   1.739 -    case PGC_SH_l2_64_shadow:
   1.740 -    case PGC_SH_l3_64_shadow:
   1.741 -    case PGC_SH_l4_64_shadow:
   1.742 +    case SH_type_l1_64_shadow:
   1.743 +    case SH_type_l2_64_shadow:
   1.744 +    case SH_type_l3_64_shadow:
   1.745 +    case SH_type_l4_64_shadow:
   1.746          SHADOW_INTERNAL_NAME(sh_clear_shadow_entry,4,4)(v, vaddr, pmfn);
   1.747          break;
   1.748  #endif
   1.749 @@ -2219,18 +2146,18 @@ void sh_remove_shadows(struct vcpu *v, m
   1.750      /* Another lookup table, for choosing which mask to use */
   1.751      static unsigned int masks[16] = {
   1.752          0, /* none    */
   1.753 -        1 << (PGC_SH_l2_32_shadow >> PGC_SH_type_shift), /* l1_32   */
   1.754 +        1 << SH_type_l2_32_shadow, /* l1_32   */
   1.755          0, /* fl1_32  */
   1.756          0, /* l2_32   */
   1.757 -        ((1 << (PGC_SH_l2h_pae_shadow >> PGC_SH_type_shift))
   1.758 -         | (1 << (PGC_SH_l2_pae_shadow >> PGC_SH_type_shift))), /* l1_pae  */
   1.759 +        ((1 << SH_type_l2h_pae_shadow)
   1.760 +         | (1 << SH_type_l2_pae_shadow)), /* l1_pae  */
   1.761          0, /* fl1_pae */
   1.762          0, /* l2_pae  */
   1.763          0, /* l2h_pae  */
   1.764 -        1 << (PGC_SH_l2_64_shadow >> PGC_SH_type_shift), /* l1_64   */
   1.765 +        1 << SH_type_l2_64_shadow, /* l1_64   */
   1.766          0, /* fl1_64  */
   1.767 -        1 << (PGC_SH_l3_64_shadow >> PGC_SH_type_shift), /* l2_64   */
   1.768 -        1 << (PGC_SH_l4_64_shadow >> PGC_SH_type_shift), /* l3_64   */
   1.769 +        1 << SH_type_l3_64_shadow, /* l2_64   */
   1.770 +        1 << SH_type_l4_64_shadow, /* l3_64   */
   1.771          0, /* l4_64   */
   1.772          0, /* p2m     */
   1.773          0  /* unused  */
   1.774 @@ -2257,31 +2184,31 @@ void sh_remove_shadows(struct vcpu *v, m
   1.775       * call will remove at most one shadow, and terminate immediately when
   1.776       * it does remove it, so we never walk the hash after doing a deletion.  */
   1.777  #define DO_UNSHADOW(_type) do {                                 \
   1.778 -    t = (_type) >> PGC_SH_type_shift;                           \
   1.779 +    t = (_type);                                                \
   1.780      smfn = shadow_hash_lookup(v, mfn_x(gmfn), t);               \
   1.781      if ( !sh_remove_shadow_via_pointer(v, smfn) && !fast )      \
   1.782          hash_foreach(v, masks[t], callbacks, smfn);             \
   1.783  } while (0)
   1.784  
   1.785      /* Top-level shadows need to be unpinned */
   1.786 -#define DO_UNPIN(_type) do {                                            \
   1.787 -    t = (_type) >> PGC_SH_type_shift;                                   \
   1.788 -    smfn = shadow_hash_lookup(v, mfn_x(gmfn), t);                       \
   1.789 -    if ( mfn_to_page(smfn)->count_info & PGC_SH_pinned )                \
   1.790 -        sh_unpin(v, smfn);                                              \
   1.791 +#define DO_UNPIN(_type) do {                            \
   1.792 +    t = (_type);                                        \
   1.793 +    smfn = shadow_hash_lookup(v, mfn_x(gmfn), t);       \
   1.794 +    if ( mfn_to_shadow_page(smfn)->pinned )             \
   1.795 +        sh_unpin(v, smfn);                              \
   1.796  } while (0)
   1.797  
   1.798 -    if ( sh_flags & SHF_L1_32 )   DO_UNSHADOW(PGC_SH_l1_32_shadow);
   1.799 -    if ( sh_flags & SHF_L2_32 )   DO_UNPIN(PGC_SH_l2_32_shadow);
   1.800 +    if ( sh_flags & SHF_L1_32 )   DO_UNSHADOW(SH_type_l1_32_shadow);
   1.801 +    if ( sh_flags & SHF_L2_32 )   DO_UNPIN(SH_type_l2_32_shadow);
   1.802  #if CONFIG_PAGING_LEVELS >= 3
   1.803 -    if ( sh_flags & SHF_L1_PAE )  DO_UNSHADOW(PGC_SH_l1_pae_shadow);
   1.804 -    if ( sh_flags & SHF_L2_PAE )  DO_UNPIN(PGC_SH_l2_pae_shadow);
   1.805 -    if ( sh_flags & SHF_L2H_PAE ) DO_UNPIN(PGC_SH_l2h_pae_shadow);
   1.806 +    if ( sh_flags & SHF_L1_PAE )  DO_UNSHADOW(SH_type_l1_pae_shadow);
   1.807 +    if ( sh_flags & SHF_L2_PAE )  DO_UNPIN(SH_type_l2_pae_shadow);
   1.808 +    if ( sh_flags & SHF_L2H_PAE ) DO_UNPIN(SH_type_l2h_pae_shadow);
   1.809  #if CONFIG_PAGING_LEVELS >= 4
   1.810 -    if ( sh_flags & SHF_L1_64 )   DO_UNSHADOW(PGC_SH_l1_64_shadow);
   1.811 -    if ( sh_flags & SHF_L2_64 )   DO_UNSHADOW(PGC_SH_l2_64_shadow);
   1.812 -    if ( sh_flags & SHF_L3_64 )   DO_UNSHADOW(PGC_SH_l3_64_shadow);
   1.813 -    if ( sh_flags & SHF_L4_64 )   DO_UNPIN(PGC_SH_l4_64_shadow);
   1.814 +    if ( sh_flags & SHF_L1_64 )   DO_UNSHADOW(SH_type_l1_64_shadow);
   1.815 +    if ( sh_flags & SHF_L2_64 )   DO_UNSHADOW(SH_type_l2_64_shadow);
   1.816 +    if ( sh_flags & SHF_L3_64 )   DO_UNSHADOW(SH_type_l3_64_shadow);
   1.817 +    if ( sh_flags & SHF_L4_64 )   DO_UNPIN(SH_type_l4_64_shadow);
   1.818  #endif
   1.819  #endif
   1.820  
   1.821 @@ -2292,7 +2219,7 @@ void sh_remove_shadows(struct vcpu *v, m
   1.822      if ( !fast && (pg->count_info & PGC_page_table) )
   1.823      {
   1.824          SHADOW_ERROR("can't find all shadows of mfn %05lx "
   1.825 -                     "(shadow_flags=%08x)\n",
   1.826 +                     "(shadow_flags=%08lx)\n",
   1.827                        mfn_x(gmfn), pg->shadow_flags);
   1.828          if ( all ) 
   1.829              domain_crash(v->domain);
   1.830 @@ -3021,16 +2948,16 @@ static int shadow_log_dirty_op(
   1.831      if ( clean ) 
   1.832      {
   1.833          struct list_head *l, *t;
   1.834 -        struct page_info *pg;
   1.835 +        struct shadow_page_info *sp;
   1.836  
   1.837          /* Need to revoke write access to the domain's pages again. 
   1.838           * In future, we'll have a less heavy-handed approach to this, 
   1.839           * but for now, we just unshadow everything except Xen. */
   1.840          list_for_each_safe(l, t, &d->arch.shadow.toplevel_shadows)
   1.841          {
   1.842 -            pg = list_entry(l, struct page_info, list);
   1.843 +            sp = list_entry(l, struct shadow_page_info, list);
   1.844              if ( d->vcpu[0] != NULL )
   1.845 -                shadow_unhook_mappings(d->vcpu[0], page_to_mfn(pg));
   1.846 +                shadow_unhook_mappings(d->vcpu[0], shadow_page_to_mfn(sp));
   1.847          }
   1.848  
   1.849          d->arch.shadow.fault_count = 0;
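multi.c gets the same treatment below. Both files lean on a pair of converters between shadow MFNs and struct shadow_page_info; given the overlay, these are presumably just casts wrapped around the existing frame-table lookups (assumed spelling; the real definitions live in shadow/private.h):

    #define mfn_to_shadow_page(_mfn) \
        ((struct shadow_page_info *)mfn_to_page(_mfn))
    #define shadow_page_to_mfn(_spage) \
        (page_to_mfn((struct page_info *)(_spage)))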
     2.1 --- a/xen/arch/x86/mm/shadow/multi.c	Tue Nov 21 18:09:23 2006 -0800
     2.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Thu Nov 23 17:40:28 2006 +0000
     2.3 @@ -100,13 +100,12 @@ static inline mfn_t
     2.4  get_fl1_shadow_status(struct vcpu *v, gfn_t gfn)
     2.5  /* Look for FL1 shadows in the hash table */
     2.6  {
     2.7 -    mfn_t smfn = shadow_hash_lookup(v, gfn_x(gfn),
     2.8 -                                     PGC_SH_fl1_shadow >> PGC_SH_type_shift);
     2.9 +    mfn_t smfn = shadow_hash_lookup(v, gfn_x(gfn), SH_type_fl1_shadow);
    2.10  
    2.11      if ( unlikely(shadow_mode_log_dirty(v->domain) && valid_mfn(smfn)) )
    2.12      {
    2.13 -        struct page_info *page = mfn_to_page(smfn);
    2.14 -        if ( !(page->count_info & PGC_SH_log_dirty) )
    2.15 +        struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
    2.16 +        if ( !(sp->logdirty) )
    2.17              shadow_convert_to_log_dirty(v, smfn);
    2.18      }
    2.19  
    2.20 @@ -117,14 +116,13 @@ static inline mfn_t
    2.21  get_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
    2.22  /* Look for shadows in the hash table */
    2.23  {
    2.24 -    mfn_t smfn = shadow_hash_lookup(v, mfn_x(gmfn),
    2.25 -                                     shadow_type >> PGC_SH_type_shift);
    2.26 +    mfn_t smfn = shadow_hash_lookup(v, mfn_x(gmfn), shadow_type);
    2.27      perfc_incrc(shadow_get_shadow_status);
    2.28  
    2.29      if ( unlikely(shadow_mode_log_dirty(v->domain) && valid_mfn(smfn)) )
    2.30      {
    2.31 -        struct page_info *page = mfn_to_page(smfn);
    2.32 -        if ( !(page->count_info & PGC_SH_log_dirty) )
    2.33 +        struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
    2.34 +        if ( !(sp->logdirty) )
    2.35              shadow_convert_to_log_dirty(v, smfn);
    2.36      }
    2.37  
    2.38 @@ -136,16 +134,15 @@ set_fl1_shadow_status(struct vcpu *v, gf
    2.39  /* Put an FL1 shadow into the hash table */
    2.40  {
    2.41      SHADOW_PRINTK("gfn=%"SH_PRI_gfn", type=%08x, smfn=%05lx\n",
    2.42 -                   gfn_x(gfn), PGC_SH_fl1_shadow, mfn_x(smfn));
    2.43 +                   gfn_x(gfn), SH_type_fl1_shadow, mfn_x(smfn));
    2.44  
    2.45      if ( unlikely(shadow_mode_log_dirty(v->domain)) )
    2.46          // mark this shadow as a log dirty shadow...
    2.47 -        set_bit(_PGC_SH_log_dirty, &mfn_to_page(smfn)->count_info);
    2.48 +        mfn_to_shadow_page(smfn)->logdirty = 1;
    2.49      else
    2.50 -        clear_bit(_PGC_SH_log_dirty, &mfn_to_page(smfn)->count_info);
    2.51 -
    2.52 -    shadow_hash_insert(v, gfn_x(gfn),
    2.53 -                        PGC_SH_fl1_shadow >> PGC_SH_type_shift, smfn);
    2.54 +        mfn_to_shadow_page(smfn)->logdirty = 0;
    2.55 +
    2.56 +    shadow_hash_insert(v, gfn_x(gfn), SH_type_fl1_shadow, smfn);
    2.57  }
    2.58  
    2.59  static inline void 
    2.60 @@ -161,15 +158,14 @@ set_shadow_status(struct vcpu *v, mfn_t 
    2.61  
    2.62      if ( unlikely(shadow_mode_log_dirty(d)) )
    2.63          // mark this shadow as a log dirty shadow...
    2.64 -        set_bit(_PGC_SH_log_dirty, &mfn_to_page(smfn)->count_info);
    2.65 +        mfn_to_shadow_page(smfn)->logdirty = 1;
    2.66      else
    2.67 -        clear_bit(_PGC_SH_log_dirty, &mfn_to_page(smfn)->count_info);
    2.68 +        mfn_to_shadow_page(smfn)->logdirty = 0;
    2.69  
    2.70      res = get_page(mfn_to_page(gmfn), d);
    2.71      ASSERT(res == 1);
    2.72  
    2.73 -    shadow_hash_insert(v, mfn_x(gmfn), shadow_type >> PGC_SH_type_shift,
    2.74 -                        smfn);
    2.75 +    shadow_hash_insert(v, mfn_x(gmfn), shadow_type, smfn);
    2.76  }
    2.77  
    2.78  static inline void 
    2.79 @@ -177,9 +173,8 @@ delete_fl1_shadow_status(struct vcpu *v,
    2.80  /* Remove a shadow from the hash table */
    2.81  {
    2.82      SHADOW_PRINTK("gfn=%"SH_PRI_gfn", type=%08x, smfn=%05lx\n",
    2.83 -                   gfn_x(gfn), PGC_SH_fl1_shadow, mfn_x(smfn));
    2.84 -    shadow_hash_delete(v, gfn_x(gfn),
    2.85 -                        PGC_SH_fl1_shadow >> PGC_SH_type_shift, smfn);
    2.86 +                   gfn_x(gfn), SH_type_fl1_shadow, mfn_x(smfn));
    2.87 +    shadow_hash_delete(v, gfn_x(gfn), SH_type_fl1_shadow, smfn);
    2.88  }
    2.89  
    2.90  static inline void 
    2.91 @@ -189,8 +184,7 @@ delete_shadow_status(struct vcpu *v, mfn
    2.92      SHADOW_PRINTK("d=%d, v=%d, gmfn=%05lx, type=%08x, smfn=%05lx\n",
    2.93                     v->domain->domain_id, v->vcpu_id,
    2.94                     mfn_x(gmfn), shadow_type, mfn_x(smfn));
    2.95 -    shadow_hash_delete(v, mfn_x(gmfn),
    2.96 -                        shadow_type >> PGC_SH_type_shift, smfn);
    2.97 +    shadow_hash_delete(v, mfn_x(gmfn), shadow_type, smfn);
    2.98      put_page(mfn_to_page(gmfn));
    2.99  }
   2.100  
   2.101 @@ -394,27 +388,27 @@ static void sh_audit_gw(struct vcpu *v, 
   2.102  #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
   2.103      if ( valid_mfn(gw->l4mfn)
   2.104           && valid_mfn((smfn = get_shadow_status(v, gw->l4mfn, 
   2.105 -                                                PGC_SH_l4_shadow))) )
   2.106 +                                                SH_type_l4_shadow))) )
   2.107          (void) sh_audit_l4_table(v, smfn, _mfn(INVALID_MFN));
   2.108      if ( valid_mfn(gw->l3mfn)
   2.109           && valid_mfn((smfn = get_shadow_status(v, gw->l3mfn, 
   2.110 -                                                PGC_SH_l3_shadow))) )
   2.111 +                                                SH_type_l3_shadow))) )
   2.112          (void) sh_audit_l3_table(v, smfn, _mfn(INVALID_MFN));
   2.113  #endif /* PAE or 64... */
   2.114      if ( valid_mfn(gw->l2mfn) )
   2.115      {
   2.116          if ( valid_mfn((smfn = get_shadow_status(v, gw->l2mfn, 
   2.117 -                                                 PGC_SH_l2_shadow))) )
   2.118 +                                                 SH_type_l2_shadow))) )
   2.119              (void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
   2.120  #if GUEST_PAGING_LEVELS == 3
   2.121          if ( valid_mfn((smfn = get_shadow_status(v, gw->l2mfn, 
   2.122 -                                                 PGC_SH_l2h_shadow))) )
   2.123 +                                                 SH_type_l2h_shadow))) )
   2.124              (void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
   2.125  #endif
   2.126      }
   2.127      if ( valid_mfn(gw->l1mfn)
   2.128           && valid_mfn((smfn = get_shadow_status(v, gw->l1mfn, 
   2.129 -                                                PGC_SH_l1_shadow))) )
   2.130 +                                                SH_type_l1_shadow))) )
   2.131          (void) sh_audit_l1_table(v, smfn, _mfn(INVALID_MFN));
   2.132      else if ( gw->l2e
   2.133                && (guest_l2e_get_flags(*gw->l2e) & _PAGE_PSE)
   2.134 @@ -1193,14 +1187,12 @@ static inline void increment_ptr_to_gues
   2.135  }
   2.136  
   2.137  /* All kinds of l1: touch all entries */
   2.138 -#define _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)       \
   2.139 +#define _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)        \
   2.140  do {                                                                    \
   2.141      int _i;                                                             \
   2.142      shadow_l1e_t *_sp = map_shadow_page((_sl1mfn));                     \
   2.143 -    ASSERT((mfn_to_page(_sl1mfn)->count_info & PGC_SH_type_mask)       \
   2.144 -           == PGC_SH_l1_shadow                                         \
   2.145 -           || (mfn_to_page(_sl1mfn)->count_info & PGC_SH_type_mask)    \
   2.146 -           == PGC_SH_fl1_shadow);                                      \
   2.147 +    ASSERT(mfn_to_shadow_page(_sl1mfn)->type == SH_type_l1_shadow       \
   2.148 +           || mfn_to_shadow_page(_sl1mfn)->type == SH_type_fl1_shadow); \
   2.149      for ( _i = 0; _i < SHADOW_L1_PAGETABLE_ENTRIES; _i++ )              \
   2.150      {                                                                   \
   2.151          (_sl1e) = _sp + _i;                                             \
   2.152 @@ -1214,18 +1206,18 @@ do {                                    
   2.153  
   2.154  /* 32-bit l1, on PAE or 64-bit shadows: need to walk both pages of shadow */
   2.155  #if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
   2.156 -#define SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done,  _code)       \
   2.157 +#define SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done,  _code)        \
   2.158  do {                                                                    \
   2.159      int __done = 0;                                                     \
   2.160 -    _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p,                         \
   2.161 +    _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p,                          \
   2.162                           ({ (__done = _done); }), _code);               \
   2.163      _sl1mfn = _mfn(mfn_x(_sl1mfn) + 1);                                 \
   2.164      if ( !__done )                                                      \
   2.165 -        _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p,                     \
   2.166 +        _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p,                      \
   2.167                               ({ (__done = _done); }), _code);           \
   2.168  } while (0)
   2.169  #else /* Everything else; l1 shadows are only one page */
   2.170 -#define SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)        \
   2.171 +#define SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)         \
   2.172         _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)
   2.173  #endif
   2.174      
   2.175 @@ -1233,11 +1225,10 @@ do {                                    
   2.176  #if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
   2.177  
   2.178  /* 32-bit l2 on PAE/64: four pages, touch every second entry, and avoid Xen */
   2.179 -#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code)    \
   2.180 +#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code)     \
   2.181  do {                                                                      \
   2.182      int _i, _j, __done = 0;                                               \
   2.183 -    ASSERT((mfn_to_page(_sl2mfn)->count_info & PGC_SH_type_mask)         \
   2.184 -           == PGC_SH_l2_32_shadow);                                      \
   2.185 +    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_32_shadow);    \
   2.186      for ( _j = 0; _j < 4 && !__done; _j++ )                               \
   2.187      {                                                                     \
   2.188          shadow_l2e_t *_sp = map_shadow_page(_sl2mfn);                     \
   2.189 @@ -1260,12 +1251,11 @@ do {                                    
   2.190  #elif GUEST_PAGING_LEVELS == 2
   2.191  
   2.192  /* 32-bit on 32-bit: avoid Xen entries */
   2.193 -#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code)     \
   2.194 +#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code)      \
   2.195  do {                                                                       \
   2.196      int _i;                                                                \
   2.197      shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                        \
   2.198 -    ASSERT((mfn_to_page(_sl2mfn)->count_info & PGC_SH_type_mask)          \
   2.199 -           == PGC_SH_l2_32_shadow);                                       \
   2.200 +    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_32_shadow);     \
   2.201      for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                 \
   2.202          if ( (!(_xen))                                                     \
   2.203               ||                                                            \
   2.204 @@ -1283,18 +1273,15 @@ do {                                    
   2.205  #elif GUEST_PAGING_LEVELS == 3
   2.206  
   2.207  /* PAE: if it's an l2h, don't touch Xen mappings */
   2.208 -#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code)     \
   2.209 +#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code)      \
   2.210  do {                                                                       \
   2.211      int _i;                                                                \
   2.212      shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                        \
   2.213 -    ASSERT((mfn_to_page(_sl2mfn)->count_info & PGC_SH_type_mask)          \
   2.214 -           == PGC_SH_l2_pae_shadow                                        \
   2.215 -           || (mfn_to_page(_sl2mfn)->count_info & PGC_SH_type_mask)       \
   2.216 -           == PGC_SH_l2h_pae_shadow);                                     \
   2.217 +    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_pae_shadow      \
   2.218 +           || mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_pae_shadow);\
   2.219      for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                 \
   2.220          if ( (!(_xen))                                                     \
   2.221 -             || ((mfn_to_page(_sl2mfn)->count_info & PGC_SH_type_mask)    \
   2.222 -                 != PGC_SH_l2h_pae_shadow)                                \
   2.223 +             || mfn_to_shadow_page(_sl2mfn)->type != SH_type_l2h_pae_shadow\
   2.224               || ((_i + (3 * SHADOW_L2_PAGETABLE_ENTRIES))                  \
   2.225                   < (HYPERVISOR_VIRT_START >> SHADOW_L2_PAGETABLE_SHIFT)) ) \
   2.226          {                                                                  \
   2.227 @@ -1310,12 +1297,11 @@ do {                                    
   2.228  #else 
   2.229  
   2.230  /* 64-bit l2: touch all entries */
   2.231 -#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code)  \
   2.232 +#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code)   \
   2.233  do {                                                                    \
   2.234      int _i;                                                             \
   2.235      shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                     \
   2.236 -    ASSERT((mfn_to_page(_sl2mfn)->count_info & PGC_SH_type_mask)       \
   2.237 -           == PGC_SH_l2_64_shadow);                                    \
   2.238 +    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_64_shadow);  \
   2.239      for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )              \
   2.240      {                                                                   \
   2.241          (_sl2e) = _sp + _i;                                             \
   2.242 @@ -1332,12 +1318,11 @@ do {                                    
   2.243  #if GUEST_PAGING_LEVELS == 4
   2.244  
   2.245  /* 64-bit l3: touch all entries */
   2.246 -#define SHADOW_FOREACH_L3E(_sl3mfn, _sl3e, _gl3p, _done, _code)        \
   2.247 +#define SHADOW_FOREACH_L3E(_sl3mfn, _sl3e, _gl3p, _done, _code)         \
   2.248  do {                                                                    \
   2.249      int _i;                                                             \
   2.250      shadow_l3e_t *_sp = map_shadow_page((_sl3mfn));                     \
   2.251 -    ASSERT((mfn_to_page(_sl3mfn)->count_info & PGC_SH_type_mask)       \
   2.252 -           == PGC_SH_l3_64_shadow);                                    \
   2.253 +    ASSERT(mfn_to_shadow_page(_sl3mfn)->type == SH_type_l3_64_shadow);  \
   2.254      for ( _i = 0; _i < SHADOW_L3_PAGETABLE_ENTRIES; _i++ )              \
   2.255      {                                                                   \
   2.256          (_sl3e) = _sp + _i;                                             \
   2.257 @@ -1350,12 +1335,11 @@ do {                                    
   2.258  } while (0)
   2.259  
   2.260  /* 64-bit l4: avoid Xen mappings */
   2.261 -#define SHADOW_FOREACH_L4E(_sl4mfn, _sl4e, _gl4p, _done, _xen, _code)  \
   2.262 +#define SHADOW_FOREACH_L4E(_sl4mfn, _sl4e, _gl4p, _done, _xen, _code)   \
   2.263  do {                                                                    \
   2.264      int _i;                                                             \
   2.265      shadow_l4e_t *_sp = map_shadow_page((_sl4mfn));                     \
   2.266 -    ASSERT((mfn_to_page(_sl4mfn)->count_info & PGC_SH_type_mask)       \
   2.267 -           == PGC_SH_l4_64_shadow);                                    \
   2.268 +    ASSERT(mfn_to_shadow_page(_sl4mfn)->type == SH_type_l4_64_shadow);  \
   2.269      for ( _i = 0; _i < SHADOW_L4_PAGETABLE_ENTRIES; _i++ )              \
   2.270      {                                                                   \
   2.271          if ( (!(_xen)) || is_guest_l4_slot(_i) )                        \
   2.272 @@ -1556,12 +1540,12 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
   2.273      SHADOW_DEBUG(MAKE_SHADOW, "(%05lx, %u)=>%05lx\n",
   2.274                    mfn_x(gmfn), shadow_type, mfn_x(smfn));
   2.275  
   2.276 -    if ( shadow_type != PGC_SH_l2_32_shadow 
   2.277 -         && shadow_type != PGC_SH_l2_pae_shadow 
   2.278 -         && shadow_type != PGC_SH_l2h_pae_shadow 
   2.279 -         && shadow_type != PGC_SH_l4_64_shadow )
   2.280 +    if ( shadow_type != SH_type_l2_32_shadow 
   2.281 +         && shadow_type != SH_type_l2_pae_shadow 
   2.282 +         && shadow_type != SH_type_l2h_pae_shadow 
   2.283 +         && shadow_type != SH_type_l4_64_shadow )
    2.284          /* Lower-level shadow, not yet linked from a higher level */
   2.285 -        mfn_to_page(smfn)->up = 0;
   2.286 +        mfn_to_shadow_page(smfn)->up = 0;
   2.287  
   2.288      // Create the Xen mappings...
   2.289      if ( !shadow_mode_external(v->domain) )
   2.290 @@ -1569,15 +1553,15 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
   2.291          switch (shadow_type) 
   2.292          {
   2.293  #if CONFIG_PAGING_LEVELS == 4 && GUEST_PAGING_LEVELS == 4
   2.294 -        case PGC_SH_l4_shadow:
   2.295 +        case SH_type_l4_shadow:
   2.296              sh_install_xen_entries_in_l4(v, gmfn, smfn); break;
   2.297  #endif
   2.298  #if CONFIG_PAGING_LEVELS == 3 && GUEST_PAGING_LEVELS == 3
   2.299 -        case PGC_SH_l2h_shadow:
   2.300 +        case SH_type_l2h_shadow:
   2.301              sh_install_xen_entries_in_l2h(v, smfn); break;
   2.302  #endif
   2.303  #if CONFIG_PAGING_LEVELS == 2 && GUEST_PAGING_LEVELS == 2
   2.304 -        case PGC_SH_l2_shadow:
   2.305 +        case SH_type_l2_shadow:
   2.306              sh_install_xen_entries_in_l2(v, gmfn, smfn); break;
   2.307  #endif
   2.308          default: /* Do nothing */ break;
   2.309 @@ -1594,7 +1578,7 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
   2.310  static mfn_t
   2.311  make_fl1_shadow(struct vcpu *v, gfn_t gfn)
   2.312  {
   2.313 -    mfn_t smfn = shadow_alloc(v->domain, PGC_SH_fl1_shadow,
   2.314 +    mfn_t smfn = shadow_alloc(v->domain, SH_type_fl1_shadow,
   2.315                                 (unsigned long) gfn_x(gfn));
   2.316  
   2.317      SHADOW_DEBUG(MAKE_SHADOW, "(%" SH_PRI_gfn ")=>%" SH_PRI_mfn "\n",
   2.318 @@ -1616,7 +1600,7 @@ sh_make_monitor_table(struct vcpu *v)
   2.319      {
   2.320          struct domain *d = v->domain;
   2.321          mfn_t m4mfn;
   2.322 -        m4mfn = shadow_alloc(d, PGC_SH_monitor_table, 0);
   2.323 +        m4mfn = shadow_alloc(d, SH_type_monitor_table, 0);
   2.324          sh_install_xen_entries_in_l4(v, m4mfn, m4mfn);
   2.325          /* Remember the level of this table */
   2.326          mfn_to_page(m4mfn)->shadow_flags = 4;
   2.327 @@ -1626,7 +1610,7 @@ sh_make_monitor_table(struct vcpu *v)
   2.328          {
   2.329              mfn_t m3mfn; 
   2.330              l4_pgentry_t *l4e;
   2.331 -            m3mfn = shadow_alloc(d, PGC_SH_monitor_table, 0);
   2.332 +            m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
   2.333              mfn_to_page(m3mfn)->shadow_flags = 3;
   2.334              l4e = sh_map_domain_page(m4mfn);
   2.335              l4e[0] = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR);
   2.336 @@ -1645,13 +1629,13 @@ sh_make_monitor_table(struct vcpu *v)
   2.337          l2_pgentry_t *l2e;
   2.338          int i;
   2.339  
   2.340 -        m3mfn = shadow_alloc(d, PGC_SH_monitor_table, 0);
   2.341 +        m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
   2.342          /* Remember the level of this table */
   2.343          mfn_to_page(m3mfn)->shadow_flags = 3;
   2.344  
   2.345          // Install a monitor l2 table in slot 3 of the l3 table.
   2.346          // This is used for all Xen entries, including linear maps
   2.347 -        m2mfn = shadow_alloc(d, PGC_SH_monitor_table, 0);
   2.348 +        m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
   2.349          mfn_to_page(m2mfn)->shadow_flags = 2;
   2.350          l3e = sh_map_domain_page(m3mfn);
   2.351          l3e[3] = l3e_from_pfn(mfn_x(m2mfn), _PAGE_PRESENT);
   2.352 @@ -1675,7 +1659,7 @@ sh_make_monitor_table(struct vcpu *v)
   2.353      {
   2.354          struct domain *d = v->domain;
   2.355          mfn_t m2mfn;
   2.356 -        m2mfn = shadow_alloc(d, PGC_SH_monitor_table, 0);
   2.357 +        m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
   2.358          sh_install_xen_entries_in_l2(v, m2mfn, m2mfn);
   2.359          /* Remember the level of this table */
   2.360          mfn_to_page(m2mfn)->shadow_flags = 2;
   2.361 @@ -1732,11 +1716,11 @@ static shadow_l3e_t * shadow_get_and_cre
   2.362          int r;
   2.363          shadow_l4e_t new_sl4e;
   2.364          /* No l3 shadow installed: find and install it. */
   2.365 -        *sl3mfn = get_shadow_status(v, gw->l3mfn, PGC_SH_l3_shadow);
   2.366 +        *sl3mfn = get_shadow_status(v, gw->l3mfn, SH_type_l3_shadow);
   2.367          if ( !valid_mfn(*sl3mfn) ) 
   2.368          {
   2.369              /* No l3 shadow of this page exists at all: make one. */
   2.370 -            *sl3mfn = sh_make_shadow(v, gw->l3mfn, PGC_SH_l3_shadow);
   2.371 +            *sl3mfn = sh_make_shadow(v, gw->l3mfn, SH_type_l3_shadow);
   2.372          }
   2.373          /* Install the new sl3 table in the sl4e */
   2.374          l4e_propagate_from_guest(v, gw->l4e, gw->l4mfn, 
   2.375 @@ -1772,11 +1756,11 @@ static shadow_l2e_t * shadow_get_and_cre
   2.376          int r;
   2.377          shadow_l3e_t new_sl3e;
   2.378          /* No l2 shadow installed: find and install it. */
   2.379 -        *sl2mfn = get_shadow_status(v, gw->l2mfn, PGC_SH_l2_shadow);
   2.380 +        *sl2mfn = get_shadow_status(v, gw->l2mfn, SH_type_l2_shadow);
   2.381          if ( !valid_mfn(*sl2mfn) ) 
   2.382          {
   2.383              /* No l2 shadow of this page exists at all: make one. */
   2.384 -            *sl2mfn = sh_make_shadow(v, gw->l2mfn, PGC_SH_l2_shadow);
   2.385 +            *sl2mfn = sh_make_shadow(v, gw->l2mfn, SH_type_l2_shadow);
   2.386          }
   2.387          /* Install the new sl2 table in the sl3e */
   2.388          l3e_propagate_from_guest(v, gw->l3e, gw->l3mfn, 
   2.389 @@ -1852,11 +1836,11 @@ static shadow_l1e_t * shadow_get_and_cre
   2.390          {
   2.391              /* Shadowing an actual guest l1 table */
   2.392              if ( !valid_mfn(gw->l2mfn) ) return NULL; /* No guest page. */
   2.393 -            *sl1mfn = get_shadow_status(v, gw->l1mfn, PGC_SH_l1_shadow);
   2.394 +            *sl1mfn = get_shadow_status(v, gw->l1mfn, SH_type_l1_shadow);
   2.395              if ( !valid_mfn(*sl1mfn) ) 
   2.396              {
   2.397                  /* No l1 shadow of this page exists at all: make one. */
   2.398 -                *sl1mfn = sh_make_shadow(v, gw->l1mfn, PGC_SH_l1_shadow);
   2.399 +                *sl1mfn = sh_make_shadow(v, gw->l1mfn, SH_type_l1_shadow);
   2.400              }
   2.401          }
   2.402          /* Install the new sl1 table in the sl2e */
   2.403 @@ -1891,20 +1875,20 @@ static shadow_l1e_t * shadow_get_and_cre
   2.404  void sh_destroy_l4_shadow(struct vcpu *v, mfn_t smfn)
   2.405  {
   2.406      shadow_l4e_t *sl4e;
   2.407 -    u32 t = mfn_to_page(smfn)->count_info & PGC_SH_type_mask;
   2.408 +    u32 t = mfn_to_shadow_page(smfn)->type;
   2.409      mfn_t gmfn, sl4mfn;
   2.410      int xen_mappings;
   2.411  
   2.412      SHADOW_DEBUG(DESTROY_SHADOW,
   2.413                    "%s(%05lx)\n", __func__, mfn_x(smfn));
   2.414 -    ASSERT(t == PGC_SH_l4_shadow);
   2.415 +    ASSERT(t == SH_type_l4_shadow);
   2.416  
   2.417      /* Record that the guest page isn't shadowed any more (in this type) */
   2.418 -    gmfn = _mfn(mfn_to_page(smfn)->u.inuse.type_info);
   2.419 +    gmfn = _mfn(mfn_to_shadow_page(smfn)->backpointer);
   2.420      delete_shadow_status(v, gmfn, t, smfn);
   2.421      shadow_demote(v, gmfn, t);
   2.422      /* Take this shadow off the list of root shadows */
   2.423 -    list_del_init(&mfn_to_page(smfn)->list);
   2.424 +    list_del_init(&mfn_to_shadow_page(smfn)->list);
   2.425  
   2.426      /* Decrement refcounts of all the old entries */
   2.427      xen_mappings = (!shadow_mode_external(v->domain));
   2.428 @@ -1925,15 +1909,15 @@ void sh_destroy_l4_shadow(struct vcpu *v
   2.429  void sh_destroy_l3_shadow(struct vcpu *v, mfn_t smfn)
   2.430  {
   2.431      shadow_l3e_t *sl3e;
   2.432 -    u32 t = mfn_to_page(smfn)->count_info & PGC_SH_type_mask;
   2.433 +    u32 t = mfn_to_shadow_page(smfn)->type;
   2.434      mfn_t gmfn, sl3mfn;
   2.435  
   2.436      SHADOW_DEBUG(DESTROY_SHADOW,
   2.437                    "%s(%05lx)\n", __func__, mfn_x(smfn));
   2.438 -    ASSERT(t == PGC_SH_l3_shadow);
   2.439 +    ASSERT(t == SH_type_l3_shadow);
   2.440  
   2.441      /* Record that the guest page isn't shadowed any more (in this type) */
   2.442 -    gmfn = _mfn(mfn_to_page(smfn)->u.inuse.type_info);
   2.443 +    gmfn = _mfn(mfn_to_shadow_page(smfn)->backpointer);
   2.444      delete_shadow_status(v, gmfn, t, smfn);
   2.445      shadow_demote(v, gmfn, t);
   2.446  
   2.447 @@ -1955,22 +1939,22 @@ void sh_destroy_l3_shadow(struct vcpu *v
   2.448  void sh_destroy_l2_shadow(struct vcpu *v, mfn_t smfn)
   2.449  {
   2.450      shadow_l2e_t *sl2e;
   2.451 -    u32 t = mfn_to_page(smfn)->count_info & PGC_SH_type_mask;
   2.452 +    u32 t = mfn_to_shadow_page(smfn)->type;
   2.453      mfn_t gmfn, sl2mfn;
   2.454      int xen_mappings;
   2.455  
   2.456      SHADOW_DEBUG(DESTROY_SHADOW,
   2.457                    "%s(%05lx)\n", __func__, mfn_x(smfn));
   2.458 -    ASSERT(t == PGC_SH_l2_shadow 
   2.459 -           || t == PGC_SH_l2h_pae_shadow);
   2.460 +    ASSERT(t == SH_type_l2_shadow 
   2.461 +           || t == SH_type_l2h_pae_shadow);
   2.462  
   2.463      /* Record that the guest page isn't shadowed any more (in this type) */
   2.464 -    gmfn = _mfn(mfn_to_page(smfn)->u.inuse.type_info);
   2.465 +    gmfn = _mfn(mfn_to_shadow_page(smfn)->backpointer);
   2.466      delete_shadow_status(v, gmfn, t, smfn);
   2.467      shadow_demote(v, gmfn, t);
   2.468  #if (GUEST_PAGING_LEVELS == 2) || (GUEST_PAGING_LEVELS == 3)
   2.469      /* Take this shadow off the list of root shadows */
   2.470 -    list_del_init(&mfn_to_page(smfn)->list);
   2.471 +    list_del_init(&mfn_to_shadow_page(smfn)->list);
   2.472  #endif
   2.473  
   2.474      /* Decrement refcounts of all the old entries */
   2.475 @@ -1978,7 +1962,7 @@ void sh_destroy_l2_shadow(struct vcpu *v
   2.476      xen_mappings = (!shadow_mode_external(v->domain) &&
   2.477                      ((GUEST_PAGING_LEVELS == 2) ||
   2.478                       ((GUEST_PAGING_LEVELS == 3) &&
   2.479 -                      (t == PGC_SH_l2h_pae_shadow))));
   2.480 +                      (t == SH_type_l2h_pae_shadow))));
   2.481      SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, xen_mappings, {
   2.482          if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT ) 
   2.483              sh_put_ref(v, shadow_l2e_get_mfn(*sl2e),
   2.484 @@ -1994,21 +1978,21 @@ void sh_destroy_l1_shadow(struct vcpu *v
   2.485  {
   2.486      struct domain *d = v->domain;
   2.487      shadow_l1e_t *sl1e;
   2.488 -    u32 t = mfn_to_page(smfn)->count_info & PGC_SH_type_mask;
   2.489 +    u32 t = mfn_to_shadow_page(smfn)->type;
   2.490  
   2.491      SHADOW_DEBUG(DESTROY_SHADOW,
   2.492                    "%s(%05lx)\n", __func__, mfn_x(smfn));
   2.493 -    ASSERT(t == PGC_SH_l1_shadow || t == PGC_SH_fl1_shadow);
   2.494 +    ASSERT(t == SH_type_l1_shadow || t == SH_type_fl1_shadow);
   2.495  
   2.496      /* Record that the guest page isn't shadowed any more (in this type) */
   2.497 -    if ( t == PGC_SH_fl1_shadow )
   2.498 +    if ( t == SH_type_fl1_shadow )
   2.499      {
   2.500 -        gfn_t gfn = _gfn(mfn_to_page(smfn)->u.inuse.type_info);
   2.501 +        gfn_t gfn = _gfn(mfn_to_shadow_page(smfn)->backpointer);
   2.502          delete_fl1_shadow_status(v, gfn, smfn);
   2.503      }
   2.504      else 
   2.505      {
   2.506 -        mfn_t gmfn = _mfn(mfn_to_page(smfn)->u.inuse.type_info);
   2.507 +        mfn_t gmfn = _mfn(mfn_to_shadow_page(smfn)->backpointer);
   2.508          delete_shadow_status(v, gmfn, t, smfn);
   2.509          shadow_demote(v, gmfn, t);
   2.510      }
   2.511 @@ -2032,8 +2016,7 @@ void sh_destroy_l1_shadow(struct vcpu *v
   2.512  void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
   2.513  {
   2.514      struct domain *d = v->domain;
   2.515 -    ASSERT((mfn_to_page(mmfn)->count_info & PGC_SH_type_mask)
   2.516 -           == PGC_SH_monitor_table);
   2.517 +    ASSERT(mfn_to_shadow_page(mmfn)->type == SH_type_monitor_table);
   2.518  
   2.519  #if (CONFIG_PAGING_LEVELS == 4) && (SHADOW_PAGING_LEVELS != 4)
   2.520      /* Need to destroy the l3 monitor page in slot 0 too */
   2.521 @@ -2129,7 +2112,7 @@ static int validate_gl4e(struct vcpu *v,
   2.522          gfn_t gl3gfn = guest_l4e_get_gfn(*new_gl4e);
   2.523          mfn_t gl3mfn = vcpu_gfn_to_mfn(v, gl3gfn);
   2.524          if ( valid_mfn(gl3mfn) )
   2.525 -            sl3mfn = get_shadow_status(v, gl3mfn, PGC_SH_l3_shadow);
   2.526 +            sl3mfn = get_shadow_status(v, gl3mfn, SH_type_l3_shadow);
   2.527          else
   2.528              result |= SHADOW_SET_ERROR;
   2.529      }
   2.530 @@ -2181,7 +2164,7 @@ static int validate_gl3e(struct vcpu *v,
   2.531          gfn_t gl2gfn = guest_l3e_get_gfn(*new_gl3e);
   2.532          mfn_t gl2mfn = vcpu_gfn_to_mfn(v, gl2gfn);
   2.533          if ( valid_mfn(gl2mfn) )
   2.534 -            sl2mfn = get_shadow_status(v, gl2mfn, PGC_SH_l2_shadow);
   2.535 +            sl2mfn = get_shadow_status(v, gl2mfn, SH_type_l2_shadow);
   2.536          else
   2.537              result |= SHADOW_SET_ERROR;
   2.538      }
   2.539 @@ -2225,7 +2208,7 @@ static int validate_gl2e(struct vcpu *v,
   2.540          {
   2.541              mfn_t gl1mfn = vcpu_gfn_to_mfn(v, gl1gfn);
   2.542              if ( valid_mfn(gl1mfn) )
   2.543 -                sl1mfn = get_shadow_status(v, gl1mfn, PGC_SH_l1_shadow);
   2.544 +                sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow);
   2.545              else
   2.546                  result |= SHADOW_SET_ERROR;
   2.547          }
   2.548 @@ -2246,8 +2229,7 @@ static int validate_gl2e(struct vcpu *v,
   2.549  
   2.550  #if SHADOW_PAGING_LEVELS == 3
   2.551          reserved_xen_slot = 
   2.552 -            (((mfn_to_page(sl2mfn)->count_info & PGC_SH_type_mask)
   2.553 -              == PGC_SH_l2h_pae_shadow) &&
   2.554 +            ((mfn_to_shadow_page(sl2mfn)->type == SH_type_l2h_pae_shadow) &&
   2.555               (shadow_index 
   2.556                >= (L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1))));
   2.557  #else /* SHADOW_PAGING_LEVELS == 2 */
   2.558 @@ -2365,7 +2347,7 @@ sh_map_and_validate_gl4e(struct vcpu *v,
   2.559  {
   2.560  #if GUEST_PAGING_LEVELS >= 4
   2.561      return sh_map_and_validate(v, gl4mfn, new_gl4p, size, 
   2.562 -                                PGC_SH_l4_shadow, 
   2.563 +                                SH_type_l4_shadow, 
   2.564                                  shadow_l4_index, 
   2.565                                  validate_gl4e);
   2.566  #else // ! GUEST_PAGING_LEVELS >= 4
   2.567 @@ -2381,7 +2363,7 @@ sh_map_and_validate_gl3e(struct vcpu *v,
   2.568  {
   2.569  #if GUEST_PAGING_LEVELS >= 4
   2.570      return sh_map_and_validate(v, gl3mfn, new_gl3p, size, 
   2.571 -                                PGC_SH_l3_shadow, 
   2.572 +                                SH_type_l3_shadow, 
   2.573                                  shadow_l3_index, 
   2.574                                  validate_gl3e);
   2.575  #else // ! GUEST_PAGING_LEVELS >= 4
   2.576 @@ -2396,7 +2378,7 @@ sh_map_and_validate_gl2e(struct vcpu *v,
   2.577                            void *new_gl2p, u32 size)
   2.578  {
   2.579      return sh_map_and_validate(v, gl2mfn, new_gl2p, size, 
   2.580 -                                PGC_SH_l2_shadow, 
   2.581 +                                SH_type_l2_shadow, 
   2.582                                  shadow_l2_index, 
   2.583                                  validate_gl2e);
   2.584  }
   2.585 @@ -2407,7 +2389,7 @@ sh_map_and_validate_gl2he(struct vcpu *v
   2.586  {
   2.587  #if GUEST_PAGING_LEVELS == 3
   2.588      return sh_map_and_validate(v, gl2mfn, new_gl2p, size, 
   2.589 -                                PGC_SH_l2h_shadow, 
   2.590 +                                SH_type_l2h_shadow, 
   2.591                                  shadow_l2_index, 
   2.592                                  validate_gl2e);
   2.593  #else /* Non-PAE guests don't have different kinds of l2 table */
   2.594 @@ -2422,7 +2404,7 @@ sh_map_and_validate_gl1e(struct vcpu *v,
   2.595                            void *new_gl1p, u32 size)
   2.596  {
   2.597      return sh_map_and_validate(v, gl1mfn, new_gl1p, size, 
   2.598 -                                PGC_SH_l1_shadow, 
   2.599 +                                SH_type_l1_shadow, 
   2.600                                  shadow_l1_index, 
   2.601                                  validate_gl1e);
   2.602  }
   2.603 @@ -2923,8 +2905,8 @@ sh_invlpg(struct vcpu *v, unsigned long 
   2.604      // If so, then we'll need to flush the entire TLB (because that's
   2.605      // easier than invalidating all of the individual 4K pages).
   2.606      //
   2.607 -    if ( (mfn_to_page(shadow_l2e_get_mfn(sl2e))->count_info &
   2.608 -          PGC_SH_type_mask) == PGC_SH_fl1_shadow )
   2.609 +    if ( mfn_to_shadow_page(shadow_l2e_get_mfn(sl2e))->type
   2.610 +         == SH_type_fl1_shadow )
   2.611      {
   2.612          local_flush_tlb();
   2.613          return 0;
   2.614 @@ -3284,8 +3266,9 @@ sh_set_toplevel_shadow(struct vcpu *v,
   2.615      if ( valid_mfn(smfn) )
   2.616      {
   2.617          /* Pull this root shadow to the front of the list of roots. */
   2.618 -        list_del(&mfn_to_page(smfn)->list);
   2.619 -        list_add(&mfn_to_page(smfn)->list, &d->arch.shadow.toplevel_shadows);
   2.620 +        struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
   2.621 +        list_del(&sp->list);
   2.622 +        list_add(&sp->list, &d->arch.shadow.toplevel_shadows);
   2.623      }
   2.624      else
   2.625      {
   2.626 @@ -3293,7 +3276,8 @@ sh_set_toplevel_shadow(struct vcpu *v,
   2.627          shadow_prealloc(d, SHADOW_MAX_ORDER); 
   2.628          /* Shadow the page. */
   2.629          smfn = sh_make_shadow(v, gmfn, root_type);
   2.630 -        list_add(&mfn_to_page(smfn)->list, &d->arch.shadow.toplevel_shadows);
   2.631 +        list_add(&mfn_to_shadow_page(smfn)->list, 
   2.632 +                 &d->arch.shadow.toplevel_shadows);
   2.633      }
   2.634      ASSERT(valid_mfn(smfn));
   2.635      
   2.636 @@ -3444,7 +3428,7 @@ sh_update_cr3(struct vcpu *v)
   2.637  #if GUEST_PAGING_LEVELS == 2
   2.638      if ( shadow_remove_write_access(v, gmfn, 2, 0) != 0 )
   2.639          flush_tlb_mask(v->domain->domain_dirty_cpumask); 
   2.640 -    sh_set_toplevel_shadow(v, 0, gmfn, PGC_SH_l2_shadow);
   2.641 +    sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l2_shadow);
   2.642  #elif GUEST_PAGING_LEVELS == 3
   2.643      /* PAE guests have four shadow_table entries, based on the 
   2.644       * current values of the guest's four l3es. */
   2.645 @@ -3473,15 +3457,15 @@ sh_update_cr3(struct vcpu *v)
   2.646                  gl2gfn = guest_l3e_get_gfn(gl3e[i]);
   2.647                  gl2mfn = vcpu_gfn_to_mfn(v, gl2gfn);
   2.648                  sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3) 
   2.649 -                                       ? PGC_SH_l2h_shadow 
   2.650 -                                       : PGC_SH_l2_shadow);
   2.651 +                                       ? SH_type_l2h_shadow 
   2.652 +                                       : SH_type_l2_shadow);
   2.653              }
   2.654          }
   2.655      }
   2.656  #elif GUEST_PAGING_LEVELS == 4
   2.657      if ( shadow_remove_write_access(v, gmfn, 4, 0) != 0 )
   2.658          flush_tlb_mask(v->domain->domain_dirty_cpumask);
   2.659 -    sh_set_toplevel_shadow(v, 0, gmfn, PGC_SH_l4_shadow);
   2.660 +    sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow);
   2.661  #else
   2.662  #error This should never happen 
   2.663  #endif
   2.664 @@ -3667,19 +3651,19 @@ int sh_remove_all_mappings(struct vcpu *
   2.665  void sh_clear_shadow_entry(struct vcpu *v, void *ep, mfn_t smfn)
   2.666  /* Blank out a single shadow entry */
   2.667  {
   2.668 -    switch (mfn_to_page(smfn)->count_info & PGC_SH_type_mask) 
   2.669 +    switch ( mfn_to_shadow_page(smfn)->type )
   2.670      {
   2.671 -    case PGC_SH_l1_shadow:
   2.672 +    case SH_type_l1_shadow:
   2.673          shadow_set_l1e(v, ep, shadow_l1e_empty(), smfn); break;
   2.674 -    case PGC_SH_l2_shadow:
   2.675 +    case SH_type_l2_shadow:
   2.676  #if GUEST_PAGING_LEVELS == 3
   2.677 -    case PGC_SH_l2h_shadow:
   2.678 +    case SH_type_l2h_shadow:
   2.679  #endif
   2.680          shadow_set_l2e(v, ep, shadow_l2e_empty(), smfn); break;
   2.681  #if GUEST_PAGING_LEVELS >= 4
   2.682 -    case PGC_SH_l3_shadow:
   2.683 +    case SH_type_l3_shadow:
   2.684          shadow_set_l3e(v, ep, shadow_l3e_empty(), smfn); break;
   2.685 -    case PGC_SH_l4_shadow:
   2.686 +    case SH_type_l4_shadow:
   2.687          shadow_set_l4e(v, ep, shadow_l4e_empty(), smfn); break;
   2.688  #endif
   2.689      default: BUG(); /* Called with the wrong kind of shadow. */
   2.690 @@ -3703,7 +3687,7 @@ int sh_remove_l1_shadow(struct vcpu *v, 
   2.691               && (mfn_x(shadow_l2e_get_mfn(*sl2e)) == mfn_x(sl1mfn)) )
   2.692          {
   2.693              shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
   2.694 -            if ( (mfn_to_page(sl1mfn)->count_info & PGC_SH_type_mask) == 0 )
   2.695 +            if ( mfn_to_shadow_page(sl1mfn)->type == 0 )
   2.696                  /* This breaks us cleanly out of the FOREACH macro */
   2.697                  done = 1;
   2.698          }
   2.699 @@ -3726,7 +3710,7 @@ int sh_remove_l2_shadow(struct vcpu *v, 
   2.700               && (mfn_x(shadow_l3e_get_mfn(*sl3e)) == mfn_x(sl2mfn)) )
   2.701          {
   2.702              shadow_set_l3e(v, sl3e, shadow_l3e_empty(), sl3mfn);
   2.703 -            if ( (mfn_to_page(sl2mfn)->count_info & PGC_SH_type_mask) == 0 )
   2.704 +            if ( mfn_to_shadow_page(sl2mfn)->type == 0 )
   2.705                  /* This breaks us cleanly out of the FOREACH macro */
   2.706                  done = 1;
   2.707          }
   2.708 @@ -3748,7 +3732,7 @@ int sh_remove_l3_shadow(struct vcpu *v, 
   2.709               && (mfn_x(shadow_l4e_get_mfn(*sl4e)) == mfn_x(sl3mfn)) )
   2.710          {
   2.711              shadow_set_l4e(v, sl4e, shadow_l4e_empty(), sl4mfn);
   2.712 -            if ( (mfn_to_page(sl3mfn)->count_info & PGC_SH_type_mask) == 0 )
   2.713 +            if ( mfn_to_shadow_page(sl3mfn)->type == 0 )
   2.714                  /* This breaks us cleanly out of the FOREACH macro */
   2.715                  done = 1;
   2.716          }
   2.717 @@ -3986,7 +3970,7 @@ int sh_audit_l1_table(struct vcpu *v, mf
   2.718      int done = 0;
   2.719      
   2.720      /* Follow the backpointer */
   2.721 -    gl1mfn = _mfn(mfn_to_page(sl1mfn)->u.inuse.type_info);
   2.722 +    gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->backpointer);
   2.723      gl1e = gp = sh_map_domain_page(gl1mfn);
   2.724      SHADOW_FOREACH_L1E(sl1mfn, sl1e, &gl1e, done, {
   2.725  
   2.726 @@ -4068,7 +4052,7 @@ int sh_audit_l2_table(struct vcpu *v, mf
   2.727  #endif
   2.728  
   2.729      /* Follow the backpointer */
   2.730 -    gl2mfn = _mfn(mfn_to_page(sl2mfn)->u.inuse.type_info);
   2.731 +    gl2mfn = _mfn(mfn_to_shadow_page(sl2mfn)->backpointer);
   2.732      gl2e = gp = sh_map_domain_page(gl2mfn);
   2.733      SHADOW_FOREACH_L2E(sl2mfn, sl2e, &gl2e, done, xen_mappings, {
   2.734  
   2.735 @@ -4083,7 +4067,7 @@ int sh_audit_l2_table(struct vcpu *v, mf
   2.736              gmfn = (guest_l2e_get_flags(*gl2e) & _PAGE_PSE)  
   2.737                  ? get_fl1_shadow_status(v, gfn)
   2.738                  : get_shadow_status(v, audit_gfn_to_mfn(v, gfn, gl2mfn), 
   2.739 -                                    PGC_SH_l1_shadow);
   2.740 +                                    SH_type_l1_shadow);
   2.741              if ( mfn_x(gmfn) != mfn_x(mfn) )
   2.742                  AUDIT_FAIL(2, "bad translation: gfn %" SH_PRI_gfn
   2.743                             " (--> %" SH_PRI_mfn ")"
   2.744 @@ -4109,7 +4093,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
   2.745      int done = 0;
   2.746  
   2.747      /* Follow the backpointer */
   2.748 -    gl3mfn = _mfn(mfn_to_page(sl3mfn)->u.inuse.type_info);
   2.749 +    gl3mfn = _mfn(mfn_to_shadow_page(sl3mfn)->backpointer);
   2.750      gl3e = gp = sh_map_domain_page(gl3mfn);
   2.751      SHADOW_FOREACH_L3E(sl3mfn, sl3e, &gl3e, done, {
   2.752  
   2.753 @@ -4125,8 +4109,8 @@ int sh_audit_l3_table(struct vcpu *v, mf
   2.754                                       (GUEST_PAGING_LEVELS == 3 
   2.755                                        && !shadow_mode_external(v->domain)
   2.756                                        && (guest_index(gl3e) % 4) == 3)
   2.757 -                                     ? PGC_SH_l2h_pae_shadow
   2.758 -                                     : PGC_SH_l2_shadow);
   2.759 +                                     ? SH_type_l2h_pae_shadow
   2.760 +                                     : SH_type_l2_shadow);
   2.761              if ( mfn_x(gmfn) != mfn_x(mfn) )
   2.762                  AUDIT_FAIL(3, "bad translation: gfn %" SH_PRI_gfn
   2.763                             " --> %" SH_PRI_mfn " != mfn %" SH_PRI_mfn,
   2.764 @@ -4148,7 +4132,7 @@ int sh_audit_l4_table(struct vcpu *v, mf
   2.765      int xen_mappings = !shadow_mode_external(v->domain);
   2.766  
   2.767      /* Follow the backpointer */
   2.768 -    gl4mfn = _mfn(mfn_to_page(sl4mfn)->u.inuse.type_info);
   2.769 +    gl4mfn = _mfn(mfn_to_shadow_page(sl4mfn)->backpointer);
   2.770      gl4e = gp = sh_map_domain_page(gl4mfn);
   2.771      SHADOW_FOREACH_L4E(sl4mfn, sl4e, &gl4e, done, xen_mappings,
   2.772      {
   2.773 @@ -4161,7 +4145,7 @@ int sh_audit_l4_table(struct vcpu *v, mf
   2.774              gfn = guest_l4e_get_gfn(*gl4e);
   2.775              mfn = shadow_l4e_get_mfn(*sl4e);
   2.776              gmfn = get_shadow_status(v, audit_gfn_to_mfn(v, gfn, gl4mfn), 
   2.777 -                                     PGC_SH_l3_shadow);
   2.778 +                                     SH_type_l3_shadow);
   2.779              if ( mfn_x(gmfn) != mfn_x(mfn) )
   2.780                  AUDIT_FAIL(4, "bad translation: gfn %" SH_PRI_gfn
   2.781                             " --> %" SH_PRI_mfn " != mfn %" SH_PRI_mfn,
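
The multi.c hunks above are mechanical: every read of the shadow type that
used to mask and shift count_info now reads the dedicated type field through
mfn_to_shadow_page().  A minimal standalone sketch of the two access patterns,
with a simplified layout (all *_demo names are invented for illustration; this
is not the Xen code itself):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TYPE_SHIFT_DEMO 28
#define TYPE_MASK_DEMO  (15u << TYPE_SHIFT_DEMO)

/* Old scheme: the 4-bit type lives in the top of a 32-bit count_info. */
struct old_page_demo { uint32_t count_info; };

/* New scheme: a named bitfield in a shadow-specific struct. */
struct new_shadow_demo {
    unsigned int type:4, pinned:1, logdirty:1, count:26;
};

int main(void)
{
    struct old_page_demo   op = { .count_info = (10u << TYPE_SHIFT_DEMO) | 3 };
    struct new_shadow_demo ns = { .type = 10, .count = 3 };

    /* Old read: mask and shift on every access. */
    unsigned int old_type = (op.count_info & TYPE_MASK_DEMO) >> TYPE_SHIFT_DEMO;
    /* New read: just the field. */
    unsigned int new_type = ns.type;

    assert(old_type == 10 && new_type == 10);
    printf("type = %u under either encoding\n", new_type);
    return 0;
}

The payoff is visible in the ASSERT rewrites above: each one shrinks from a
two-line mask-and-compare to a single field comparison.
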
     3.1 --- a/xen/arch/x86/mm/shadow/private.h	Tue Nov 21 18:09:23 2006 -0800
     3.2 +++ b/xen/arch/x86/mm/shadow/private.h	Thu Nov 23 17:40:28 2006 +0000
     3.3 @@ -129,6 +129,97 @@ extern void shadow_audit_p2m(struct doma
     3.4  #undef SHADOW_LEVELS
     3.5  #endif /* CONFIG_PAGING_LEVELS == 4 */
     3.6  
     3.7 +/******************************************************************************
     3.8 + * Page metadata for shadow pages.
     3.9 + */
    3.10 +
    3.11 +struct shadow_page_info
    3.12 +{
    3.13 +    union {
    3.14 +        /* When in use, guest page we're a shadow of */
    3.15 +        unsigned long backpointer;
    3.16 +        /* When free, order of the freelist we're on */
    3.17 +        unsigned int order;
    3.18 +    };
    3.19 +    union {
    3.20 +        /* When in use, next shadow in this hash chain */
    3.21 +        struct shadow_page_info *next_shadow;
    3.22 +        /* When free, TLB flush time when freed */
    3.23 +        u32 tlbflush_timestamp;
    3.24 +    };
    3.25 +    struct {
    3.26 +        unsigned int type:4;      /* What kind of shadow is this? */
    3.27 +        unsigned int pinned:1;    /* Is the shadow pinned? */
    3.28 +        unsigned int logdirty:1;  /* Was it made in log-dirty mode? */
    3.29 +        unsigned int count:26;    /* Reference count */
    3.30 +        u32 mbz;                  /* Must be zero: this is where the owner 
    3.31 +                                   * field lives in a non-shadow page */
    3.32 +    } __attribute__((packed));
    3.33 +    union {
    3.34 +        /* For unused shadow pages, a list of pages of this order; 
    3.35 +         * for top-level shadows, a list of other top-level shadows */
    3.36 +        struct list_head list;
    3.37 +        /* For lower-level shadows, a higher entry that points at us */
    3.38 +        paddr_t up;
    3.39 +    };
    3.40 +};
    3.41 +
    3.42 +/* The structure above *must* be the same size as a struct page_info
    3.43 + * from mm.h, since we'll be using the same space in the frametable. 
    3.44 + * Also, the mbz field must line up with the owner field of normal 
    3.45 + * pages, so they look properly like anonymous/xen pages. */
    3.46 +static inline void shadow_check_page_struct_offsets(void) {
    3.47 +    BUILD_BUG_ON(sizeof (struct shadow_page_info) 
    3.48 +                 != sizeof (struct page_info));
    3.49 +    BUILD_BUG_ON(offsetof(struct shadow_page_info, mbz) 
    3.50 +                 != offsetof(struct page_info, u.inuse._domain));
    3.51 +};
    3.52 +
    3.53 +/* Shadow type codes */
    3.54 +#define SH_type_none           (0U) /* on the shadow free list */
    3.55 +#define SH_type_min_shadow     (1U)
    3.56 +#define SH_type_l1_32_shadow   (1U) /* shadowing a 32-bit L1 guest page */
    3.57 +#define SH_type_fl1_32_shadow  (2U) /* L1 shadow for a 32b 4M superpage */
    3.58 +#define SH_type_l2_32_shadow   (3U) /* shadowing a 32-bit L2 guest page */
    3.59 +#define SH_type_l1_pae_shadow  (4U) /* shadowing a pae L1 page */
    3.60 +#define SH_type_fl1_pae_shadow (5U) /* L1 shadow for pae 2M superpg */
    3.61 +#define SH_type_l2_pae_shadow  (6U) /* shadowing a pae L2-low page */
    3.62 +#define SH_type_l2h_pae_shadow (7U) /* shadowing a pae L2-high page */
    3.63 +#define SH_type_l1_64_shadow   (8U) /* shadowing a 64-bit L1 page */
    3.64 +#define SH_type_fl1_64_shadow  (9U) /* L1 shadow for 64-bit 2M superpg */
    3.65 +#define SH_type_l2_64_shadow  (10U) /* shadowing a 64-bit L2 page */
    3.66 +#define SH_type_l3_64_shadow  (11U) /* shadowing a 64-bit L3 page */
    3.67 +#define SH_type_l4_64_shadow  (12U) /* shadowing a 64-bit L4 page */
    3.68 +#define SH_type_max_shadow    (12U)
    3.69 +#define SH_type_p2m_table     (13U) /* in use as the p2m table */
    3.70 +#define SH_type_monitor_table (14U) /* in use as a monitor table */
    3.71 +#define SH_type_unused        (15U)
    3.72 +
    3.73 +/*
    3.74 + * Definitions for the shadow_flags field in page_info.
    3.75 + * These flags are stored on *guest* pages...
    3.76 + * Bits 1-13 are encodings for the shadow types.
    3.77 + */
    3.78 +#define SHF_page_type_mask \
    3.79 +    (((1u << (SH_type_max_shadow + 1u)) - 1u) - \
    3.80 +     ((1u << SH_type_min_shadow) - 1u))
    3.81 +
    3.82 +#define SHF_L1_32   (1u << SH_type_l1_32_shadow)
    3.83 +#define SHF_FL1_32  (1u << SH_type_fl1_32_shadow)
    3.84 +#define SHF_L2_32   (1u << SH_type_l2_32_shadow)
    3.85 +#define SHF_L1_PAE  (1u << SH_type_l1_pae_shadow)
    3.86 +#define SHF_FL1_PAE (1u << SH_type_fl1_pae_shadow)
    3.87 +#define SHF_L2_PAE  (1u << SH_type_l2_pae_shadow)
    3.88 +#define SHF_L2H_PAE (1u << SH_type_l2h_pae_shadow)
    3.89 +#define SHF_L1_64   (1u << SH_type_l1_64_shadow)
    3.90 +#define SHF_FL1_64  (1u << SH_type_fl1_64_shadow)
    3.91 +#define SHF_L2_64   (1u << SH_type_l2_64_shadow)
    3.92 +#define SHF_L3_64   (1u << SH_type_l3_64_shadow)
    3.93 +#define SHF_L4_64   (1u << SH_type_l4_64_shadow)
    3.94 +
    3.95 +/* Used for hysteresis when automatically unhooking mappings on fork/exit */
    3.96 +#define SHF_unhooked_mappings (1u<<31)
    3.97 +
    3.98  
    3.99  /******************************************************************************
   3.100   * Various function declarations 
   3.101 @@ -173,12 +264,14 @@ void sh_install_xen_entries_in_l2(struct
   3.102  // Override mfn_to_page from asm/page.h, which was #include'd above,
   3.103  // in order to make it work with our mfn type.
   3.104  #undef mfn_to_page
   3.105 -#define mfn_to_page(_mfn) (frame_table + mfn_x(_mfn))
   3.106 +#define mfn_to_page(_m) (frame_table + mfn_x(_m))
   3.107 +#define mfn_to_shadow_page(_m) ((struct shadow_page_info *)mfn_to_page(_m))
   3.108  
   3.109  // Override page_to_mfn from asm/page.h, which was #include'd above,
   3.110  // in order to make it work with our mfn type.
   3.111  #undef page_to_mfn
   3.112  #define page_to_mfn(_pg) (_mfn((_pg) - frame_table))
   3.113 +#define shadow_page_to_mfn(_spg) (page_to_mfn((struct page_info *)_spg))
   3.114  
   3.115  // Override mfn_valid from asm/page.h, which was #include'd above,
   3.116  // in order to make it work with our mfn type.
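
Since struct shadow_page_info occupies exactly the same frametable slot as
struct page_info, converting between an mfn and either view is plain pointer
arithmetic plus a cast, as the two macros above show.  A self-contained sketch
of the round trip, assuming an invented 16-entry frame_table_demo and dummy
struct layouts (not the real ones):

#include <assert.h>
#include <stdio.h>

struct page_info_demo        { unsigned long a, b, c, d; };
struct shadow_page_info_demo { unsigned long w, x, y, z; };

static struct page_info_demo frame_table_demo[16];

#define mfn_to_page_demo(m)        (frame_table_demo + (m))
#define mfn_to_shadow_page_demo(m) \
    ((struct shadow_page_info_demo *)mfn_to_page_demo(m))
#define shadow_page_to_mfn_demo(sp) \
    ((unsigned long)((struct page_info_demo *)(sp) - frame_table_demo))

int main(void)
{
    /* Both views index the same frametable slot for a given mfn. */
    struct shadow_page_info_demo *sp = mfn_to_shadow_page_demo(5);
    assert(shadow_page_to_mfn_demo(sp) == 5);
    printf("mfn 5 round-trips through both views\n");
    return 0;
}
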
   3.117 @@ -189,28 +282,24 @@ void sh_install_xen_entries_in_l2(struct
   3.118  static inline void *
   3.119  sh_map_domain_page(mfn_t mfn)
   3.120  {
   3.121 -    /* XXX Using the monitor-table as a map will happen here  */
   3.122      return map_domain_page(mfn_x(mfn));
   3.123  }
   3.124  
   3.125  static inline void 
   3.126  sh_unmap_domain_page(void *p) 
   3.127  {
   3.128 -    /* XXX Using the monitor-table as a map will happen here  */
   3.129      unmap_domain_page(p);
   3.130  }
   3.131  
   3.132  static inline void *
   3.133  sh_map_domain_page_global(mfn_t mfn)
   3.134  {
   3.135 -    /* XXX Using the monitor-table as a map will happen here  */
   3.136      return map_domain_page_global(mfn_x(mfn));
   3.137  }
   3.138  
   3.139  static inline void 
   3.140  sh_unmap_domain_page_global(void *p) 
   3.141  {
   3.142 -    /* XXX Using the monitor-table as a map will happen here  */
   3.143      unmap_domain_page_global(p);
   3.144  }
   3.145  
   3.146 @@ -253,8 +342,7 @@ sh_mfn_is_a_page_table(mfn_t gmfn)
   3.147  
   3.148  
   3.149  /**************************************************************************/
   3.150 -/* Shadow-page refcounting. See comment in shadow-common.c about the  
   3.151 - * use of struct page_info fields for shadow pages */
   3.152 +/* Shadow-page refcounting. */
   3.153  
   3.154  void sh_destroy_shadow(struct vcpu *v, mfn_t smfn);
   3.155  
   3.156 @@ -264,27 +352,26 @@ void sh_destroy_shadow(struct vcpu *v, m
   3.157  static inline void sh_get_ref(mfn_t smfn, paddr_t entry_pa)
   3.158  {
   3.159      u32 x, nx;
   3.160 -    struct page_info *page = mfn_to_page(smfn);
   3.161 +    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
   3.162  
   3.163      ASSERT(mfn_valid(smfn));
   3.164  
   3.165 -    x = page->count_info & PGC_SH_count_mask;
   3.166 +    x = sp->count;
   3.167      nx = x + 1;
   3.168  
   3.169 -    if ( unlikely(nx & ~PGC_SH_count_mask) )
   3.170 +    if ( unlikely(nx >= 1U<<26) )
   3.171      {
   3.172          SHADOW_PRINTK("shadow ref overflow, gmfn=%" PRtype_info " smfn=%lx\n",
   3.173 -                       page->u.inuse.type_info, mfn_x(smfn));
   3.174 +                       sp->backpointer, mfn_x(smfn));
   3.175          domain_crash_synchronous();
   3.176      }
   3.177      
   3.178      /* Guarded by the shadow lock, so no need for atomic update */
   3.179 -    page->count_info &= ~PGC_SH_count_mask;
   3.180 -    page->count_info |= nx;
   3.181 +    sp->count = nx;
   3.182  
   3.183      /* We remember the first shadow entry that points to each shadow. */
   3.184 -    if ( entry_pa != 0 && page->up == 0 ) 
   3.185 -        page->up = entry_pa;
   3.186 +    if ( entry_pa != 0 && sp->up == 0 ) 
   3.187 +        sp->up = entry_pa;
   3.188  }
   3.189  
   3.190  
   3.191 @@ -293,31 +380,27 @@ static inline void sh_get_ref(mfn_t smfn
   3.192  static inline void sh_put_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
   3.193  {
   3.194      u32 x, nx;
   3.195 -    struct page_info *page = mfn_to_page(smfn);
   3.196 +    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
   3.197  
   3.198      ASSERT(mfn_valid(smfn));
   3.199 -    ASSERT(page_get_owner(page) == NULL);
   3.200 +    ASSERT(sp->mbz == 0);
   3.201  
   3.202      /* If this is the entry in the up-pointer, remove it */
   3.203 -    if ( entry_pa != 0 && page->up == entry_pa ) 
   3.204 -        page->up = 0;
   3.205 +    if ( entry_pa != 0 && sp->up == entry_pa ) 
   3.206 +        sp->up = 0;
   3.207  
   3.208 -    x = page->count_info & PGC_SH_count_mask;
   3.209 +    x = sp->count;
   3.210      nx = x - 1;
   3.211  
   3.212      if ( unlikely(x == 0) ) 
   3.213      {
   3.214 -        SHADOW_PRINTK("shadow ref underflow, smfn=%lx oc=%08x t=%" 
   3.215 -                       PRtype_info "\n",
   3.216 -                       mfn_x(smfn),
   3.217 -                       page->count_info & PGC_SH_count_mask,
   3.218 -                       page->u.inuse.type_info);
   3.219 +        SHADOW_PRINTK("shadow ref underflow, smfn=%lx oc=%08x t=%#x\n",
   3.220 +                      mfn_x(smfn), sp->count, sp->type);
   3.221          domain_crash_synchronous();
   3.222      }
   3.223  
   3.224      /* Guarded by the shadow lock, so no need for atomic update */
   3.225 -    page->count_info &= ~PGC_SH_count_mask;
   3.226 -    page->count_info |= nx;
   3.227 +    sp->count = nx;
   3.228  
   3.229      if ( unlikely(nx == 0) ) 
   3.230          sh_destroy_shadow(v, smfn);
   3.231 @@ -327,27 +410,27 @@ static inline void sh_put_ref(struct vcp
   3.232  /* Pin a shadow page: take an extra refcount and set the pin bit. */
   3.233  static inline void sh_pin(mfn_t smfn)
   3.234  {
   3.235 -    struct page_info *page;
   3.236 +    struct shadow_page_info *sp;
   3.237      
   3.238      ASSERT(mfn_valid(smfn));
   3.239 -    page = mfn_to_page(smfn);
   3.240 -    if ( !(page->count_info & PGC_SH_pinned) ) 
   3.241 +    sp = mfn_to_shadow_page(smfn);
   3.242 +    if ( !(sp->pinned) ) 
   3.243      {
   3.244          sh_get_ref(smfn, 0);
   3.245 -        page->count_info |= PGC_SH_pinned;
   3.246 +        sp->pinned = 1;
   3.247      }
   3.248  }
   3.249  
   3.250  /* Unpin a shadow page: unset the pin bit and release the extra ref. */
   3.251  static inline void sh_unpin(struct vcpu *v, mfn_t smfn)
   3.252  {
   3.253 -    struct page_info *page;
   3.254 +    struct shadow_page_info *sp;
   3.255      
   3.256      ASSERT(mfn_valid(smfn));
   3.257 -    page = mfn_to_page(smfn);
   3.258 -    if ( page->count_info & PGC_SH_pinned )
   3.259 +    sp = mfn_to_shadow_page(smfn);
   3.260 +    if ( sp->pinned )
   3.261      {
   3.262 -        page->count_info &= ~PGC_SH_pinned;
   3.263 +        sp->pinned = 0;
   3.264          sh_put_ref(v, smfn, 0);
   3.265      }
   3.266  }
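
sh_pin() and sh_unpin() above implement a simple protocol: a pinned shadow
holds one extra reference, and the pinned bit records that fact so the extra
reference is taken and dropped exactly once.  A toy model of that protocol
using the same 26-bit counter width (the *_demo names and plain asserts are
illustrative; the real code crashes the domain on over/underflow):

#include <assert.h>
#include <stdio.h>

struct sp_demo { unsigned int type:4, pinned:1, logdirty:1, count:26; };

static void get_ref_demo(struct sp_demo *sp)
{
    assert(sp->count < (1u << 26) - 1);   /* would overflow the 26-bit field */
    sp->count++;
}

static void put_ref_demo(struct sp_demo *sp)
{
    assert(sp->count > 0);                /* underflow would be a bug */
    sp->count--;
}

static void pin_demo(struct sp_demo *sp)
{
    if ( !sp->pinned )
    {
        get_ref_demo(sp);                 /* the pin's own reference */
        sp->pinned = 1;
    }
}

static void unpin_demo(struct sp_demo *sp)
{
    if ( sp->pinned )
    {
        sp->pinned = 0;
        put_ref_demo(sp);
    }
}

int main(void)
{
    struct sp_demo sp = { .count = 1 };   /* say, one reference from a CR3 */
    pin_demo(&sp);   assert(sp.count == 2 && sp.pinned);
    pin_demo(&sp);   assert(sp.count == 2);    /* pinning twice is a no-op */
    unpin_demo(&sp); assert(sp.count == 1 && !sp.pinned);
    printf("pin/unpin balanced\n");
    return 0;
}

Because all updates happen under the shadow lock, the real sh_get_ref() and
sh_put_ref() can use plain stores rather than atomic read-modify-write.
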
     4.1 --- a/xen/arch/x86/mm/shadow/types.h	Tue Nov 21 18:09:23 2006 -0800
     4.2 +++ b/xen/arch/x86/mm/shadow/types.h	Thu Nov 23 17:40:28 2006 +0000
     4.3 @@ -281,9 +281,9 @@ static inline guest_l2e_t guest_l2e_from
     4.4  #define guest_l2_table_offset(a) l2_table_offset_32(a)
     4.5  
     4.6  /* The shadow types needed for the various levels. */
     4.7 -#define PGC_SH_l1_shadow  PGC_SH_l1_32_shadow
     4.8 -#define PGC_SH_l2_shadow  PGC_SH_l2_32_shadow
     4.9 -#define PGC_SH_fl1_shadow PGC_SH_fl1_32_shadow
    4.10 +#define SH_type_l1_shadow  SH_type_l1_32_shadow
    4.11 +#define SH_type_l2_shadow  SH_type_l2_32_shadow
    4.12 +#define SH_type_fl1_shadow SH_type_fl1_32_shadow
    4.13  
    4.14  #else /* GUEST_PAGING_LEVELS != 2 */
    4.15  
    4.16 @@ -381,16 +381,16 @@ static inline guest_l4e_t guest_l4e_from
    4.17  
    4.18  /* The shadow types needed for the various levels. */
    4.19  #if GUEST_PAGING_LEVELS == 3
    4.20 -#define PGC_SH_l1_shadow  PGC_SH_l1_pae_shadow
    4.21 -#define PGC_SH_fl1_shadow PGC_SH_fl1_pae_shadow
    4.22 -#define PGC_SH_l2_shadow  PGC_SH_l2_pae_shadow
    4.23 -#define PGC_SH_l2h_shadow PGC_SH_l2h_pae_shadow
    4.24 +#define SH_type_l1_shadow  SH_type_l1_pae_shadow
    4.25 +#define SH_type_fl1_shadow SH_type_fl1_pae_shadow
    4.26 +#define SH_type_l2_shadow  SH_type_l2_pae_shadow
    4.27 +#define SH_type_l2h_shadow SH_type_l2h_pae_shadow
    4.28  #else
    4.29 -#define PGC_SH_l1_shadow  PGC_SH_l1_64_shadow
    4.30 -#define PGC_SH_fl1_shadow PGC_SH_fl1_64_shadow
    4.31 -#define PGC_SH_l2_shadow  PGC_SH_l2_64_shadow
    4.32 -#define PGC_SH_l3_shadow  PGC_SH_l3_64_shadow
    4.33 -#define PGC_SH_l4_shadow  PGC_SH_l4_64_shadow
    4.34 +#define SH_type_l1_shadow  SH_type_l1_64_shadow
    4.35 +#define SH_type_fl1_shadow SH_type_fl1_64_shadow
    4.36 +#define SH_type_l2_shadow  SH_type_l2_64_shadow
    4.37 +#define SH_type_l3_shadow  SH_type_l3_64_shadow
    4.38 +#define SH_type_l4_shadow  SH_type_l4_64_shadow
    4.39  #endif
    4.40  
    4.41  #endif /* GUEST_PAGING_LEVELS != 2 */
     5.1 --- a/xen/include/asm-x86/mm.h	Tue Nov 21 18:09:23 2006 -0800
     5.2 +++ b/xen/include/asm-x86/mm.h	Thu Nov 23 17:40:28 2006 +0000
     5.3 @@ -20,11 +20,7 @@
     5.4  struct page_info
     5.5  {
     5.6      /* Each frame can be threaded onto a doubly-linked list. */
     5.7 -    union {
     5.8 -        struct list_head list;
     5.9 -        /* Shadow uses this field as an up-pointer in lower-level shadows */
    5.10 -        paddr_t up;
    5.11 -    };
    5.12 +    struct list_head list;
    5.13  
    5.14      /* Reference count and various PGC_xxx flags and fields. */
    5.15      u32 count_info;
    5.16 @@ -59,11 +55,11 @@ struct page_info
    5.17          u32 tlbflush_timestamp;
    5.18  
    5.19          /*
    5.20 -         * Guest pages with a shadow. This does not conflict with
    5.21 +         * Guest pages with a shadow.  This does not conflict with
    5.22           * tlbflush_timestamp since page table pages are explicitly not
    5.23           * tracked for TLB-flush avoidance when a guest runs in shadow mode.
    5.24           */
    5.25 -        u32 shadow_flags;
    5.26 +        unsigned long shadow_flags;
    5.27      };
    5.28  };
    5.29  
    5.30 @@ -103,38 +99,6 @@ struct page_info
    5.31   /* 29-bit count of references to this frame. */
    5.32  #define PGC_count_mask      ((1U<<29)-1)
    5.33  
    5.34 -/* shadow uses the count_info on shadow pages somewhat differently */
    5.35 -/* NB: please coordinate any changes here with the SHF's in shadow.h */
    5.36 -#define PGC_SH_none           (0U<<28) /* on the shadow free list */
    5.37 -#define PGC_SH_min_shadow     (1U<<28)
    5.38 -#define PGC_SH_l1_32_shadow   (1U<<28) /* shadowing a 32-bit L1 guest page */
    5.39 -#define PGC_SH_fl1_32_shadow  (2U<<28) /* L1 shadow for a 32b 4M superpage */
    5.40 -#define PGC_SH_l2_32_shadow   (3U<<28) /* shadowing a 32-bit L2 guest page */
    5.41 -#define PGC_SH_l1_pae_shadow  (4U<<28) /* shadowing a pae L1 page */
    5.42 -#define PGC_SH_fl1_pae_shadow (5U<<28) /* L1 shadow for pae 2M superpg */
    5.43 -#define PGC_SH_l2_pae_shadow  (6U<<28) /* shadowing a pae L2-low page */
    5.44 -#define PGC_SH_l2h_pae_shadow (7U<<28) /* shadowing a pae L2-high page */
    5.45 -#define PGC_SH_l1_64_shadow   (8U<<28) /* shadowing a 64-bit L1 page */
    5.46 -#define PGC_SH_fl1_64_shadow  (9U<<28) /* L1 shadow for 64-bit 2M superpg */
    5.47 -#define PGC_SH_l2_64_shadow  (10U<<28) /* shadowing a 64-bit L2 page */
    5.48 -#define PGC_SH_l3_64_shadow  (11U<<28) /* shadowing a 64-bit L3 page */
    5.49 -#define PGC_SH_l4_64_shadow  (12U<<28) /* shadowing a 64-bit L4 page */
    5.50 -#define PGC_SH_max_shadow    (12U<<28)
    5.51 -#define PGC_SH_p2m_table     (13U<<28) /* in use as the p2m table */
    5.52 -#define PGC_SH_monitor_table (14U<<28) /* in use as a monitor table */
    5.53 -#define PGC_SH_unused        (15U<<28)
    5.54 -
    5.55 -#define PGC_SH_type_mask     (15U<<28)
    5.56 -#define PGC_SH_type_shift          28
    5.57 -
    5.58 -#define PGC_SH_pinned         (1U<<27)
    5.59 -
    5.60 -#define _PGC_SH_log_dirty          26
    5.61 -#define PGC_SH_log_dirty      (1U<<26)
    5.62 -
    5.63 -/* 26 bit ref count for shadow pages */
    5.64 -#define PGC_SH_count_mask    ((1U<<26) - 1)
    5.65 -
    5.66  /* We trust the slab allocator in slab.c, and our use of it. */
    5.67  #define PageSlab(page)	    (1)
    5.68  #define PageSetSlab(page)   ((void)0)
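
With the shadow-specific fields gone from page_info, the only thing holding
the two layouts together is the invariant checked by
shadow_check_page_struct_offsets() in private.h: same total size, and the
must-be-zero word sitting exactly where the owner field lives in a normal
page.  A standalone model of that check with plain asserts (the *_demo
structs are invented stand-ins, not the real layouts):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct page_info_demo {
    struct page_info_demo *next, *prev;         /* the list head */
    uint32_t count_info;
    uint32_t owner;                             /* stands in for u.inuse._domain */
};

struct shadow_page_info_demo {
    struct shadow_page_info_demo *next, *prev;  /* list / up-pointer space */
    uint32_t bits;                              /* type/pinned/logdirty/count */
    uint32_t mbz;                               /* must overlay 'owner' exactly */
};

int main(void)
{
    /* The real header uses BUILD_BUG_ON; plain asserts make the same check. */
    assert(sizeof(struct shadow_page_info_demo)
           == sizeof(struct page_info_demo));
    assert(offsetof(struct shadow_page_info_demo, mbz)
           == offsetof(struct page_info_demo, owner));
    printf("overlay layout holds\n");
    return 0;
}
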
     6.1 --- a/xen/include/asm-x86/shadow.h	Tue Nov 21 18:09:23 2006 -0800
     6.2 +++ b/xen/include/asm-x86/shadow.h	Thu Nov 23 17:40:28 2006 +0000
     6.3 @@ -577,32 +577,6 @@ void
     6.4  shadow_guest_physmap_remove_page(struct domain *d, unsigned long gfn,
     6.5                                    unsigned long mfn);
     6.6  
     6.7 -/*
     6.8 - * Definitions for the shadow_flags field in page_info.
     6.9 - * These flags are stored on *guest* pages...
    6.10 - * Bits 1-13 are encodings for the shadow types.
    6.11 - */
    6.12 -#define PGC_SH_type_to_index(_type) ((_type) >> PGC_SH_type_shift)
    6.13 -#define SHF_page_type_mask \
    6.14 -    (((1u << (PGC_SH_type_to_index(PGC_SH_max_shadow) + 1u)) - 1u) - \
    6.15 -     ((1u << PGC_SH_type_to_index(PGC_SH_min_shadow)) - 1u))
    6.16 -
    6.17 -#define SHF_L1_32   (1u << PGC_SH_type_to_index(PGC_SH_l1_32_shadow))
    6.18 -#define SHF_FL1_32  (1u << PGC_SH_type_to_index(PGC_SH_fl1_32_shadow))
    6.19 -#define SHF_L2_32   (1u << PGC_SH_type_to_index(PGC_SH_l2_32_shadow))
    6.20 -#define SHF_L1_PAE  (1u << PGC_SH_type_to_index(PGC_SH_l1_pae_shadow))
    6.21 -#define SHF_FL1_PAE (1u << PGC_SH_type_to_index(PGC_SH_fl1_pae_shadow))
    6.22 -#define SHF_L2_PAE  (1u << PGC_SH_type_to_index(PGC_SH_l2_pae_shadow))
    6.23 -#define SHF_L2H_PAE (1u << PGC_SH_type_to_index(PGC_SH_l2h_pae_shadow))
    6.24 -#define SHF_L1_64   (1u << PGC_SH_type_to_index(PGC_SH_l1_64_shadow))
    6.25 -#define SHF_FL1_64  (1u << PGC_SH_type_to_index(PGC_SH_fl1_64_shadow))
    6.26 -#define SHF_L2_64   (1u << PGC_SH_type_to_index(PGC_SH_l2_64_shadow))
    6.27 -#define SHF_L3_64   (1u << PGC_SH_type_to_index(PGC_SH_l3_64_shadow))
    6.28 -#define SHF_L4_64   (1u << PGC_SH_type_to_index(PGC_SH_l4_64_shadow))
    6.29 -
    6.30 -/* Used for hysteresis when automatically unhooking mappings on fork/exit */
    6.31 -#define SHF_unhooked_mappings (1u<<31)
    6.32 -
    6.33  /* 
    6.34   * Allocation of shadow pages 
    6.35   */
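
The SHF_* definitions deleted here move verbatim into private.h, where they
remain one-bit-per-shadow-type flags stored in a guest page's shadow_flags.
A sketch of the typical consumer, with invented *_demo names (the mask
arithmetic matches the definitions in private.h):

#include <stdio.h>

#define SH_type_min_shadow_demo  1u
#define SH_type_max_shadow_demo 12u
#define SHF_page_type_mask_demo \
    (((1u << (SH_type_max_shadow_demo + 1u)) - 1u) - \
     ((1u << SH_type_min_shadow_demo) - 1u))

/* One bit per shadow type is set in a guest page's shadow_flags, so a
 * single mask test answers "does this guest page have any shadows?". */
static int page_is_shadowed_demo(unsigned long shadow_flags)
{
    return (shadow_flags & SHF_page_type_mask_demo) != 0;
}

int main(void)
{
    unsigned long flags = 1u << 6;        /* SHF_L2_PAE: a PAE l2 shadow */
    printf("mask = %#x\n", SHF_page_type_mask_demo);  /* 0x1ffe: bits 1-12 */
    printf("shadowed? %d\n", page_is_shadowed_demo(flags));
    return 0;
}

Moving these out of asm-x86/shadow.h means nothing outside the shadow code
can see or depend on the encoding, which is the point of the changeset.
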