ia64/xen-unstable

changeset 19144:2e1734aa8db3

x86: page_info cleanups.
1. No reason for i386 not to use the same definitions as x64
2. No need for shadow_page_info names to hang around.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jan 30 11:33:27 2009 +0000 (2009-01-30)
parents 86159a906bec
children 414373f007e8
files xen/arch/x86/mm/shadow/common.c xen/arch/x86/mm/shadow/multi.c xen/arch/x86/mm/shadow/private.h xen/include/asm-x86/domain.h xen/include/asm-x86/mm.h
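In short, shadow pages are now handled as plain struct page_info, looked up
with the standard mfn_to_page()/page_to_mfn() converters rather than the old
mfn_to_shadow_page()/shadow_page_to_mfn() aliases.  A minimal illustrative
sketch of the resulting caller pattern (not part of the changeset; mfn_t,
frame_table and the u.sh fields are the usual Xen definitions, and the helper
name is hypothetical):

    /* Hypothetical helper, for illustration only: read a shadow's type. */
    /* Before this patch it would have used mfn_to_shadow_page(smfn).    */
    static inline unsigned int sh_type_of(mfn_t smfn)
    {
        const struct page_info *sp = mfn_to_page(smfn);
        return sp->u.sh.type;    /* e.g. SH_type_l1_shadow */
    }
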
line diff
     1.1 --- a/xen/arch/x86/mm/shadow/common.c	Fri Jan 30 11:16:52 2009 +0000
     1.2 +++ b/xen/arch/x86/mm/shadow/common.c	Fri Jan 30 11:33:27 2009 +0000
     1.3 @@ -1291,7 +1291,7 @@ static inline int space_is_available(
     1.4      for ( ; order <= shadow_max_order(d); ++order )
     1.5      {
     1.6          unsigned int n = count;
     1.7 -        const struct shadow_page_info *sp;
     1.8 +        const struct page_info *sp;
     1.9  
    1.10          page_list_for_each ( sp, &d->arch.paging.shadow.freelists[order] )
    1.11              if ( --n == 0 )
    1.12 @@ -1306,7 +1306,7 @@ static inline int space_is_available(
    1.13   * non-Xen mappings in this top-level shadow mfn */
    1.14  static void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn)
    1.15  {
    1.16 -    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
    1.17 +    struct page_info *sp = mfn_to_page(smfn);
    1.18      switch ( sp->u.sh.type )
    1.19      {
    1.20      case SH_type_l2_32_shadow:
    1.21 @@ -1334,7 +1334,7 @@ static inline void trace_shadow_prealloc
    1.22          /* Convert smfn to gfn */
    1.23          unsigned long gfn;
    1.24          ASSERT(mfn_valid(smfn));
    1.25 -        gfn = mfn_to_gfn(d, _mfn(mfn_to_shadow_page(smfn)->v.sh.back));
    1.26 +        gfn = mfn_to_gfn(d, _mfn(mfn_to_page(smfn)->v.sh.back));
    1.27          __trace_var(TRC_SHADOW_PREALLOC_UNPIN, 0/*!tsc*/,
    1.28                      sizeof(gfn), (unsigned char*)&gfn);
    1.29      }
    1.30 @@ -1350,7 +1350,7 @@ static void _shadow_prealloc(
    1.31      /* Need a vpcu for calling unpins; for now, since we don't have
    1.32       * per-vcpu shadows, any will do */
    1.33      struct vcpu *v, *v2;
    1.34 -    struct shadow_page_info *sp, *t;
    1.35 +    struct page_info *sp, *t;
    1.36      mfn_t smfn;
    1.37      int i;
    1.38  
    1.39 @@ -1366,7 +1366,7 @@ static void _shadow_prealloc(
    1.40      perfc_incr(shadow_prealloc_1);
    1.41      page_list_for_each_safe_reverse(sp, t, &d->arch.paging.shadow.pinned_shadows)
    1.42      {
    1.43 -        smfn = shadow_page_to_mfn(sp);
    1.44 +        smfn = page_to_mfn(sp);
    1.45  
    1.46          /* Unpin this top-level shadow */
    1.47          trace_shadow_prealloc_unpin(d, smfn);
    1.48 @@ -1425,7 +1425,7 @@ void shadow_prealloc(struct domain *d, u
    1.49   * this domain's shadows */
    1.50  static void shadow_blow_tables(struct domain *d) 
    1.51  {
    1.52 -    struct shadow_page_info *sp, *t;
    1.53 +    struct page_info *sp, *t;
    1.54      struct vcpu *v = d->vcpu[0];
    1.55      mfn_t smfn;
    1.56      int i;
    1.57 @@ -1435,7 +1435,7 @@ static void shadow_blow_tables(struct do
    1.58      /* Pass one: unpin all pinned pages */
    1.59      page_list_for_each_safe_reverse(sp, t, &d->arch.paging.shadow.pinned_shadows)
    1.60      {
    1.61 -        smfn = shadow_page_to_mfn(sp);
    1.62 +        smfn = page_to_mfn(sp);
    1.63          sh_unpin(v, smfn);
    1.64      }
    1.65          
    1.66 @@ -1489,21 +1489,17 @@ static __init int shadow_blow_tables_key
    1.67  __initcall(shadow_blow_tables_keyhandler_init);
    1.68  #endif /* !NDEBUG */
    1.69  
    1.70 -#ifdef __i386__
    1.71 -# define next_shadow(pg) ((pg)->next_shadow)
    1.72 -# define set_next_shadow(pg, n) ((void)((pg)->next_shadow = (n)))
    1.73 -#else
    1.74 -static inline struct shadow_page_info *
    1.75 -next_shadow(const struct shadow_page_info *sp)
    1.76 +static inline struct page_info *
    1.77 +next_shadow(const struct page_info *sp)
    1.78  {
    1.79 -    return sp->next_shadow ? mfn_to_shadow_page(_mfn(sp->next_shadow)) : NULL;
    1.80 +    return sp->next_shadow ? mfn_to_page(_mfn(sp->next_shadow)) : NULL;
    1.81  }
    1.82 +
    1.83  static inline void
    1.84 -set_next_shadow(struct shadow_page_info *sp, struct shadow_page_info *next)
    1.85 +set_next_shadow(struct page_info *sp, struct page_info *next)
    1.86  {
    1.87 -    sp->next_shadow = next ? mfn_x(shadow_page_to_mfn(next)) : 0;
    1.88 +    sp->next_shadow = next ? mfn_x(page_to_mfn(next)) : 0;
    1.89  }
    1.90 -#endif
    1.91  
    1.92  /* Allocate another shadow's worth of (contiguous, aligned) pages,
    1.93   * and fill in the type and backpointer fields of their page_infos. 
    1.94 @@ -1512,7 +1508,7 @@ mfn_t shadow_alloc(struct domain *d,
    1.95                      u32 shadow_type,
    1.96                      unsigned long backpointer)
    1.97  {
    1.98 -    struct shadow_page_info *sp = NULL;
    1.99 +    struct page_info *sp = NULL;
   1.100      unsigned int order = shadow_order(shadow_type);
   1.101      cpumask_t mask;
   1.102      void *p;
   1.103 @@ -1561,7 +1557,7 @@ mfn_t shadow_alloc(struct domain *d,
   1.104              flush_tlb_mask(mask);
   1.105          }
   1.106          /* Now safe to clear the page for reuse */
   1.107 -        p = sh_map_domain_page(shadow_page_to_mfn(sp+i));
   1.108 +        p = sh_map_domain_page(page_to_mfn(sp+i));
   1.109          ASSERT(p != NULL);
   1.110          clear_page(p);
   1.111          sh_unmap_domain_page(p);
   1.112 @@ -1573,14 +1569,14 @@ mfn_t shadow_alloc(struct domain *d,
   1.113          set_next_shadow(&sp[i], NULL);
   1.114          perfc_incr(shadow_alloc_count);
   1.115      }
   1.116 -    return shadow_page_to_mfn(sp);
   1.117 +    return page_to_mfn(sp);
   1.118  }
   1.119  
   1.120  
   1.121  /* Return some shadow pages to the pool. */
   1.122  void shadow_free(struct domain *d, mfn_t smfn)
   1.123  {
   1.124 -    struct shadow_page_info *sp = mfn_to_shadow_page(smfn); 
   1.125 +    struct page_info *sp = mfn_to_page(smfn); 
   1.126      u32 shadow_type;
   1.127      unsigned long order;
   1.128      unsigned long mask;
   1.129 @@ -1626,7 +1622,7 @@ void shadow_free(struct domain *d, mfn_t
   1.130      for ( ; order < shadow_max_order(d); ++order )
   1.131      {
   1.132          mask = 1 << order;
   1.133 -        if ( (mfn_x(shadow_page_to_mfn(sp)) & mask) ) {
   1.134 +        if ( (mfn_x(page_to_mfn(sp)) & mask) ) {
   1.135              /* Merge with predecessor block? */
   1.136              if ( ((sp-mask)->u.sh.type != PGT_none) ||
   1.137                   ((sp-mask)->v.free.order != order) )
   1.138 @@ -1787,7 +1783,7 @@ static unsigned int sh_set_allocation(st
   1.139                                        unsigned int pages,
   1.140                                        int *preempted)
   1.141  {
   1.142 -    struct shadow_page_info *sp;
   1.143 +    struct page_info *sp;
   1.144      unsigned int lower_bound;
   1.145      unsigned int j, order = shadow_max_order(d);
   1.146  
   1.147 @@ -1809,7 +1805,7 @@ static unsigned int sh_set_allocation(st
   1.148          if ( d->arch.paging.shadow.total_pages < pages ) 
   1.149          {
   1.150              /* Need to allocate more memory from domheap */
   1.151 -            sp = (struct shadow_page_info *)
   1.152 +            sp = (struct page_info *)
   1.153                  alloc_domheap_pages(NULL, order, MEMF_node(domain_to_node(d)));
   1.154              if ( sp == NULL ) 
   1.155              { 
   1.156 @@ -1890,7 +1886,7 @@ static inline key_t sh_hash(unsigned lon
   1.157  static void sh_hash_audit_bucket(struct domain *d, int bucket)
   1.158  /* Audit one bucket of the hash table */
   1.159  {
   1.160 -    struct shadow_page_info *sp, *x;
   1.161 +    struct page_info *sp, *x;
   1.162  
   1.163      if ( !(SHADOW_AUDIT_ENABLE) )
   1.164          return;
   1.165 @@ -1931,7 +1927,7 @@ static void sh_hash_audit_bucket(struct 
   1.166                          SHADOW_ERROR("MFN %#"PRpgmfn" shadowed (by %#"PRI_mfn")"
   1.167                                       " and not OOS but has typecount %#lx\n",
   1.168                                       sp->v.sh.back,
   1.169 -                                     mfn_x(shadow_page_to_mfn(sp)), 
   1.170 +                                     mfn_x(page_to_mfn(sp)), 
   1.171                                       gpg->u.inuse.type_info);
   1.172                          BUG();
   1.173                      }
   1.174 @@ -1944,7 +1940,7 @@ static void sh_hash_audit_bucket(struct 
   1.175              {
   1.176                  SHADOW_ERROR("MFN %#"PRpgmfn" shadowed (by %#"PRI_mfn")"
   1.177                               " but has typecount %#lx\n",
   1.178 -                             sp->v.sh.back, mfn_x(shadow_page_to_mfn(sp)),
   1.179 +                             sp->v.sh.back, mfn_x(page_to_mfn(sp)),
   1.180                               gpg->u.inuse.type_info);
   1.181                  BUG();
   1.182              }
   1.183 @@ -1983,15 +1979,15 @@ static void sh_hash_audit(struct domain 
   1.184   * Returns 0 for success, 1 for error. */
   1.185  static int shadow_hash_alloc(struct domain *d)
   1.186  {
   1.187 -    struct shadow_page_info **table;
   1.188 +    struct page_info **table;
   1.189  
   1.190      ASSERT(shadow_locked_by_me(d));
   1.191      ASSERT(!d->arch.paging.shadow.hash_table);
   1.192  
   1.193 -    table = xmalloc_array(struct shadow_page_info *, SHADOW_HASH_BUCKETS);
   1.194 +    table = xmalloc_array(struct page_info *, SHADOW_HASH_BUCKETS);
   1.195      if ( !table ) return 1;
   1.196      memset(table, 0, 
   1.197 -           SHADOW_HASH_BUCKETS * sizeof (struct shadow_page_info *));
   1.198 +           SHADOW_HASH_BUCKETS * sizeof (struct page_info *));
   1.199      d->arch.paging.shadow.hash_table = table;
   1.200      return 0;
   1.201  }
   1.202 @@ -2013,7 +2009,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
   1.203   * or INVALID_MFN if it doesn't exist */
   1.204  {
   1.205      struct domain *d = v->domain;
   1.206 -    struct shadow_page_info *sp, *prev;
   1.207 +    struct page_info *sp, *prev;
   1.208      key_t key;
   1.209  
   1.210      ASSERT(shadow_locked_by_me(d));
   1.211 @@ -2037,7 +2033,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
   1.212              {
   1.213                  if ( unlikely(d->arch.paging.shadow.hash_walking != 0) )
   1.214                      /* Can't reorder: someone is walking the hash chains */
   1.215 -                    return shadow_page_to_mfn(sp);
   1.216 +                    return page_to_mfn(sp);
   1.217                  else 
   1.218                  {
   1.219                      ASSERT(prev);
   1.220 @@ -2052,7 +2048,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
   1.221              {
   1.222                  perfc_incr(shadow_hash_lookup_head);
   1.223              }
   1.224 -            return shadow_page_to_mfn(sp);
   1.225 +            return page_to_mfn(sp);
   1.226          }
   1.227          prev = sp;
   1.228          sp = next_shadow(sp);
   1.229 @@ -2067,7 +2063,7 @@ void shadow_hash_insert(struct vcpu *v, 
   1.230  /* Put a mapping (n,t)->smfn into the hash table */
   1.231  {
   1.232      struct domain *d = v->domain;
   1.233 -    struct shadow_page_info *sp;
   1.234 +    struct page_info *sp;
   1.235      key_t key;
   1.236      
   1.237      ASSERT(shadow_locked_by_me(d));
   1.238 @@ -2081,7 +2077,7 @@ void shadow_hash_insert(struct vcpu *v, 
   1.239      sh_hash_audit_bucket(d, key);
   1.240      
   1.241      /* Insert this shadow at the top of the bucket */
   1.242 -    sp = mfn_to_shadow_page(smfn);
   1.243 +    sp = mfn_to_page(smfn);
   1.244      set_next_shadow(sp, d->arch.paging.shadow.hash_table[key]);
   1.245      d->arch.paging.shadow.hash_table[key] = sp;
   1.246      
   1.247 @@ -2093,7 +2089,7 @@ void shadow_hash_delete(struct vcpu *v, 
   1.248  /* Excise the mapping (n,t)->smfn from the hash table */
   1.249  {
   1.250      struct domain *d = v->domain;
   1.251 -    struct shadow_page_info *sp, *x;
   1.252 +    struct page_info *sp, *x;
   1.253      key_t key;
   1.254  
   1.255      ASSERT(shadow_locked_by_me(d));
   1.256 @@ -2106,7 +2102,7 @@ void shadow_hash_delete(struct vcpu *v, 
   1.257      key = sh_hash(n, t);
   1.258      sh_hash_audit_bucket(d, key);
   1.259      
   1.260 -    sp = mfn_to_shadow_page(smfn);
   1.261 +    sp = mfn_to_page(smfn);
   1.262      if ( d->arch.paging.shadow.hash_table[key] == sp ) 
   1.263          /* Easy case: we're deleting the head item. */
   1.264          d->arch.paging.shadow.hash_table[key] = next_shadow(sp);
   1.265 @@ -2148,7 +2144,7 @@ static void hash_foreach(struct vcpu *v,
   1.266  {
   1.267      int i, done = 0;
   1.268      struct domain *d = v->domain;
   1.269 -    struct shadow_page_info *x;
   1.270 +    struct page_info *x;
   1.271  
   1.272      /* Say we're here, to stop hash-lookups reordering the chains */
   1.273      ASSERT(shadow_locked_by_me(d));
   1.274 @@ -2166,7 +2162,7 @@ static void hash_foreach(struct vcpu *v,
   1.275              {
   1.276                  ASSERT(x->u.sh.type <= 15);
   1.277                  ASSERT(callbacks[x->u.sh.type] != NULL);
   1.278 -                done = callbacks[x->u.sh.type](v, shadow_page_to_mfn(x),
   1.279 +                done = callbacks[x->u.sh.type](v, page_to_mfn(x),
   1.280                                                 callback_mfn);
   1.281                  if ( done ) break;
   1.282              }
   1.283 @@ -2184,7 +2180,7 @@ static void hash_foreach(struct vcpu *v,
   1.284  
   1.285  void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
   1.286  {
   1.287 -    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
   1.288 +    struct page_info *sp = mfn_to_page(smfn);
   1.289      unsigned int t = sp->u.sh.type;
   1.290  
   1.291  
   1.292 @@ -2449,7 +2445,7 @@ int sh_remove_write_access(struct vcpu *
   1.293      {
   1.294          unsigned long old_count = (pg->u.inuse.type_info & PGT_count_mask);
   1.295          mfn_t last_smfn = _mfn(v->arch.paging.shadow.last_writeable_pte_smfn);
   1.296 -        int shtype = mfn_to_shadow_page(last_smfn)->u.sh.type;
   1.297 +        int shtype = mfn_to_page(last_smfn)->u.sh.type;
   1.298  
   1.299          if ( callbacks[shtype] ) 
   1.300              callbacks[shtype](v, last_smfn, gmfn);
   1.301 @@ -2492,7 +2488,7 @@ int sh_remove_write_access(struct vcpu *
   1.302  int sh_remove_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
   1.303                                       mfn_t smfn, unsigned long off)
   1.304  {
   1.305 -    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
   1.306 +    struct page_info *sp = mfn_to_page(smfn);
   1.307      
   1.308      ASSERT(mfn_valid(smfn));
   1.309      ASSERT(mfn_valid(gmfn));
   1.310 @@ -2612,7 +2608,7 @@ static int sh_remove_shadow_via_pointer(
   1.311  /* Follow this shadow's up-pointer, if it has one, and remove the reference
   1.312   * found there.  Returns 1 if that was the only reference to this shadow */
   1.313  {
   1.314 -    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
   1.315 +    struct page_info *sp = mfn_to_page(smfn);
   1.316      mfn_t pmfn;
   1.317      void *vaddr;
   1.318      int rc;
     2.1 --- a/xen/arch/x86/mm/shadow/multi.c	Fri Jan 30 11:16:52 2009 +0000
     2.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Fri Jan 30 11:33:27 2009 +0000
     2.3 @@ -973,7 +973,7 @@ static int shadow_set_l2e(struct vcpu *v
     2.4          }
     2.5  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     2.6          {
     2.7 -            struct shadow_page_info *sp = mfn_to_shadow_page(sl1mfn);
     2.8 +            struct page_info *sp = mfn_to_page(sl1mfn);
     2.9              mfn_t gl1mfn = _mfn(sp->v.sh.back);
    2.10  
    2.11              /* If the shadow is a fl1 then the backpointer contains
    2.12 @@ -1194,8 +1194,8 @@ static inline void increment_ptr_to_gues
    2.13  do {                                                                    \
    2.14      int _i;                                                             \
    2.15      shadow_l1e_t *_sp = sh_map_domain_page((_sl1mfn));                  \
    2.16 -    ASSERT(mfn_to_shadow_page(_sl1mfn)->u.sh.type == SH_type_l1_shadow  \
    2.17 -           || mfn_to_shadow_page(_sl1mfn)->u.sh.type == SH_type_fl1_shadow);\
    2.18 +    ASSERT(mfn_to_page(_sl1mfn)->u.sh.type == SH_type_l1_shadow  \
    2.19 +           || mfn_to_page(_sl1mfn)->u.sh.type == SH_type_fl1_shadow);\
    2.20      for ( _i = 0; _i < SHADOW_L1_PAGETABLE_ENTRIES; _i++ )              \
    2.21      {                                                                   \
    2.22          (_sl1e) = _sp + _i;                                             \
    2.23 @@ -1232,7 +1232,7 @@ do {                                    
    2.24  do {                                                                      \
    2.25      int _i, _j, __done = 0;                                               \
    2.26      int _xen = !shadow_mode_external(_dom);                               \
    2.27 -    ASSERT(mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2_32_shadow);\
    2.28 +    ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_32_shadow);\
    2.29      for ( _j = 0; _j < 4 && !__done; _j++ )                               \
    2.30      {                                                                     \
    2.31          shadow_l2e_t *_sp = sh_map_domain_page(_sl2mfn);                  \
    2.32 @@ -1260,11 +1260,11 @@ do {                                    
    2.33      int _i;                                                                \
    2.34      int _xen = !shadow_mode_external(_dom);                                \
    2.35      shadow_l2e_t *_sp = sh_map_domain_page((_sl2mfn));                     \
    2.36 -    ASSERT(mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2_pae_shadow \
    2.37 -           || mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow);\
    2.38 +    ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_pae_shadow \
    2.39 +           || mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow);\
    2.40      for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                 \
    2.41          if ( (!(_xen))                                                     \
    2.42 -             || mfn_to_shadow_page(_sl2mfn)->u.sh.type != SH_type_l2h_pae_shadow\
    2.43 +             || mfn_to_page(_sl2mfn)->u.sh.type != SH_type_l2h_pae_shadow\
    2.44               || ((_i + (3 * SHADOW_L2_PAGETABLE_ENTRIES))                  \
    2.45                   < (HYPERVISOR_VIRT_START >> SHADOW_L2_PAGETABLE_SHIFT)) ) \
    2.46          {                                                                  \
    2.47 @@ -1285,13 +1285,13 @@ do {                                    
    2.48      int _i;                                                                 \
    2.49      int _xen = !shadow_mode_external(_dom);                                 \
    2.50      shadow_l2e_t *_sp = sh_map_domain_page((_sl2mfn));                      \
    2.51 -    ASSERT(mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2_64_shadow ||\
    2.52 -           mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2h_64_shadow);\
    2.53 +    ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_64_shadow ||\
    2.54 +           mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2h_64_shadow);\
    2.55      for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                  \
    2.56      {                                                                       \
    2.57          if ( (!(_xen))                                                      \
    2.58               || !is_pv_32on64_domain(_dom)                                  \
    2.59 -             || mfn_to_shadow_page(_sl2mfn)->u.sh.type != SH_type_l2h_64_shadow\
    2.60 +             || mfn_to_page(_sl2mfn)->u.sh.type != SH_type_l2h_64_shadow\
    2.61               || (_i < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom)) )           \
    2.62          {                                                                   \
    2.63              (_sl2e) = _sp + _i;                                             \
    2.64 @@ -1313,7 +1313,7 @@ do {                                    
    2.65  do {                                                                    \
    2.66      int _i;                                                             \
    2.67      shadow_l3e_t *_sp = sh_map_domain_page((_sl3mfn));                  \
    2.68 -    ASSERT(mfn_to_shadow_page(_sl3mfn)->u.sh.type == SH_type_l3_64_shadow);\
    2.69 +    ASSERT(mfn_to_page(_sl3mfn)->u.sh.type == SH_type_l3_64_shadow);\
    2.70      for ( _i = 0; _i < SHADOW_L3_PAGETABLE_ENTRIES; _i++ )              \
    2.71      {                                                                   \
    2.72          (_sl3e) = _sp + _i;                                             \
    2.73 @@ -1331,7 +1331,7 @@ do {                                    
    2.74      shadow_l4e_t *_sp = sh_map_domain_page((_sl4mfn));                  \
    2.75      int _xen = !shadow_mode_external(_dom);                             \
    2.76      int _i;                                                             \
    2.77 -    ASSERT(mfn_to_shadow_page(_sl4mfn)->u.sh.type == SH_type_l4_64_shadow);\
    2.78 +    ASSERT(mfn_to_page(_sl4mfn)->u.sh.type == SH_type_l4_64_shadow);\
    2.79      for ( _i = 0; _i < SHADOW_L4_PAGETABLE_ENTRIES; _i++ )              \
    2.80      {                                                                   \
    2.81          if ( (!(_xen)) || is_guest_l4_slot(_dom, _i) )                  \
    2.82 @@ -1506,7 +1506,7 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
    2.83           && shadow_type != SH_type_l2h_pae_shadow 
    2.84           && shadow_type != SH_type_l4_64_shadow )
    2.85          /* Lower-level shadow, not yet linked form a higher level */
    2.86 -        mfn_to_shadow_page(smfn)->up = 0;
    2.87 +        mfn_to_page(smfn)->up = 0;
    2.88  
    2.89  #if GUEST_PAGING_LEVELS == 4
    2.90  #if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL) 
    2.91 @@ -1519,7 +1519,7 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
    2.92           * of them, decide that this isn't an old linux guest, and stop
    2.93           * pinning l3es.  This is not very quick but it doesn't happen
    2.94           * very often. */
    2.95 -        struct shadow_page_info *sp, *t;
    2.96 +        struct page_info *sp, *t;
    2.97          struct vcpu *v2;
    2.98          int l4count = 0, vcpus = 0;
    2.99          page_list_for_each(sp, &v->domain->arch.paging.shadow.pinned_shadows)
   2.100 @@ -1535,7 +1535,7 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
   2.101              page_list_for_each_safe(sp, t, &v->domain->arch.paging.shadow.pinned_shadows)
   2.102              {
   2.103                  if ( sp->u.sh.type == SH_type_l3_64_shadow )
   2.104 -                    sh_unpin(v, shadow_page_to_mfn(sp));
   2.105 +                    sh_unpin(v, page_to_mfn(sp));
   2.106              }
   2.107              v->domain->arch.paging.shadow.opt_flags &= ~SHOPT_LINUX_L3_TOPLEVEL;
   2.108          }
   2.109 @@ -1918,7 +1918,7 @@ static shadow_l1e_t * shadow_get_and_cre
   2.110  void sh_destroy_l4_shadow(struct vcpu *v, mfn_t smfn)
   2.111  {
   2.112      shadow_l4e_t *sl4e;
   2.113 -    u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
   2.114 +    u32 t = mfn_to_page(smfn)->u.sh.type;
   2.115      mfn_t gmfn, sl4mfn;
   2.116  
   2.117      SHADOW_DEBUG(DESTROY_SHADOW,
   2.118 @@ -1926,7 +1926,7 @@ void sh_destroy_l4_shadow(struct vcpu *v
   2.119      ASSERT(t == SH_type_l4_shadow);
   2.120  
   2.121      /* Record that the guest page isn't shadowed any more (in this type) */
   2.122 -    gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
   2.123 +    gmfn = _mfn(mfn_to_page(smfn)->v.sh.back);
   2.124      delete_shadow_status(v, gmfn, t, smfn);
   2.125      shadow_demote(v, gmfn, t);
   2.126      /* Decrement refcounts of all the old entries */
   2.127 @@ -1947,7 +1947,7 @@ void sh_destroy_l4_shadow(struct vcpu *v
   2.128  void sh_destroy_l3_shadow(struct vcpu *v, mfn_t smfn)
   2.129  {
   2.130      shadow_l3e_t *sl3e;
   2.131 -    u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
   2.132 +    u32 t = mfn_to_page(smfn)->u.sh.type;
   2.133      mfn_t gmfn, sl3mfn;
   2.134  
   2.135      SHADOW_DEBUG(DESTROY_SHADOW,
   2.136 @@ -1955,7 +1955,7 @@ void sh_destroy_l3_shadow(struct vcpu *v
   2.137      ASSERT(t == SH_type_l3_shadow);
   2.138  
   2.139      /* Record that the guest page isn't shadowed any more (in this type) */
   2.140 -    gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
   2.141 +    gmfn = _mfn(mfn_to_page(smfn)->v.sh.back);
   2.142      delete_shadow_status(v, gmfn, t, smfn);
   2.143      shadow_demote(v, gmfn, t);
   2.144  
   2.145 @@ -1977,7 +1977,7 @@ void sh_destroy_l3_shadow(struct vcpu *v
   2.146  void sh_destroy_l2_shadow(struct vcpu *v, mfn_t smfn)
   2.147  {
   2.148      shadow_l2e_t *sl2e;
   2.149 -    u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
   2.150 +    u32 t = mfn_to_page(smfn)->u.sh.type;
   2.151      mfn_t gmfn, sl2mfn;
   2.152  
   2.153      SHADOW_DEBUG(DESTROY_SHADOW,
   2.154 @@ -1990,7 +1990,7 @@ void sh_destroy_l2_shadow(struct vcpu *v
   2.155  #endif
   2.156  
   2.157      /* Record that the guest page isn't shadowed any more (in this type) */
   2.158 -    gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
   2.159 +    gmfn = _mfn(mfn_to_page(smfn)->v.sh.back);
   2.160      delete_shadow_status(v, gmfn, t, smfn);
   2.161      shadow_demote(v, gmfn, t);
   2.162  
   2.163 @@ -2011,7 +2011,7 @@ void sh_destroy_l1_shadow(struct vcpu *v
   2.164  {
   2.165      struct domain *d = v->domain;
   2.166      shadow_l1e_t *sl1e;
   2.167 -    u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
   2.168 +    u32 t = mfn_to_page(smfn)->u.sh.type;
   2.169  
   2.170      SHADOW_DEBUG(DESTROY_SHADOW,
   2.171                    "%s(%05lx)\n", __func__, mfn_x(smfn));
   2.172 @@ -2020,12 +2020,12 @@ void sh_destroy_l1_shadow(struct vcpu *v
   2.173      /* Record that the guest page isn't shadowed any more (in this type) */
   2.174      if ( t == SH_type_fl1_shadow )
   2.175      {
   2.176 -        gfn_t gfn = _gfn(mfn_to_shadow_page(smfn)->v.sh.back);
   2.177 +        gfn_t gfn = _gfn(mfn_to_page(smfn)->v.sh.back);
   2.178          delete_fl1_shadow_status(v, gfn, smfn);
   2.179      }
   2.180      else 
   2.181      {
   2.182 -        mfn_t gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
   2.183 +        mfn_t gmfn = _mfn(mfn_to_page(smfn)->v.sh.back);
   2.184          delete_shadow_status(v, gmfn, t, smfn);
   2.185          shadow_demote(v, gmfn, t);
   2.186      }
   2.187 @@ -2051,7 +2051,7 @@ void sh_destroy_l1_shadow(struct vcpu *v
   2.188  void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
   2.189  {
   2.190      struct domain *d = v->domain;
   2.191 -    ASSERT(mfn_to_shadow_page(mmfn)->u.sh.type == SH_type_monitor_table);
   2.192 +    ASSERT(mfn_to_page(mmfn)->u.sh.type == SH_type_monitor_table);
   2.193  
   2.194  #if (CONFIG_PAGING_LEVELS == 4) && (SHADOW_PAGING_LEVELS != 4)
   2.195      {
   2.196 @@ -2295,7 +2295,7 @@ static int validate_gl2e(struct vcpu *v,
   2.197  
   2.198  #if SHADOW_PAGING_LEVELS == 3
   2.199          reserved_xen_slot = 
   2.200 -            ((mfn_to_shadow_page(sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow) &&
   2.201 +            ((mfn_to_page(sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow) &&
   2.202               (shadow_index 
   2.203                >= (L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1))));
   2.204  #else /* SHADOW_PAGING_LEVELS == 2 */
   2.205 @@ -2349,7 +2349,7 @@ static int validate_gl1e(struct vcpu *v,
   2.206      result |= shadow_set_l1e(v, sl1p, new_sl1e, sl1mfn);
   2.207  
   2.208  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
   2.209 -    gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
   2.210 +    gl1mfn = _mfn(mfn_to_page(sl1mfn)->v.sh.back);
   2.211      if ( mfn_valid(gl1mfn) 
   2.212           && mfn_is_out_of_sync(gl1mfn) )
   2.213      {
   2.214 @@ -2426,14 +2426,14 @@ void sh_resync_l1(struct vcpu *v, mfn_t 
   2.215   *      called in the *mode* of the vcpu that unsynced it.  Clear?  Good. */
   2.216  int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
   2.217  {
   2.218 -    struct shadow_page_info *sp;
   2.219 +    struct page_info *sp;
   2.220      mfn_t smfn;
   2.221  
   2.222      smfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow);
   2.223      ASSERT(mfn_valid(smfn)); /* Otherwise we would not have been called */
   2.224      
   2.225      /* Up to l2 */
   2.226 -    sp = mfn_to_shadow_page(smfn);
   2.227 +    sp = mfn_to_page(smfn);
   2.228      if ( sp->u.sh.count != 1 || !sp->up )
   2.229          return 0;
   2.230      smfn = _mfn(sp->up >> PAGE_SHIFT);
   2.231 @@ -2441,14 +2441,14 @@ int sh_safe_not_to_sync(struct vcpu *v, 
   2.232  
   2.233  #if (SHADOW_PAGING_LEVELS == 4) 
   2.234      /* up to l3 */
   2.235 -    sp = mfn_to_shadow_page(smfn);
   2.236 +    sp = mfn_to_page(smfn);
   2.237      if ( sp->u.sh.count != 1 || !sp->up )
   2.238          return 0;
   2.239      smfn = _mfn(sp->up >> PAGE_SHIFT);
   2.240      ASSERT(mfn_valid(smfn));
   2.241  
   2.242      /* up to l4 */
   2.243 -    sp = mfn_to_shadow_page(smfn);
   2.244 +    sp = mfn_to_page(smfn);
   2.245      if ( sp->u.sh.count != 1
   2.246           || sh_type_is_pinnable(v, SH_type_l3_64_shadow) || !sp->up )
   2.247          return 0;
   2.248 @@ -2967,7 +2967,7 @@ static int sh_page_fault(struct vcpu *v,
   2.249                                          + shadow_l2_linear_offset(va)),
   2.250                                         sizeof(sl2e)) != 0)
   2.251                       || !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT)
   2.252 -                     || !mfn_valid(gl1mfn = _mfn(mfn_to_shadow_page(
   2.253 +                     || !mfn_valid(gl1mfn = _mfn(mfn_to_page(
   2.254                                        shadow_l2e_get_mfn(sl2e))->v.sh.back))
   2.255                       || unlikely(mfn_is_out_of_sync(gl1mfn)) )
   2.256                 {
   2.257 @@ -3520,7 +3520,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
   2.258      // easier than invalidating all of the individual 4K pages).
   2.259      //
   2.260      sl1mfn = shadow_l2e_get_mfn(sl2e);
   2.261 -    if ( mfn_to_shadow_page(sl1mfn)->u.sh.type
   2.262 +    if ( mfn_to_page(sl1mfn)->u.sh.type
   2.263           == SH_type_fl1_shadow )
   2.264      {
   2.265          flush_tlb_local();
   2.266 @@ -3530,7 +3530,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
   2.267  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
   2.268      /* Check to see if the SL1 is out of sync. */
   2.269      {
   2.270 -        mfn_t gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
   2.271 +        mfn_t gl1mfn = _mfn(mfn_to_page(sl1mfn)->v.sh.back);
   2.272          struct page_info *pg = mfn_to_page(gl1mfn);
   2.273          if ( mfn_valid(gl1mfn) 
   2.274               && page_is_out_of_sync(pg) )
   2.275 @@ -3560,7 +3560,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
   2.276              }
   2.277  
   2.278              sl1mfn = shadow_l2e_get_mfn(sl2e);
   2.279 -            gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
   2.280 +            gl1mfn = _mfn(mfn_to_page(sl1mfn)->v.sh.back);
   2.281              pg = mfn_to_page(gl1mfn);
   2.282              
   2.283              if ( likely(sh_mfn_is_a_page_table(gl1mfn)
   2.284 @@ -3965,7 +3965,7 @@ sh_set_toplevel_shadow(struct vcpu *v,
   2.285          /* Need to repin the old toplevel shadow if it's been unpinned
   2.286           * by shadow_prealloc(): in PV mode we're still running on this
   2.287           * shadow and it's not safe to free it yet. */
   2.288 -        if ( !mfn_to_shadow_page(old_smfn)->u.sh.pinned && !sh_pin(v, old_smfn) )
   2.289 +        if ( !mfn_to_page(old_smfn)->u.sh.pinned && !sh_pin(v, old_smfn) )
   2.290          {
   2.291              SHADOW_ERROR("can't re-pin %#lx\n", mfn_x(old_smfn));
   2.292              domain_crash(v->domain);
   2.293 @@ -4259,12 +4259,12 @@ int sh_rm_write_access_from_sl1p(struct 
   2.294  {
   2.295      int r;
   2.296      shadow_l1e_t *sl1p, sl1e;
   2.297 -    struct shadow_page_info *sp;
   2.298 +    struct page_info *sp;
   2.299  
   2.300      ASSERT(mfn_valid(gmfn));
   2.301      ASSERT(mfn_valid(smfn));
   2.302  
   2.303 -    sp = mfn_to_shadow_page(smfn);
   2.304 +    sp = mfn_to_page(smfn);
   2.305  
   2.306      if ( sp->count_info != 0
   2.307           || (sp->u.sh.type != SH_type_l1_shadow
   2.308 @@ -4407,7 +4407,7 @@ int sh_rm_mappings_from_l1(struct vcpu *
   2.309  void sh_clear_shadow_entry(struct vcpu *v, void *ep, mfn_t smfn)
   2.310  /* Blank out a single shadow entry */
   2.311  {
   2.312 -    switch ( mfn_to_shadow_page(smfn)->u.sh.type )
   2.313 +    switch ( mfn_to_page(smfn)->u.sh.type )
   2.314      {
   2.315      case SH_type_l1_shadow:
   2.316          (void) shadow_set_l1e(v, ep, shadow_l1e_empty(), smfn); break;
   2.317 @@ -4440,7 +4440,7 @@ int sh_remove_l1_shadow(struct vcpu *v, 
   2.318               && (mfn_x(shadow_l2e_get_mfn(*sl2e)) == mfn_x(sl1mfn)) )
   2.319          {
   2.320              (void) shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
   2.321 -            if ( mfn_to_shadow_page(sl1mfn)->u.sh.type == 0 )
   2.322 +            if ( mfn_to_page(sl1mfn)->u.sh.type == 0 )
   2.323                  /* This breaks us cleanly out of the FOREACH macro */
   2.324                  done = 1;
   2.325          }
   2.326 @@ -4463,7 +4463,7 @@ int sh_remove_l2_shadow(struct vcpu *v, 
   2.327               && (mfn_x(shadow_l3e_get_mfn(*sl3e)) == mfn_x(sl2mfn)) )
   2.328          {
   2.329              (void) shadow_set_l3e(v, sl3e, shadow_l3e_empty(), sl3mfn);
   2.330 -            if ( mfn_to_shadow_page(sl2mfn)->u.sh.type == 0 )
   2.331 +            if ( mfn_to_page(sl2mfn)->u.sh.type == 0 )
   2.332                  /* This breaks us cleanly out of the FOREACH macro */
   2.333                  done = 1;
   2.334          }
   2.335 @@ -4485,7 +4485,7 @@ int sh_remove_l3_shadow(struct vcpu *v, 
   2.336               && (mfn_x(shadow_l4e_get_mfn(*sl4e)) == mfn_x(sl3mfn)) )
   2.337          {
   2.338              (void) shadow_set_l4e(v, sl4e, shadow_l4e_empty(), sl4mfn);
   2.339 -            if ( mfn_to_shadow_page(sl3mfn)->u.sh.type == 0 )
   2.340 +            if ( mfn_to_page(sl3mfn)->u.sh.type == 0 )
   2.341                  /* This breaks us cleanly out of the FOREACH macro */
   2.342                  done = 1;
   2.343          }
   2.344 @@ -4887,7 +4887,7 @@ int sh_audit_l1_table(struct vcpu *v, mf
   2.345      int done = 0;
   2.346      
   2.347      /* Follow the backpointer */
   2.348 -    gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
   2.349 +    gl1mfn = _mfn(mfn_to_page(sl1mfn)->v.sh.back);
   2.350  
   2.351  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
   2.352      /* Out-of-sync l1 shadows can contain anything: just check the OOS hash */
   2.353 @@ -4977,7 +4977,7 @@ int sh_audit_l2_table(struct vcpu *v, mf
   2.354      int done = 0;
   2.355  
   2.356      /* Follow the backpointer */
   2.357 -    gl2mfn = _mfn(mfn_to_shadow_page(sl2mfn)->v.sh.back);
   2.358 +    gl2mfn = _mfn(mfn_to_page(sl2mfn)->v.sh.back);
   2.359  
   2.360  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
   2.361      /* Only L1's may be out of sync. */
   2.362 @@ -5026,7 +5026,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
   2.363      int done = 0;
   2.364  
   2.365      /* Follow the backpointer */
   2.366 -    gl3mfn = _mfn(mfn_to_shadow_page(sl3mfn)->v.sh.back);
   2.367 +    gl3mfn = _mfn(mfn_to_page(sl3mfn)->v.sh.back);
   2.368  
   2.369  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
   2.370      /* Only L1's may be out of sync. */
   2.371 @@ -5073,7 +5073,7 @@ int sh_audit_l4_table(struct vcpu *v, mf
   2.372      int done = 0;
   2.373  
   2.374      /* Follow the backpointer */
   2.375 -    gl4mfn = _mfn(mfn_to_shadow_page(sl4mfn)->v.sh.back);
   2.376 +    gl4mfn = _mfn(mfn_to_page(sl4mfn)->v.sh.back);
   2.377  
   2.378  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
   2.379      /* Only L1's may be out of sync. */
     3.1 --- a/xen/arch/x86/mm/shadow/private.h	Fri Jan 30 11:16:52 2009 +0000
     3.2 +++ b/xen/arch/x86/mm/shadow/private.h	Fri Jan 30 11:33:27 2009 +0000
     3.3 @@ -478,13 +478,11 @@ mfn_t oos_snapshot_lookup(struct vcpu *v
     3.4  // in order to make it work with our mfn type.
     3.5  #undef mfn_to_page
     3.6  #define mfn_to_page(_m) (frame_table + mfn_x(_m))
     3.7 -#define mfn_to_shadow_page mfn_to_page
     3.8  
     3.9  // Override page_to_mfn from asm/page.h, which was #include'd above,
    3.10  // in order to make it work with our mfn type.
    3.11  #undef page_to_mfn
    3.12  #define page_to_mfn(_pg) (_mfn((_pg) - frame_table))
    3.13 -#define shadow_page_to_mfn page_to_mfn
    3.14  
    3.15  // Override mfn_valid from asm/page.h, which was #include'd above,
    3.16  // in order to make it work with our mfn type.
    3.17 @@ -621,7 +619,7 @@ void sh_destroy_shadow(struct vcpu *v, m
    3.18  static inline int sh_get_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
    3.19  {
    3.20      u32 x, nx;
    3.21 -    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
    3.22 +    struct page_info *sp = mfn_to_page(smfn);
    3.23  
    3.24      ASSERT(mfn_valid(smfn));
    3.25  
    3.26 @@ -653,7 +651,7 @@ static inline int sh_get_ref(struct vcpu
    3.27  static inline void sh_put_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
    3.28  {
    3.29      u32 x, nx;
    3.30 -    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
    3.31 +    struct page_info *sp = mfn_to_page(smfn);
    3.32  
    3.33      ASSERT(mfn_valid(smfn));
    3.34      ASSERT(sp->count_info == 0);
    3.35 @@ -687,10 +685,10 @@ static inline void sh_put_ref(struct vcp
    3.36   * Returns 0 for failure, 1 for success. */
    3.37  static inline int sh_pin(struct vcpu *v, mfn_t smfn)
    3.38  {
    3.39 -    struct shadow_page_info *sp;
    3.40 +    struct page_info *sp;
    3.41      
    3.42      ASSERT(mfn_valid(smfn));
    3.43 -    sp = mfn_to_shadow_page(smfn);
    3.44 +    sp = mfn_to_page(smfn);
    3.45      ASSERT(sh_type_is_pinnable(v, sp->u.sh.type));
    3.46      if ( sp->u.sh.pinned )
    3.47      {
    3.48 @@ -714,10 +712,10 @@ static inline int sh_pin(struct vcpu *v,
    3.49   * of pinned shadows, and release the extra ref. */
    3.50  static inline void sh_unpin(struct vcpu *v, mfn_t smfn)
    3.51  {
    3.52 -    struct shadow_page_info *sp;
    3.53 +    struct page_info *sp;
    3.54      
    3.55      ASSERT(mfn_valid(smfn));
    3.56 -    sp = mfn_to_shadow_page(smfn);
    3.57 +    sp = mfn_to_page(smfn);
    3.58      ASSERT(sh_type_is_pinnable(v, sp->u.sh.type));
    3.59      if ( sp->u.sh.pinned )
    3.60      {
     4.1 --- a/xen/include/asm-x86/domain.h	Fri Jan 30 11:16:52 2009 +0000
     4.2 +++ b/xen/include/asm-x86/domain.h	Fri Jan 30 11:33:27 2009 +0000
     4.3 @@ -92,7 +92,7 @@ struct shadow_domain {
     4.4      pagetable_t unpaged_pagetable;
     4.5  
     4.6      /* Shadow hashtable */
     4.7 -    struct shadow_page_info **hash_table;
     4.8 +    struct page_info **hash_table;
     4.9      int hash_walking;  /* Some function is walking the hash table */
    4.10  
    4.11      /* Fast MMIO path heuristic */
     5.1 --- a/xen/include/asm-x86/mm.h	Fri Jan 30 11:16:52 2009 +0000
     5.2 +++ b/xen/include/asm-x86/mm.h	Fri Jan 30 11:33:27 2009 +0000
     5.3 @@ -20,23 +20,19 @@
     5.4  /*
     5.5   * This definition is solely for the use in struct page_info (and
     5.6   * struct page_list_head), intended to allow easy adjustment once x86-64
     5.7 - * wants to support more than 16Tb.
     5.8 + * wants to support more than 16TB.
     5.9   * 'unsigned long' should be used for MFNs everywhere else.
    5.10   */
    5.11  #define __mfn_t unsigned int
    5.12  #define PRpgmfn "08x"
    5.13  
    5.14 -#ifndef __i386__
    5.15 -# undef page_list_entry
    5.16 +#undef page_list_entry
    5.17  struct page_list_entry
    5.18  {
    5.19      __mfn_t next, prev;
    5.20  };
    5.21 -#endif
    5.22  
    5.23  struct page_info
    5.24 -/* Until all uses of the old type get cleaned up: */
    5.25 -#define shadow_page_info page_info
    5.26  {
    5.27      union {
    5.28          /* Each frame can be threaded onto a doubly-linked list.
    5.29 @@ -151,11 +147,7 @@ struct page_info
    5.30          u32 shadow_flags;
    5.31  
    5.32          /* When in use as a shadow, next shadow in this hash chain. */
    5.33 -#ifdef __i386__
    5.34 -        struct shadow_page_info *next_shadow;
    5.35 -#else
    5.36          __mfn_t next_shadow;
    5.37 -#endif
    5.38      };
    5.39  };
    5.40