ia64/xen-unstable

changeset 19138:162cdb596b9a

x86: re-arrange struct page_info members

By combining the overlay fields that are 8 bytes long (on x86-64) into
a union separate from the one used for the 4 byte wide fields, no
unnecessary padding will be inserted, while at the same time avoiding
the use of __attribute__((__packed__)) on any of the sub-structures (which
risks misaligning structure members without immediately noticing).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jan 30 11:10:43 2009 +0000 (2009-01-30)
parents 6fe44eb28f52
children 2d70ad9c3bc7
files xen/arch/x86/mm/shadow/common.c xen/arch/x86/mm/shadow/multi.c xen/arch/x86/mm/shadow/private.h xen/include/asm-x86/mm.h
line diff
     1.1 --- a/xen/arch/x86/mm/shadow/common.c	Fri Jan 30 11:09:44 2009 +0000
     1.2 +++ b/xen/arch/x86/mm/shadow/common.c	Fri Jan 30 11:10:43 2009 +0000
     1.3 @@ -1334,7 +1334,7 @@ static inline void trace_shadow_prealloc
     1.4          /* Convert smfn to gfn */
     1.5          unsigned long gfn;
     1.6          ASSERT(mfn_valid(smfn));
     1.7 -        gfn = mfn_to_gfn(d, _mfn(mfn_to_shadow_page(smfn)->u.sh.back));
     1.8 +        gfn = mfn_to_gfn(d, _mfn(mfn_to_shadow_page(smfn)->v.sh.back));
     1.9          __trace_var(TRC_SHADOW_PREALLOC_UNPIN, 0/*!tsc*/,
    1.10                      sizeof(gfn), (unsigned char*)&gfn);
    1.11      }
    1.12 @@ -1542,7 +1542,7 @@ mfn_t shadow_alloc(struct domain *d,
    1.13      while ( i != order )
    1.14      {
    1.15          i--;
    1.16 -        sp->u.sh.order = i;
    1.17 +        sp->v.free.order = i;
    1.18          page_list_add_tail(sp, &d->arch.paging.shadow.freelists[i]);
    1.19          sp += 1 << i;
    1.20      }
    1.21 @@ -1569,7 +1569,7 @@ mfn_t shadow_alloc(struct domain *d,
    1.22          sp[i].u.sh.type = shadow_type;
    1.23          sp[i].u.sh.pinned = 0;
    1.24          sp[i].u.sh.count = 0;
    1.25 -        sp[i].u.sh.back = backpointer;
    1.26 +        sp[i].v.sh.back = backpointer;
    1.27          set_next_shadow(&sp[i], NULL);
    1.28          perfc_incr(shadow_alloc_count);
    1.29      }
    1.30 @@ -1629,20 +1629,20 @@ void shadow_free(struct domain *d, mfn_t
    1.31          if ( (mfn_x(shadow_page_to_mfn(sp)) & mask) ) {
    1.32              /* Merge with predecessor block? */
    1.33              if ( ((sp-mask)->u.sh.type != PGT_none) ||
    1.34 -                 ((sp-mask)->u.sh.order != order) )
    1.35 +                 ((sp-mask)->v.free.order != order) )
    1.36                  break;
    1.37              sp -= mask;
    1.38              page_list_del(sp, &d->arch.paging.shadow.freelists[order]);
    1.39          } else {
    1.40              /* Merge with successor block? */
    1.41              if ( ((sp+mask)->u.sh.type != PGT_none) ||
    1.42 -                 ((sp+mask)->u.sh.order != order) )
    1.43 +                 ((sp+mask)->v.free.order != order) )
    1.44                  break;
    1.45              page_list_del(sp + mask, &d->arch.paging.shadow.freelists[order]);
    1.46          }
    1.47      }
    1.48  
    1.49 -    sp->u.sh.order = order;
    1.50 +    sp->v.free.order = order;
    1.51      page_list_add_tail(sp, &d->arch.paging.shadow.freelists[order]);
    1.52  }
    1.53  
    1.54 @@ -1825,7 +1825,7 @@ static unsigned int sh_set_allocation(st
    1.55                  sp[j].u.sh.count = 0;
    1.56                  sp[j].tlbflush_timestamp = 0; /* Not in any TLB */
    1.57              }
    1.58 -            sp->u.sh.order = order;
    1.59 +            sp->v.free.order = order;
    1.60              page_list_add_tail(sp, &d->arch.paging.shadow.freelists[order]);
    1.61          } 
    1.62          else if ( d->arch.paging.shadow.total_pages > pages ) 
    1.63 @@ -1904,17 +1904,17 @@ static void sh_hash_audit_bucket(struct 
    1.64          BUG_ON( sp->u.sh.type == 0 );
    1.65          BUG_ON( sp->u.sh.type > SH_type_max_shadow );
    1.66          /* Wrong bucket? */
    1.67 -        BUG_ON( sh_hash(sp->u.sh.back, sp->u.sh.type) != bucket );
    1.68 +        BUG_ON( sh_hash(sp->v.sh.back, sp->u.sh.type) != bucket );
    1.69          /* Duplicate entry? */
    1.70          for ( x = next_shadow(sp); x; x = next_shadow(x) )
    1.71 -            BUG_ON( x->u.sh.back == sp->u.sh.back &&
    1.72 +            BUG_ON( x->v.sh.back == sp->v.sh.back &&
    1.73                      x->u.sh.type == sp->u.sh.type );
    1.74          /* Follow the backpointer to the guest pagetable */
    1.75          if ( sp->u.sh.type != SH_type_fl1_32_shadow
    1.76               && sp->u.sh.type != SH_type_fl1_pae_shadow
    1.77               && sp->u.sh.type != SH_type_fl1_64_shadow )
    1.78          {
    1.79 -            struct page_info *gpg = mfn_to_page(_mfn(sp->u.sh.back));
    1.80 +            struct page_info *gpg = mfn_to_page(_mfn(sp->v.sh.back));
    1.81              /* Bad shadow flags on guest page? */
    1.82              BUG_ON( !(gpg->shadow_flags & (1<<sp->u.sh.type)) );
    1.83              /* Bad type count on guest page? */
    1.84 @@ -1930,7 +1930,7 @@ static void sh_hash_audit_bucket(struct 
    1.85                      {
    1.86                          SHADOW_ERROR("MFN %#"PRpgmfn" shadowed (by %#"PRI_mfn")"
    1.87                                       " and not OOS but has typecount %#lx\n",
    1.88 -                                     sp->u.sh.back,
    1.89 +                                     sp->v.sh.back,
    1.90                                       mfn_x(shadow_page_to_mfn(sp)), 
    1.91                                       gpg->u.inuse.type_info);
    1.92                          BUG();
    1.93 @@ -1944,7 +1944,7 @@ static void sh_hash_audit_bucket(struct 
    1.94              {
    1.95                  SHADOW_ERROR("MFN %#"PRpgmfn" shadowed (by %#"PRI_mfn")"
    1.96                               " but has typecount %#lx\n",
    1.97 -                             sp->u.sh.back, mfn_x(shadow_page_to_mfn(sp)),
    1.98 +                             sp->v.sh.back, mfn_x(shadow_page_to_mfn(sp)),
    1.99                               gpg->u.inuse.type_info);
   1.100                  BUG();
   1.101              }
   1.102 @@ -2030,7 +2030,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
   1.103      prev = NULL;
   1.104      while(sp)
   1.105      {
   1.106 -        if ( sp->u.sh.back == n && sp->u.sh.type == t )
   1.107 +        if ( sp->v.sh.back == n && sp->u.sh.type == t )
   1.108          {
   1.109              /* Pull-to-front if 'sp' isn't already the head item */
   1.110              if ( unlikely(sp != d->arch.paging.shadow.hash_table[key]) )
   1.111 @@ -2197,7 +2197,7 @@ void sh_destroy_shadow(struct vcpu *v, m
   1.112             t == SH_type_fl1_64_shadow  || 
   1.113             t == SH_type_monitor_table  || 
   1.114             (is_pv_32on64_vcpu(v) && t == SH_type_l4_64_shadow) ||
   1.115 -           (page_get_owner(mfn_to_page(_mfn(sp->u.sh.back)))
   1.116 +           (page_get_owner(mfn_to_page(_mfn(sp->v.sh.back)))
   1.117              == v->domain)); 
   1.118  
   1.119      /* The down-shifts here are so that the switch statement is on nice
     2.1 --- a/xen/arch/x86/mm/shadow/multi.c	Fri Jan 30 11:09:44 2009 +0000
     2.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Fri Jan 30 11:10:43 2009 +0000
     2.3 @@ -974,7 +974,7 @@ static int shadow_set_l2e(struct vcpu *v
     2.4  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     2.5          {
     2.6              struct shadow_page_info *sp = mfn_to_shadow_page(sl1mfn);
     2.7 -            mfn_t gl1mfn = _mfn(sp->u.sh.back);
     2.8 +            mfn_t gl1mfn = _mfn(sp->v.sh.back);
     2.9  
    2.10              /* If the shadow is a fl1 then the backpointer contains
    2.11                 the GFN instead of the GMFN, and it's definitely not
    2.12 @@ -1926,7 +1926,7 @@ void sh_destroy_l4_shadow(struct vcpu *v
    2.13      ASSERT(t == SH_type_l4_shadow);
    2.14  
    2.15      /* Record that the guest page isn't shadowed any more (in this type) */
    2.16 -    gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
    2.17 +    gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
    2.18      delete_shadow_status(v, gmfn, t, smfn);
    2.19      shadow_demote(v, gmfn, t);
    2.20      /* Decrement refcounts of all the old entries */
    2.21 @@ -1955,7 +1955,7 @@ void sh_destroy_l3_shadow(struct vcpu *v
    2.22      ASSERT(t == SH_type_l3_shadow);
    2.23  
    2.24      /* Record that the guest page isn't shadowed any more (in this type) */
    2.25 -    gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
    2.26 +    gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
    2.27      delete_shadow_status(v, gmfn, t, smfn);
    2.28      shadow_demote(v, gmfn, t);
    2.29  
    2.30 @@ -1990,7 +1990,7 @@ void sh_destroy_l2_shadow(struct vcpu *v
    2.31  #endif
    2.32  
    2.33      /* Record that the guest page isn't shadowed any more (in this type) */
    2.34 -    gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
    2.35 +    gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
    2.36      delete_shadow_status(v, gmfn, t, smfn);
    2.37      shadow_demote(v, gmfn, t);
    2.38  
    2.39 @@ -2020,12 +2020,12 @@ void sh_destroy_l1_shadow(struct vcpu *v
    2.40      /* Record that the guest page isn't shadowed any more (in this type) */
    2.41      if ( t == SH_type_fl1_shadow )
    2.42      {
    2.43 -        gfn_t gfn = _gfn(mfn_to_shadow_page(smfn)->u.sh.back);
    2.44 +        gfn_t gfn = _gfn(mfn_to_shadow_page(smfn)->v.sh.back);
    2.45          delete_fl1_shadow_status(v, gfn, smfn);
    2.46      }
    2.47      else 
    2.48      {
    2.49 -        mfn_t gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
    2.50 +        mfn_t gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
    2.51          delete_shadow_status(v, gmfn, t, smfn);
    2.52          shadow_demote(v, gmfn, t);
    2.53      }
    2.54 @@ -2349,7 +2349,7 @@ static int validate_gl1e(struct vcpu *v,
    2.55      result |= shadow_set_l1e(v, sl1p, new_sl1e, sl1mfn);
    2.56  
    2.57  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
    2.58 -    gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
    2.59 +    gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
    2.60      if ( mfn_valid(gl1mfn) 
    2.61           && mfn_is_out_of_sync(gl1mfn) )
    2.62      {
    2.63 @@ -2968,7 +2968,7 @@ static int sh_page_fault(struct vcpu *v,
    2.64                                         sizeof(sl2e)) != 0)
    2.65                       || !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT)
    2.66                       || !mfn_valid(gl1mfn = _mfn(mfn_to_shadow_page(
    2.67 -                                      shadow_l2e_get_mfn(sl2e))->u.sh.back))
    2.68 +                                      shadow_l2e_get_mfn(sl2e))->v.sh.back))
    2.69                       || unlikely(mfn_is_out_of_sync(gl1mfn)) )
    2.70                 {
    2.71                     /* Hit the slow path as if there had been no 
    2.72 @@ -3530,7 +3530,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
    2.73  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
    2.74      /* Check to see if the SL1 is out of sync. */
    2.75      {
    2.76 -        mfn_t gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
    2.77 +        mfn_t gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
    2.78          struct page_info *pg = mfn_to_page(gl1mfn);
    2.79          if ( mfn_valid(gl1mfn) 
    2.80               && page_is_out_of_sync(pg) )
    2.81 @@ -3560,7 +3560,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
    2.82              }
    2.83  
    2.84              sl1mfn = shadow_l2e_get_mfn(sl2e);
    2.85 -            gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
    2.86 +            gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
    2.87              pg = mfn_to_page(gl1mfn);
    2.88              
    2.89              if ( likely(sh_mfn_is_a_page_table(gl1mfn)
    2.90 @@ -4887,7 +4887,7 @@ int sh_audit_l1_table(struct vcpu *v, mf
    2.91      int done = 0;
    2.92      
    2.93      /* Follow the backpointer */
    2.94 -    gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
    2.95 +    gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
    2.96  
    2.97  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
    2.98      /* Out-of-sync l1 shadows can contain anything: just check the OOS hash */
    2.99 @@ -4977,7 +4977,7 @@ int sh_audit_l2_table(struct vcpu *v, mf
   2.100      int done = 0;
   2.101  
   2.102      /* Follow the backpointer */
   2.103 -    gl2mfn = _mfn(mfn_to_shadow_page(sl2mfn)->u.sh.back);
   2.104 +    gl2mfn = _mfn(mfn_to_shadow_page(sl2mfn)->v.sh.back);
   2.105  
   2.106  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
   2.107      /* Only L1's may be out of sync. */
   2.108 @@ -5026,7 +5026,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
   2.109      int done = 0;
   2.110  
   2.111      /* Follow the backpointer */
   2.112 -    gl3mfn = _mfn(mfn_to_shadow_page(sl3mfn)->u.sh.back);
   2.113 +    gl3mfn = _mfn(mfn_to_shadow_page(sl3mfn)->v.sh.back);
   2.114  
   2.115  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
   2.116      /* Only L1's may be out of sync. */
   2.117 @@ -5073,7 +5073,7 @@ int sh_audit_l4_table(struct vcpu *v, mf
   2.118      int done = 0;
   2.119  
   2.120      /* Follow the backpointer */
   2.121 -    gl4mfn = _mfn(mfn_to_shadow_page(sl4mfn)->u.sh.back);
   2.122 +    gl4mfn = _mfn(mfn_to_shadow_page(sl4mfn)->v.sh.back);
   2.123  
   2.124  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
   2.125      /* Only L1's may be out of sync. */
     3.1 --- a/xen/arch/x86/mm/shadow/private.h	Fri Jan 30 11:09:44 2009 +0000
     3.2 +++ b/xen/arch/x86/mm/shadow/private.h	Fri Jan 30 11:10:43 2009 +0000
     3.3 @@ -631,7 +631,7 @@ static inline int sh_get_ref(struct vcpu
     3.4      if ( unlikely(nx >= 1U<<26) )
     3.5      {
     3.6          SHADOW_PRINTK("shadow ref overflow, gmfn=%" PRpgmfn " smfn=%lx\n",
     3.7 -                       sp->u.sh.back, mfn_x(smfn));
     3.8 +                       sp->v.sh.back, mfn_x(smfn));
     3.9          return 0;
    3.10      }
    3.11      
     4.1 --- a/xen/include/asm-x86/mm.h	Fri Jan 30 11:09:44 2009 +0000
     4.2 +++ b/xen/include/asm-x86/mm.h	Fri Jan 30 11:10:43 2009 +0000
     4.3 @@ -15,7 +15,7 @@
     4.4   *  1. 'struct page_info' contains a 'struct page_list_entry list'.
     4.5   *  2. Provide a PFN_ORDER() macro for accessing the order of a free page.
     4.6   */
     4.7 -#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
     4.8 +#define PFN_ORDER(_pfn) ((_pfn)->v.free.order)
     4.9  
    4.10  /*
    4.11   * This definition is solely for the use in struct page_info (and
    4.12 @@ -59,8 +59,6 @@ struct page_info
    4.13  
    4.14          /* Page is in use: ((count_info & PGC_count_mask) != 0). */
    4.15          struct {
    4.16 -            /* Owner of this page (NULL if page is anonymous). */
    4.17 -            u32 _domain; /* pickled format */
    4.18              /* Type reference count and various PGT_xxx flags and fields. */
    4.19              unsigned long type_info;
    4.20          } inuse;
    4.21 @@ -70,18 +68,10 @@ struct page_info
    4.22              unsigned long type:5;   /* What kind of shadow is this? */
    4.23              unsigned long pinned:1; /* Is the shadow pinned? */
    4.24              unsigned long count:26; /* Reference count */
    4.25 -            union {
    4.26 -                /* When in use, GMFN of guest page we're a shadow of. */
    4.27 -                __mfn_t back;
    4.28 -                /* When free, order of the freelist we're on. */
    4.29 -                unsigned int order;
    4.30 -            };
    4.31          } sh;
    4.32  
    4.33          /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
    4.34          struct {
    4.35 -            /* Order-size of the free chunk this page is the head of. */
    4.36 -            u32 order;
    4.37              /* Mask of possibly-tainted TLBs. */
    4.38              cpumask_t cpumask;
    4.39          } free;
    4.40 @@ -89,6 +79,28 @@ struct page_info
    4.41      } u;
    4.42  
    4.43      union {
    4.44 +
    4.45 +        /* Page is in use, but not as a shadow. */
    4.46 +        struct {
    4.47 +            /* Owner of this page (NULL if page is anonymous). */
    4.48 +            u32 _domain; /* pickled format */
    4.49 +        } inuse;
    4.50 +
    4.51 +        /* Page is in use as a shadow. */
    4.52 +        struct {
    4.53 +            /* GMFN of guest page we're a shadow of. */
    4.54 +            __mfn_t back;
    4.55 +        } sh;
    4.56 +
    4.57 +        /* Page is on a free list (including shadow code free lists). */
    4.58 +        struct {
    4.59 +            /* Order-size of the free chunk this page is the head of. */
    4.60 +            unsigned int order;
    4.61 +        } free;
    4.62 +
    4.63 +    } v;
    4.64 +
    4.65 +    union {
    4.66          /*
    4.67           * Timestamp from 'TLB clock', used to avoid extra safety flushes.
    4.68           * Only valid for: a) free pages, and b) pages with zero type count
    4.69 @@ -225,10 +237,10 @@ struct page_info
    4.70  #define SHADOW_OOS_FIXUPS 2
    4.71  
    4.72  #define page_get_owner(_p)                                              \
    4.73 -    ((struct domain *)((_p)->u.inuse._domain ?                          \
    4.74 -                       mfn_to_virt((_p)->u.inuse._domain) : NULL))
    4.75 +    ((struct domain *)((_p)->v.inuse._domain ?                          \
    4.76 +                       mfn_to_virt((_p)->v.inuse._domain) : NULL))
    4.77  #define page_set_owner(_p,_d)                                           \
    4.78 -    ((_p)->u.inuse._domain = (_d) ? virt_to_mfn(_d) : 0)
    4.79 +    ((_p)->v.inuse._domain = (_d) ? virt_to_mfn(_d) : 0)
    4.80  
    4.81  #define maddr_get_owner(ma)   (page_get_owner(maddr_to_page((ma))))
    4.82  #define vaddr_get_owner(va)   (page_get_owner(virt_to_page((va))))