ia64/xen-unstable

changeset 17984:9a1d98a9d21b

OOS cleanup: Fixup arrays instead of fixup tables.

This patch avoids compromising with evil concepts (reverse maps) in the
shadow code and limits the number of writable mappings of OOS pages to
a fixed maximum.

The code is simpler and performance does not degrade.
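
As a rough sketch of the new mechanism (stand-in types and names, not
the patch itself): each OOS page carries a small array of at most
SHADOW_OOS_FIXUPS writable-mapping records, and a round-robin next
index evicts the oldest record when a new mapping shows up.

    #include <stdio.h>

    #define SHADOW_OOS_FIXUPS 2      /* max writable mappings tracked */
    #define INVALID_MFN       (~0UL)

    typedef unsigned long mfn_t;     /* stand-in; Xen wraps this in a struct */

    struct oos_fixup {
        int next;                             /* next slot to (re)use */
        mfn_t smfn[SHADOW_OOS_FIXUPS];        /* shadow page holding the l1e */
        unsigned long off[SHADOW_OOS_FIXUPS]; /* byte offset of the l1e */
    };

    /* Record one writable mapping of an OOS page.  If the array is full,
     * evict the oldest record; the real code also strips write access
     * from the evicted l1e and relies on a TLB flush at resync time. */
    static void fixup_add(struct oos_fixup *f, mfn_t smfn, unsigned long off)
    {
        int n = f->next;

        if ( f->smfn[n] != INVALID_MFN )
            printf("evict: drop write access via smfn %#lx off %#lx\n",
                   f->smfn[n], f->off[n]);
        f->smfn[n] = smfn;
        f->off[n]  = off;
        f->next    = (n + 1) % SHADOW_OOS_FIXUPS;
    }

    int main(void)
    {
        struct oos_fixup f = { 0, { INVALID_MFN, INVALID_MFN }, { 0, 0 } };

        fixup_add(&f, 0x100, 0x18);   /* fills slot 0 */
        fixup_add(&f, 0x200, 0x20);   /* fills slot 1 */
        fixup_add(&f, 0x300, 0x28);   /* full: evicts slot 0 */
        return 0;
    }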

Signed-off-by: Gianluca Guida <gianluca.guida@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Sat Jul 05 14:01:27 2008 +0100 (2008-07-05)
parents 30c20e467f0b
children 5b7e60d70394
files xen/arch/x86/mm/shadow/common.c xen/arch/x86/mm/shadow/multi.c xen/arch/x86/mm/shadow/private.h xen/include/asm-x86/domain.h xen/include/asm-x86/mm.h xen/include/asm-x86/perfc_defn.h
line diff
     1.1 --- a/xen/arch/x86/mm/shadow/common.c	Sat Jul 05 14:00:19 2008 +0100
     1.2 +++ b/xen/arch/x86/mm/shadow/common.c	Sat Jul 05 14:01:27 2008 +0100
     1.3 @@ -69,12 +69,14 @@ void shadow_domain_init(struct domain *d
     1.4  void shadow_vcpu_init(struct vcpu *v)
     1.5  {
     1.6  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     1.7 -    int i;
     1.8 +    int i, j;
     1.9  
    1.10      for ( i = 0; i < SHADOW_OOS_PAGES; i++ )
    1.11      {
    1.12          v->arch.paging.shadow.oos[i] = _mfn(INVALID_MFN);
    1.13          v->arch.paging.shadow.oos_snapshot[i] = _mfn(INVALID_MFN);
    1.14 +        for ( j = 0; j < SHADOW_OOS_FIXUPS; j++ )
    1.15 +            v->arch.paging.shadow.oos_fixup[i].smfn[j] = _mfn(INVALID_MFN);
    1.16      }
    1.17  #endif
    1.18  
    1.19 @@ -579,132 +581,86 @@ static inline void _sh_resync_l1(struct 
    1.20  #endif
    1.21  }
    1.22  
    1.23 -#define _FIXUP_IDX(_b, _i) ((_b) * SHADOW_OOS_FT_HASH + (_i))
    1.24 +
    1.25 +/*
    1.26 + * Fixup arrays: We limit the maximum number of writable mappings to
    1.27 + * SHADOW_OOS_FIXUPS and store enough information to remove them
    1.28 + * quickly on resync.
    1.29 + */
    1.30 +
    1.31 +static inline int oos_fixup_flush_gmfn(struct vcpu *v, mfn_t gmfn,
    1.32 +                                       struct oos_fixup *fixup)
    1.33 +{
    1.34 +    int i;
    1.35 +    for ( i = 0; i < SHADOW_OOS_FIXUPS; i++ )
    1.36 +    {
    1.37 +        if ( mfn_x(fixup->smfn[i]) != INVALID_MFN )
    1.38 +        {
    1.39 +            sh_remove_write_access_from_sl1p(v, gmfn,
    1.40 +                                             fixup->smfn[i], 
    1.41 +                                             fixup->off[i]);
    1.42 +            fixup->smfn[i] = _mfn(INVALID_MFN);
    1.43 +        }
    1.44 +    }
    1.45 +
    1.46 +    /* Always flush the TLBs. See comment on oos_fixup_add(). */
    1.47 +    return 1;
    1.48 +}
    1.49  
    1.50  void oos_fixup_add(struct vcpu *v, mfn_t gmfn,
    1.51 -                   mfn_t smfn, unsigned long off)
    1.52 +                   mfn_t smfn,  unsigned long off)
    1.53  {
    1.54 -    int idx, i, free = 0, free_slot = 0;
    1.55 -    struct oos_fixup *fixups = v->arch.paging.shadow.oos_fixups;
    1.56 -
    1.57 -    idx = mfn_x(gmfn) % SHADOW_OOS_FT_HASH;
    1.58 -    for ( i = 0; i < SHADOW_OOS_FT_ENTRIES; i++ )
    1.59 +    int idx, next;
    1.60 +    mfn_t *oos;
    1.61 +    struct oos_fixup *oos_fixup;
    1.62 +    struct domain *d = v->domain;
    1.63 +
    1.64 +    perfc_incr(shadow_oos_fixup_add);
    1.65 +    
    1.66 +    for_each_vcpu(d, v) 
    1.67      {
    1.68 -        if ( !mfn_valid(fixups[_FIXUP_IDX(idx, i)].gmfn)
    1.69 -             || !mfn_is_out_of_sync(fixups[_FIXUP_IDX(idx, i)].gmfn) )
    1.70 +        oos = v->arch.paging.shadow.oos;
    1.71 +        oos_fixup = v->arch.paging.shadow.oos_fixup;
    1.72 +        idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
    1.73 +        if ( mfn_x(oos[idx]) != mfn_x(gmfn) )
    1.74 +            idx = (idx + 1) % SHADOW_OOS_PAGES;
    1.75 +        if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
    1.76          {
    1.77 -            free = 1;
    1.78 -            free_slot = _FIXUP_IDX(idx, i);
    1.79 -        }
    1.80 -        else if ( (mfn_x(fixups[_FIXUP_IDX(idx, i)].gmfn) == mfn_x(gmfn))
    1.81 -                  && (mfn_x(fixups[_FIXUP_IDX(idx, i)].smfn) == mfn_x(smfn))
    1.82 -                  && (fixups[_FIXUP_IDX(idx, i)].off == off) )
    1.83 -        {
    1.84 -            perfc_incr(shadow_oos_fixup_no_add);
    1.85 +            next = oos_fixup[idx].next;
    1.86 +
    1.87 +            if ( mfn_x(oos_fixup[idx].smfn[next]) != INVALID_MFN )
    1.88 +            {
    1.89 +                /* Reuse this slot and remove current writable mapping. */
    1.90 +                sh_remove_write_access_from_sl1p(v, gmfn, 
    1.91 +                                                 oos_fixup[idx].smfn[next],
    1.92 +                                                 oos_fixup[idx].off[next]);
    1.93 +                perfc_incr(shadow_oos_fixup_evict);
    1.94 +                /* We should flush the TLBs now, because we removed a
    1.95 +                   writable mapping, but since the shadow is already
    1.96 +                   OOS we have no problem if another vcpu writes to
    1.97 +                   this page table. We just have to be very careful to
    1.98 +                   *always* flush the TLBs on resync. */
    1.99 +            }
   1.100 +
   1.101 +            oos_fixup[idx].smfn[next] = smfn;
   1.102 +            oos_fixup[idx].off[next] = off;
   1.103 +            oos_fixup[idx].next = (next + 1) % SHADOW_OOS_FIXUPS;
   1.104              return;
   1.105          }
   1.106      }
   1.107  
   1.108 -    if ( free )
   1.109 -    {
   1.110 -        if ( !v->arch.paging.shadow.oos_fixup_used )
   1.111 -            v->arch.paging.shadow.oos_fixup_used = 1;
   1.112 -        fixups[free_slot].gmfn = gmfn;
   1.113 -        fixups[free_slot].smfn = smfn;
   1.114 -        fixups[free_slot].off = off;
   1.115 -        perfc_incr(shadow_oos_fixup_add_ok);
   1.116 -        return;
   1.117 -    }
   1.118 -
   1.119 -
   1.120 -    perfc_incr(shadow_oos_fixup_add_fail);
   1.121 -}
   1.122 -
   1.123 -void oos_fixup_remove(struct vcpu *v, mfn_t gmfn)
   1.124 -{
   1.125 -    int idx, i;
   1.126 -    struct domain *d = v->domain;
   1.127 -
   1.128 -    perfc_incr(shadow_oos_fixup_remove);
   1.129 -
   1.130 -    /* If the domain is dying we might get called when deallocating
   1.131 -     * the shadows. Fixup tables are already freed so exit now. */
   1.132 -    if ( d->is_dying )
   1.133 -        return;
   1.134 -
   1.135 -    idx = mfn_x(gmfn) % SHADOW_OOS_FT_HASH;
   1.136 -    for_each_vcpu(d, v)
   1.137 -    {
   1.138 -        struct oos_fixup *fixups = v->arch.paging.shadow.oos_fixups;
   1.139 -        for ( i = 0; i < SHADOW_OOS_FT_ENTRIES; i++ )
   1.140 -            if ( mfn_x(fixups[_FIXUP_IDX(idx, i)].gmfn) == mfn_x(gmfn) )
   1.141 -                fixups[_FIXUP_IDX(idx, i)].gmfn = _mfn(INVALID_MFN);
   1.142 -    }
   1.143 +    SHADOW_ERROR("gmfn %lx was OOS but not in hash table\n", mfn_x(gmfn));
   1.144 +    BUG();
   1.145  }
   1.146  
   1.147 -int oos_fixup_flush(struct vcpu *v)
   1.148 -{
   1.149 -    int i, rc = 0;
   1.150 -    struct oos_fixup *fixups = v->arch.paging.shadow.oos_fixups;
   1.151 -
   1.152 -    perfc_incr(shadow_oos_fixup_flush);
   1.153 -
   1.154 -    if ( !v->arch.paging.shadow.oos_fixup_used )
   1.155 -        return 0;
   1.156 -
   1.157 -    for ( i = 0; i < SHADOW_OOS_FT_HASH * SHADOW_OOS_FT_ENTRIES; i++ )
   1.158 -    {
   1.159 -        if ( mfn_valid(fixups[i].gmfn) )
   1.160 -        {
   1.161 -            if ( mfn_is_out_of_sync(fixups[i].gmfn) )
   1.162 -                rc |= sh_remove_write_access_from_sl1p(v, fixups[i].gmfn,
   1.163 -                                                       fixups[i].smfn,
   1.164 -                                                       fixups[i].off);
   1.165 -            fixups[i].gmfn = _mfn(INVALID_MFN);
   1.166 -        }
   1.167 -    }
   1.168 -
   1.169 -    v->arch.paging.shadow.oos_fixup_used = 0;
   1.170 -
   1.171 -    return rc;
   1.172 -}
   1.173 -
   1.174 -int oos_fixup_flush_gmfn(struct vcpu *v, mfn_t gmfn)
   1.175 -{
   1.176 -    int idx, i, rc = 0;
   1.177 -    struct domain *d = v->domain;
   1.178 -
   1.179 -    perfc_incr(shadow_oos_fixup_flush_gmfn);
   1.180 -
   1.181 -    idx = mfn_x(gmfn) % SHADOW_OOS_FT_HASH;
   1.182 -    for_each_vcpu(d, v)
   1.183 -    {
   1.184 -        struct oos_fixup *fixups = v->arch.paging.shadow.oos_fixups;
   1.185 -
   1.186 -        for ( i = 0; i < SHADOW_OOS_FT_ENTRIES; i++ )
   1.187 -        {
   1.188 -            if ( mfn_x(fixups[_FIXUP_IDX(idx, i)].gmfn) != mfn_x(gmfn) )
   1.189 -                continue;
   1.190 -
   1.191 -            rc |= sh_remove_write_access_from_sl1p(v, 
   1.192 -                                                   fixups[_FIXUP_IDX(idx,i)].gmfn,
   1.193 -                                                   fixups[_FIXUP_IDX(idx,i)].smfn,
   1.194 -                                                   fixups[_FIXUP_IDX(idx,i)].off);
   1.195 -
   1.196 -            fixups[_FIXUP_IDX(idx,i)].gmfn = _mfn(INVALID_MFN);
   1.197 -        }
   1.198 -    }
   1.199 -
   1.200 -    return rc;
   1.201 -}
   1.202 -
   1.203 -static int oos_remove_write_access(struct vcpu *v, mfn_t gmfn, unsigned long va)
   1.204 +static int oos_remove_write_access(struct vcpu *v, mfn_t gmfn,
   1.205 +                                   struct oos_fixup *fixup)
   1.206  {
   1.207      int ftlb = 0;
   1.208  
   1.209 -    ftlb |= oos_fixup_flush_gmfn(v, gmfn);
   1.210 -
   1.211 -    switch ( sh_remove_write_access(v, gmfn, 0, va) )
   1.212 +    ftlb |= oos_fixup_flush_gmfn(v, gmfn, fixup);
   1.213 +
   1.214 +    switch ( sh_remove_write_access(v, gmfn, 0, 0) )
   1.215      {
   1.216      default:
   1.217      case 0:
   1.218 @@ -732,7 +688,8 @@ static int oos_remove_write_access(struc
   1.219  
   1.220  
   1.221  /* Pull all the entries on an out-of-sync page back into sync. */
   1.222 -static void _sh_resync(struct vcpu *v, mfn_t gmfn, unsigned long va, mfn_t snp)
   1.223 +static void _sh_resync(struct vcpu *v, mfn_t gmfn,
   1.224 +                       struct oos_fixup *fixup, mfn_t snp)
   1.225  {
   1.226      struct page_info *pg = mfn_to_page(gmfn);
   1.227  
   1.228 @@ -747,7 +704,7 @@ static void _sh_resync(struct vcpu *v, m
   1.229                    v->domain->domain_id, v->vcpu_id, mfn_x(gmfn), va);
   1.230  
   1.231      /* Need to pull write access so the page *stays* in sync. */
   1.232 -    if ( oos_remove_write_access(v, gmfn, va) )
   1.233 +    if ( oos_remove_write_access(v, gmfn, fixup) )
   1.234      {
   1.235          /* Page has been unshadowed. */
   1.236          return;
   1.237 @@ -766,13 +723,17 @@ static void _sh_resync(struct vcpu *v, m
   1.238  
   1.239  
   1.240  /* Add an MFN to the list of out-of-sync guest pagetables */
   1.241 -static void oos_hash_add(struct vcpu *v, mfn_t gmfn, unsigned long va)
   1.242 +static void oos_hash_add(struct vcpu *v, mfn_t gmfn)
   1.243  {
   1.244 -    int idx, oidx, swap = 0;
   1.245 +    int i, idx, oidx, swap = 0;
   1.246      void *gptr, *gsnpptr;
   1.247      mfn_t *oos = v->arch.paging.shadow.oos;
   1.248 -    unsigned long *oos_va = v->arch.paging.shadow.oos_va;
   1.249      mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
   1.250 +    struct oos_fixup *oos_fixup = v->arch.paging.shadow.oos_fixup;
   1.251 +    struct oos_fixup fixup = { .next = 0 };
   1.252 +    
   1.253 +    for (i = 0; i < SHADOW_OOS_FIXUPS; i++ )
   1.254 +        fixup.smfn[i] = _mfn(INVALID_MFN);
   1.255  
   1.256      idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
   1.257      oidx = idx;
   1.258 @@ -782,18 +743,18 @@ static void oos_hash_add(struct vcpu *v,
   1.259      {
   1.260          /* Punt the current occupant into the next slot */
   1.261          SWAP(oos[idx], gmfn);
   1.262 -        SWAP(oos_va[idx], va);
   1.263 +        SWAP(oos_fixup[idx], fixup);
   1.264          swap = 1;
   1.265          idx = (idx + 1) % SHADOW_OOS_PAGES;
   1.266      }
   1.267      if ( mfn_valid(oos[idx]) )
   1.268      {
   1.269          /* Crush the current occupant. */
   1.270 -        _sh_resync(v, oos[idx], oos_va[idx], oos_snapshot[idx]);
   1.271 +        _sh_resync(v, oos[idx], &oos_fixup[idx], oos_snapshot[idx]);
   1.272          perfc_incr(shadow_unsync_evict);
   1.273      }
   1.274      oos[idx] = gmfn;
   1.275 -    oos_va[idx] = va;
   1.276 +    oos_fixup[idx] = fixup;
   1.277  
   1.278      if ( swap )
   1.279          SWAP(oos_snapshot[idx], oos_snapshot[oidx]);
   1.280 @@ -862,14 +823,14 @@ void sh_resync(struct vcpu *v, mfn_t gmf
   1.281  {
   1.282      int idx;
   1.283      mfn_t *oos;
   1.284 -    unsigned long *oos_va;
   1.285      mfn_t *oos_snapshot;
   1.286 +    struct oos_fixup *oos_fixup;
   1.287      struct domain *d = v->domain;
   1.288  
   1.289      for_each_vcpu(d, v) 
   1.290      {
   1.291          oos = v->arch.paging.shadow.oos;
   1.292 -        oos_va = v->arch.paging.shadow.oos_va;
   1.293 +        oos_fixup = v->arch.paging.shadow.oos_fixup;
   1.294          oos_snapshot = v->arch.paging.shadow.oos_snapshot;
   1.295          idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
   1.296          if ( mfn_x(oos[idx]) != mfn_x(gmfn) )
   1.297 @@ -877,7 +838,7 @@ void sh_resync(struct vcpu *v, mfn_t gmf
   1.298          
   1.299          if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
   1.300          {
   1.301 -            _sh_resync(v, gmfn, oos_va[idx], oos_snapshot[idx]);
   1.302 +            _sh_resync(v, gmfn, &oos_fixup[idx], oos_snapshot[idx]);
   1.303              oos[idx] = _mfn(INVALID_MFN);
   1.304              return;
   1.305          }
   1.306 @@ -917,8 +878,8 @@ void sh_resync_all(struct vcpu *v, int s
   1.307      int idx;
   1.308      struct vcpu *other;
   1.309      mfn_t *oos = v->arch.paging.shadow.oos;
   1.310 -    unsigned long *oos_va = v->arch.paging.shadow.oos_va;
   1.311      mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
   1.312 +    struct oos_fixup *oos_fixup = v->arch.paging.shadow.oos_fixup;
   1.313  
   1.314      SHADOW_PRINTK("d=%d, v=%d\n", v->domain->domain_id, v->vcpu_id);
   1.315  
   1.316 @@ -930,15 +891,12 @@ void sh_resync_all(struct vcpu *v, int s
   1.317      if ( do_locking )
   1.318          shadow_lock(v->domain);
   1.319  
   1.320 -    if ( oos_fixup_flush(v) )
   1.321 -        flush_tlb_mask(v->domain->domain_dirty_cpumask);    
   1.322 -
   1.323      /* First: resync all of this vcpu's oos pages */
   1.324      for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ ) 
   1.325          if ( mfn_valid(oos[idx]) )
   1.326          {
   1.327              /* Write-protect and sync contents */
   1.328 -            _sh_resync(v, oos[idx], oos_va[idx], oos_snapshot[idx]);
   1.329 +            _sh_resync(v, oos[idx], &oos_fixup[idx], oos_snapshot[idx]);
   1.330              oos[idx] = _mfn(INVALID_MFN);
   1.331          }
   1.332  
   1.333 @@ -959,8 +917,9 @@ void sh_resync_all(struct vcpu *v, int s
   1.334              shadow_lock(v->domain);
   1.335  
   1.336          oos = other->arch.paging.shadow.oos;
   1.337 -        oos_va = other->arch.paging.shadow.oos_va;
   1.338 +        oos_fixup = other->arch.paging.shadow.oos_fixup;
   1.339          oos_snapshot = other->arch.paging.shadow.oos_snapshot;
   1.340 +
   1.341          for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ ) 
   1.342          {
   1.343              if ( !mfn_valid(oos[idx]) )
   1.344 @@ -976,7 +935,7 @@ void sh_resync_all(struct vcpu *v, int s
   1.345              else
   1.346              {
   1.347                  /* Write-protect and sync contents */
   1.348 -                _sh_resync(other, oos[idx], oos_va[idx], oos_snapshot[idx]);
   1.349 +                _sh_resync(other, oos[idx], &oos_fixup[idx], oos_snapshot[idx]);
   1.350                  oos[idx] = _mfn(INVALID_MFN);
   1.351              }
   1.352          }
   1.353 @@ -987,7 +946,7 @@ void sh_resync_all(struct vcpu *v, int s
   1.354  }
   1.355  
   1.356  /* Allow a shadowed page to go out of sync */
   1.357 -int sh_unsync(struct vcpu *v, mfn_t gmfn, unsigned long va)
   1.358 +int sh_unsync(struct vcpu *v, mfn_t gmfn)
   1.359  {
   1.360      struct page_info *pg;
   1.361      
   1.362 @@ -1009,7 +968,7 @@ int sh_unsync(struct vcpu *v, mfn_t gmfn
   1.363          return 0;
   1.364  
   1.365      pg->shadow_flags |= SHF_out_of_sync|SHF_oos_may_write;
   1.366 -    oos_hash_add(v, gmfn, va);
   1.367 +    oos_hash_add(v, gmfn);
   1.368      perfc_incr(shadow_unsync);
   1.369      return 1;
   1.370  }
   1.371 @@ -1064,7 +1023,6 @@ void shadow_demote(struct vcpu *v, mfn_t
   1.372          if ( page_is_out_of_sync(page) ) 
   1.373          {
   1.374              oos_hash_remove(v, gmfn);
   1.375 -            oos_fixup_remove(v, gmfn);
   1.376          }
   1.377  #endif 
   1.378          clear_bit(_PGC_page_table, &page->count_info);
   1.379 @@ -2804,23 +2762,6 @@ static void sh_update_paging_modes(struc
   1.380  #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
   1.381  
   1.382  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
   1.383 -    if ( v->arch.paging.shadow.oos_fixups == NULL )
   1.384 -    {
   1.385 -        int i;
   1.386 -        v->arch.paging.shadow.oos_fixups =
   1.387 -            alloc_xenheap_pages(SHADOW_OOS_FT_ORDER);
   1.388 -        if ( v->arch.paging.shadow.oos_fixups == NULL )
   1.389 -        {
   1.390 -            SHADOW_ERROR("Could not allocate OOS fixup table"
   1.391 -                         " for dom %u vcpu %u\n",
   1.392 -                         v->domain->domain_id, v->vcpu_id);
   1.393 -            domain_crash(v->domain);
   1.394 -            return;
   1.395 -        }
   1.396 -        for ( i = 0; i < SHADOW_OOS_FT_HASH * SHADOW_OOS_FT_ENTRIES; i++ )
   1.397 -            v->arch.paging.shadow.oos_fixups[i].gmfn = _mfn(INVALID_MFN);
   1.398 -    }
   1.399 -     
   1.400      if ( mfn_x(v->arch.paging.shadow.oos_snapshot[0]) == INVALID_MFN )
   1.401      {
   1.402          int i;
   1.403 @@ -3173,13 +3114,6 @@ void shadow_teardown(struct domain *d)
   1.404  #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
   1.405  
   1.406  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
   1.407 -        if ( v->arch.paging.shadow.oos_fixups )
   1.408 -        {
   1.409 -            free_xenheap_pages(v->arch.paging.shadow.oos_fixups,
   1.410 -                               SHADOW_OOS_FT_ORDER);
   1.411 -            v->arch.paging.shadow.oos_fixups = NULL;
   1.412 -        }
   1.413 -
   1.414          {
   1.415              int i;
   1.416              mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
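
A note on the lookup pattern used throughout this file: the per-vcpu
oos[] array is a tiny hash with exactly one linear-probe step, so a
given gmfn can only live in slot (gmfn % SHADOW_OOS_PAGES) or the slot
after it. A sketch of just that probe, with the same stand-in mfn_t as
in the sketch above:

    #define SHADOW_OOS_PAGES 3   /* matches xen/include/asm-x86/mm.h */

    typedef unsigned long mfn_t; /* stand-in, as before */

    /* Return the slot holding gmfn, or -1 if it is not in the array.
     * oos_hash_add() preserves this invariant by punting an occupant
     * at most one slot forward before evicting. */
    static int oos_find(const mfn_t oos[SHADOW_OOS_PAGES], mfn_t gmfn)
    {
        int idx = gmfn % SHADOW_OOS_PAGES;

        if ( oos[idx] != gmfn )
            idx = (idx + 1) % SHADOW_OOS_PAGES;   /* single probe step */
        return ( oos[idx] == gmfn ) ? idx : -1;
    }

This is also why the new oos_fixup_add() can BUG() on a miss: a gmfn
marked out of sync must be present in some vcpu's oos[] array.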
     2.1 --- a/xen/arch/x86/mm/shadow/multi.c	Sat Jul 05 14:00:19 2008 +0100
     2.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Sat Jul 05 14:01:27 2008 +0100
     2.3 @@ -916,7 +916,10 @@ static always_inline void
     2.4      if ( unlikely((level == 1) 
     2.5                    && sh_mfn_is_a_page_table(target_mfn)
     2.6  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC )
     2.7 -                  && !mfn_oos_may_write(target_mfn)
     2.8 +                  /* Unless the page is out of sync and the guest is
     2.9 +                     writing to it. */
    2.10 +                  && !(mfn_oos_may_write(target_mfn)
    2.11 +                       && (ft == ft_demand_write))
    2.12  #endif /* OOS */
    2.13                    ) )
    2.14      {
    2.15 @@ -3291,7 +3294,7 @@ static int sh_page_fault(struct vcpu *v,
    2.16      /* Always unsync when writing to L1 page tables. */
    2.17      if ( sh_mfn_is_a_page_table(gmfn)
    2.18           && ft == ft_demand_write )
    2.19 -        sh_unsync(v, gmfn, va);
    2.20 +        sh_unsync(v, gmfn);
    2.21  #endif /* OOS */
    2.22  
    2.23      /* Calculate the shadow entry and write it */
    2.24 @@ -3322,8 +3325,10 @@ static int sh_page_fault(struct vcpu *v,
    2.25      /* Need to emulate accesses to page tables */
    2.26      if ( sh_mfn_is_a_page_table(gmfn)
    2.27  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
    2.28 -         /* Unless they've been allowed to go out of sync with their shadows */
    2.29 -         && !mfn_is_out_of_sync(gmfn)
    2.30 +         /* Unless they've been allowed to go out of sync with their
    2.31 +            shadows and we don't need to unshadow them. */
    2.32 +         && !(mfn_is_out_of_sync(gmfn)
    2.33 +              && !(regs->error_code & PFEC_user_mode))
    2.34  #endif
    2.35           )
    2.36      {
    2.37 @@ -4350,15 +4355,8 @@ int sh_rm_write_access_from_sl1p(struct 
    2.38  
    2.39      sp = mfn_to_shadow_page(smfn);
    2.40  
    2.41 -    if ( sp->mbz != 0 ||
    2.42 -#if GUEST_PAGING_LEVELS == 4
    2.43 -         (sp->type != SH_type_l1_64_shadow)
    2.44 -#elif GUEST_PAGING_LEVELS == 3
    2.45 -         (sp->type != SH_type_l1_pae_shadow)
    2.46 -#elif GUEST_PAGING_LEVELS == 2
    2.47 -         (sp->type != SH_type_l1_32_shadow)
    2.48 -#endif
    2.49 -       )
    2.50 +    if ( sp->mbz != 0
    2.51 +         || (sp->type != SH_type_l1_shadow) )
    2.52          goto fail;
    2.53  
    2.54      sl1p = sh_map_domain_page(smfn);
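
The reworked emulation test in sh_page_fault() above boils down to a
small predicate; restated with hypothetical helper names (the logic is
the diff's, the function is only illustrative):

    /* Emulate a write that hit a guest pagetable, unless the page is
     * already out of sync and the fault came from supervisor mode:
     * the guest kernel's own pagetable writes can go straight through
     * and be picked up at the next resync, while user-mode hits still
     * go through emulation. */
    static int must_emulate_pt_write(int is_page_table, int is_out_of_sync,
                                     int user_mode_fault)
    {
        return is_page_table && !(is_out_of_sync && !user_mode_fault);
    }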
     3.1 --- a/xen/arch/x86/mm/shadow/private.h	Sat Jul 05 14:00:19 2008 +0100
     3.2 +++ b/xen/arch/x86/mm/shadow/private.h	Sat Jul 05 14:01:27 2008 +0100
     3.3 @@ -323,15 +323,6 @@ static inline int sh_type_is_pinnable(st
     3.4  #define SHF_out_of_sync (1u<<30)
     3.5  #define SHF_oos_may_write (1u<<29)
     3.6  
     3.7 -/* Fixup tables are a non-complete writable-mappings reverse map for
     3.8 -   OOS pages. This let us quickly resync pages (avoiding brute-force
     3.9 -   search of the shadows) when the va hint is not sufficient (i.e.,
    3.10 -   the pagetable is mapped in multiple places and in multiple
    3.11 -   shadows.) */
    3.12 -#define SHADOW_OOS_FT_ENTRIES                           \
    3.13 -    ((PAGE_SIZE << SHADOW_OOS_FT_ORDER)                 \
    3.14 -     / (SHADOW_OOS_FT_HASH * sizeof(struct oos_fixup)))
    3.15 -
    3.16  #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) */
    3.17  
    3.18  static inline int sh_page_has_multiple_shadows(struct page_info *pg)
    3.19 @@ -421,7 +412,7 @@ int shadow_cmpxchg_guest_entry(struct vc
    3.20  
    3.21  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
    3.22  /* Allow a shadowed page to go out of sync */
    3.23 -int sh_unsync(struct vcpu *v, mfn_t gmfn, unsigned long va);
    3.24 +int sh_unsync(struct vcpu *v, mfn_t gmfn);
    3.25  
    3.26  /* Pull an out-of-sync page back into sync. */
    3.27  void sh_resync(struct vcpu *v, mfn_t gmfn);
     4.1 --- a/xen/include/asm-x86/domain.h	Sat Jul 05 14:00:19 2008 +0100
     4.2 +++ b/xen/include/asm-x86/domain.h	Sat Jul 05 14:01:27 2008 +0100
     4.3 @@ -128,14 +128,12 @@ struct shadow_vcpu {
     4.4  
     4.5      /* Shadow out-of-sync: pages that this vcpu has let go out of sync */
     4.6      mfn_t oos[SHADOW_OOS_PAGES];
     4.7 -    unsigned long oos_va[SHADOW_OOS_PAGES];
     4.8      mfn_t oos_snapshot[SHADOW_OOS_PAGES];
     4.9      struct oos_fixup {
    4.10 -        mfn_t gmfn;
    4.11 -        mfn_t smfn;
    4.12 -        unsigned long off;
    4.13 -    } *oos_fixups;
    4.14 -    int oos_fixup_used;
    4.15 +        int next;
    4.16 +        mfn_t smfn[SHADOW_OOS_FIXUPS];
    4.17 +        unsigned long off[SHADOW_OOS_FIXUPS];
    4.18 +    } oos_fixup[SHADOW_OOS_PAGES];
    4.19  };
    4.20  
    4.21  /************************************************/
     5.1 --- a/xen/include/asm-x86/mm.h	Sat Jul 05 14:00:19 2008 +0100
     5.2 +++ b/xen/include/asm-x86/mm.h	Sat Jul 05 14:01:27 2008 +0100
     5.3 @@ -133,10 +133,8 @@ static inline u32 pickle_domptr(struct d
     5.4  /* The number of out-of-sync shadows we allow per vcpu (prime, please) */
     5.5  #define SHADOW_OOS_PAGES 3
     5.6  
     5.7 -/* The order OOS fixup tables per vcpu */
     5.8 -#define SHADOW_OOS_FT_ORDER 1
     5.9 -/* OOS fixup tables hash entries */
    5.10 -#define SHADOW_OOS_FT_HASH 13
    5.11 +/* OOS fixup entries */
    5.12 +#define SHADOW_OOS_FIXUPS 2
    5.13  
    5.14  #define page_get_owner(_p)    (unpickle_domptr((_p)->u.inuse._domain))
    5.15  #define page_set_owner(_p,_d) ((_p)->u.inuse._domain = pickle_domptr(_d))
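
For scale, a back-of-the-envelope per-vcpu cost comparison (assuming
4 KiB pages and a 64-bit build; exact sizes depend on the ABI):

    Old fixup tables:  one order-1 xenheap allocation per vcpu
                       = PAGE_SIZE << SHADOW_OOS_FT_ORDER
                       = 4096 << 1 = 8192 bytes, plus an allocation
                         that can fail and a teardown path

    New fixup arrays:  embedded directly in struct shadow_vcpu
                       sizeof(struct oos_fixup) ~= 8 (next + padding)
                                                 + 2*8 (smfn)
                                                 + 2*8 (off)  = 40 bytes
                       SHADOW_OOS_PAGES * 40 = 3 * 40 = 120 bytes,
                       with no allocation or failure path at all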
     6.1 --- a/xen/include/asm-x86/perfc_defn.h	Sat Jul 05 14:00:19 2008 +0100
     6.2 +++ b/xen/include/asm-x86/perfc_defn.h	Sat Jul 05 14:01:27 2008 +0100
     6.3 @@ -105,13 +105,8 @@ PERFCOUNTER(shadow_em_ex_pt,       "shad
     6.4  PERFCOUNTER(shadow_em_ex_non_pt,   "shadow extra non-pt-write op")
     6.5  PERFCOUNTER(shadow_em_ex_fail,     "shadow extra emulation failed")
     6.6  
     6.7 -PERFCOUNTER(shadow_oos_fixup_add_ok,    "shadow OOS fixups adds")
     6.8 -PERFCOUNTER(shadow_oos_fixup_no_add,    "shadow OOS fixups no adds")
     6.9 -PERFCOUNTER(shadow_oos_fixup_add_fail,  "shadow OOS fixups adds failed")
    6.10 -PERFCOUNTER(shadow_oos_fixup_remove,    "shadow OOS fixups removes")
    6.11 -PERFCOUNTER(shadow_oos_fixup_flush,     "shadow OOS fixups flushes")
    6.12 -PERFCOUNTER(shadow_oos_fixup_flush_gmfn,"shadow OOS fixups gmfn flushes")
    6.13 -
    6.14 +PERFCOUNTER(shadow_oos_fixup_add,  "shadow OOS fixup adds")
    6.15 +PERFCOUNTER(shadow_oos_fixup_evict,"shadow OOS fixup evictions")
    6.16  PERFCOUNTER(shadow_unsync,         "shadow OOS unsyncs")
    6.17  PERFCOUNTER(shadow_unsync_evict,   "shadow OOS evictions")
    6.18  PERFCOUNTER(shadow_resync,         "shadow OOS resyncs")