direct-io.hg

changeset 7374:b41e51ffa5ea

Because of a reference-counting bug against the target guest page, the search
of the hash list for L1 shadow pages that need to write-protect that page
(in shadow_promote(), which is called by alloc_shadow_page()) always scanned
_all_ the entries in the list. The hash list can contain more than 500 L1
shadow pages, and for each page we had to check every PTE in the page.

The attached patch does the following:
- Corrects the reference count (for the target guest page) so that the loop
can exit as soon as all the L1 shadow pages to modify have been found.
Even with this, the entire list may still be searched if the page is near
the end.
- Avoids the hash-list search where possible by keeping a back pointer
(as a hint) to the shadow page pfn. In most cases there is a single
translation for the guest page in the shadow.
- Cleanups: removes the nested function fix_entry.

With these changes, kernel-build performance improved by approximately 20%
on 32-bit and 40% on 64-bit unmodified Linux guests. Log-dirty mode was also
tested for plain 32-bit.
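
As a rough illustration of the back-pointer hint (a simplified sketch only,
not the patch code; the real implementation is set_guest_back_ptr() and its
callers in the diff below, and the struct layout and bit-field values here
are illustrative, not Xen's actual ones):

    /*
     * Sketch: when a shadow L1 entry mapping a guest frame is installed,
     * remember which shadow page (smfn) and which slot (index) it lives in.
     * As in the patch, the frame's tlbflush_timestamp is reused as the smfn
     * back pointer, and spare bits of type_info hold the entry index.
     */
    struct pfn_info_sketch {
        unsigned long tlbflush_timestamp;   /* reused: back pointer to smfn */
        unsigned long type_info;            /* spare bits: L1 entry index   */
    };

    #define SKETCH_VA_SHIFT 16                            /* illustrative   */
    #define SKETCH_VA_MASK  (0x3ffUL << SKETCH_VA_SHIFT)  /* 10-bit index   */

    static inline void record_back_ptr(struct pfn_info_sketch *gpg,
                                       unsigned long smfn, unsigned int index)
    {
        gpg->tlbflush_timestamp = smfn;
        gpg->type_info &= ~SKETCH_VA_MASK;
        gpg->type_info |= (unsigned long)index << SKETCH_VA_SHIFT;
    }

The write-protect path (remove_all_write_access and
remove_all_write_access_in_ptpage) then reads these two fields back to try
the hinted shadow entry first, and falls back to the full shadow hash-list
walk only if the hint does not account for all writable references.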

Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Asit Mallick <asit.k.mallick@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Oct 13 17:58:01 2005 +0100 (2005-10-13)
parents 3fd239d8b640
children 56752fea020d
files xen/arch/x86/shadow.c xen/arch/x86/shadow32.c xen/arch/x86/shadow_public.c xen/include/asm-x86/shadow.h
line diff
     1.1 --- a/xen/arch/x86/shadow.c	Thu Oct 13 17:55:15 2005 +0100
     1.2 +++ b/xen/arch/x86/shadow.c	Thu Oct 13 17:58:01 2005 +0100
     1.3 @@ -616,13 +616,6 @@ static void shadow_map_l1_into_current_l
     1.4          pt_va = ((va >> L1_PAGETABLE_SHIFT) & ~(entries - 1)) << L1_PAGETABLE_SHIFT;
     1.5          spl1e = (l1_pgentry_t *) __shadow_get_l1e(v, pt_va, &tmp_sl1e);
     1.6  
     1.7 -        /*
     1.8 -        gpl1e = &(linear_pg_table[l1_linear_offset(va) &
     1.9 -                              ~(L1_PAGETABLE_ENTRIES-1)]);
    1.10 -
    1.11 -        spl1e = &(shadow_linear_pg_table[l1_linear_offset(va) &
    1.12 -                                     ~(L1_PAGETABLE_ENTRIES-1)]);*/
    1.13 -
    1.14          for ( i = 0; i < GUEST_L1_PAGETABLE_ENTRIES; i++ )
    1.15          {
    1.16              l1pte_propagate_from_guest(d, gpl1e[i], &sl1e);
    1.17 @@ -645,7 +638,8 @@ static void shadow_map_l1_into_current_l
    1.18                  min = i;
    1.19              if ( likely(i > max) )
    1.20                  max = i;
    1.21 -        }
    1.22 +            set_guest_back_ptr(d, sl1e, sl1mfn, i);
    1.23 +          }
    1.24  
    1.25          frame_table[sl1mfn].tlbflush_timestamp =
    1.26              SHADOW_ENCODE_MIN_MAX(min, max);
    1.27 @@ -716,8 +710,8 @@ shadow_set_l1e(unsigned long va, l1_pgen
    1.28          }
    1.29      }
    1.30  
    1.31 +    set_guest_back_ptr(d, new_spte, l2e_get_pfn(sl2e), l1_table_offset(va));
    1.32      __shadow_set_l1e(v, va, &new_spte);
    1.33 -
    1.34      shadow_update_min_max(l2e_get_pfn(sl2e), l1_table_offset(va));
    1.35  }
    1.36  
    1.37 @@ -1135,6 +1129,24 @@ decrease_writable_pte_prediction(struct 
    1.38      }
    1.39  }
    1.40  
    1.41 +static int fix_entry(
    1.42 +    struct domain *d, 
    1.43 +    l1_pgentry_t *pt, u32 *found, int is_l1_shadow, u32 max_refs_to_find)
    1.44 +{
    1.45 +    l1_pgentry_t old = *pt;
    1.46 +    l1_pgentry_t new = old;
    1.47 +
    1.48 +    l1e_remove_flags(new,_PAGE_RW);
    1.49 +    if ( is_l1_shadow && !shadow_get_page_from_l1e(new, d) )
    1.50 +        BUG();
    1.51 +    (*found)++;
    1.52 +    *pt = new;
    1.53 +    if ( is_l1_shadow )
    1.54 +        shadow_put_page_from_l1e(old, d);
    1.55 +
    1.56 +    return (*found == max_refs_to_find);
    1.57 +}
    1.58 +
    1.59  static u32 remove_all_write_access_in_ptpage(
    1.60      struct domain *d, unsigned long pt_pfn, unsigned long pt_mfn,
    1.61      unsigned long readonly_gpfn, unsigned long readonly_gmfn,
    1.62 @@ -1156,49 +1168,28 @@ static u32 remove_all_write_access_in_pt
    1.63  
    1.64      match = l1e_from_pfn(readonly_gmfn, flags);
    1.65  
    1.66 -    // returns true if all refs have been found and fixed.
    1.67 -    //
    1.68 -    int fix_entry(int i)
    1.69 -    {
    1.70 -        l1_pgentry_t old = pt[i];
    1.71 -        l1_pgentry_t new = old;
    1.72 -
    1.73 -        l1e_remove_flags(new,_PAGE_RW);
    1.74 -        if ( is_l1_shadow && !shadow_get_page_from_l1e(new, d) )
    1.75 -            BUG();
    1.76 -        found++;
    1.77 -        pt[i] = new;
    1.78 -        if ( is_l1_shadow )
    1.79 -            shadow_put_page_from_l1e(old, d);
    1.80 -
    1.81 -#if 0
    1.82 -        printk("removed write access to pfn=%lx mfn=%lx in smfn=%lx entry %x "
    1.83 -               "is_l1_shadow=%d\n",
    1.84 -               readonly_gpfn, readonly_gmfn, pt_mfn, i, is_l1_shadow);
    1.85 -#endif
    1.86 -
    1.87 -        return (found == max_refs_to_find);
    1.88 -    }
    1.89 -
    1.90 -    i = readonly_gpfn & (GUEST_L1_PAGETABLE_ENTRIES - 1);
    1.91 -    if ( !l1e_has_changed(pt[i], match, flags) && fix_entry(i) )
    1.92 -    {
    1.93 -        perfc_incrc(remove_write_fast_exit);
    1.94 -        increase_writable_pte_prediction(d, readonly_gpfn, prediction);
    1.95 -        unmap_domain_page(pt);
    1.96 -        return found;
    1.97 +    if ( shadow_mode_external(d) ) {
    1.98 +        i = (frame_table[readonly_gmfn].u.inuse.type_info & PGT_va_mask) 
    1.99 +            >> PGT_va_shift;
   1.100 +
   1.101 +        if ( (i >= 0 && i <= L1_PAGETABLE_ENTRIES) &&
   1.102 +             !l1e_has_changed(pt[i], match, flags) && 
   1.103 +             fix_entry(d, &pt[i], &found, is_l1_shadow, max_refs_to_find) &&
   1.104 +             !prediction )
   1.105 +            goto out;
   1.106      }
   1.107   
   1.108      for (i = 0; i < GUEST_L1_PAGETABLE_ENTRIES; i++)
   1.109      {
   1.110 -        if ( unlikely(!l1e_has_changed(pt[i], match, flags)) && fix_entry(i) )
   1.111 +        if ( unlikely(!l1e_has_changed(pt[i], match, flags)) && 
   1.112 +             fix_entry(d, &pt[i], &found, is_l1_shadow, max_refs_to_find) )
   1.113              break;
   1.114      }
   1.115  
   1.116 +out:
   1.117      unmap_domain_page(pt);
   1.118  
   1.119      return found;
   1.120 -#undef MATCH_ENTRY
   1.121  }
   1.122  
   1.123  static int remove_all_write_access(
   1.124 @@ -1206,8 +1197,8 @@ static int remove_all_write_access(
   1.125  {
   1.126      int i;
   1.127      struct shadow_status *a;
   1.128 -    u32 found = 0, fixups, write_refs;
   1.129 -    unsigned long prediction, predicted_gpfn, predicted_smfn;
   1.130 +    u32 found = 0, write_refs;
   1.131 +    unsigned long predicted_smfn;
   1.132  
   1.133      ASSERT(shadow_lock_is_acquired(d));
   1.134      ASSERT(VALID_MFN(readonly_gmfn));
   1.135 @@ -1238,26 +1229,18 @@ static int remove_all_write_access(
   1.136          return 1;
   1.137      }
   1.138  
   1.139 -    // Before searching all the L1 page tables, check the typical culprit first
   1.140 -    //
   1.141 -    if ( (prediction = predict_writable_pte_page(d, readonly_gpfn)) )
   1.142 -    {
   1.143 -        predicted_gpfn = prediction & PGT_mfn_mask;
   1.144 -        if ( (predicted_smfn = __shadow_status(d, predicted_gpfn, PGT_l1_shadow)) &&
   1.145 -             (fixups = remove_all_write_access_in_ptpage(d, predicted_gpfn, predicted_smfn, readonly_gpfn, readonly_gmfn, write_refs, prediction)) )
   1.146 -        {
   1.147 -            found += fixups;
   1.148 -            if ( found == write_refs )
   1.149 -            {
   1.150 -                perfc_incrc(remove_write_predicted);
   1.151 -                return 1;
   1.152 -            }
   1.153 -        }
   1.154 -        else
   1.155 -        {
   1.156 -            perfc_incrc(remove_write_bad_prediction);
   1.157 -            decrease_writable_pte_prediction(d, readonly_gpfn, prediction);
   1.158 -        }
   1.159 +    if ( shadow_mode_external(d) ) {
   1.160 +        if (write_refs-- == 0) 
   1.161 +            return 0;
   1.162 +
   1.163 +         // Use the back pointer to locate the shadow page that can contain
   1.164 +         // the PTE of interest
   1.165 +         if ( (predicted_smfn = frame_table[readonly_gmfn].tlbflush_timestamp) ) {
   1.166 +             found += remove_all_write_access_in_ptpage(
   1.167 +                 d, predicted_smfn, predicted_smfn, readonly_gpfn, readonly_gmfn, write_refs, 0);
   1.168 +             if ( found == write_refs )
   1.169 +                 return 0;
   1.170 +         }
   1.171      }
   1.172  
   1.173      // Search all the shadow L1 page tables...
   1.174 @@ -1276,7 +1259,7 @@ static int remove_all_write_access(
   1.175              {
   1.176                  found += remove_all_write_access_in_ptpage(d, a->gpfn_and_flags & PGT_mfn_mask, a->smfn, readonly_gpfn, readonly_gmfn, write_refs - found, a->gpfn_and_flags & PGT_mfn_mask);
   1.177                  if ( found == write_refs )
   1.178 -                    return 1;
   1.179 +                    return 0;
   1.180              }
   1.181  
   1.182              a = a->next;
   1.183 @@ -1376,7 +1359,7 @@ static int resync_all(struct domain *d, 
   1.184                       guest_l1e_has_changed(guest1[i], snapshot1[i], PAGE_FLAG_MASK) )
   1.185                  {
   1.186                      need_flush |= validate_pte_change(d, guest1[i], &shadow1[i]);
   1.187 -
   1.188 +                    set_guest_back_ptr(d, shadow1[i], smfn, i);
   1.189                      // can't update snapshots of linear page tables -- they
   1.190                      // are used multiple times...
   1.191                      //
   1.192 @@ -1604,6 +1587,8 @@ static void sync_all(struct domain *d)
   1.193               !shadow_get_page_from_l1e(npte, d) )
   1.194              BUG();
   1.195          *ppte = npte;
   1.196 +        set_guest_back_ptr(d, npte, (entry->writable_pl1e) >> PAGE_SHIFT, 
   1.197 +                           (entry->writable_pl1e & ~PAGE_MASK)/sizeof(l1_pgentry_t));
   1.198          shadow_put_page_from_l1e(opte, d);
   1.199  
   1.200          unmap_domain_page(ppte);
     2.1 --- a/xen/arch/x86/shadow32.c	Thu Oct 13 17:55:15 2005 +0100
     2.2 +++ b/xen/arch/x86/shadow32.c	Thu Oct 13 17:58:01 2005 +0100
     2.3 @@ -1032,7 +1032,12 @@ int __shadow_mode_enable(struct domain *
     2.4              if ( !get_page_type(page, PGT_writable_page) )
     2.5                  BUG();
     2.6              put_page_type(page);
     2.7 -
     2.8 +            /*
     2.9 +             * We use tlbflush_timestamp as back pointer to smfn, and need to
    2.10 +             * clean up it.
    2.11 +             */
    2.12 +            if ( shadow_mode_external(d) )
    2.13 +                page->tlbflush_timestamp = 0;
    2.14              list_ent = page->list.next;
    2.15          }
    2.16      }
    2.17 @@ -1390,18 +1395,6 @@ int shadow_mode_control(struct domain *d
    2.18      return rc;
    2.19  }
    2.20  
    2.21 -/*
    2.22 - * XXX KAF: Why is this VMX specific?
    2.23 - */
    2.24 -void vmx_shadow_clear_state(struct domain *d)
    2.25 -{
    2.26 -    SH_VVLOG("%s:", __func__);
    2.27 -    shadow_lock(d);
    2.28 -    free_shadow_pages(d);
    2.29 -    shadow_unlock(d);
    2.30 -    update_pagetables(d->vcpu[0]);
    2.31 -}
    2.32 -
    2.33  unsigned long
    2.34  gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
    2.35  {
    2.36 @@ -1462,14 +1455,10 @@ shadow_hl2_table(struct domain *d, unsig
    2.37  
    2.38      hl2 = map_domain_page(hl2mfn);
    2.39  
    2.40 -#ifdef __i386__
    2.41      if ( shadow_mode_external(d) )
    2.42          limit = L2_PAGETABLE_ENTRIES;
    2.43      else
    2.44          limit = DOMAIN_ENTRIES_PER_L2_PAGETABLE;
    2.45 -#else
    2.46 -    limit = 0; /* XXX x86/64 XXX */
    2.47 -#endif
    2.48  
    2.49      memset(hl2, 0, limit * sizeof(l1_pgentry_t));
    2.50  
    2.51 @@ -1665,6 +1654,7 @@ void shadow_map_l1_into_current_l2(unsig
    2.52                  min = i;
    2.53              if ( likely(i > max) )
    2.54                  max = i;
    2.55 +            set_guest_back_ptr(d, sl1e, sl1mfn, i);
    2.56          }
    2.57  
    2.58          frame_table[sl1mfn].tlbflush_timestamp =
    2.59 @@ -2072,6 +2062,24 @@ free_writable_pte_predictions(struct dom
    2.60      }
    2.61  }
    2.62  
    2.63 +static int fix_entry(
    2.64 +    struct domain *d, 
    2.65 +    l1_pgentry_t *pt, u32 *found, int is_l1_shadow, u32 max_refs_to_find)
    2.66 +{
    2.67 +    l1_pgentry_t old = *pt;
    2.68 +    l1_pgentry_t new = old;
    2.69 +
    2.70 +    l1e_remove_flags(new,_PAGE_RW);
    2.71 +    if ( is_l1_shadow && !shadow_get_page_from_l1e(new, d) )
    2.72 +        BUG();
    2.73 +    (*found)++;
    2.74 +    *pt = new;
    2.75 +    if ( is_l1_shadow )
    2.76 +        shadow_put_page_from_l1e(old, d);
    2.77 +
    2.78 +    return (*found == max_refs_to_find);
    2.79 +}
    2.80 +
    2.81  static u32 remove_all_write_access_in_ptpage(
    2.82      struct domain *d, unsigned long pt_pfn, unsigned long pt_mfn,
    2.83      unsigned long readonly_gpfn, unsigned long readonly_gmfn,
    2.84 @@ -2088,49 +2096,28 @@ static u32 remove_all_write_access_in_pt
    2.85  
    2.86      match = l1e_from_pfn(readonly_gmfn, flags);
    2.87  
    2.88 -    // returns true if all refs have been found and fixed.
    2.89 -    //
    2.90 -    int fix_entry(int i)
    2.91 -    {
    2.92 -        l1_pgentry_t old = pt[i];
    2.93 -        l1_pgentry_t new = old;
    2.94 -
    2.95 -        l1e_remove_flags(new,_PAGE_RW);
    2.96 -        if ( is_l1_shadow && !shadow_get_page_from_l1e(new, d) )
    2.97 -            BUG();
    2.98 -        found++;
    2.99 -        pt[i] = new;
   2.100 -        if ( is_l1_shadow )
   2.101 -            shadow_put_page_from_l1e(old, d);
   2.102 -
   2.103 -#if 0
   2.104 -        printk("removed write access to pfn=%lx mfn=%lx in smfn=%lx entry %x "
   2.105 -               "is_l1_shadow=%d\n",
   2.106 -               readonly_gpfn, readonly_gmfn, pt_mfn, i, is_l1_shadow);
   2.107 -#endif
   2.108 -
   2.109 -        return (found == max_refs_to_find);
   2.110 +    if ( shadow_mode_external(d) ) {
   2.111 +        i = (frame_table[readonly_gmfn].u.inuse.type_info & PGT_va_mask) 
   2.112 +            >> PGT_va_shift;
   2.113 +
   2.114 +        if ( (i >= 0 && i <= L1_PAGETABLE_ENTRIES) &&
   2.115 +             !l1e_has_changed(pt[i], match, flags) && 
   2.116 +             fix_entry(d, &pt[i], &found, is_l1_shadow, max_refs_to_find) &&
   2.117 +             !prediction )
   2.118 +            goto out;
   2.119      }
   2.120  
   2.121 -    i = readonly_gpfn & (L1_PAGETABLE_ENTRIES - 1);
   2.122 -    if ( !l1e_has_changed(pt[i], match, flags) && fix_entry(i) )
   2.123 -    {
   2.124 -        perfc_incrc(remove_write_fast_exit);
   2.125 -        increase_writable_pte_prediction(d, readonly_gpfn, prediction);
   2.126 -        unmap_domain_page(pt);
   2.127 -        return found;
   2.128 -    }
   2.129 - 
   2.130      for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
   2.131      {
   2.132 -        if ( unlikely(!l1e_has_changed(pt[i], match, flags)) && fix_entry(i) )
   2.133 +        if ( unlikely(!l1e_has_changed(pt[i], match, flags)) && 
   2.134 +             fix_entry(d, &pt[i], &found, is_l1_shadow, max_refs_to_find) )
   2.135              break;
   2.136      }
   2.137  
   2.138 +out:
   2.139      unmap_domain_page(pt);
   2.140  
   2.141      return found;
   2.142 -#undef MATCH_ENTRY
   2.143  }
   2.144  
   2.145  int shadow_remove_all_write_access(
   2.146 @@ -2138,8 +2125,8 @@ int shadow_remove_all_write_access(
   2.147  {
   2.148      int i;
   2.149      struct shadow_status *a;
   2.150 -    u32 found = 0, fixups, write_refs;
   2.151 -    unsigned long prediction, predicted_gpfn, predicted_smfn;
   2.152 +    u32 found = 0, write_refs;
   2.153 +    unsigned long predicted_smfn;
   2.154  
   2.155      ASSERT(shadow_lock_is_acquired(d));
   2.156      ASSERT(VALID_MFN(readonly_gmfn));
   2.157 @@ -2169,27 +2156,19 @@ int shadow_remove_all_write_access(
   2.158          perfc_incrc(remove_write_no_work);
   2.159          return 1;
   2.160      }
   2.161 -
   2.162 -    // Before searching all the L1 page tables, check the typical culprit first
   2.163 -    //
   2.164 -    if ( (prediction = predict_writable_pte_page(d, readonly_gpfn)) )
   2.165 -    {
   2.166 -        predicted_gpfn = prediction & PGT_mfn_mask;
   2.167 -        if ( (predicted_smfn = __shadow_status(d, predicted_gpfn, PGT_l1_shadow)) &&
   2.168 -             (fixups = remove_all_write_access_in_ptpage(d, predicted_gpfn, predicted_smfn, readonly_gpfn, readonly_gmfn, write_refs, prediction)) )
   2.169 -        {
   2.170 -            found += fixups;
   2.171 -            if ( found == write_refs )
   2.172 -            {
   2.173 -                perfc_incrc(remove_write_predicted);
   2.174 -                return 1;
   2.175 -            }
   2.176 -        }
   2.177 -        else
   2.178 -        {
   2.179 -            perfc_incrc(remove_write_bad_prediction);
   2.180 -            decrease_writable_pte_prediction(d, readonly_gpfn, prediction);
   2.181 -        }
   2.182 +    
   2.183 +    if ( shadow_mode_external(d) ) {
   2.184 +        if (write_refs-- == 0) 
   2.185 +            return 0;
   2.186 +
   2.187 +         // Use the back pointer to locate the shadow page that can contain
   2.188 +         // the PTE of interest
   2.189 +         if ( (predicted_smfn = frame_table[readonly_gmfn].tlbflush_timestamp) ) {
   2.190 +             found += remove_all_write_access_in_ptpage(
   2.191 +                 d, predicted_smfn, predicted_smfn, readonly_gpfn, readonly_gmfn, write_refs, 0);
   2.192 +             if ( found == write_refs )
   2.193 +                 return 0;
   2.194 +         }
   2.195      }
   2.196  
   2.197      // Search all the shadow L1 page tables...
   2.198 @@ -2203,7 +2182,7 @@ int shadow_remove_all_write_access(
   2.199              {
   2.200                  found += remove_all_write_access_in_ptpage(d, a->gpfn_and_flags & PGT_mfn_mask, a->smfn, readonly_gpfn, readonly_gmfn, write_refs - found, a->gpfn_and_flags & PGT_mfn_mask);
   2.201                  if ( found == write_refs )
   2.202 -                    return 1;
   2.203 +                    return 0;
   2.204              }
   2.205  
   2.206              a = a->next;
   2.207 @@ -2376,12 +2355,12 @@ static int resync_all(struct domain *d, 
   2.208                       l1e_has_changed(guest1[i], snapshot1[i], PAGE_FLAG_MASK) )
   2.209                  {
   2.210                      need_flush |= validate_pte_change(d, guest1[i], &shadow1[i]);
   2.211 +                    set_guest_back_ptr(d, shadow1[i], smfn, i);
   2.212  
   2.213                      // can't update snapshots of linear page tables -- they
   2.214                      // are used multiple times...
   2.215                      //
   2.216                      // snapshot[i] = new_pte;
   2.217 -
   2.218                      changed++;
   2.219                  }
   2.220              }
   2.221 @@ -2530,6 +2509,8 @@ void __shadow_sync_all(struct domain *d)
   2.222               !shadow_get_page_from_l1e(npte, d) )
   2.223              BUG();
   2.224          *ppte = npte;
   2.225 +        set_guest_back_ptr(d, npte, (entry->writable_pl1e) >> PAGE_SHIFT, 
   2.226 +                           (entry->writable_pl1e & ~PAGE_MASK)/sizeof(l1_pgentry_t));
   2.227          shadow_put_page_from_l1e(opte, d);
   2.228  
   2.229          unmap_domain_page(ppte);
     3.1 --- a/xen/arch/x86/shadow_public.c	Thu Oct 13 17:55:15 2005 +0100
     3.2 +++ b/xen/arch/x86/shadow_public.c	Thu Oct 13 17:58:01 2005 +0100
     3.3 @@ -1095,7 +1095,12 @@ int __shadow_mode_enable(struct domain *
     3.4              if ( !get_page_type(page, PGT_writable_page) )
     3.5                  BUG();
     3.6              put_page_type(page);
     3.7 -
     3.8 +            /*
     3.9 +             * We use tlbflush_timestamp as back pointer to smfn, and need to
    3.10 +             * clean up it.
    3.11 +             */
    3.12 +            if ( shadow_mode_external(d) )
    3.13 +                page->tlbflush_timestamp = 0;
    3.14              list_ent = page->list.next;
    3.15          }
    3.16      }
     4.1 --- a/xen/include/asm-x86/shadow.h	Thu Oct 13 17:55:15 2005 +0100
     4.2 +++ b/xen/include/asm-x86/shadow.h	Thu Oct 13 17:58:01 2005 +0100
     4.3 @@ -718,6 +718,23 @@ shadow_unpin(unsigned long smfn)
     4.4      put_shadow_ref(smfn);
     4.5  }
     4.6  
     4.7 +/*
     4.8 + * SMP issue. The following code assumes the shadow lock is held. Re-visit
     4.9 + * when working on finer-gained locks for shadow.
    4.10 + */
    4.11 +static inline void set_guest_back_ptr(
    4.12 +    struct domain *d, l1_pgentry_t spte, unsigned long smfn, unsigned int index)
    4.13 +{
    4.14 +    if ( shadow_mode_external(d) ) {
    4.15 +        unsigned long gmfn;
    4.16 +
    4.17 +        ASSERT(shadow_lock_is_acquired(d));
    4.18 +        gmfn = l1e_get_pfn(spte);
    4.19 +        frame_table[gmfn].tlbflush_timestamp = smfn;
    4.20 +        frame_table[gmfn].u.inuse.type_info &= ~PGT_va_mask;
    4.21 +        frame_table[gmfn].u.inuse.type_info |= (unsigned long) index << PGT_va_shift;
    4.22 +    }
    4.23 +}
    4.24  
    4.25  /************************************************************************/
    4.26  #if CONFIG_PAGING_LEVELS <= 2
    4.27 @@ -1611,10 +1628,11 @@ shadow_set_l1e(unsigned long va, l1_pgen
    4.28              if ( l1e_get_flags(old_spte) & _PAGE_PRESENT )
    4.29                  shadow_put_page_from_l1e(old_spte, d);
    4.30          }
    4.31 +
    4.32      }
    4.33  
    4.34 +    set_guest_back_ptr(d, new_spte, l2e_get_pfn(sl2e), l1_table_offset(va));
    4.35      shadow_linear_pg_table[l1_linear_offset(va)] = new_spte;
    4.36 -
    4.37      shadow_update_min_max(l2e_get_pfn(sl2e), l1_table_offset(va));
    4.38  }
    4.39  #endif