ia64/xen-unstable

changeset 7701:c665ab5a6b44

Clean up various shadow mode asserts.

Separate out the ability of domains to write to their page tables
(a la "writable page tables", which uses write-protected PTEs to
address the page tables: this is shadow_mode_write_all()) from the
right of a domain to create a PTE with write permissions that points
at a page table (this is shadow_mode_wr_pt_pte())...
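
For reference, a condensed sketch of the distinction, assembled from the
shadow.h and shadow_fault() hunks below (not a complete listing; the diff
itself is authoritative):

    /* May write all guest page table pages, regardless of PTE perms. */
    #define SHM_write_all (1<<2)
    /* May create a writable PTE that points at a page table page. */
    #define SHM_wr_pt_pte (1<<6)

    #define shadow_mode_write_l1(_d)  \
        (VM_ASSIST(_d, VMASST_TYPE_writable_pagetables))
    #define shadow_mode_write_all(_d) ((_d)->arch.shadow_mode & SHM_write_all)
    #define shadow_mode_wr_pt_pte(_d) ((_d)->arch.shadow_mode & SHM_wr_pt_pte)

    /* Consequence in the fault path: a write fault through a PTE that
     * maps a page table page is now fatal for a domain that lacks the
     * wr_pt_pte right (see the shadow_fault() hunks below). */
    else if ( unlikely(!shadow_mode_wr_pt_pte(d) &&
                       mfn_is_page_table(l1e_get_pfn(gpte))) )
    {
        SH_LOG("l1pte_write_fault: no write access to page table page");
        domain_crash_synchronous();
    }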

Minor cleanup of SHADOW_DEBUG in shadow.c (at least making it compilable).
author Michael.Fetterman@cl.cam.ac.uk
date Tue Nov 08 13:26:50 2005 +0100 (2005-11-08)
parents 6d298cac0e8d
children f544934dd0b6
files xen/arch/x86/shadow.c xen/arch/x86/shadow32.c xen/arch/x86/vmx.c xen/include/asm-x86/page.h xen/include/asm-x86/shadow.h
line diff
     1.1 --- a/xen/arch/x86/shadow.c	Tue Nov 08 12:26:48 2005 +0100
     1.2 +++ b/xen/arch/x86/shadow.c	Tue Nov 08 13:26:50 2005 +0100
     1.3 @@ -37,9 +37,11 @@
     1.4  
     1.5  extern void free_shadow_pages(struct domain *d);
     1.6  
     1.7 +#if 0 // this code has not been updated for 32pae & 64 bit modes
     1.8  #if SHADOW_DEBUG
     1.9  static void mark_shadows_as_reflecting_snapshot(struct domain *d, unsigned long gpfn);
    1.10  #endif
    1.11 +#endif
    1.12  
    1.13  #if CONFIG_PAGING_LEVELS == 3
    1.14  #include <asm/shadow_64.h>
    1.15 @@ -898,9 +900,11 @@ mark_mfn_out_of_sync(struct vcpu *v, uns
    1.16      entry->snapshot_mfn = shadow_make_snapshot(d, gpfn, mfn);
    1.17      entry->writable_pl1e = -1;
    1.18  
    1.19 +#if 0 // this code has not been updated for 32pae & 64 bit modes
    1.20  #if SHADOW_DEBUG
    1.21      mark_shadows_as_reflecting_snapshot(d, gpfn);
    1.22  #endif
    1.23 +#endif
    1.24  
    1.25      // increment guest's ref count to represent the entry in the
    1.26      // full shadow out-of-sync list.
    1.27 @@ -1317,18 +1321,17 @@ static int resync_all(struct domain *d, 
    1.28  
    1.29          if ( !smfn )
    1.30          {
    1.31 +            // For heavy weight shadows: no need to update refcounts if
    1.32 +            // there's no shadow page.
    1.33 +            //
    1.34              if ( shadow_mode_refcounts(d) )
    1.35                  continue;
    1.36  
    1.37 -            // For light weight shadows, even when no shadow page exists,
    1.38 -            // we need to resync the refcounts to the new contents of the
    1.39 -            // guest page.
    1.40 -            // This only applies when we have writable page tables.
     1.41 +            // For light weight shadows: we only need to resync the refcounts
     1.42 +            // to the new contents of the guest page iff it has the right
     1.43 +            // page type.
    1.44              //
    1.45 -            if ( !shadow_mode_write_all(d) &&
    1.46 -                 !((stype == PGT_l1_shadow) &&
    1.47 -                   VM_ASSIST(d, VMASST_TYPE_writable_pagetables)) )
    1.48 -                // Page is not writable -- no resync necessary
    1.49 +            if ( stype != ( pfn_to_page(entry->gmfn)->u.inuse.type_info & PGT_type_mask) )
    1.50                  continue;
    1.51          }
    1.52  
    1.53 @@ -1365,8 +1368,8 @@ static int resync_all(struct domain *d, 
    1.54              guest_l1_pgentry_t *snapshot1 = snapshot;
    1.55              int unshadow_l1 = 0;
    1.56  
    1.57 -            ASSERT(VM_ASSIST(d, VMASST_TYPE_writable_pagetables) ||
    1.58 -                   shadow_mode_write_all(d));
    1.59 +            ASSERT(shadow_mode_write_l1(d) ||
    1.60 +                   shadow_mode_write_all(d) || shadow_mode_wr_pt_pte(d));
    1.61  
    1.62              if ( !shadow_mode_refcounts(d) )
    1.63                  revalidate_l1(d, (l1_pgentry_t *)guest1, (l1_pgentry_t *)snapshot1);
    1.64 @@ -1427,7 +1430,7 @@ static int resync_all(struct domain *d, 
    1.65              l2_pgentry_t *shadow2 = shadow;
    1.66              l2_pgentry_t *snapshot2 = snapshot;
    1.67  
    1.68 -            ASSERT(shadow_mode_write_all(d));
    1.69 +            ASSERT(shadow_mode_write_all(d) || shadow_mode_wr_pt_pte(d));
    1.70              BUG_ON(!shadow_mode_refcounts(d)); // not yet implemented
    1.71  
    1.72              changed = 0;
    1.73 @@ -1473,7 +1476,7 @@ static int resync_all(struct domain *d, 
    1.74              l2_pgentry_t *snapshot2 = snapshot;
    1.75              l1_pgentry_t *shadow2 = shadow;
    1.76  
    1.77 -            ASSERT(shadow_mode_write_all(d));
    1.78 +            ASSERT(shadow_mode_write_all(d) || shadow_mode_wr_pt_pte(d));
    1.79              BUG_ON(!shadow_mode_refcounts(d)); // not yet implemented
    1.80  
    1.81              changed = 0;
    1.82 @@ -1822,8 +1825,13 @@ static int shadow_fault_32(unsigned long
    1.83                  goto fail;
    1.84              }
    1.85          }
    1.86 -
    1.87 -        if ( !l1pte_write_fault(v, &gpte, &spte, va) )
    1.88 +        else if ( unlikely(!shadow_mode_wr_pt_pte(d) && mfn_is_page_table(l1e_get_pfn(gpte))) )
    1.89 +        {
    1.90 +            SH_LOG("l1pte_write_fault: no write access to page table page");
    1.91 +            domain_crash_synchronous();
    1.92 +        }
    1.93 +
    1.94 +        if ( unlikely(!l1pte_write_fault(v, &gpte, &spte, va)) )
    1.95          {
    1.96              SH_VVLOG("shadow_fault - EXIT: l1pte_write_fault failed");
    1.97              perfc_incrc(write_fault_bail);
    1.98 @@ -2072,6 +2080,7 @@ static void shadow_update_pagetables(str
    1.99  /************************************************************************/
   1.100  /************************************************************************/
   1.101  
   1.102 +#if 0 // this code has not been updated for 32pae & 64 bit modes
   1.103  #if SHADOW_DEBUG
   1.104  
   1.105  // The following is entirely for _check_pagetable()'s benefit.
   1.106 @@ -2118,8 +2127,8 @@ mark_shadows_as_reflecting_snapshot(stru
   1.107  // BUG: these are not SMP safe...
   1.108  static int sh_l2_present;
   1.109  static int sh_l1_present;
   1.110 -char * sh_check_name;
   1.111 -int shadow_status_noswap;
   1.112 +static char *sh_check_name;
   1.113 +// int shadow_status_noswap; // declared in shadow32.c
   1.114  
   1.115  #define v2m(_v, _adr) ({                                                     \
   1.116      unsigned long _a  = (unsigned long)(_adr);                               \
   1.117 @@ -2218,11 +2227,11 @@ static int check_pte(
   1.118  
   1.119      guest_writable =
   1.120          (l1e_get_flags(eff_guest_pte) & _PAGE_RW) ||
   1.121 -        (VM_ASSIST(d, VMASST_TYPE_writable_pagetables) && (level == 1) && mfn_out_of_sync(eff_guest_mfn));
   1.122 +        (shadow_mode_write_l1(d) && (level == 1) && mfn_out_of_sync(eff_guest_mfn));
   1.123  
   1.124      if ( (l1e_get_flags(shadow_pte) & _PAGE_RW ) && !guest_writable )
   1.125      {
   1.126 -        printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=0x%08x page_table_page=%d\n",
   1.127 +        printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=0x%08lx page_table_page=%d\n",
   1.128                 eff_guest_pfn, eff_guest_mfn, shadow_mfn,
   1.129                 frame_table[eff_guest_mfn].u.inuse.type_info,
   1.130                 page_table_page);
   1.131 @@ -2233,7 +2242,7 @@ static int check_pte(
   1.132           (l1e_get_flags(shadow_pte) & _PAGE_RW ) &&
   1.133           !(guest_writable && (l1e_get_flags(eff_guest_pte) & _PAGE_DIRTY)) )
   1.134      {
   1.135 -        printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=0x%08x page_table_page=%d\n",
   1.136 +        printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=0x%08lx page_table_page=%d\n",
   1.137                 eff_guest_pfn, eff_guest_mfn, shadow_mfn,
   1.138                 frame_table[eff_guest_mfn].u.inuse.type_info,
   1.139                 page_table_page);
   1.140 @@ -2393,13 +2402,12 @@ static int check_l2_table(
   1.141  }
   1.142  #undef FAILPT
   1.143  
   1.144 -static int _check_pagetable(struct vcpu *v, char *s)
   1.145 +int _check_pagetable(struct vcpu *v, char *s)
   1.146  {
   1.147      struct domain *d = v->domain;
   1.148  #if defined (__x86_64__)
   1.149      pagetable_t pt = ((v->arch.flags & TF_kernel_mode)?
   1.150 -                      pagetable_get_pfn(v->arch.guest_table) :
   1.151 -                      pagetable_get_pfn(v->arch.guest_table_user));
   1.152 +                      v->arch.guest_table : v->arch.guest_table_user);
   1.153  #else
   1.154      pagetable_t pt = v->arch.guest_table;
   1.155  #endif
   1.156 @@ -2539,6 +2547,7 @@ int _check_all_pagetables(struct vcpu *v
   1.157  }
   1.158  
   1.159  #endif // SHADOW_DEBUG
   1.160 +#endif // this code has not been updated for 32pae & 64 bit modes
   1.161  
   1.162  #if CONFIG_PAGING_LEVELS == 3
   1.163  static unsigned long shadow_l3_table(
     2.1 --- a/xen/arch/x86/shadow32.c	Tue Nov 08 12:26:48 2005 +0100
     2.2 +++ b/xen/arch/x86/shadow32.c	Tue Nov 08 13:26:50 2005 +0100
     2.3 @@ -624,6 +624,14 @@ static void free_shadow_pages(struct dom
     2.4      // under us...  First, collect the list of pinned pages, then
     2.5      // free them.
     2.6      //
     2.7 +    // FIXME: it would be good to just free all the pages referred to in
     2.8 +    // the hash table without going through each of them to decrement their
     2.9 +    // reference counts.  In shadow_mode_refcount(), we've gotta do the hard
    2.10 +    // work, but only for L1 shadows.  If we're not in refcount mode, then
    2.11 +    // there's no real hard work to do at all.  Need to be careful with the
    2.12 +    // writable_pte_predictions and snapshot entries in the hash table, but
    2.13 +    // that's about it.
    2.14 +    //
    2.15      for ( i = 0; i < shadow_ht_buckets; i++ )
    2.16      {
    2.17          u32 count;
    2.18 @@ -634,17 +642,51 @@ static void free_shadow_pages(struct dom
    2.19              continue;
    2.20  
    2.21          count = 0;
    2.22 -        for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next )
    2.23 -            if ( MFN_PINNED(x->smfn) )
    2.24 -                count++;
    2.25 +
    2.26 +        for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next ) {
     2.27 +	    /* Skip entries that are snapshot or writable_pred */
    2.28 +	    switch(x->gpfn_and_flags & PGT_type_mask){
    2.29 +		case PGT_l1_shadow:
    2.30 +		case PGT_l2_shadow:
    2.31 +		case PGT_l3_shadow:
    2.32 +		case PGT_l4_shadow:
    2.33 +		case PGT_hl2_shadow:
    2.34 +		    if ( MFN_PINNED(x->smfn) )
    2.35 +			count++;
    2.36 +		    break;
    2.37 +		case PGT_snapshot:
    2.38 +		case PGT_writable_pred:
    2.39 +		    break;
    2.40 +		default:
    2.41 +		    BUG();
    2.42 +
    2.43 +	    }
    2.44 +	}
    2.45 +
    2.46          if ( !count )
    2.47              continue;
    2.48  
    2.49          mfn_list = xmalloc_array(unsigned long, count);
    2.50          count = 0;
    2.51 -        for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next )
    2.52 -            if ( MFN_PINNED(x->smfn) )
    2.53 -                mfn_list[count++] = x->smfn;
    2.54 +        for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next ) {
     2.55 +	    /* Skip entries that are snapshot or writable_pred */
    2.56 +	    switch(x->gpfn_and_flags & PGT_type_mask){
    2.57 +		case PGT_l1_shadow:
    2.58 +		case PGT_l2_shadow:
    2.59 +		case PGT_l3_shadow:
    2.60 +		case PGT_l4_shadow:
    2.61 +		case PGT_hl2_shadow:
    2.62 +		    if ( MFN_PINNED(x->smfn) )
    2.63 +			mfn_list[count++] = x->smfn;
    2.64 +		    break;
    2.65 +		case PGT_snapshot:
    2.66 +		case PGT_writable_pred:
    2.67 +		    break;
    2.68 +		default:
    2.69 +		    BUG();
    2.70 +
    2.71 +	    }
    2.72 +	}
    2.73  
    2.74          while ( count )
    2.75          {
    2.76 @@ -779,6 +821,7 @@ set_p2m_entry(struct domain *d, unsigned
    2.77      unsigned long va = pfn << PAGE_SHIFT;
    2.78  
    2.79      ASSERT(tabpfn != 0);
    2.80 +    ASSERT(shadow_lock_is_acquired(d));
    2.81  
    2.82      l2 = map_domain_page_with_cache(tabpfn, l2cache);
    2.83      l2e = l2[l2_table_offset(va)];
    2.84 @@ -2037,7 +2080,12 @@ free_writable_pte_predictions(struct dom
    2.85          while ( count )
    2.86          {
    2.87              count--;
    2.88 +            /* delete_shadow_status() may do a shadow_audit(), so we need to
    2.89 +             * keep an accurate count of writable_pte_predictions to keep it
    2.90 +             * happy.
    2.91 +             */
    2.92              delete_shadow_status(d, gpfn_list[count], 0, PGT_writable_pred);
    2.93 +            perfc_decr(writable_pte_predictions);
    2.94          }
    2.95  
    2.96          xfree(gpfn_list);
    2.97 @@ -2273,18 +2321,17 @@ static int resync_all(struct domain *d, 
    2.98  
    2.99          if ( !smfn )
   2.100          {
   2.101 +            // For heavy weight shadows: no need to update refcounts if
   2.102 +            // there's no shadow page.
   2.103 +            //
   2.104              if ( shadow_mode_refcounts(d) )
   2.105                  continue;
   2.106  
   2.107 -            // For light weight shadows, even when no shadow page exists,
   2.108 -            // we need to resync the refcounts to the new contents of the
   2.109 -            // guest page.
   2.110 -            // This only applies when we have writable page tables.
    2.111 +            // For light weight shadows: we only need to resync the refcounts
    2.112 +            // to the new contents of the guest page iff it has the right
    2.113 +            // page type.
   2.114              //
   2.115 -            if ( !shadow_mode_write_all(d) &&
   2.116 -                 !((stype == PGT_l1_shadow) &&
   2.117 -                   VM_ASSIST(d, VMASST_TYPE_writable_pagetables)) )
   2.118 -                // Page is not writable -- no resync necessary
   2.119 +            if ( stype != ( pfn_to_page(entry->gmfn)->u.inuse.type_info & PGT_type_mask) )
   2.120                  continue;
   2.121          }
   2.122  
   2.123 @@ -2312,8 +2359,8 @@ static int resync_all(struct domain *d, 
   2.124              l1_pgentry_t *snapshot1 = snapshot;
   2.125              int unshadow_l1 = 0;
   2.126  
   2.127 -            ASSERT(VM_ASSIST(d, VMASST_TYPE_writable_pagetables) ||
   2.128 -                   shadow_mode_write_all(d));
   2.129 +            ASSERT(shadow_mode_write_l1(d) ||
   2.130 +                   shadow_mode_write_all(d) || shadow_mode_wr_pt_pte(d));
   2.131  
   2.132              if ( !shadow_mode_refcounts(d) )
   2.133                  revalidate_l1(d, guest1, snapshot1);
   2.134 @@ -2380,7 +2427,7 @@ static int resync_all(struct domain *d, 
   2.135              l2_pgentry_t *shadow2 = shadow;
   2.136              l2_pgentry_t *snapshot2 = snapshot;
   2.137  
   2.138 -            ASSERT(shadow_mode_write_all(d));
   2.139 +            ASSERT(shadow_mode_write_all(d) || shadow_mode_wr_pt_pte(d));
   2.140              BUG_ON(!shadow_mode_refcounts(d)); // not yet implemented
   2.141  
   2.142              changed = 0;
   2.143 @@ -2426,7 +2473,7 @@ static int resync_all(struct domain *d, 
   2.144              l2_pgentry_t *snapshot2 = snapshot;
   2.145              l1_pgentry_t *shadow2 = shadow;
   2.146              
   2.147 -            ASSERT(shadow_mode_write_all(d));
   2.148 +            ASSERT(shadow_mode_write_all(d) || shadow_mode_wr_pt_pte(d));
   2.149              BUG_ON(!shadow_mode_refcounts(d)); // not yet implemented
   2.150  
   2.151              changed = 0;
   2.152 @@ -2619,8 +2666,13 @@ int shadow_fault(unsigned long va, struc
   2.153                  goto fail;
   2.154              }
   2.155          }
   2.156 -
   2.157 -        if ( !l1pte_write_fault(v, &gpte, &spte, va) )
   2.158 +        else if ( unlikely(!shadow_mode_wr_pt_pte(d) && mfn_is_page_table(l1e_get_pfn(gpte))) )
   2.159 +        {
   2.160 +            SH_LOG("l1pte_write_fault: no write access to page table page");
   2.161 +            domain_crash_synchronous();
   2.162 +        }
   2.163 +
   2.164 +        if ( unlikely(!l1pte_write_fault(v, &gpte, &spte, va)) )
   2.165          {
   2.166              SH_VVLOG("shadow_fault - EXIT: l1pte_write_fault failed");
   2.167              perfc_incrc(write_fault_bail);
   2.168 @@ -2954,7 +3006,7 @@ mark_shadows_as_reflecting_snapshot(stru
   2.169  // BUG: these are not SMP safe...
   2.170  static int sh_l2_present;
   2.171  static int sh_l1_present;
   2.172 -char * sh_check_name;
   2.173 +static char *sh_check_name;
   2.174  int shadow_status_noswap;
   2.175  
   2.176  #define v2m(_v, _adr) ({                                                     \
   2.177 @@ -3054,7 +3106,7 @@ static int check_pte(
   2.178  
   2.179      guest_writable =
   2.180          (l1e_get_flags(eff_guest_pte) & _PAGE_RW) ||
   2.181 -        (VM_ASSIST(d, VMASST_TYPE_writable_pagetables) && (level == 1) && mfn_out_of_sync(eff_guest_mfn));
   2.182 +        (shadow_mode_write_l1(d) && (level == 1) && mfn_out_of_sync(eff_guest_mfn));
   2.183  
   2.184      if ( (l1e_get_flags(shadow_pte) & _PAGE_RW ) && !guest_writable )
   2.185      {
     3.1 --- a/xen/arch/x86/vmx.c	Tue Nov 08 12:26:48 2005 +0100
     3.2 +++ b/xen/arch/x86/vmx.c	Tue Nov 08 13:26:50 2005 +0100
     3.3 @@ -79,7 +79,7 @@ void vmx_final_setup_guest(struct vcpu *
     3.4           * the shared 1:1 page table initially. It shouldn't hurt */
     3.5          shadow_mode_enable(v->domain,
     3.6                             SHM_enable|SHM_refcounts|
     3.7 -                           SHM_translate|SHM_external);
     3.8 +                           SHM_translate|SHM_external|SHM_wr_pt_pte);
     3.9      }
    3.10  
    3.11      vmx_switch_on = 1;
     4.1 --- a/xen/include/asm-x86/page.h	Tue Nov 08 12:26:48 2005 +0100
     4.2 +++ b/xen/include/asm-x86/page.h	Tue Nov 08 13:26:50 2005 +0100
     4.3 @@ -271,6 +271,9 @@ extern void paging_init(void);
     4.4  #define _PAGE_PAT      0x080U
     4.5  #define _PAGE_PSE      0x080U
     4.6  #define _PAGE_GLOBAL   0x100U
     4.7 +#define _PAGE_AVAIL0   0x200U
     4.8 +#define _PAGE_AVAIL1   0x400U
     4.9 +#define _PAGE_AVAIL2   0x800U
    4.10  #define _PAGE_AVAIL    0xE00U
    4.11  
    4.12  #define __PAGE_HYPERVISOR \
     5.1 --- a/xen/include/asm-x86/shadow.h	Tue Nov 08 12:26:48 2005 +0100
     5.2 +++ b/xen/include/asm-x86/shadow.h	Tue Nov 08 13:26:50 2005 +0100
     5.3 @@ -45,15 +45,21 @@
     5.4  #define SHM_write_all (1<<2) /* allow write access to all guest pt pages,
     5.5                                  regardless of pte write permissions */
     5.6  #define SHM_log_dirty (1<<3) /* enable log dirty mode */
     5.7 -#define SHM_translate (1<<4) /* do p2m tranaltion on guest tables */
     5.8 -#define SHM_external  (1<<5) /* external page table, not used by Xen */
     5.9 +#define SHM_translate (1<<4) /* Xen does p2m translation, not guest */
    5.10 +#define SHM_external  (1<<5) /* Xen does not steal address space from the
     5.11 +                                domain for its own bookkeeping; requires VT or
    5.12 +                                similar mechanisms */
    5.13 +#define SHM_wr_pt_pte (1<<6) /* guest allowed to set PAGE_RW bit in PTEs which
    5.14 +                                point to page table pages. */
    5.15  
    5.16  #define shadow_mode_enabled(_d)   ((_d)->arch.shadow_mode)
    5.17  #define shadow_mode_refcounts(_d) ((_d)->arch.shadow_mode & SHM_refcounts)
    5.18 +#define shadow_mode_write_l1(_d)  (VM_ASSIST(_d, VMASST_TYPE_writable_pagetables))
    5.19  #define shadow_mode_write_all(_d) ((_d)->arch.shadow_mode & SHM_write_all)
    5.20  #define shadow_mode_log_dirty(_d) ((_d)->arch.shadow_mode & SHM_log_dirty)
    5.21  #define shadow_mode_translate(_d) ((_d)->arch.shadow_mode & SHM_translate)
    5.22  #define shadow_mode_external(_d)  ((_d)->arch.shadow_mode & SHM_external)
    5.23 +#define shadow_mode_wr_pt_pte(_d) ((_d)->arch.shadow_mode & SHM_wr_pt_pte)
    5.24  
    5.25  #define shadow_linear_pg_table ((l1_pgentry_t *)SH_LINEAR_PT_VIRT_START)
    5.26  #define __shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START + \
    5.27 @@ -324,8 +330,7 @@ struct out_of_sync_entry {
    5.28  
    5.29  #if SHADOW_DEBUG
    5.30  extern int shadow_status_noswap;
    5.31 -#define _SHADOW_REFLECTS_SNAPSHOT ( 9)
    5.32 -#define SHADOW_REFLECTS_SNAPSHOT  (1u << _SHADOW_REFLECTS_SNAPSHOT)
    5.33 +#define SHADOW_REFLECTS_SNAPSHOT _PAGE_AVAIL0
    5.34  #endif
    5.35  
    5.36  #ifdef VERBOSE
    5.37 @@ -1474,7 +1479,8 @@ static inline void set_shadow_status(
    5.38              if ( stype != PGT_writable_pred )
    5.39                  BUG(); // we should never replace entries into the hash table
    5.40              x->smfn = smfn;
    5.41 -            put_page(pfn_to_page(gmfn)); // already had a ref...
    5.42 +            if ( stype != PGT_writable_pred )
    5.43 +                put_page(pfn_to_page(gmfn)); // already had a ref...
    5.44              goto done;
    5.45          }
    5.46  
    5.47 @@ -1656,14 +1662,18 @@ shadow_mode_page_writable(unsigned long 
    5.48           (type == PGT_writable_page) )
    5.49          type = shadow_max_pgtable_type(d, gpfn, NULL);
    5.50  
    5.51 -    if ( VM_ASSIST(d, VMASST_TYPE_writable_pagetables) &&
    5.52 -         (type == PGT_l1_page_table) &&
    5.53 -         (va < HYPERVISOR_VIRT_START) &&
    5.54 -         KERNEL_MODE(v, regs) )
    5.55 -        return 1;
    5.56 -
    5.57 -    if ( shadow_mode_write_all(d) &&
    5.58 -         type && (type <= PGT_l4_page_table) &&
    5.59 +    // Strange but true: writable page tables allow kernel-mode access
    5.60 +    // to L1 page table pages via write-protected PTEs...  Similarly, write 
    5.61 +    // access to all page table pages is granted for shadow_mode_write_all
    5.62 +    // clients.
    5.63 +    //
    5.64 +    if ( ((shadow_mode_write_l1(d) && (type == PGT_l1_page_table)) ||
    5.65 +          (shadow_mode_write_all(d) && type && (type <= PGT_l4_page_table))) &&
    5.66 +         ((va < HYPERVISOR_VIRT_START)
    5.67 +#if defined(__x86_64__)
    5.68 +          || (va >= HYPERVISOR_VIRT_END)
    5.69 +#endif
    5.70 +             ) &&
    5.71           KERNEL_MODE(v, regs) )
    5.72          return 1;
    5.73