ia64/xen-unstable

changeset 4315:288deb913f46

bitkeeper revision 1.1265 (42435d13hIiIzrasNZHbz13uy4ZTKg)

First attempt at cleanup after merge of shadow code with unstable.

Signed-off-by: michael.fetterman@cl.cam.ac.uk
author mafetter@fleming.research
date Fri Mar 25 00:36:35 2005 +0000 (2005-03-25)
parents a01199a95070
children 571783a694e5
files xen/arch/x86/audit.c xen/arch/x86/domain.c xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/include/asm-x86/domain.h xen/include/asm-x86/mm.h xen/include/asm-x86/shadow.h xen/include/xen/perfc_defn.h
line diff
     1.1 --- a/xen/arch/x86/audit.c	Thu Mar 24 22:52:13 2005 +0000
     1.2 +++ b/xen/arch/x86/audit.c	Fri Mar 25 00:36:35 2005 +0000
     1.3 @@ -431,8 +431,7 @@ int audit_adjust_pgtables(struct domain 
     1.4              mfn = page_to_pfn(page);
     1.5              page_type = page->u.inuse.type_info & PGT_type_mask;
     1.6  
     1.7 -            if ( page_get_owner(page) != d )
     1.8 -                BUG();
     1.9 +            BUG_ON(page_get_owner(page) != d);
    1.10  
    1.11              page_count++;
    1.12  
    1.13 @@ -563,6 +562,31 @@ int audit_adjust_pgtables(struct domain 
    1.14  
    1.15  #ifndef NDEBUG
    1.16  
    1.17 +void audit_pagelist(struct domain *d)
    1.18 +{
    1.19 +    struct list_head *list_ent;
    1.20 +    int xenpages, totpages;
    1.21 +
    1.22 +    list_ent = d->xenpage_list.next;
    1.23 +    for ( xenpages = 0; (list_ent != &d->xenpage_list); xenpages++ )
    1.24 +    {
    1.25 +        list_ent = list_ent->next;
    1.26 +    }
    1.27 +    list_ent = d->page_list.next;
    1.28 +    for ( totpages = 0; (list_ent != &d->page_list); totpages++ )
    1.29 +    {
    1.30 +        list_ent = list_ent->next;
    1.31 +    }
    1.32 +
    1.33 +    if ( xenpages != d->xenheap_pages ||
    1.34 +         totpages != d->tot_pages )
    1.35 +    {
    1.36 +        printk("ARGH! dom %d: xen=%d %d, pages=%d %d\n",
    1.37 +               d->id, xenpages, d->xenheap_pages,
    1.38 +               totpages, d->tot_pages);
    1.39 +    }
    1.40 +}
    1.41 +
    1.42  void _audit_domain(struct domain *d, int flags)
    1.43  {
    1.44      void scan_for_pfn_in_mfn(struct domain *d, unsigned long xmfn,
    1.45 @@ -668,6 +692,8 @@ void _audit_domain(struct domain *d, int
    1.46  
    1.47      spin_lock(&d->page_alloc_lock);
    1.48  
    1.49 +    audit_pagelist(d);
    1.50 +
    1.51      /* PHASE 0 */
    1.52  
    1.53      list_ent = d->page_list.next;
    1.54 @@ -679,8 +705,7 @@ void _audit_domain(struct domain *d, int
    1.55          mfn = page_to_pfn(page);
    1.56          page_type = page->u.inuse.type_info & PGT_type_mask;
    1.57  
    1.58 -        if ( page_get_owner(page) != d )
    1.59 -            BUG();
    1.60 +        BUG_ON(page_get_owner(page) != d);
    1.61  
    1.62          if ( (page->u.inuse.type_info & PGT_count_mask) >
    1.63               (page->count_info & PGC_count_mask) )
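
audit_pagelist(), moved here from mm.c, walks both of the domain's page lists under d->page_alloc_lock (taken by _audit_domain() just before the call) and cross-checks the list lengths against the cached totals. The same check written with the list_for_each iterator from xen/list.h, as a compact sketch (the helper function is illustrative, not in the tree):

    #include <xen/list.h>
    #include <xen/sched.h>

    /* Count the entries on a list; illustrative helper, not in the tree. */
    static int count_list(struct list_head *head)
    {
        struct list_head *ent;
        int n = 0;

        list_for_each ( ent, head )
            n++;
        return n;
    }

    /* Cross-check cached page counts against the actual list lengths.
     * Caller must hold d->page_alloc_lock, as _audit_domain() does. */
    static void audit_pagelist_sketch(struct domain *d)
    {
        int xenpages = count_list(&d->xenpage_list);
        int totpages = count_list(&d->page_list);

        if ( xenpages != d->xenheap_pages || totpages != d->tot_pages )
            printk("dom %d: xen=%d/%d, pages=%d/%d\n", d->id,
                   xenpages, d->xenheap_pages, totpages, d->tot_pages);
    }
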
     2.1 --- a/xen/arch/x86/domain.c	Thu Mar 24 22:52:13 2005 +0000
     2.2 +++ b/xen/arch/x86/domain.c	Fri Mar 25 00:36:35 2005 +0000
     2.3 @@ -256,6 +256,9 @@ void arch_do_createdomain(struct exec_do
     2.4          ed->arch.shadow_vtable = __shadow_linear_l2_table;
     2.5  
     2.6  #ifdef __x86_64__
     2.7 +        ed->arch.guest_vl3table = __linear_l3_table;
     2.8 +        ed->arch.guest_vl4table = __linear_l4_table;
     2.9 +
    2.10          d->arch.mm_perdomain_l2 = (l2_pgentry_t *)alloc_xenheap_page();
    2.11          memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
    2.12          d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)] = 
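
The new guest_vl3table/guest_vl4table pointers are initialised to the linear page-table aliases. Such aliases come from the recursive-mapping trick: one top-level slot maps the top-level table itself, so every page-table entry of the current address space becomes readable at a fixed virtual address; that is what makes expressions like linear_pg_table[l1_linear_offset(va)] in the mm.c hunks below work. The address arithmetic in a minimal sketch (PT_BASE is a hypothetical stand-in for the real linear-mapping base constant, not a Xen name):

    /* Recursive-mapping arithmetic sketch. PT_BASE stands in for the
     * real base of the linear page-table area; it is not a Xen name. */
    #define PT_BASE ((unsigned long *)0xFFFF808000000000UL) /* hypothetical */

    static inline unsigned long *va_to_pte(unsigned long va)
    {
        /* With one top-level slot mapping the tables themselves, all L1
         * entries appear as one flat array indexed by the page number. */
        return PT_BASE + (va >> PAGE_SHIFT);
    }
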
     3.1 --- a/xen/arch/x86/mm.c	Thu Mar 24 22:52:13 2005 +0000
     3.2 +++ b/xen/arch/x86/mm.c	Fri Mar 25 00:36:35 2005 +0000
     3.3 @@ -1988,48 +1988,58 @@ int do_mmu_update(
     3.4      return rc;
     3.5  }
     3.6  
     3.7 -void update_shadow_va_mapping(unsigned long va,
     3.8 -                              unsigned long val,
     3.9 -                              struct exec_domain *ed,
    3.10 -                              struct domain *d)
    3.11 +/* This function assumes the caller is holding the domain's BIGLOCK
    3.12 + * and is running in a shadow mode
    3.13 + */
    3.14 +int update_shadow_va_mapping(unsigned long va,
    3.15 +                             unsigned long val,
    3.16 +                             struct exec_domain *ed,
    3.17 +                             struct domain *d)
    3.18  {
    3.19 -    /* This function assumes the caller is holding the domain's BIGLOCK
    3.20 -     * and is running in a shadow mode
    3.21 +    unsigned long l1mfn;
    3.22 +    unsigned long spte;
    3.23 +    int rc = 0;
    3.24 +
    3.25 +    check_pagetable(ed, "pre-va"); /* debug */
    3.26 +    shadow_lock(d);
    3.27 +        
    3.28 +    // This is actually overkill - we don't need to sync the L1 itself,
    3.29 +    // just everything involved in getting to this L1 (i.e. we need
    3.30 +    // linear_pg_table[l1_linear_offset(va)] to be in sync)...
    3.31 +    //
    3.32 +    __shadow_sync_va(ed, va);
    3.33 +
    3.34 +#if 1 /* keep check_pagetables() happy */
    3.35 +    /*
    3.36 +     * However, the above doesn't guarantee that there's no snapshot of
    3.37 +     * the L1 table in question; it just says that the relevant L2 and L1
    3.38 +     * entries for VA are in-sync.  There might still be a snapshot.
    3.39 +     *
    3.40 +     * The checking code in _check_pagetables() assumes that no one will
    3.41 +     * mutate the shadow of a page that has a snapshot.  It's actually
    3.42 +     * OK to not sync this page, but it seems simpler to:
    3.43 +     * 1) keep all code paths the same, and
    3.44 +     * 2) maintain the invariant for _check_pagetables(), rather than try
    3.45 +     *    to teach it about this boundary case.
    3.46 +     * So we flush this L1 page, if it's out of sync.
    3.47       */
    3.48 -
    3.49 -    unsigned long   sval = 0;
    3.50 -
    3.51 -    l1pte_propagate_from_guest(d, &val, &sval);
    3.52 -
    3.53 -    if ( unlikely(__put_user(sval, ((unsigned long *)(
    3.54 -        &shadow_linear_pg_table[l1_linear_offset(va)])))) )
    3.55 +    l1mfn = (l2_pgentry_val(linear_l2_table(ed)[l2_table_offset(va)]) >>
    3.56 +             PAGE_SHIFT);
    3.57 +    if ( mfn_out_of_sync(l1mfn) )
    3.58      {
    3.59 -        /*
    3.60 -         * Since L2's are guaranteed RW, failure indicates either that the
    3.61 -         * page was not shadowed, or that the L2 entry has not yet been
    3.62 -         * updated to reflect the shadow.
    3.63 -         */
    3.64 -
    3.65 -        /* Can't use linear_l2_table with external tables. */
    3.66 -        BUG_ON(shadow_mode_external(current->domain));
    3.67 -
    3.68 -        l2_pgentry_t gpde = linear_l2_table[l2_table_offset(va)];
    3.69 -        unsigned long gpfn = l2_pgentry_val(gpde) >> PAGE_SHIFT;
    3.70 -
    3.71 -        if (get_shadow_status(d, gpfn))
    3.72 -        {
    3.73 -            unsigned long gmfn = __gpfn_to_mfn(d, gpfn);
    3.74 -            unsigned long *gl1e = map_domain_mem(gmfn << PAGE_SHIFT);
    3.75 -            unsigned l1_idx = l1_table_offset(va);
    3.76 -            gl1e[l1_idx] = sval;
    3.77 -            unmap_domain_mem(gl1e);
    3.78 -            put_shadow_status(d);
    3.79 -
    3.80 -            perfc_incrc(shadow_update_va_fail1);
    3.81 -        }
    3.82 -        else
    3.83 -            perfc_incrc(shadow_update_va_fail2);
    3.84 +        perfc_incrc(extra_va_update_sync);
    3.85 +        __shadow_sync_mfn(d, l1mfn);
    3.86      }
    3.87 +#endif /* keep check_pagetables() happy */
    3.88 +
    3.89 +    if ( unlikely(__put_user(val, &l1_pgentry_val(
    3.90 +                                 linear_pg_table[l1_linear_offset(va)]))) )
    3.91 +        return -EINVAL;
    3.92 +
    3.93 +    // also need to update the shadow
    3.94 +
    3.95 +    l1pte_propagate_from_guest(d, val, &spte);
    3.96 +    shadow_set_l1e(va, spte, 0);
    3.97  
    3.98      /*
    3.99       * If we're in log-dirty mode then we need to note that we've updated
   3.100 @@ -2037,9 +2047,12 @@ void update_shadow_va_mapping(unsigned l
   3.101       * for this.
   3.102       */
   3.103      if ( shadow_mode_log_dirty(d) )
   3.104 -        mark_dirty(d, va_to_l1mfn(va));
   3.105 -
   3.106 -    check_pagetable(d, ed->arch.guest_table, "va"); /* debug */
   3.107 +        mark_dirty(d, va_to_l1mfn(ed, va));
   3.108 +
   3.109 +    shadow_unlock(d);
   3.110 +    check_pagetable(ed, "post-va"); /* debug */
   3.111 +
   3.112 +    return rc;
   3.113  }
   3.114  
   3.115  int update_grant_va_mapping(unsigned long va,
   3.116 @@ -2104,8 +2117,21 @@ int do_update_va_mapping(unsigned long v
   3.117       * XXX When we make this support 4MB superpages we should also deal with 
   3.118       * the case of updating L2 entries.
   3.119       */
   3.120 -    if ( unlikely(!shadow_mode_enabled(d)) )
   3.121 +    if ( unlikely(shadow_mode_enabled(d)) )
   3.122 +    {
   3.123 +        if ( unlikely(percpu_info[cpu].foreign &&
   3.124 +                      (shadow_mode_translate(d) ||
   3.125 +                       shadow_mode_translate(percpu_info[cpu].foreign))) )
   3.126 +        {
   3.127 +            // The foreign domain's pfn's are in a different namespace.
   3.128 +            // There's not enough information in just a gpte to figure out
   3.129 +            // how to (re-)shadow this entry.
   3.130 +            //
   3.131 +            domain_crash();
   3.132 +        }
   3.133 +    
   3.134          rc = update_shadow_va_mapping(va, val, ed, d);
   3.135 +    }
   3.136      else if ( unlikely(!mod_l1_entry(&linear_pg_table[l1_linear_offset(va)],
   3.137                                       mk_l1_pgentry(val))) )
   3.138          rc = -EINVAL;
   3.139 @@ -2484,7 +2510,7 @@ void ptwr_flush(const int which)
   3.140  
   3.141      if ( which == PTWR_PT_ACTIVE )
   3.142      {
   3.143 -        pl2e = &linear_l2_table(ed)[ptwr_info[cpu].ptinfo[which].l2_idx];
   3.144 +        pl2e = &__linear_l2_table[ptwr_info[cpu].ptinfo[which].l2_idx];
   3.145          *pl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT); 
   3.146      }
   3.147  
   3.148 @@ -2502,9 +2528,9 @@ static int ptwr_emulated_update(
   3.149      unsigned int bytes,
   3.150      unsigned int do_cmpxchg)
   3.151  {
   3.152 -    unsigned long sstat, pte, pfn;
   3.153 +    unsigned long pte, pfn;
   3.154      struct pfn_info *page;
   3.155 -    l1_pgentry_t ol1e, nl1e, *pl1e, *sl1e;
   3.156 +    l1_pgentry_t ol1e, nl1e, *pl1e;
   3.157      struct domain *d = current->domain;
   3.158  
   3.159      /* Aligned access only, thank you. */
   3.160 @@ -2581,6 +2607,8 @@ static int ptwr_emulated_update(
   3.161      /* Propagate update to shadow cache. */
   3.162      if ( unlikely(shadow_mode_enabled(d)) )
   3.163      {
   3.164 +        BUG(); // XXX fix me...
   3.165 +#if 0
   3.166          sstat = get_shadow_status(d, page_to_pfn(page));
   3.167          if ( sstat & PSH_shadowed )
   3.168          {
   3.169 @@ -2590,6 +2618,7 @@ static int ptwr_emulated_update(
   3.170                  d, &l1_pgentry_val(nl1e), &l1_pgentry_val(*sl1e));
   3.171              unmap_domain_mem(sl1e);
   3.172          }
   3.173 +#endif
   3.174      }
   3.175  
   3.176      /* Finally, drop the old PTE. */
   3.177 @@ -2636,14 +2665,11 @@ int ptwr_do_page_fault(unsigned long add
   3.178      // not supported in combination with various shadow modes!
   3.179      ASSERT( !shadow_mode_enabled(ed->domain) );
   3.180  
   3.181 -    /* Can't use linear_l2_table with external tables. */
   3.182 -    BUG_ON(shadow_mode_external(ed->domain));
   3.183 -
   3.184      /*
   3.185       * Attempt to read the PTE that maps the VA being accessed. By checking for
   3.186       * PDE validity in the L2 we avoid many expensive fixups in __get_user().
   3.187       */
   3.188 -    if ( !(l2_pgentry_val(linear_l2_table(ed)[addr>>L2_PAGETABLE_SHIFT]) &
   3.189 +    if ( !(l2_pgentry_val(__linear_l2_table[addr>>L2_PAGETABLE_SHIFT]) &
   3.190             _PAGE_PRESENT) ||
   3.191           __get_user(pte, (unsigned long *)
   3.192                      &linear_pg_table[l1_linear_offset(addr)]) )
   3.193 @@ -2684,7 +2710,7 @@ int ptwr_do_page_fault(unsigned long add
   3.194       * Is the L1 p.t. mapped into the current address space? If so we call it
   3.195       * an ACTIVE p.t., otherwise it is INACTIVE.
   3.196       */
   3.197 -    pl2e = &linear_l2_table(ed)[l2_idx];
   3.198 +    pl2e = &__linear_l2_table[l2_idx];
   3.199      l2e  = l2_pgentry_val(*pl2e);
   3.200      which = PTWR_PT_INACTIVE;
   3.201      if ( (l2e >> PAGE_SHIFT) == pfn )
   3.202 @@ -2824,31 +2850,6 @@ void ptwr_status(void)
   3.203      page = &frame_table[pfn];
   3.204  }
   3.205  
   3.206 -void audit_pagelist(struct domain *d)
   3.207 -{
   3.208 -    struct list_head *list_ent;
   3.209 -    int xenpages, totpages;
   3.210 -
   3.211 -    list_ent = d->xenpage_list.next;
   3.212 -    for ( xenpages = 0; (list_ent != &d->xenpage_list); xenpages++ )
   3.213 -    {
   3.214 -        list_ent = list_ent->next;
   3.215 -    }
   3.216 -    list_ent = d->page_list.next;
   3.217 -    for ( totpages = 0; (list_ent != &d->page_list); totpages++ )
   3.218 -    {
   3.219 -        list_ent = list_ent->next;
   3.220 -    }
   3.221 -
   3.222 -    if ( xenpages != d->xenheap_pages ||
   3.223 -         totpages != d->tot_pages )
   3.224 -    {
   3.225 -        printk("ARGH! dom %d: xen=%d %d, pages=%d %d\n",
   3.226 -               xenpages, d->xenheap_pages, 
   3.227 -               totpages, d->tot_pages );
   3.228 -    }
   3.229 -}
   3.230 -
   3.231  #endif /* NDEBUG */
   3.232  
   3.233  /*
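
The rewritten update_shadow_va_mapping() above is the heart of this change: it syncs the shadow state for va, writes the guest PTE through the linear mapping, then propagates the new value into the shadow. Condensing the flow into one sketch (helper names are those used in the hunks above; note that the sketch releases the shadow lock on the early -EINVAL exit, since the lock is taken at entry):

    /* Condensed sketch of the new update_shadow_va_mapping() flow.
     * Helpers are those used in the hunks above; error handling is
     * reduced to the single __put_user() failure case. */
    int update_shadow_va_mapping_sketch(unsigned long va, unsigned long val,
                                        struct exec_domain *ed,
                                        struct domain *d)
    {
        unsigned long spte;

        shadow_lock(d);

        /* Make linear_pg_table[l1_linear_offset(va)] valid for this va. */
        __shadow_sync_va(ed, va);

        /* Write the guest PTE through the linear mapping... */
        if ( unlikely(__put_user(val, &l1_pgentry_val(
                                     linear_pg_table[l1_linear_offset(va)]))) )
        {
            shadow_unlock(d);   /* drop the lock on the early exit */
            return -EINVAL;
        }

        /* ...and mirror the update into the shadow table. */
        l1pte_propagate_from_guest(d, val, &spte);
        shadow_set_l1e(va, spte, 0);

        /* Log-dirty mode must see guest-visible PTE writes. */
        if ( shadow_mode_log_dirty(d) )
            mark_dirty(d, va_to_l1mfn(ed, va));

        shadow_unlock(d);
        return 0;
    }
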
     4.1 --- a/xen/arch/x86/shadow.c	Thu Mar 24 22:52:13 2005 +0000
     4.2 +++ b/xen/arch/x86/shadow.c	Fri Mar 25 00:36:35 2005 +0000
     4.3 @@ -1245,11 +1245,11 @@ void vmx_shadow_clear_state(struct domai
     4.4  }
     4.5  
     4.6  unsigned long
     4.7 -gpfn_to_mfn_safe(struct domain *d, unsigned long gpfn)
     4.8 +translate_gpfn_to_mfn(struct domain *d, unsigned long gpfn)
     4.9  {
    4.10      ASSERT( shadow_mode_translate(d) );
    4.11  
    4.12 -    perfc_incrc(gpfn_to_mfn_safe);
    4.13 +    perfc_incrc(translate_gpfn_to_mfn);
    4.14  
    4.15      unsigned long va = gpfn << PAGE_SHIFT;
    4.16      unsigned long phystab = pagetable_val(d->arch.phys_table);
    4.17 @@ -1258,7 +1258,7 @@ gpfn_to_mfn_safe(struct domain *d, unsig
    4.18      unmap_domain_mem(l2);
    4.19      if ( !(l2_pgentry_val(l2e) & _PAGE_PRESENT) )
    4.20      {
    4.21 -        printk("gpfn_to_mfn_safe(d->id=%d, gpfn=%p) => 0 l2e=%p\n",
    4.22 +        printk("translate_gpfn_to_mfn(d->id=%d, gpfn=%p) => 0 l2e=%p\n",
    4.23                 d->id, gpfn, l2_pgentry_val(l2e));
    4.24          return INVALID_MFN;
    4.25      }
    4.26 @@ -1267,12 +1267,14 @@ gpfn_to_mfn_safe(struct domain *d, unsig
    4.27      l1_pgentry_t l1e = l1[l1_table_offset(va)];
    4.28      unmap_domain_mem(l1);
    4.29  
    4.30 -    printk("gpfn_to_mfn_safe(d->id=%d, gpfn=%p) => %p phystab=%p l2e=%p l1tab=%p, l1e=%p\n",
    4.31 +#if 0
    4.32 +    printk("translate_gpfn_to_mfn(d->id=%d, gpfn=%p) => %p phystab=%p l2e=%p l1tab=%p, l1e=%p\n",
    4.33             d->id, gpfn, l1_pgentry_val(l1e) >> PAGE_SHIFT, phystab, l2e, l1tab, l1e);
    4.34 +#endif
    4.35  
    4.36      if ( !(l1_pgentry_val(l1e) & _PAGE_PRESENT) )
    4.37      {
    4.38 -        printk("gpfn_to_mfn_safe(d->id=%d, gpfn=%p) => 0 l1e=%p\n",
    4.39 +        printk("translate_gpfn_to_mfn(d->id=%d, gpfn=%p) => 0 l1e=%p\n",
    4.40                 d->id, gpfn, l1_pgentry_val(l1e));
    4.41          return INVALID_MFN;
    4.42      }
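
translate_gpfn_to_mfn() (renamed from gpfn_to_mfn_safe) resolves a guest pfn by walking the domain's phys_table, which is an ordinary two-level x86 page table indexed by gpfn << PAGE_SHIFT. The full walk, assembled from the fragments above (the PAGE_MASK derivation of l1tab is the standard masking, not shown in the hunk):

    /* Two-level walk of d->arch.phys_table, as in translate_gpfn_to_mfn().
     * Returns INVALID_MFN if either level is not present. Sketch only. */
    unsigned long phys_table_walk(struct domain *d, unsigned long gpfn)
    {
        unsigned long va = gpfn << PAGE_SHIFT;
        unsigned long phystab = pagetable_val(d->arch.phys_table);

        l2_pgentry_t *l2 = map_domain_mem(phystab);
        l2_pgentry_t l2e = l2[l2_table_offset(va)];
        unmap_domain_mem(l2);
        if ( !(l2_pgentry_val(l2e) & _PAGE_PRESENT) )
            return INVALID_MFN;

        unsigned long l1tab = l2_pgentry_val(l2e) & PAGE_MASK;
        l1_pgentry_t *l1 = map_domain_mem(l1tab);
        l1_pgentry_t l1e = l1[l1_table_offset(va)];
        unmap_domain_mem(l1);
        if ( !(l1_pgentry_val(l1e) & _PAGE_PRESENT) )
            return INVALID_MFN;

        return l1_pgentry_val(l1e) >> PAGE_SHIFT;
    }
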
     5.1 --- a/xen/include/asm-x86/domain.h	Thu Mar 24 22:52:13 2005 +0000
     5.2 +++ b/xen/include/asm-x86/domain.h	Fri Mar 25 00:36:35 2005 +0000
     5.3 @@ -127,6 +127,11 @@ struct arch_exec_domain
     5.4      l2_pgentry_t *monitor_vtable;		/* virtual address of monitor_table */
     5.5      l1_pgentry_t *hl2_vtable;			/* virtual address of hl2_table */
     5.6  
     5.7 +#ifdef __x86_64__
     5.8 +    l3_pgentry_t *guest_vl3table;
     5.9 +    l4_pgentry_t *guest_vl4table;
    5.10 +#endif
    5.11 +
    5.12      unsigned long monitor_shadow_ref;
    5.13  
    5.14      /* Virtual CR2 value. Can be read/written by guest. */
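
The two new fields are x86_64-only, extending the existing *_vtable pointers up to the third and fourth paging levels; arch_do_createdomain() points them at __linear_l3_table and __linear_l4_table. A hypothetical accessor showing the intended access pattern (this helper is illustrative, not part of the changeset):

    #ifdef __x86_64__
    /* Hypothetical helper: read the guest L3 entry covering va via the
     * alias installed in arch_do_createdomain(). Illustrative only. */
    static inline l3_pgentry_t guest_l3e(struct exec_domain *ed,
                                         unsigned long va)
    {
        return ed->arch.guest_vl3table[l3_table_offset(va)];
    }
    #endif
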
     6.1 --- a/xen/include/asm-x86/mm.h	Thu Mar 24 22:52:13 2005 +0000
     6.2 +++ b/xen/include/asm-x86/mm.h	Fri Mar 25 00:36:35 2005 +0000
     6.3 @@ -4,6 +4,7 @@
     6.4  
     6.5  #include <xen/config.h>
     6.6  #include <xen/list.h>
     6.7 +#include <xen/sched.h>
     6.8  #include <asm/io.h>
     6.9  
    6.10  /*
    6.11 @@ -180,12 +181,13 @@ static inline int get_page(struct pfn_in
    6.12               unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
    6.13               unlikely(d != _domain) )                /* Wrong owner? */
    6.14          {
    6.15 -          if ( !domain->arch.shadow_mode )
    6.16 -            DPRINTK("Error pfn %p: rd=%p(%d), od=%p(%d), caf=%08x, taf=%08x\n",
    6.17 -                    page_to_pfn(page), domain, (domain ? domain->id : -1),
    6.18 -                    page_get_owner(page),
    6.19 -                    (page_get_owner(page) ? page_get_owner(page)->id : -1),
    6.20 -                    x, page->u.inuse.type_info);
    6.21 +            if ( !domain->arch.shadow_mode )
    6.22 +                DPRINTK("Error pfn %p: rd=%p(%d), od=%p(%d), caf=%08x, "
    6.23 +                        "taf=%08x\n",
    6.24 +                        page_to_pfn(page), domain, (domain ? domain->id : -1),
    6.25 +                        page_get_owner(page),
    6.26 +                        (page_get_owner(page) ? page_get_owner(page)->id : -1),
    6.27 +                        x, page->u.inuse.type_info);
    6.28              return 0;
    6.29          }
    6.30          __asm__ __volatile__(
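
For context, the reindented hunk sits inside get_page()'s lock-free acquire path: the candidate count is validated and then published with a compare-exchange, retrying on races. A simplified sketch of that shape, assuming the usual cmpxchg() helper (the real routine compare-exchanges the owner field together with the count, which is why the asm block follows):

    /* Simplified shape of get_page()'s lock-free reference acquire.
     * Sketch only: the real code atomically checks/updates the owner
     * together with count_info via a wider compare-exchange. */
    static inline int get_page_sketch(struct pfn_info *page,
                                      struct domain *domain)
    {
        u32 x, nx, y = page->count_info;

        do {
            x  = y;
            nx = x + 1;
            if ( unlikely((x  & PGC_count_mask) == 0) ||    /* Not allocated? */
                 unlikely((nx & PGC_count_mask) == 0) ||    /* Count overflow? */
                 unlikely(page_get_owner(page) != domain) ) /* Wrong owner? */
                return 0;
            /* Publish the new count iff nobody raced us in between. */
            y = cmpxchg(&page->count_info, x, nx);
        } while ( unlikely(y != x) );

        return 1;
    }
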
     7.1 --- a/xen/include/asm-x86/shadow.h	Thu Mar 24 22:52:13 2005 +0000
     7.2 +++ b/xen/include/asm-x86/shadow.h	Fri Mar 25 00:36:35 2005 +0000
     7.3 @@ -195,9 +195,7 @@ static inline void shadow_mode_disable(s
     7.4        ? translate_gpfn_to_mfn(_d, gpfn)                \
     7.5        : (gpfn) )
     7.6  
     7.7 -#define translate_gpfn_to_mfn gpfn_to_mfn_safe
     7.8 -
     7.9 -extern unsigned long gpfn_to_mfn_safe(
    7.10 +extern unsigned long translate_gpfn_to_mfn(
    7.11      struct domain *d, unsigned long gpfn);
    7.12  
    7.13  /************************************************************************/
    7.14 @@ -659,12 +657,11 @@ static inline void hl2e_propagate_from_g
    7.15          if ( unlikely((current->domain != d) && !shadow_mode_external(d)) )
    7.16          {
    7.17              // Can't use __gpfn_to_mfn() if we don't have one of this domain's
    7.18 -            // page tables currently installed.  What a pain in the neck!
    7.19 -            //
    7.20 +            // page tables currently installed.
    7.21              // This isn't common -- it only happens during shadow mode setup
    7.22              // and mode changes.
    7.23              //
    7.24 -            mfn = gpfn_to_mfn_safe(d, pfn);
    7.25 +            mfn = translate_gpfn_to_mfn(d, pfn);
    7.26          }
    7.27          else
    7.28              mfn = __gpfn_to_mfn(d, pfn);
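
For reference, the two context lines at the top of this hunk are the tail of the __gpfn_to_mfn() dispatch that the renamed function plugs into: translate-mode domains go through the phys_table lookup, everyone else treats gpfn and mfn as the same number. Reconstructed around those context lines (the shadow_mode_translate() guard is inferred from translate_gpfn_to_mfn()'s ASSERT, not quoted from the file):

    /* Reconstruction; only the last two lines appear verbatim above. */
    #define __gpfn_to_mfn(_d, gpfn)                  \
        ( shadow_mode_translate(_d)                  \
          ? translate_gpfn_to_mfn(_d, gpfn)          \
          : (gpfn) )
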
     8.1 --- a/xen/include/xen/perfc_defn.h	Thu Mar 24 22:52:13 2005 +0000
     8.2 +++ b/xen/include/xen/perfc_defn.h	Fri Mar 25 00:36:35 2005 +0000
     8.3 @@ -98,7 +98,7 @@ PERFCOUNTER_CPU(shadow_get_page_fail,   
     8.4  PERFCOUNTER_CPU(validate_hl2e_calls,               "calls to validate_hl2e_change")
     8.5  PERFCOUNTER_CPU(validate_hl2e_changes,             "validate_hl2e makes changes")
     8.6  PERFCOUNTER_CPU(exception_fixed,                   "pre-exception fixed")
     8.7 -PERFCOUNTER_CPU(gpfn_to_mfn_safe,                  "calls to gpfn_to_mfn_safe")
     8.8 +PERFCOUNTER_CPU(translate_gpfn_to_mfn,             "calls to translate_gpfn_to_mfn")
     8.9  PERFCOUNTER_CPU(remove_write_access,               "calls to remove_write_access")
    8.10  PERFCOUNTER_CPU(remove_write_access_easy,          "easy outs of remove_write_access")
    8.11  PERFCOUNTER_CPU(remove_write_no_work,              "no work in remove_write_access")
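
Each PERFCOUNTER_CPU(name, "description") line in this file declares a per-CPU counter that call sites bump with perfc_incrc(name), so the rename here has to travel together with the shadow.c changes above. The pairing, taken from this changeset:

    /* Declaration (xen/include/xen/perfc_defn.h): */
    PERFCOUNTER_CPU(translate_gpfn_to_mfn, "calls to translate_gpfn_to_mfn")

    /* Call site (xen/arch/x86/shadow.c): */
    perfc_incrc(translate_gpfn_to_mfn);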