ia64/xen-unstable

changeset 3928:e8255a227e2c

bitkeeper revision 1.1236.3.3 (421f3ac7eVdbco19D20ncC6UepUAYw)

Keep a separate shadow and an "hl2" shadow of each guest L2 page.
Still doing excessive clearing of these shadows, though...
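
The hl2 shadow lives in the same shadow_status hash as the ordinary
shadow, distinguished by the new PSH_hl2 flag, so a guest L2 page at
gpfn can carry two entries, keyed gpfn and gpfn | PSH_hl2.  A minimal
sketch of the lookup-or-create pattern this changeset uses, with names
as in the patch below:

    /* look up the hl2 for this guest L2; create it on a miss */
    hl2_status = __shadow_status(d, gpfn | PSH_hl2);
    if ( unlikely(!(hl2_status & PSH_hl2)) )
        hl2_status = mk_hl2_table(ed);      /* allocate + register */
    hl2mfn = hl2_status & PSH_pfn_mask;     /* machine frame of hl2 */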

Signed-off-by: michael.fetterman@cl.cam.ac.uk
author maf46@burn.cl.cam.ac.uk
date Fri Feb 25 14:48:39 2005 +0000 (2005-02-25)
parents 55293083a741
children 826d1823c5b4
files xen/arch/x86/domain.c xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/arch/x86/vmx.c xen/include/asm-x86/shadow.h xen/include/xen/perfc_defn.h
line diff
     1.1 --- a/xen/arch/x86/domain.c	Fri Feb 25 14:47:52 2005 +0000
     1.2 +++ b/xen/arch/x86/domain.c	Fri Feb 25 14:48:39 2005 +0000
     1.3 @@ -410,8 +410,8 @@ static int vmx_final_setup_guest(struct 
     1.4      }
     1.5  
     1.6      /* We don't call update_pagetables() as we actively want fields such as 
     1.7 -     * the linear_pg_table to be null so that we bail out early of 
     1.8 -     * shadow_fault in case the vmx guest tries illegal accesses with
     1.9 +     * the linear_pg_table to be inaccessible so that we bail out early of 
    1.10 +     * shadow_fault() in case the vmx guest tries illegal accesses with
    1.11       * paging turned off. 
    1.12       */
    1.13      //update_pagetables(ed);     /* this assigns shadow_pagetable */
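
The early bail-out that comment refers to is the read of the guest PTE
through linear_pg_table at the top of shadow_fault(); roughly (a hedged
sketch -- the exact code is in xen/arch/x86/shadow.c and may differ):

    /* With linear_pg_table left unmapped, this read faults and
     * shadow_fault() gives up at once, reflecting #PF to the guest. */
    if ( unlikely(__get_user(gpte, (unsigned long *)
                             &linear_pg_table[va >> PAGE_SHIFT])) )
        return 0;  /* unresolved; raise #PF in the guest */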
     2.1 --- a/xen/arch/x86/mm.c	Fri Feb 25 14:47:52 2005 +0000
     2.2 +++ b/xen/arch/x86/mm.c	Fri Feb 25 14:48:39 2005 +0000
     2.3 @@ -1957,6 +1957,9 @@ int do_update_va_mapping(unsigned long v
     2.4               * page was not shadowed, or that the L2 entry has not yet been
     2.5               * updated to reflect the shadow.
     2.6               */
     2.7 +            if ( shadow_mode_external(current->domain) )
     2.8 +                BUG(); // can't use linear_l2_table with external tables.
     2.9 +
    2.10              l2_pgentry_t gpde = linear_l2_table[l2_table_offset(va)];
    2.11              unsigned long gpfn = l2_pgentry_val(gpde) >> PAGE_SHIFT;
    2.12  
    2.13 @@ -2381,6 +2384,9 @@ int ptwr_do_page_fault(unsigned long add
    2.14       * Attempt to read the PTE that maps the VA being accessed. By checking for
    2.15       * PDE validity in the L2 we avoid many expensive fixups in __get_user().
    2.16       */
    2.17 +    if ( shadow_mode_external(current->domain) )
    2.18 +        BUG(); // can't use linear_l2_table with external tables.
    2.19 +
    2.20      if ( !(l2_pgentry_val(linear_l2_table[addr>>L2_PAGETABLE_SHIFT]) &
    2.21             _PAGE_PRESENT) ||
    2.22           __get_user(pte, (unsigned long *)
    2.23 @@ -2417,6 +2423,9 @@ int ptwr_do_page_fault(unsigned long add
    2.24       * Is the L1 p.t. mapped into the current address space? If so we call it
    2.25       * an ACTIVE p.t., otherwise it is INACTIVE.
    2.26       */
    2.27 +    if ( shadow_mode_external(current->domain) )
    2.28 +        BUG(); // can't use linear_l2_table with external tables.
    2.29 +
    2.30      pl2e = &linear_l2_table[l2_idx];
    2.31      l2e  = l2_pgentry_val(*pl2e);
    2.32      which = PTWR_PT_INACTIVE;
     3.1 --- a/xen/arch/x86/shadow.c	Fri Feb 25 14:47:52 2005 +0000
     3.2 +++ b/xen/arch/x86/shadow.c	Fri Feb 25 14:48:39 2005 +0000
     3.3 @@ -111,6 +111,10 @@ static inline int clear_shadow_page(
     3.4      int              restart = 0;
     3.5      struct pfn_info *spage = &frame_table[x->smfn_and_flags & PSH_pfn_mask];
     3.6  
     3.7 +    // We don't clear hl2 tables here.  At least not yet.
     3.8 +    if ( x->pfn & PSH_hl2 )
     3.9 +        return 0;
    3.10 +
    3.11      switch ( spage->u.inuse.type_info & PGT_type_mask )
    3.12      {
    3.13          /* We clear L2 pages by zeroing the guest entries. */
    3.14 @@ -486,7 +490,7 @@ unsigned long shadow_l2_table(
    3.15      spfn_info->u.inuse.type_info = PGT_l2_page_table;
    3.16      perfc_incr(shadow_l2_pages);
    3.17  
    3.18 -    spfn = spfn_info - frame_table;
    3.19 +    spfn = page_to_pfn(spfn_info);
    3.20    /* Mark pfn as being shadowed; update field to point at shadow. */
    3.21      set_shadow_status(d, gpfn, spfn | PSH_shadowed);
    3.22   
    3.23 @@ -770,6 +774,41 @@ void shadow_l2_normal_pt_update(unsigned
    3.24      unmap_domain_mem(spl2e);
    3.25  }
    3.26  
    3.27 +unsigned long mk_hl2_table(struct exec_domain *ed)
    3.28 +{
    3.29 +    struct domain *d = ed->domain;
    3.30 +    unsigned long gmfn = pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT;
    3.31 +    unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
    3.32 +    unsigned long hl2mfn, status;
    3.33 +    struct pfn_info *hl2_info;
    3.34 +    l1_pgentry_t *hl2;
    3.35 +
    3.36 +    perfc_incr(hl2_table_pages);
    3.37 +
    3.38 +    if ( (hl2_info = alloc_shadow_page(d)) == NULL )
    3.39 +        BUG(); /* XXX Deal gracefully with failure. */
    3.40 +
    3.41 +    hl2_info->u.inuse.type_info = PGT_l1_page_table;
    3.42 +
    3.43 +    hl2mfn = page_to_pfn(hl2_info);
    3.44 +    status = hl2mfn | PSH_hl2;
    3.45 +    set_shadow_status(d, gpfn | PSH_hl2, status);
    3.46 +
    3.47 +    // need to optimize this...
    3.48 +    hl2 = map_domain_mem(hl2mfn << PAGE_SHIFT);
    3.49 +    memset(hl2, 0, PAGE_SIZE);
    3.50 +    unmap_domain_mem(hl2);
    3.51 +
    3.52 +    // install this hl2 as the linear_pg_table
    3.53 +    if ( shadow_mode_external(d) )
    3.54 +        ed->arch.monitor_vtable[l2_table_offset(LINEAR_PT_VIRT_START)] =
    3.55 +            mk_l2_pgentry((hl2mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
    3.56 +    else
    3.57 +        ed->arch.shadow_vtable[l2_table_offset(LINEAR_PT_VIRT_START)] =
    3.58 +            mk_l2_pgentry((hl2mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
    3.59 +
    3.60 +    return status;
    3.61 +}
    3.62  
    3.63  
    3.64  
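
The hl2 is, in effect, the guest L2 with its guest-physical frame
numbers translated to machine frames, which is why it can stand in as
the linear_pg_table.  Entries are filled on demand; the pattern already
used by gva_to_gpte() (shadow.h, below) looks roughly like this:

    /* demand-fill one hl2 slot: translate the guest L2 entry's
     * frame number and cache the resulting machine mapping */
    index = l2_table_offset(gva);
    if ( !l2_pgentry_val(ed->arch.hl2_vtable[index]) )
    {
        pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
        ed->arch.hl2_vtable[index] =
            mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
    }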
     4.1 --- a/xen/arch/x86/vmx.c	Fri Feb 25 14:47:52 2005 +0000
     4.2 +++ b/xen/arch/x86/vmx.c	Fri Feb 25 14:48:39 2005 +0000
     4.3 @@ -137,10 +137,17 @@ static int vmx_do_page_fault(unsigned lo
     4.4      if (mmio_space(gpa))
     4.5          handle_mmio(va, gpa);
     4.6  
     4.7 -    if ((result = shadow_fault(va, regs)))
     4.8 -        return result;
     4.9 -    
    4.10 -    return 0;       /* failed to resolve, i.e raise #PG */
    4.11 +    result = shadow_fault(va, regs);
    4.12 +
    4.13 +#if 0
    4.14 +    if ( !result )
    4.15 +    {
    4.16 +        __vmread(GUEST_EIP, &eip);
    4.17 +        printk("vmx pgfault to guest va=%p eip=%p\n", va, eip);
    4.18 +    }
    4.19 +#endif
    4.20 +
    4.21 +    return result;
    4.22  }
    4.23  
    4.24  static void vmx_do_general_protection_fault(struct xen_regs *regs) 
    4.25 @@ -273,19 +280,11 @@ static void vmx_vmexit_do_invlpg(unsigne
    4.26       * copying from guest
    4.27       */
    4.28      shadow_invlpg(ed, va);
    4.29 -    index = (va >> L2_PAGETABLE_SHIFT);
    4.30 +    index = l2_table_offset(va);
    4.31      ed->arch.hl2_vtable[index] = 
    4.32          mk_l2_pgentry(0); /* invalidate pgd cache */
    4.33  }
    4.34  
    4.35 -static inline void hl2_table_invalidate(struct exec_domain *ed)
    4.36 -{
    4.37 -    /*
    4.38 -     * Need to optimize this
    4.39 -     */
    4.40 -    memset(ed->arch.hl2_vtable, 0, PAGE_SIZE);
    4.41 -}
    4.42 -
    4.43  static void vmx_io_instruction(struct xen_regs *regs, 
    4.44                     unsigned long exit_qualification, unsigned long inst_len) 
    4.45  {
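
vmx_vmexit_do_invlpg() now drops just the one hl2 entry covering va
(on 32-bit non-PAE, l2_table_offset(va) is simply the macro form of
va >> L2_PAGETABLE_SHIFT), while the whole-table hl2_table_invalidate()
moves to shadow.h for use by __update_pagetables().  The two
granularities, side by side:

    /* per-VA: invalidate one cached pgd entry on INVLPG */
    ed->arch.hl2_vtable[l2_table_offset(va)] = mk_l2_pgentry(0);

    /* full: throw away the whole cache, e.g. when the page tables
     * change (still marked "need to optimize this") */
    memset(ed->arch.hl2_vtable, 0, PAGE_SIZE);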
     5.1 --- a/xen/include/asm-x86/shadow.h	Fri Feb 25 14:47:52 2005 +0000
     5.2 +++ b/xen/include/asm-x86/shadow.h	Fri Feb 25 14:48:39 2005 +0000
     5.3 @@ -9,8 +9,9 @@
     5.4  #include <asm/processor.h>
     5.5  #include <asm/domain_page.h>
     5.6  
     5.7 -/* Shadow PT flag bits in pfn_info */
     5.8 +/* Shadow PT flag bits in shadow_status */
     5.9  #define PSH_shadowed    (1<<31) /* page has a shadow. PFN points to shadow */
    5.10 +#define PSH_hl2         (1<<30) /* page is an hl2 */
    5.11  #define PSH_pfn_mask    ((1<<21)-1)
    5.12  
    5.13  /* Shadow PT operation mode : shadow-mode variable in arch_domain. */
    5.14 @@ -44,6 +45,7 @@ extern void unshadow_table(unsigned long
    5.15  extern int shadow_mode_enable(struct domain *p, unsigned int mode);
    5.16  extern void free_shadow_state(struct domain *d);
    5.17  extern void shadow_invlpg(struct exec_domain *, unsigned long);
    5.18 +extern unsigned long mk_hl2_table(struct exec_domain *ed);
    5.19  
    5.20  extern void vmx_shadow_clear_state(struct domain *);
    5.21  
    5.22 @@ -68,7 +70,7 @@ extern unsigned long shadow_l2_table(
    5.23      struct domain *d, unsigned long gmfn);
    5.24    
    5.25  static inline void shadow_invalidate(struct exec_domain *ed) {
    5.26 -    if ( !shadow_mode_translate(ed->domain))
    5.27 +    if ( !ed->arch.arch_vmx.flags ) /* only reached on the VMX path */
    5.28          BUG();
    5.29      memset(ed->arch.shadow_vtable, 0, PAGE_SIZE);
    5.30  }
    5.31 @@ -118,29 +120,27 @@ struct shadow_status {
    5.32  static inline void __shadow_get_l2e(
    5.33      struct exec_domain *ed, unsigned long va, unsigned long *sl2e)
    5.34  {
    5.35 -    if ( likely(shadow_mode_enabled(ed->domain)) ) {
    5.36 -        if ( shadow_mode_translate(ed->domain) )
    5.37 -            *sl2e = l2_pgentry_val(
    5.38 -                ed->arch.shadow_vtable[l2_table_offset(va)]);       
    5.39 -        else 
    5.40 -            *sl2e = l2_pgentry_val(
    5.41 -                shadow_linear_l2_table[l2_table_offset(va)]);
    5.42 -    }
    5.43 -    else
    5.44 +    if ( unlikely(!shadow_mode_enabled(ed->domain)) )
    5.45          BUG();
    5.46 +
    5.47 +    if ( shadow_mode_translate(ed->domain) )
    5.48 +        *sl2e = l2_pgentry_val(
    5.49 +            ed->arch.shadow_vtable[l2_table_offset(va)]);
    5.50 +    else
    5.51 +        *sl2e = l2_pgentry_val(
    5.52 +            shadow_linear_l2_table[l2_table_offset(va)]);
    5.53  }
    5.54  
    5.55  static inline void __shadow_set_l2e(
    5.56      struct exec_domain *ed, unsigned long va, unsigned long value)
    5.57  {
    5.58 -    if ( likely(shadow_mode_enabled(ed->domain)) ) {
    5.59 -        if ( shadow_mode_translate(ed->domain) ) 
    5.60 -            ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
    5.61 -        else 
    5.62 -            shadow_linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
    5.63 -    }
    5.64 -    else
    5.65 +    if ( unlikely(!shadow_mode_enabled(ed->domain)) )
    5.66          BUG();
    5.67 +
    5.68 +    if ( shadow_mode_translate(ed->domain) ) 
    5.69 +        ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
    5.70 +    else 
    5.71 +        shadow_linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
    5.72  }
    5.73  
    5.74  static inline void __guest_get_l2e(
    5.75 @@ -347,8 +347,14 @@ static void shadow_audit(struct domain *
    5.76      for ( j = 0; j < shadow_ht_buckets; j++ )
    5.77      {
    5.78          a = &d->arch.shadow_ht[j];        
    5.79 -        if ( a->pfn ) { live++; ASSERT(a->smfn_and_flags & PSH_pfn_mask); }
    5.80 -        ASSERT(a->pfn < 0x00100000UL);
    5.81 +        if ( a->pfn )
    5.82 +        {
    5.83 +            live++;
    5.84 +            ASSERT(a->smfn_and_flags & PSH_pfn_mask);
    5.85 +        }
    5.86 +        else
    5.87 +            ASSERT(!a->next);
    5.88 +        ASSERT( (a->pfn & ~PSH_hl2) < 0x00100000UL);
    5.89          a = a->next;
    5.90          while ( a && (live < 9999) )
    5.91          { 
    5.92 @@ -359,7 +365,7 @@ static void shadow_audit(struct domain *
    5.93                         live, a->pfn, a->smfn_and_flags, a->next);
    5.94                  BUG();
    5.95              }
    5.96 -            ASSERT(a->pfn < 0x00100000UL);
    5.97 +            ASSERT( (a->pfn & ~PSH_hl2) < 0x00100000UL);
    5.98              ASSERT(a->smfn_and_flags & PSH_pfn_mask);
    5.99              a = a->next; 
   5.100          }
   5.101 @@ -369,15 +375,22 @@ static void shadow_audit(struct domain *
   5.102      for ( a = d->arch.shadow_ht_free; a != NULL; a = a->next )
   5.103          free++; 
   5.104  
   5.105 -    if ( print)
   5.106 +    if ( print )
   5.107          printk("Xlive=%d free=%d\n",live,free);
   5.108  
   5.109 -    abs = (perfc_value(shadow_l1_pages) + perfc_value(shadow_l2_pages)) - live;
   5.110 +    // BUG: this only works if there's only a single domain which is
   5.111 +    //      using shadow tables.
   5.112 +    //
   5.113 +    abs = ( perfc_value(shadow_l1_pages) +
   5.114 +            perfc_value(shadow_l2_pages) +
   5.115 +            perfc_value(hl2_table_pages) ) - live;
   5.116  #ifdef PERF_COUNTERS
   5.117      if ( (abs < -1) || (abs > 1) )
   5.118      {
   5.119 -        printk("live=%d free=%d l1=%d l2=%d\n",live,free,
   5.120 -               perfc_value(shadow_l1_pages), perfc_value(shadow_l2_pages) );
   5.121 +        printk("live=%d free=%d l1=%d l2=%d hl2=%d\n", live, free,
   5.122 +               perfc_value(shadow_l1_pages),
   5.123 +               perfc_value(shadow_l2_pages),
   5.124 +               perfc_value(hl2_table_pages));
   5.125          BUG();
   5.126      }
   5.127  #endif
   5.128 @@ -405,6 +418,8 @@ static inline unsigned long __shadow_sta
   5.129  {
   5.130      struct shadow_status *p, *x, *head;
   5.131  
   5.132 +    ASSERT(spin_is_locked(&d->arch.shadow_lock));
   5.133 +
   5.134      x = head = hash_bucket(d, gpfn);
   5.135      p = NULL;
   5.136  
   5.137 @@ -570,7 +585,7 @@ static inline void set_shadow_status(
   5.138  
   5.139      ASSERT(spin_is_locked(&d->arch.shadow_lock));
   5.140      ASSERT(gpfn != 0);
   5.141 -    ASSERT(s & PSH_shadowed);
   5.142 +    ASSERT(s & (PSH_shadowed | PSH_hl2));
   5.143  
   5.144      x = head = hash_bucket(d, gpfn);
   5.145     
   5.146 @@ -658,7 +673,7 @@ static inline unsigned long gva_to_gpte(
   5.147      if (!(gpde & _PAGE_PRESENT))
   5.148          return 0;
   5.149  
   5.150 -    index = (gva >> L2_PAGETABLE_SHIFT);
   5.151 +    index = l2_table_offset(gva);
   5.152  
   5.153      if (!l2_pgentry_val(ed->arch.hl2_vtable[index])) {
   5.154          pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
   5.155 @@ -684,6 +699,14 @@ static inline unsigned long gva_to_gpa(u
   5.156      return (gpte & PAGE_MASK) + (gva & ~PAGE_MASK); 
   5.157  }
   5.158  
   5.159 +static inline void hl2_table_invalidate(struct exec_domain *ed)
   5.160 +{
   5.161 +    /*
   5.162 +     * Need to optimize this
   5.163 +     */
   5.164 +    memset(ed->arch.hl2_vtable, 0, PAGE_SIZE);
   5.165 +}
   5.166 +
   5.167  static inline void __update_pagetables(struct exec_domain *ed)
   5.168  {
   5.169      struct domain *d = ed->domain;
   5.170 @@ -698,63 +721,83 @@ static inline void __update_pagetables(s
   5.171  
   5.172      ed->arch.shadow_table = mk_pagetable(smfn<<PAGE_SHIFT);
   5.173  
   5.174 -    if  ( shadow_mode_translate(ed->domain) )
   5.175 +    if ( shadow_mode_translate(d) )
   5.176      {
   5.177 +        l2_pgentry_t *mpl2e = ed->arch.monitor_vtable;
   5.178          l2_pgentry_t *gpl2e, *spl2e;
   5.179 +        unsigned long hl2_status, hl2mfn, offset;
   5.180 +        int need_flush = 0;
   5.181  
   5.182          if ( ed->arch.guest_vtable )
   5.183              unmap_domain_mem(ed->arch.guest_vtable);
   5.184          if ( ed->arch.shadow_vtable )
   5.185              unmap_domain_mem(ed->arch.shadow_vtable);
   5.186 +        if ( ed->arch.hl2_vtable )
   5.187 +            unmap_domain_mem(ed->arch.hl2_vtable);
   5.188  
   5.189          gpl2e = ed->arch.guest_vtable =
   5.190              map_domain_mem(pagetable_val(ed->arch.guest_table));
   5.191          spl2e = ed->arch.shadow_vtable =
   5.192              map_domain_mem(pagetable_val(ed->arch.shadow_table));
   5.193  
   5.194 -        if ( shadow_mode_external(ed->domain ) )
   5.195 +        hl2_status = __shadow_status(d, gpfn | PSH_hl2);
   5.196 +        if ( unlikely(!(hl2_status & PSH_hl2)) )
   5.197 +            hl2_status = mk_hl2_table(ed);
   5.198 +
   5.199 +        hl2mfn = hl2_status & PSH_pfn_mask;
   5.200 +        ed->arch.hl2_vtable = map_domain_mem(hl2mfn << PAGE_SHIFT);
   5.201 +
   5.202 +        offset = l2_table_offset(LINEAR_PT_VIRT_START);
   5.203 +        if ( hl2mfn != (l2_pgentry_val(mpl2e[offset]) >> PAGE_SHIFT) )
   5.204          {
   5.205 -            l2_pgentry_t *mpl2e = ed->arch.monitor_vtable;
   5.206 -            unsigned long old_smfn;
   5.207 -            unsigned sh_l2offset = l2_table_offset(SH_LINEAR_PT_VIRT_START);
   5.208 -            
   5.209 -            old_smfn = l2_pgentry_val(mpl2e[sh_l2offset]) >> PAGE_SHIFT;
   5.210 -            if ( old_smfn != smfn )
   5.211 +            mpl2e[offset] =
   5.212 +                mk_l2_pgentry((hl2mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   5.213 +            need_flush = 1;
   5.214 +        }
   5.215 +
   5.216 +        if ( shadow_mode_external(d) )
   5.217 +        {
   5.218 +            offset = l2_table_offset(SH_LINEAR_PT_VIRT_START);
   5.219 +            if ( smfn != (l2_pgentry_val(mpl2e[offset]) >> PAGE_SHIFT) )
   5.220              {
   5.221 -                mpl2e[sh_l2offset] =
   5.222 +                mpl2e[offset] =
   5.223                      mk_l2_pgentry((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   5.224 -                local_flush_tlb();
   5.225 +                need_flush = 1;
   5.226              }
   5.227          }
   5.228  
   5.229          if ( ed->arch.arch_vmx.flags )
   5.230          {
   5.231              // Why is VMX mode doing this?
   5.232 -            memset(spl2e, 0, L2_PAGETABLE_ENTRIES * sizeof(l2_pgentry_t));
   5.233 +            shadow_invalidate(ed);
   5.234 +            hl2_table_invalidate(ed);
   5.235          }
   5.236 +
   5.237 +        if ( need_flush )
   5.238 +            local_flush_tlb();
   5.239      }
   5.240  }
   5.241  
   5.242  static inline void update_pagetables(struct exec_domain *ed)
   5.243  {
   5.244 -     if ( unlikely(shadow_mode_enabled(ed->domain)) )
   5.245 -     {
   5.246 -         shadow_lock(ed->domain);
   5.247 -         __update_pagetables(ed);
   5.248 -         shadow_unlock(ed->domain);
   5.249 -     }
   5.250 -     if ( !shadow_mode_external(ed->domain) )
   5.251 -     {
   5.252 +    if ( unlikely(shadow_mode_enabled(ed->domain)) )
   5.253 +    {
   5.254 +        shadow_lock(ed->domain);
   5.255 +        __update_pagetables(ed);
   5.256 +        shadow_unlock(ed->domain);
   5.257 +    }
   5.258 +    if ( !shadow_mode_external(ed->domain) )
   5.259 +    {
   5.260  #ifdef __x86_64__
   5.261 -         if ( !(ed->arch.flags & TF_kernel_mode) )
   5.262 -             ed->arch.monitor_table = ed->arch.guest_table_user;
   5.263 -         else
   5.264 +        if ( !(ed->arch.flags & TF_kernel_mode) )
   5.265 +            ed->arch.monitor_table = ed->arch.guest_table_user;
   5.266 +        else
   5.267  #endif
   5.268 -         if ( shadow_mode_enabled(ed->domain) )
   5.269 -             ed->arch.monitor_table = ed->arch.shadow_table;
   5.270 -         else
   5.271 -             ed->arch.monitor_table = ed->arch.guest_table;
   5.272 -     }
   5.273 +        if ( shadow_mode_enabled(ed->domain) )
   5.274 +            ed->arch.monitor_table = ed->arch.shadow_table;
   5.275 +        else
   5.276 +            ed->arch.monitor_table = ed->arch.guest_table;
   5.277 +    }
   5.278  }
   5.279  
   5.280  #if SHADOW_DEBUG
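
Note the control-flow change in __update_pagetables(): the monitor
table's linear slots for the hl2 (LINEAR_PT_VIRT_START) and, under
shadow_mode_external, the shadow (SH_LINEAR_PT_VIRT_START) are
refreshed first, and a single deferred local_flush_tlb() covers both,
instead of flushing inside each branch.  Condensed (slot_changed() is
a hypothetical stand-in for the mfn comparisons above):

    need_flush  = slot_changed(mpl2e, LINEAR_PT_VIRT_START, hl2mfn);
    need_flush |= slot_changed(mpl2e, SH_LINEAR_PT_VIRT_START, smfn);
    if ( need_flush )
        local_flush_tlb();   /* one flush for both updates */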
     6.1 --- a/xen/include/xen/perfc_defn.h	Fri Feb 25 14:47:52 2005 +0000
     6.2 +++ b/xen/include/xen/perfc_defn.h	Fri Feb 25 14:48:39 2005 +0000
     6.3 @@ -31,6 +31,7 @@ PERFCOUNTER_CPU( shadow_update_va_fail2,
     6.4  /* STATUS counters do not reset when 'P' is hit */
     6.5  PERFSTATUS( shadow_l2_pages, "current # shadow L2 pages" )
     6.6  PERFSTATUS( shadow_l1_pages, "current # shadow L1 pages" )
     6.7 +PERFSTATUS( hl2_table_pages, "current # hl2 pages" )
     6.8  
     6.9  PERFCOUNTER_CPU( check_pagetable, "calls to check_pagetable" )
    6.10  PERFCOUNTER_CPU( check_all_pagetables, "calls to check_all_pagetables" )