ia64/xen-unstable

changeset 17620:810d8c3ac992

Clean up shadow code after the removal of non-PAE 32-bit builds
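
With non-PAE 32-bit hypervisors gone, CONFIG_PAGING_LEVELS is always 3
or 4, and the shadow format is implied by the guest width: 2-level and
PAE guests get PAE (3-level) shadows, and 64-bit guests get 4-level
shadows.  The SHADOW_LEVELS parameter of the shadow code's internal
naming scheme is therefore redundant, so multi.c is now built once per
guest width (guest_2.o, guest_3.o, guest_4.o) rather than once per
(guest, shadow) pair, and the dead 2-level-hypervisor arms of the
CONFIG_PAGING_LEVELS conditionals are dropped throughout.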

Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu May 08 16:58:33 2008 +0100 (2008-05-08)
parents c99a88623eda
children 14d362d5fa59
files xen/arch/x86/mm/p2m.c xen/arch/x86/mm/shadow/Makefile xen/arch/x86/mm/shadow/common.c xen/arch/x86/mm/shadow/multi.c xen/arch/x86/mm/shadow/multi.h xen/arch/x86/mm/shadow/private.h xen/arch/x86/mm/shadow/types.h xen/include/asm-x86/mtrr.h
     1.1 --- a/xen/arch/x86/mm/p2m.c	Thu May 08 14:33:31 2008 +0100
     1.2 +++ b/xen/arch/x86/mm/p2m.c	Thu May 08 16:58:33 2008 +0100
     1.3 @@ -220,7 +220,6 @@ p2m_set_entry(struct domain *d, unsigned
     1.4                           L4_PAGETABLE_ENTRIES, PGT_l3_page_table) )
     1.5          goto out;
     1.6  #endif
     1.7 -#if CONFIG_PAGING_LEVELS >= 3
     1.8      /*
     1.9       * When using PAE Xen, we only allow 33 bits of pseudo-physical
    1.10       * address in translated guests (i.e. 8 GBytes).  This restriction
    1.11 @@ -235,7 +234,7 @@ p2m_set_entry(struct domain *d, unsigned
    1.12                            : L3_PAGETABLE_ENTRIES),
    1.13                           PGT_l2_page_table) )
    1.14          goto out;
    1.15 -#endif
    1.16 +
    1.17      if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
    1.18                           L2_PAGETABLE_SHIFT - PAGE_SHIFT,
    1.19                           L2_PAGETABLE_ENTRIES, PGT_l1_page_table) )
    1.20 @@ -308,7 +307,6 @@ p2m_gfn_to_mfn(struct domain *d, unsigne
    1.21          unmap_domain_page(l4e);
    1.22      }
    1.23  #endif
    1.24 -#if CONFIG_PAGING_LEVELS >= 3
    1.25      {
    1.26          l3_pgentry_t *l3e = map_domain_page(mfn_x(mfn));
    1.27  #if CONFIG_PAGING_LEVELS == 3
    1.28 @@ -329,7 +327,6 @@ p2m_gfn_to_mfn(struct domain *d, unsigne
    1.29          mfn = _mfn(l3e_get_pfn(*l3e));
    1.30          unmap_domain_page(l3e);
    1.31      }
    1.32 -#endif
    1.33  
    1.34      l2e = map_domain_page(mfn_x(mfn));
    1.35      l2e += l2_table_offset(addr);
    1.36 @@ -486,7 +483,7 @@ int p2m_alloc_table(struct domain *d,
    1.37      p2m_top->u.inuse.type_info =
    1.38  #if CONFIG_PAGING_LEVELS == 4
    1.39          PGT_l4_page_table
    1.40 -#elif CONFIG_PAGING_LEVELS == 3
    1.41 +#else
    1.42          PGT_l3_page_table
    1.43  #endif
    1.44          | 1 | PGT_validated;
    1.45 @@ -657,16 +654,13 @@ static void audit_p2m(struct domain *d)
    1.46          l3_pgentry_t *l3e;
    1.47          int i3, i4;
    1.48          l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
    1.49 -#elif CONFIG_PAGING_LEVELS == 3
    1.50 +#else /* CONFIG_PAGING_LEVELS == 3 */
    1.51          l3_pgentry_t *l3e;
    1.52          int i3;
    1.53          l3e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
    1.54 -#else /* CONFIG_PAGING_LEVELS == 2 */
    1.55 -        l2e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
    1.56  #endif
    1.57  
    1.58          gfn = 0;
    1.59 -#if CONFIG_PAGING_LEVELS >= 3
    1.60  #if CONFIG_PAGING_LEVELS >= 4
    1.61          for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
    1.62          {
    1.63 @@ -676,7 +670,7 @@ static void audit_p2m(struct domain *d)
    1.64                  continue;
    1.65              }
    1.66              l3e = map_domain_page(mfn_x(_mfn(l4e_get_pfn(l4e[i4]))));
    1.67 -#endif /* now at levels 3 or 4... */
    1.68 +#endif
    1.69              for ( i3 = 0;
    1.70                    i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
    1.71                    i3++ )
    1.72 @@ -687,7 +681,6 @@ static void audit_p2m(struct domain *d)
    1.73                      continue;
    1.74                  }
    1.75                  l2e = map_domain_page(mfn_x(_mfn(l3e_get_pfn(l3e[i3]))));
    1.76 -#endif /* all levels... */
    1.77                  for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
    1.78                  {
    1.79                      if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
    1.80 @@ -714,21 +707,17 @@ static void audit_p2m(struct domain *d)
    1.81                      }
    1.82                      unmap_domain_page(l1e);
    1.83                  }
    1.84 -#if CONFIG_PAGING_LEVELS >= 3
    1.85                  unmap_domain_page(l2e);
    1.86              }
    1.87  #if CONFIG_PAGING_LEVELS >= 4
    1.88              unmap_domain_page(l3e);
    1.89          }
    1.90  #endif
    1.91 -#endif
    1.92  
    1.93  #if CONFIG_PAGING_LEVELS == 4
    1.94          unmap_domain_page(l4e);
    1.95 -#elif CONFIG_PAGING_LEVELS == 3
    1.96 +#else /* CONFIG_PAGING_LEVELS == 3 */
    1.97          unmap_domain_page(l3e);
    1.98 -#else /* CONFIG_PAGING_LEVELS == 2 */
    1.99 -        unmap_domain_page(l2e);
   1.100  #endif
   1.101  
   1.102      }
   1.103 @@ -864,14 +853,12 @@ void p2m_change_type_global(struct domai
   1.104      l2_pgentry_t *l2e;
   1.105      mfn_t l1mfn;
   1.106      int i1, i2;
   1.107 -#if CONFIG_PAGING_LEVELS >= 3
   1.108      l3_pgentry_t *l3e;
   1.109      int i3;
   1.110  #if CONFIG_PAGING_LEVELS == 4
   1.111      l4_pgentry_t *l4e;
   1.112      int i4;
   1.113  #endif /* CONFIG_PAGING_LEVELS == 4 */
   1.114 -#endif /* CONFIG_PAGING_LEVELS >= 3 */
   1.115  
   1.116      if ( !paging_mode_translate(d) )
   1.117          return;
   1.118 @@ -883,13 +870,10 @@ void p2m_change_type_global(struct domai
   1.119  
   1.120  #if CONFIG_PAGING_LEVELS == 4
   1.121      l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
   1.122 -#elif CONFIG_PAGING_LEVELS == 3
   1.123 +#else /* CONFIG_PAGING_LEVELS == 3 */
   1.124      l3e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
   1.125 -#else /* CONFIG_PAGING_LEVELS == 2 */
   1.126 -    l2e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
   1.127  #endif
   1.128  
   1.129 -#if CONFIG_PAGING_LEVELS >= 3
   1.130  #if CONFIG_PAGING_LEVELS >= 4
   1.131      for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
   1.132      {
   1.133 @@ -898,7 +882,7 @@ void p2m_change_type_global(struct domai
   1.134              continue;
   1.135          }
   1.136          l3e = map_domain_page(l4e_get_pfn(l4e[i4]));
   1.137 -#endif /* now at levels 3 or 4... */
   1.138 +#endif
   1.139          for ( i3 = 0;
   1.140                i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
   1.141                i3++ )
   1.142 @@ -908,7 +892,6 @@ void p2m_change_type_global(struct domai
   1.143                  continue;
   1.144              }
   1.145              l2e = map_domain_page(l3e_get_pfn(l3e[i3]));
   1.146 -#endif /* all levels... */
   1.147              for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
   1.148              {
   1.149                  if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
   1.150 @@ -934,21 +917,17 @@ void p2m_change_type_global(struct domai
   1.151                  }
   1.152                  unmap_domain_page(l1e);
   1.153              }
   1.154 -#if CONFIG_PAGING_LEVELS >= 3
   1.155              unmap_domain_page(l2e);
   1.156          }
   1.157  #if CONFIG_PAGING_LEVELS >= 4
   1.158          unmap_domain_page(l3e);
   1.159      }
   1.160  #endif
   1.161 -#endif
   1.162  
   1.163  #if CONFIG_PAGING_LEVELS == 4
   1.164      unmap_domain_page(l4e);
   1.165 -#elif CONFIG_PAGING_LEVELS == 3
   1.166 +#else /* CONFIG_PAGING_LEVELS == 3 */
   1.167      unmap_domain_page(l3e);
   1.168 -#else /* CONFIG_PAGING_LEVELS == 2 */
   1.169 -    unmap_domain_page(l2e);
   1.170  #endif
   1.171  
   1.172  }
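
The p2m.c hunks above are mechanical: CONFIG_PAGING_LEVELS is now
always 3 or 4, so every "#if CONFIG_PAGING_LEVELS >= 3" guard is
tautological and is deleted around its body, and each remaining
three-way split loses its 2-level arm.  A minimal sketch of the
surviving pattern, using identifiers from the hunks above:

    #if CONFIG_PAGING_LEVELS == 4
        unmap_domain_page(l4e);
    #else /* CONFIG_PAGING_LEVELS == 3 */
        unmap_domain_page(l3e);
    #endif

The old "#elif CONFIG_PAGING_LEVELS == 3" arms become plain "#else"
arms with a comment, since no other value is possible.
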
     2.1 --- a/xen/arch/x86/mm/shadow/Makefile	Thu May 08 14:33:31 2008 +0100
     2.2 +++ b/xen/arch/x86/mm/shadow/Makefile	Thu May 08 16:58:33 2008 +0100
     2.3 @@ -1,10 +1,5 @@
     2.4 -obj-$(x86_32) += common.o g2_on_s3.o g3_on_s3.o
     2.5 -obj-$(x86_64) += common.o g4_on_s4.o g3_on_s3.o g2_on_s3.o
     2.6 +obj-$(x86_32) += common.o guest_2.o guest_3.o
     2.7 +obj-$(x86_64) += common.o guest_2.o guest_3.o guest_4.o
     2.8  
     2.9 -guest_levels  = $(subst g,,$(filter g%,$(subst ., ,$(subst _, ,$(1)))))
    2.10 -shadow_levels = $(subst s,,$(filter s%,$(subst ., ,$(subst _, ,$(1)))))
    2.11 -shadow_defns  = -DGUEST_PAGING_LEVELS=$(call guest_levels,$(1)) \
    2.12 -                -DSHADOW_PAGING_LEVELS=$(call shadow_levels,$(1))
    2.13 -
    2.14 -g%.o: multi.c $(HDRS) Makefile
    2.15 -	$(CC) $(CFLAGS) $(call shadow_defns,$(@F)) -c $< -o $@
    2.16 +guest_%.o: multi.c $(HDRS) Makefile
    2.17 +	$(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
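
The rewritten pattern rule derives the guest width directly from the
target name via the make stem "$*": building guest_3.o runs, roughly,
"$(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=3 -c multi.c -o guest_3.o".
This replaces the old g%_on_s% objects and the name-parsing helpers
(guest_levels, shadow_levels, shadow_defns) that extracted two level
numbers from each object name and passed a separate
-DSHADOW_PAGING_LEVELS define.
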
     3.1 --- a/xen/arch/x86/mm/shadow/common.c	Thu May 08 14:33:31 2008 +0100
     3.2 +++ b/xen/arch/x86/mm/shadow/common.c	Thu May 08 16:58:33 2008 +0100
     3.3 @@ -64,11 +64,7 @@ void shadow_domain_init(struct domain *d
     3.4   */
     3.5  void shadow_vcpu_init(struct vcpu *v)
     3.6  {
     3.7 -#if CONFIG_PAGING_LEVELS == 4
     3.8 -    v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
     3.9 -#elif CONFIG_PAGING_LEVELS == 3
    3.10 -    v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
    3.11 -#endif
    3.12 +    v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
    3.13  }
    3.14  
    3.15  #if SHADOW_AUDIT
    3.16 @@ -503,38 +499,37 @@ sh_validate_guest_entry(struct vcpu *v, 
    3.17          return 0;  /* Not shadowed at all */
    3.18  
    3.19      if ( page->shadow_flags & SHF_L1_32 ) 
    3.20 -        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 3, 2)
    3.21 +        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 2)
    3.22              (v, gmfn, entry, size);
    3.23 -
    3.24      if ( page->shadow_flags & SHF_L2_32 ) 
    3.25 -        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 3, 2)
    3.26 +        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 2)
    3.27              (v, gmfn, entry, size);
    3.28  
    3.29      if ( page->shadow_flags & SHF_L1_PAE ) 
    3.30 -        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 3, 3)
    3.31 +        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 3)
    3.32              (v, gmfn, entry, size);
    3.33      if ( page->shadow_flags & SHF_L2_PAE ) 
    3.34 -        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 3, 3)
    3.35 +        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 3)
    3.36              (v, gmfn, entry, size);
    3.37      if ( page->shadow_flags & SHF_L2H_PAE ) 
    3.38 -        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 3, 3)
    3.39 +        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 3)
    3.40              (v, gmfn, entry, size);
    3.41  
    3.42  #if CONFIG_PAGING_LEVELS >= 4 
    3.43      if ( page->shadow_flags & SHF_L1_64 ) 
    3.44 -        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 4, 4)
    3.45 +        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 4)
    3.46              (v, gmfn, entry, size);
    3.47      if ( page->shadow_flags & SHF_L2_64 ) 
    3.48 -        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 4, 4)
    3.49 +        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 4)
    3.50              (v, gmfn, entry, size);
    3.51      if ( page->shadow_flags & SHF_L2H_64 ) 
    3.52 -        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 4, 4)
    3.53 +        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 4)
    3.54              (v, gmfn, entry, size);
    3.55      if ( page->shadow_flags & SHF_L3_64 ) 
    3.56 -        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl3e, 4, 4)
    3.57 +        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl3e, 4)
    3.58              (v, gmfn, entry, size);
    3.59      if ( page->shadow_flags & SHF_L4_64 ) 
    3.60 -        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl4e, 4, 4)
    3.61 +        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl4e, 4)
    3.62              (v, gmfn, entry, size);
    3.63  #else /* 32-bit hypervisor does not support 64-bit guests */
    3.64      ASSERT((page->shadow_flags 
    3.65 @@ -613,7 +608,7 @@ int shadow_cmpxchg_guest_entry(struct vc
    3.66   * Most shadow pages are allocated singly, but there is one case where
    3.67   * we need to allocate multiple pages together: shadowing 32-bit guest
    3.68   * tables on PAE or 64-bit shadows.  A 32-bit guest l1 table covers 4MB
    3.69 - * of virtuial address space, and needs to be shadowed by two PAE/64-bit
    3.70 + * of virtual address space, and needs to be shadowed by two PAE/64-bit
    3.71   * l1 tables (covering 2MB of virtual address space each).  Similarly, a
    3.72   * 32-bit guest l2 table (4GB va) needs to be shadowed by four
    3.73   * PAE/64-bit l2 tables (1GB va each).  These multi-page shadows are
    3.74 @@ -622,15 +617,15 @@ int shadow_cmpxchg_guest_entry(struct vc
    3.75   *    
    3.76   * This table shows the allocation behaviour of the different modes:
    3.77   *
    3.78 - * Xen paging      32b  pae  pae  64b  64b  64b
    3.79 - * Guest paging    32b  32b  pae  32b  pae  64b
    3.80 - * PV or HVM        *   HVM   *   HVM  HVM   * 
    3.81 - * Shadow paging   32b  pae  pae  pae  pae  64b
    3.82 + * Xen paging      pae  pae  64b  64b  64b
    3.83 + * Guest paging    32b  pae  32b  pae  64b
    3.84 + * PV or HVM       HVM   *   HVM  HVM   * 
    3.85 + * Shadow paging   pae  pae  pae  pae  64b
    3.86   *
    3.87 - * sl1 size         4k   8k   4k   8k   4k   4k
    3.88 - * sl2 size         4k  16k   4k  16k   4k   4k
    3.89 - * sl3 size         -    -    -    -    -    4k
    3.90 - * sl4 size         -    -    -    -    -    4k
    3.91 + * sl1 size         8k   4k   8k   4k   4k
    3.92 + * sl2 size        16k   4k  16k   4k   4k
    3.93 + * sl3 size         -    -    -    -    4k
    3.94 + * sl4 size         -    -    -    -    4k
    3.95   *
    3.96   * We allocate memory from xen in four-page units and break them down
    3.97   * with a simple buddy allocator.  Can't use the xen allocator to handle
    3.98 @@ -723,15 +718,15 @@ static void shadow_unhook_mappings(struc
    3.99      switch ( sp->type )
   3.100      {
   3.101      case SH_type_l2_32_shadow:
   3.102 -        SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings,3,2)(v,smfn);
   3.103 +        SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings, 2)(v,smfn);
   3.104          break;
   3.105      case SH_type_l2_pae_shadow:
   3.106      case SH_type_l2h_pae_shadow:
   3.107 -        SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings,3,3)(v,smfn);
   3.108 +        SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings, 3)(v,smfn);
   3.109          break;
   3.110  #if CONFIG_PAGING_LEVELS >= 4
   3.111      case SH_type_l4_64_shadow:
   3.112 -        SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings,4,4)(v,smfn);
   3.113 +        SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings, 4)(v,smfn);
   3.114          break;
   3.115  #endif
   3.116      default:
   3.117 @@ -1573,37 +1568,37 @@ void sh_destroy_shadow(struct vcpu *v, m
   3.118      {
   3.119      case SH_type_l1_32_shadow:
   3.120      case SH_type_fl1_32_shadow:
   3.121 -        SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 3, 2)(v, smfn);
   3.122 +        SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 2)(v, smfn);
   3.123          break;
   3.124      case SH_type_l2_32_shadow:
   3.125 -        SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 3, 2)(v, smfn);
   3.126 +        SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 2)(v, smfn);
   3.127          break;
   3.128  
   3.129      case SH_type_l1_pae_shadow:
   3.130      case SH_type_fl1_pae_shadow:
   3.131 -        SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 3, 3)(v, smfn);
   3.132 +        SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 3)(v, smfn);
   3.133          break;
   3.134      case SH_type_l2_pae_shadow:
   3.135      case SH_type_l2h_pae_shadow:
   3.136 -        SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 3, 3)(v, smfn);
   3.137 +        SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 3)(v, smfn);
   3.138          break;
   3.139  
   3.140  #if CONFIG_PAGING_LEVELS >= 4
   3.141      case SH_type_l1_64_shadow:
   3.142      case SH_type_fl1_64_shadow:
   3.143 -        SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4, 4)(v, smfn);
   3.144 +        SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4)(v, smfn);
   3.145          break;
   3.146      case SH_type_l2h_64_shadow:
   3.147          ASSERT(is_pv_32on64_vcpu(v));
   3.148          /* Fall through... */
   3.149      case SH_type_l2_64_shadow:
   3.150 -        SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4, 4)(v, smfn);
   3.151 +        SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4)(v, smfn);
   3.152          break;
   3.153      case SH_type_l3_64_shadow:
   3.154 -        SHADOW_INTERNAL_NAME(sh_destroy_l3_shadow, 4, 4)(v, smfn);
   3.155 +        SHADOW_INTERNAL_NAME(sh_destroy_l3_shadow, 4)(v, smfn);
   3.156          break;
   3.157      case SH_type_l4_64_shadow:
   3.158 -        SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, 4, 4)(v, smfn);
   3.159 +        SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, 4)(v, smfn);
   3.160          break;
   3.161  #endif
   3.162      default:
   3.163 @@ -1626,16 +1621,16 @@ int sh_remove_write_access(struct vcpu *
   3.164      /* Dispatch table for getting per-type functions */
   3.165      static hash_callback_t callbacks[SH_type_unused] = {
   3.166          NULL, /* none    */
   3.167 -        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,3,2), /* l1_32   */
   3.168 -        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,3,2), /* fl1_32  */
   3.169 +        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2), /* l1_32   */
   3.170 +        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2), /* fl1_32  */
   3.171          NULL, /* l2_32   */
   3.172 -        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,3,3), /* l1_pae  */
   3.173 -        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,3,3), /* fl1_pae */
   3.174 +        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 3), /* l1_pae  */
   3.175 +        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 3), /* fl1_pae */
   3.176          NULL, /* l2_pae  */
   3.177          NULL, /* l2h_pae */
   3.178  #if CONFIG_PAGING_LEVELS >= 4
   3.179 -        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,4,4), /* l1_64   */
   3.180 -        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,4,4), /* fl1_64  */
   3.181 +        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4), /* l1_64   */
   3.182 +        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4), /* fl1_64  */
   3.183  #else
   3.184          NULL, /* l1_64   */
   3.185          NULL, /* fl1_64  */
   3.186 @@ -1711,7 +1706,6 @@ int sh_remove_write_access(struct vcpu *
   3.187                  GUESS(0xC0000000UL + (gfn << PAGE_SHIFT), 4);
   3.188  
   3.189          }
   3.190 -#if CONFIG_PAGING_LEVELS >= 3
   3.191          else if ( v->arch.paging.mode->guest_levels == 3 )
   3.192          {
   3.193              /* 32bit PAE w2k3: linear map at 0xC0000000 */
   3.194 @@ -1746,7 +1740,6 @@ int sh_remove_write_access(struct vcpu *
   3.195              GUESS(0x0000010000000000UL + (gfn << PAGE_SHIFT), 4); 
   3.196          }
   3.197  #endif /* CONFIG_PAGING_LEVELS >= 4 */
   3.198 -#endif /* CONFIG_PAGING_LEVELS >= 3 */
   3.199  
   3.200  #undef GUESS
   3.201      }
   3.202 @@ -1810,16 +1803,16 @@ int sh_remove_all_mappings(struct vcpu *
   3.203      /* Dispatch table for getting per-type functions */
   3.204      static hash_callback_t callbacks[SH_type_unused] = {
   3.205          NULL, /* none    */
   3.206 -        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,3,2), /* l1_32   */
   3.207 -        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,3,2), /* fl1_32  */
   3.208 +        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2), /* l1_32   */
   3.209 +        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2), /* fl1_32  */
   3.210          NULL, /* l2_32   */
   3.211 -        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,3,3), /* l1_pae  */
   3.212 -        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,3,3), /* fl1_pae */
   3.213 +        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 3), /* l1_pae  */
   3.214 +        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 3), /* fl1_pae */
   3.215          NULL, /* l2_pae  */
   3.216          NULL, /* l2h_pae */
   3.217  #if CONFIG_PAGING_LEVELS >= 4
   3.218 -        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,4,4), /* l1_64   */
   3.219 -        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,4,4), /* fl1_64  */
   3.220 +        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4), /* l1_64   */
   3.221 +        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4), /* fl1_64  */
   3.222  #else
   3.223          NULL, /* l1_64   */
   3.224          NULL, /* fl1_64  */
   3.225 @@ -1918,12 +1911,12 @@ static int sh_remove_shadow_via_pointer(
   3.226      {
   3.227      case SH_type_l1_32_shadow:
   3.228      case SH_type_l2_32_shadow:
   3.229 -        SHADOW_INTERNAL_NAME(sh_clear_shadow_entry,3,2)(v, vaddr, pmfn);
   3.230 +        SHADOW_INTERNAL_NAME(sh_clear_shadow_entry, 2)(v, vaddr, pmfn);
   3.231          break;
   3.232      case SH_type_l1_pae_shadow:
   3.233      case SH_type_l2_pae_shadow:
   3.234      case SH_type_l2h_pae_shadow:
   3.235 -        SHADOW_INTERNAL_NAME(sh_clear_shadow_entry,3,3)(v, vaddr, pmfn);
   3.236 +        SHADOW_INTERNAL_NAME(sh_clear_shadow_entry, 3)(v, vaddr, pmfn);
   3.237          break;
   3.238  #if CONFIG_PAGING_LEVELS >= 4
   3.239      case SH_type_l1_64_shadow:
   3.240 @@ -1931,7 +1924,7 @@ static int sh_remove_shadow_via_pointer(
   3.241      case SH_type_l2h_64_shadow:
   3.242      case SH_type_l3_64_shadow:
   3.243      case SH_type_l4_64_shadow:
   3.244 -        SHADOW_INTERNAL_NAME(sh_clear_shadow_entry,4,4)(v, vaddr, pmfn);
   3.245 +        SHADOW_INTERNAL_NAME(sh_clear_shadow_entry, 4)(v, vaddr, pmfn);
   3.246          break;
   3.247  #endif
   3.248      default: BUG(); /* Some wierd unknown shadow type */
   3.249 @@ -1966,18 +1959,18 @@ void sh_remove_shadows(struct vcpu *v, m
   3.250          NULL, /* none    */
   3.251          NULL, /* l1_32   */
   3.252          NULL, /* fl1_32  */
   3.253 -        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,3,2), /* l2_32   */
   3.254 +        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 2), /* l2_32   */
   3.255          NULL, /* l1_pae  */
   3.256          NULL, /* fl1_pae */
   3.257 -        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,3,3), /* l2_pae  */
   3.258 -        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,3,3), /* l2h_pae */
   3.259 +        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 3), /* l2_pae  */
   3.260 +        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 3), /* l2h_pae */
   3.261          NULL, /* l1_64   */
   3.262          NULL, /* fl1_64  */
   3.263  #if CONFIG_PAGING_LEVELS >= 4
   3.264 -        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,4,4), /* l2_64   */
   3.265 -        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,4,4), /* l2h_64  */
   3.266 -        SHADOW_INTERNAL_NAME(sh_remove_l2_shadow,4,4), /* l3_64   */
   3.267 -        SHADOW_INTERNAL_NAME(sh_remove_l3_shadow,4,4), /* l4_64   */
   3.268 +        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 4), /* l2_64   */
   3.269 +        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 4), /* l2h_64  */
   3.270 +        SHADOW_INTERNAL_NAME(sh_remove_l2_shadow, 4), /* l3_64   */
   3.271 +        SHADOW_INTERNAL_NAME(sh_remove_l3_shadow, 4), /* l4_64   */
   3.272  #else
   3.273          NULL, /* l2_64   */
   3.274          NULL, /* l2h_64  */
   3.275 @@ -2061,7 +2054,6 @@ void sh_remove_shadows(struct vcpu *v, m
   3.276  
   3.277      DO_UNSHADOW(SH_type_l2_32_shadow);
   3.278      DO_UNSHADOW(SH_type_l1_32_shadow);
   3.279 -#if CONFIG_PAGING_LEVELS >= 3
   3.280      DO_UNSHADOW(SH_type_l2h_pae_shadow);
   3.281      DO_UNSHADOW(SH_type_l2_pae_shadow);
   3.282      DO_UNSHADOW(SH_type_l1_pae_shadow);
   3.283 @@ -2072,7 +2064,6 @@ void sh_remove_shadows(struct vcpu *v, m
   3.284      DO_UNSHADOW(SH_type_l2_64_shadow);
   3.285      DO_UNSHADOW(SH_type_l1_64_shadow);
   3.286  #endif
   3.287 -#endif
   3.288  
   3.289  #undef DO_UNSHADOW
   3.290  
   3.291 @@ -2154,11 +2145,9 @@ static void sh_update_paging_modes(struc
   3.292          /// PV guest
   3.293          ///
   3.294  #if CONFIG_PAGING_LEVELS == 4
   3.295 -        v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,4,4);
   3.296 -#elif CONFIG_PAGING_LEVELS == 3
   3.297 -        v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
   3.298 -#else
   3.299 -#error unexpected paging mode
   3.300 +        v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 4);
   3.301 +#else /* CONFIG_PAGING_LEVELS == 3 */
   3.302 +        v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
   3.303  #endif
   3.304      }
   3.305      else
   3.306 @@ -2175,11 +2164,7 @@ static void sh_update_paging_modes(struc
   3.307               * pagetable for it, mapping 4 GB one-to-one using a single l2
   3.308               * page of 1024 superpage mappings */
   3.309              v->arch.guest_table = d->arch.paging.shadow.unpaged_pagetable;
   3.310 -#if CONFIG_PAGING_LEVELS >= 3
   3.311 -            v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 3, 2);
   3.312 -#else
   3.313 -            v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 2, 2);
   3.314 -#endif
   3.315 +            v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 2);
   3.316          }
   3.317          else
   3.318          {
   3.319 @@ -2188,32 +2173,21 @@ static void sh_update_paging_modes(struc
   3.320              {
   3.321                  // long mode guest...
   3.322                  v->arch.paging.mode =
   3.323 -                    &SHADOW_INTERNAL_NAME(sh_paging_mode, 4, 4);
   3.324 +                    &SHADOW_INTERNAL_NAME(sh_paging_mode, 4);
   3.325              }
   3.326              else
   3.327  #endif
   3.328                  if ( hvm_pae_enabled(v) )
   3.329                  {
   3.330 -#if CONFIG_PAGING_LEVELS >= 3
   3.331                      // 32-bit PAE mode guest...
   3.332                      v->arch.paging.mode =
   3.333 -                        &SHADOW_INTERNAL_NAME(sh_paging_mode, 3, 3);
   3.334 -#else
   3.335 -                    SHADOW_ERROR("PAE not supported in 32-bit Xen\n");
   3.336 -                    domain_crash(d);
   3.337 -                    return;
   3.338 -#endif
   3.339 +                        &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
   3.340                  }
   3.341                  else
   3.342                  {
   3.343                      // 32-bit 2 level guest...
   3.344 -#if CONFIG_PAGING_LEVELS >= 3
   3.345                      v->arch.paging.mode =
   3.346 -                        &SHADOW_INTERNAL_NAME(sh_paging_mode, 3, 2);
   3.347 -#else
   3.348 -                    v->arch.paging.mode =
   3.349 -                        &SHADOW_INTERNAL_NAME(sh_paging_mode, 2, 2);
   3.350 -#endif
   3.351 +                        &SHADOW_INTERNAL_NAME(sh_paging_mode, 2);
   3.352                  }
   3.353          }
   3.354  
   3.355 @@ -2227,7 +2201,7 @@ static void sh_update_paging_modes(struc
   3.356  
   3.357          if ( v->arch.paging.mode != old_mode )
   3.358          {
   3.359 -            SHADOW_PRINTK("new paging mode: d=%u v=%u pe=%d g=%u s=%u "
   3.360 +            SHADOW_PRINTK("new paging mode: d=%u v=%u pe=%d gl=%u "
   3.361                            "(was g=%u s=%u)\n",
   3.362                            d->domain_id, v->vcpu_id,
   3.363                            is_hvm_domain(d) ? hvm_paging_enabled(v) : 1,
   3.364 @@ -3033,20 +3007,20 @@ void shadow_audit_tables(struct vcpu *v)
   3.365      /* Dispatch table for getting per-type functions */
   3.366      static hash_callback_t callbacks[SH_type_unused] = {
   3.367          NULL, /* none    */
   3.368 -        SHADOW_INTERNAL_NAME(sh_audit_l1_table,3,2),  /* l1_32   */
   3.369 -        SHADOW_INTERNAL_NAME(sh_audit_fl1_table,3,2), /* fl1_32  */
   3.370 -        SHADOW_INTERNAL_NAME(sh_audit_l2_table,3,2),  /* l2_32   */
   3.371 -        SHADOW_INTERNAL_NAME(sh_audit_l1_table,3,3),  /* l1_pae  */
   3.372 -        SHADOW_INTERNAL_NAME(sh_audit_fl1_table,3,3), /* fl1_pae */
   3.373 -        SHADOW_INTERNAL_NAME(sh_audit_l2_table,3,3),  /* l2_pae  */
   3.374 -        SHADOW_INTERNAL_NAME(sh_audit_l2_table,3,3),  /* l2h_pae */
   3.375 +        SHADOW_INTERNAL_NAME(sh_audit_l1_table, 2),  /* l1_32   */
   3.376 +        SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 2), /* fl1_32  */
   3.377 +        SHADOW_INTERNAL_NAME(sh_audit_l2_table, 2),  /* l2_32   */
   3.378 +        SHADOW_INTERNAL_NAME(sh_audit_l1_table, 3),  /* l1_pae  */
   3.379 +        SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 3), /* fl1_pae */
   3.380 +        SHADOW_INTERNAL_NAME(sh_audit_l2_table, 3),  /* l2_pae  */
   3.381 +        SHADOW_INTERNAL_NAME(sh_audit_l2_table, 3),  /* l2h_pae */
   3.382  #if CONFIG_PAGING_LEVELS >= 4
   3.383 -        SHADOW_INTERNAL_NAME(sh_audit_l1_table,4,4),  /* l1_64   */
   3.384 -        SHADOW_INTERNAL_NAME(sh_audit_fl1_table,4,4), /* fl1_64  */
   3.385 -        SHADOW_INTERNAL_NAME(sh_audit_l2_table,4,4),  /* l2_64   */
   3.386 -        SHADOW_INTERNAL_NAME(sh_audit_l2_table,4,4),  /* l2h_64   */
   3.387 -        SHADOW_INTERNAL_NAME(sh_audit_l3_table,4,4),  /* l3_64   */
   3.388 -        SHADOW_INTERNAL_NAME(sh_audit_l4_table,4,4),  /* l4_64   */
   3.389 +        SHADOW_INTERNAL_NAME(sh_audit_l1_table, 4),  /* l1_64   */
   3.390 +        SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 4), /* fl1_64  */
   3.391 +        SHADOW_INTERNAL_NAME(sh_audit_l2_table, 4),  /* l2_64   */
   3.392 +        SHADOW_INTERNAL_NAME(sh_audit_l2_table, 4),  /* l2h_64   */
   3.393 +        SHADOW_INTERNAL_NAME(sh_audit_l3_table, 4),  /* l3_64   */
   3.394 +        SHADOW_INTERNAL_NAME(sh_audit_l4_table, 4),  /* l4_64   */
   3.395  #endif /* CONFIG_PAGING_LEVELS >= 4 */
   3.396          NULL  /* All the rest */
   3.397      };
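
Beyond the mechanical renames, two call sites in common.c simplify
outright: shadow_vcpu_init() loses an #if/#elif whose two arms were
identical, and the paging-disabled HVM path no longer needs a
build-time choice of mode.  The surviving forms, taken from the hunks
above:

    /* shadow_vcpu_init(): all vcpus start in the 3-level-guest
     * (PAE-shadow) mode */
    v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);

    /* paging-disabled HVM vcpu: always the 2-level-guest mode */
    v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 2);
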
     4.1 --- a/xen/arch/x86/mm/shadow/multi.c	Thu May 08 14:33:31 2008 +0100
     4.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Thu May 08 16:58:33 2008 +0100
     4.3 @@ -605,7 +605,7 @@ static void sh_audit_gw(struct vcpu *v, 
     4.4  #endif /* audit code */
     4.5  
     4.6  
     4.7 -#if (CONFIG_PAGING_LEVELS == GUEST_PAGING_LEVELS) && (CONFIG_PAGING_LEVELS == SHADOW_PAGING_LEVELS)
     4.8 +#if (CONFIG_PAGING_LEVELS == GUEST_PAGING_LEVELS)
     4.9  void *
    4.10  sh_guest_map_l1e(struct vcpu *v, unsigned long addr,
    4.11                    unsigned long *gl1mfn)
    4.12 @@ -643,7 +643,7 @@ sh_guest_get_eff_l1e(struct vcpu *v, uns
    4.13      (void) guest_walk_tables(v, addr, &gw, PFEC_page_present);
    4.14      *(guest_l1e_t *)eff_l1e = gw.l1e;
    4.15  }
    4.16 -#endif /* CONFIG==SHADOW==GUEST */
    4.17 +#endif /* CONFIG == GUEST (== SHADOW) */
    4.18  
    4.19  /**************************************************************************/
    4.20  /* Functions to compute the correct index into a shadow page, given an
    4.21 @@ -678,7 +678,7 @@ guest_index(void *ptr)
    4.22  static u32
    4.23  shadow_l1_index(mfn_t *smfn, u32 guest_index)
    4.24  {
    4.25 -#if (GUEST_PAGING_LEVELS == 2) && (SHADOW_PAGING_LEVELS != 2)
    4.26 +#if (GUEST_PAGING_LEVELS == 2)
    4.27      *smfn = _mfn(mfn_x(*smfn) +
    4.28                   (guest_index / SHADOW_L1_PAGETABLE_ENTRIES));
    4.29      return (guest_index % SHADOW_L1_PAGETABLE_ENTRIES);
    4.30 @@ -690,14 +690,14 @@ shadow_l1_index(mfn_t *smfn, u32 guest_i
    4.31  static u32
    4.32  shadow_l2_index(mfn_t *smfn, u32 guest_index)
    4.33  {
    4.34 -#if (GUEST_PAGING_LEVELS == 2) && (SHADOW_PAGING_LEVELS != 2)
    4.35 +#if (GUEST_PAGING_LEVELS == 2)
    4.36      // Because we use 2 shadow l2 entries for each guest entry, the number of
    4.37      // guest entries per shadow page is SHADOW_L2_PAGETABLE_ENTRIES/2
    4.38      //
    4.39      *smfn = _mfn(mfn_x(*smfn) +
    4.40                   (guest_index / (SHADOW_L2_PAGETABLE_ENTRIES / 2)));
    4.41  
    4.42 -    // We multiple by two to get the index of the first of the two entries
    4.43 +    // We multiply by two to get the index of the first of the two entries
    4.44      // used to shadow the specified guest entry.
    4.45      return (guest_index % (SHADOW_L2_PAGETABLE_ENTRIES / 2)) * 2;
    4.46  #else
    4.47 @@ -721,12 +721,7 @@ shadow_l4_index(mfn_t *smfn, u32 guest_i
    4.48  
    4.49  #endif // GUEST_PAGING_LEVELS >= 4
    4.50  
    4.51 -extern u32 get_pat_flags(struct vcpu *v,
    4.52 -                  u32 gl1e_flags,
    4.53 -                  paddr_t gpaddr,
    4.54 -                  paddr_t spaddr);
    4.55 -
    4.56 -unsigned char pat_type_2_pte_flags(unsigned char pat_type);
    4.57 +
    4.58  /**************************************************************************/
    4.59  /* Function which computes shadow entries from their corresponding guest
    4.60   * entries.  This is the "heart" of the shadow code. It operates using
    4.61 @@ -996,7 +991,7 @@ static inline void safe_write_entry(void
    4.62      d[1] = s[1];
    4.63      d[0] = s[0];
    4.64  #else
    4.65 -    /* In 32-bit and 64-bit, sizeof(pte) == sizeof(ulong) == 1 word,
    4.66 +    /* In 64-bit, sizeof(pte) == sizeof(ulong) == 1 word,
    4.67       * which will be an atomic write, since the entry is aligned. */
    4.68      BUILD_BUG_ON(sizeof (shadow_l1e_t) != sizeof (unsigned long));
    4.69      *d = *s;
    4.70 @@ -1204,7 +1199,7 @@ static int shadow_set_l2e(struct vcpu *v
    4.71      shadow_l2e_t old_sl2e;
    4.72      paddr_t paddr;
    4.73  
    4.74 -#if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
    4.75 +#if GUEST_PAGING_LEVELS == 2
    4.76      /* In 2-on-3 we work with pairs of l2es pointing at two-page
    4.77       * shadows.  Reference counting and up-pointers track from the first
    4.78       * page of the shadow to the first l2e, so make sure that we're 
    4.79 @@ -1232,7 +1227,7 @@ static int shadow_set_l2e(struct vcpu *v
    4.80          } 
    4.81  
    4.82      /* Write the new entry */
    4.83 -#if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
    4.84 +#if GUEST_PAGING_LEVELS == 2
    4.85      {
    4.86          shadow_l2e_t pair[2] = { new_sl2e, new_sl2e };
    4.87          /* The l1 shadow is two pages long and need to be pointed to by
    4.88 @@ -1418,7 +1413,7 @@ static inline void increment_ptr_to_gues
    4.89  #define _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)        \
    4.90  do {                                                                    \
    4.91      int _i;                                                             \
    4.92 -    shadow_l1e_t *_sp = map_shadow_page((_sl1mfn));                     \
    4.93 +    shadow_l1e_t *_sp = sh_map_domain_page((_sl1mfn));                  \
    4.94      ASSERT(mfn_to_shadow_page(_sl1mfn)->type == SH_type_l1_shadow       \
    4.95             || mfn_to_shadow_page(_sl1mfn)->type == SH_type_fl1_shadow); \
    4.96      for ( _i = 0; _i < SHADOW_L1_PAGETABLE_ENTRIES; _i++ )              \
    4.97 @@ -1429,7 +1424,7 @@ do {                                    
    4.98          if ( _done ) break;                                             \
    4.99          increment_ptr_to_guest_entry(_gl1p);                            \
   4.100      }                                                                   \
   4.101 -    unmap_shadow_page(_sp);                                             \
   4.102 +    sh_unmap_domain_page(_sp);                                          \
   4.103  } while (0)
   4.104  
   4.105  /* 32-bit l1, on PAE or 64-bit shadows: need to walk both pages of shadow */
   4.106 @@ -1450,7 +1445,7 @@ do {                                    
   4.107  #endif
   4.108      
   4.109  
   4.110 -#if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
   4.111 +#if GUEST_PAGING_LEVELS == 2
   4.112  
   4.113  /* 32-bit l2 on PAE/64: four pages, touch every second entry, and avoid Xen */
   4.114  #define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)     \
   4.115 @@ -1460,7 +1455,7 @@ do {                                    
   4.116      ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_32_shadow);    \
   4.117      for ( _j = 0; _j < 4 && !__done; _j++ )                               \
   4.118      {                                                                     \
   4.119 -        shadow_l2e_t *_sp = map_shadow_page(_sl2mfn);                     \
   4.120 +        shadow_l2e_t *_sp = sh_map_domain_page(_sl2mfn);                  \
   4.121          for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i += 2 )         \
   4.122              if ( (!(_xen))                                                \
   4.123                   || ((_j * SHADOW_L2_PAGETABLE_ENTRIES) + _i)             \
   4.124 @@ -1472,34 +1467,11 @@ do {                                    
   4.125                  if ( (__done = (_done)) ) break;                          \
   4.126                  increment_ptr_to_guest_entry(_gl2p);                      \
   4.127              }                                                             \
   4.128 -        unmap_shadow_page(_sp);                                           \
   4.129 +        sh_unmap_domain_page(_sp);                                        \
   4.130          _sl2mfn = _mfn(mfn_x(_sl2mfn) + 1);                               \
   4.131      }                                                                     \
   4.132  } while (0)
   4.133  
   4.134 -#elif GUEST_PAGING_LEVELS == 2
   4.135 -
   4.136 -/* 32-bit on 32-bit: avoid Xen entries */
   4.137 -#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)      \
   4.138 -do {                                                                       \
   4.139 -    int _i;                                                                \
   4.140 -    int _xen = !shadow_mode_external(_dom);                                \
   4.141 -    shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                        \
   4.142 -    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_32_shadow);     \
   4.143 -    for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                 \
   4.144 -        if ( (!(_xen))                                                     \
   4.145 -             ||                                                            \
   4.146 -             (_i < (HYPERVISOR_VIRT_START >> SHADOW_L2_PAGETABLE_SHIFT)) ) \
   4.147 -        {                                                                  \
   4.148 -            (_sl2e) = _sp + _i;                                            \
   4.149 -            if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT )          \
   4.150 -                {_code}                                                    \
   4.151 -            if ( _done ) break;                                            \
   4.152 -            increment_ptr_to_guest_entry(_gl2p);                           \
   4.153 -        }                                                                  \
   4.154 -    unmap_shadow_page(_sp);                                                \
   4.155 -} while (0)
   4.156 -
   4.157  #elif GUEST_PAGING_LEVELS == 3
   4.158  
   4.159  /* PAE: if it's an l2h, don't touch Xen mappings */
   4.160 @@ -1507,7 +1479,7 @@ do {                                    
   4.161  do {                                                                       \
   4.162      int _i;                                                                \
   4.163      int _xen = !shadow_mode_external(_dom);                                \
   4.164 -    shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                        \
   4.165 +    shadow_l2e_t *_sp = sh_map_domain_page((_sl2mfn));                     \
   4.166      ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_pae_shadow      \
   4.167             || mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_pae_shadow);\
   4.168      for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                 \
   4.169 @@ -1522,7 +1494,7 @@ do {                                    
   4.170              if ( _done ) break;                                            \
   4.171              increment_ptr_to_guest_entry(_gl2p);                           \
   4.172          }                                                                  \
   4.173 -    unmap_shadow_page(_sp);                                                \
   4.174 +    sh_unmap_domain_page(_sp);                                             \
   4.175  } while (0)
   4.176  
   4.177  #else 
   4.178 @@ -1532,7 +1504,7 @@ do {                                    
   4.179  do {                                                                        \
   4.180      int _i;                                                                 \
   4.181      int _xen = !shadow_mode_external(_dom);                                 \
   4.182 -    shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                         \
   4.183 +    shadow_l2e_t *_sp = sh_map_domain_page((_sl2mfn));                      \
   4.184      ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_64_shadow ||     \
   4.185             mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_64_shadow);     \
   4.186      for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                  \
   4.187 @@ -1549,7 +1521,7 @@ do {                                    
   4.188              increment_ptr_to_guest_entry(_gl2p);                            \
   4.189          }                                                                   \
   4.190      }                                                                       \
   4.191 -    unmap_shadow_page(_sp);                                                 \
   4.192 +    sh_unmap_domain_page(_sp);                                              \
   4.193  } while (0)
   4.194  
   4.195  #endif /* different kinds of l2 */
   4.196 @@ -1560,7 +1532,7 @@ do {                                    
   4.197  #define SHADOW_FOREACH_L3E(_sl3mfn, _sl3e, _gl3p, _done, _code)         \
   4.198  do {                                                                    \
   4.199      int _i;                                                             \
   4.200 -    shadow_l3e_t *_sp = map_shadow_page((_sl3mfn));                     \
   4.201 +    shadow_l3e_t *_sp = sh_map_domain_page((_sl3mfn));                  \
   4.202      ASSERT(mfn_to_shadow_page(_sl3mfn)->type == SH_type_l3_64_shadow);  \
   4.203      for ( _i = 0; _i < SHADOW_L3_PAGETABLE_ENTRIES; _i++ )              \
   4.204      {                                                                   \
   4.205 @@ -1570,13 +1542,13 @@ do {                                    
   4.206          if ( _done ) break;                                             \
   4.207          increment_ptr_to_guest_entry(_gl3p);                            \
   4.208      }                                                                   \
   4.209 -    unmap_shadow_page(_sp);                                             \
   4.210 +    sh_unmap_domain_page(_sp);                                          \
   4.211  } while (0)
   4.212  
   4.213  /* 64-bit l4: avoid Xen mappings */
   4.214  #define SHADOW_FOREACH_L4E(_sl4mfn, _sl4e, _gl4p, _done, _dom, _code)   \
   4.215  do {                                                                    \
   4.216 -    shadow_l4e_t *_sp = map_shadow_page((_sl4mfn));                     \
   4.217 +    shadow_l4e_t *_sp = sh_map_domain_page((_sl4mfn));                  \
   4.218      int _xen = !shadow_mode_external(_dom);                             \
   4.219      int _i;                                                             \
   4.220      ASSERT(mfn_to_shadow_page(_sl4mfn)->type == SH_type_l4_64_shadow);  \
   4.221 @@ -1591,7 +1563,7 @@ do {                                    
   4.222          }                                                               \
   4.223          increment_ptr_to_guest_entry(_gl4p);                            \
   4.224      }                                                                   \
   4.225 -    unmap_shadow_page(_sp);                                             \
   4.226 +    sh_unmap_domain_page(_sp);                                          \
   4.227  } while (0)
   4.228  
   4.229  #endif
   4.230 @@ -2606,7 +2578,7 @@ sh_map_and_validate(struct vcpu *v, mfn_
   4.231      guest_idx = guest_index(new_gp);
   4.232      map_mfn = smfn;
   4.233      shadow_idx = shadow_index(&map_mfn, guest_idx);
   4.234 -    sl1p = map_shadow_page(map_mfn);
   4.235 +    sl1p = sh_map_domain_page(map_mfn);
   4.236  
   4.237      /* Validate one entry at a time */
   4.238      while ( size )
   4.239 @@ -2618,8 +2590,8 @@ sh_map_and_validate(struct vcpu *v, mfn_
   4.240          {
   4.241              /* We have moved to another page of the shadow */
   4.242              map_mfn = smfn2;
   4.243 -            unmap_shadow_page(sl1p);
   4.244 -            sl1p = map_shadow_page(map_mfn);
   4.245 +            sh_unmap_domain_page(sl1p);
   4.246 +            sl1p = sh_map_domain_page(map_mfn);
   4.247          }
   4.248          result |= validate_ge(v,
   4.249                                new_gp,
   4.250 @@ -2628,7 +2600,7 @@ sh_map_and_validate(struct vcpu *v, mfn_
   4.251          size -= sizeof(guest_l1e_t);
   4.252          new_gp += sizeof(guest_l1e_t);
   4.253      }
   4.254 -    unmap_shadow_page(sl1p);
   4.255 +    sh_unmap_domain_page(sl1p);
   4.256      return result;
   4.257  }
   4.258  
   4.259 @@ -2875,7 +2847,7 @@ static int sh_page_fault(struct vcpu *v,
   4.260      //      bunch of 4K maps.
   4.261      //
   4.262  
   4.263 -#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH) && SHADOW_PAGING_LEVELS > 2
   4.264 +#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
   4.265      if ( (regs->error_code & PFEC_reserved_bit) )
   4.266      {
   4.267          /* The only reasons for reserved bits to be set in shadow entries 
   4.268 @@ -3282,7 +3254,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
   4.269          if ( (!shadow_l3e_get_flags(sl3e) & _PAGE_PRESENT) )
   4.270              return 0;
   4.271      }
   4.272 -#elif SHADOW_PAGING_LEVELS == 3
   4.273 +#else /* SHADOW_PAGING_LEVELS == 3 */
   4.274      if ( !(l3e_get_flags(v->arch.paging.shadow.l3table[shadow_l3_linear_offset(va)])
   4.275             & _PAGE_PRESENT) )
   4.276          // no need to flush anything if there's no SL2...
   4.277 @@ -3827,10 +3799,6 @@ sh_update_cr3(struct vcpu *v, int do_loc
   4.278  #error this should never happen
   4.279  #endif
   4.280  
   4.281 -#if 0
   4.282 -    printk("%s %s %d gmfn=%05lx shadow.guest_vtable=%p\n",
   4.283 -           __func__, __FILE__, __LINE__, gmfn, v->arch.paging.shadow.guest_vtable);
   4.284 -#endif
   4.285  
   4.286      ////
   4.287      //// vcpu->arch.shadow_table[]
   4.288 @@ -3891,8 +3859,6 @@ sh_update_cr3(struct vcpu *v, int do_loc
   4.289  #error This should never happen 
   4.290  #endif
   4.291  
   4.292 -#if (CONFIG_PAGING_LEVELS == 3) && (GUEST_PAGING_LEVELS == 3)
   4.293 -#endif
   4.294  
   4.295      /// 
   4.296      /// v->arch.paging.shadow.l3table
   4.297 @@ -3937,7 +3903,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
   4.298          ASSERT(virt_to_maddr(&v->arch.paging.shadow.l3table) <= 0xffffffe0ULL);
   4.299          v->arch.cr3 = virt_to_maddr(&v->arch.paging.shadow.l3table);
   4.300  #else
   4.301 -        /* 2-on-2 or 4-on-4: Just use the shadow top-level directly */
   4.302 +        /* 4-on-4: Just use the shadow top-level directly */
   4.303          make_cr3(v, pagetable_get_pfn(v->arch.shadow_table[0]));
   4.304  #endif
   4.305      }
   4.306 @@ -3954,7 +3920,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
   4.307          v->arch.hvm_vcpu.hw_cr[3] =
   4.308              virt_to_maddr(&v->arch.paging.shadow.l3table);
   4.309  #else
   4.310 -        /* 2-on-2 or 4-on-4: Just use the shadow top-level directly */
   4.311 +        /* 4-on-4: Just use the shadow top-level directly */
   4.312          v->arch.hvm_vcpu.hw_cr[3] =
   4.313              pagetable_get_paddr(v->arch.shadow_table[0]);
   4.314  #endif
   4.315 @@ -3988,12 +3954,10 @@ static int sh_guess_wrmap(struct vcpu *v
   4.316  {
   4.317      shadow_l1e_t sl1e, *sl1p;
   4.318      shadow_l2e_t *sl2p;
   4.319 -#if SHADOW_PAGING_LEVELS >= 3
   4.320      shadow_l3e_t *sl3p;
   4.321  #if SHADOW_PAGING_LEVELS >= 4
   4.322      shadow_l4e_t *sl4p;
   4.323  #endif
   4.324 -#endif
   4.325      mfn_t sl1mfn;
   4.326      int r;
   4.327  
   4.328 @@ -4005,7 +3969,7 @@ static int sh_guess_wrmap(struct vcpu *v
   4.329      sl3p = sh_linear_l3_table(v) + shadow_l3_linear_offset(vaddr);
   4.330      if ( !(shadow_l3e_get_flags(*sl3p) & _PAGE_PRESENT) )
   4.331          return 0;
   4.332 -#elif SHADOW_PAGING_LEVELS == 3
   4.333 +#else /* SHADOW_PAGING_LEVELS == 3 */
   4.334      sl3p = ((shadow_l3e_t *) v->arch.paging.shadow.l3table) 
   4.335          + shadow_l3_linear_offset(vaddr);
   4.336      if ( !(shadow_l3e_get_flags(*sl3p) & _PAGE_PRESENT) )
   4.337 @@ -4536,7 +4500,7 @@ int sh_audit_l1_table(struct vcpu *v, mf
   4.338  
   4.339          if ( sh_l1e_is_magic(*sl1e) ) 
   4.340          {
   4.341 -#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH) && SHADOW_PAGING_LEVELS > 2
   4.342 +#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
   4.343              if ( sh_l1e_is_gnp(*sl1e) )
   4.344              {
   4.345                  if ( guest_l1e_get_flags(*gl1e) & _PAGE_PRESENT )
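
The safe_write_entry() hunk above shows the shape of what remains:
shadow entries are now either two machine words (PAE shadows on a
32-bit hypervisor) or one word (64-bit).  A minimal sketch of the
whole function follows; the guard condition and the not-present-first
ordering in the PAE branch are assumptions taken from the mainline
function, not visible in the hunk, and the real function also asserts
alignment and entry sizes:

    static inline void safe_write_entry(void *dst, void *src)
    {
        volatile unsigned long *d = dst;
        unsigned long *s = src;
    #if CONFIG_PAGING_LEVELS == 3
        /* PAE: a 64-bit entry is two words and cannot be written
         * atomically.  Mark it not-present, write the high word, then
         * the low word (which holds _PAGE_PRESENT), so another CPU
         * never sees a present but half-written entry. */
        d[0] = 0;
        d[1] = s[1];
        d[0] = s[0];
    #else
        /* In 64-bit, sizeof(pte) == sizeof(ulong) == 1 word,
         * which will be an atomic write, since the entry is aligned. */
        *d = *s;
    #endif
    }
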
     5.1 --- a/xen/arch/x86/mm/shadow/multi.h	Thu May 08 14:33:31 2008 +0100
     5.2 +++ b/xen/arch/x86/mm/shadow/multi.h	Thu May 08 16:58:33 2008 +0100
     5.3 @@ -22,98 +22,96 @@
     5.4   */
     5.5  
     5.6  extern int 
     5.7 -SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, SHADOW_LEVELS, GUEST_LEVELS)(
     5.8 +SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, GUEST_LEVELS)(
     5.9      struct vcpu *v, mfn_t gl1mfn, void *new_gl1p, u32 size);
    5.10  extern int 
    5.11 -SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, SHADOW_LEVELS, GUEST_LEVELS)(
    5.12 +SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, GUEST_LEVELS)(
    5.13      struct vcpu *v, mfn_t gl2mfn, void *new_gl2p, u32 size);
    5.14  extern int 
    5.15 -SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, SHADOW_LEVELS, GUEST_LEVELS)(
    5.16 +SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, GUEST_LEVELS)(
    5.17      struct vcpu *v, mfn_t gl2mfn, void *new_gl2p, u32 size);
    5.18  extern int 
    5.19 -SHADOW_INTERNAL_NAME(sh_map_and_validate_gl3e, SHADOW_LEVELS, GUEST_LEVELS)(
    5.20 +SHADOW_INTERNAL_NAME(sh_map_and_validate_gl3e, GUEST_LEVELS)(
    5.21      struct vcpu *v, mfn_t gl3mfn, void *new_gl3p, u32 size);
    5.22  extern int 
    5.23 -SHADOW_INTERNAL_NAME(sh_map_and_validate_gl4e, SHADOW_LEVELS, GUEST_LEVELS)(
    5.24 +SHADOW_INTERNAL_NAME(sh_map_and_validate_gl4e, GUEST_LEVELS)(
    5.25      struct vcpu *v, mfn_t gl4mfn, void *new_gl4p, u32 size);
    5.26  
    5.27  extern void 
    5.28 -SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, SHADOW_LEVELS, GUEST_LEVELS)(
    5.29 +SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, GUEST_LEVELS)(
    5.30      struct vcpu *v, mfn_t smfn);
    5.31  extern void 
    5.32 -SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, SHADOW_LEVELS, GUEST_LEVELS)(
    5.33 +SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, GUEST_LEVELS)(
    5.34      struct vcpu *v, mfn_t smfn);
    5.35  extern void 
    5.36 -SHADOW_INTERNAL_NAME(sh_destroy_l3_shadow, SHADOW_LEVELS, GUEST_LEVELS)(
    5.37 +SHADOW_INTERNAL_NAME(sh_destroy_l3_shadow, GUEST_LEVELS)(
    5.38      struct vcpu *v, mfn_t smfn);
    5.39  extern void 
    5.40 -SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, SHADOW_LEVELS, GUEST_LEVELS)(
    5.41 +SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, GUEST_LEVELS)(
    5.42      struct vcpu *v, mfn_t smfn);
    5.43  
    5.44  extern void 
    5.45 -SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings, SHADOW_LEVELS, GUEST_LEVELS)
    5.46 +SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings, GUEST_LEVELS)
    5.47      (struct vcpu *v, mfn_t sl2mfn);
    5.48  extern void 
    5.49 -SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings, SHADOW_LEVELS, GUEST_LEVELS)
    5.50 +SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings, GUEST_LEVELS)
    5.51      (struct vcpu *v, mfn_t sl3mfn);
    5.52  extern void 
    5.53 -SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings, SHADOW_LEVELS, GUEST_LEVELS)
    5.54 +SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings, GUEST_LEVELS)
    5.55      (struct vcpu *v, mfn_t sl4mfn);
    5.56  
    5.57  extern int
    5.58 -SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, SHADOW_LEVELS, GUEST_LEVELS)
    5.59 +SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, GUEST_LEVELS)
    5.60      (struct vcpu *v, mfn_t sl1mfn, mfn_t readonly_mfn);
    5.61  extern int
    5.62 -SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, SHADOW_LEVELS, GUEST_LEVELS)
    5.63 +SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, GUEST_LEVELS)
    5.64      (struct vcpu *v, mfn_t sl1mfn, mfn_t target_mfn);
    5.65  
    5.66  extern void
    5.67 -SHADOW_INTERNAL_NAME(sh_clear_shadow_entry, SHADOW_LEVELS, GUEST_LEVELS)
    5.68 +SHADOW_INTERNAL_NAME(sh_clear_shadow_entry, GUEST_LEVELS)
    5.69      (struct vcpu *v, void *ep, mfn_t smfn);
    5.70  
    5.71  extern int
    5.72 -SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, SHADOW_LEVELS, GUEST_LEVELS)
    5.73 +SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, GUEST_LEVELS)
    5.74      (struct vcpu *v, mfn_t sl2mfn, mfn_t sl1mfn);
    5.75  extern int
    5.76 -SHADOW_INTERNAL_NAME(sh_remove_l2_shadow, SHADOW_LEVELS, GUEST_LEVELS)
    5.77 +SHADOW_INTERNAL_NAME(sh_remove_l2_shadow, GUEST_LEVELS)
    5.78      (struct vcpu *v, mfn_t sl3mfn, mfn_t sl2mfn);
    5.79  extern int
    5.80 -SHADOW_INTERNAL_NAME(sh_remove_l3_shadow, SHADOW_LEVELS, GUEST_LEVELS)
    5.81 +SHADOW_INTERNAL_NAME(sh_remove_l3_shadow, GUEST_LEVELS)
    5.82      (struct vcpu *v, mfn_t sl4mfn, mfn_t sl3mfn);
    5.83  
    5.84  #if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES
    5.85  int 
    5.86 -SHADOW_INTERNAL_NAME(sh_audit_l1_table, SHADOW_LEVELS, GUEST_LEVELS)
    5.87 +SHADOW_INTERNAL_NAME(sh_audit_l1_table, GUEST_LEVELS)
    5.88      (struct vcpu *v, mfn_t sl1mfn, mfn_t x);
    5.89  int 
    5.90 -SHADOW_INTERNAL_NAME(sh_audit_fl1_table, SHADOW_LEVELS, GUEST_LEVELS)
    5.91 +SHADOW_INTERNAL_NAME(sh_audit_fl1_table, GUEST_LEVELS)
    5.92      (struct vcpu *v, mfn_t sl1mfn, mfn_t x);
    5.93  int 
    5.94 -SHADOW_INTERNAL_NAME(sh_audit_l2_table, SHADOW_LEVELS, GUEST_LEVELS)
    5.95 +SHADOW_INTERNAL_NAME(sh_audit_l2_table, GUEST_LEVELS)
    5.96      (struct vcpu *v, mfn_t sl2mfn, mfn_t x);
    5.97  int 
    5.98 -SHADOW_INTERNAL_NAME(sh_audit_l3_table, SHADOW_LEVELS, GUEST_LEVELS)
    5.99 +SHADOW_INTERNAL_NAME(sh_audit_l3_table, GUEST_LEVELS)
   5.100      (struct vcpu *v, mfn_t sl3mfn, mfn_t x);
   5.101  int 
   5.102 -SHADOW_INTERNAL_NAME(sh_audit_l4_table, SHADOW_LEVELS, GUEST_LEVELS)
   5.103 +SHADOW_INTERNAL_NAME(sh_audit_l4_table, GUEST_LEVELS)
   5.104      (struct vcpu *v, mfn_t sl4mfn, mfn_t x);
   5.105  #endif
   5.106  
   5.107  extern void *
   5.108 -SHADOW_INTERNAL_NAME(sh_guest_map_l1e, CONFIG_PAGING_LEVELS, CONFIG_PAGING_LEVELS)
   5.109 +SHADOW_INTERNAL_NAME(sh_guest_map_l1e, CONFIG_PAGING_LEVELS)
   5.110      (struct vcpu *v, unsigned long va, unsigned long *gl1mfn);
   5.111  extern void
   5.112 -SHADOW_INTERNAL_NAME(sh_guest_get_eff_l1e, CONFIG_PAGING_LEVELS, CONFIG_PAGING_LEVELS)
   5.113 +SHADOW_INTERNAL_NAME(sh_guest_get_eff_l1e, CONFIG_PAGING_LEVELS)
   5.114      (struct vcpu *v, unsigned long va, void *eff_l1e);
   5.115  
   5.116 -#if SHADOW_LEVELS == GUEST_LEVELS
   5.117  extern mfn_t
   5.118 -SHADOW_INTERNAL_NAME(sh_make_monitor_table, SHADOW_LEVELS, GUEST_LEVELS)
   5.119 +SHADOW_INTERNAL_NAME(sh_make_monitor_table, GUEST_LEVELS)
   5.120      (struct vcpu *v);
   5.121  extern void
   5.122 -SHADOW_INTERNAL_NAME(sh_destroy_monitor_table, SHADOW_LEVELS, GUEST_LEVELS)
   5.123 +SHADOW_INTERNAL_NAME(sh_destroy_monitor_table, GUEST_LEVELS)
   5.124      (struct vcpu *v, mfn_t mmfn);
   5.125 -#endif
   5.126  
   5.127  extern struct paging_mode 
   5.128 -SHADOW_INTERNAL_NAME(sh_paging_mode, SHADOW_LEVELS, GUEST_LEVELS);
   5.129 +SHADOW_INTERNAL_NAME(sh_paging_mode, GUEST_LEVELS);
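With the SHADOW_LEVELS parameter gone, each entry point above is keyed on
the guest width alone.  As an expansion sketch (not itself part of the
patch), instantiating multi.h with GUEST_LEVELS defined to 3 turns two of
the declarations above into:

    extern int
    sh_rm_mappings_from_l1__guest_3
        (struct vcpu *v, mfn_t sl1mfn, mfn_t target_mfn);

    extern struct paging_mode
    sh_paging_mode__guest_3;

Under the old two-parameter scheme the same symbols were
sh_rm_mappings_from_l1__shadow_3_guest_3 and
sh_paging_mode__shadow_3_guest_3; callers are unaffected either way, since
they always go through SHADOW_INTERNAL_NAME().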
     6.1 --- a/xen/arch/x86/mm/shadow/private.h	Thu May 08 14:33:31 2008 +0100
     6.2 +++ b/xen/arch/x86/mm/shadow/private.h	Thu May 08 16:58:33 2008 +0100
     6.3 @@ -157,49 +157,23 @@ extern void shadow_audit_tables(struct v
     6.4   * Macro for dealing with the naming of the internal names of the
     6.5   * shadow code's external entry points.
     6.6   */
     6.7 -#define SHADOW_INTERNAL_NAME_HIDDEN(name, shadow_levels, guest_levels) \
     6.8 -    name ## __shadow_ ## shadow_levels ## _guest_ ## guest_levels
     6.9 -#define SHADOW_INTERNAL_NAME(name, shadow_levels, guest_levels) \
    6.10 -    SHADOW_INTERNAL_NAME_HIDDEN(name, shadow_levels, guest_levels)
    6.11 +#define SHADOW_INTERNAL_NAME_HIDDEN(name, guest_levels) \
    6.12 +    name ## __guest_ ## guest_levels
    6.13 +#define SHADOW_INTERNAL_NAME(name, guest_levels)        \
    6.14 +    SHADOW_INTERNAL_NAME_HIDDEN(name, guest_levels)
    6.15  
    6.16 -#if CONFIG_PAGING_LEVELS == 3
    6.17  #define GUEST_LEVELS  2
    6.18 -#define SHADOW_LEVELS 3
    6.19  #include "multi.h"
    6.20  #undef GUEST_LEVELS
    6.21 -#undef SHADOW_LEVELS
    6.22  
    6.23  #define GUEST_LEVELS  3
    6.24 -#define SHADOW_LEVELS 3
    6.25  #include "multi.h"
    6.26  #undef GUEST_LEVELS
    6.27 -#undef SHADOW_LEVELS
    6.28 -#endif /* CONFIG_PAGING_LEVELS == 3 */
    6.29  
    6.30  #if CONFIG_PAGING_LEVELS == 4
    6.31 -#define GUEST_LEVELS  2
    6.32 -#define SHADOW_LEVELS 3
    6.33 -#include "multi.h"
    6.34 -#undef GUEST_LEVELS
    6.35 -#undef SHADOW_LEVELS
    6.36 -
    6.37 -#define GUEST_LEVELS  3
    6.38 -#define SHADOW_LEVELS 3
    6.39 +#define GUEST_LEVELS  4
    6.40  #include "multi.h"
    6.41  #undef GUEST_LEVELS
    6.42 -#undef SHADOW_LEVELS
    6.43 -
    6.44 -#define GUEST_LEVELS  3
    6.45 -#define SHADOW_LEVELS 4
    6.46 -#include "multi.h"
    6.47 -#undef GUEST_LEVELS
    6.48 -#undef SHADOW_LEVELS
    6.49 -
    6.50 -#define GUEST_LEVELS  4
    6.51 -#define SHADOW_LEVELS 4
    6.52 -#include "multi.h"
    6.53 -#undef GUEST_LEVELS
    6.54 -#undef SHADOW_LEVELS
    6.55  #endif /* CONFIG_PAGING_LEVELS == 4 */
    6.56  
    6.57  /******************************************************************************
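Two details of the simplified private.h are worth spelling out.  First, the
SHADOW_INTERNAL_NAME_HIDDEN indirection is the usual C preprocessor
double-expansion idiom: ## suppresses macro expansion of its operands, so a
single-level macro would paste the literal token GUEST_LEVELS rather than
its value.  A minimal standalone sketch of the difference (macro names
invented for illustration):

    #define LEVELS 3
    #define NAME_DIRECT(name, n)   name ## __guest_ ## n
    #define NAME_EXPANDED(name, n) NAME_DIRECT(name, n)

    NAME_DIRECT(sh_page_fault, LEVELS)   /* sh_page_fault__guest_LEVELS */
    NAME_EXPANDED(sh_page_fault, LEVELS) /* sh_page_fault__guest_3      */

Second, the repeated #include "multi.h" is a textual template: each pass
declares one full set of entry points for one guest width.  Now that
non-PAE 32-bit builds are gone, the 2- and 3-level guest sets are
instantiated unconditionally, and the 4-level set only when Xen itself
uses 4-level paging.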
     7.1 --- a/xen/arch/x86/mm/shadow/types.h	Thu May 08 14:33:31 2008 +0100
     7.2 +++ b/xen/arch/x86/mm/shadow/types.h	Thu May 08 16:58:33 2008 +0100
     7.3 @@ -23,26 +23,13 @@
     7.4  #ifndef _XEN_SHADOW_TYPES_H
     7.5  #define _XEN_SHADOW_TYPES_H
     7.6  
     7.7 -// Map a shadow page
     7.8 -static inline void *
     7.9 -map_shadow_page(mfn_t smfn)
    7.10 -{
    7.11 -    // XXX -- Possible optimization/measurement question for 32-bit and PAE
    7.12 -    //        hypervisors:
    7.13 -    //        How often is this smfn already available in the shadow linear
    7.14 -    //        table?  Might it be worth checking that table first,
    7.15 -    //        presumably using the reverse map hint in the page_info of this
    7.16 -    //        smfn, rather than calling map_domain_page()?
    7.17 -    //
    7.18 -    return sh_map_domain_page(smfn);
    7.19 -}
    7.20 -
    7.21 -// matching unmap for map_shadow_page()
    7.22 -static inline void
    7.23 -unmap_shadow_page(void *p)
    7.24 -{
    7.25 -    sh_unmap_domain_page(p);
    7.26 -}
    7.27 +/* The number of levels in the shadow pagetable is entirely determined
    7.28 + * by the number of levels in the guest pagetable */
    7.29 +#if GUEST_PAGING_LEVELS == 4
    7.30 +#define SHADOW_PAGING_LEVELS 4
    7.31 +#else
    7.32 +#define SHADOW_PAGING_LEVELS 3
    7.33 +#endif
    7.34  
    7.35  /* 
     7.36   * Define various types for handling pagetables, based on these options:
    7.37 @@ -50,21 +37,6 @@ unmap_shadow_page(void *p)
    7.38   * GUEST_PAGING_LEVELS  : Number of levels of guest pagetables
    7.39   */
    7.40  
    7.41 -#if (CONFIG_PAGING_LEVELS < SHADOW_PAGING_LEVELS) 
    7.42 -#error Cannot have more levels of shadow pagetables than host pagetables
    7.43 -#endif
    7.44 -
    7.45 -#if (SHADOW_PAGING_LEVELS < GUEST_PAGING_LEVELS) 
    7.46 -#error Cannot have more levels of guest pagetables than shadow pagetables
    7.47 -#endif
    7.48 -
    7.49 -#if SHADOW_PAGING_LEVELS == 2
    7.50 -#define SHADOW_L1_PAGETABLE_ENTRIES    1024
    7.51 -#define SHADOW_L2_PAGETABLE_ENTRIES    1024
    7.52 -#define SHADOW_L1_PAGETABLE_SHIFT        12
    7.53 -#define SHADOW_L2_PAGETABLE_SHIFT        22
    7.54 -#endif
    7.55 -
    7.56  #if SHADOW_PAGING_LEVELS == 3
    7.57  #define SHADOW_L1_PAGETABLE_ENTRIES     512
    7.58  #define SHADOW_L2_PAGETABLE_ENTRIES     512
    7.59 @@ -72,9 +44,7 @@ unmap_shadow_page(void *p)
    7.60  #define SHADOW_L1_PAGETABLE_SHIFT        12
    7.61  #define SHADOW_L2_PAGETABLE_SHIFT        21
    7.62  #define SHADOW_L3_PAGETABLE_SHIFT        30
    7.63 -#endif
    7.64 -
    7.65 -#if SHADOW_PAGING_LEVELS == 4
    7.66 +#else /* SHADOW_PAGING_LEVELS == 4 */
    7.67  #define SHADOW_L1_PAGETABLE_ENTRIES     512
    7.68  #define SHADOW_L2_PAGETABLE_ENTRIES     512
    7.69  #define SHADOW_L3_PAGETABLE_ENTRIES     512
    7.70 @@ -88,52 +58,44 @@ unmap_shadow_page(void *p)
    7.71  /* Types of the shadow page tables */
    7.72  typedef l1_pgentry_t shadow_l1e_t;
    7.73  typedef l2_pgentry_t shadow_l2e_t;
    7.74 -#if SHADOW_PAGING_LEVELS >= 3
    7.75  typedef l3_pgentry_t shadow_l3e_t;
    7.76  #if SHADOW_PAGING_LEVELS >= 4
    7.77  typedef l4_pgentry_t shadow_l4e_t;
    7.78  #endif
    7.79 -#endif
    7.80  
    7.81  /* Access functions for them */
    7.82  static inline paddr_t shadow_l1e_get_paddr(shadow_l1e_t sl1e)
    7.83  { return l1e_get_paddr(sl1e); }
    7.84  static inline paddr_t shadow_l2e_get_paddr(shadow_l2e_t sl2e)
    7.85  { return l2e_get_paddr(sl2e); }
    7.86 -#if SHADOW_PAGING_LEVELS >= 3
    7.87  static inline paddr_t shadow_l3e_get_paddr(shadow_l3e_t sl3e)
    7.88  { return l3e_get_paddr(sl3e); }
    7.89  #if SHADOW_PAGING_LEVELS >= 4
    7.90  static inline paddr_t shadow_l4e_get_paddr(shadow_l4e_t sl4e)
    7.91  { return l4e_get_paddr(sl4e); }
    7.92  #endif
    7.93 -#endif
    7.94  
    7.95  static inline mfn_t shadow_l1e_get_mfn(shadow_l1e_t sl1e)
    7.96  { return _mfn(l1e_get_pfn(sl1e)); }
    7.97  static inline mfn_t shadow_l2e_get_mfn(shadow_l2e_t sl2e)
    7.98  { return _mfn(l2e_get_pfn(sl2e)); }
    7.99 -#if SHADOW_PAGING_LEVELS >= 3
   7.100  static inline mfn_t shadow_l3e_get_mfn(shadow_l3e_t sl3e)
   7.101  { return _mfn(l3e_get_pfn(sl3e)); }
   7.102  #if SHADOW_PAGING_LEVELS >= 4
   7.103  static inline mfn_t shadow_l4e_get_mfn(shadow_l4e_t sl4e)
   7.104  { return _mfn(l4e_get_pfn(sl4e)); }
   7.105  #endif
   7.106 -#endif
   7.107  
   7.108  static inline u32 shadow_l1e_get_flags(shadow_l1e_t sl1e)
   7.109  { return l1e_get_flags(sl1e); }
   7.110  static inline u32 shadow_l2e_get_flags(shadow_l2e_t sl2e)
   7.111  { return l2e_get_flags(sl2e); }
   7.112 -#if SHADOW_PAGING_LEVELS >= 3
   7.113  static inline u32 shadow_l3e_get_flags(shadow_l3e_t sl3e)
   7.114  { return l3e_get_flags(sl3e); }
   7.115  #if SHADOW_PAGING_LEVELS >= 4
   7.116  static inline u32 shadow_l4e_get_flags(shadow_l4e_t sl4e)
   7.117  { return l4e_get_flags(sl4e); }
   7.118  #endif
   7.119 -#endif
   7.120  
   7.121  static inline shadow_l1e_t
   7.122  shadow_l1e_remove_flags(shadow_l1e_t sl1e, u32 flags)
   7.123 @@ -143,27 +105,23 @@ static inline shadow_l1e_t shadow_l1e_em
   7.124  { return l1e_empty(); }
   7.125  static inline shadow_l2e_t shadow_l2e_empty(void) 
   7.126  { return l2e_empty(); }
   7.127 -#if SHADOW_PAGING_LEVELS >= 3
   7.128  static inline shadow_l3e_t shadow_l3e_empty(void) 
   7.129  { return l3e_empty(); }
   7.130  #if SHADOW_PAGING_LEVELS >= 4
   7.131  static inline shadow_l4e_t shadow_l4e_empty(void) 
   7.132  { return l4e_empty(); }
   7.133  #endif
   7.134 -#endif
   7.135  
   7.136  static inline shadow_l1e_t shadow_l1e_from_mfn(mfn_t mfn, u32 flags)
   7.137  { return l1e_from_pfn(mfn_x(mfn), flags); }
   7.138  static inline shadow_l2e_t shadow_l2e_from_mfn(mfn_t mfn, u32 flags)
   7.139  { return l2e_from_pfn(mfn_x(mfn), flags); }
   7.140 -#if SHADOW_PAGING_LEVELS >= 3
   7.141  static inline shadow_l3e_t shadow_l3e_from_mfn(mfn_t mfn, u32 flags)
   7.142  { return l3e_from_pfn(mfn_x(mfn), flags); }
   7.143  #if SHADOW_PAGING_LEVELS >= 4
   7.144  static inline shadow_l4e_t shadow_l4e_from_mfn(mfn_t mfn, u32 flags)
   7.145  { return l4e_from_pfn(mfn_x(mfn), flags); }
   7.146  #endif
   7.147 -#endif
   7.148  
   7.149  #define shadow_l1_table_offset(a) l1_table_offset(a)
   7.150  #define shadow_l2_table_offset(a) l2_table_offset(a)
   7.151 @@ -441,8 +399,7 @@ struct shadow_walk_t
   7.152  /* macros for dealing with the naming of the internal function names of the
   7.153   * shadow code's external entry points.
   7.154   */
   7.155 -#define INTERNAL_NAME(name) \
   7.156 -    SHADOW_INTERNAL_NAME(name, SHADOW_PAGING_LEVELS, GUEST_PAGING_LEVELS)
   7.157 +#define INTERNAL_NAME(name) SHADOW_INTERNAL_NAME(name, GUEST_PAGING_LEVELS)
   7.158  
   7.159  /* macros for renaming the primary entry points, so that they are more
    7.160   * easily distinguished in a debugger
   7.161 @@ -481,42 +438,24 @@ struct shadow_walk_t
   7.162  #define sh_guess_wrmap             INTERNAL_NAME(sh_guess_wrmap)
   7.163  #define sh_clear_shadow_entry      INTERNAL_NAME(sh_clear_shadow_entry)
   7.164  
   7.165 -/* The sh_guest_(map|get)_* functions only depends on the number of config
   7.166 - * levels
   7.167 - */
   7.168 -#define sh_guest_map_l1e                                       \
   7.169 -        SHADOW_INTERNAL_NAME(sh_guest_map_l1e,                \
   7.170 -                              CONFIG_PAGING_LEVELS,             \
   7.171 -                              CONFIG_PAGING_LEVELS)
   7.172 -#define sh_guest_get_eff_l1e                                   \
   7.173 -        SHADOW_INTERNAL_NAME(sh_guest_get_eff_l1e,            \
   7.174 -                              CONFIG_PAGING_LEVELS,             \
   7.175 -                              CONFIG_PAGING_LEVELS)
   7.176  
   7.177 -/* sh_make_monitor_table only depends on the number of shadow levels */
   7.178 -#define sh_make_monitor_table                                  \
   7.179 -        SHADOW_INTERNAL_NAME(sh_make_monitor_table,           \
   7.180 -                              SHADOW_PAGING_LEVELS,             \
   7.181 -                              SHADOW_PAGING_LEVELS)
   7.182 -#define sh_destroy_monitor_table                               \
   7.183 -        SHADOW_INTERNAL_NAME(sh_destroy_monitor_table,        \
   7.184 -                              SHADOW_PAGING_LEVELS,             \
   7.185 -                              SHADOW_PAGING_LEVELS)
    7.186 +/* The sh_guest_(map|get)_* functions depend only on Xen's paging levels */
   7.187 +#define sh_guest_map_l1e \
   7.188 +        SHADOW_INTERNAL_NAME(sh_guest_map_l1e, CONFIG_PAGING_LEVELS)
   7.189 +#define sh_guest_get_eff_l1e \
   7.190 +        SHADOW_INTERNAL_NAME(sh_guest_get_eff_l1e, CONFIG_PAGING_LEVELS)
   7.191  
   7.192 +/* sh_make_monitor_table depends only on the number of shadow levels */
   7.193 +#define sh_make_monitor_table \
   7.194 +        SHADOW_INTERNAL_NAME(sh_make_monitor_table, SHADOW_PAGING_LEVELS)
   7.195 +#define sh_destroy_monitor_table \
   7.196 +        SHADOW_INTERNAL_NAME(sh_destroy_monitor_table, SHADOW_PAGING_LEVELS)
   7.197  
   7.198  #if SHADOW_PAGING_LEVELS == 3
   7.199  #define MFN_FITS_IN_HVM_CR3(_MFN) !(mfn_x(_MFN) >> 20)
   7.200  #endif
   7.201  
   7.202 -#if SHADOW_PAGING_LEVELS == 2
   7.203 -#define SH_PRI_pte "08x"
   7.204 -#else /* SHADOW_PAGING_LEVELS >= 3 */
   7.205 -#ifndef __x86_64__
   7.206 -#define SH_PRI_pte "016llx"
   7.207 -#else
   7.208 -#define SH_PRI_pte "016lx"
   7.209 -#endif
   7.210 -#endif /* SHADOW_PAGING_LEVELS >= 3 */
   7.211 +#define SH_PRI_pte PRIpte
   7.212  
   7.213  #if GUEST_PAGING_LEVELS == 2
   7.214  #define SH_PRI_gpte "08x"
   7.215 @@ -529,7 +468,7 @@ struct shadow_walk_t
   7.216  #endif /* GUEST_PAGING_LEVELS >= 3 */
   7.217  
   7.218  
   7.219 -#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH) && SHADOW_PAGING_LEVELS > 2
   7.220 +#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
   7.221  /******************************************************************************
   7.222   * We implement a "fast path" for two special cases: faults that require
   7.223   * MMIO emulation, and faults where the guest PTE is not present.  We
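Dropping the SHADOW_PAGING_LEVELS > 2 clause from this guard is safe
because 2-level shadows no longer exist: every remaining shadow format has
64-bit entries, so the spare bits in which the fast path stashes its
markers are always available.  The technique, sketched with invented names
and values (the real encodings follow in the part of this comment block
that the hunk does not reach):

    /* Mark a not-present shadow l1e so the fault handler can tell,
     * without walking the guest tables, why it was left empty. */
    #define SH_L1E_MAGIC_BIT (1ULL << 62)   /* illustrative value only */

    static inline shadow_l1e_t sh_l1e_magic_gnp(void)
    {
        /* No _PAGE_PRESENT, but tagged as "guest not present". */
        return (shadow_l1e_t) { SH_L1E_MAGIC_BIT };
    }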
     8.1 --- a/xen/include/asm-x86/mtrr.h	Thu May 08 14:33:31 2008 +0100
     8.2 +++ b/xen/include/asm-x86/mtrr.h	Thu May 08 16:58:33 2008 +0100
     8.3 @@ -62,5 +62,8 @@ extern int mtrr_add_page(unsigned long b
     8.4  extern int mtrr_del(int reg, unsigned long base, unsigned long size);
     8.5  extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
     8.6  extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
      8.7 +extern u32 get_pat_flags(struct vcpu *v, u32 gl1e_flags, paddr_t gpaddr,
      8.8 +                         paddr_t spaddr);
     8.9 +extern unsigned char pat_type_2_pte_flags(unsigned char pat_type);
    8.10  
    8.11  #endif /* __ASM_X86_MTRR_H__ */
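The two new externs publish the PAT machinery to the rest of the
hypervisor; the shadow code is the intended consumer, since the flags of a
shadow PTE must reflect both the guest's cacheability intent and the real
MTRR ranges covering the frame.  A hypothetical call site, with the
argument expressions invented for the sketch:

    /* Fold guest PAT/MTRR-derived cache attributes into a new shadow PTE. */
    u32 pat = get_pat_flags(v, guest_flags, guest_paddr,
                            ((paddr_t)mfn_x(target_mfn)) << PAGE_SHIFT);
    sflags |= pat;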