ia64/xen-unstable

changeset 14013:9c2e6f8f3aa7

[XEN] 32on64 shadowing / live migration support for PV PAE compat guests
PAE compat guests on 64-bit hypervisors are shadowed
using 4-on-4, with special handling for the top-level
L4 page and the L2 pages that contain the M2P mappings.

Signed-off-by: Emmanuel Ackaouy <ack@xensource.com>
author Emmanuel Ackaouy <ack@xensource.com>
date Mon Feb 19 19:58:07 2007 +0000 (2007-02-19)
parents 0b882c911b88
children b83cfb117bdd
files xen/arch/x86/mm.c xen/arch/x86/mm/shadow/common.c xen/arch/x86/mm/shadow/multi.c xen/arch/x86/mm/shadow/private.h xen/arch/x86/mm/shadow/types.h xen/include/asm-x86/x86_64/page.h
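
The pivotal change is the new domain-aware is_guest_l4_slot() predicate
(see the xen/include/asm-x86/x86_64/page.h hunk at the end of the diff):
a compat guest owns only L4 slot 0, while a native 64-bit guest owns every
slot outside the Xen-reserved range. The following minimal, standalone C
sketch illustrates that semantics; the slot boundary values are
illustrative placeholders, not the real ROOT_PAGETABLE_*_XEN_SLOT
constants.

    /* Standalone sketch of the new is_guest_l4_slot() semantics.
     * The slot boundaries below are placeholder values for
     * illustration, not the constants Xen actually uses. */
    #include <stdio.h>

    #define ROOT_PAGETABLE_FIRST_XEN_SLOT 256  /* placeholder */
    #define ROOT_PAGETABLE_LAST_XEN_SLOT  271  /* placeholder */

    struct domain { int is_compat; };
    #define IS_COMPAT(d) ((d)->is_compat)

    /* A compat (32-on-64) PV guest controls only L4 slot 0: its whole
     * PAE address space hangs off the first slot, and all remaining
     * slots belong to Xen. A native 64-bit guest controls every slot
     * outside the Xen-reserved range. */
    static int is_guest_l4_slot(const struct domain *d, int s)
    {
        return IS_COMPAT(d) ? (s == 0)
                            : (s < ROOT_PAGETABLE_FIRST_XEN_SLOT ||
                               s > ROOT_PAGETABLE_LAST_XEN_SLOT);
    }

    int main(void)
    {
        struct domain compat = { 1 }, native = { 0 };
        printf("compat: slot 0 -> %d, slot 1 -> %d\n",
               is_guest_l4_slot(&compat, 0), is_guest_l4_slot(&compat, 1));
        printf("native: slot 0 -> %d, slot 256 -> %d\n",
               is_guest_l4_slot(&native, 0), is_guest_l4_slot(&native, 256));
        return 0;
    }

This is why mod_l4_entry(), alloc_l4_table(), free_l4_table() and the
shadow L4 walkers in the diff below now all take the owning domain as an
extra argument.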
     1.1 --- a/xen/arch/x86/mm.c	Mon Feb 19 16:16:53 2007 +0000
     1.2 +++ b/xen/arch/x86/mm.c	Mon Feb 19 19:58:07 2007 +0000
     1.3 @@ -1147,7 +1147,7 @@ static int alloc_l4_table(struct page_in
     1.4  
     1.5      for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
     1.6      {
     1.7 -        if ( is_guest_l4_slot(i) &&
     1.8 +        if ( is_guest_l4_slot(d, i) &&
     1.9               unlikely(!get_page_from_l4e(pl4e[i], pfn, d)) )
    1.10              goto fail;
    1.11  
    1.12 @@ -1173,7 +1173,7 @@ static int alloc_l4_table(struct page_in
    1.13   fail:
    1.14      MEM_LOG("Failure in alloc_l4_table: entry %d", i);
    1.15      while ( i-- > 0 )
    1.16 -        if ( is_guest_l4_slot(i) )
    1.17 +        if ( is_guest_l4_slot(d, i) )
    1.18              put_page_from_l4e(pl4e[i], pfn);
    1.19  
    1.20      return 0;
    1.21 @@ -1248,12 +1248,13 @@ static void free_l3_table(struct page_in
    1.22  
    1.23  static void free_l4_table(struct page_info *page)
    1.24  {
    1.25 +    struct domain *d = page_get_owner(page);
    1.26      unsigned long pfn = page_to_mfn(page);
    1.27      l4_pgentry_t *pl4e = page_to_virt(page);
    1.28      int           i;
    1.29  
    1.30      for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
    1.31 -        if ( is_guest_l4_slot(i) )
    1.32 +        if ( is_guest_l4_slot(d, i) )
    1.33              put_page_from_l4e(pl4e[i], pfn);
    1.34  }
    1.35  
    1.36 @@ -1480,13 +1481,14 @@ static int mod_l3_entry(l3_pgentry_t *pl
    1.37  #if CONFIG_PAGING_LEVELS >= 4
    1.38  
    1.39  /* Update the L4 entry at pl4e to new value nl4e. pl4e is within frame pfn. */
    1.40 -static int mod_l4_entry(l4_pgentry_t *pl4e, 
    1.41 +static int mod_l4_entry(struct domain *d,
    1.42 +                        l4_pgentry_t *pl4e, 
    1.43                          l4_pgentry_t nl4e, 
    1.44                          unsigned long pfn)
    1.45  {
    1.46      l4_pgentry_t ol4e;
    1.47  
    1.48 -    if ( unlikely(!is_guest_l4_slot(pgentry_ptr_to_slot(pl4e))) )
    1.49 +    if ( unlikely(!is_guest_l4_slot(d, pgentry_ptr_to_slot(pl4e))) )
    1.50      {
    1.51          MEM_LOG("Illegal L4 update attempt in Xen-private area %p", pl4e);
    1.52          return 0;
    1.53 @@ -1777,9 +1779,13 @@ int new_guest_cr3(unsigned long mfn)
    1.54      {
    1.55          okay = paging_mode_refcounts(d)
    1.56              ? 0 /* Old code was broken, but what should it be? */
    1.57 -            : mod_l4_entry(__va(pagetable_get_paddr(v->arch.guest_table)),
    1.58 -                           l4e_from_pfn(mfn, (_PAGE_PRESENT|_PAGE_RW|
    1.59 -                                              _PAGE_USER|_PAGE_ACCESSED)), 0);
    1.60 +            : mod_l4_entry(
    1.61 +                    d,
    1.62 +                    __va(pagetable_get_paddr(v->arch.guest_table)),
    1.63 +                    l4e_from_pfn(
    1.64 +                        mfn,
    1.65 +                        (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)),
    1.66 +                    pagetable_get_pfn(v->arch.guest_table));
    1.67          if ( unlikely(!okay) )
    1.68          {
    1.69              MEM_LOG("Error while installing new compat baseptr %lx", mfn);
    1.70 @@ -2339,7 +2345,7 @@ int do_mmu_update(
    1.71                      if ( !IS_COMPAT(FOREIGNDOM) )
    1.72                      {
    1.73                          l4_pgentry_t l4e = l4e_from_intpte(req.val);
    1.74 -                        okay = mod_l4_entry(va, l4e, mfn);
    1.75 +                        okay = mod_l4_entry(d, va, l4e, mfn);
    1.76                      }
    1.77                      break;
    1.78  #endif
     2.1 --- a/xen/arch/x86/mm/shadow/common.c	Mon Feb 19 16:16:53 2007 +0000
     2.2 +++ b/xen/arch/x86/mm/shadow/common.c	Mon Feb 19 19:58:07 2007 +0000
     2.3 @@ -485,7 +485,11 @@ void shadow_demote(struct vcpu *v, mfn_t
     2.4  {
     2.5      struct page_info *page = mfn_to_page(gmfn);
     2.6  
     2.7 -    ASSERT(test_bit(_PGC_page_table, &page->count_info));
     2.8 +#ifdef CONFIG_COMPAT
     2.9 +    if ( !IS_COMPAT(v->domain) || type != SH_type_l4_64_shadow )
    2.10 +#endif
    2.11 +        ASSERT(test_bit(_PGC_page_table, &page->count_info));
    2.12 +
    2.13      ASSERT(test_bit(type, &page->shadow_flags));
    2.14  
    2.15      clear_bit(type, &page->shadow_flags);
    2.16 @@ -567,6 +571,9 @@ sh_validate_guest_entry(struct vcpu *v, 
    2.17      if ( page->shadow_flags & SHF_L2_64 ) 
    2.18          result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 4, 4)
    2.19              (v, gmfn, entry, size);
    2.20 +    if ( page->shadow_flags & SHF_L2H_64 ) 
    2.21 +        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 4, 4)
    2.22 +            (v, gmfn, entry, size);
    2.23      if ( page->shadow_flags & SHF_L3_64 ) 
    2.24          result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl3e, 4, 4)
    2.25              (v, gmfn, entry, size);
    2.26 @@ -575,7 +582,7 @@ sh_validate_guest_entry(struct vcpu *v, 
    2.27              (v, gmfn, entry, size);
    2.28  #else /* 32-bit/PAE hypervisor does not support 64-bit guests */
    2.29      ASSERT((page->shadow_flags 
    2.30 -            & (SHF_L4_64|SHF_L3_64|SHF_L2_64|SHF_L1_64)) == 0);
    2.31 +            & (SHF_L4_64|SHF_L3_64|SHF_L2H_64|SHF_L2_64|SHF_L1_64)) == 0);
    2.32  #endif
    2.33  
    2.34      return result;
    2.35 @@ -705,7 +712,7 @@ static inline u32
    2.36  shadow_order(unsigned int shadow_type) 
    2.37  {
    2.38  #if CONFIG_PAGING_LEVELS > 2
    2.39 -    static const u32 type_to_order[16] = {
    2.40 +    static const u32 type_to_order[SH_type_unused] = {
    2.41          0, /* SH_type_none           */
    2.42          1, /* SH_type_l1_32_shadow   */
    2.43          1, /* SH_type_fl1_32_shadow  */
    2.44 @@ -717,12 +724,13 @@ shadow_order(unsigned int shadow_type)
    2.45          0, /* SH_type_l1_64_shadow   */
    2.46          0, /* SH_type_fl1_64_shadow  */
    2.47          0, /* SH_type_l2_64_shadow   */
    2.48 +        0, /* SH_type_l2h_64_shadow  */
    2.49          0, /* SH_type_l3_64_shadow   */
    2.50          0, /* SH_type_l4_64_shadow   */
    2.51          2, /* SH_type_p2m_table      */
    2.52          0  /* SH_type_monitor_table  */
    2.53          };
    2.54 -    ASSERT(shadow_type < 16);
    2.55 +    ASSERT(shadow_type < SH_type_unused);
    2.56      return type_to_order[shadow_type];
    2.57  #else  /* 32-bit Xen only ever shadows 32-bit guests on 32-bit shadows. */
    2.58      return 0;
    2.59 @@ -1564,6 +1572,9 @@ void sh_destroy_shadow(struct vcpu *v, m
    2.60             t == SH_type_fl1_pae_shadow ||  
    2.61             t == SH_type_fl1_64_shadow  || 
    2.62             t == SH_type_monitor_table  || 
    2.63 +#ifdef CONFIG_COMPAT
    2.64 +           (IS_COMPAT(v->domain) && t == SH_type_l4_64_shadow) ||
    2.65 +#endif
    2.66             (page_get_owner(mfn_to_page(_mfn(sp->backpointer))) 
    2.67              == v->domain)); 
    2.68  
    2.69 @@ -1605,6 +1616,8 @@ void sh_destroy_shadow(struct vcpu *v, m
    2.70      case SH_type_fl1_64_shadow:
    2.71          SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4, 4)(v, smfn);
    2.72          break;
    2.73 +    case SH_type_l2h_64_shadow:
    2.74 +        ASSERT( IS_COMPAT(v->domain) );
    2.75      case SH_type_l2_64_shadow:
    2.76          SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4, 4)(v, smfn);
    2.77          break;
    2.78 @@ -1633,7 +1646,7 @@ int sh_remove_write_access(struct vcpu *
    2.79                             unsigned long fault_addr)
    2.80  {
    2.81      /* Dispatch table for getting per-type functions */
    2.82 -    static hash_callback_t callbacks[16] = {
    2.83 +    static hash_callback_t callbacks[SH_type_unused] = {
    2.84          NULL, /* none    */
    2.85  #if CONFIG_PAGING_LEVELS == 2
    2.86          SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,2,2), /* l1_32   */
    2.87 @@ -1660,6 +1673,7 @@ int sh_remove_write_access(struct vcpu *
    2.88          NULL, /* fl1_64  */
    2.89  #endif
    2.90          NULL, /* l2_64   */
    2.91 +        NULL, /* l2h_64  */
    2.92          NULL, /* l3_64   */
    2.93          NULL, /* l4_64   */
    2.94          NULL, /* p2m     */
    2.95 @@ -1822,7 +1836,7 @@ int sh_remove_all_mappings(struct vcpu *
    2.96      int expected_count, do_locking;
    2.97  
    2.98      /* Dispatch table for getting per-type functions */
    2.99 -    static hash_callback_t callbacks[16] = {
   2.100 +    static hash_callback_t callbacks[SH_type_unused] = {
   2.101          NULL, /* none    */
   2.102  #if CONFIG_PAGING_LEVELS == 2
   2.103          SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,2,2), /* l1_32   */
   2.104 @@ -1849,6 +1863,7 @@ int sh_remove_all_mappings(struct vcpu *
   2.105          NULL, /* fl1_64  */
   2.106  #endif
   2.107          NULL, /* l2_64   */
   2.108 +        NULL, /* l2h_64  */
   2.109          NULL, /* l3_64   */
   2.110          NULL, /* l4_64   */
   2.111          NULL, /* p2m     */
   2.112 @@ -1956,6 +1971,7 @@ static int sh_remove_shadow_via_pointer(
   2.113  #if CONFIG_PAGING_LEVELS >= 4
   2.114      case SH_type_l1_64_shadow:
   2.115      case SH_type_l2_64_shadow:
   2.116 +    case SH_type_l2h_64_shadow:
   2.117      case SH_type_l3_64_shadow:
   2.118      case SH_type_l4_64_shadow:
   2.119          SHADOW_INTERNAL_NAME(sh_clear_shadow_entry,4,4)(v, vaddr, pmfn);
   2.120 @@ -1991,7 +2007,7 @@ void sh_remove_shadows(struct vcpu *v, m
   2.121      
   2.122      /* Dispatch table for getting per-type functions: each level must
   2.123       * be called with the function to remove a lower-level shadow. */
   2.124 -    static hash_callback_t callbacks[16] = {
   2.125 +    static hash_callback_t callbacks[SH_type_unused] = {
   2.126          NULL, /* none    */
   2.127          NULL, /* l1_32   */
   2.128          NULL, /* fl1_32  */
   2.129 @@ -2013,10 +2029,12 @@ void sh_remove_shadows(struct vcpu *v, m
   2.130          NULL, /* fl1_64  */
   2.131  #if CONFIG_PAGING_LEVELS >= 4
   2.132          SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,4,4), /* l2_64   */
   2.133 +        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,4,4), /* l2h_64  */
   2.134          SHADOW_INTERNAL_NAME(sh_remove_l2_shadow,4,4), /* l3_64   */
   2.135          SHADOW_INTERNAL_NAME(sh_remove_l3_shadow,4,4), /* l4_64   */
   2.136  #else
   2.137          NULL, /* l2_64   */
   2.138 +        NULL, /* l2h_64  */
   2.139          NULL, /* l3_64   */
   2.140          NULL, /* l4_64   */
   2.141  #endif
   2.142 @@ -2025,7 +2043,7 @@ void sh_remove_shadows(struct vcpu *v, m
   2.143      };
   2.144  
   2.145      /* Another lookup table, for choosing which mask to use */
   2.146 -    static unsigned int masks[16] = {
   2.147 +    static unsigned int masks[SH_type_unused] = {
   2.148          0, /* none    */
   2.149          1 << SH_type_l2_32_shadow, /* l1_32   */
   2.150          0, /* fl1_32  */
   2.151 @@ -2035,9 +2053,11 @@ void sh_remove_shadows(struct vcpu *v, m
   2.152          0, /* fl1_pae */
   2.153          0, /* l2_pae  */
   2.154          0, /* l2h_pae  */
   2.155 -        1 << SH_type_l2_64_shadow, /* l1_64   */
   2.156 +        ((1 << SH_type_l2h_64_shadow)
   2.157 +         | (1 << SH_type_l2_64_shadow)),  /* l1_64   */
   2.158          0, /* fl1_64  */
   2.159          1 << SH_type_l3_64_shadow, /* l2_64   */
   2.160 +        1 << SH_type_l3_64_shadow, /* l2h_64  */
   2.161          1 << SH_type_l4_64_shadow, /* l3_64   */
   2.162          0, /* l4_64   */
   2.163          0, /* p2m     */
   2.164 @@ -2088,6 +2108,7 @@ void sh_remove_shadows(struct vcpu *v, m
   2.165  #if CONFIG_PAGING_LEVELS >= 4
   2.166      if ( sh_flags & SHF_L1_64 )   DO_UNSHADOW(SH_type_l1_64_shadow);
   2.167      if ( sh_flags & SHF_L2_64 )   DO_UNSHADOW(SH_type_l2_64_shadow);
   2.168 +    if ( sh_flags & SHF_L2H_64 )  DO_UNSHADOW(SH_type_l2h_64_shadow);
   2.169      if ( sh_flags & SHF_L3_64 )   DO_UNSHADOW(SH_type_l3_64_shadow);
   2.170      if ( sh_flags & SHF_L4_64 )   DO_UNSHADOW(SH_type_l4_64_shadow);
   2.171  #endif
   2.172 @@ -2157,10 +2178,7 @@ static void sh_update_paging_modes(struc
   2.173          /// PV guest
   2.174          ///
   2.175  #if CONFIG_PAGING_LEVELS == 4
   2.176 -        if ( pv_32bit_guest(v) )
   2.177 -            v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
   2.178 -        else
   2.179 -            v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,4,4);
   2.180 +        v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,4,4);
   2.181  #elif CONFIG_PAGING_LEVELS == 3
   2.182          v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
   2.183  #elif CONFIG_PAGING_LEVELS == 2
   2.184 @@ -2691,6 +2709,11 @@ static int shadow_log_dirty_enable(struc
   2.185          goto out;
   2.186      }
   2.187  
   2.188 +#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL)
   2.189 +    if ( IS_COMPAT(d) )
   2.190 +        d->arch.paging.shadow.opt_flags = SHOPT_LINUX_L3_TOPLEVEL;
   2.191 +#endif
   2.192 +
   2.193      ret = sh_alloc_log_dirty_bitmap(d);
   2.194      if ( ret != 0 )
   2.195      {
   2.196 @@ -3016,7 +3039,7 @@ int shadow_domctl(struct domain *d,
   2.197  void shadow_audit_tables(struct vcpu *v) 
   2.198  {
   2.199      /* Dispatch table for getting per-type functions */
   2.200 -    static hash_callback_t callbacks[16] = {
   2.201 +    static hash_callback_t callbacks[SH_type_unused] = {
   2.202          NULL, /* none    */
   2.203  #if CONFIG_PAGING_LEVELS == 2
   2.204          SHADOW_INTERNAL_NAME(sh_audit_l1_table,2,2),  /* l1_32   */
   2.205 @@ -3034,6 +3057,7 @@ void shadow_audit_tables(struct vcpu *v)
   2.206          SHADOW_INTERNAL_NAME(sh_audit_l1_table,4,4),  /* l1_64   */
   2.207          SHADOW_INTERNAL_NAME(sh_audit_fl1_table,4,4), /* fl1_64  */
   2.208          SHADOW_INTERNAL_NAME(sh_audit_l2_table,4,4),  /* l2_64   */
   2.209 +        SHADOW_INTERNAL_NAME(sh_audit_l2_table,4,4),  /* l2h_64   */
   2.210          SHADOW_INTERNAL_NAME(sh_audit_l3_table,4,4),  /* l3_64   */
   2.211          SHADOW_INTERNAL_NAME(sh_audit_l4_table,4,4),  /* l4_64   */
   2.212  #endif /* CONFIG_PAGING_LEVELS >= 4 */
     3.1 --- a/xen/arch/x86/mm/shadow/multi.c	Mon Feb 19 16:16:53 2007 +0000
     3.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Mon Feb 19 19:58:07 2007 +0000
     3.3 @@ -162,8 +162,13 @@ set_shadow_status(struct vcpu *v, mfn_t 
     3.4      else
     3.5          mfn_to_shadow_page(smfn)->logdirty = 0;
     3.6  
     3.7 -    res = get_page(mfn_to_page(gmfn), d);
     3.8 -    ASSERT(res == 1);
     3.9 +#ifdef CONFIG_COMPAT
    3.10 +    if ( !IS_COMPAT(d) || shadow_type != SH_type_l4_64_shadow )
    3.11 +#endif
    3.12 +    {
    3.13 +        res = get_page(mfn_to_page(gmfn), d);
    3.14 +        ASSERT(res == 1);
    3.15 +    }
    3.16  
    3.17      shadow_hash_insert(v, mfn_x(gmfn), shadow_type, smfn);
    3.18  }
    3.19 @@ -185,7 +190,10 @@ delete_shadow_status(struct vcpu *v, mfn
    3.20                     v->domain->domain_id, v->vcpu_id,
    3.21                     mfn_x(gmfn), shadow_type, mfn_x(smfn));
    3.22      shadow_hash_delete(v, mfn_x(gmfn), shadow_type, smfn);
    3.23 -    put_page(mfn_to_page(gmfn));
    3.24 +#ifdef CONFIG_COMPAT
    3.25 +    if ( !IS_COMPAT(v->domain) || shadow_type != SH_type_l4_64_shadow )
    3.26 +#endif
    3.27 +        put_page(mfn_to_page(gmfn));
    3.28  }
    3.29  
    3.30  /**************************************************************************/
    3.31 @@ -764,7 +772,7 @@ static always_inline void
    3.32      // PV guests in 64-bit mode use two different page tables for user vs
    3.33      // supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
    3.34      // It is always shadowed as present...
    3.35 -    if ( (GUEST_PAGING_LEVELS == 4) && !is_hvm_domain(d) )
    3.36 +    if ( (GUEST_PAGING_LEVELS == 4) && !IS_COMPAT(d) && !is_hvm_domain(d) )
    3.37      {
    3.38          sflags |= _PAGE_USER;
    3.39      }
    3.40 @@ -1233,9 +1241,10 @@ do {                                    
    3.41  #if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
    3.42  
    3.43  /* 32-bit l2 on PAE/64: four pages, touch every second entry, and avoid Xen */
    3.44 -#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code)     \
    3.45 +#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)     \
    3.46  do {                                                                      \
    3.47      int _i, _j, __done = 0;                                               \
    3.48 +    int _xen = !shadow_mode_external(_dom);                               \
    3.49      ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_32_shadow);    \
    3.50      for ( _j = 0; _j < 4 && !__done; _j++ )                               \
    3.51      {                                                                     \
    3.52 @@ -1259,9 +1268,10 @@ do {                                    
    3.53  #elif GUEST_PAGING_LEVELS == 2
    3.54  
    3.55  /* 32-bit on 32-bit: avoid Xen entries */
    3.56 -#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code)      \
    3.57 +#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)      \
    3.58  do {                                                                       \
    3.59      int _i;                                                                \
    3.60 +    int _xen = !shadow_mode_external(_dom);                                \
    3.61      shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                        \
    3.62      ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_32_shadow);     \
    3.63      for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                 \
    3.64 @@ -1281,9 +1291,10 @@ do {                                    
    3.65  #elif GUEST_PAGING_LEVELS == 3
    3.66  
    3.67  /* PAE: if it's an l2h, don't touch Xen mappings */
    3.68 -#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code)      \
    3.69 +#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)      \
    3.70  do {                                                                       \
    3.71      int _i;                                                                \
    3.72 +    int _xen = !shadow_mode_external(_dom);                                \
    3.73      shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                        \
    3.74      ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_pae_shadow      \
    3.75             || mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_pae_shadow);\
    3.76 @@ -1304,21 +1315,29 @@ do {                                    
    3.77  
    3.78  #else 
    3.79  
    3.80 -/* 64-bit l2: touch all entries */
    3.81 -#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code)   \
    3.82 -do {                                                                    \
    3.83 -    int _i;                                                             \
    3.84 -    shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                     \
    3.85 -    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_64_shadow);  \
    3.86 -    for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )              \
    3.87 -    {                                                                   \
    3.88 -        (_sl2e) = _sp + _i;                                             \
    3.89 -        if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT )           \
    3.90 -            {_code}                                                     \
    3.91 -        if ( _done ) break;                                             \
    3.92 -        increment_ptr_to_guest_entry(_gl2p);                            \
    3.93 -    }                                                                   \
    3.94 -    unmap_shadow_page(_sp);                                             \
    3.95 +/* 64-bit l2: touch all entries except for PAE compat guests. */
    3.96 +#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)       \
    3.97 +do {                                                                        \
    3.98 +    int _i;                                                                 \
    3.99 +    int _xen = !shadow_mode_external(_dom);                                 \
   3.100 +    shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                         \
   3.101 +    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_64_shadow ||     \
   3.102 +           mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_64_shadow);     \
   3.103 +    for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                  \
   3.104 +    {                                                                       \
   3.105 +        if ( (!(_xen))                                                      \
   3.106 +             || !IS_COMPAT(_dom)                                            \
   3.107 +             || mfn_to_shadow_page(_sl2mfn)->type != SH_type_l2h_64_shadow  \
   3.108 +             || (_i < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom)) )           \
   3.109 +        {                                                                   \
   3.110 +            (_sl2e) = _sp + _i;                                             \
   3.111 +            if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT )           \
   3.112 +                {_code}                                                     \
   3.113 +            if ( _done ) break;                                             \
   3.114 +            increment_ptr_to_guest_entry(_gl2p);                            \
   3.115 +        }                                                                   \
   3.116 +    }                                                                       \
   3.117 +    unmap_shadow_page(_sp);                                                 \
   3.118  } while (0)
   3.119  
   3.120  #endif /* different kinds of l2 */
   3.121 @@ -1343,14 +1362,15 @@ do {                                    
   3.122  } while (0)
   3.123  
   3.124  /* 64-bit l4: avoid Xen mappings */
   3.125 -#define SHADOW_FOREACH_L4E(_sl4mfn, _sl4e, _gl4p, _done, _xen, _code)   \
   3.126 +#define SHADOW_FOREACH_L4E(_sl4mfn, _sl4e, _gl4p, _done, _dom, _code)   \
   3.127  do {                                                                    \
   3.128 +    shadow_l4e_t *_sp = map_shadow_page((_sl4mfn));                     \
   3.129 +    int _xen = !shadow_mode_external(_dom);                             \
   3.130      int _i;                                                             \
   3.131 -    shadow_l4e_t *_sp = map_shadow_page((_sl4mfn));                     \
   3.132      ASSERT(mfn_to_shadow_page(_sl4mfn)->type == SH_type_l4_64_shadow);  \
   3.133      for ( _i = 0; _i < SHADOW_L4_PAGETABLE_ENTRIES; _i++ )              \
   3.134      {                                                                   \
   3.135 -        if ( (!(_xen)) || is_guest_l4_slot(_i) )                        \
   3.136 +        if ( (!(_xen)) || is_guest_l4_slot(_dom, _i) )                  \
   3.137          {                                                               \
   3.138              (_sl4e) = _sp + _i;                                         \
   3.139              if ( shadow_l4e_get_flags(*(_sl4e)) & _PAGE_PRESENT )       \
   3.140 @@ -1417,17 +1437,25 @@ void sh_install_xen_entries_in_l4(struct
   3.141                                  __PAGE_HYPERVISOR);
   3.142      }
   3.143  
   3.144 +    if ( IS_COMPAT(v->domain) )
   3.145 +    {
   3.146 +        /* install compat arg xlat entry */
   3.147 +        sl4e[shadow_l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
   3.148 +            shadow_l4e_from_mfn(
   3.149 +                    page_to_mfn(virt_to_page(d->arch.mm_arg_xlat_l3)),
   3.150 +                    __PAGE_HYPERVISOR);
   3.151 +    }
   3.152 +
   3.153      sh_unmap_domain_page(sl4e);    
   3.154  }
   3.155  #endif
   3.156  
   3.157 -#if (CONFIG_PAGING_LEVELS == 3 || defined(CONFIG_COMPAT)) && GUEST_PAGING_LEVELS == 3
   3.158 +#if CONFIG_PAGING_LEVELS >= 3 && GUEST_PAGING_LEVELS >= 3
   3.159  // For 3-on-3 PV guests, we need to make sure the xen mappings are in
   3.160  // place, which means that we need to populate the l2h entry in the l3
   3.161  // table.
   3.162  
   3.163 -void sh_install_xen_entries_in_l2h(struct vcpu *v, 
   3.164 -                                    mfn_t sl2hmfn)
   3.165 +static void sh_install_xen_entries_in_l2h(struct vcpu *v, mfn_t sl2hmfn)
   3.166  {
   3.167      struct domain *d = v->domain;
   3.168      shadow_l2e_t *sl2e;
   3.169 @@ -1489,9 +1517,10 @@ void sh_install_xen_entries_in_l2h(struc
   3.170  #else
   3.171  
   3.172      /* Copy the common Xen mappings from the idle domain */
   3.173 -    memcpy(&sl2e[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)],
   3.174 -           &compat_idle_pg_table_l2[l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
   3.175 -           COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*sl2e));
   3.176 +    memcpy(
   3.177 +        &sl2e[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)],
   3.178 +        &compat_idle_pg_table_l2[l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
   3.179 +        COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*sl2e));
   3.180  
   3.181  #endif
   3.182      
   3.183 @@ -1617,8 +1646,11 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
   3.184          case SH_type_l4_shadow:
   3.185              sh_install_xen_entries_in_l4(v, gmfn, smfn); break;
   3.186  #endif
   3.187 -#if CONFIG_PAGING_LEVELS == 3 && GUEST_PAGING_LEVELS == 3
   3.188 +#if CONFIG_PAGING_LEVELS >= 3 && GUEST_PAGING_LEVELS >= 3
   3.189          case SH_type_l2h_shadow:
   3.190 +#ifdef CONFIG_COMPAT
   3.191 +            ASSERT( IS_COMPAT(v->domain) );
   3.192 +#endif
   3.193              sh_install_xen_entries_in_l2h(v, smfn); break;
   3.194  #endif
   3.195  #if CONFIG_PAGING_LEVELS == 2 && GUEST_PAGING_LEVELS == 2
   3.196 @@ -1832,12 +1864,21 @@ static shadow_l2e_t * shadow_get_and_cre
   3.197      {
   3.198          int r;
   3.199          shadow_l3e_t new_sl3e;
   3.200 +        unsigned int t = SH_type_l2_shadow;
   3.201 +
   3.202 +#ifdef CONFIG_COMPAT
   3.203 +        /* Tag compat L2 containing hypervisor (m2p) mappings */
   3.204 +        if ( IS_COMPAT(v->domain) &&
   3.205 +             guest_l4_table_offset(gw->va) == 0 &&
   3.206 +             guest_l3_table_offset(gw->va) == 3 )
   3.207 +            t = SH_type_l2h_shadow;
   3.208 +#endif
   3.209          /* No l2 shadow installed: find and install it. */
   3.210 -        *sl2mfn = get_shadow_status(v, gw->l2mfn, SH_type_l2_shadow);
   3.211 +        *sl2mfn = get_shadow_status(v, gw->l2mfn, t);
   3.212          if ( !mfn_valid(*sl2mfn) ) 
   3.213          {
   3.214              /* No l2 shadow of this page exists at all: make one. */
   3.215 -            *sl2mfn = sh_make_shadow(v, gw->l2mfn, SH_type_l2_shadow);
   3.216 +            *sl2mfn = sh_make_shadow(v, gw->l2mfn, t);
   3.217          }
   3.218          /* Install the new sl2 table in the sl3e */
   3.219          l3e_propagate_from_guest(v, gw->l3e, gw->l3mfn, 
   3.220 @@ -1958,7 +1999,6 @@ void sh_destroy_l4_shadow(struct vcpu *v
   3.221      shadow_l4e_t *sl4e;
   3.222      u32 t = mfn_to_shadow_page(smfn)->type;
   3.223      mfn_t gmfn, sl4mfn;
   3.224 -    int xen_mappings;
   3.225  
   3.226      SHADOW_DEBUG(DESTROY_SHADOW,
   3.227                    "%s(%05lx)\n", __func__, mfn_x(smfn));
   3.228 @@ -1969,9 +2009,8 @@ void sh_destroy_l4_shadow(struct vcpu *v
   3.229      delete_shadow_status(v, gmfn, t, smfn);
   3.230      shadow_demote(v, gmfn, t);
   3.231      /* Decrement refcounts of all the old entries */
   3.232 -    xen_mappings = (!shadow_mode_external(v->domain));
   3.233      sl4mfn = smfn; 
   3.234 -    SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, xen_mappings, {
   3.235 +    SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, v->domain, {
   3.236          if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT ) 
   3.237          {
   3.238              sh_put_ref(v, shadow_l4e_get_mfn(*sl4e),
   3.239 @@ -2019,12 +2058,15 @@ void sh_destroy_l2_shadow(struct vcpu *v
   3.240      shadow_l2e_t *sl2e;
   3.241      u32 t = mfn_to_shadow_page(smfn)->type;
   3.242      mfn_t gmfn, sl2mfn;
   3.243 -    int xen_mappings;
   3.244  
   3.245      SHADOW_DEBUG(DESTROY_SHADOW,
   3.246                    "%s(%05lx)\n", __func__, mfn_x(smfn));
   3.247 -    ASSERT(t == SH_type_l2_shadow 
   3.248 -           || t == SH_type_l2h_pae_shadow);
   3.249 +
   3.250 +#if GUEST_PAGING_LEVELS >= 3
   3.251 +    ASSERT(t == SH_type_l2_shadow || t == SH_type_l2h_shadow);
   3.252 +#else
   3.253 +    ASSERT(t == SH_type_l2_shadow);
   3.254 +#endif
   3.255  
   3.256      /* Record that the guest page isn't shadowed any more (in this type) */
   3.257      gmfn = _mfn(mfn_to_shadow_page(smfn)->backpointer);
   3.258 @@ -2033,11 +2075,7 @@ void sh_destroy_l2_shadow(struct vcpu *v
   3.259  
   3.260      /* Decrement refcounts of all the old entries */
   3.261      sl2mfn = smfn;
   3.262 -    xen_mappings = (!shadow_mode_external(v->domain) &&
   3.263 -                    ((GUEST_PAGING_LEVELS == 2) ||
   3.264 -                     ((GUEST_PAGING_LEVELS == 3) &&
   3.265 -                      (t == SH_type_l2h_pae_shadow))));
   3.266 -    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, xen_mappings, {
   3.267 +    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, v->domain, {
   3.268          if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT ) 
   3.269              sh_put_ref(v, shadow_l2e_get_mfn(*sl2e),
   3.270                          (((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT) 
   3.271 @@ -2138,8 +2176,7 @@ void sh_destroy_monitor_table(struct vcp
   3.272  void sh_unhook_32b_mappings(struct vcpu *v, mfn_t sl2mfn)
   3.273  {    
   3.274      shadow_l2e_t *sl2e;
   3.275 -    int xen_mappings = !shadow_mode_external(v->domain);
   3.276 -    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, xen_mappings, {
   3.277 +    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, v->domain, {
   3.278          (void) shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
   3.279      });
   3.280  }
   3.281 @@ -2150,8 +2187,7 @@ void sh_unhook_pae_mappings(struct vcpu 
   3.282  /* Walk a PAE l2 shadow, unhooking entries from all the subshadows */
   3.283  {
   3.284      shadow_l2e_t *sl2e;
   3.285 -    int xen_mappings = !shadow_mode_external(v->domain);
   3.286 -    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, xen_mappings, {
   3.287 +    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, v->domain, {
   3.288          (void) shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
   3.289      });
   3.290  }
   3.291 @@ -2161,8 +2197,7 @@ void sh_unhook_pae_mappings(struct vcpu 
   3.292  void sh_unhook_64b_mappings(struct vcpu *v, mfn_t sl4mfn)
   3.293  {
   3.294      shadow_l4e_t *sl4e;
   3.295 -    int xen_mappings = !shadow_mode_external(v->domain);
   3.296 -    SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, xen_mappings, {
   3.297 +    SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, v->domain, {
   3.298          (void) shadow_set_l4e(v, sl4e, shadow_l4e_empty(), sl4mfn);
   3.299      });
   3.300  }
   3.301 @@ -2208,7 +2243,7 @@ static int validate_gl4e(struct vcpu *v,
   3.302      {
   3.303          int shadow_index = (((unsigned long)sl4p & ~PAGE_MASK) /
   3.304                              sizeof(shadow_l4e_t));
   3.305 -        int reserved_xen_slot = !is_guest_l4_slot(shadow_index);
   3.306 +        int reserved_xen_slot = !is_guest_l4_slot(v->domain, shadow_index);
   3.307  
   3.308          if ( unlikely(reserved_xen_slot) )
   3.309          {
   3.310 @@ -2471,7 +2506,7 @@ int
   3.311  sh_map_and_validate_gl2he(struct vcpu *v, mfn_t gl2mfn,
   3.312                             void *new_gl2p, u32 size)
   3.313  {
   3.314 -#if GUEST_PAGING_LEVELS == 3
   3.315 +#if GUEST_PAGING_LEVELS >= 3
   3.316      return sh_map_and_validate(v, gl2mfn, new_gl2p, size, 
   3.317                                  SH_type_l2h_shadow, 
   3.318                                  shadow_l2_index, 
   3.319 @@ -3346,7 +3381,12 @@ sh_set_toplevel_shadow(struct vcpu *v,
   3.320      
   3.321  #if SHADOW_OPTIMIZATIONS & SHOPT_EARLY_UNSHADOW
   3.322      /* Once again OK to unhook entries from this table if we see fork/exit */
   3.323 -    ASSERT(sh_mfn_is_a_page_table(gmfn));
   3.324 +#if CONFIG_PAGING_LEVELS == 4
   3.325 +    if ( IS_COMPAT(d) )
   3.326 +        ASSERT(!sh_mfn_is_a_page_table(gmfn));
   3.327 +    else
   3.328 +#endif
   3.329 +        ASSERT(sh_mfn_is_a_page_table(gmfn));
   3.330      mfn_to_page(gmfn)->shadow_flags &= ~SHF_unhooked_mappings;
   3.331  #endif
   3.332  
   3.333 @@ -3754,7 +3794,7 @@ void sh_clear_shadow_entry(struct vcpu *
   3.334      case SH_type_l1_shadow:
   3.335          (void) shadow_set_l1e(v, ep, shadow_l1e_empty(), smfn); break;
   3.336      case SH_type_l2_shadow:
   3.337 -#if GUEST_PAGING_LEVELS == 3
   3.338 +#if GUEST_PAGING_LEVELS >= 3
   3.339      case SH_type_l2h_shadow:
   3.340  #endif
   3.341          (void) shadow_set_l2e(v, ep, shadow_l2e_empty(), smfn); break;
   3.342 @@ -3774,11 +3814,8 @@ int sh_remove_l1_shadow(struct vcpu *v, 
   3.343      shadow_l2e_t *sl2e;
   3.344      int done = 0;
   3.345      int flags;
   3.346 -#if GUEST_PAGING_LEVELS != 4
   3.347 -    int xen_mappings = !shadow_mode_external(v->domain);
   3.348 -#endif
   3.349      
   3.350 -    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, done, xen_mappings, 
   3.351 +    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, done, v->domain, 
   3.352      {
   3.353          flags = shadow_l2e_get_flags(*sl2e);
   3.354          if ( (flags & _PAGE_PRESENT) 
   3.355 @@ -3821,9 +3858,9 @@ int sh_remove_l3_shadow(struct vcpu *v, 
   3.356  {
   3.357      shadow_l4e_t *sl4e;
   3.358      int done = 0;
   3.359 -    int flags, xen_mappings = !shadow_mode_external(v->domain);
   3.360 +    int flags;
   3.361      
   3.362 -    SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, done, xen_mappings,
   3.363 +    SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, done, v->domain,
   3.364      {
   3.365          flags = shadow_l4e_get_flags(*sl4e);
   3.366          if ( (flags & _PAGE_PRESENT) 
   3.367 @@ -4196,14 +4233,11 @@ int sh_audit_l2_table(struct vcpu *v, mf
   3.368      gfn_t gfn;
   3.369      char *s;
   3.370      int done = 0;
   3.371 -#if GUEST_PAGING_LEVELS != 4
   3.372 -    int xen_mappings = !shadow_mode_external(v->domain);
   3.373 -#endif
   3.374  
   3.375      /* Follow the backpointer */
   3.376      gl2mfn = _mfn(mfn_to_shadow_page(sl2mfn)->backpointer);
   3.377      gl2e = gp = sh_map_domain_page(gl2mfn);
   3.378 -    SHADOW_FOREACH_L2E(sl2mfn, sl2e, &gl2e, done, xen_mappings, {
   3.379 +    SHADOW_FOREACH_L2E(sl2mfn, sl2e, &gl2e, done, v->domain, {
   3.380  
   3.381          s = sh_audit_flags(v, 2, guest_l2e_get_flags(*gl2e),
   3.382                              shadow_l2e_get_flags(*sl2e));
   3.383 @@ -4255,10 +4289,11 @@ int sh_audit_l3_table(struct vcpu *v, mf
   3.384              gfn = guest_l3e_get_gfn(*gl3e);
   3.385              mfn = shadow_l3e_get_mfn(*sl3e);
   3.386              gmfn = get_shadow_status(v, audit_gfn_to_mfn(v, gfn, gl3mfn), 
   3.387 -                                     (GUEST_PAGING_LEVELS == 3 
   3.388 +                                     ((GUEST_PAGING_LEVELS == 3 ||
   3.389 +                                       IS_COMPAT(v->domain))
   3.390                                        && !shadow_mode_external(v->domain)
   3.391                                        && (guest_index(gl3e) % 4) == 3)
   3.392 -                                     ? SH_type_l2h_pae_shadow
   3.393 +                                     ? SH_type_l2h_shadow
   3.394                                       : SH_type_l2_shadow);
   3.395              if ( mfn_x(gmfn) != mfn_x(mfn) )
   3.396                  AUDIT_FAIL(3, "bad translation: gfn %" SH_PRI_gfn
   3.397 @@ -4278,12 +4313,11 @@ int sh_audit_l4_table(struct vcpu *v, mf
   3.398      gfn_t gfn;
   3.399      char *s;
   3.400      int done = 0;
   3.401 -    int xen_mappings = !shadow_mode_external(v->domain);
   3.402  
   3.403      /* Follow the backpointer */
   3.404      gl4mfn = _mfn(mfn_to_shadow_page(sl4mfn)->backpointer);
   3.405      gl4e = gp = sh_map_domain_page(gl4mfn);
   3.406 -    SHADOW_FOREACH_L4E(sl4mfn, sl4e, &gl4e, done, xen_mappings,
   3.407 +    SHADOW_FOREACH_L4E(sl4mfn, sl4e, &gl4e, done, v->domain,
   3.408      {
   3.409          s = sh_audit_flags(v, 4, guest_l4e_get_flags(*gl4e),
   3.410                              shadow_l4e_get_flags(*sl4e));
     4.1 --- a/xen/arch/x86/mm/shadow/private.h	Mon Feb 19 16:16:53 2007 +0000
     4.2 +++ b/xen/arch/x86/mm/shadow/private.h	Mon Feb 19 19:58:07 2007 +0000
     4.3 @@ -269,12 +269,13 @@ static inline void shadow_check_page_str
     4.4  #define SH_type_l1_64_shadow   (8U) /* shadowing a 64-bit L1 page */
     4.5  #define SH_type_fl1_64_shadow  (9U) /* L1 shadow for 64-bit 2M superpg */
     4.6  #define SH_type_l2_64_shadow  (10U) /* shadowing a 64-bit L2 page */
     4.7 -#define SH_type_l3_64_shadow  (11U) /* shadowing a 64-bit L3 page */
     4.8 -#define SH_type_l4_64_shadow  (12U) /* shadowing a 64-bit L4 page */
     4.9 -#define SH_type_max_shadow    (12U)
    4.10 -#define SH_type_p2m_table     (13U) /* in use as the p2m table */
    4.11 -#define SH_type_monitor_table (14U) /* in use as a monitor table */
    4.12 -#define SH_type_unused        (15U)
    4.13 +#define SH_type_l2h_64_shadow (11U) /* shadowing a compat PAE L2 high page */
    4.14 +#define SH_type_l3_64_shadow  (12U) /* shadowing a 64-bit L3 page */
    4.15 +#define SH_type_l4_64_shadow  (13U) /* shadowing a 64-bit L4 page */
    4.16 +#define SH_type_max_shadow    (13U)
    4.17 +#define SH_type_p2m_table     (14U) /* in use as the p2m table */
    4.18 +#define SH_type_monitor_table (15U) /* in use as a monitor table */
    4.19 +#define SH_type_unused        (16U)
    4.20  
    4.21  /* 
    4.22   * What counts as a pinnable shadow?
    4.23 @@ -325,12 +326,13 @@ static inline int sh_type_is_pinnable(st
    4.24  #define SHF_L1_64   (1u << SH_type_l1_64_shadow)
    4.25  #define SHF_FL1_64  (1u << SH_type_fl1_64_shadow)
    4.26  #define SHF_L2_64   (1u << SH_type_l2_64_shadow)
    4.27 +#define SHF_L2H_64  (1u << SH_type_l2h_64_shadow)
    4.28  #define SHF_L3_64   (1u << SH_type_l3_64_shadow)
    4.29  #define SHF_L4_64   (1u << SH_type_l4_64_shadow)
    4.30  
    4.31  #define SHF_32  (SHF_L1_32|SHF_FL1_32|SHF_L2_32)
    4.32  #define SHF_PAE (SHF_L1_PAE|SHF_FL1_PAE|SHF_L2_PAE|SHF_L2H_PAE)
    4.33 -#define SHF_64  (SHF_L1_64|SHF_FL1_64|SHF_L2_64|SHF_L3_64|SHF_L4_64)
    4.34 +#define SHF_64  (SHF_L1_64|SHF_FL1_64|SHF_L2_64|SHF_L2H_64|SHF_L3_64|SHF_L4_64)
    4.35  
    4.36  /* Used for hysteresis when automatically unhooking mappings on fork/exit */
    4.37  #define SHF_unhooked_mappings (1u<<31)
    4.38 @@ -367,7 +369,6 @@ void shadow_unhook_mappings(struct vcpu 
    4.39  
    4.40  /* Install the xen mappings in various flavours of shadow */
    4.41  void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn);
    4.42 -void sh_install_xen_entries_in_l2h(struct vcpu *v, mfn_t sl2hmfn);
    4.43  void sh_install_xen_entries_in_l2(struct vcpu *v, mfn_t gl2mfn, mfn_t sl2mfn);
    4.44  
    4.45  /* Update the shadows in response to a pagetable write from Xen */
     5.1 --- a/xen/arch/x86/mm/shadow/types.h	Mon Feb 19 16:16:53 2007 +0000
     5.2 +++ b/xen/arch/x86/mm/shadow/types.h	Mon Feb 19 19:58:07 2007 +0000
     5.3 @@ -389,6 +389,7 @@ static inline guest_l4e_t guest_l4e_from
     5.4  #define SH_type_l1_shadow  SH_type_l1_64_shadow
     5.5  #define SH_type_fl1_shadow SH_type_fl1_64_shadow
     5.6  #define SH_type_l2_shadow  SH_type_l2_64_shadow
     5.7 +#define SH_type_l2h_shadow SH_type_l2h_64_shadow
     5.8  #define SH_type_l3_shadow  SH_type_l3_64_shadow
     5.9  #define SH_type_l4_shadow  SH_type_l4_64_shadow
    5.10  #endif
     6.1 --- a/xen/include/asm-x86/x86_64/page.h	Mon Feb 19 16:16:53 2007 +0000
     6.2 +++ b/xen/include/asm-x86/x86_64/page.h	Mon Feb 19 19:58:07 2007 +0000
     6.3 @@ -59,9 +59,11 @@ typedef l4_pgentry_t root_pgentry_t;
     6.4        !((_t) & PGT_pae_xen_l2) ||                      \
     6.5        ((_s) < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_d)) )
     6.6  #define is_guest_l3_slot(_s) (1)
     6.7 -#define is_guest_l4_slot(_s)                   \
     6.8 -    (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) || \
     6.9 -     ((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT))
    6.10 +#define is_guest_l4_slot(_d, _s)                    \
    6.11 +    ( IS_COMPAT(_d)                                 \
    6.12 +      ? ((_s) == 0)                                 \
    6.13 +      : (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) ||  \
    6.14 +         ((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT)))
    6.15  
    6.16  #define root_get_pfn              l4e_get_pfn
    6.17  #define root_get_flags            l4e_get_flags
    6.18 @@ -96,7 +98,7 @@ typedef l4_pgentry_t root_pgentry_t;
    6.19  #define L3_DISALLOW_MASK (BASE_DISALLOW_MASK)
    6.20  #define L4_DISALLOW_MASK (BASE_DISALLOW_MASK)
    6.21  
    6.22 -#define COMPAT_L3_DISALLOW_MASK 0xFFFFF1E6U
    6.23 +#define COMPAT_L3_DISALLOW_MASK L3_DISALLOW_MASK
    6.24  
    6.25  #define PAGE_HYPERVISOR         (__PAGE_HYPERVISOR         | _PAGE_GLOBAL)
    6.26  #define PAGE_HYPERVISOR_NOCACHE (__PAGE_HYPERVISOR_NOCACHE | _PAGE_GLOBAL)