ia64/xen-unstable

changeset 8898:eaeb26494a39

Enable unmodified x86_32 PAE guests on 64-bit Xen when the hvm feature
is present. We have tested only Linux at this point, and we will improve
the functionality as we test other guests.

SVM needs the equivalent changes to vmcb.c to get this
functionality working, but this patch does not break the build.

Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Xiaohui Xin <xiaohui.xin@intel.com>
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Feb 18 11:56:13 2006 +0100 (2006-02-18)
parents 39f624f2b241
children 864dcee1f899
files xen/arch/x86/Makefile xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/shadow.c xen/arch/x86/shadow_public.c xen/common/page_alloc.c xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/vmx/vmx.h xen/include/asm-x86/shadow_64.h
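
Background for the diff below: in PAE mode the guest's CR3 points at a 32-byte-aligned block of four L3 entries, so up to 128 such blocks can share one page. The shadow code added here records which 32-byte slot a given CR3 selects (get_cr3_idxval) and folds that index into the shadow-status key, so each slot gets its own shadow top-level table. The following is a minimal standalone sketch of that arithmetic, mirroring the PAE_CR3_ALIGN/PAE_CR3_IDX_MASK macros and the key construction in the patch; the PGT_score_shift value and the sample CR3 are made-up illustration values, not Xen's definitions.

#include <stdio.h>

/* Mirrors the macros added to xen/include/asm-x86/shadow_64.h. */
#define PAE_CR3_ALIGN       5       /* PAE L3 tables are 32-byte aligned       */
#define PAE_CR3_IDX_MASK    0x7f    /* 4096 / 32 = 128 possible slots per page */

#define PAGE_SHIFT          12
#define PGT_score_shift     20      /* illustrative value only                 */

/* Which 32-byte slot within its page does this PAE CR3 select? */
static unsigned long cr3_idxval(unsigned long pae_cr3)
{
    return (pae_cr3 >> PAE_CR3_ALIGN) & PAE_CR3_IDX_MASK;
}

int main(void)
{
    unsigned long cr3  = 0x1234060UL;        /* made-up guest CR3 value        */
    unsigned long gpfn = cr3 >> PAGE_SHIFT;  /* page frame holding the L3      */
    unsigned long idx  = cr3_idxval(cr3);    /* slot 3 within that page        */

    /* The shadow code keys its L4 lookups on (idx << PGT_score_shift) | gpfn,
     * so each 32-byte L3 slot gets a distinct shadow top-level table. */
    unsigned long key = (idx << PGT_score_shift) | gpfn;

    printf("gpfn=%#lx idx=%lu key=%#lx\n", gpfn, idx, key);
    return 0;
}
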
line diff
     1.1 --- a/xen/arch/x86/Makefile	Sat Feb 18 11:41:42 2006 +0100
     1.2 +++ b/xen/arch/x86/Makefile	Sat Feb 18 11:56:13 2006 +0100
     1.3 @@ -23,7 +23,7 @@ endif
     1.4  
     1.5  OBJS := $(patsubst shadow%.o,,$(OBJS))	# drop all
     1.6  ifeq ($(TARGET_SUBARCH),x86_64) 
     1.7 - OBJS += shadow.o shadow_public.o shadow_guest32.o	# x86_64: new code
     1.8 + OBJS += shadow.o shadow_public.o shadow_guest32.o shadow_guest32pae.o	# x86_64: new code
     1.9  endif
    1.10  ifeq ($(TARGET_SUBARCH),x86_32) 
    1.11   ifneq ($(pae),n)
     2.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Sat Feb 18 11:41:42 2006 +0100
     2.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Sat Feb 18 11:56:13 2006 +0100
     2.3 @@ -244,7 +244,6 @@ static inline int long_mode_do_msr_write
     2.4                         host_state->msr_items[VMX_INDEX_MSR_EFER]);
     2.5                  set_bit(VMX_INDEX_MSR_EFER, &host_state->flags);
     2.6                  set_bit(VMX_INDEX_MSR_EFER, &msr->flags);
     2.7 -                wrmsrl(MSR_EFER, msr_content);
     2.8              }
     2.9          }
    2.10          break;
    2.11 @@ -433,6 +432,22 @@ int vmx_instruction_length(struct vcpu *
    2.12      return inst_len;
    2.13  }
    2.14  
    2.15 +unsigned long vmx_get_ctrl_reg(struct vcpu *v, unsigned int num)
    2.16 +{
    2.17 +    switch ( num )
    2.18 +    {
    2.19 +    case 0:
    2.20 +        return v->arch.hvm_vmx.cpu_cr0;
    2.21 +    case 2:
    2.22 +        return v->arch.hvm_vmx.cpu_cr2;
    2.23 +    case 3:
    2.24 +        return v->arch.hvm_vmx.cpu_cr3;
    2.25 +    default:
    2.26 +        BUG();
    2.27 +    }
    2.28 +    return 0;                   /* dummy */
    2.29 +}
    2.30 +
    2.31  extern long evtchn_send(int lport);
    2.32  void do_nmi(struct cpu_user_regs *);
    2.33  
    2.34 @@ -529,6 +544,7 @@ int start_vmx(void)
    2.35      hvm_funcs.realmode = vmx_realmode;
    2.36      hvm_funcs.paging_enabled = vmx_paging_enabled;
    2.37      hvm_funcs.instruction_length = vmx_instruction_length;
    2.38 +    hvm_funcs.get_guest_ctrl_reg = vmx_get_ctrl_reg;
    2.39  
    2.40      hvm_enabled = 1;
    2.41  
    2.42 @@ -652,14 +668,17 @@ static void vmx_vmexit_do_cpuid(unsigned
    2.43                  !vlapic_global_enabled((VLAPIC(v))) )
    2.44              clear_bit(X86_FEATURE_APIC, &edx);
    2.45  
    2.46 -#if CONFIG_PAGING_LEVELS >= 3
    2.47 +#if CONFIG_PAGING_LEVELS < 3
    2.48 +        clear_bit(X86_FEATURE_PSE, &edx);
    2.49 +        clear_bit(X86_FEATURE_PAE, &edx);
    2.50 +        clear_bit(X86_FEATURE_PSE36, &edx);
    2.51 +#else
    2.52          if ( v->domain->arch.ops->guest_paging_levels == PAGING_L2 )
    2.53 -#endif
    2.54          {
    2.55              clear_bit(X86_FEATURE_PSE, &edx);
    2.56 -            clear_bit(X86_FEATURE_PAE, &edx);
    2.57              clear_bit(X86_FEATURE_PSE36, &edx);
    2.58          }
    2.59 +#endif
    2.60  
    2.61          /* Unsupportable for virtualised CPUs. */
    2.62          ecx &= ~VMX_VCPU_CPUID_L1_RESERVED; /* mask off reserved bits */
    2.63 @@ -1005,11 +1024,11 @@ vmx_world_restore(struct vcpu *v, struct
    2.64          v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
    2.65          if (old_base_mfn)
    2.66               put_page(mfn_to_page(old_base_mfn));
    2.67 -        update_pagetables(v);
    2.68          /*
    2.69           * arch.shadow_table should now hold the next CR3 for shadow
    2.70           */
    2.71          v->arch.hvm_vmx.cpu_cr3 = c->cr3;
    2.72 +        update_pagetables(v);
    2.73          HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
    2.74          __vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.shadow_table));
    2.75      }
    2.76 @@ -1400,11 +1419,16 @@ static int mov_to_cr(int gp, int cr, str
    2.77              v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
    2.78              if (old_base_mfn)
    2.79                  put_page(mfn_to_page(old_base_mfn));
    2.80 -            update_pagetables(v);
    2.81              /*
    2.82               * arch.shadow_table should now hold the next CR3 for shadow
    2.83               */
    2.84 +#if CONFIG_PAGING_LEVELS >= 3
    2.85 +            if ( v->domain->arch.ops->guest_paging_levels == PAGING_L3 )
    2.86 +                shadow_sync_all(v->domain);
    2.87 +#endif
    2.88 +
    2.89              v->arch.hvm_vmx.cpu_cr3 = value;
    2.90 +            update_pagetables(v);
    2.91              HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx",
    2.92                          value);
    2.93              __vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.shadow_table));
    2.94 @@ -1413,18 +1437,81 @@ static int mov_to_cr(int gp, int cr, str
    2.95      }
    2.96      case 4: /* CR4 */
    2.97      {
    2.98 -        if (value & X86_CR4_PAE){
    2.99 +        unsigned long old_cr4;
   2.100 +
   2.101 +        __vmread(CR4_READ_SHADOW, &old_cr4);
   2.102 +
   2.103 +        if ( value & X86_CR4_PAE && !(old_cr4 & X86_CR4_PAE) )
   2.104 +        {
   2.105              set_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
   2.106 -        } else {
   2.107 -            if (test_bit(VMX_CPU_STATE_LMA_ENABLED,
   2.108 -                         &v->arch.hvm_vmx.cpu_state)){
   2.109 +
   2.110 +            if ( vmx_pgbit_test(v) ) 
   2.111 +            {
   2.112 +                /* The guest is 32 bit. */
   2.113 +#if CONFIG_PAGING_LEVELS >= 4
   2.114 +                unsigned long mfn, old_base_mfn;
   2.115 +
   2.116 +                if( !shadow_set_guest_paging_levels(v->domain, 3) )
   2.117 +                {
   2.118 +                    printk("Unsupported guest paging levels\n");
   2.119 +                    domain_crash_synchronous(); /* need to take a clean path */
   2.120 +                }
   2.121 +
   2.122 +                if ( !VALID_MFN(mfn = get_mfn_from_gpfn(
   2.123 +                                    v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
   2.124 +                     !get_page(mfn_to_page(mfn), v->domain) ) 
   2.125 +                {
   2.126 +                    printk("Invalid CR3 value = %lx", v->arch.hvm_vmx.cpu_cr3);
   2.127 +                    domain_crash_synchronous(); /* need to take a clean path */
   2.128 +                }
   2.129 +
   2.130 +                old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   2.131 +                if ( old_base_mfn )
   2.132 +                    put_page(mfn_to_page(old_base_mfn));
   2.133 +
   2.134 +                /*
   2.135 +                 * Now arch.guest_table points to machine physical.
   2.136 +                 */
   2.137 +
   2.138 +                v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
   2.139 +                update_pagetables(v);
   2.140 +
   2.141 +                HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
   2.142 +                            (unsigned long) (mfn << PAGE_SHIFT));
   2.143 +
   2.144 +                __vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.shadow_table));
   2.145 +
   2.146 +                /*
   2.147 +                 * arch->shadow_table should hold the next CR3 for shadow
   2.148 +                 */
   2.149 +
   2.150 +                HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
   2.151 +                            v->arch.hvm_vmx.cpu_cr3, mfn);
   2.152 +#endif
   2.153 +            } 
   2.154 +            else
   2.155 +            {
   2.156 +                /*  The guest is 64 bit. */
   2.157 +#if CONFIG_PAGING_LEVELS >= 4
   2.158 +                if ( !shadow_set_guest_paging_levels(v->domain, 4) ) 
   2.159 +                {
   2.160 +                    printk("Unsupported guest paging levels\n");
   2.161 +                    domain_crash_synchronous(); /* need to take a clean path */
   2.162 +                }
   2.163 +#endif
   2.164 +            }
   2.165 +        }
   2.166 +        else if ( value & X86_CR4_PAE )
   2.167 +            set_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
   2.168 +        else
   2.169 +        {
   2.170 +            if ( test_bit(VMX_CPU_STATE_LMA_ENABLED, &v->arch.hvm_vmx.cpu_state) )
   2.171                  vmx_inject_exception(v, TRAP_gp_fault, 0);
   2.172 -            }
   2.173 +
   2.174              clear_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
   2.175          }
   2.176  
   2.177          __vmread(CR4_READ_SHADOW, &old_cr);
   2.178 -
   2.179          __vmwrite(GUEST_CR4, value| VMX_CR4_HOST_MASK);
   2.180          __vmwrite(CR4_READ_SHADOW, value);
   2.181  
   2.182 @@ -1432,9 +1519,9 @@ static int mov_to_cr(int gp, int cr, str
   2.183           * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
   2.184           * all TLB entries except global entries.
   2.185           */
   2.186 -        if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE)) {
   2.187 +        if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
   2.188              shadow_sync_all(v->domain);
   2.189 -        }
   2.190 +
   2.191          break;
   2.192      }
   2.193      default:
     3.1 --- a/xen/arch/x86/shadow.c	Sat Feb 18 11:41:42 2006 +0100
     3.2 +++ b/xen/arch/x86/shadow.c	Sat Feb 18 11:56:13 2006 +0100
     3.3 @@ -49,12 +49,12 @@ static void mark_shadows_as_reflecting_s
     3.4  
     3.5  #if CONFIG_PAGING_LEVELS == 3
     3.6  static unsigned long shadow_l3_table(
     3.7 -    struct domain *d, unsigned long gpfn, unsigned long gmfn);
     3.8 +    struct vcpu *v, unsigned long gpfn, unsigned long gmfn);
     3.9  #endif
    3.10  
    3.11  #if CONFIG_PAGING_LEVELS == 4
    3.12  static unsigned long shadow_l4_table(
    3.13 -    struct domain *d, unsigned long gpfn, unsigned long gmfn);
    3.14 +    struct vcpu *v, unsigned long gpfn, unsigned long gmfn);
    3.15  #endif
    3.16  
    3.17  #if CONFIG_PAGING_LEVELS >= 3
    3.18 @@ -62,6 +62,7 @@ static void shadow_map_into_current(stru
    3.19      unsigned long va, unsigned int from, unsigned int to);
    3.20  static inline void validate_bl2e_change( struct domain *d,
    3.21      guest_root_pgentry_t *new_gle_p, pgentry_64_t *shadow_l3, int index);
    3.22 +static void update_top_level_shadow(struct vcpu *v, unsigned long smfn);
    3.23  #endif
    3.24  
    3.25  /********
    3.26 @@ -208,7 +209,7 @@ alloc_shadow_page(struct domain *d,
    3.27                    u32 psh_type)
    3.28  {
    3.29      struct page_info *page;
    3.30 -    unsigned long smfn;
    3.31 +    unsigned long smfn, real_gpfn;
    3.32      int pin = 0;
    3.33      void *l1, *lp;
    3.34  
    3.35 @@ -327,7 +328,8 @@ alloc_shadow_page(struct domain *d,
    3.36          break;
    3.37  
    3.38      case PGT_l4_shadow:
    3.39 -        if ( !shadow_promote(d, gpfn, gmfn, psh_type) )
    3.40 +        real_gpfn = gpfn & PGT_mfn_mask;
    3.41 +        if ( !shadow_promote(d, real_gpfn, gmfn, psh_type) )
    3.42              goto fail;
    3.43          perfc_incr(shadow_l4_pages);
    3.44          d->arch.shadow_page_count++;
    3.45 @@ -471,10 +473,11 @@ shadow_hl2_table(struct domain *d, unsig
    3.46   * Might be worth investigating...
    3.47   */
    3.48  static unsigned long shadow_l2_table(
    3.49 -    struct domain *d, unsigned long gpfn, unsigned long gmfn)
    3.50 +    struct vcpu *v, unsigned long gpfn, unsigned long gmfn)
    3.51  {
    3.52      unsigned long smfn;
    3.53      l2_pgentry_t *spl2e;
    3.54 +    struct domain *d = v->domain;
    3.55      int i;
    3.56  
    3.57      SH_VVLOG("shadow_l2_table(gpfn=%lx, gmfn=%lx)", gpfn, gmfn);
    3.58 @@ -851,8 +854,17 @@ static void shadow_set_l1e_64(
    3.59                  __rw_entry(v, va, &sle, SHADOW_ENTRY | GET_ENTRY | i);
    3.60              }
    3.61          }
    3.62 -        if ( i < PAGING_L4 )
    3.63 -            shadow_update_min_max(entry_get_pfn(sle_up), table_offset_64(va, i));
    3.64 +        if ( d->arch.ops->guest_paging_levels == PAGING_L3 ) 
    3.65 +        {
    3.66 +            if ( i < PAGING_L3 )
    3.67 +                shadow_update_min_max(entry_get_pfn(sle_up), table_offset_64(va, i));
    3.68 +        }
    3.69 +        else 
    3.70 +        {
    3.71 +            if ( i < PAGING_L4 )
    3.72 +                shadow_update_min_max(entry_get_pfn(sle_up), table_offset_64(va, i));
    3.73 +        }
    3.74 +
    3.75          sle_up = sle;
    3.76      }
    3.77  
    3.78 @@ -1187,6 +1199,8 @@ static int is_out_of_sync(struct vcpu *v
    3.79          unsigned long gmfn;
    3.80          unsigned long gpfn;
    3.81          int i;
    3.82 +        unsigned int base_idx = 0;
    3.83 +        base_idx = get_cr3_idxval(v);
    3.84  
    3.85          gmfn = l2mfn;
    3.86          gpfn = l2pfn;
    3.87 @@ -1200,7 +1214,7 @@ static int is_out_of_sync(struct vcpu *v
    3.88  
    3.89              if ( page_out_of_sync(mfn_to_page(gmfn)) &&
    3.90                   !snapshot_entry_matches(
    3.91 -                     d, guest_pt, gpfn, table_offset_64(va, i)) )
    3.92 +                     d, guest_pt, gpfn, guest_table_offset_64(va, i, base_idx)) )
    3.93              {
    3.94                  unmap_and_return (1);
    3.95              }
    3.96 @@ -1481,6 +1495,74 @@ static int remove_all_write_access(
    3.97      return 0;
    3.98  }
    3.99  
   3.100 +static void resync_pae_guest_l3(struct domain *d)
   3.101 +{
   3.102 +    struct out_of_sync_entry *entry;
   3.103 +    unsigned long i, idx;
   3.104 +    unsigned long smfn, gmfn;
   3.105 +    pgentry_64_t *guest, *shadow_l3, *snapshot;
   3.106 +    struct vcpu *v = current;
   3.107 +    int max = -1;
   3.108 +    int unshadow = 0;
   3.109 +
   3.110 +    
   3.111 +    ASSERT( shadow_mode_external(d) );
   3.112 +
   3.113 +    gmfn = pagetable_get_pfn(v->arch.guest_table);
   3.114 +           
   3.115 +    for ( entry = d->arch.out_of_sync; entry; entry = entry->next ) 
   3.116 +    {
   3.117 +        if ( entry->snapshot_mfn == SHADOW_SNAPSHOT_ELSEWHERE )
   3.118 +            continue;
   3.119 +        if ( entry->gmfn != gmfn )
   3.120 +            continue;
   3.121 +
   3.122 +        idx = get_cr3_idxval(v);
   3.123 +        smfn = __shadow_status(
   3.124 +            d, ((unsigned long)(idx << PGT_score_shift) | entry->gpfn), PGT_l4_shadow);
   3.125 +
   3.126 +#ifndef NDEBUG
   3.127 +        if ( !smfn ) 
   3.128 +        {
   3.129 +            BUG();
   3.130 +        }
   3.131 +#endif
   3.132 +
   3.133 +        guest    = (pgentry_64_t *)map_domain_page(entry->gmfn);
   3.134 +        snapshot = (pgentry_64_t *)map_domain_page(entry->snapshot_mfn);
   3.135 +        shadow_l3 = (pgentry_64_t *)map_domain_page(smfn);
   3.136 +
   3.137 +        for ( i = 0; i < PAE_L3_PAGETABLE_ENTRIES; i++ ) 
   3.138 +        {
   3.139 +            int index = i + idx * PAE_L3_PAGETABLE_ENTRIES;
   3.140 +            if ( entry_has_changed(
   3.141 +                    guest[index], snapshot[index], PAGE_FLAG_MASK) ) 
   3.142 +            {
   3.143 +                validate_entry_change(d, &guest[index],
   3.144 +                                      &shadow_l3[i], PAGING_L3);
   3.145 +            }
   3.146 +            if ( entry_get_value(guest[index]) != 0 )
   3.147 +                max = i;
   3.148 +
   3.149 +            if ( !(entry_get_flags(guest[index]) & _PAGE_PRESENT) &&
   3.150 +                 unlikely(entry_get_value(guest[index]) != 0) &&
   3.151 +                 !unshadow &&
   3.152 +                 (frame_table[smfn].u.inuse.type_info & PGT_pinned) )
   3.153 +                unshadow = 1;
   3.154 +
   3.155 +        }
   3.156 +        if ( max == -1 )
   3.157 +            unshadow = 1;
   3.158 +
   3.159 +        unmap_domain_page(guest);
   3.160 +        unmap_domain_page(snapshot);
   3.161 +        unmap_domain_page(shadow_l3);
   3.162 +
   3.163 +        if ( unlikely(unshadow) )
   3.164 +            shadow_unpin(smfn);
   3.165 +        break;
   3.166 +    }
   3.167 +}
   3.168  
   3.169  static int resync_all(struct domain *d, u32 stype)
   3.170  {
   3.171 @@ -1823,6 +1905,64 @@ static int resync_all(struct domain *d, 
   3.172      return need_flush;
   3.173  }
   3.174  
   3.175 +#if CONFIG_PAGING_LEVELS == 2
   3.176 +static int resync_all_levels_guest_page(struct domain *d)
   3.177 +{
   3.178 +    int need_flush = 0;
   3.179 +
   3.180 +    need_flush |= resync_all(d, PGT_l1_shadow); 
   3.181 +    if ( d->arch.ops->guest_paging_levels == PAGING_L2 &&
   3.182 +         shadow_mode_translate(d) )
   3.183 +    {
   3.184 +        need_flush |= resync_all(d, PGT_hl2_shadow);
   3.185 +    }
   3.186 +    return need_flush;
   3.187 +}
   3.188 +#elif CONFIG_PAGING_LEVELS == 3
   3.189 +static int resync_all_levels_guest_page(struct domain *d)
   3.190 +{
   3.191 +    int need_flush = 0;
   3.192 +
   3.193 +    need_flush |= resync_all(d, PGT_l1_shadow);
   3.194 +    if ( d->arch.ops->guest_paging_levels == PAGING_L2 ) 
   3.195 +        need_flush |= resync_all(d, PGT_l4_shadow);
   3.196 +    else
   3.197 +    {
   3.198 +        need_flush |= resync_all(d, PGT_l2_shadow);
   3.199 +        if ( shadow_mode_log_dirty(d) )
   3.200 +        {
   3.201 +            need_flush |= resync_all(d, PGT_l3_shadow);
   3.202 +            need_flush |= resync_all(d, PGT_l4_shadow);
   3.203 +        }
   3.204 +        else
   3.205 +            resync_pae_guest_l3(d);
   3.206 +    }
   3.207 +    
   3.208 +    return need_flush;
   3.209 +}
   3.210 +#elif CONFIG_PAGING_LEVELS == 4
   3.211 +static int resync_all_levels_guest_page(struct domain *d)
   3.212 +{
   3.213 +    int need_flush = 0;
   3.214 +
   3.215 +    need_flush |= resync_all(d, PGT_l1_shadow);
   3.216 +    if ( d->arch.ops->guest_paging_levels == PAGING_L2 )
   3.217 +        need_flush |= resync_all(d, PGT_l4_shadow);
   3.218 +    else
   3.219 +    {
   3.220 +        need_flush |= resync_all(d, PGT_l2_shadow);
   3.221 +        if ( d->arch.ops->guest_paging_levels == PAGING_L3 )
   3.222 +            resync_pae_guest_l3(d);
   3.223 +        else
   3.224 +        {
   3.225 +            need_flush |= resync_all(d, PGT_l3_shadow);
   3.226 +            need_flush |= resync_all(d, PGT_l4_shadow);
   3.227 +        }
   3.228 +    }
   3.229 +    return need_flush;
   3.230 +}
   3.231 +#endif
   3.232 +
   3.233  static void sync_all(struct domain *d)
   3.234  {
   3.235      struct out_of_sync_entry *entry;
   3.236 @@ -1869,29 +2009,7 @@ static void sync_all(struct domain *d)
   3.237      /* Flush ourself later. */
   3.238      need_flush = 1;
   3.239  
   3.240 -    /* Second, resync all L1 pages, then L2 pages, etc... */
   3.241 -    need_flush |= resync_all(d, PGT_l1_shadow);
   3.242 -
   3.243 -#if CONFIG_PAGING_LEVELS == 2
   3.244 -    if ( d->arch.ops->guest_paging_levels == PAGING_L2 &&
   3.245 -         shadow_mode_translate(d) )  
   3.246 -    {
   3.247 -        need_flush |= resync_all(d, PGT_hl2_shadow);
   3.248 -    }
   3.249 -#endif
   3.250 -
   3.251 -#if CONFIG_PAGING_LEVELS >= 3
   3.252 -    if ( d->arch.ops->guest_paging_levels == PAGING_L2 )
   3.253 -        need_flush |= resync_all(d, PGT_l4_shadow);
   3.254 -    else
   3.255 -        need_flush |= resync_all(d, PGT_l2_shadow);
   3.256 -
   3.257 -    if ( d->arch.ops->guest_paging_levels >= PAGING_L3 )
   3.258 -    {
   3.259 -        need_flush |= resync_all(d, PGT_l3_shadow);
   3.260 -        need_flush |= resync_all(d, PGT_l4_shadow);
   3.261 -    }
   3.262 -#endif
   3.263 +    need_flush |= resync_all_levels_guest_page(d);
   3.264  
   3.265      if ( need_flush && !unlikely(shadow_mode_external(d)) )
   3.266          local_flush_tlb();
   3.267 @@ -2217,21 +2335,36 @@ static void shadow_update_pagetables(str
   3.268          v->arch.guest_vtable = map_domain_page_global(gmfn);
   3.269      }
   3.270  
   3.271 +#if CONFIG_PAGING_LEVELS >= 3
   3.272 +    /*
   3.273 +     * Handle 32-bit PAE enabled guest
   3.274 +     */
   3.275 +    if ( SH_GUEST_32PAE && d->arch.ops->guest_paging_levels == PAGING_L3 ) 
   3.276 +    {
   3.277 +        u32 index = get_cr3_idxval(v);
   3.278 +        gpfn = (index << PGT_score_shift) | gpfn;
   3.279 +    }
   3.280 +#endif
   3.281 +
   3.282      /*
   3.283       *  arch.shadow_table
   3.284       */
   3.285      if ( unlikely(!(smfn = __shadow_status(d, gpfn, PGT_base_page_table))) ) 
   3.286      {
   3.287  #if CONFIG_PAGING_LEVELS == 2
   3.288 -        smfn = shadow_l2_table(d, gpfn, gmfn);
   3.289 +        smfn = shadow_l2_table(v, gpfn, gmfn);
   3.290  #elif CONFIG_PAGING_LEVELS == 3
   3.291 -        smfn = shadow_l3_table(d, gpfn, gmfn);
   3.292 +        smfn = shadow_l3_table(v, gpfn, gmfn);
   3.293  #elif CONFIG_PAGING_LEVELS == 4
   3.294 -        smfn = shadow_l4_table(d, gpfn, gmfn);
   3.295 +        smfn = shadow_l4_table(v, gpfn, gmfn);
   3.296  #endif
   3.297      }
   3.298      else
   3.299      {
   3.300 +#if CONFIG_PAGING_LEVELS >= 3
   3.301 +        if ( SH_GUEST_32PAE && d->arch.ops->guest_paging_levels == PAGING_L3 )
   3.302 +            update_top_level_shadow(v, smfn);
   3.303 +#endif
   3.304          /*
   3.305           *  move sync later in order to avoid this smfn been 
   3.306           *  unshadowed occasionally
   3.307 @@ -2867,14 +3000,15 @@ static inline unsigned long init_bl2(
   3.308  
   3.309  #if CONFIG_PAGING_LEVELS == 3
   3.310  static unsigned long shadow_l3_table(
   3.311 -    struct domain *d, unsigned long gpfn, unsigned long gmfn)
   3.312 +    struct vcpu *v, unsigned long gpfn, unsigned long gmfn)
   3.313  {
   3.314      unsigned long smfn;
   3.315      l3_pgentry_t *spl3e;
   3.316 +    struct domain *d = v->domain;
   3.317  
   3.318      perfc_incrc(shadow_l3_table_count);
   3.319  
   3.320 -    SH_VVLOG("shadow_l4_table(gpfn=%lx, gmfn=%lx)", gpfn, gmfn);
   3.321 +    SH_VVLOG("shadow_l3_table(gpfn=%lx, gmfn=%lx)", gpfn, gmfn);
   3.322  
   3.323      if ( SH_L1_HAS_NEXT_PAGE &&
   3.324           d->arch.ops->guest_paging_levels == PAGING_L2 )
   3.325 @@ -2967,7 +3101,7 @@ static unsigned long shadow_l3_table(
   3.326  }
   3.327  #endif /* CONFIG_PAGING_LEVELS == 3 */
   3.328  
   3.329 -#ifndef GUEST_PGENTRY_32
   3.330 +#if (!defined(GUEST_PGENTRY_32) && !defined(GUEST_32PAE))
   3.331  static unsigned long gva_to_gpa_pae(unsigned long gva)
   3.332  {
   3.333      BUG();
   3.334 @@ -2977,10 +3111,11 @@ static unsigned long gva_to_gpa_pae(unsi
   3.335  
   3.336  #if CONFIG_PAGING_LEVELS == 4
   3.337  static unsigned long shadow_l4_table(
   3.338 -  struct domain *d, unsigned long gpfn, unsigned long gmfn)
   3.339 +  struct vcpu *v, unsigned long gpfn, unsigned long gmfn)
   3.340  {
   3.341      unsigned long smfn;
   3.342      l4_pgentry_t *spl4e;
   3.343 +    struct domain *d = v->domain;
   3.344  
   3.345      SH_VVLOG("shadow_l4_table(gpfn=%lx, gmfn=%lx)", gpfn, gmfn);
   3.346  
   3.347 @@ -2999,6 +3134,24 @@ static unsigned long shadow_l4_table(
   3.348  
   3.349      spl4e = (l4_pgentry_t *)map_domain_page(smfn);
   3.350  
   3.351 +    /* For 32-bit PAE guest on 64-bit host */
   3.352 +    if ( SH_GUEST_32PAE && d->arch.ops->guest_paging_levels == PAGING_L3 ) 
   3.353 +    {
   3.354 +        unsigned long index;
   3.355 +        /*
   3.356 +         * Shadow L4's pfn_info->tlbflush_timestamp
   3.357 +         * should also save it's own index.
   3.358 +         */
   3.359 +        index = get_cr3_idxval(v);
   3.360 +        frame_table[smfn].tlbflush_timestamp = index;
   3.361 +
   3.362 +        memset(spl4e, 0, L4_PAGETABLE_ENTRIES*sizeof(l4_pgentry_t));
   3.363 +        /* Map the self entry */
   3.364 +        spl4e[PAE_SHADOW_SELF_ENTRY] = l4e_from_pfn(smfn, __PAGE_HYPERVISOR);
   3.365 +        unmap_domain_page(spl4e);
   3.366 +        return smfn;
   3.367 +    }
   3.368 +
   3.369      /* Install hypervisor and 4x linear p.t. mapings. */
   3.370      if ( (PGT_base_page_table == PGT_l4_page_table) &&
   3.371        !shadow_mode_external(d) )
   3.372 @@ -3041,6 +3194,21 @@ static unsigned long shadow_l4_table(
   3.373  #endif /* CONFIG_PAGING_LEVELS == 4 */
   3.374  
   3.375  #if CONFIG_PAGING_LEVELS >= 3
   3.376 +static void 
   3.377 +update_top_level_shadow(struct vcpu *v, unsigned long smfn)
   3.378 +{
   3.379 +    unsigned long index = get_cr3_idxval(v);
   3.380 +    pgentry_64_t *sple = (pgentry_64_t *)map_domain_page(smfn);
   3.381 +    pgentry_64_t *gple = (pgentry_64_t *)&v->arch.guest_vtable;
   3.382 +    int i;
   3.383 +
   3.384 +    for ( i = 0; i < PAE_L3_PAGETABLE_ENTRIES; i++ )
   3.385 +        validate_entry_change(
   3.386 +            v->domain, &gple[index*4+i], &sple[i], PAGING_L3);
   3.387 +
   3.388 +    unmap_domain_page(sple);
   3.389 +}
   3.390 +
   3.391  /*
   3.392   * validate_bl2e_change()
   3.393   * The code is for 32-bit HVM guest on 64-bit host.
   3.394 @@ -3410,6 +3578,8 @@ static inline int guest_page_fault(
   3.395      pgentry_64_t gle = { 0 };
   3.396      unsigned long gpfn = 0, mfn;
   3.397      int i;
   3.398 +    unsigned int base_idx = 0;
   3.399 +    base_idx = get_cr3_idxval(v);
   3.400  
   3.401      ASSERT( d->arch.ops->guest_paging_levels >= PAGING_L3 );
   3.402  
   3.403 @@ -3438,7 +3608,10 @@ static inline int guest_page_fault(
   3.404  #if CONFIG_PAGING_LEVELS >= 3
   3.405      if ( d->arch.ops->guest_paging_levels == PAGING_L3 ) 
   3.406      {
   3.407 -        gpfn = pagetable_get_pfn(v->arch.guest_table);
   3.408 +        if ( SH_GUEST_32PAE )
   3.409 +            gpfn = hvm_get_guest_ctrl_reg(v, 3);
   3.410 +        else
   3.411 +            gpfn = pagetable_get_pfn(v->arch.guest_table);
   3.412      }
   3.413  #endif
   3.414  
   3.415 @@ -3451,7 +3624,8 @@ static inline int guest_page_fault(
   3.416          mfn = gmfn_to_mfn(d, gpfn);
   3.417  
   3.418          lva = (pgentry_64_t *) map_domain_page(mfn);
   3.419 -        gle = lva[table_offset_64(va, i)];
   3.420 +        gle = lva[guest_table_offset_64(va, i, base_idx)];
   3.421 +
   3.422          unmap_domain_page(lva);
   3.423  
   3.424          gpfn = entry_get_pfn(gle);
   3.425 @@ -3695,7 +3869,7 @@ static unsigned long gva_to_gpa_64(unsig
   3.426   * The naming convention of the shadow_ops:
   3.427   * MODE_<pgentry size>_<guest paging levels>_HANDLER
   3.428   */
   3.429 -#ifndef GUEST_PGENTRY_32
   3.430 +#if (!defined(GUEST_PGENTRY_32) && !defined(GUEST_32PAE))
   3.431  struct shadow_ops MODE_64_3_HANDLER = {
   3.432      .guest_paging_levels        = 3,
   3.433      .invlpg                     = shadow_invlpg_64,
   3.434 @@ -3741,7 +3915,7 @@ struct shadow_ops MODE_32_2_HANDLER = {
   3.435  #endif
   3.436  
   3.437  #if ( CONFIG_PAGING_LEVELS == 3 && !defined (GUEST_PGENTRY_32) ) ||  \
   3.438 -    ( CONFIG_PAGING_LEVELS == 4 && defined (GUEST_PGENTRY_32) )
   3.439 +    ( CONFIG_PAGING_LEVELS == 4 && defined (GUEST_PGENTRY_32) ) 
   3.440  
   3.441  
   3.442  /* 
     4.1 --- a/xen/arch/x86/shadow_public.c	Sat Feb 18 11:41:42 2006 +0100
     4.2 +++ b/xen/arch/x86/shadow_public.c	Sat Feb 18 11:56:13 2006 +0100
     4.3 @@ -92,7 +92,7 @@ void shadow_direct_map_clean(struct vcpu
     4.4  /****************************************************************************/
     4.5  /************* export interface functions ***********************************/
     4.6  /****************************************************************************/
     4.7 -
     4.8 +void free_shadow_pages(struct domain *d);
     4.9  
    4.10  int shadow_set_guest_paging_levels(struct domain *d, int levels)
    4.11  {
    4.12 @@ -106,13 +106,22 @@ int shadow_set_guest_paging_levels(struc
    4.13          shadow_unlock(d);
    4.14          return 1;
    4.15  #endif
    4.16 -#if CONFIG_PAGING_LEVELS >= 3
    4.17 +#if CONFIG_PAGING_LEVELS == 3
    4.18      case 3:
    4.19          if ( d->arch.ops != &MODE_64_3_HANDLER )
    4.20              d->arch.ops = &MODE_64_3_HANDLER;
    4.21          shadow_unlock(d);
    4.22          return 1;
    4.23  #endif
    4.24 +#if CONFIG_PAGING_LEVELS == 4
    4.25 +    case 3:
    4.26 +        if ( d->arch.ops == &MODE_64_2_HANDLER )
    4.27 +            free_shadow_pages(d);
    4.28 +        if ( d->arch.ops != &MODE_64_PAE_HANDLER )
    4.29 +            d->arch.ops = &MODE_64_PAE_HANDLER;
    4.30 +        shadow_unlock(d);
    4.31 +        return 1;
    4.32 +#endif
    4.33      case 2:
    4.34  #if CONFIG_PAGING_LEVELS == 2
    4.35          if ( d->arch.ops != &MODE_32_2_HANDLER )
    4.36 @@ -239,9 +248,19 @@ free_shadow_tables(struct domain *d, uns
    4.37           */
    4.38          if ( external )
    4.39          {
    4.40 -            for ( i = 0; i < PAGETABLE_ENTRIES; i++ )
    4.41 +            for ( i = 0; i < PAGETABLE_ENTRIES; i++ ) {
    4.42                  if ( entry_get_flags(ple[i]) & _PAGE_PRESENT )
    4.43                      put_shadow_ref(entry_get_pfn(ple[i]));
    4.44 +                if (d->arch.ops->guest_paging_levels == PAGING_L3)
    4.45 +                {
    4.46 +#if CONFIG_PAGING_LEVELS == 4
    4.47 +                    if ( i == PAE_L3_PAGETABLE_ENTRIES && level == PAGING_L4 )
    4.48 +#elif CONFIG_PAGING_LEVELS == 3
    4.49 +                    if ( i == PAE_L3_PAGETABLE_ENTRIES && level == PAGING_L3 )
    4.50 +#endif
    4.51 +                        break;
    4.52 +                }
    4.53 +            }
    4.54          } 
    4.55          else
    4.56          {
    4.57 @@ -622,7 +641,7 @@ void free_shadow_page(unsigned long smfn
    4.58      SH_VVLOG("%s: free'ing smfn=%lx", __func__, smfn);
    4.59  
    4.60      ASSERT( ! IS_INVALID_M2P_ENTRY(gpfn) );
    4.61 -#if CONFIG_PAGING_LEVELS >=4
    4.62 +#if CONFIG_PAGING_LEVELS >= 4
    4.63      if ( type == PGT_fl1_shadow ) 
    4.64      {
    4.65          unsigned long mfn;
    4.66 @@ -630,6 +649,10 @@ void free_shadow_page(unsigned long smfn
    4.67          if ( !mfn )
    4.68              gpfn |= (1UL << 63);
    4.69      }
    4.70 +    if (d->arch.ops->guest_paging_levels == PAGING_L3)
    4.71 +        if (type == PGT_l4_shadow ) {
    4.72 +            gpfn = ((unsigned long)page->tlbflush_timestamp << PGT_score_shift) | gpfn;
    4.73 +        }
    4.74  #endif
    4.75  
    4.76      delete_shadow_status(d, gpfn, gmfn, type);
    4.77 @@ -661,6 +684,7 @@ void free_shadow_page(unsigned long smfn
    4.78      case PGT_l2_shadow:
    4.79      case PGT_l3_shadow:
    4.80      case PGT_l4_shadow:
    4.81 +        gpfn = gpfn & PGT_mfn_mask;
    4.82          shadow_demote(d, gpfn, gmfn);
    4.83          free_shadow_tables(d, smfn, shadow_type_to_level(type));
    4.84          d->arch.shadow_page_count--;
     5.1 --- a/xen/common/page_alloc.c	Sat Feb 18 11:41:42 2006 +0100
     5.2 +++ b/xen/common/page_alloc.c	Sat Feb 18 11:56:13 2006 +0100
     5.3 @@ -539,6 +539,7 @@ struct page_info *alloc_domheap_pages(
     5.4          pg[i].count_info        = 0;
     5.5          pg[i].u.inuse._domain   = 0;
     5.6          pg[i].u.inuse.type_info = 0;
     5.7 +        page_set_owner(&pg[i], NULL);
     5.8      }
     5.9  
    5.10      if ( unlikely(!cpus_empty(mask)) )
     6.1 --- a/xen/include/asm-x86/hvm/hvm.h	Sat Feb 18 11:41:42 2006 +0100
     6.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Sat Feb 18 11:56:13 2006 +0100
     6.3 @@ -61,10 +61,12 @@ struct hvm_function_table {
     6.4       * 1) determine whether the guest is in real or vm8086 mode,
     6.5       * 2) determine whether paging is enabled,
     6.6       * 3) return the length of the instruction that caused an exit.
     6.7 +     * 4) return the current guest control-register value
     6.8       */
     6.9      int (*realmode)(struct vcpu *v);
    6.10      int (*paging_enabled)(struct vcpu *v);
    6.11      int (*instruction_length)(struct vcpu *v);
    6.12 +    unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
    6.13  };
    6.14  
    6.15  extern struct hvm_function_table hvm_funcs;
    6.16 @@ -163,4 +165,12 @@ hvm_instruction_length(struct vcpu *v)
    6.17  {
    6.18      return hvm_funcs.instruction_length(v);
    6.19  }
    6.20 +
    6.21 +static inline unsigned long
    6.22 +hvm_get_guest_ctrl_reg(struct vcpu *v, unsigned int num)
    6.23 +{
    6.24 +    if ( hvm_funcs.get_guest_ctrl_reg )
    6.25 +        return hvm_funcs.get_guest_ctrl_reg(v, num);
    6.26 +    return 0;                   /* force to fail */
    6.27 +}
    6.28  #endif /* __ASM_X86_HVM_HVM_H__ */
     7.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Sat Feb 18 11:41:42 2006 +0100
     7.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Sat Feb 18 11:56:13 2006 +0100
     7.3 @@ -410,6 +410,14 @@ static inline int vmx_paging_enabled(str
     7.4      return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
     7.5  }
     7.6  
     7.7 +static inline int vmx_pgbit_test(struct vcpu *v)
     7.8 +{
     7.9 +    unsigned long cr0;
    7.10 +
    7.11 +    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
    7.12 +    return (cr0 & X86_CR0_PG);
    7.13 +}
    7.14 +
    7.15  static inline int __vmx_inject_exception(struct vcpu *v, int trap, int type, 
    7.16                                           int error_code)
    7.17  {
     8.1 --- a/xen/include/asm-x86/shadow_64.h	Sat Feb 18 11:41:42 2006 +0100
     8.2 +++ b/xen/include/asm-x86/shadow_64.h	Sat Feb 18 11:56:13 2006 +0100
     8.3 @@ -28,6 +28,7 @@
     8.4  #define _XEN_SHADOW_64_H
     8.5  #include <asm/shadow.h>
     8.6  #include <asm/shadow_ops.h>
     8.7 +#include <asm/hvm/hvm.h>
     8.8  
     8.9  /*
    8.10   * The naming convention of the shadow_ops:
    8.11 @@ -37,6 +38,7 @@ extern struct shadow_ops MODE_64_2_HANDL
    8.12  extern struct shadow_ops MODE_64_3_HANDLER;
    8.13  #if CONFIG_PAGING_LEVELS == 4
    8.14  extern struct shadow_ops MODE_64_4_HANDLER;
    8.15 +extern struct shadow_ops MODE_64_PAE_HANDLER;
    8.16  #endif
    8.17  
    8.18  #if CONFIG_PAGING_LEVELS == 3
    8.19 @@ -106,6 +108,15 @@ typedef struct { intpte_t lo; } pgentry_
    8.20  #define PAE_SHADOW_SELF_ENTRY   259
    8.21  #define PAE_L3_PAGETABLE_ENTRIES   4
    8.22  
    8.23 +/******************************************************************************/
    8.24 +/*
    8.25 + * The macro and inlines are for 32-bit PAE guest on 64-bit host
    8.26 + */
    8.27 +#define PAE_CR3_ALIGN       5
    8.28 +#define PAE_CR3_IDX_MASK    0x7f
    8.29 +#define PAE_CR3_IDX_NO      128
    8.30 +
    8.31 +/******************************************************************************/
    8.32  static inline int  table_offset_64(unsigned long va, int level)
    8.33  {
    8.34      switch(level) {
    8.35 @@ -122,10 +133,15 @@ static inline int  table_offset_64(unsig
    8.36  
    8.37  #if CONFIG_PAGING_LEVELS >= 4
    8.38  #ifndef GUEST_PGENTRY_32
    8.39 +#ifndef GUEST_32PAE
    8.40          case 4:
    8.41              return  (((va) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1));
    8.42  #else
    8.43          case 4:
    8.44 +            return PAE_SHADOW_SELF_ENTRY;
    8.45 +#endif
    8.46 +#else
    8.47 +        case 4:
    8.48              return PAE_SHADOW_SELF_ENTRY; 
    8.49  #endif
    8.50  #endif
    8.51 @@ -134,6 +150,55 @@ static inline int  table_offset_64(unsig
    8.52      }
    8.53  }
    8.54  
    8.55 +/*****************************************************************************/
    8.56 +
    8.57 +#if defined( GUEST_32PAE )
    8.58 +static inline int guest_table_offset_64(unsigned long va, int level, unsigned int index)
    8.59 +{
    8.60 +    switch(level) {
    8.61 +        case 1:
    8.62 +            return  (((va) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1));
    8.63 +        case 2:
    8.64 +            return  (((va) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1));
    8.65 +        case 3:
    8.66 +            return  (index * 4 + ((va) >> L3_PAGETABLE_SHIFT));
    8.67 +#if CONFIG_PAGING_LEVELS == 3
    8.68 +        case 4:
    8.69 +            return PAE_SHADOW_SELF_ENTRY;
    8.70 +#endif
    8.71 +
    8.72 +#if CONFIG_PAGING_LEVELS >= 4
    8.73 +#ifndef GUEST_PGENTRY_32
    8.74 +        case 4:
    8.75 +            return  (((va) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1));
    8.76 +#else
    8.77 +        case 4:
    8.78 +            return PAE_SHADOW_SELF_ENTRY;
    8.79 +#endif
    8.80 +#endif
    8.81 +        default:
    8.82 +            return -1;
    8.83 +    }
    8.84 +}
    8.85 +
    8.86 +static inline unsigned long get_cr3_idxval(struct vcpu *v)
    8.87 +{
    8.88 +    unsigned long pae_cr3 = hvm_get_guest_ctrl_reg(v, 3); /* get CR3 */
    8.89 +
    8.90 +    return (pae_cr3 >> PAE_CR3_ALIGN) & PAE_CR3_IDX_MASK;
    8.91 +}
    8.92 +
    8.93 +
    8.94 +#define SH_GUEST_32PAE 1
    8.95 +#else 
    8.96 +#define guest_table_offset_64(va, level, index) \
    8.97 +            table_offset_64((va),(level))
    8.98 +#define get_cr3_idxval(v) 0
    8.99 +#define SH_GUEST_32PAE 0
   8.100 +#endif
   8.101 +
   8.102 +/********************************************************************************/
   8.103 +
   8.104  static inline void free_out_of_sync_state(struct domain *d)
   8.105  {
   8.106      struct out_of_sync_entry *entry;
   8.107 @@ -163,6 +228,9 @@ static inline int __entry(
   8.108      u32 level = flag & L_MASK;
   8.109      struct domain *d = v->domain;
   8.110      int root_level;
   8.111 +    unsigned int base_idx;
   8.112 +
   8.113 +    base_idx = get_cr3_idxval(v);
   8.114  
   8.115      if ( flag & SHADOW_ENTRY )
   8.116      {
   8.117 @@ -173,7 +241,10 @@ static inline int __entry(
   8.118      else if ( flag & GUEST_ENTRY )
   8.119      {
   8.120          root_level = v->domain->arch.ops->guest_paging_levels;
   8.121 -        index = table_offset_64(va, root_level);
   8.122 +        if ( root_level == PAGING_L3 )
   8.123 +            index = guest_table_offset_64(va, PAGING_L3, base_idx);
   8.124 +        else
   8.125 +            index = guest_table_offset_64(va, root_level, base_idx);
   8.126          le_e = (pgentry_64_t *)&v->arch.guest_vtable[index];
   8.127      }
   8.128      else /* direct mode */
   8.129 @@ -199,7 +270,10 @@ static inline int __entry(
   8.130          if ( le_p )
   8.131              unmap_domain_page(le_p);
   8.132          le_p = (pgentry_64_t *)map_domain_page(mfn);
   8.133 -        index = table_offset_64(va, (level + i - 1));
   8.134 +        if ( flag & SHADOW_ENTRY )
   8.135 +            index = table_offset_64(va, (level + i - 1));
   8.136 +        else
   8.137 +            index = guest_table_offset_64(va, (level + i - 1), base_idx);
   8.138          le_e = &le_p[index];
   8.139      }
   8.140
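
A companion note on the guest_table_offset_64() level-3 case above: since a PAE guest's four L3 entries occupy one 32-byte slot, the entry for a virtual address sits at slot_index * 4 + (va >> L3_PAGETABLE_SHIFT) within the page that holds the slots. The standalone illustration below uses made-up sample values; the shift of 30 matches x86_64's L3_PAGETABLE_SHIFT.

#include <stdio.h>

#define L3_PAGETABLE_SHIFT        30   /* each PAE L3 entry covers 1GB       */
#define PAE_L3_PAGETABLE_ENTRIES  4    /* four L3 entries per PAE guest      */

/* Offset, in 8-byte entries, of the guest L3 entry for va within the page
 * holding the guest's L3 slot number 'index' (cf. guest_table_offset_64). */
static int guest_l3_offset(unsigned long va, unsigned int index)
{
    return index * PAE_L3_PAGETABLE_ENTRIES + (int)(va >> L3_PAGETABLE_SHIFT);
}

int main(void)
{
    unsigned long va  = 0xb0000000UL;  /* made-up guest virtual address (2-3GB) */
    unsigned int  idx = 3;             /* made-up 32-byte slot index from CR3   */

    /* va >> 30 == 2, so the entry lives at offset 3*4 + 2 = 14 in the page. */
    printf("guest L3 entry offset = %d\n", guest_l3_offset(va, idx));
    return 0;
}
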