ia64/xen-unstable

changeset 8752:0e7bdd973e17

Build the phys_to_machine_mapping array in Xen rather than
reusing the 1:1 page table built by the domain builder, and establish
the 1:1 direct mapping at runtime a la shadow page tables. Since the
builder constructs the full 1:1 direct mapping (but only for memory
below 4GB), the current implementation wastes memory and cannot support
guests with >=4GB of memory (even for x86-64).
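
In short, the runtime fixup works like the sketch below (condensed from
the 2-level shadow_direct_map_fault added to xen/arch/x86/shadow32.c by
this patch, with error handling omitted; the PAE/x86-64 variant in
shadow.c adds an L3 level on top):

    int shadow_direct_map_fault(unsigned long vpa, struct cpu_user_regs *regs)
    {
        struct vcpu *v = current;
        struct domain *d = v->domain;
        l2_pgentry_t sl2e;
        l1_pgentry_t *spl1e;
        unsigned long mfn, smfn;
        struct page_info *page;

        /* MMIO gpfns have no MFN in the p2m; leave those to handle_mmio(). */
        if ( (mfn = get_mfn_from_gpfn(vpa >> PAGE_SHIFT)) == INVALID_MFN )
            return 0;

        shadow_lock(d);

        /* Allocate a missing L1 table on demand and hook it into the L2. */
        __shadow_get_l2e(v, vpa, &sl2e);
        if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
        {
            page = alloc_domheap_page(NULL);
            smfn = page_to_mfn(page);
            sl2e = l2e_from_pfn(smfn, __PAGE_HYPERVISOR | _PAGE_USER);
            __shadow_set_l2e(v, vpa, sl2e);
        }

        /* Install the identity mapping gpfn -> mfn for the faulting page. */
        spl1e = map_domain_page(l2e_get_pfn(sl2e));
        spl1e[l1_table_offset(vpa)] =
            l1e_from_pfn(mfn, __PAGE_HYPERVISOR | _PAGE_USER);
        unmap_domain_page(spl1e);

        shadow_unlock(d);
        return EXCRET_fault_fixed;
    }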

This is also required for HVM support on PAE hosts. That patch
will be sent soon.

For SVM, I think the SVM code needs matching changes. Please look at the
changes to vmx.c and vmcs.c; they should be straightforward.

Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Xiaohui Xin <xiaohui.xin@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Feb 03 12:02:30 2006 +0100 (2006-02-03)
parents 0bd023cf351e
children ad0c3fa46c76
files tools/libxc/xc_hvm_build.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/shadow.c xen/arch/x86/shadow32.c xen/arch/x86/shadow_public.c xen/include/asm-x86/mm.h xen/include/asm-x86/shadow.h xen/include/asm-x86/shadow_64.h xen/include/asm-x86/shadow_public.h
line diff
     1.1 --- a/tools/libxc/xc_hvm_build.c	Fri Feb 03 11:54:05 2006 +0100
     1.2 +++ b/tools/libxc/xc_hvm_build.c	Fri Feb 03 12:02:30 2006 +0100
     1.3 @@ -168,133 +168,6 @@ static int set_hvm_info(int xc_handle, u
     1.4      return 0;
     1.5  }
     1.6  
     1.7 -#ifdef __i386__
     1.8 -static int zap_mmio_range(int xc_handle, uint32_t dom,
     1.9 -                          l2_pgentry_32_t *vl2tab,
    1.10 -                          unsigned long mmio_range_start,
    1.11 -                          unsigned long mmio_range_size)
    1.12 -{
    1.13 -    unsigned long mmio_addr;
    1.14 -    unsigned long mmio_range_end = mmio_range_start + mmio_range_size;
    1.15 -    unsigned long vl2e;
    1.16 -    l1_pgentry_32_t *vl1tab;
    1.17 -
    1.18 -    mmio_addr = mmio_range_start & PAGE_MASK;
    1.19 -    for (; mmio_addr < mmio_range_end; mmio_addr += PAGE_SIZE) {
    1.20 -        vl2e = vl2tab[l2_table_offset(mmio_addr)];
    1.21 -        if (vl2e == 0)
    1.22 -            continue;
    1.23 -        vl1tab = xc_map_foreign_range(
    1.24 -            xc_handle, dom, PAGE_SIZE,
    1.25 -            PROT_READ|PROT_WRITE, vl2e >> PAGE_SHIFT);
    1.26 -        if ( vl1tab == 0 )
    1.27 -        {
    1.28 -            PERROR("Failed zap MMIO range");
    1.29 -            return -1;
    1.30 -        }
    1.31 -        vl1tab[l1_table_offset(mmio_addr)] = 0;
    1.32 -        munmap(vl1tab, PAGE_SIZE);
    1.33 -    }
    1.34 -    return 0;
    1.35 -}
    1.36 -
    1.37 -static int zap_mmio_ranges(int xc_handle, uint32_t dom, unsigned long l2tab,
    1.38 -                           unsigned char e820_map_nr, unsigned char *e820map)
    1.39 -{
    1.40 -    unsigned int i;
    1.41 -    struct e820entry *e820entry = (struct e820entry *)e820map;
    1.42 -
    1.43 -    l2_pgentry_32_t *vl2tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
    1.44 -                                                   PROT_READ|PROT_WRITE,
    1.45 -                                                   l2tab >> PAGE_SHIFT);
    1.46 -    if ( vl2tab == 0 )
    1.47 -        return -1;
    1.48 -
    1.49 -    for ( i = 0; i < e820_map_nr; i++ )
    1.50 -    {
    1.51 -        if ( (e820entry[i].type == E820_IO) &&
    1.52 -             (zap_mmio_range(xc_handle, dom, vl2tab,
    1.53 -                             e820entry[i].addr, e820entry[i].size) == -1))
    1.54 -            return -1;
    1.55 -    }
    1.56 -
    1.57 -    munmap(vl2tab, PAGE_SIZE);
    1.58 -    return 0;
    1.59 -}
    1.60 -#else
    1.61 -static int zap_mmio_range(int xc_handle, uint32_t dom,
    1.62 -                          l3_pgentry_t *vl3tab,
    1.63 -                          unsigned long mmio_range_start,
    1.64 -                          unsigned long mmio_range_size)
    1.65 -{
    1.66 -    unsigned long mmio_addr;
    1.67 -    unsigned long mmio_range_end = mmio_range_start + mmio_range_size;
    1.68 -    unsigned long vl2e = 0;
    1.69 -    unsigned long vl3e;
    1.70 -    l1_pgentry_t *vl1tab;
    1.71 -    l2_pgentry_t *vl2tab;
    1.72 -
    1.73 -    mmio_addr = mmio_range_start & PAGE_MASK;
    1.74 -    for ( ; mmio_addr < mmio_range_end; mmio_addr += PAGE_SIZE )
    1.75 -    {
    1.76 -        vl3e = vl3tab[l3_table_offset(mmio_addr)];
    1.77 -        if ( vl3e == 0 )
    1.78 -            continue;
    1.79 -
    1.80 -        vl2tab = xc_map_foreign_range(
    1.81 -            xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, vl3e>>PAGE_SHIFT);
    1.82 -        if ( vl2tab == NULL )
    1.83 -        {
    1.84 -            PERROR("Failed zap MMIO range");
    1.85 -            return -1;
    1.86 -        }
    1.87 -
    1.88 -        vl2e = vl2tab[l2_table_offset(mmio_addr)];
    1.89 -        if ( vl2e == 0 )
    1.90 -        {
    1.91 -            munmap(vl2tab, PAGE_SIZE);
    1.92 -            continue;
    1.93 -        }
    1.94 -
    1.95 -        vl1tab = xc_map_foreign_range(
    1.96 -            xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, vl2e>>PAGE_SHIFT);
    1.97 -        if ( vl1tab == NULL )
    1.98 -        {
    1.99 -            PERROR("Failed zap MMIO range");
   1.100 -            munmap(vl2tab, PAGE_SIZE);
   1.101 -            return -1;
   1.102 -        }
   1.103 -
   1.104 -        vl1tab[l1_table_offset(mmio_addr)] = 0;
   1.105 -        munmap(vl2tab, PAGE_SIZE);
   1.106 -        munmap(vl1tab, PAGE_SIZE);
   1.107 -    }
   1.108 -    return 0;
   1.109 -}
   1.110 -
   1.111 -static int zap_mmio_ranges(int xc_handle, uint32_t dom, unsigned long l3tab,
   1.112 -                           unsigned char e820_map_nr, unsigned char *e820map)
   1.113 -{
   1.114 -    unsigned int i;
   1.115 -    struct e820entry *e820entry = (struct e820entry *)e820map;
   1.116 -
   1.117 -    l3_pgentry_t *vl3tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   1.118 -                                                PROT_READ|PROT_WRITE,
   1.119 -                                                l3tab >> PAGE_SHIFT);
   1.120 -    if (vl3tab == 0)
   1.121 -        return -1;
   1.122 -    for ( i = 0; i < e820_map_nr; i++ ) {
   1.123 -        if ( (e820entry[i].type == E820_IO) &&
   1.124 -             (zap_mmio_range(xc_handle, dom, vl3tab,
   1.125 -                             e820entry[i].addr, e820entry[i].size) == -1) )
   1.126 -            return -1;
   1.127 -    }
   1.128 -    munmap(vl3tab, PAGE_SIZE);
   1.129 -    return 0;
   1.130 -}
   1.131 -
   1.132 -#endif
   1.133 -
   1.134  static int setup_guest(int xc_handle,
   1.135                         uint32_t dom, int memsize,
   1.136                         char *image, unsigned long image_size,
   1.137 @@ -308,15 +181,8 @@ static int setup_guest(int xc_handle,
   1.138                         unsigned int store_evtchn,
   1.139                         unsigned long *store_mfn)
   1.140  {
   1.141 -    l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
   1.142 -    l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
   1.143      unsigned long *page_array = NULL;
   1.144 -#ifdef __x86_64__
   1.145 -    l3_pgentry_t *vl3tab=NULL;
   1.146 -    unsigned long l3tab;
   1.147 -#endif
   1.148 -    unsigned long l2tab = 0;
   1.149 -    unsigned long l1tab = 0;
   1.150 +
   1.151      unsigned long count, i;
   1.152      shared_info_t *shared_info;
   1.153      void *e820_page;
   1.154 @@ -325,7 +191,6 @@ static int setup_guest(int xc_handle,
   1.155      int rc;
   1.156  
   1.157      unsigned long nr_pt_pages;
   1.158 -    unsigned long ppt_alloc;
   1.159  
   1.160      struct domain_setup_info dsi;
   1.161      unsigned long vpt_start;
   1.162 @@ -391,120 +256,6 @@ static int setup_guest(int xc_handle,
   1.163      if ( (mmu = xc_init_mmu_updates(xc_handle, dom)) == NULL )
   1.164          goto error_out;
   1.165  
   1.166 -    /* First allocate page for page dir or pdpt */
   1.167 -    ppt_alloc = vpt_start >> PAGE_SHIFT;
   1.168 -    if ( page_array[ppt_alloc] > 0xfffff )
   1.169 -    {
   1.170 -        unsigned long nmfn;
   1.171 -        nmfn = xc_make_page_below_4G( xc_handle, dom, page_array[ppt_alloc] );
   1.172 -        if ( nmfn == 0 )
   1.173 -        {
   1.174 -            fprintf(stderr, "Couldn't get a page below 4GB :-(\n");
   1.175 -            goto error_out;
   1.176 -        }
   1.177 -        page_array[ppt_alloc] = nmfn;
   1.178 -    }
   1.179 -
   1.180 -#ifdef __i386__
   1.181 -    l2tab = page_array[ppt_alloc++] << PAGE_SHIFT;
   1.182 -    ctxt->ctrlreg[3] = l2tab;
   1.183 -
   1.184 -    if ( (vl2tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   1.185 -                                        PROT_READ|PROT_WRITE,
   1.186 -                                        l2tab >> PAGE_SHIFT)) == NULL )
   1.187 -        goto error_out;
   1.188 -    memset(vl2tab, 0, PAGE_SIZE);
   1.189 -    vl2e = &vl2tab[l2_table_offset(0)];
   1.190 -    for ( count = 0; count < (v_end >> PAGE_SHIFT); count++ )
   1.191 -    {
   1.192 -        if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
   1.193 -        {
   1.194 -            l1tab = page_array[ppt_alloc++] << PAGE_SHIFT;
   1.195 -            if ( vl1tab != NULL )
   1.196 -                munmap(vl1tab, PAGE_SIZE);
   1.197 -            if ( (vl1tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   1.198 -                                                PROT_READ|PROT_WRITE,
   1.199 -                                                l1tab >> PAGE_SHIFT)) == NULL )
   1.200 -            {
   1.201 -                munmap(vl2tab, PAGE_SIZE);
   1.202 -                goto error_out;
   1.203 -            }
   1.204 -            memset(vl1tab, 0, PAGE_SIZE);
   1.205 -            vl1e = &vl1tab[l1_table_offset(count << PAGE_SHIFT)];
   1.206 -            *vl2e++ = l1tab | L2_PROT;
   1.207 -        }
   1.208 -
   1.209 -        *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
   1.210 -        vl1e++;
   1.211 -    }
   1.212 -    munmap(vl1tab, PAGE_SIZE);
   1.213 -    munmap(vl2tab, PAGE_SIZE);
   1.214 -#else
   1.215 -    l3tab = page_array[ppt_alloc++] << PAGE_SHIFT;
   1.216 -    ctxt->ctrlreg[3] = l3tab;
   1.217 -
   1.218 -    if ( (vl3tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   1.219 -                                        PROT_READ|PROT_WRITE,
   1.220 -                                        l3tab >> PAGE_SHIFT)) == NULL )
   1.221 -        goto error_out;
   1.222 -    memset(vl3tab, 0, PAGE_SIZE);
   1.223 -
   1.224 -    /* Fill in every PDPT entry. */
   1.225 -    for ( i = 0; i < L3_PAGETABLE_ENTRIES_PAE; i++ )
   1.226 -    {
   1.227 -        l2tab = page_array[ppt_alloc++] << PAGE_SHIFT;
   1.228 -        if ( (vl2tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   1.229 -                                            PROT_READ|PROT_WRITE,
   1.230 -                                            l2tab >> PAGE_SHIFT)) == NULL )
   1.231 -            goto error_out;
   1.232 -        memset(vl2tab, 0, PAGE_SIZE);
   1.233 -        munmap(vl2tab, PAGE_SIZE);
   1.234 -        vl2tab = NULL;
   1.235 -        vl3tab[i] = l2tab | L3_PROT;
   1.236 -    }
   1.237 -
   1.238 -    for ( count = 0; count < (v_end >> PAGE_SHIFT); count++ )
   1.239 -    {
   1.240 -        if ( !(count & ((1 << (L3_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT)) - 1)) )
   1.241 -        {
   1.242 -            l2tab = vl3tab[count >> (L3_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT)]
   1.243 -                    & PAGE_MASK;
   1.244 -
   1.245 -            if (vl2tab != NULL)
   1.246 -                munmap(vl2tab, PAGE_SIZE);
   1.247 -
   1.248 -            if ( (vl2tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   1.249 -                                                PROT_READ|PROT_WRITE,
   1.250 -                                                l2tab >> PAGE_SHIFT)) == NULL )
   1.251 -                goto error_out;
   1.252 -
   1.253 -            vl2e = &vl2tab[l2_table_offset(count << PAGE_SHIFT)];
   1.254 -        }
   1.255 -        if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
   1.256 -        {
   1.257 -            l1tab = page_array[ppt_alloc++] << PAGE_SHIFT;
   1.258 -            if ( vl1tab != NULL )
   1.259 -                munmap(vl1tab, PAGE_SIZE);
   1.260 -            if ( (vl1tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   1.261 -                                                PROT_READ|PROT_WRITE,
   1.262 -                                                l1tab >> PAGE_SHIFT)) == NULL )
   1.263 -            {
   1.264 -                munmap(vl2tab, PAGE_SIZE);
   1.265 -                goto error_out;
   1.266 -            }
   1.267 -            memset(vl1tab, 0, PAGE_SIZE);
   1.268 -            vl1e = &vl1tab[l1_table_offset(count << PAGE_SHIFT)];
   1.269 -            *vl2e++ = l1tab | L2_PROT;
   1.270 -        }
   1.271 -
   1.272 -        *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
   1.273 -        vl1e++;
   1.274 -    }
   1.275 -
   1.276 -    munmap(vl1tab, PAGE_SIZE);
   1.277 -    munmap(vl2tab, PAGE_SIZE);
   1.278 -    munmap(vl3tab, PAGE_SIZE);
   1.279 -#endif
   1.280      /* Write the machine->phys table entries. */
   1.281      for ( count = 0; count < nr_pages; count++ )
   1.282      {
   1.283 @@ -525,14 +276,6 @@ static int setup_guest(int xc_handle,
   1.284          goto error_out;
   1.285      memset(e820_page, 0, PAGE_SIZE);
   1.286      e820_map_nr = build_e820map(e820_page, v_end);
   1.287 -#if defined (__i386__)
   1.288 -    if (zap_mmio_ranges(xc_handle, dom, l2tab, e820_map_nr,
   1.289 -                        ((unsigned char *)e820_page) + E820_MAP_OFFSET) == -1)
   1.290 -#else
   1.291 -    if (zap_mmio_ranges(xc_handle, dom, l3tab, e820_map_nr,
   1.292 -                        ((unsigned char *)e820_page) + E820_MAP_OFFSET) == -1)
   1.293 -#endif
   1.294 -        goto error_out;
   1.295      munmap(e820_page, PAGE_SIZE);
   1.296  
   1.297      /* shared_info page starts its life empty. */
     2.1 --- a/xen/arch/x86/hvm/hvm.c	Fri Feb 03 11:54:05 2006 +0100
     2.2 +++ b/xen/arch/x86/hvm/hvm.c	Fri Feb 03 12:02:30 2006 +0100
     2.3 @@ -53,6 +53,20 @@ integer_param("hvm_debug", opt_hvm_debug
     2.4  
     2.5  struct hvm_function_table hvm_funcs;
     2.6  
     2.7 +static void vmx_zap_mmio_range(
     2.8 +    struct domain *d, unsigned long pfn, unsigned long nr_pfn)
     2.9 +{
    2.10 +    unsigned long i, val = INVALID_MFN;
    2.11 +
    2.12 +    for ( i = 0; i < nr_pfn; i++ )
    2.13 +    {
    2.14 +        if ( pfn + i >= 0xfffff ) 
    2.15 +            break;
    2.16 +        
    2.17 +        __copy_to_user(&phys_to_machine_mapping[pfn + i], &val, sizeof (val));
    2.18 +    }
    2.19 +}
    2.20 +
    2.21  static void hvm_map_io_shared_page(struct domain *d)
    2.22  {
    2.23      int i;
    2.24 @@ -84,8 +98,12 @@ static void hvm_map_io_shared_page(struc
    2.25          if (e820entry[i].type == E820_SHARED_PAGE)
    2.26          {
    2.27              gpfn = (e820entry[i].addr >> PAGE_SHIFT);
    2.28 -            break;
    2.29          }
    2.30 +        if ( e820entry[i].type == E820_IO )
    2.31 +            vmx_zap_mmio_range(
    2.32 +                d, 
    2.33 +                e820entry[i].addr >> PAGE_SHIFT,
    2.34 +                e820entry[i].size >> PAGE_SHIFT);
    2.35      }
    2.36  
    2.37      if ( gpfn == 0 ) {
     3.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Fri Feb 03 11:54:05 2006 +0100
     3.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Fri Feb 03 12:02:30 2006 +0100
     3.3 @@ -34,7 +34,8 @@
     3.4  #include <asm/flushtlb.h>
     3.5  #include <xen/event.h>
     3.6  #include <xen/kernel.h>
     3.7 -#if CONFIG_PAGING_LEVELS >= 4
     3.8 +#include <asm/shadow.h>
     3.9 +#if CONFIG_PAGING_LEVELS >= 3
    3.10  #include <asm/shadow_64.h>
    3.11  #endif
    3.12  
    3.13 @@ -218,6 +219,7 @@ static void vmx_do_launch(struct vcpu *v
    3.14      error |= __vmwrite(GUEST_TR_BASE, 0);
    3.15      error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
    3.16  
    3.17 +    shadow_direct_map_init(v);
    3.18      __vmwrite(GUEST_CR3, pagetable_get_paddr(v->domain->arch.phys_table));
    3.19      __vmwrite(HOST_CR3, pagetable_get_paddr(v->arch.monitor_table));
    3.20      __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
     4.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Fri Feb 03 11:54:05 2006 +0100
     4.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Fri Feb 03 12:02:30 2006 +0100
     4.3 @@ -563,7 +563,12 @@ static int vmx_do_page_fault(unsigned lo
     4.4      }
     4.5  #endif
     4.6  
     4.7 -    if (!vmx_paging_enabled(current)){
     4.8 +    if ( !vmx_paging_enabled(current) )
     4.9 +    {
    4.10 +        /* construct 1-to-1 direct mapping */
    4.11 +        if ( shadow_direct_map_fault(va, regs) ) 
    4.12 +            return 1;
    4.13 +
    4.14          handle_mmio(va, va);
    4.15          TRACE_VMEXIT (2,2);
    4.16          return 1;
    4.17 @@ -1213,6 +1218,9 @@ static int vmx_set_cr0(unsigned long val
    4.18              }
    4.19          }
    4.20  #endif
    4.21 +#if CONFIG_PAGING_LEVELS == 2
    4.22 +        shadow_direct_map_clean(v);
    4.23 +#endif
    4.24          /*
    4.25           * Now arch.guest_table points to machine physical.
    4.26           */
     5.1 --- a/xen/arch/x86/shadow.c	Fri Feb 03 11:54:05 2006 +0100
     5.2 +++ b/xen/arch/x86/shadow.c	Fri Feb 03 12:02:30 2006 +0100
     5.3 @@ -2858,7 +2858,7 @@ static inline unsigned long init_bl2(l4_
     5.4      if (!page)
     5.5          domain_crash_synchronous();
     5.6  
     5.7 -    for (count = 0; count < PDP_ENTRIES; count++)
     5.8 +    for ( count = 0; count < PAE_L3_PAGETABLE_ENTRIES; count++ )
     5.9      {
    5.10          sl2mfn = page_to_mfn(page+count);
    5.11          l2 = map_domain_page(sl2mfn);
    5.12 @@ -3568,6 +3568,7 @@ static void shadow_invlpg_64(struct vcpu
    5.13      shadow_unlock(d);
    5.14  }
    5.15  
    5.16 +
    5.17  #if CONFIG_PAGING_LEVELS == 4
    5.18  static unsigned long gva_to_gpa_64(unsigned long gva)
    5.19  {
    5.20 @@ -3637,6 +3638,79 @@ struct shadow_ops MODE_B_HANDLER = {
    5.21  
    5.22  #endif
    5.23  
    5.24 +#if CONFIG_PAGING_LEVELS == 3 ||                                \
    5.25 +    ( CONFIG_PAGING_LEVELS == 4 && defined (GUEST_PGENTRY_32) )
    5.26 +
    5.27 +/* 
    5.28 + * Use GUEST_PGENTRY_32 to force PAE_SHADOW_SELF_ENTRY for L4.
    5.29 + *
    5.30 + * Very simple shadow code to handle 1:1 direct mapping for guest 
    5.31 + * non-paging code, which actually is running in PAE/vm86 mode with 
    5.32 + * paging-enabled.
    5.33 + *
    5.34 + * We expect that the top level (L3) page has been allocated and initialized.
    5.35 + */
    5.36 +int shadow_direct_map_fault(unsigned long vpa, struct cpu_user_regs *regs)
    5.37 +{
    5.38 +    struct vcpu *v = current;
    5.39 +    struct domain *d = v->domain;
    5.40 +    l3_pgentry_t sl3e;
    5.41 +    l2_pgentry_t sl2e;
    5.42 +    l1_pgentry_t sl1e;
    5.43 +    unsigned long mfn, smfn;
    5.44 +    struct page_info *page;
    5.45 +
    5.46 +    /*
    5.47 +     * If the faulting address is within the MMIO range, we continue
    5.48 +     * on handling the #PF as such.
    5.49 +     */
    5.50 +    if ( (mfn = get_mfn_from_gpfn(vpa >> PAGE_SHIFT)) == INVALID_MFN )
    5.51 +    {
    5.52 +         goto fail;
    5.53 +    }
    5.54 +
    5.55 +    shadow_lock(d);
    5.56 +
    5.57 +    __shadow_get_l3e(v, vpa, &sl3e);
    5.58 +
    5.59 +    if ( !(l3e_get_flags(sl3e) & _PAGE_PRESENT) ) 
    5.60 +    {
    5.61 +        page = alloc_domheap_page(NULL);
    5.62 +        if ( !page )
    5.63 +            goto fail; 
    5.64 +        smfn = page_to_mfn(page);
    5.65 +        sl3e = l3e_from_pfn(smfn, _PAGE_PRESENT);
    5.66 +        __shadow_set_l3e(v, vpa, &sl3e);
    5.67 +    }
    5.68 +
    5.69 +    __shadow_get_l2e(v, vpa, &sl2e);
    5.70 +
    5.71 +    if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) ) 
    5.72 +    {
    5.73 +        page = alloc_domheap_page(NULL);
    5.74 +        if ( !page )
    5.75 +            goto fail; 
    5.76 +        smfn = page_to_mfn(page);
    5.77 +
    5.78 +        sl2e = l2e_from_pfn(smfn, __PAGE_HYPERVISOR | _PAGE_USER);
    5.79 +        __shadow_set_l2e(v, vpa, &sl2e);
    5.80 +    }
    5.81 +
    5.82 +    __shadow_get_l1e(v, vpa, &sl1e);
    5.83 +        
    5.84 +    if ( !(l1e_get_flags(sl1e) & _PAGE_PRESENT) ) 
    5.85 +    {
    5.86 +        sl1e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR | _PAGE_USER);
    5.87 +        __shadow_set_l1e(v, vpa, &sl1e);
    5.88 +    } 
    5.89 +
    5.90 +    shadow_unlock(d);
    5.91 +    return EXCRET_fault_fixed;
    5.92 +
    5.93 +fail:
    5.94 +    return 0;
    5.95 +}
    5.96 +#endif
    5.97  
    5.98  /*
    5.99   * Local variables:
     6.1 --- a/xen/arch/x86/shadow32.c	Fri Feb 03 11:54:05 2006 +0100
     6.2 +++ b/xen/arch/x86/shadow32.c	Fri Feb 03 12:02:30 2006 +0100
     6.3 @@ -43,6 +43,8 @@ static void free_writable_pte_prediction
     6.4  static void mark_shadows_as_reflecting_snapshot(struct domain *d, unsigned long gpfn);
     6.5  #endif
     6.6  
     6.7 +static void free_p2m_table(struct vcpu *v);
     6.8 +
     6.9  /********
    6.10  
    6.11  There's a per-domain shadow table spin lock which works fine for SMP
    6.12 @@ -746,19 +748,18 @@ static void alloc_monitor_pagetable(stru
    6.13              l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt) + i,
    6.14                            __PAGE_HYPERVISOR);
    6.15  
    6.16 -    // map the phys_to_machine map into the Read-Only MPT space for this domain
    6.17 -    mpl2e[l2_table_offset(RO_MPT_VIRT_START)] =
    6.18 -        l2e_from_paddr(pagetable_get_paddr(d->arch.phys_table),
    6.19 -                        __PAGE_HYPERVISOR);
    6.20 -
    6.21      // Don't (yet) have mappings for these...
    6.22      // Don't want to accidentally see the idle_pg_table's linear mapping.
    6.23      //
    6.24      mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = l2e_empty();
    6.25      mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = l2e_empty();
    6.26 +    mpl2e[l2_table_offset(RO_MPT_VIRT_START)] = l2e_empty();
    6.27  
    6.28      v->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT);
    6.29      v->arch.monitor_vtable = mpl2e;
    6.30 +
    6.31 +    if ( v->vcpu_id == 0 )
    6.32 +        alloc_p2m_table(d);
    6.33  }
    6.34  
    6.35  /*
    6.36 @@ -792,6 +793,9 @@ void free_monitor_pagetable(struct vcpu 
    6.37          put_shadow_ref(mfn);
    6.38      }
    6.39  
    6.40 +    if ( v->vcpu_id == 0 )
    6.41 +        free_p2m_table(v);
    6.42 +
    6.43      /*
    6.44       * Then free monitor_table.
    6.45       */
    6.46 @@ -844,67 +848,209 @@ set_p2m_entry(struct domain *d, unsigned
    6.47      return 1;
    6.48  }
    6.49  
    6.50 -static int
    6.51 +int
    6.52  alloc_p2m_table(struct domain *d)
    6.53  {
    6.54      struct list_head *list_ent;
    6.55 -    struct page_info *page, *l2page;
    6.56 -    l2_pgentry_t *l2;
    6.57 -    unsigned long mfn, pfn;
    6.58 -    struct domain_mmap_cache l1cache, l2cache;
    6.59 -
    6.60 -    l2page = alloc_domheap_page(NULL);
    6.61 -    if ( l2page == NULL )
    6.62 -        return 0;
    6.63 -
    6.64 -    domain_mmap_cache_init(&l1cache);
    6.65 -    domain_mmap_cache_init(&l2cache);
    6.66 -
    6.67 -    d->arch.phys_table = mk_pagetable(page_to_maddr(l2page));
    6.68 -    l2 = map_domain_page_with_cache(page_to_mfn(l2page), &l2cache);
    6.69 -    memset(l2, 0, PAGE_SIZE);
    6.70 -    unmap_domain_page_with_cache(l2, &l2cache);
    6.71 +    unsigned long va = RO_MPT_VIRT_START;   /* phys_to_machine_mapping */
    6.72 +
    6.73 +    l2_pgentry_t *l2tab = NULL;
    6.74 +    l1_pgentry_t *l1tab = NULL;
    6.75 +    unsigned long *l0tab = NULL;
    6.76 +    l2_pgentry_t l2e = { 0 };
    6.77 +    l1_pgentry_t l1e = { 0 };
    6.78 +
    6.79 +    unsigned long pfn;
    6.80 +    int i;
    6.81 +
    6.82 +    ASSERT ( pagetable_get_pfn(d->vcpu[0]->arch.monitor_table));
    6.83 +
    6.84 +    l2tab = map_domain_page(
    6.85 +        pagetable_get_pfn(d->vcpu[0]->arch.monitor_table));
    6.86  
    6.87      list_ent = d->page_list.next;
    6.88 -    while ( list_ent != &d->page_list )
    6.89 +
    6.90 +    for ( i = 0; list_ent != &d->page_list; i++ )
    6.91      {
    6.92 +        struct page_info *page;
    6.93          page = list_entry(list_ent, struct page_info, list);
    6.94 -        mfn = page_to_mfn(page);
    6.95 -        pfn = get_gpfn_from_mfn(mfn);
    6.96 -        ASSERT(pfn != INVALID_M2P_ENTRY);
    6.97 -        ASSERT(pfn < (1u<<20));
    6.98 -
    6.99 -        set_p2m_entry(d, pfn, mfn, &l2cache, &l1cache);
   6.100 -
   6.101 -        list_ent = page->list.next;
   6.102 +        pfn = page_to_mfn(page);
   6.103 +
   6.104 +        l2e = l2tab[l2_table_offset(va)];
   6.105 +        if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
   6.106 +        {
   6.107 +            page = alloc_domheap_page(NULL);
   6.108 +
   6.109 +            if ( !l1tab )
   6.110 +                unmap_domain_page(l1tab);
   6.111 +            l1tab = map_domain_page(page_to_mfn(page));
   6.112 +            memset(l1tab, 0, PAGE_SIZE);
   6.113 +            l2e = l2tab[l2_table_offset(va)] =
   6.114 +                l2e_from_page(page, __PAGE_HYPERVISOR);
   6.115 +        }
   6.116 +        else if ( l1tab == NULL)
   6.117 +            l1tab = map_domain_page(l2e_get_pfn(l2e));
   6.118 +
   6.119 +        l1e = l1tab[l1_table_offset(va)];
   6.120 +        if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
   6.121 +        {
   6.122 +            page = alloc_domheap_page(NULL);
   6.123 +            if ( !l0tab  )
   6.124 +                unmap_domain_page(l0tab);
   6.125 +            l0tab = map_domain_page(page_to_mfn(page));
   6.126 +            memset(l0tab, 0, PAGE_SIZE );
   6.127 +            l1e = l1tab[l1_table_offset(va)] =
   6.128 +                l1e_from_page(page, __PAGE_HYPERVISOR);
   6.129 +        }
   6.130 +        else if ( l0tab == NULL)
   6.131 +            l0tab = map_domain_page(l1e_get_pfn(l1e));
   6.132 +
   6.133 +        l0tab[i & ((1 << PAGETABLE_ORDER) - 1) ] = pfn;
   6.134 +        list_ent = frame_table[pfn].list.next;
   6.135 +        va += sizeof(pfn);
   6.136      }
   6.137  
   6.138 -    list_ent = d->xenpage_list.next;
   6.139 -    while ( list_ent != &d->xenpage_list )
   6.140 -    {
   6.141 -        page = list_entry(list_ent, struct page_info, list);
   6.142 -        mfn = page_to_mfn(page);
   6.143 -        pfn = get_gpfn_from_mfn(mfn);
   6.144 -        if ( (pfn != INVALID_M2P_ENTRY) &&
   6.145 -             (pfn < (1u<<20)) )
   6.146 -        {
   6.147 -            set_p2m_entry(d, pfn, mfn, &l2cache, &l1cache);
   6.148 -        }
   6.149 -
   6.150 -        list_ent = page->list.next;
   6.151 -    }
   6.152 -
   6.153 -    domain_mmap_cache_destroy(&l2cache);
   6.154 -    domain_mmap_cache_destroy(&l1cache);
   6.155 +    unmap_domain_page(l2tab);
   6.156 +    unmap_domain_page(l1tab);
   6.157 +    unmap_domain_page(l0tab);
   6.158  
   6.159      return 1;
   6.160  }
   6.161  
   6.162 -static void
   6.163 -free_p2m_table(struct domain *d)
   6.164 +static void 
   6.165 +free_p2m_table(struct vcpu *v)
   6.166  {
   6.167 -    // uh, this needs some work...  :)
   6.168 -    BUG();
   6.169 +    unsigned long va;
   6.170 +    l2_pgentry_t *l2tab;
   6.171 +    l1_pgentry_t *l1tab;
   6.172 +    l2_pgentry_t l2e;
   6.173 +    l1_pgentry_t l1e;
   6.174 +
   6.175 +    ASSERT ( pagetable_get_pfn(v->arch.monitor_table) );
   6.176 +
   6.177 +    l2tab = map_domain_page(
   6.178 +        pagetable_get_pfn(v->arch.monitor_table));
   6.179 +
   6.180 +    for ( va = RO_MPT_VIRT_START; va < RO_MPT_VIRT_END; )
   6.181 +    {
   6.182 +        int i;
   6.183 +
   6.184 +        l2e = l2tab[l2_table_offset(va)];
   6.185 +        if ( l2e_get_flags(l2e) & _PAGE_PRESENT )
   6.186 +        {
   6.187 +            l1tab = map_domain_page(l2e_get_pfn(l2e));
   6.188 +            for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++)
   6.189 +            {
   6.190 +                l1e = l1tab[l1_table_offset(va)];
   6.191 +
   6.192 +                if ( l1e_get_flags(l1e) & _PAGE_PRESENT )
   6.193 +                    free_domheap_page(mfn_to_page(l1e_get_pfn(l1e)));
   6.194 +                va += PAGE_SIZE; 
   6.195 +            }
   6.196 +            unmap_domain_page(l1tab);
   6.197 +            free_domheap_page(mfn_to_page(l2e_get_pfn(l2e)));
   6.198 +        }
   6.199 +    }
   6.200 +    unmap_domain_page(l2tab);
   6.201 +}
   6.202 +
   6.203 +int shadow_direct_map_fault(unsigned long vpa, struct cpu_user_regs *regs)
   6.204 +{
   6.205 +    struct vcpu *v = current;
   6.206 +    struct domain *d = v->domain;
   6.207 +    l2_pgentry_t sl2e;
   6.208 +    l1_pgentry_t sl1e;
   6.209 +    l1_pgentry_t *sple = NULL;
   6.210 +    unsigned long mfn, smfn;
   6.211 +    struct page_info *page;
   6.212 +
   6.213 +    /*
   6.214 +     * If the faulting address is within the MMIO range, we continue
   6.215 +     * on handling the #PF as such.
   6.216 +     */
   6.217 +    if ( (mfn = get_mfn_from_gpfn(vpa >> PAGE_SHIFT)) == INVALID_MFN )
   6.218 +    {
   6.219 +         goto fail;
   6.220 +    }
   6.221 +
   6.222 +    shadow_lock(d);
   6.223 +
   6.224 +    __shadow_get_l2e(v, vpa, &sl2e);
   6.225 +
   6.226 +   if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
   6.227 +    {
   6.228 +        page = alloc_domheap_page(NULL);
   6.229 +        if ( !page )
   6.230 +            goto fail;
   6.231 +
   6.232 +        smfn = page_to_mfn(page);
   6.233 +        sl2e = l2e_from_pfn(smfn, __PAGE_HYPERVISOR | _PAGE_USER);
   6.234 +        __shadow_set_l2e(v, vpa, sl2e);
   6.235 +    }
   6.236 +
   6.237 +    sple = (l1_pgentry_t *)map_domain_page(l2e_get_pfn(sl2e));
   6.238 +    sl1e = sple[l1_table_offset(vpa)];
   6.239 +
   6.240 +    if ( !(l1e_get_flags(sl1e) & _PAGE_PRESENT) )
   6.241 +    {
   6.242 +        sl1e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR | _PAGE_USER);
   6.243 +        sple[l1_table_offset(vpa)] = sl1e;
   6.244 +    }
   6.245 +    unmap_domain_page(sple);
   6.246 +    shadow_unlock(d);
   6.247 +
   6.248 +    return EXCRET_fault_fixed;
   6.249 +
   6.250 +fail:
   6.251 +    return 0;
   6.252 +}
   6.253 +
   6.254 +
   6.255 +int shadow_direct_map_init(struct vcpu *v)
   6.256 +{
   6.257 +    struct page_info *page;
   6.258 +    l2_pgentry_t *root;
   6.259 +
   6.260 +    if ( !(page = alloc_domheap_page(NULL)) )
   6.261 +        goto fail;
   6.262 +
   6.263 +    root = map_domain_page_global(page_to_mfn(page));
   6.264 +    memset(root, 0, PAGE_SIZE);
   6.265 +
   6.266 +    v->domain->arch.phys_table = mk_pagetable(page_to_maddr(page));
   6.267 +    /* 
   6.268 +     * We need to set shadow_vtable to get __shadow_set/get_xxx
   6.269 +     * working
   6.270 +     */
   6.271 +    v->arch.shadow_vtable = (l2_pgentry_t *) root;
   6.272 +    v->arch.shadow_table = mk_pagetable(0);
   6.273 +    return 1;
   6.274 +
   6.275 +fail:
   6.276 +    return 0;
   6.277 +}
   6.278 +
   6.279 +void shadow_direct_map_clean(struct vcpu *v)
   6.280 +{
   6.281 +    int i;
   6.282 +    l2_pgentry_t *l2e;
   6.283 +
   6.284 +    ASSERT ( v->arch.shadow_vtable );
   6.285 +
   6.286 +    l2e = v->arch.shadow_vtable;
   6.287 +
   6.288 +    for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
   6.289 +    {
   6.290 +        if ( l2e_get_flags(l2e[i]) & _PAGE_PRESENT )
   6.291 +            free_domheap_page(mfn_to_page(l2e_get_pfn(l2e[i])));
   6.292 +    }
   6.293 +
   6.294 +    free_domheap_page(
   6.295 +            mfn_to_page(pagetable_get_pfn(v->domain->arch.phys_table)));
   6.296 +
   6.297 +    unmap_domain_page_global(v->arch.shadow_vtable);
   6.298 +    v->arch.shadow_vtable = 0;
   6.299 +    v->domain->arch.phys_table = mk_pagetable(0);
   6.300  }
   6.301  
   6.302  int __shadow_mode_enable(struct domain *d, unsigned int mode)
   6.303 @@ -1092,11 +1238,7 @@ int __shadow_mode_enable(struct domain *
   6.304          xfree(d->arch.shadow_dirty_bitmap);
   6.305          d->arch.shadow_dirty_bitmap = NULL;
   6.306      }
   6.307 -    if ( (new_modes & SHM_translate) && !(new_modes & SHM_external) &&
   6.308 -         pagetable_get_paddr(d->arch.phys_table) )
   6.309 -    {
   6.310 -        free_p2m_table(d);
   6.311 -    }
   6.312 +
   6.313      return -ENOMEM;
   6.314  }
   6.315  
     7.1 --- a/xen/arch/x86/shadow_public.c	Fri Feb 03 11:54:05 2006 +0100
     7.2 +++ b/xen/arch/x86/shadow_public.c	Fri Feb 03 12:02:30 2006 +0100
     7.3 @@ -37,11 +37,74 @@
     7.4  #if CONFIG_PAGING_LEVELS == 4
     7.5  extern struct shadow_ops MODE_F_HANDLER;
     7.6  extern struct shadow_ops MODE_D_HANDLER;
     7.7 +
     7.8 +static void free_p2m_table(struct vcpu *v);
     7.9  #endif
    7.10  
    7.11  extern struct shadow_ops MODE_A_HANDLER;
    7.12  
    7.13  #define SHADOW_MAX_GUEST32(_encoded) ((L1_PAGETABLE_ENTRIES_32 - 1) - ((_encoded) >> 16))
    7.14 +
    7.15 +
    7.16 +int shadow_direct_map_init(struct vcpu *v)
    7.17 +{
    7.18 +    struct page_info *page;
    7.19 +    l3_pgentry_t *root;
    7.20 +
    7.21 +    if ( !(page = alloc_domheap_pages(NULL, 0, ALLOC_DOM_DMA)) )
    7.22 +        goto fail;
    7.23 +
    7.24 +    root = map_domain_page_global(page_to_mfn(page));
    7.25 +    memset(root, 0, PAGE_SIZE);
    7.26 +    root[PAE_SHADOW_SELF_ENTRY] = l3e_from_page(page, __PAGE_HYPERVISOR);
    7.27 +
    7.28 +    v->domain->arch.phys_table = mk_pagetable(page_to_maddr(page));
    7.29 +    /* 
    7.30 +     * We need to set shadow_vtable to get __shadow_set/get_xxx
    7.31 +     * working
    7.32 +     */
    7.33 +    v->arch.shadow_vtable = (l2_pgentry_t *) root;
    7.34 +
    7.35 +    return 1;
    7.36 +    
    7.37 +fail:
    7.38 +    return 0;
    7.39 +}
    7.40 +
    7.41 +static void shadow_direct_map_clean(struct vcpu *v)
    7.42 +{
    7.43 +    l2_pgentry_t *l2e;
    7.44 +    l3_pgentry_t *l3e;
    7.45 +    int i, j;
    7.46 +
    7.47 +    ASSERT ( v->arch.shadow_vtable );
    7.48 +
    7.49 +    l3e = (l3_pgentry_t *) v->arch.shadow_vtable;
    7.50 +    
    7.51 +    for ( i = 0; i < PAE_L3_PAGETABLE_ENTRIES; i++ )
    7.52 +    {
    7.53 +        if ( l3e_get_flags(l3e[i]) & _PAGE_PRESENT )
    7.54 +        {
    7.55 +            l2e = map_domain_page(l3e_get_pfn(l3e[i]));
    7.56 +
    7.57 +            for ( j = 0; j < L2_PAGETABLE_ENTRIES; j++ )
    7.58 +            {
    7.59 +                if ( l2e_get_flags(l2e[j]) & _PAGE_PRESENT )
    7.60 +                    free_domheap_page(mfn_to_page(l2e_get_pfn(l2e[j])));
    7.61 +            }
    7.62 +            unmap_domain_page(l2e);
    7.63 +            free_domheap_page(mfn_to_page(l3e_get_pfn(l3e[i])));
    7.64 +        }
    7.65 +    }
    7.66 +
    7.67 +    free_domheap_page(
    7.68 +        mfn_to_page(pagetable_get_pfn(v->domain->arch.phys_table)));
    7.69 +
    7.70 +    unmap_domain_page_global(v->arch.shadow_vtable);
    7.71 +    v->arch.shadow_vtable = 0;
    7.72 +    v->domain->arch.phys_table = mk_pagetable(0);
    7.73 +}
    7.74 +
    7.75  /****************************************************************************/
    7.76  /************* export interface functions ***********************************/
    7.77  /****************************************************************************/
    7.78 @@ -49,8 +112,13 @@ extern struct shadow_ops MODE_A_HANDLER;
    7.79  
    7.80  int shadow_set_guest_paging_levels(struct domain *d, int levels)
    7.81  {
    7.82 +    struct vcpu *v = current;
    7.83      shadow_lock(d);
    7.84  
    7.85 +    if ( shadow_mode_translate(d) && 
    7.86 +         !(pagetable_get_paddr(v->domain->arch.phys_table)) )
    7.87 +         shadow_direct_map_clean(v);
    7.88 +
    7.89      switch(levels) {
    7.90  #if CONFIG_PAGING_LEVELS >= 4
    7.91      case 4:
    7.92 @@ -171,7 +239,7 @@ free_shadow_tables(struct domain *d, uns
    7.93      if ( d->arch.ops->guest_paging_levels == PAGING_L2 )
    7.94      {
    7.95          struct page_info *page = mfn_to_page(smfn);
    7.96 -        for ( i = 0; i < PDP_ENTRIES; i++ )
    7.97 +        for ( i = 0; i < PAE_L3_PAGETABLE_ENTRIES; i++ )
    7.98          {
    7.99              if ( entry_get_flags(ple[i]) & _PAGE_PRESENT )
   7.100                  free_fake_shadow_l2(d,entry_get_pfn(ple[i]));
   7.101 @@ -229,48 +297,12 @@ free_shadow_tables(struct domain *d, uns
   7.102  #endif
   7.103  
   7.104  #if CONFIG_PAGING_LEVELS == 4
   7.105 -/*
   7.106 - * Convert PAE 3-level page-table to 4-level page-table
   7.107 - */
   7.108 -static pagetable_t page_table_convert(struct domain *d)
   7.109 -{
   7.110 -    struct page_info *l4page, *l3page;
   7.111 -    l4_pgentry_t *l4;
   7.112 -    l3_pgentry_t *l3, *pae_l3;
   7.113 -    int i;
   7.114 -
   7.115 -    l4page = alloc_domheap_page(NULL);
   7.116 -    if (l4page == NULL)
   7.117 -        domain_crash_synchronous();
   7.118 -    l4 = map_domain_page(page_to_mfn(l4page));
   7.119 -    memset(l4, 0, PAGE_SIZE);
   7.120 -
   7.121 -    l3page = alloc_domheap_page(NULL);
   7.122 -    if (l3page == NULL)
   7.123 -        domain_crash_synchronous();
   7.124 -    l3 = map_domain_page(page_to_mfn(l3page));
   7.125 -    memset(l3, 0, PAGE_SIZE);
   7.126 -
   7.127 -    l4[0] = l4e_from_page(l3page, __PAGE_HYPERVISOR);
   7.128 -
   7.129 -    pae_l3 = map_domain_page(pagetable_get_pfn(d->arch.phys_table));
   7.130 -    for (i = 0; i < PDP_ENTRIES; i++)
   7.131 -        l3[i] = l3e_from_pfn(l3e_get_pfn(pae_l3[i]), __PAGE_HYPERVISOR);
   7.132 -    unmap_domain_page(pae_l3);
   7.133 -
   7.134 -    unmap_domain_page(l4);
   7.135 -    unmap_domain_page(l3);
   7.136 -
   7.137 -    return mk_pagetable(page_to_maddr(l4page));
   7.138 -}
   7.139 -
   7.140  static void alloc_monitor_pagetable(struct vcpu *v)
   7.141  {
   7.142      unsigned long mmfn;
   7.143      l4_pgentry_t *mpl4e;
   7.144      struct page_info *mmfn_info;
   7.145      struct domain *d = v->domain;
   7.146 -    pagetable_t phys_table;
   7.147  
   7.148      ASSERT(!pagetable_get_paddr(v->arch.monitor_table)); /* we should only get called once */
   7.149  
   7.150 @@ -284,13 +316,13 @@ static void alloc_monitor_pagetable(stru
   7.151          l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
   7.152  
   7.153      /* map the phys_to_machine map into the per domain Read-Only MPT space */
   7.154 -    phys_table = page_table_convert(d);
   7.155 -    mpl4e[l4_table_offset(RO_MPT_VIRT_START)] =
   7.156 -        l4e_from_paddr(pagetable_get_paddr(phys_table),
   7.157 -                       __PAGE_HYPERVISOR);
   7.158  
   7.159      v->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT);
   7.160      v->arch.monitor_vtable = (l2_pgentry_t *) mpl4e;
   7.161 +    mpl4e[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
   7.162 +
   7.163 +    if ( v->vcpu_id == 0 )
   7.164 +        alloc_p2m_table(d);
   7.165  }
   7.166  
   7.167  void free_monitor_pagetable(struct vcpu *v)
   7.168 @@ -300,6 +332,12 @@ void free_monitor_pagetable(struct vcpu 
   7.169      /*
   7.170       * free monitor_table.
   7.171       */
   7.172 +    if ( v->vcpu_id == 0 )
   7.173 +        free_p2m_table(v);
   7.174 +
   7.175 +    /*
   7.176 +     * Then free monitor_table.
   7.177 +     */
   7.178      mfn = pagetable_get_pfn(v->arch.monitor_table);
   7.179      unmap_domain_page_global(v->arch.monitor_vtable);
   7.180      free_domheap_page(mfn_to_page(mfn));
   7.181 @@ -307,7 +345,6 @@ void free_monitor_pagetable(struct vcpu 
   7.182      v->arch.monitor_table = mk_pagetable(0);
   7.183      v->arch.monitor_vtable = 0;
   7.184  }
   7.185 -
   7.186  #elif CONFIG_PAGING_LEVELS == 3
   7.187  
   7.188  static void alloc_monitor_pagetable(struct vcpu *v)
   7.189 @@ -319,91 +356,6 @@ void free_monitor_pagetable(struct vcpu 
   7.190  {
   7.191      BUG(); /* PAE not implemented yet */
   7.192  }
   7.193 -
   7.194 -#elif CONFIG_PAGING_LEVELS == 2
   7.195 -
   7.196 -static void alloc_monitor_pagetable(struct vcpu *v)
   7.197 -{
   7.198 -    unsigned long mmfn;
   7.199 -    l2_pgentry_t *mpl2e;
   7.200 -    struct page_info *mmfn_info;
   7.201 -    struct domain *d = v->domain;
   7.202 -    int i;
   7.203 -
   7.204 -    ASSERT(pagetable_get_paddr(v->arch.monitor_table) == 0);
   7.205 -
   7.206 -    mmfn_info = alloc_domheap_page(NULL);
   7.207 -    ASSERT(mmfn_info != NULL);
   7.208 -
   7.209 -    mmfn = page_to_mfn(mmfn_info);
   7.210 -    mpl2e = (l2_pgentry_t *)map_domain_page_global(mmfn);
   7.211 -    memset(mpl2e, 0, PAGE_SIZE);
   7.212 -
   7.213 -    memcpy(&mpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 
   7.214 -           &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
   7.215 -           HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
   7.216 -
   7.217 -    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
   7.218 -        mpl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
   7.219 -            l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt) + i,
   7.220 -                          __PAGE_HYPERVISOR);
   7.221 -
   7.222 -    // map the phys_to_machine map into the Read-Only MPT space for this domain
   7.223 -    mpl2e[l2_table_offset(RO_MPT_VIRT_START)] =
   7.224 -        l2e_from_paddr(pagetable_get_paddr(d->arch.phys_table),
   7.225 -                       __PAGE_HYPERVISOR);
   7.226 -
   7.227 -    // Don't (yet) have mappings for these...
   7.228 -    // Don't want to accidentally see the idle_pg_table's linear mapping.
   7.229 -    //
   7.230 -    mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = l2e_empty();
   7.231 -    mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = l2e_empty();
   7.232 -
   7.233 -    v->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT);
   7.234 -    v->arch.monitor_vtable = mpl2e;
   7.235 -}
   7.236 -
   7.237 -/*
   7.238 - * Free the pages for monitor_table and hl2_table
   7.239 - */
   7.240 -void free_monitor_pagetable(struct vcpu *v)
   7.241 -{
   7.242 -    l2_pgentry_t *mpl2e, hl2e, sl2e;
   7.243 -    unsigned long mfn;
   7.244 -
   7.245 -    ASSERT( pagetable_get_paddr(v->arch.monitor_table) );
   7.246 -
   7.247 -    mpl2e = v->arch.monitor_vtable;
   7.248 -
   7.249 -    /*
   7.250 -     * First get the mfn for hl2_table by looking at monitor_table
   7.251 -     */
   7.252 -    hl2e = mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)];
   7.253 -    if ( l2e_get_flags(hl2e) & _PAGE_PRESENT )
   7.254 -    {
   7.255 -        mfn = l2e_get_pfn(hl2e);
   7.256 -        ASSERT(mfn);
   7.257 -        put_shadow_ref(mfn);
   7.258 -    }
   7.259 -
   7.260 -    sl2e = mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)];
   7.261 -    if ( l2e_get_flags(sl2e) & _PAGE_PRESENT )
   7.262 -    {
   7.263 -        mfn = l2e_get_pfn(sl2e);
   7.264 -        ASSERT(mfn);
   7.265 -        put_shadow_ref(mfn);
   7.266 -    }
   7.267 -
   7.268 -    /*
   7.269 -     * Then free monitor_table.
   7.270 -     */
   7.271 -    mfn = pagetable_get_pfn(v->arch.monitor_table);
   7.272 -    unmap_domain_page_global(v->arch.monitor_vtable);
   7.273 -    free_domheap_page(mfn_to_page(mfn));
   7.274 -
   7.275 -    v->arch.monitor_table = mk_pagetable(0);
   7.276 -    v->arch.monitor_vtable = 0;
   7.277 -}
   7.278  #endif
   7.279  
   7.280  static void
   7.281 @@ -942,14 +894,6 @@ void __shadow_mode_disable(struct domain
   7.282  }
   7.283  
   7.284  
   7.285 -static void
   7.286 -free_p2m_table(struct domain *d)
   7.287 -{
   7.288 -    // uh, this needs some work...  :)
   7.289 -    BUG();
   7.290 -}
   7.291 -
   7.292 -
   7.293  int __shadow_mode_enable(struct domain *d, unsigned int mode)
   7.294  {
   7.295      struct vcpu *v;
   7.296 @@ -1143,11 +1087,7 @@ int __shadow_mode_enable(struct domain *
   7.297          xfree(d->arch.shadow_dirty_bitmap);
   7.298          d->arch.shadow_dirty_bitmap = NULL;
   7.299      }
   7.300 -    if ( (new_modes & SHM_translate) && !(new_modes & SHM_external) &&
   7.301 -         pagetable_get_paddr(d->arch.phys_table) )
   7.302 -    {
   7.303 -        free_p2m_table(d);
   7.304 -    }
   7.305 +
   7.306      return -ENOMEM;
   7.307  }
   7.308  
   7.309 @@ -1375,58 +1315,222 @@ int
   7.310  alloc_p2m_table(struct domain *d)
   7.311  {
   7.312      struct list_head *list_ent;
   7.313 -    struct page_info *page, *l2page;
   7.314 -    l2_pgentry_t *l2;
   7.315 -    unsigned long mfn, pfn;
   7.316 -    struct domain_mmap_cache l1cache, l2cache;
   7.317 +    unsigned long va = RO_MPT_VIRT_START; /*  phys_to_machine_mapping */
   7.318 +//    unsigned long va = PML4_ADDR(264);
   7.319  
   7.320 -    l2page = alloc_domheap_page(NULL);
   7.321 -    if ( l2page == NULL )
   7.322 -        return 0;
   7.323 +#if CONFIG_PAGING_LEVELS >= 4
   7.324 +    l4_pgentry_t *l4tab = NULL;
   7.325 +    l4_pgentry_t l4e = { 0 };
   7.326 +#endif
   7.327 +#if CONFIG_PAGING_LEVELS >= 3
   7.328 +    l3_pgentry_t *l3tab = NULL;
   7.329 +    l3_pgentry_t l3e = { 0 };
   7.330 +#endif
   7.331 +    l2_pgentry_t *l2tab = NULL;
   7.332 +    l1_pgentry_t *l1tab = NULL;
   7.333 +    unsigned long *l0tab = NULL;
   7.334 +    l2_pgentry_t l2e = { 0 };
   7.335 +    l1_pgentry_t l1e = { 0 };
   7.336  
   7.337 -    domain_mmap_cache_init(&l1cache);
   7.338 -    domain_mmap_cache_init(&l2cache);
   7.339 +    unsigned long pfn;
   7.340 +    int i;
   7.341  
   7.342 -    d->arch.phys_table = mk_pagetable(page_to_maddr(l2page));
   7.343 -    l2 = map_domain_page_with_cache(page_to_mfn(l2page), &l2cache);
   7.344 -    memset(l2, 0, PAGE_SIZE);
   7.345 -    unmap_domain_page_with_cache(l2, &l2cache);
   7.346 +    ASSERT ( pagetable_get_pfn(d->vcpu[0]->arch.monitor_table) );
   7.347 +
   7.348 +#if CONFIG_PAGING_LEVELS >= 4
   7.349 +    l4tab = map_domain_page(
   7.350 +        pagetable_get_pfn(d->vcpu[0]->arch.monitor_table));
   7.351 +#endif
   7.352 +#if CONFIG_PAGING_LEVELS >= 3
   7.353 +    l3tab = map_domain_page(
   7.354 +        pagetable_get_pfn(d->vcpu[0]->arch.monitor_table));
   7.355 +#endif
   7.356  
   7.357      list_ent = d->page_list.next;
   7.358 -    while ( list_ent != &d->page_list )
   7.359 -    {
   7.360 -        page = list_entry(list_ent, struct page_info, list);
   7.361 -        mfn = page_to_mfn(page);
   7.362 -        pfn = get_gpfn_from_mfn(mfn);
   7.363 -        ASSERT(pfn != INVALID_M2P_ENTRY);
   7.364 -        ASSERT(pfn < (1u<<20));
   7.365 -
   7.366 -        set_p2m_entry(d, pfn, mfn, &l2cache, &l1cache);
   7.367 -
   7.368 -        list_ent = page->list.next;
   7.369 -    }
   7.370  
   7.371 -    list_ent = d->xenpage_list.next;
   7.372 -    while ( list_ent != &d->xenpage_list )
   7.373 +    for ( i = 0; list_ent != &d->page_list; i++ ) 
   7.374      {
   7.375 +        struct page_info *page;
   7.376 +
   7.377          page = list_entry(list_ent, struct page_info, list);
   7.378 -        mfn = page_to_mfn(page);
   7.379 -        pfn = get_gpfn_from_mfn(mfn);
   7.380 -        if ( (pfn != INVALID_M2P_ENTRY) &&
   7.381 -             (pfn < (1u<<20)) )
   7.382 +        pfn = page_to_mfn(page);
   7.383 +
   7.384 +#if CONFIG_PAGING_LEVELS >= 4
   7.385 +        l4e = l4tab[l4_table_offset(va)];
   7.386 +        if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) ) 
   7.387          {
   7.388 -            set_p2m_entry(d, pfn, mfn, &l2cache, &l1cache);
   7.389 -        }
   7.390 +            page = alloc_domheap_page(NULL);
   7.391  
   7.392 -        list_ent = page->list.next;
   7.393 +            if ( !l3tab )
   7.394 +                unmap_domain_page(l3tab);
   7.395 +
   7.396 +            l3tab = map_domain_page(page_to_mfn(page));
   7.397 +            memset(l3tab, 0, PAGE_SIZE);
   7.398 +            l4e = l4tab[l4_table_offset(va)] = 
   7.399 +                l4e_from_page(page, __PAGE_HYPERVISOR);
   7.400 +        } 
   7.401 +        else if ( l3tab == NULL)
   7.402 +            l3tab = map_domain_page(l4e_get_pfn(l4e));
   7.403 +#endif
   7.404 +        l3e = l3tab[l3_table_offset(va)];
   7.405 +        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ) 
   7.406 +        {
   7.407 +            page = alloc_domheap_page(NULL);
   7.408 +            if ( !l2tab )
   7.409 +                unmap_domain_page(l2tab);
   7.410 +
   7.411 +            l2tab = map_domain_page(page_to_mfn(page));
   7.412 +            memset(l2tab, 0, PAGE_SIZE);
   7.413 +            l3e = l3tab[l3_table_offset(va)] = 
   7.414 +                l3e_from_page(page, __PAGE_HYPERVISOR);
   7.415 +        } 
   7.416 +        else if ( l2tab == NULL) 
   7.417 +            l2tab = map_domain_page(l3e_get_pfn(l3e));
   7.418 +
   7.419 +        l2e = l2tab[l2_table_offset(va)];
   7.420 +        if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ) 
   7.421 +        {
   7.422 +            page = alloc_domheap_page(NULL);
   7.423 +
   7.424 +            if ( !l1tab )
   7.425 +                unmap_domain_page(l1tab);
   7.426 +            
   7.427 +            l1tab = map_domain_page(page_to_mfn(page));
   7.428 +            memset(l1tab, 0, PAGE_SIZE);
   7.429 +            l2e = l2tab[l2_table_offset(va)] = 
   7.430 +                l2e_from_page(page, __PAGE_HYPERVISOR);
   7.431 +        } 
   7.432 +        else if ( l1tab == NULL) 
   7.433 +            l1tab = map_domain_page(l2e_get_pfn(l2e));
   7.434 +
   7.435 +        l1e = l1tab[l1_table_offset(va)];
   7.436 +        if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) ) 
   7.437 +        {
   7.438 +            page = alloc_domheap_page(NULL);
   7.439 +            if ( !l0tab )
   7.440 +                unmap_domain_page(l0tab);
   7.441 +
   7.442 +            l0tab = map_domain_page(page_to_mfn(page));
   7.443 +            memset(l0tab, 0, PAGE_SIZE);
   7.444 +            l1e = l1tab[l1_table_offset(va)] = 
   7.445 +                l1e_from_page(page, __PAGE_HYPERVISOR);
   7.446 +        }
   7.447 +        else if ( l0tab == NULL) 
   7.448 +            l0tab = map_domain_page(l1e_get_pfn(l1e));
   7.449 +
   7.450 +        l0tab[i & ((1 << PAGETABLE_ORDER) - 1) ] = pfn;
   7.451 +        list_ent = frame_table[pfn].list.next;
   7.452 +        va += sizeof (pfn);
   7.453      }
   7.454 -
   7.455 -    domain_mmap_cache_destroy(&l2cache);
   7.456 -    domain_mmap_cache_destroy(&l1cache);
   7.457 +#if CONFIG_PAGING_LEVELS >= 4
   7.458 +    unmap_domain_page(l4tab);
   7.459 +#endif
   7.460 +#if CONFIG_PAGING_LEVELS >= 3
   7.461 +    unmap_domain_page(l3tab);
   7.462 +#endif
   7.463 +    unmap_domain_page(l2tab);
   7.464 +    unmap_domain_page(l1tab);
   7.465 +    unmap_domain_page(l0tab);
   7.466  
   7.467      return 1;
   7.468  }
   7.469  
   7.470 +#if CONFIG_PAGING_LEVELS == 4
   7.471 +static void
   7.472 +free_p2m_table(struct vcpu *v)
   7.473 +{
   7.474 +    unsigned long va;
   7.475 +    l1_pgentry_t *l1tab;
   7.476 +    l1_pgentry_t l1e;
   7.477 +    l2_pgentry_t *l2tab;
   7.478 +    l2_pgentry_t l2e;
   7.479 +#if CONFIG_PAGING_LEVELS >= 3
   7.480 +    l3_pgentry_t *l3tab; 
   7.481 +    l3_pgentry_t l3e;
   7.482 +    int i3;
   7.483 +#endif
   7.484 +#if CONFIG_PAGING_LEVELS == 4
   7.485 +    l4_pgentry_t *l4tab; 
   7.486 +    l4_pgentry_t l4e;
   7.487 +#endif
   7.488 +
   7.489 +    ASSERT ( pagetable_get_pfn(v->arch.monitor_table) );
   7.490 +
   7.491 +#if CONFIG_PAGING_LEVELS == 4
   7.492 +    l4tab = map_domain_page(
   7.493 +        pagetable_get_pfn(v->arch.monitor_table));
   7.494 +#endif
   7.495 +#if CONFIG_PAGING_LEVELS == 3
   7.496 +    l3tab = map_domain_page(
   7.497 +        pagetable_get_pfn(v->arch.monitor_table));
   7.498 +#endif
   7.499 +
   7.500 +    for ( va = RO_MPT_VIRT_START; va < RO_MPT_VIRT_END; )
   7.501 +    {
   7.502 +#if CONFIG_PAGING_LEVELS == 4
   7.503 +        l4e = l4tab[l4_table_offset(va)];
   7.504 +
   7.505 +        if ( l4e_get_flags(l4e) & _PAGE_PRESENT )
   7.506 +        {
   7.507 +            l3tab = map_domain_page(l4e_get_pfn(l4e));
   7.508 +#endif
   7.509 +            for ( i3 = 0; i3 < L1_PAGETABLE_ENTRIES; i3++ )
   7.510 +            {
   7.511 +                l3e = l3tab[l3_table_offset(va)];
   7.512 +                if ( l3e_get_flags(l3e) & _PAGE_PRESENT )
   7.513 +                {
   7.514 +                    int i2;
   7.515 +
   7.516 +                    l2tab = map_domain_page(l3e_get_pfn(l3e));
   7.517 +
   7.518 +                    for ( i2 = 0; i2 < L1_PAGETABLE_ENTRIES; i2++ )
   7.519 +                    {
   7.520 +                        l2e = l2tab[l2_table_offset(va)];
   7.521 +                        if ( l2e_get_flags(l2e) & _PAGE_PRESENT )
   7.522 +                        {
   7.523 +                            int i1;
   7.524 +
   7.525 +                            l1tab = map_domain_page(l2e_get_pfn(l2e));
   7.526 +
   7.527 +                            for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++ )
   7.528 +                            {
   7.529 +                                l1e = l1tab[l1_table_offset(va)];
   7.530 +
   7.531 +                                if ( l1e_get_flags(l1e) & _PAGE_PRESENT )
   7.532 +                                    free_domheap_page(mfn_to_page(l1e_get_pfn(l1e)));
   7.533 +
   7.534 +                                va += 1UL << L1_PAGETABLE_SHIFT;
   7.535 +                            }
   7.536 +                            unmap_domain_page(l1tab);
   7.537 +                            free_domheap_page(mfn_to_page(l2e_get_pfn(l2e)));
   7.538 +                        }
   7.539 +                        else
   7.540 +                            va += 1UL << L2_PAGETABLE_SHIFT;
   7.541 +                    }
   7.542 +                    unmap_domain_page(l2tab);
   7.543 +                    free_domheap_page(mfn_to_page(l3e_get_pfn(l3e)));
   7.544 +                }
   7.545 +                else
   7.546 +                    va += 1UL << L3_PAGETABLE_SHIFT;
   7.547 +            }
   7.548 +#if CONFIG_PAGING_LEVELS == 4
   7.549 +            unmap_domain_page(l3tab);
   7.550 +            free_domheap_page(mfn_to_page(l4e_get_pfn(l4e)));
   7.551 +        }
   7.552 +        else
   7.553 +            va += 1UL << L4_PAGETABLE_SHIFT;
   7.554 +#endif
   7.555 +    }
   7.556 +
   7.557 +#if CONFIG_PAGING_LEVELS == 4
   7.558 +    unmap_domain_page(l4tab);
   7.559 +#endif
   7.560 +#if CONFIG_PAGING_LEVELS == 3
   7.561 +    unmap_domain_page(l3tab);
   7.562 +#endif
   7.563 +}
   7.564 +#endif
   7.565 +
   7.566  void shadow_l1_normal_pt_update(
   7.567      struct domain *d,
   7.568      paddr_t pa, l1_pgentry_t gpte,
   7.569 @@ -1770,6 +1874,7 @@ void clear_all_shadow_status(struct doma
   7.570      shadow_unlock(d);
   7.571  }
   7.572  
   7.573 +
   7.574  /*
   7.575   * Local variables:
   7.576   * mode: C
     8.1 --- a/xen/include/asm-x86/mm.h	Fri Feb 03 11:54:05 2006 +0100
     8.2 +++ b/xen/include/asm-x86/mm.h	Fri Feb 03 12:02:30 2006 +0100
     8.3 @@ -279,14 +279,9 @@ int check_descriptor(struct desc_struct 
     8.4  static inline unsigned long get_mfn_from_gpfn(unsigned long pfn)
     8.5  {
     8.6      unsigned long mfn;
     8.7 -    l1_pgentry_t pte;
     8.8  
     8.9 -    if ( (__copy_from_user(&pte, &phys_to_machine_mapping[pfn],
    8.10 -                           sizeof(pte)) == 0) &&
    8.11 -         (l1e_get_flags(pte) & _PAGE_PRESENT) )
    8.12 -        mfn = l1e_get_pfn(pte);
    8.13 -    else
    8.14 -        mfn = INVALID_MFN;
    8.15 +    if ( __copy_from_user(&mfn, &phys_to_machine_mapping[pfn], sizeof(mfn)) )
    8.16 +	mfn = INVALID_MFN;
    8.17  
    8.18      return mfn;
    8.19  }
     9.1 --- a/xen/include/asm-x86/shadow.h	Fri Feb 03 11:54:05 2006 +0100
     9.2 +++ b/xen/include/asm-x86/shadow.h	Fri Feb 03 12:02:30 2006 +0100
     9.3 @@ -115,7 +115,12 @@ do {                                    
     9.4  #define SHADOW_ENCODE_MIN_MAX(_min, _max) ((((GUEST_L1_PAGETABLE_ENTRIES - 1) - (_max)) << 16) | (_min))
     9.5  #define SHADOW_MIN(_encoded) ((_encoded) & ((1u<<16) - 1))
     9.6  #define SHADOW_MAX(_encoded) ((GUEST_L1_PAGETABLE_ENTRIES - 1) - ((_encoded) >> 16))
     9.7 -
     9.8 +#if CONFIG_PAGING_LEVELS == 2
     9.9 +extern void shadow_direct_map_clean(struct vcpu *v);
    9.10 +#endif
    9.11 +extern int shadow_direct_map_init(struct vcpu *v);
    9.12 +extern int shadow_direct_map_fault(
    9.13 +    unsigned long vpa, struct cpu_user_regs *regs);
    9.14  extern void shadow_mode_init(void);
    9.15  extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
    9.16  extern int shadow_fault(unsigned long va, struct cpu_user_regs *regs);
    10.1 --- a/xen/include/asm-x86/shadow_64.h	Fri Feb 03 11:54:05 2006 +0100
    10.2 +++ b/xen/include/asm-x86/shadow_64.h	Fri Feb 03 12:02:30 2006 +0100
    10.3 @@ -92,7 +92,7 @@ typedef struct { intpte_t lo; } pgentry_
    10.4          ( !!(((x).lo ^ (y).lo) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
    10.5  
    10.6  #define PAE_SHADOW_SELF_ENTRY   259
    10.7 -#define PDP_ENTRIES   4
    10.8 +#define PAE_L3_PAGETABLE_ENTRIES   4
    10.9  
   10.10  static inline int  table_offset_64(unsigned long va, int level)
   10.11  {
    11.1 --- a/xen/include/asm-x86/shadow_public.h	Fri Feb 03 11:54:05 2006 +0100
    11.2 +++ b/xen/include/asm-x86/shadow_public.h	Fri Feb 03 12:02:30 2006 +0100
    11.3 @@ -21,11 +21,12 @@
    11.4  
    11.5  #ifndef _XEN_SHADOW_PUBLIC_H
    11.6  #define _XEN_SHADOW_PUBLIC_H
    11.7 +
    11.8 +extern int alloc_p2m_table(struct domain *d);
    11.9 +
   11.10  #if CONFIG_PAGING_LEVELS >= 3
   11.11  #define MFN_PINNED(_x) (mfn_to_page(_x)->u.inuse.type_info & PGT_pinned)
   11.12  
   11.13 -extern int alloc_p2m_table(struct domain *d);
   11.14 -
   11.15  extern void shadow_sync_and_drop_references(
   11.16        struct domain *d, struct page_info *page);
   11.17  extern void shadow_drop_references(