ia64/xen-unstable

changeset 15618:ad87a4912874

[HVM] HAP: tidy up page allocation/tracking and monitor-table building.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Wed Jul 18 13:56:00 2007 +0100 (2007-07-18)
parents 4197a1aad70b
children e1f74a5a09cb
files xen/arch/x86/mm/hap/hap.c xen/include/asm-x86/domain.h
line diff
     1.1 --- a/xen/arch/x86/mm/hap/hap.c	Wed Jul 18 10:04:46 2007 +0100
     1.2 +++ b/xen/arch/x86/mm/hap/hap.c	Wed Jul 18 13:56:00 2007 +0100
     1.3 @@ -87,83 +87,79 @@ void hap_clean_dirty_bitmap(struct domai
     1.4  /************************************************/
     1.5  /*             HAP SUPPORT FUNCTIONS            */
     1.6  /************************************************/
     1.7 -mfn_t hap_alloc(struct domain *d)
     1.8 +static struct page_info *hap_alloc(struct domain *d)
     1.9  {
    1.10 -    struct page_info *sp = NULL;
    1.11 +    struct page_info *pg = NULL;
    1.12      void *p;
    1.13  
    1.14      ASSERT(hap_locked_by_me(d));
    1.15  
    1.16 -    sp = list_entry(d->arch.paging.hap.freelists.next, struct page_info, list);
    1.17 -    list_del(&sp->list);
    1.18 -    d->arch.paging.hap.free_pages -= 1;
    1.19 -
    1.20 -    /* Now safe to clear the page for reuse */
    1.21 -    p = hap_map_domain_page(page_to_mfn(sp));
    1.22 -    ASSERT(p != NULL);
    1.23 -    clear_page(p);
    1.24 -    hap_unmap_domain_page(p);
    1.25 -
    1.26 -    return page_to_mfn(sp);
    1.27 -}
    1.28 -
    1.29 -void hap_free(struct domain *d, mfn_t smfn)
    1.30 -{
    1.31 -    struct page_info *sp = mfn_to_page(smfn); 
    1.32 -
    1.33 -    ASSERT(hap_locked_by_me(d));
    1.34 -
    1.35 -    d->arch.paging.hap.free_pages += 1;
    1.36 -    list_add_tail(&sp->list, &d->arch.paging.hap.freelists);
    1.37 -}
    1.38 -
    1.39 -struct page_info * hap_alloc_p2m_page(struct domain *d)
    1.40 -{
    1.41 -    struct page_info *pg;
    1.42 -    mfn_t mfn;
    1.43 -    void *p;
    1.44 -
    1.45 -    hap_lock(d);
    1.46 +    if ( unlikely(list_empty(&d->arch.paging.hap.freelist)) )
    1.47 +        return NULL;
    1.48  
    1.49 -#if CONFIG_PAGING_LEVELS == 3
    1.50 -    /* Under PAE mode, top-level P2M table should be allocated below 4GB space
    1.51 -     * because the size of h_cr3 is only 32-bit. We use alloc_domheap_pages to 
    1.52 -     * force this requirement. This page will be de-allocated in 
    1.53 -     * hap_free_p2m_page(), like other P2M pages.
    1.54 -    */
    1.55 -    if ( d->arch.paging.hap.p2m_pages == 0 ) 
    1.56 -    {
    1.57 -        pg = alloc_domheap_pages(NULL, 0, MEMF_bits(32));
    1.58 -        d->arch.paging.hap.p2m_pages += 1;
    1.59 -    }
    1.60 -    else
    1.61 -#endif
    1.62 -    {
    1.63 -        pg = mfn_to_page(hap_alloc(d));
    1.64 -        d->arch.paging.hap.p2m_pages += 1;
    1.65 -        d->arch.paging.hap.total_pages -= 1;
    1.66 -    }
    1.67 +    pg = list_entry(d->arch.paging.hap.freelist.next, struct page_info, list);
    1.68 +    list_del(&pg->list);
    1.69 +    d->arch.paging.hap.free_pages--;
    1.70  
    1.71 -    if ( pg == NULL )
    1.72 -    {
    1.73 -        hap_unlock(d);
    1.74 -        return NULL;
    1.75 -    }   
    1.76 -
    1.77 -    hap_unlock(d);
    1.78 -
    1.79 -    page_set_owner(pg, d);
    1.80 -    pg->count_info = 1;
    1.81 -    mfn = page_to_mfn(pg);
    1.82 -    p = hap_map_domain_page(mfn);
    1.83 +    p = hap_map_domain_page(page_to_mfn(pg));
    1.84 +    ASSERT(p != NULL);
    1.85      clear_page(p);
    1.86      hap_unmap_domain_page(p);
    1.87  
    1.88      return pg;
    1.89  }
    1.90  
    1.91 +static void hap_free(struct domain *d, mfn_t mfn)
    1.92 +{
    1.93 +    struct page_info *pg = mfn_to_page(mfn); 
    1.94 +
    1.95 +    ASSERT(hap_locked_by_me(d));
    1.96 +
    1.97 +    d->arch.paging.hap.free_pages++;
    1.98 +    list_add_tail(&pg->list, &d->arch.paging.hap.freelist);
    1.99 +}
   1.100 +
   1.101 +static struct page_info *hap_alloc_p2m_page(struct domain *d)
   1.102 +{
   1.103 +    struct page_info *pg;
   1.104 +
   1.105 +    hap_lock(d);
   1.106 +    pg = hap_alloc(d);
   1.107 +
   1.108 +#if CONFIG_PAGING_LEVELS == 3
   1.109 +    /* Under PAE mode, top-level P2M table should be allocated below 4GB space
   1.110 +     * because the size of h_cr3 is only 32-bit. We use alloc_domheap_pages to 
   1.111 +     * force this requirement, and exchange the guaranteed 32-bit-clean
   1.112 +     * page for the one we just hap_alloc()ed. */
   1.113 +    if ( d->arch.paging.hap.p2m_pages == 0
   1.114 +         && mfn_x(page_to_mfn(pg)) >= (1UL << (32 - PAGE_SHIFT)) )
   1.115 +    {
   1.116 +        free_domheap_page(pg);
   1.117 +        pg = alloc_domheap_pages(NULL, 0, MEMF_bits(32));
   1.118 +        if ( likely(pg != NULL) )
   1.119 +        {
   1.120 +            void *p = hap_map_domain_page(page_to_mfn(pg));
   1.121 +            clear_page(p);
   1.122 +            hap_unmap_domain_page(p);
   1.123 +        }
   1.124 +    }
   1.125 +#endif
   1.126 +
   1.127 +    if ( likely(pg != NULL) )
   1.128 +    {
   1.129 +        d->arch.paging.hap.total_pages--;
   1.130 +        d->arch.paging.hap.p2m_pages++;
   1.131 +        page_set_owner(pg, d);
   1.132 +        pg->count_info = 1;
   1.133 +    }
   1.134 +
   1.135 +    hap_unlock(d);
   1.136 +    return pg;
   1.137 +}
   1.138 +
   1.139  void hap_free_p2m_page(struct domain *d, struct page_info *pg)
   1.140  {
   1.141 +    hap_lock(d);
   1.142      ASSERT(page_get_owner(pg) == d);
   1.143      /* Should have just the one ref we gave it in alloc_p2m_page() */
   1.144      if ( (pg->count_info & PGC_count_mask) != 1 )
   1.145 @@ -173,9 +169,10 @@ void hap_free_p2m_page(struct domain *d,
   1.146      /* Free should not decrement domain's total allocation, since 
   1.147       * these pages were allocated without an owner. */
   1.148      page_set_owner(pg, NULL); 
   1.149 -    free_domheap_pages(pg, 0);
   1.150 +    free_domheap_page(pg);
   1.151      d->arch.paging.hap.p2m_pages--;
   1.152      ASSERT(d->arch.paging.hap.p2m_pages >= 0);
   1.153 +    hap_unlock(d);
   1.154  }
   1.155  
   1.156  /* Return the size of the pool, rounded up to the nearest MB */
   1.157 @@ -193,7 +190,7 @@ hap_get_allocation(struct domain *d)
   1.158  static unsigned int
   1.159  hap_set_allocation(struct domain *d, unsigned int pages, int *preempted)
   1.160  {
   1.161 -    struct page_info *sp;
   1.162 +    struct page_info *pg;
   1.163  
   1.164      ASSERT(hap_locked_by_me(d));
   1.165  
   1.166 @@ -202,27 +199,27 @@ hap_set_allocation(struct domain *d, uns
   1.167          if ( d->arch.paging.hap.total_pages < pages )
   1.168          {
   1.169              /* Need to allocate more memory from domheap */
   1.170 -            sp = alloc_domheap_pages(NULL, 0, 0);
   1.171 -            if ( sp == NULL )
   1.172 +            pg = alloc_domheap_page(NULL);
   1.173 +            if ( pg == NULL )
   1.174              {
   1.175                  HAP_PRINTK("failed to allocate hap pages.\n");
   1.176                  return -ENOMEM;
   1.177              }
   1.178 -            d->arch.paging.hap.free_pages += 1;
   1.179 -            d->arch.paging.hap.total_pages += 1;
   1.180 -            list_add_tail(&sp->list, &d->arch.paging.hap.freelists);
   1.181 +            d->arch.paging.hap.free_pages++;
   1.182 +            d->arch.paging.hap.total_pages++;
   1.183 +            list_add_tail(&pg->list, &d->arch.paging.hap.freelist);
   1.184          }
   1.185          else if ( d->arch.paging.hap.total_pages > pages )
   1.186          {
   1.187              /* Need to return memory to domheap */
   1.188 -            ASSERT(!list_empty(&d->arch.paging.hap.freelists));
   1.189 -            sp = list_entry(d->arch.paging.hap.freelists.next,
   1.190 +            ASSERT(!list_empty(&d->arch.paging.hap.freelist));
   1.191 +            pg = list_entry(d->arch.paging.hap.freelist.next,
   1.192                              struct page_info, list);
   1.193 -            list_del(&sp->list);
   1.194 -            d->arch.paging.hap.free_pages -= 1;
   1.195 -            d->arch.paging.hap.total_pages -= 1;
   1.196 -            sp->count_info = 0;
   1.197 -            free_domheap_pages(sp, 0);
   1.198 +            list_del(&pg->list);
   1.199 +            d->arch.paging.hap.free_pages--;
   1.200 +            d->arch.paging.hap.total_pages--;
   1.201 +            pg->count_info = 0;
   1.202 +            free_domheap_page(pg);
   1.203          }
   1.204          
   1.205          /* Check to see if we need to yield and try again */
   1.206 @@ -237,62 +234,63 @@ hap_set_allocation(struct domain *d, uns
   1.207  }
   1.208  
   1.209  #if CONFIG_PAGING_LEVELS == 4
   1.210 -void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn)
   1.211 +static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn)
   1.212  {
   1.213      struct domain *d = v->domain;
   1.214 -    l4_pgentry_t *sl4e;
   1.215 +    l4_pgentry_t *l4e;
   1.216  
   1.217 -    sl4e = hap_map_domain_page(sl4mfn);
   1.218 -    ASSERT(sl4e != NULL);
   1.219 +    l4e = hap_map_domain_page(l4mfn);
   1.220 +    ASSERT(l4e != NULL);
   1.221  
   1.222      /* Copy the common Xen mappings from the idle domain */
   1.223 -    memcpy(&sl4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
   1.224 +    memcpy(&l4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
   1.225             &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
   1.226             ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
   1.227  
   1.228      /* Install the per-domain mappings for this domain */
   1.229 -    sl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
   1.230 +    l4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
   1.231          l4e_from_pfn(mfn_x(page_to_mfn(virt_to_page(d->arch.mm_perdomain_l3))),
   1.232                       __PAGE_HYPERVISOR);
   1.233  
   1.234 -    sl4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
   1.235 -        l4e_from_pfn(mfn_x(gl4mfn), __PAGE_HYPERVISOR);
   1.236 +    /* Install a linear mapping */
   1.237 +    l4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
   1.238 +        l4e_from_pfn(mfn_x(l4mfn), __PAGE_HYPERVISOR);
   1.239  
   1.240 -    /* install domain-specific P2M table */
   1.241 -    sl4e[l4_table_offset(RO_MPT_VIRT_START)] =
   1.242 +    /* Install the domain-specific P2M table */
   1.243 +    l4e[l4_table_offset(RO_MPT_VIRT_START)] =
   1.244          l4e_from_pfn(mfn_x(pagetable_get_mfn(d->arch.phys_table)),
   1.245                       __PAGE_HYPERVISOR);
   1.246  
   1.247 -    hap_unmap_domain_page(sl4e);
   1.248 +    hap_unmap_domain_page(l4e);
   1.249  }
   1.250  #endif /* CONFIG_PAGING_LEVELS == 4 */
   1.251  
   1.252  #if CONFIG_PAGING_LEVELS == 3
   1.253 -void hap_install_xen_entries_in_l2h(struct vcpu *v, mfn_t sl2hmfn)
   1.254 +static void hap_install_xen_entries_in_l2h(struct vcpu *v, mfn_t l2hmfn)
   1.255  {
   1.256      struct domain *d = v->domain;
   1.257 -    l2_pgentry_t *sl2e;
   1.258 +    l2_pgentry_t *l2e;
   1.259      l3_pgentry_t *p2m;
   1.260 -
   1.261      int i;
   1.262  
   1.263 -    sl2e = hap_map_domain_page(sl2hmfn);
   1.264 -    ASSERT(sl2e != NULL);
   1.265 +    l2e = hap_map_domain_page(l2hmfn);
   1.266 +    ASSERT(l2e != NULL);
   1.267      
   1.268      /* Copy the common Xen mappings from the idle domain */
   1.269 -    memcpy(&sl2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
   1.270 +    memcpy(&l2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
   1.271             &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
   1.272             L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
   1.273  
   1.274      /* Install the per-domain mappings for this domain */
   1.275      for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
   1.276 -        sl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
   1.277 +        l2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
   1.278              l2e_from_pfn(
   1.279                  mfn_x(page_to_mfn(virt_to_page(d->arch.mm_perdomain_pt) + i)),
   1.280                  __PAGE_HYPERVISOR);
   1.281  
    1.282 +    /* No linear mapping; will be set up by monitor-table constructor. */
   1.283      for ( i = 0; i < 4; i++ )
   1.284 -        sl2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
   1.285 +        l2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
   1.286              l2e_empty();
   1.287  
   1.288      /* Install the domain-specific p2m table */
   1.289 @@ -300,63 +298,66 @@ void hap_install_xen_entries_in_l2h(stru
   1.290      p2m = hap_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
   1.291      for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
   1.292      {
   1.293 -        sl2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
   1.294 +        l2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
   1.295              (l3e_get_flags(p2m[i]) & _PAGE_PRESENT)
   1.296              ? l2e_from_pfn(mfn_x(_mfn(l3e_get_pfn(p2m[i]))),
   1.297                             __PAGE_HYPERVISOR)
   1.298              : l2e_empty();
   1.299      }
   1.300      hap_unmap_domain_page(p2m);
   1.301 -    hap_unmap_domain_page(sl2e);
   1.302 +    hap_unmap_domain_page(l2e);
   1.303  }
   1.304  #endif
   1.305  
   1.306  #if CONFIG_PAGING_LEVELS == 2
   1.307 -void hap_install_xen_entries_in_l2(struct vcpu *v, mfn_t gl2mfn, mfn_t sl2mfn)
   1.308 +static void hap_install_xen_entries_in_l2(struct vcpu *v, mfn_t l2mfn)
   1.309  {
   1.310      struct domain *d = v->domain;
   1.311 -    l2_pgentry_t *sl2e;
   1.312 +    l2_pgentry_t *l2e;
   1.313      int i;
   1.314  
   1.315 -    sl2e = hap_map_domain_page(sl2mfn);
   1.316 -    ASSERT(sl2e != NULL);
   1.317 +    l2e = hap_map_domain_page(l2mfn);
   1.318 +    ASSERT(l2e != NULL);
   1.319      
   1.320      /* Copy the common Xen mappings from the idle domain */
   1.321 -    memcpy(&sl2e[L2_PAGETABLE_FIRST_XEN_SLOT],
   1.322 +    memcpy(&l2e[L2_PAGETABLE_FIRST_XEN_SLOT],
   1.323             &idle_pg_table[L2_PAGETABLE_FIRST_XEN_SLOT],
   1.324             L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
   1.325  
   1.326      /* Install the per-domain mappings for this domain */
   1.327      for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
   1.328 -        sl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
   1.329 +        l2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
   1.330              l2e_from_pfn(
   1.331                  mfn_x(page_to_mfn(virt_to_page(d->arch.mm_perdomain_pt) + i)),
   1.332                  __PAGE_HYPERVISOR);
   1.333  
   1.334 +    /* Install the linear mapping */
   1.335 +    l2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
   1.336 +        l2e_from_pfn(mfn_x(l2mfn), __PAGE_HYPERVISOR);
   1.337  
   1.338 -    sl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
   1.339 -        l2e_from_pfn(mfn_x(gl2mfn), __PAGE_HYPERVISOR);
   1.340 -
   1.341 -    /* install domain-specific P2M table */
   1.342 -    sl2e[l2_table_offset(RO_MPT_VIRT_START)] =
   1.343 +    /* Install the domain-specific P2M table */
   1.344 +    l2e[l2_table_offset(RO_MPT_VIRT_START)] =
   1.345          l2e_from_pfn(mfn_x(pagetable_get_mfn(d->arch.phys_table)),
   1.346                              __PAGE_HYPERVISOR);
   1.347  
   1.348 -    hap_unmap_domain_page(sl2e);
   1.349 +    hap_unmap_domain_page(l2e);
   1.350  }
   1.351  #endif
   1.352  
   1.353 -mfn_t hap_make_monitor_table(struct vcpu *v)
   1.354 +static mfn_t hap_make_monitor_table(struct vcpu *v)
   1.355  {
   1.356      struct domain *d = v->domain;
   1.357 +    struct page_info *pg;
   1.358  
   1.359      ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
   1.360  
   1.361  #if CONFIG_PAGING_LEVELS == 4
   1.362      {
   1.363          mfn_t m4mfn;
   1.364 -        m4mfn = hap_alloc(d);
   1.365 -        hap_install_xen_entries_in_l4(v, m4mfn, m4mfn);
   1.366 +        if ( (pg = hap_alloc(d)) == NULL )
   1.367 +            goto oom;
   1.368 +        m4mfn = page_to_mfn(pg);
   1.369 +        hap_install_xen_entries_in_l4(v, m4mfn);
   1.370          return m4mfn;
   1.371      }
   1.372  #elif CONFIG_PAGING_LEVELS == 3
   1.373 @@ -366,12 +367,16 @@ mfn_t hap_make_monitor_table(struct vcpu
   1.374          l2_pgentry_t *l2e;
   1.375          int i;
   1.376  
   1.377 -        m3mfn = hap_alloc(d);
   1.378 +        if ( (pg = hap_alloc(d)) == NULL )
   1.379 +            goto oom;
   1.380 +        m3mfn = page_to_mfn(pg);
   1.381  
   1.382          /* Install a monitor l2 table in slot 3 of the l3 table.
   1.383           * This is used for all Xen entries, including linear maps
   1.384           */
   1.385 -        m2mfn = hap_alloc(d);
   1.386 +        if ( (pg = hap_alloc(d)) == NULL )
   1.387 +            goto oom;
   1.388 +        m2mfn = page_to_mfn(pg);
   1.389          l3e = hap_map_domain_page(m3mfn);
   1.390          l3e[3] = l3e_from_pfn(mfn_x(m2mfn), _PAGE_PRESENT);
   1.391          hap_install_xen_entries_in_l2h(v, m2mfn);
   1.392 @@ -391,16 +396,21 @@ mfn_t hap_make_monitor_table(struct vcpu
   1.393  #else
   1.394      {
   1.395          mfn_t m2mfn;
   1.396 -        
   1.397 -        m2mfn = hap_alloc(d);
   1.398 -        hap_install_xen_entries_in_l2(v, m2mfn, m2mfn);
   1.399 -    
   1.400 +        if ( (pg = hap_alloc(d)) == NULL )
   1.401 +            goto oom;
    1.402 +        m2mfn = page_to_mfn(pg);
   1.403 +        hap_install_xen_entries_in_l2(v, m2mfn);
   1.404          return m2mfn;
   1.405      }
   1.406  #endif
   1.407 +
   1.408 + oom:
   1.409 +    HAP_ERROR("out of memory building monitor pagetable\n");
   1.410 +    domain_crash(d);
   1.411 +    return _mfn(INVALID_MFN);
   1.412  }
   1.413  
   1.414 -void hap_destroy_monitor_table(struct vcpu* v, mfn_t mmfn)
   1.415 +static void hap_destroy_monitor_table(struct vcpu* v, mfn_t mmfn)
   1.416  {
   1.417      struct domain *d = v->domain;
   1.418  
   1.419 @@ -424,7 +434,7 @@ void hap_destroy_monitor_table(struct vc
   1.420  void hap_domain_init(struct domain *d)
   1.421  {
   1.422      hap_lock_init(d);
   1.423 -    INIT_LIST_HEAD(&d->arch.paging.hap.freelists);
   1.424 +    INIT_LIST_HEAD(&d->arch.paging.hap.freelist);
   1.425  
   1.426      /* This domain will use HAP for log-dirty mode */
   1.427      paging_log_dirty_init(d, hap_enable_log_dirty, hap_disable_log_dirty,
   1.428 @@ -640,12 +650,12 @@ static void hap_update_paging_modes(stru
   1.429  
   1.430  #if CONFIG_PAGING_LEVELS == 3
   1.431  static void p2m_install_entry_in_monitors(struct domain *d, l3_pgentry_t *l3e) 
   1.432 -/* Special case, only used for external-mode domains on PAE hosts:
   1.433 - * update the mapping of the p2m table.  Once again, this is trivial in
   1.434 - * other paging modes (one top-level entry points to the top-level p2m,
   1.435 - * no maintenance needed), but PAE makes life difficult by needing a
   1.436 - * copy l3es of the p2m table in eight l2h slots in the monitor table.  This 
   1.437 - * function makes fresh copies when a p2m l3e changes. */
   1.438 +/* Special case, only used for PAE hosts: update the mapping of the p2m
   1.439 + * table.  This is trivial in other paging modes (one top-level entry
   1.440 + * points to the top-level p2m, no maintenance needed), but PAE makes
   1.441 + * life difficult by needing a copy of the p2m table in eight l2h slots
   1.442 + * in the monitor table.  This function makes fresh copies when a p2m
   1.443 + * l3e changes. */
   1.444  {
   1.445      l2_pgentry_t *ml2e;
   1.446      struct vcpu *v;
     2.1 --- a/xen/include/asm-x86/domain.h	Wed Jul 18 10:04:46 2007 +0100
     2.2 +++ b/xen/include/asm-x86/domain.h	Wed Jul 18 13:56:00 2007 +0100
     2.3 @@ -117,7 +117,7 @@ struct hap_domain {
     2.4      int               locker;
     2.5      const char       *locker_function;
     2.6      
     2.7 -    struct list_head  freelists;
     2.8 +    struct list_head  freelist;
     2.9      unsigned int      total_pages;  /* number of pages allocated */
    2.10      unsigned int      free_pages;   /* number of pages on freelists */
    2.11      unsigned int      p2m_pages;    /* number of pages allocates to p2m */