ia64/xen-unstable

changeset 11973:041507e2754c

[XEN] Make memory hypercalls NUMA-aware.

This patch modifies the memory ops to use the NUMA-aware page allocator
functions. We use the target domain's VCPU0 placement to determine
which node's memory to allocate from. We expect the system administrator
to use the exposed NUMA topology information to help craft guest
config files that are NUMA-friendly (requesting only processors and memory
amounts that fit within a given node).
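
In summary, each modified hypercall path applies the same pattern: derive a
locality hint from the domain's first VCPU and pass it to the CPU-aware
allocator. A minimal sketch of that pattern is shown below; numa_local_alloc
is a hypothetical helper used only for illustration (the real call sites are
inline, as the diff shows), and the allocator signature is the one visible in
the diff.

    /*
     * Sketch only: allocate domain-heap pages near the node that hosts
     * the domain's first VCPU.  Error handling and the surrounding
     * hypercall logic are elided; assumes d->vcpu[0] exists, as it does
     * for any running domain.
     */
    static struct page_info *numa_local_alloc(struct domain *d,
                                              unsigned int extent_order,
                                              unsigned int memflags)
    {
        /* Use the domain's first processor as the locality parameter;
         * the NUMA-aware allocator prefers memory from that CPU's node. */
        unsigned int cpu = d->vcpu[0]->processor;

        return __alloc_domheap_pages(d, cpu, extent_order, memflags);
    }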

Signed-off-by: Ryan Harper <ryanh@us.ibm.com>
author kfraser@localhost.localdomain
date Wed Oct 25 12:30:08 2006 +0100 (2006-10-25)
parents cf95c3218a70
children e715360e82f8
files xen/common/memory.c
--- a/xen/common/memory.c	Wed Oct 25 12:28:46 2006 +0100
+++ b/xen/common/memory.c	Wed Oct 25 12:30:08 2006 +0100
@@ -41,6 +41,8 @@ increase_reservation(
     struct page_info *page;
     unsigned long i;
     xen_pfn_t mfn;
+    /* use domain's first processor for locality parameter */
+    unsigned int cpu = d->vcpu[0]->processor;
 
     if ( !guest_handle_is_null(extent_list) &&
          !guest_handle_okay(extent_list, nr_extents) )
@@ -58,8 +60,8 @@ increase_reservation(
             return i;
         }
 
-        if ( unlikely((page = alloc_domheap_pages(
-            d, extent_order, memflags)) == NULL) )
+        if ( unlikely((page = __alloc_domheap_pages( d, cpu, 
+            extent_order, memflags )) == NULL) ) 
         {
             DPRINTK("Could not allocate order=%d extent: "
                     "id=%d memflags=%x (%ld of %d)\n",
@@ -92,6 +94,8 @@ populate_physmap(
     unsigned long i, j;
     xen_pfn_t gpfn;
    xen_pfn_t mfn;
+    /* use domain's first processor for locality parameter */
+    unsigned int cpu = d->vcpu[0]->processor;
 
     if ( !guest_handle_okay(extent_list, nr_extents) )
         return 0;
@@ -111,8 +115,8 @@ populate_physmap(
         if ( unlikely(__copy_from_guest_offset(&gpfn, extent_list, i, 1)) )
             goto out;
 
-        if ( unlikely((page = alloc_domheap_pages(
-            d, extent_order, memflags)) == NULL) )
+        if ( unlikely((page = __alloc_domheap_pages( d, cpu, 
+            extent_order, memflags )) == NULL) ) 
         {
             DPRINTK("Could not allocate order=%d extent: "
                     "id=%d memflags=%x (%ld of %d)\n",
@@ -294,7 +298,7 @@ memory_exchange(XEN_GUEST_HANDLE(xen_mem
     unsigned long in_chunk_order, out_chunk_order;
     xen_pfn_t     gpfn, gmfn, mfn;
     unsigned long i, j, k;
-    unsigned int  memflags = 0;
+    unsigned int  memflags = 0, cpu;
     long          rc = 0;
     struct domain *d;
     struct page_info *page;
@@ -368,6 +372,9 @@ memory_exchange(XEN_GUEST_HANDLE(xen_mem
     }
     d = current->domain;
 
+    /* use domain's first processor for locality parameter */
+    cpu = d->vcpu[0]->processor;
+
     for ( i = 0; i < (exch.in.nr_extents >> in_chunk_order); i++ )
     {
         if ( hypercall_preempt_check() )
@@ -413,8 +420,8 @@ memory_exchange(XEN_GUEST_HANDLE(xen_mem
         /* Allocate a chunk's worth of anonymous output pages. */
         for ( j = 0; j < (1UL << out_chunk_order); j++ )
         {
-            page = alloc_domheap_pages(
-                NULL, exch.out.extent_order, memflags);
+            page = __alloc_domheap_pages( NULL, cpu, 
+                  exch.out.extent_order, memflags);
             if ( unlikely(page == NULL) )
             {
                 rc = -ENOMEM;