ia64/xen-unstable

changeset 17385:57febe0264e1

xen: Allow a NUMA node to be specified to alloc_domheap_pages() via a
new MEMF_node() sub-flag type.

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Apr 04 10:48:01 2008 +0100 (2008-04-04)
parents 0834234fd668
children e52bf1822059
files xen/arch/ia64/xen/mm.c xen/arch/ia64/xen/tlb_track.c xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/hvm/stdvga.c xen/arch/x86/hvm/vlapic.c xen/arch/x86/mm/hap/hap.c xen/arch/x86/mm/paging.c xen/arch/x86/mm/shadow/common.c xen/arch/x86/x86_64/mm.c xen/common/grant_table.c xen/common/memory.c xen/common/page_alloc.c xen/drivers/passthrough/vtd/iommu.c xen/include/asm-x86/numa.h xen/include/xen/mm.h xen/include/xen/numa.h
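
In short, this changeset drops the __alloc_domheap_pages() variant that took
an explicit CPU and instead threads a NUMA hint through memflags:
MEMF_node(n) requests node n, a plain 0 keeps the old no-preference
behaviour (which is why most call sites below simply gain a 0 argument), and
NUMA-aware sites pass the owning domain's node. A sketch of the new calling
convention follows; the wrapper function is hypothetical, while the called
names are the real ones from this patch:

    /* Hypothetical caller of the new API: ask for a page on the
     * domain's home node; the allocator treats an absent or unusable
     * node hint as "use the current CPU's node". */
    static struct page_info *alloc_page_near_domain(struct domain *d)
    {
        return alloc_domheap_page(d, MEMF_node(domain_to_node(d)));
    }
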
     1.1 --- a/xen/arch/ia64/xen/mm.c	Fri Apr 04 10:18:45 2008 +0100
     1.2 +++ b/xen/arch/ia64/xen/mm.c	Fri Apr 04 10:48:01 2008 +0100
     1.3 @@ -820,7 +820,7 @@ static struct page_info *
     1.4  
     1.5      BUG_ON(!pte_none(*pte));
     1.6  
     1.7 -    p = alloc_domheap_page(d);
     1.8 +    p = alloc_domheap_page(d, 0);
     1.9      if (unlikely(!p)) {
    1.10          printk("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
    1.11          return(p);
    1.12 @@ -2315,7 +2315,7 @@ steal_page(struct domain *d, struct page
    1.13          unsigned long new_mfn;
    1.14          int ret;
    1.15  
    1.16 -        new = alloc_domheap_page(d);
    1.17 +        new = alloc_domheap_page(d, 0);
    1.18          if (new == NULL) {
    1.19              gdprintk(XENLOG_INFO, "alloc_domheap_page() failed\n");
    1.20              return -1;
    1.21 @@ -2602,7 +2602,7 @@ void *pgtable_quicklist_alloc(void)
    1.22  
    1.23      BUG_ON(dom_p2m == NULL);
    1.24      if (!opt_p2m_xenheap) {
    1.25 -        struct page_info *page = alloc_domheap_page(dom_p2m);
    1.26 +        struct page_info *page = alloc_domheap_page(dom_p2m, 0);
    1.27          if (page == NULL)
    1.28              return NULL;
    1.29          p = page_to_virt(page);
     2.1 --- a/xen/arch/ia64/xen/tlb_track.c	Fri Apr 04 10:18:45 2008 +0100
     2.2 +++ b/xen/arch/ia64/xen/tlb_track.c	Fri Apr 04 10:48:01 2008 +0100
     2.3 @@ -48,7 +48,7 @@ tlb_track_allocate_entries(struct tlb_tr
     2.4                  __func__, tlb_track->num_entries, tlb_track->limit);
     2.5          return -ENOMEM;
     2.6      }
     2.7 -    entry_page = alloc_domheap_page(NULL);
     2.8 +    entry_page = alloc_domheap_page(NULL, 0);
     2.9      if (entry_page == NULL) {
    2.10          dprintk(XENLOG_WARNING,
    2.11                  "%s: domheap page failed. num_entries %d limit %d\n",
    2.12 @@ -84,7 +84,7 @@ tlb_track_create(struct domain* d)
    2.13      if (tlb_track == NULL)
    2.14          goto out;
    2.15  
    2.16 -    hash_page = alloc_domheap_page(NULL);
    2.17 +    hash_page = alloc_domheap_page(NULL, 0);
    2.18      if (hash_page == NULL)
    2.19          goto out;
    2.20  
     3.1 --- a/xen/arch/x86/domain.c	Fri Apr 04 10:18:45 2008 +0100
     3.2 +++ b/xen/arch/x86/domain.c	Fri Apr 04 10:48:01 2008 +0100
     3.3 @@ -46,6 +46,7 @@
     3.4  #include <asm/debugreg.h>
     3.5  #include <asm/msr.h>
     3.6  #include <asm/nmi.h>
     3.7 +#include <xen/numa.h>
     3.8  #include <xen/iommu.h>
     3.9  #ifdef CONFIG_COMPAT
    3.10  #include <compat/vcpu.h>
    3.11 @@ -171,7 +172,7 @@ int setup_arg_xlat_area(struct vcpu *v, 
    3.12  
    3.13      if ( !d->arch.mm_arg_xlat_l3 )
    3.14      {
    3.15 -        pg = alloc_domheap_page(NULL);
    3.16 +        pg = alloc_domheap_page(NULL, 0);
    3.17          if ( !pg )
    3.18              return -ENOMEM;
    3.19          d->arch.mm_arg_xlat_l3 = page_to_virt(pg);
    3.20 @@ -189,7 +190,7 @@ int setup_arg_xlat_area(struct vcpu *v, 
    3.21  
    3.22          if ( !l3e_get_intpte(d->arch.mm_arg_xlat_l3[l3_table_offset(va)]) )
    3.23          {
    3.24 -            pg = alloc_domheap_page(NULL);
    3.25 +            pg = alloc_domheap_page(NULL, 0);
    3.26              if ( !pg )
    3.27                  return -ENOMEM;
    3.28              clear_page(page_to_virt(pg));
    3.29 @@ -198,7 +199,7 @@ int setup_arg_xlat_area(struct vcpu *v, 
    3.30          l2tab = l3e_to_l2e(d->arch.mm_arg_xlat_l3[l3_table_offset(va)]);
    3.31          if ( !l2e_get_intpte(l2tab[l2_table_offset(va)]) )
    3.32          {
    3.33 -            pg = alloc_domheap_page(NULL);
    3.34 +            pg = alloc_domheap_page(NULL, 0);
    3.35              if ( !pg )
    3.36                  return -ENOMEM;
    3.37              clear_page(page_to_virt(pg));
    3.38 @@ -206,7 +207,7 @@ int setup_arg_xlat_area(struct vcpu *v, 
    3.39          }
    3.40          l1tab = l2e_to_l1e(l2tab[l2_table_offset(va)]);
    3.41          BUG_ON(l1e_get_intpte(l1tab[l1_table_offset(va)]));
    3.42 -        pg = alloc_domheap_page(NULL);
    3.43 +        pg = alloc_domheap_page(NULL, 0);
    3.44          if ( !pg )
    3.45              return -ENOMEM;
    3.46          l1tab[l1_table_offset(va)] = l1e_from_page(pg, PAGE_HYPERVISOR);
    3.47 @@ -252,7 +253,7 @@ static void release_arg_xlat_area(struct
    3.48  
    3.49  static int setup_compat_l4(struct vcpu *v)
    3.50  {
    3.51 -    struct page_info *pg = alloc_domheap_page(NULL);
    3.52 +    struct page_info *pg = alloc_domheap_page(NULL, 0);
    3.53      l4_pgentry_t *l4tab;
    3.54      int rc;
    3.55  
    3.56 @@ -477,7 +478,8 @@ int arch_domain_create(struct domain *d,
    3.57  
    3.58  #else /* __x86_64__ */
    3.59  
    3.60 -    if ( (pg = alloc_domheap_page(NULL)) == NULL )
    3.61 +    pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
    3.62 +    if ( pg == NULL )
    3.63          goto fail;
    3.64      d->arch.mm_perdomain_l2 = page_to_virt(pg);
    3.65      clear_page(d->arch.mm_perdomain_l2);
    3.66 @@ -486,7 +488,8 @@ int arch_domain_create(struct domain *d,
    3.67              l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt)+i,
    3.68                            __PAGE_HYPERVISOR);
    3.69  
    3.70 -    if ( (pg = alloc_domheap_page(NULL)) == NULL )
    3.71 +    pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
    3.72 +    if ( pg == NULL )
    3.73          goto fail;
    3.74      d->arch.mm_perdomain_l3 = page_to_virt(pg);
    3.75      clear_page(d->arch.mm_perdomain_l3);
     4.1 --- a/xen/arch/x86/domain_build.c	Fri Apr 04 10:18:45 2008 +0100
     4.2 +++ b/xen/arch/x86/domain_build.c	Fri Apr 04 10:48:01 2008 +0100
     4.3 @@ -630,7 +630,7 @@ int __init construct_dom0(
     4.4      }
     4.5      else
     4.6      {
     4.7 -        page = alloc_domheap_page(NULL);
     4.8 +        page = alloc_domheap_page(NULL, 0);
     4.9          if ( !page )
    4.10              panic("Not enough RAM for domain 0 PML4.\n");
    4.11          l4start = l4tab = page_to_virt(page);
     5.1 --- a/xen/arch/x86/hvm/stdvga.c	Fri Apr 04 10:18:45 2008 +0100
     5.2 +++ b/xen/arch/x86/hvm/stdvga.c	Fri Apr 04 10:48:01 2008 +0100
     5.3 @@ -32,6 +32,7 @@
     5.4  #include <xen/sched.h>
     5.5  #include <xen/domain_page.h>
     5.6  #include <asm/hvm/support.h>
     5.7 +#include <xen/numa.h>
     5.8  
     5.9  #define PAT(x) (x)
    5.10  static const uint32_t mask16[16] = {
    5.11 @@ -513,7 +514,8 @@ void stdvga_init(struct domain *d)
    5.12      
    5.13      for ( i = 0; i != ARRAY_SIZE(s->vram_page); i++ )
    5.14      {
    5.15 -        if ( (pg = alloc_domheap_page(NULL)) == NULL )
    5.16 +        pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
    5.17 +        if ( pg == NULL )
    5.18              break;
    5.19          s->vram_page[i] = pg;
    5.20          p = map_domain_page(page_to_mfn(pg));
     6.1 --- a/xen/arch/x86/hvm/vlapic.c	Fri Apr 04 10:18:45 2008 +0100
     6.2 +++ b/xen/arch/x86/hvm/vlapic.c	Fri Apr 04 10:48:01 2008 +0100
     6.3 @@ -33,6 +33,7 @@
     6.4  #include <xen/sched.h>
     6.5  #include <asm/current.h>
     6.6  #include <asm/hvm/vmx/vmx.h>
     6.7 +#include <xen/numa.h>
     6.8  #include <public/hvm/ioreq.h>
     6.9  #include <public/hvm/params.h>
    6.10  
    6.11 @@ -916,7 +917,7 @@ HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, la
    6.12  int vlapic_init(struct vcpu *v)
    6.13  {
    6.14      struct vlapic *vlapic = vcpu_vlapic(v);
    6.15 -    unsigned int memflags = 0;
    6.16 +    unsigned int memflags = MEMF_node(vcpu_to_node(v));
    6.17  
    6.18      HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "%d", v->vcpu_id);
    6.19  
    6.20 @@ -925,10 +926,10 @@ int vlapic_init(struct vcpu *v)
    6.21  #ifdef __i386__
    6.22      /* 32-bit VMX may be limited to 32-bit physical addresses. */
    6.23      if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
    6.24 -        memflags = MEMF_bits(32);
    6.25 +        memflags |= MEMF_bits(32);
    6.26  #endif
    6.27  
    6.28 -    vlapic->regs_page = alloc_domheap_pages(NULL, 0, memflags);
    6.29 +    vlapic->regs_page = alloc_domheap_page(NULL, memflags);
    6.30      if ( vlapic->regs_page == NULL )
    6.31      {
    6.32          dprintk(XENLOG_ERR, "alloc vlapic regs error: %d/%d\n",
    6.33 @@ -941,7 +942,7 @@ int vlapic_init(struct vcpu *v)
    6.34      {
    6.35          dprintk(XENLOG_ERR, "map vlapic regs error: %d/%d\n",
    6.36                  v->domain->domain_id, v->vcpu_id);
    6.37 -	return -ENOMEM;
    6.38 +        return -ENOMEM;
    6.39      }
    6.40  
    6.41      clear_page(vlapic->regs);
     7.1 --- a/xen/arch/x86/mm/hap/hap.c	Fri Apr 04 10:18:45 2008 +0100
     7.2 +++ b/xen/arch/x86/mm/hap/hap.c	Fri Apr 04 10:48:01 2008 +0100
     7.3 @@ -38,6 +38,7 @@
     7.4  #include <asm/hap.h>
     7.5  #include <asm/paging.h>
     7.6  #include <asm/domain.h>
     7.7 +#include <xen/numa.h>
     7.8  
     7.9  #include "private.h"
    7.10  
    7.11 @@ -135,7 +136,8 @@ static struct page_info *hap_alloc_p2m_p
    7.12           && mfn_x(page_to_mfn(pg)) >= (1UL << (32 - PAGE_SHIFT)) )
    7.13      {
    7.14          free_domheap_page(pg);
    7.15 -        pg = alloc_domheap_pages(NULL, 0, MEMF_bits(32));
    7.16 +        pg = alloc_domheap_page(
    7.17 +            NULL, MEMF_bits(32) | MEMF_node(domain_to_node(d)));
    7.18          if ( likely(pg != NULL) )
    7.19          {
    7.20              void *p = hap_map_domain_page(page_to_mfn(pg));
    7.21 @@ -199,7 +201,7 @@ hap_set_allocation(struct domain *d, uns
    7.22          if ( d->arch.paging.hap.total_pages < pages )
    7.23          {
    7.24              /* Need to allocate more memory from domheap */
    7.25 -            pg = alloc_domheap_page(NULL);
    7.26 +            pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
    7.27              if ( pg == NULL )
    7.28              {
    7.29                  HAP_PRINTK("failed to allocate hap pages.\n");
     8.1 --- a/xen/arch/x86/mm/paging.c	Fri Apr 04 10:18:45 2008 +0100
     8.2 +++ b/xen/arch/x86/mm/paging.c	Fri Apr 04 10:48:01 2008 +0100
     8.3 @@ -26,6 +26,7 @@
     8.4  #include <asm/p2m.h>
     8.5  #include <asm/hap.h>
     8.6  #include <asm/guest_access.h>
     8.7 +#include <xen/numa.h>
     8.8  #include <xsm/xsm.h>
     8.9  
    8.10  #define hap_enabled(d) (is_hvm_domain(d) && (d)->arch.hvm_domain.hap_enabled)
    8.11 @@ -99,8 +100,9 @@
    8.12  static mfn_t paging_new_log_dirty_page(struct domain *d, void **mapping_p)
    8.13  {
    8.14      mfn_t mfn;
    8.15 -    struct page_info *page = alloc_domheap_page(NULL);
    8.16 +    struct page_info *page;
    8.17  
    8.18 +    page = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
    8.19      if ( unlikely(page == NULL) )
    8.20      {
    8.21          d->arch.paging.log_dirty.failed_allocs++;
     9.1 --- a/xen/arch/x86/mm/shadow/common.c	Fri Apr 04 10:18:45 2008 +0100
     9.2 +++ b/xen/arch/x86/mm/shadow/common.c	Fri Apr 04 10:48:01 2008 +0100
     9.3 @@ -36,6 +36,7 @@
     9.4  #include <asm/current.h>
     9.5  #include <asm/flushtlb.h>
     9.6  #include <asm/shadow.h>
     9.7 +#include <xen/numa.h>
     9.8  #include "private.h"
     9.9  
    9.10  
    9.11 @@ -1249,7 +1250,7 @@ static unsigned int sh_set_allocation(st
    9.12          {
    9.13              /* Need to allocate more memory from domheap */
    9.14              sp = (struct shadow_page_info *)
    9.15 -                alloc_domheap_pages(NULL, order, 0);
    9.16 +                alloc_domheap_pages(NULL, order, MEMF_node(domain_to_node(d)));
    9.17              if ( sp == NULL ) 
    9.18              { 
    9.19                  SHADOW_PRINTK("failed to allocate shadow pages.\n");
    10.1 --- a/xen/arch/x86/x86_64/mm.c	Fri Apr 04 10:18:45 2008 +0100
    10.2 +++ b/xen/arch/x86/x86_64/mm.c	Fri Apr 04 10:48:01 2008 +0100
    10.3 @@ -59,7 +59,7 @@ void *alloc_xen_pagetable(void)
    10.4  
    10.5      if ( !early_boot )
    10.6      {
    10.7 -        struct page_info *pg = alloc_domheap_page(NULL);
    10.8 +        struct page_info *pg = alloc_domheap_page(NULL, 0);
    10.9          BUG_ON(pg == NULL);
   10.10          return page_to_virt(pg);
   10.11      }
   10.12 @@ -108,7 +108,7 @@ void __init paging_init(void)
   10.13      struct page_info *l1_pg, *l2_pg, *l3_pg;
   10.14  
   10.15      /* Create user-accessible L2 directory to map the MPT for guests. */
   10.16 -    if ( (l3_pg = alloc_domheap_page(NULL)) == NULL )
   10.17 +    if ( (l3_pg = alloc_domheap_page(NULL, 0)) == NULL )
   10.18          goto nomem;
   10.19      l3_ro_mpt = page_to_virt(l3_pg);
   10.20      clear_page(l3_ro_mpt);
   10.21 @@ -134,7 +134,7 @@ void __init paging_init(void)
   10.22                 1UL << L2_PAGETABLE_SHIFT);
   10.23          if ( !((unsigned long)l2_ro_mpt & ~PAGE_MASK) )
   10.24          {
   10.25 -            if ( (l2_pg = alloc_domheap_page(NULL)) == NULL )
   10.26 +            if ( (l2_pg = alloc_domheap_page(NULL, 0)) == NULL )
   10.27                  goto nomem;
   10.28              va = RO_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT);
   10.29              l2_ro_mpt = page_to_virt(l2_pg);
   10.30 @@ -154,7 +154,7 @@ void __init paging_init(void)
   10.31                   l4_table_offset(HIRO_COMPAT_MPT_VIRT_START));
   10.32      l3_ro_mpt = l4e_to_l3e(idle_pg_table[l4_table_offset(
   10.33          HIRO_COMPAT_MPT_VIRT_START)]);
   10.34 -    if ( (l2_pg = alloc_domheap_page(NULL)) == NULL )
   10.35 +    if ( (l2_pg = alloc_domheap_page(NULL, 0)) == NULL )
   10.36          goto nomem;
   10.37      compat_idle_pg_table_l2 = l2_ro_mpt = page_to_virt(l2_pg);
   10.38      clear_page(l2_ro_mpt);
    11.1 --- a/xen/common/grant_table.c	Fri Apr 04 10:18:45 2008 +0100
    11.2 +++ b/xen/common/grant_table.c	Fri Apr 04 10:48:01 2008 +0100
    11.3 @@ -1102,7 +1102,7 @@ gnttab_transfer(
    11.4              struct page_info *new_page;
    11.5              void *sp, *dp;
    11.6  
    11.7 -            new_page = alloc_domheap_pages(NULL, 0, MEMF_bits(max_bitsize));
    11.8 +            new_page = alloc_domheap_page(NULL, MEMF_bits(max_bitsize));
    11.9              if ( new_page == NULL )
   11.10              {
   11.11                  gop.status = GNTST_address_too_big;
    12.1 --- a/xen/common/memory.c	Fri Apr 04 10:18:45 2008 +0100
    12.2 +++ b/xen/common/memory.c	Fri Apr 04 10:48:01 2008 +0100
    12.3 @@ -21,6 +21,7 @@
    12.4  #include <xen/errno.h>
    12.5  #include <asm/current.h>
    12.6  #include <asm/hardirq.h>
    12.7 +#include <xen/numa.h>
    12.8  #include <public/memory.h>
    12.9  #include <xsm/xsm.h>
   12.10  
   12.11 @@ -37,19 +38,13 @@ struct memop_args {
   12.12      int          preempted;  /* Was the hypercall preempted? */
   12.13  };
   12.14  
   12.15 -static unsigned int select_local_cpu(struct domain *d)
   12.16 -{
   12.17 -    struct vcpu *v = d->vcpu[0];
   12.18 -    return (v ? v->processor : 0);
   12.19 -}
   12.20 -
   12.21  static void increase_reservation(struct memop_args *a)
   12.22  {
   12.23      struct page_info *page;
   12.24      unsigned long i;
   12.25      xen_pfn_t mfn;
   12.26      struct domain *d = a->domain;
   12.27 -    unsigned int cpu = select_local_cpu(d);
   12.28 +    unsigned int node = domain_to_node(d);
   12.29  
   12.30      if ( !guest_handle_is_null(a->extent_list) &&
   12.31           !guest_handle_okay(a->extent_list, a->nr_extents) )
   12.32 @@ -67,7 +62,8 @@ static void increase_reservation(struct 
   12.33              goto out;
   12.34          }
   12.35  
   12.36 -        page = __alloc_domheap_pages(d, cpu, a->extent_order, a->memflags);
   12.37 +        page = alloc_domheap_pages(
   12.38 +            d, a->extent_order, a->memflags | MEMF_node(node));
   12.39          if ( unlikely(page == NULL) ) 
   12.40          {
   12.41              gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
   12.42 @@ -96,7 +92,7 @@ static void populate_physmap(struct memo
   12.43      unsigned long i, j;
   12.44      xen_pfn_t gpfn, mfn;
   12.45      struct domain *d = a->domain;
   12.46 -    unsigned int cpu = select_local_cpu(d);
   12.47 +    unsigned int node = domain_to_node(d);
   12.48  
   12.49      if ( !guest_handle_okay(a->extent_list, a->nr_extents) )
   12.50          return;
   12.51 @@ -116,7 +112,8 @@ static void populate_physmap(struct memo
   12.52          if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
   12.53              goto out;
   12.54  
   12.55 -        page = __alloc_domheap_pages(d, cpu, a->extent_order, a->memflags);
   12.56 +        page = alloc_domheap_pages(
   12.57 +            d, a->extent_order, a->memflags | MEMF_node(node));
   12.58          if ( unlikely(page == NULL) ) 
   12.59          {
   12.60              gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
   12.61 @@ -296,7 +293,7 @@ static long memory_exchange(XEN_GUEST_HA
   12.62      unsigned long in_chunk_order, out_chunk_order;
   12.63      xen_pfn_t     gpfn, gmfn, mfn;
   12.64      unsigned long i, j, k;
   12.65 -    unsigned int  memflags = 0, cpu;
   12.66 +    unsigned int  memflags = 0;
   12.67      long          rc = 0;
   12.68      struct domain *d;
   12.69      struct page_info *page;
   12.70 @@ -351,8 +348,7 @@ static long memory_exchange(XEN_GUEST_HA
   12.71  
   12.72      memflags |= MEMF_bits(domain_clamp_alloc_bitsize(
   12.73          d, exch.out.address_bits ? : (BITS_PER_LONG+PAGE_SHIFT)));
   12.74 -
   12.75 -    cpu = select_local_cpu(d);
   12.76 +    memflags |= MEMF_node(domain_to_node(d));
   12.77  
   12.78      for ( i = (exch.nr_exchanged >> in_chunk_order);
   12.79            i < (exch.in.nr_extents >> in_chunk_order);
   12.80 @@ -401,8 +397,7 @@ static long memory_exchange(XEN_GUEST_HA
   12.81          /* Allocate a chunk's worth of anonymous output pages. */
   12.82          for ( j = 0; j < (1UL << out_chunk_order); j++ )
   12.83          {
   12.84 -            page = __alloc_domheap_pages(
   12.85 -                NULL, cpu, exch.out.extent_order, memflags);
   12.86 +            page = alloc_domheap_pages(NULL, exch.out.extent_order, memflags);
   12.87              if ( unlikely(page == NULL) )
   12.88              {
   12.89                  rc = -ENOMEM;
    13.1 --- a/xen/common/page_alloc.c	Fri Apr 04 10:18:45 2008 +0100
    13.2 +++ b/xen/common/page_alloc.c	Fri Apr 04 10:48:01 2008 +0100
    13.3 @@ -36,6 +36,7 @@
    13.4  #include <xen/numa.h>
    13.5  #include <xen/nodemask.h>
    13.6  #include <asm/page.h>
    13.7 +#include <asm/numa.h>
    13.8  #include <asm/flushtlb.h>
    13.9  
   13.10  /*
   13.11 @@ -328,14 +329,17 @@ static void init_node_heap(int node)
   13.12  /* Allocate 2^@order contiguous pages. */
   13.13  static struct page_info *alloc_heap_pages(
   13.14      unsigned int zone_lo, unsigned int zone_hi,
   13.15 -    unsigned int cpu, unsigned int order)
   13.16 +    unsigned int node, unsigned int order)
   13.17  {
   13.18      unsigned int i, j, zone;
   13.19 -    unsigned int node = cpu_to_node(cpu), num_nodes = num_online_nodes();
   13.20 +    unsigned int num_nodes = num_online_nodes();
   13.21      unsigned long request = 1UL << order;
   13.22      cpumask_t extra_cpus_mask, mask;
   13.23      struct page_info *pg;
   13.24  
   13.25 +    if ( node == NUMA_NO_NODE )
   13.26 +        node = cpu_to_node(smp_processor_id());
   13.27 +
   13.28      ASSERT(node >= 0);
   13.29      ASSERT(node < num_nodes);
   13.30      ASSERT(zone_lo <= zone_hi);
   13.31 @@ -670,7 +674,8 @@ void *alloc_xenheap_pages(unsigned int o
   13.32  
   13.33      ASSERT(!in_irq());
   13.34  
   13.35 -    pg = alloc_heap_pages(MEMZONE_XEN, MEMZONE_XEN, smp_processor_id(), order);
   13.36 +    pg = alloc_heap_pages(
   13.37 +        MEMZONE_XEN, MEMZONE_XEN, cpu_to_node(smp_processor_id()), order);
   13.38      if ( unlikely(pg == NULL) )
   13.39          goto no_memory;
   13.40  
   13.41 @@ -778,12 +783,12 @@ int assign_pages(
   13.42  }
   13.43  
   13.44  
   13.45 -struct page_info *__alloc_domheap_pages(
   13.46 -    struct domain *d, unsigned int cpu, unsigned int order, 
   13.47 -    unsigned int memflags)
   13.48 +struct page_info *alloc_domheap_pages(
   13.49 +    struct domain *d, unsigned int order, unsigned int memflags)
   13.50  {
   13.51      struct page_info *pg = NULL;
   13.52      unsigned int bits = memflags >> _MEMF_bits, zone_hi = NR_ZONES - 1;
   13.53 +    unsigned int node = (uint8_t)((memflags >> _MEMF_node) - 1);
   13.54  
   13.55      ASSERT(!in_irq());
   13.56  
   13.57 @@ -797,7 +802,7 @@ struct page_info *__alloc_domheap_pages(
   13.58  
   13.59      if ( (zone_hi + PAGE_SHIFT) >= dma_bitsize )
   13.60      {
   13.61 -        pg = alloc_heap_pages(dma_bitsize - PAGE_SHIFT, zone_hi, cpu, order);
   13.62 +        pg = alloc_heap_pages(dma_bitsize - PAGE_SHIFT, zone_hi, node, order);
   13.63  
   13.64          /* Failure? Then check if we can fall back to the DMA pool. */
   13.65          if ( unlikely(pg == NULL) &&
   13.66 @@ -811,7 +816,7 @@ struct page_info *__alloc_domheap_pages(
   13.67  
   13.68      if ( (pg == NULL) &&
   13.69           ((pg = alloc_heap_pages(MEMZONE_XEN + 1, zone_hi,
   13.70 -                                 cpu, order)) == NULL) )
   13.71 +                                 node, order)) == NULL) )
   13.72           return NULL;
   13.73  
   13.74      if ( (d != NULL) && assign_pages(d, pg, order, memflags) )
   13.75 @@ -823,12 +828,6 @@ struct page_info *__alloc_domheap_pages(
   13.76      return pg;
   13.77  }
   13.78  
   13.79 -struct page_info *alloc_domheap_pages(
   13.80 -    struct domain *d, unsigned int order, unsigned int flags)
   13.81 -{
   13.82 -    return __alloc_domheap_pages(d, smp_processor_id(), order, flags);
   13.83 -}
   13.84 -
   13.85  void free_domheap_pages(struct page_info *pg, unsigned int order)
   13.86  {
   13.87      int            i, drop_dom_ref;
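
An aside on the encoding the page_alloc.c hunk above decodes with
(uint8_t)((memflags >> _MEMF_node) - 1): MEMF_node() stores the node biased
by +1, so a memflags value with no node bits set decodes to NUMA_NO_NODE,
which alloc_heap_pages() then resolves to the current CPU's node. A minimal
standalone check of that round-trip, assuming only the macro values from
this patch (the main() harness is illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Values copied from this patch (xen/include/xen/mm.h and
     * xen/include/xen/numa.h); the test harness itself is illustrative. */
    #define NUMA_NO_NODE    0xFF
    #define _MEMF_node      8
    #define MEMF_node(n)    ((((n)+1)&0xff)<<_MEMF_node)

    int main(void)
    {
        /* The node is stored biased by +1, so it round-trips... */
        unsigned int flags = MEMF_node(3);
        unsigned int node  = (uint8_t)((flags >> _MEMF_node) - 1);
        assert(node == 3);

        /* ...memflags == 0 decodes to "no node requested"... */
        node = (uint8_t)((0u >> _MEMF_node) - 1);
        assert(node == NUMA_NO_NODE);

        /* ...and requesting NUMA_NO_NODE explicitly sets no flag bits. */
        assert(MEMF_node(NUMA_NO_NODE) == 0);
        return 0;
    }
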
    14.1 --- a/xen/drivers/passthrough/vtd/iommu.c	Fri Apr 04 10:18:45 2008 +0100
    14.2 +++ b/xen/drivers/passthrough/vtd/iommu.c	Fri Apr 04 10:48:01 2008 +0100
    14.3 @@ -24,6 +24,7 @@
    14.4  #include <xen/xmalloc.h>
    14.5  #include <xen/domain_page.h>
    14.6  #include <xen/iommu.h>
    14.7 +#include <xen/numa.h>
    14.8  #include "iommu.h"
    14.9  #include "dmar.h"
   14.10  #include "../pci-direct.h"
   14.11 @@ -269,7 +270,8 @@ static struct page_info *addr_to_dma_pag
   14.12  
   14.13          if ( dma_pte_addr(*pte) == 0 )
   14.14          {
   14.15 -            pg = alloc_domheap_page(NULL);
   14.16 +            pg = alloc_domheap_page(
   14.17 +                NULL, MEMF_node(domain_to_node(domain)));
   14.18              vaddr = map_domain_page(page_to_mfn(pg));
   14.19              if ( !vaddr )
   14.20              {
    15.1 --- a/xen/include/asm-x86/numa.h	Fri Apr 04 10:18:45 2008 +0100
    15.2 +++ b/xen/include/asm-x86/numa.h	Fri Apr 04 10:48:01 2008 +0100
    15.3 @@ -73,6 +73,5 @@ static inline __attribute__((pure)) int 
    15.4  #define clear_node_cpumask(cpu) do {} while (0)
    15.5  #endif
    15.6  
    15.7 -#define NUMA_NO_NODE 0xff
    15.8  
    15.9  #endif
    16.1 --- a/xen/include/xen/mm.h	Fri Apr 04 10:18:45 2008 +0100
    16.2 +++ b/xen/include/xen/mm.h	Fri Apr 04 10:48:01 2008 +0100
    16.3 @@ -54,14 +54,11 @@ void free_xenheap_pages(void *v, unsigne
    16.4  void init_domheap_pages(paddr_t ps, paddr_t pe);
    16.5  struct page_info *alloc_domheap_pages(
    16.6      struct domain *d, unsigned int order, unsigned int memflags);
    16.7 -struct page_info *__alloc_domheap_pages(
    16.8 -    struct domain *d, unsigned int cpu, unsigned int order, 
    16.9 -    unsigned int memflags);
   16.10  void free_domheap_pages(struct page_info *pg, unsigned int order);
   16.11  unsigned long avail_domheap_pages_region(
   16.12      unsigned int node, unsigned int min_width, unsigned int max_width);
   16.13  unsigned long avail_domheap_pages(void);
   16.14 -#define alloc_domheap_page(d) (alloc_domheap_pages(d,0,0))
   16.15 +#define alloc_domheap_page(d,f) (alloc_domheap_pages(d,0,f))
   16.16  #define free_domheap_page(p)  (free_domheap_pages(p,0))
   16.17  
   16.18  void scrub_heap_pages(void);
   16.19 @@ -75,6 +72,8 @@ int assign_pages(
   16.20  /* memflags: */
   16.21  #define _MEMF_no_refcount 0
   16.22  #define  MEMF_no_refcount (1U<<_MEMF_no_refcount)
   16.23 +#define _MEMF_node        8
   16.24 +#define  MEMF_node(n)     ((((n)+1)&0xff)<<_MEMF_node)
   16.25  #define _MEMF_bits        24
   16.26  #define  MEMF_bits(n)     ((n)<<_MEMF_bits)
   16.27  
    17.1 --- a/xen/include/xen/numa.h	Fri Apr 04 10:18:45 2008 +0100
    17.2 +++ b/xen/include/xen/numa.h	Fri Apr 04 10:48:01 2008 +0100
    17.3 @@ -8,6 +8,13 @@
    17.4  #define NODES_SHIFT     0
    17.5  #endif
    17.6  
    17.7 +#define NUMA_NO_NODE    0xFF
    17.8 +
    17.9  #define MAX_NUMNODES    (1 << NODES_SHIFT)
   17.10  
   17.11 +#define vcpu_to_node(v) (cpu_to_node[(v)->processor])
   17.12 +
   17.13 +#define domain_to_node(d) \
   17.14 +  (((d)->vcpu[0] != NULL) ? vcpu_to_node((d)->vcpu[0]) : NUMA_NO_NODE)
   17.15 +
   17.16  #endif /* _XEN_NUMA_H */