ia64/xen-unstable

changeset 14098:c64aa7fb7712

xen memory allocator: hide generic allocator routines

This patch introduces no functional changes; it simply moves code around
so that the heap allocation functions which take an explicit zone
parameter, and are unused outside the page allocator, can be made static
without having to forward-prototype them in their source file.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author kfraser@localhost.localdomain
date Fri Feb 23 16:55:56 2007 +0000 (2007-02-23)
parents 6253b8d32eb9
children b6df5e64b6c4
files xen/common/page_alloc.c xen/include/xen/mm.h
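
For context, here is a minimal standalone sketch of the pattern this changeset
applies: the zone-taking heap routines become static helpers of the page
allocator's translation unit, definitions are ordered so callees precede their
callers (avoiding forward prototypes), and only the zone-less wrappers remain
externally visible. All identifiers below are simplified illustrations, not
the real Xen code or its signatures.

/*
 * Sketch only: a single translation unit standing in for page_alloc.c.
 * In the real tree, the public wrappers would still be declared in the
 * header (xen/mm.h); the static helpers would not.
 */
#include <stdio.h>

#define NR_ZONES 2

/* Per-zone free-page counters standing in for the real heap structures. */
static unsigned long avail[NR_ZONES];

/* Internal helper: takes an explicit zone, so it is now static. */
static int alloc_heap_pages(unsigned int zone, unsigned int order)
{
    unsigned long request = 1UL << order;

    if ( (zone >= NR_ZONES) || (avail[zone] < request) )
        return -1;                 /* not enough pages in that zone */
    avail[zone] -= request;
    return 0;
}

/* Internal helper: frees back into a specific zone; also static now. */
static void free_heap_pages(unsigned int zone, unsigned int order)
{
    avail[zone] += 1UL << order;
}

/*
 * Public entry points (what the header would still declare): callers never
 * name a zone, so the zone-taking helpers need not be visible outside this
 * file, and because they are defined above, no forward prototypes are needed.
 */
int alloc_domheap_pages(unsigned int order)
{
    return alloc_heap_pages(0 /* "normal" zone */, order);
}

void free_domheap_pages(unsigned int order)
{
    free_heap_pages(0, order);
}

int main(void)
{
    avail[0] = 16;                                  /* seed the fake zone */
    printf("alloc: %d\n", alloc_domheap_pages(2));  /* takes 4 pages */
    free_domheap_pages(2);
    printf("free pages left: %lu\n", avail[0]);
    return 0;
}
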
line diff
     1.1 --- a/xen/common/page_alloc.c	Fri Feb 23 16:36:55 2007 +0000
     1.2 +++ b/xen/common/page_alloc.c	Fri Feb 23 16:55:56 2007 +0000
     1.3 @@ -322,78 +322,11 @@ static unsigned long avail[NR_ZONES][MAX
     1.4  
     1.5  static DEFINE_SPINLOCK(heap_lock);
     1.6  
     1.7 -void end_boot_allocator(void)
     1.8 +/* Allocate 2^@order contiguous pages. */
     1.9 +static struct page_info *alloc_heap_pages(
    1.10 +    unsigned int zone, unsigned int cpu, unsigned int order)
    1.11  {
    1.12 -    unsigned long i, j, k;
    1.13 -    int curr_free, next_free;
    1.14 -
    1.15 -    memset(avail, 0, sizeof(avail));
    1.16 -
    1.17 -    for ( i = 0; i < NR_ZONES; i++ )
    1.18 -        for ( j = 0; j < MAX_NUMNODES; j++ )
    1.19 -            for ( k = 0; k <= MAX_ORDER; k++ )
    1.20 -                INIT_LIST_HEAD(&heap[i][j][k]);
    1.21 -
    1.22 -    /* Pages that are free now go to the domain sub-allocator. */
    1.23 -    if ( (curr_free = next_free = !allocated_in_map(first_valid_mfn)) )
    1.24 -        map_alloc(first_valid_mfn, 1);
    1.25 -    for ( i = first_valid_mfn; i < max_page; i++ )
    1.26 -    {
    1.27 -        curr_free = next_free;
    1.28 -        next_free = !allocated_in_map(i+1);
    1.29 -        if ( next_free )
    1.30 -            map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */
    1.31 -        if ( curr_free )
    1.32 -            init_heap_pages(pfn_dom_zone_type(i), mfn_to_page(i), 1);
    1.33 -    }
    1.34 -
    1.35 -    printk("Domain heap initialised: DMA width %u bits\n", dma_bitsize);
    1.36 -}
    1.37 -
    1.38 -/* 
    1.39 - * Hand the specified arbitrary page range to the specified heap zone
    1.40 - * checking the node_id of the previous page.  If they differ and the
    1.41 - * latter is not on a MAX_ORDER boundary, then we reserve the page by
    1.42 - * not freeing it to the buddy allocator.
    1.43 - */
    1.44 -#define MAX_ORDER_ALIGNED (1UL << (MAX_ORDER))
    1.45 -void init_heap_pages(
    1.46 -    unsigned int zone, struct page_info *pg, unsigned long nr_pages)
    1.47 -{
    1.48 -    unsigned int nid_curr, nid_prev;
    1.49 -    unsigned long i;
    1.50 -
    1.51 -    ASSERT(zone < NR_ZONES);
    1.52 -
    1.53 -    if ( likely(page_to_mfn(pg) != 0) )
    1.54 -        nid_prev = phys_to_nid(page_to_maddr(pg-1));
    1.55 -    else
    1.56 -        nid_prev = phys_to_nid(page_to_maddr(pg));
    1.57 -
    1.58 -    for ( i = 0; i < nr_pages; i++ )
    1.59 -    {
    1.60 -        nid_curr = phys_to_nid(page_to_maddr(pg+i));
    1.61 -
    1.62 -        /*
    1.63 -         * free pages of the same node, or if they differ, but are on a
    1.64 -         * MAX_ORDER alignement boundary (which already get reserved)
    1.65 -         */
    1.66 -         if ( (nid_curr == nid_prev) || (page_to_maddr(pg+i) &
    1.67 -                                         MAX_ORDER_ALIGNED) )
    1.68 -             free_heap_pages(zone, pg+i, 0);
    1.69 -         else
    1.70 -             printk("Reserving non-aligned node boundary @ mfn %lu\n",
    1.71 -                    page_to_mfn(pg+i));
    1.72 -
    1.73 -        nid_prev = nid_curr;
    1.74 -    }
    1.75 -}
    1.76 -
    1.77 -/* Allocate 2^@order contiguous pages. */
    1.78 -struct page_info *alloc_heap_pages(unsigned int zone, unsigned int cpu,
    1.79 -                                   unsigned int order)
    1.80 -{
    1.81 -    unsigned int i,j, node = cpu_to_node(cpu), num_nodes = num_online_nodes();
    1.82 +    unsigned int i, j, node = cpu_to_node(cpu), num_nodes = num_online_nodes();
    1.83      unsigned int request = (1UL << order);
    1.84      struct page_info *pg;
    1.85  
    1.86 @@ -452,13 +385,12 @@ struct page_info *alloc_heap_pages(unsig
    1.87      return pg;
    1.88  }
    1.89  
    1.90 -
    1.91  /* Free 2^@order set of pages. */
    1.92 -void free_heap_pages(
    1.93 +static void free_heap_pages(
    1.94      unsigned int zone, struct page_info *pg, unsigned int order)
    1.95  {
    1.96      unsigned long mask;
    1.97 -    int node = phys_to_nid(page_to_maddr(pg));
    1.98 +    unsigned int node = phys_to_nid(page_to_maddr(pg));
    1.99  
   1.100      ASSERT(zone < NR_ZONES);
   1.101      ASSERT(order <= MAX_ORDER);
   1.102 @@ -505,6 +437,87 @@ void free_heap_pages(
   1.103      spin_unlock(&heap_lock);
   1.104  }
   1.105  
   1.106 +/*
   1.107 + * Hand the specified arbitrary page range to the specified heap zone
   1.108 + * checking the node_id of the previous page.  If they differ and the
   1.109 + * latter is not on a MAX_ORDER boundary, then we reserve the page by
   1.110 + * not freeing it to the buddy allocator.
   1.111 + */
   1.112 +#define MAX_ORDER_ALIGNED (1UL << (MAX_ORDER))
   1.113 +void init_heap_pages(
   1.114 +    unsigned int zone, struct page_info *pg, unsigned long nr_pages)
   1.115 +{
   1.116 +    unsigned int nid_curr, nid_prev;
   1.117 +    unsigned long i;
   1.118 +
   1.119 +    ASSERT(zone < NR_ZONES);
   1.120 +
   1.121 +    if ( likely(page_to_mfn(pg) != 0) )
   1.122 +        nid_prev = phys_to_nid(page_to_maddr(pg-1));
   1.123 +    else
   1.124 +        nid_prev = phys_to_nid(page_to_maddr(pg));
   1.125 +
   1.126 +    for ( i = 0; i < nr_pages; i++ )
   1.127 +    {
   1.128 +        nid_curr = phys_to_nid(page_to_maddr(pg+i));
   1.129 +
   1.130 +        /*
   1.131 +         * free pages of the same node, or if they differ, but are on a
   1.132 +         * MAX_ORDER alignement boundary (which already get reserved)
   1.133 +         */
   1.134 +         if ( (nid_curr == nid_prev) || (page_to_maddr(pg+i) &
   1.135 +                                         MAX_ORDER_ALIGNED) )
   1.136 +             free_heap_pages(zone, pg+i, 0);
   1.137 +         else
   1.138 +             printk("Reserving non-aligned node boundary @ mfn %lu\n",
   1.139 +                    page_to_mfn(pg+i));
   1.140 +
   1.141 +        nid_prev = nid_curr;
   1.142 +    }
   1.143 +}
   1.144 +
   1.145 +static unsigned long avail_heap_pages(
   1.146 +    int zone, int node)
   1.147 +{
   1.148 +    unsigned int i, j, num_nodes = num_online_nodes();
   1.149 +    unsigned long free_pages = 0;
   1.150 +
   1.151 +    for (i=0; i<NR_ZONES; i++)
   1.152 +        if ( (zone == -1) || (zone == i) )
   1.153 +            for (j=0; j < num_nodes; j++)
   1.154 +                if ( (node == -1) || (node == j) )
   1.155 +                    free_pages += avail[i][j];
   1.156 +
   1.157 +    return free_pages;
   1.158 +}
   1.159 +
   1.160 +void end_boot_allocator(void)
   1.161 +{
   1.162 +    unsigned long i, j, k;
   1.163 +    int curr_free, next_free;
   1.164 +
   1.165 +    memset(avail, 0, sizeof(avail));
   1.166 +
   1.167 +    for ( i = 0; i < NR_ZONES; i++ )
   1.168 +        for ( j = 0; j < MAX_NUMNODES; j++ )
   1.169 +            for ( k = 0; k <= MAX_ORDER; k++ )
   1.170 +                INIT_LIST_HEAD(&heap[i][j][k]);
   1.171 +
   1.172 +    /* Pages that are free now go to the domain sub-allocator. */
   1.173 +    if ( (curr_free = next_free = !allocated_in_map(first_valid_mfn)) )
   1.174 +        map_alloc(first_valid_mfn, 1);
   1.175 +    for ( i = first_valid_mfn; i < max_page; i++ )
   1.176 +    {
   1.177 +        curr_free = next_free;
   1.178 +        next_free = !allocated_in_map(i+1);
   1.179 +        if ( next_free )
   1.180 +            map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */
   1.181 +        if ( curr_free )
   1.182 +            init_heap_pages(pfn_dom_zone_type(i), mfn_to_page(i), 1);
   1.183 +    }
   1.184 +
   1.185 +    printk("Domain heap initialised: DMA width %u bits\n", dma_bitsize);
   1.186 +}
   1.187  
   1.188  /*
   1.189   * Scrub all unallocated pages in all heap zones. This function is more
   1.190 @@ -769,7 +782,7 @@ struct page_info *__alloc_domheap_pages(
   1.191      return pg;
   1.192  }
   1.193  
   1.194 -inline struct page_info *alloc_domheap_pages(
   1.195 +struct page_info *alloc_domheap_pages(
   1.196      struct domain *d, unsigned int order, unsigned int flags)
   1.197  {
   1.198      return __alloc_domheap_pages(d, smp_processor_id(), order, flags);
   1.199 @@ -848,20 +861,6 @@ void free_domheap_pages(struct page_info
   1.200  }
   1.201  
   1.202  
   1.203 -unsigned long avail_heap_pages(int zone, int node)
   1.204 -{
   1.205 -    int i,j, num_nodes = num_online_nodes();
   1.206 -    unsigned long free_pages = 0;
   1.207 -   
   1.208 -    for (i=0; i<NR_ZONES; i++)
   1.209 -        if ( (zone == -1) || (zone == i) )
   1.210 -            for (j=0; j < num_nodes; j++)
   1.211 -                if ( (node == -1) || (node == j) )
   1.212 -                    free_pages += avail[i][j];            
   1.213 -
   1.214 -    return free_pages;
   1.215 -}
   1.216 -
   1.217  unsigned long avail_domheap_pages(void)
   1.218  {
   1.219      unsigned long avail_nrm, avail_dma;
     2.1 --- a/xen/include/xen/mm.h	Fri Feb 23 16:36:55 2007 +0000
     2.2 +++ b/xen/include/xen/mm.h	Fri Feb 23 16:55:56 2007 +0000
     2.3 @@ -46,15 +46,6 @@ unsigned long alloc_boot_low_pages(
     2.4  int reserve_boot_pages(unsigned long first_pfn, unsigned long nr_pfns);
     2.5  void end_boot_allocator(void);
     2.6  
     2.7 -/* Generic allocator. These functions are *not* interrupt-safe. */
     2.8 -void init_heap_pages(
     2.9 -    unsigned int zone, struct page_info *pg, unsigned long nr_pages);
    2.10 -struct page_info *alloc_heap_pages(
    2.11 -    unsigned int zone, unsigned int cpu, unsigned int order);
    2.12 -void free_heap_pages(
    2.13 -    unsigned int zone, struct page_info *pg, unsigned int order);
    2.14 -void scrub_heap_pages(void);
    2.15 -
    2.16  /* Xen suballocator. These functions are interrupt-safe. */
    2.17  void init_xenheap_pages(paddr_t ps, paddr_t pe);
    2.18  void *alloc_xenheap_pages(unsigned int order);
    2.19 @@ -71,10 +62,11 @@ struct page_info *__alloc_domheap_pages(
    2.20      unsigned int memflags);
    2.21  void free_domheap_pages(struct page_info *pg, unsigned int order);
    2.22  unsigned long avail_domheap_pages(void);
    2.23 -unsigned long avail_heap_pages(int zone, int node);
    2.24  #define alloc_domheap_page(d) (alloc_domheap_pages(d,0,0))
    2.25  #define free_domheap_page(p)  (free_domheap_pages(p,0))
    2.26  
    2.27 +void scrub_heap_pages(void);
    2.28 +
    2.29  int assign_pages(
    2.30      struct domain *d,
    2.31      struct page_info *pg,