ia64/xen-unstable

changeset 19062:108b45539cda

page_alloc: Clean up free_heap_pages and init_heap_pages interfaces.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jan 20 14:30:42 2009 +0000
parents 681af1946724
children adee46c3fbfa
files xen/common/page_alloc.c
line diff
     1.1 --- a/xen/common/page_alloc.c	Tue Jan 20 13:57:20 2009 +0000
     1.2 +++ b/xen/common/page_alloc.c	Tue Jan 20 14:30:42 2009 +0000
     1.3 @@ -70,8 +70,6 @@ integer_param("dma_bits", dma_bitsize);
     1.4  #define scrub_page(p) clear_page(p)
     1.5  #endif
     1.6  
     1.7 -#define bits_to_zone(b) (((b) < (PAGE_SHIFT + 1)) ? 0 : ((b) - PAGE_SHIFT - 1))
     1.8 -
     1.9  static DEFINE_SPINLOCK(page_scrub_lock);
    1.10  LIST_HEAD(page_scrub_list);
    1.11  static unsigned long scrub_pages;
    1.12 @@ -262,7 +260,9 @@ unsigned long __init alloc_boot_pages(
    1.13  #define MEMZONE_XEN 0
    1.14  #define NR_ZONES    (PADDR_BITS - PAGE_SHIFT)
    1.15  
    1.16 -#define pfn_dom_zone_type(_pfn) (fls(_pfn) - 1)
    1.17 +#define bits_to_zone(b) (((b) < (PAGE_SHIFT + 1)) ? 0 : ((b) - PAGE_SHIFT - 1))
    1.18 +#define page_to_zone(pg) (is_xen_heap_page(pg) ? MEMZONE_XEN :  \
    1.19 +                          (fls(page_to_mfn(pg)) - 1))
    1.20  
    1.21  typedef struct list_head heap_by_zone_and_order_t[NR_ZONES][MAX_ORDER+1];
    1.22  static heap_by_zone_and_order_t *_heap[MAX_NUMNODES];
    1.23 @@ -399,13 +399,13 @@ static struct page_info *alloc_heap_page
    1.24  
    1.25  /* Free 2^@order set of pages. */
    1.26  static void free_heap_pages(
    1.27 -    unsigned int zone, struct page_info *pg, unsigned int order)
    1.28 +    struct page_info *pg, unsigned int order)
    1.29  {
    1.30      unsigned long mask;
    1.31      unsigned int i, node = phys_to_nid(page_to_maddr(pg));
    1.32 +    unsigned int zone = page_to_zone(pg);
    1.33      struct domain *d;
    1.34  
    1.35 -    ASSERT(zone < NR_ZONES);
    1.36      ASSERT(order <= MAX_ORDER);
    1.37      ASSERT(node >= 0);
    1.38      ASSERT(node < num_online_nodes());
    1.39 @@ -484,17 +484,12 @@ static void free_heap_pages(
    1.40   */
    1.41  #define MAX_ORDER_ALIGNED (1UL << (MAX_ORDER))
    1.42  static void init_heap_pages(
    1.43 -    unsigned int zone, struct page_info *pg, unsigned long nr_pages)
    1.44 +    struct page_info *pg, unsigned long nr_pages)
    1.45  {
    1.46      unsigned int nid_curr, nid_prev;
    1.47      unsigned long i;
    1.48  
    1.49 -    ASSERT(zone < NR_ZONES);
    1.50 -
    1.51 -    if ( likely(page_to_mfn(pg) != 0) )
    1.52 -        nid_prev = phys_to_nid(page_to_maddr(pg-1));
    1.53 -    else
    1.54 -        nid_prev = phys_to_nid(page_to_maddr(pg));
    1.55 +    nid_prev = phys_to_nid(page_to_maddr(pg-1));
    1.56  
    1.57      for ( i = 0; i < nr_pages; i++ )
    1.58      {
    1.59 @@ -509,7 +504,7 @@ static void init_heap_pages(
    1.60           */
    1.61           if ( (nid_curr == nid_prev) || (page_to_maddr(pg+i) &
    1.62                                           MAX_ORDER_ALIGNED) )
    1.63 -             free_heap_pages(zone, pg+i, 0);
    1.64 +             free_heap_pages(pg+i, 0);
    1.65           else
    1.66               printk("Reserving non-aligned node boundary @ mfn %lu\n",
    1.67                      page_to_mfn(pg+i));
    1.68 @@ -555,7 +550,7 @@ void __init end_boot_allocator(void)
    1.69          if ( next_free )
    1.70              map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */
    1.71          if ( curr_free )
    1.72 -            init_heap_pages(pfn_dom_zone_type(i), mfn_to_page(i), 1);
    1.73 +            init_heap_pages(mfn_to_page(i), 1);
    1.74      }
    1.75  
    1.76      if ( !dma_bitsize && (num_online_nodes() > 1) )
    1.77 @@ -656,7 +651,7 @@ void init_xenheap_pages(paddr_t ps, padd
    1.78      if ( !is_xen_heap_mfn(paddr_to_pfn(pe)) )
    1.79          pe -= PAGE_SIZE;
    1.80  
    1.81 -    init_heap_pages(MEMZONE_XEN, maddr_to_page(ps), (pe - ps) >> PAGE_SHIFT);
    1.82 +    init_heap_pages(maddr_to_page(ps), (pe - ps) >> PAGE_SHIFT);
    1.83  }
    1.84  
    1.85  
    1.86 @@ -690,7 +685,7 @@ void free_xenheap_pages(void *v, unsigne
    1.87  
    1.88      memguard_guard_range(v, 1 << (order + PAGE_SHIFT));
    1.89  
    1.90 -    free_heap_pages(MEMZONE_XEN, virt_to_page(v), order);
    1.91 +    free_heap_pages(virt_to_page(v), order);
    1.92  }
    1.93  
    1.94  #else
    1.95 @@ -738,7 +733,7 @@ void free_xenheap_pages(void *v, unsigne
    1.96      for ( i = 0; i < (1u << order); i++ )
    1.97          pg[i].count_info &= ~PGC_xen_heap;
    1.98  
    1.99 -    free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
   1.100 +    free_heap_pages(pg, order);
   1.101  }
   1.102  
   1.103  #endif
   1.104 @@ -751,28 +746,14 @@ void free_xenheap_pages(void *v, unsigne
   1.105  
   1.106  void init_domheap_pages(paddr_t ps, paddr_t pe)
   1.107  {
   1.108 -    unsigned long s_tot, e_tot;
   1.109 -    unsigned int zone;
   1.110 +    unsigned long smfn, emfn;
   1.111  
   1.112      ASSERT(!in_irq());
   1.113  
   1.114 -    s_tot = round_pgup(ps) >> PAGE_SHIFT;
   1.115 -    e_tot = round_pgdown(pe) >> PAGE_SHIFT;
   1.116 -
   1.117 -    zone = fls(s_tot) - 1;
   1.118 -    BUG_ON(zone <= MEMZONE_XEN);
   1.119 +    smfn = round_pgup(ps) >> PAGE_SHIFT;
   1.120 +    emfn = round_pgdown(pe) >> PAGE_SHIFT;
   1.121  
   1.122 -    while ( s_tot < e_tot )
   1.123 -    {
   1.124 -        unsigned long end = e_tot;
   1.125 -
   1.126 -        BUILD_BUG_ON(NR_ZONES > BITS_PER_LONG);
   1.127 -        if ( zone < BITS_PER_LONG - 1 && end > 1UL << (zone + 1) )
   1.128 -            end = 1UL << (zone + 1);
   1.129 -        init_heap_pages(zone, mfn_to_page(s_tot), end - s_tot);
   1.130 -        s_tot = end;
   1.131 -        zone++;
   1.132 -    }
   1.133 +    init_heap_pages(mfn_to_page(smfn), emfn - smfn);
   1.134  }
   1.135  
   1.136  
   1.137 @@ -853,7 +834,7 @@ struct page_info *alloc_domheap_pages(
   1.138  
   1.139      if ( (d != NULL) && assign_pages(d, pg, order, memflags) )
   1.140      {
   1.141 -        free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
   1.142 +        free_heap_pages(pg, order);
   1.143          return NULL;
   1.144      }
   1.145      
   1.146 @@ -898,7 +879,7 @@ void free_domheap_pages(struct page_info
   1.147  
   1.148          if ( likely(!d->is_dying) )
   1.149          {
   1.150 -            free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
   1.151 +            free_heap_pages(pg, order);
   1.152          }
   1.153          else
   1.154          {
   1.155 @@ -920,7 +901,7 @@ void free_domheap_pages(struct page_info
   1.156      else
   1.157      {
   1.158          /* Freeing anonymous domain-heap pages. */
   1.159 -        free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
   1.160 +        free_heap_pages(pg, order);
   1.161          drop_dom_ref = 0;
   1.162      }
   1.163  
   1.164 @@ -1041,7 +1022,7 @@ static void page_scrub_softirq(void)
   1.165              p = map_domain_page(page_to_mfn(pg));
   1.166              scrub_page(p);
   1.167              unmap_domain_page(p);
   1.168 -            free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, 0);
   1.169 +            free_heap_pages(pg, 0);
   1.170          }
   1.171      } while ( (NOW() - start) < MILLISECS(1) );
   1.172
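
The net effect of this changeset is that callers of free_heap_pages() and init_heap_pages() no longer pass a zone index; the zone is derived from the page itself via the new page_to_zone() macro, and init_domheap_pages() therefore no longer needs to split a memory range at zone boundaries before seeding the heap.

A minimal, self-contained sketch of that zone derivation follows. It is an illustration only, not Xen code: fls_(), page_to_mfn_() and is_xen_heap_page_() are hypothetical stand-ins for the real helpers, and the toy struct page_info is not Xen's.

    /* Hypothetical sketch of the zone computation introduced above.
     * Only the arithmetic mirrors the patch; everything else is a stub. */
    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define MEMZONE_XEN  0

    /* Toy page descriptor: just enough state for the example. */
    struct page_info {
        unsigned long mfn;      /* machine frame number */
        int xen_heap;           /* non-zero for Xen-heap pages */
    };

    static unsigned long page_to_mfn_(const struct page_info *pg) { return pg->mfn; }
    static int is_xen_heap_page_(const struct page_info *pg) { return pg->xen_heap; }

    /* Find-last-set: 1-based index of the most significant set bit. */
    static int fls_(unsigned long x)
    {
        int r = 0;
        while ( x ) { r++; x >>= 1; }
        return r;
    }

    /* Mirrors the macros added by this changeset. */
    #define bits_to_zone(b) (((b) < (PAGE_SHIFT + 1)) ? 0 : ((b) - PAGE_SHIFT - 1))
    #define page_to_zone(pg) (is_xen_heap_page_(pg) ? MEMZONE_XEN :  \
                              (fls_(page_to_mfn_(pg)) - 1))

    int main(void)
    {
        /* A domain-heap page at MFN 0x80000 (physical address 2GiB with
         * 4KiB pages): fls(0x80000) == 20, so it lands in zone 19. */
        struct page_info dom_pg = { .mfn = 0x80000, .xen_heap = 0 };
        struct page_info xen_pg = { .mfn = 0x1234,  .xen_heap = 1 };

        printf("dom page zone = %d\n", page_to_zone(&dom_pg));   /* 19 */
        printf("xen page zone = %d\n", page_to_zone(&xen_pg));   /* MEMZONE_XEN */
        return 0;
    }

Compiled and run, the sketch prints zone 19 for the domain-heap page and zone 0 (MEMZONE_XEN) for the Xen-heap page, matching the computation that free_heap_pages() now performs internally instead of trusting a caller-supplied zone.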