ia64/xen-unstable
changeset 14102:70098102f84d
xen memory allocator: per-bit-width heap zones
Replace the 3-zone scheme of the heap allocator with one where
zones are distinguished by their bit widths.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
author    kfraser@localhost.localdomain
date      Fri Feb 23 17:01:38 2007 +0000 (2007-02-23)
parents   b6df5e64b6c4
children  ee4850bc895b
files     xen/common/page_alloc.c
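
The description is terse, so a quick illustration of what "per-bit-width zones" means may help: instead of three fixed zones (Xen / DMA / normal), every page now lands in the zone named by the position of its MFN's most significant bit, via the new pfn_dom_zone_type() macro. Below is a minimal, self-contained sketch of that mapping; it is not part of the patch, my_fls() is a portable stand-in for Xen's arch-specific fls(), and PAGE_SHIFT = 12 is an assumption.

#include <stdio.h>

#define PAGE_SHIFT 12  /* assumed x86-style 4KiB pages */

/* Portable stand-in for Xen's fls(): position of the most significant
 * set bit, counting from 1; returns 0 for an input of 0. */
static unsigned int my_fls(unsigned long x)
{
    unsigned int r = 0;
    while ( x != 0 )
    {
        x >>= 1;
        r++;
    }
    return r;
}

/* The patch's new mapping: a page lives in the zone named by the index
 * of its MFN's top bit, so zone z holds MFNs in [2^z, 2^(z+1)). */
#define pfn_dom_zone_type(_pfn) (my_fls(_pfn) - 1)

int main(void)
{
    static const unsigned long mfns[] = { 1, 0x9f, 0x100, 0xfffff, 0x100000 };
    unsigned int i;

    for ( i = 0; i < sizeof(mfns) / sizeof(mfns[0]); i++ )
    {
        unsigned int zone = pfn_dom_zone_type(mfns[i]);
        /* Every page in zone z has a physical address below
         * 2^(z + 1 + PAGE_SHIFT). */
        printf("MFN %#8lx -> zone %2u (addr < 2^%u)\n",
               mfns[i], zone, zone + 1 + PAGE_SHIFT);
    }
    return 0;
}

Because all pages in zone z sit below 2^(z + 1 + PAGE_SHIFT), an allocation that must stay under some address width can simply cap the highest zone it searches; this is why the reworked alloc_heap_pages() in the diff below takes a [zone_lo, zone_hi] range instead of a single zone.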
line diff
--- a/xen/common/page_alloc.c	Fri Feb 23 16:57:34 2007 +0000
+++ b/xen/common/page_alloc.c	Fri Feb 23 17:01:38 2007 +0000
@@ -53,16 +53,18 @@ unsigned long max_dma_mfn = (1UL << (CON
 static void parse_dma_bits(char *s)
 {
     unsigned int v = simple_strtol(s, NULL, 0);
-    if ( v >= (sizeof(long)*8 + PAGE_SHIFT) )
+    if ( v >= (BITS_PER_LONG + PAGE_SHIFT) )
     {
-        dma_bitsize = sizeof(long)*8 + PAGE_SHIFT;
+        dma_bitsize = BITS_PER_LONG + PAGE_SHIFT;
         max_dma_mfn = ~0UL;
     }
-    else
+    else if ( v > PAGE_SHIFT )
     {
         dma_bitsize = v;
         max_dma_mfn = (1UL << (dma_bitsize - PAGE_SHIFT)) - 1;
     }
+    else
+        printk("Invalid dma_bits value of %u ignored.\n", v);
 }
 custom_param("dma_bits", parse_dma_bits);
 
@@ -309,12 +311,13 @@ unsigned long alloc_boot_pages(
  */
 
 #define MEMZONE_XEN 0
-#define MEMZONE_DOM 1
-#define MEMZONE_DMADOM 2
-#define NR_ZONES 3
+#ifdef PADDR_BITS
+#define NR_ZONES (PADDR_BITS - PAGE_SHIFT)
+#else
+#define NR_ZONES (BITS_PER_LONG - PAGE_SHIFT)
+#endif
 
-#define pfn_dom_zone_type(_pfn) \
-    (((_pfn) <= max_dma_mfn) ? MEMZONE_DMADOM : MEMZONE_DOM)
+#define pfn_dom_zone_type(_pfn) (fls(_pfn) - 1)
 
 static struct list_head heap[NR_ZONES][MAX_NUMNODES][MAX_ORDER+1];
 
@@ -324,15 +327,17 @@ static DEFINE_SPINLOCK(heap_lock);
 
 /* Allocate 2^@order contiguous pages. */
 static struct page_info *alloc_heap_pages(
-    unsigned int zone, unsigned int cpu, unsigned int order)
+    unsigned int zone_lo, unsigned zone_hi,
+    unsigned int cpu, unsigned int order)
 {
     unsigned int i, j, node = cpu_to_node(cpu), num_nodes = num_online_nodes();
-    unsigned int request = (1UL << order);
+    unsigned int zone, request = (1UL << order);
     struct page_info *pg;
 
     ASSERT(node >= 0);
     ASSERT(node < num_nodes);
-    ASSERT(zone < NR_ZONES);
+    ASSERT(zone_lo <= zone_hi);
+    ASSERT(zone_hi < NR_ZONES);
 
     if ( unlikely(order > MAX_ORDER) )
         return NULL;
@@ -345,14 +350,17 @@ static struct page_info *alloc_heap_page
      * needless computation on fast-path */
     for ( i = 0; i < num_nodes; i++ )
     {
-        /* check if target node can support the allocation */
-        if ( avail[zone][node] >= request )
+        for ( zone = zone_hi; zone >= zone_lo; --zone )
         {
-            /* Find smallest order which can satisfy the request. */
-            for ( j = order; j <= MAX_ORDER; j++ )
+            /* check if target node can support the allocation */
+            if ( avail[zone][node] >= request )
             {
-                if ( !list_empty(&heap[zone][node][j]) )
-                    goto found;
+                /* Find smallest order which can satisfy the request. */
+                for ( j = order; j <= MAX_ORDER; j++ )
+                {
+                    if ( !list_empty(&heap[zone][node][j]) )
+                        goto found;
+                }
             }
         }
         /* pick next node, wrapping around if needed */
@@ -477,16 +485,17 @@ void init_heap_pages(
 }
 
 static unsigned long avail_heap_pages(
-    int zone, int node)
+    unsigned int zone_lo, unsigned int zone_hi, unsigned int node)
 {
-    unsigned int i, j, num_nodes = num_online_nodes();
+    unsigned int i, zone, num_nodes = num_online_nodes();
     unsigned long free_pages = 0;
 
-    for (i=0; i<NR_ZONES; i++)
-        if ( (zone == -1) || (zone == i) )
-            for (j=0; j < num_nodes; j++)
-                if ( (node == -1) || (node == j) )
-                    free_pages += avail[i][j];
+    if ( zone_hi >= NR_ZONES )
+        zone_hi = NR_ZONES - 1;
+    for ( zone = zone_lo; zone <= zone_hi; zone++ )
+        for ( i = 0; i < num_nodes; i++ )
+            if ( (node == -1) || (node == i) )
+                free_pages += avail[zone][i];
 
     return free_pages;
 }
@@ -606,7 +615,7 @@ void *alloc_xenheap_pages(unsigned int o
     int i;
 
     local_irq_save(flags);
-    pg = alloc_heap_pages(MEMZONE_XEN, smp_processor_id(), order);
+    pg = alloc_heap_pages(MEMZONE_XEN, MEMZONE_XEN, smp_processor_id(), order);
    local_irq_restore(flags);
 
     if ( unlikely(pg == NULL) )
@@ -651,22 +660,26 @@ void free_xenheap_pages(void *v, unsigne
 
 void init_domheap_pages(paddr_t ps, paddr_t pe)
 {
-    unsigned long s_tot, e_tot, s_dma, e_dma, s_nrm, e_nrm;
+    unsigned long s_tot, e_tot;
+    unsigned int zone;
 
     ASSERT(!in_irq());
 
     s_tot = round_pgup(ps) >> PAGE_SHIFT;
     e_tot = round_pgdown(pe) >> PAGE_SHIFT;
 
-    s_dma = min(s_tot, max_dma_mfn + 1);
-    e_dma = min(e_tot, max_dma_mfn + 1);
-    if ( s_dma < e_dma )
-        init_heap_pages(MEMZONE_DMADOM, mfn_to_page(s_dma), e_dma - s_dma);
+    zone = fls(s_tot);
+    BUG_ON(zone <= MEMZONE_XEN + 1);
+    for ( --zone; s_tot < e_tot; ++zone )
+    {
+        unsigned long end = e_tot;
 
-    s_nrm = max(s_tot, max_dma_mfn + 1);
-    e_nrm = max(e_tot, max_dma_mfn + 1);
-    if ( s_nrm < e_nrm )
-        init_heap_pages(MEMZONE_DOM, mfn_to_page(s_nrm), e_nrm - s_nrm);
+        BUILD_BUG_ON(NR_ZONES > BITS_PER_LONG);
+        if ( zone < BITS_PER_LONG - 1 && end > 1UL << (zone + 1) )
+            end = 1UL << (zone + 1);
+        init_heap_pages(zone, mfn_to_page(s_tot), end - s_tot);
+        s_tot = end;
+    }
 }
 
 
@@ -733,17 +746,21 @@ struct page_info *__alloc_domheap_pages(
 
     if ( !(memflags & MEMF_dma) )
     {
-        pg = alloc_heap_pages(MEMZONE_DOM, cpu, order);
+        pg = alloc_heap_pages(dma_bitsize - PAGE_SHIFT, NR_ZONES - 1, cpu, order);
         /* Failure? Then check if we can fall back to the DMA pool. */
         if ( unlikely(pg == NULL) &&
              ((order > MAX_ORDER) ||
-              (avail_heap_pages(MEMZONE_DMADOM,-1) <
+              (avail_heap_pages(MEMZONE_XEN + 1,
+                                dma_bitsize - PAGE_SHIFT - 1,
+                                -1) <
               (dma_emergency_pool_pages + (1UL << order)))) )
             return NULL;
     }
 
     if ( pg == NULL )
-        if ( (pg = alloc_heap_pages(MEMZONE_DMADOM, cpu, order)) == NULL )
+        if ( (pg = alloc_heap_pages(MEMZONE_XEN + 1,
+                                    dma_bitsize - PAGE_SHIFT - 1,
+                                    cpu, order)) == NULL )
             return NULL;
 
     mask = pg->u.free.cpumask;
@@ -865,9 +882,14 @@ unsigned long avail_domheap_pages(void)
 {
     unsigned long avail_nrm, avail_dma;
 
-    avail_nrm = avail_heap_pages(MEMZONE_DOM,-1);
+    avail_nrm = avail_heap_pages(dma_bitsize - PAGE_SHIFT,
+                                 NR_ZONES - 1,
+                                 -1);
 
-    avail_dma = avail_heap_pages(MEMZONE_DMADOM,-1);
+    avail_dma = avail_heap_pages(MEMZONE_XEN + 1,
+                                 dma_bitsize - PAGE_SHIFT - 1,
+                                 -1);
+
     if ( avail_dma > dma_emergency_pool_pages )
         avail_dma -= dma_emergency_pool_pages;
     else
@@ -878,18 +900,36 @@ unsigned long avail_domheap_pages(void)
 
 unsigned long avail_nodeheap_pages(int node)
 {
-    return avail_heap_pages(-1, node);
+    return avail_heap_pages(0, NR_ZONES - 1, node);
 }
 
 static void pagealloc_keyhandler(unsigned char key)
 {
+    unsigned int zone = MEMZONE_XEN;
+    unsigned long total = 0;
+
     printk("Physical memory information:\n");
-    printk("    Xen heap: %lukB free\n"
-           "    DMA heap: %lukB free\n"
-           "    Dom heap: %lukB free\n",
-           avail_heap_pages(MEMZONE_XEN, -1) << (PAGE_SHIFT-10),
-           avail_heap_pages(MEMZONE_DMADOM, -1) <<(PAGE_SHIFT-10),
-           avail_heap_pages(MEMZONE_DOM, -1) <<(PAGE_SHIFT-10));
+    printk("    Xen heap: %lukB free\n",
+           avail_heap_pages(zone, zone, -1) << (PAGE_SHIFT-10));
+
+    while ( ++zone < NR_ZONES )
+    {
+        unsigned long n;
+
+        if ( zone == dma_bitsize - PAGE_SHIFT )
+        {
+            printk("    DMA heap: %lukB free\n", total << (PAGE_SHIFT-10));
+            total = 0;
+        }
+
+        if ( (n = avail_heap_pages(zone, zone, -1)) != 0 )
+        {
+            total += n;
+            printk("    heap[%02u]: %lukB free\n", zone, n << (PAGE_SHIFT-10));
+        }
+    }
+
+    printk("    Dom heap: %lukB free\n", total << (PAGE_SHIFT-10));
 }
 
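
With zones keyed to bit widths, init_domheap_pages() can no longer hand each memory range to one of two fixed pools; it has to split the MFN range at every power-of-two boundary so each chunk falls entirely within one zone. The sketch below replays that splitting loop outside the hypervisor; split_range(), my_fls() and the stubbed init_heap_pages() are illustrative stand-ins, not Xen code, and the constants are assumptions.

#include <stdio.h>

#define PAGE_SHIFT    12                        /* assumed 4KiB pages */
#define MEMZONE_XEN   0
#define BITS_PER_LONG ((int)(sizeof(long) * 8))

/* Portable stand-in for Xen's fls(), as in the earlier sketch. */
static unsigned int my_fls(unsigned long x)
{
    unsigned int r = 0;
    while ( x != 0 )
    {
        x >>= 1;
        r++;
    }
    return r;
}

/* Stand-in for the real init_heap_pages(): just report which zone each
 * chunk of the range would be handed to. */
static void init_heap_pages(unsigned int zone, unsigned long mfn,
                            unsigned long nr_pages)
{
    printf("zone %2u: MFNs [%#7lx, %#7lx) -- %lu pages\n",
           zone, mfn, mfn + nr_pages, nr_pages);
}

/* The splitting loop from the patched init_domheap_pages(): walk the
 * MFN range [s_tot, e_tot), cutting at every power-of-two boundary so
 * each chunk falls entirely within one per-bit-width zone. */
static void split_range(unsigned long s_tot, unsigned long e_tot)
{
    unsigned int zone = my_fls(s_tot);

    for ( --zone; s_tot < e_tot; ++zone )
    {
        unsigned long end = e_tot;

        if ( (zone < BITS_PER_LONG - 1) && (end > (1UL << (zone + 1))) )
            end = 1UL << (zone + 1);
        init_heap_pages(zone, s_tot, end - s_tot);
        s_tot = end;
    }
}

int main(void)
{
    /* MFNs 0x180..0x11ff cross the 2^9, 2^10, 2^11 and 2^12 boundaries,
     * so the range is split across zones 8 through 12. */
    split_range(0x180, 0x1200);
    return 0;
}

Note also that the DMA pool is now just a zone range rather than a dedicated zone: __alloc_domheap_pages() above first tries zones [dma_bitsize - PAGE_SHIFT, NR_ZONES - 1] and only falls back to [MEMZONE_XEN + 1, dma_bitsize - PAGE_SHIFT - 1], subject to the emergency-pool threshold.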