direct-io.hg
changeset 13923:741adb202b82
[POWERPC][XEN] Mark heap memory based on boot_of.c's allocator.
- Explain why we have another allocator (that wasn't so hard now was it?).
- Create and export boot_of_mem_avail() to allow later code to iterate over the
allocator bitmap.
- Use boot_of_mem_avail() to place memory in the heap, instead of using globals
and making assumptions about the ordering of reserved areas.
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author | Hollis Blanchard <hollisb@us.ibm.com> |
---|---|
date | Fri Jan 19 15:36:17 2007 -0600 (2007-01-19) |
parents | 3773af71a14b |
children | 4f0353778233 |
files | xen/arch/powerpc/boot_of.c xen/arch/powerpc/memory.c xen/include/asm-powerpc/mm.h |
line diff
--- a/xen/arch/powerpc/boot_of.c	Wed Jan 17 19:37:20 2007 -0500
+++ b/xen/arch/powerpc/boot_of.c	Fri Jan 19 15:36:17 2007 -0600
@@ -43,6 +43,14 @@ static ulong of_msr;
 static int of_out;
 static ulong eomem;
 
+/* Track memory during early boot with a limited per-page bitmap. We need an
+ * allocator to tell us where we can place RTAS, our copy of the device tree.
+ * We could examine the "available" properties in memory nodes, but we
+ * apparently can't depend on firmware to update those when we call "claim". So
+ * we need to track it ourselves.
+ * We can't dynamically allocate the bitmap, because we would need something
+ * to tell us where it's safe to allocate...
+ */
 #define MEM_AVAILABLE_PAGES ((32 << 20) >> PAGE_SHIFT)
 static DECLARE_BITMAP(mem_available_pages, MEM_AVAILABLE_PAGES);
 
@@ -532,6 +540,37 @@ static ulong boot_of_alloc(ulong size)
     }
 }
 
+int boot_of_mem_avail(int pos, ulong *startpage, ulong *endpage)
+{
+    ulong freebit;
+    ulong usedbit;
+
+    if (pos >= MEM_AVAILABLE_PAGES)
+        /* Stop iterating. */
+        return -1;
+
+    /* Find first free page. */
+    freebit = find_next_zero_bit(mem_available_pages, MEM_AVAILABLE_PAGES, pos);
+    if (freebit >= MEM_AVAILABLE_PAGES) {
+        /* We know everything after MEM_AVAILABLE_PAGES is still free. */
+        *startpage = MEM_AVAILABLE_PAGES << PAGE_SHIFT;
+        *endpage = ~0UL;
+        return freebit;
+    }
+    *startpage = freebit << PAGE_SHIFT;
+
+    /* Now find first used page after that. */
+    usedbit = find_next_bit(mem_available_pages, MEM_AVAILABLE_PAGES, freebit);
+    if (usedbit >= MEM_AVAILABLE_PAGES) {
+        /* We know everything after MEM_AVAILABLE_PAGES is still free. */
+        *endpage = ~0UL;
+        return usedbit;
+    }
+
+    *endpage = usedbit << PAGE_SHIFT;
+    return usedbit;
+}
+
 static ulong boot_of_mem_init(void)
 {
     int root;
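The iterator's contract: the return value is the position to pass back in on the next call, a negative value means the bitmap is exhausted, and an end address of ~0UL means the range runs past the tracked region. As a minimal sketch of a caller (dump_avail() is a hypothetical helper, not part of this changeset; it only mirrors the iteration pattern memory_init() uses below):

    /* Sketch: print every free range known to boot_of.c's early allocator. */
    static void dump_avail(void)
    {
        ulong start, end;
        int pos;

        for (pos = boot_of_mem_avail(0, &start, &end);
             pos >= 0;
             pos = boot_of_mem_avail(pos, &start, &end)) {
            /* end == ~0UL: free from start to the end of the tracked region. */
            printk("free: 0x%016lx - 0x%016lx\n", start, end);
        }
    }
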
--- a/xen/arch/powerpc/memory.c	Wed Jan 17 19:37:20 2007 -0500
+++ b/xen/arch/powerpc/memory.c	Fri Jan 19 15:36:17 2007 -0600
@@ -13,7 +13,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
- * Copyright (C) IBM Corp. 2006
+ * Copyright IBM Corp. 2006, 2007
  *
  * Authors: Dan Poff <poff@us.ibm.com>
  *          Jimi Xenidis <jimix@watson.ibm.com>
@@ -25,7 +25,7 @@
 #include "oftree.h"
 #include "rtas.h"
 
-#undef DEBUG
+#define DEBUG
 #ifdef DEBUG
 #define DBG(fmt...) printk(fmt)
 #else
@@ -42,8 +42,6 @@ integer_param("xenheap_megabytes", opt_x
 unsigned long xenheap_phys_end;
 static uint nr_pages;
 static ulong xenheap_size;
-static ulong save_start;
-static ulong save_end;
 
 struct membuf {
     ulong start;
@@ -52,30 +50,6 @@ struct membuf {
 
 typedef void (*walk_mem_fn)(struct membuf *, uint);
 
-static ulong free_xenheap(ulong start, ulong end)
-{
-    start = ALIGN_UP(start, PAGE_SIZE);
-    end = ALIGN_DOWN(end, PAGE_SIZE);
-
-    DBG("%s: 0x%lx - 0x%lx\n", __func__, start, end);
-
-    /* need to do this better */
-    if (save_start <= end && save_start >= start) {
-        DBG("%s: Go around the saved area: 0x%lx - 0x%lx\n",
-            __func__, save_start, save_end);
-        init_xenheap_pages(start, ALIGN_DOWN(save_start, PAGE_SIZE));
-        xenheap_size += ALIGN_DOWN(save_start, PAGE_SIZE) - start;
-
-        init_xenheap_pages(ALIGN_UP(save_end, PAGE_SIZE), end);
-        xenheap_size += end - ALIGN_UP(save_end, PAGE_SIZE);
-    } else {
-        init_xenheap_pages(start, end);
-        xenheap_size += end - start;
-    }
-
-    return ALIGN_UP(end, PAGE_SIZE);
-}
-
 static void set_max_page(struct membuf *mb, uint entries)
 {
     int i;
@@ -113,6 +87,7 @@ static void heap_init(struct membuf *mb,
         start_blk = xenheap_phys_end;
     }
 
+    DBG("boot free: %016lx - %016lx\n", start_blk, end_blk);
     init_boot_pages(start_blk, end_blk);
     total_pages += (end_blk - start_blk) >> PAGE_SHIFT;
 }
@@ -141,72 +116,31 @@ static void ofd_walk_mem(void *m, walk_m
     }
 }
 
-static void setup_xenheap(module_t *mod, int mcount)
-{
-    int i;
-    ulong freemem;
-
-    freemem = ALIGN_UP((ulong)_end, PAGE_SIZE);
-
-    for (i = 0; i < mcount; i++) {
-        u32 s;
-
-        if (mod[i].mod_end == mod[i].mod_start)
-            continue;
-
-        s = ALIGN_DOWN(mod[i].mod_start, PAGE_SIZE);
-
-        if (mod[i].mod_start > (ulong)_start &&
-            mod[i].mod_start < (ulong)_end) {
-            /* mod was linked in */
-            continue;
-        }
-
-        if (s < freemem)
-            panic("module addresses must assend\n");
-
-        free_xenheap(freemem, s);
-        freemem = ALIGN_UP(mod[i].mod_end, PAGE_SIZE);
-
-    }
-
-    /* the rest of the xenheap, starting at the end of modules */
-    free_xenheap(freemem, xenheap_phys_end);
-}
-
 void memory_init(module_t *mod, int mcount)
 {
     ulong eomem;
-    ulong heap_start;
+    ulong bitmap_start = ~0UL;
+    ulong bitmap_end;
+    ulong bitmap_size;
     ulong xh_pages;
+    ulong start;
+    ulong end;
+    int pos;
 
     /* lets find out how much memory there is and set max_page */
     max_page = 0;
     printk("Physical RAM map:\n");
     ofd_walk_mem((void *)oftree, set_max_page);
     eomem = max_page << PAGE_SHIFT;
-
-    if (eomem == 0){
+    if (eomem == 0) {
        panic("ofd_walk_mem() failed\n");
    }
 
-    /* find the portion of memory we need to keep safe */
-    save_start = oftree;
-    save_end = oftree_end;
-    if (rtas_base) {
-        if (save_start > rtas_base)
-            save_start = rtas_base;
-        if (save_end < rtas_end)
-            save_end = rtas_end;
-    }
-
-    /* minimum heap has to reach to the end of all Xen required memory */
-    xh_pages = ALIGN_UP(save_end, PAGE_SIZE) >> PAGE_SHIFT;
-    xh_pages += opt_xenheap_megabytes << (20 - PAGE_SHIFT);
+    xh_pages = opt_xenheap_megabytes << (20 - PAGE_SHIFT);
 
     /* While we are allocating HTABS from The Xen Heap we need it to
      * be larger */
-    xh_pages += nr_pages >> 5;
+    xh_pages += nr_pages >> 5;
 
     xenheap_phys_end = xh_pages << PAGE_SHIFT;
     printk("End of Xen Area: %luMiB (%luKiB)\n",
@@ -214,17 +148,20 @@ void memory_init(module_t *mod, int mcou
 
     printk("End of RAM: %luMiB (%luKiB)\n", eomem >> 20, eomem >> 10);
 
-    /* Architecturally the first 4 pages are exception hendlers, we
-     * will also be copying down some code there */
-    heap_start = 4 << PAGE_SHIFT;
-    if (oftree < (ulong)_start)
-        heap_start = ALIGN_UP(oftree_end, PAGE_SIZE);
-
-    heap_start = init_boot_allocator(heap_start);
-    if (heap_start > (ulong)_start) {
-        panic("space below _start (%p) is not enough memory "
-              "for heap (0x%lx)\n", _start, heap_start);
+    /* The boot allocator requires one bit per page. Find a spot for it. */
+    bitmap_size = max_page / 8;
+    pos = boot_of_mem_avail(0, &start, &end);
+    while (pos >= 0) {
+        if (end - start >= bitmap_size) {
+            bitmap_start = start;
+            bitmap_end = init_boot_allocator(bitmap_start);
+            printk("boot allocator @ %lx - %lx\n", bitmap_start, bitmap_end);
+            break;
+        }
+        pos = boot_of_mem_avail(pos, &start, &end);
     }
+    if (bitmap_start == ~0UL)
+        panic("Couldn't find 0x%lx bytes for boot allocator.", bitmap_size);
 
     /* allow everything else to be allocated */
     total_pages = 0;
@@ -242,12 +179,39 @@ void memory_init(module_t *mod, int mcou
 
     numa_initmem_init(0, max_page);
 
+    /* Domain heap gets all the unclaimed memory. */
     end_boot_allocator();
 
-    /* Add memory between the beginning of the heap and the beginning
-     * of our text */
-    free_xenheap(heap_start, (ulong)_start);
-    setup_xenheap(mod, mcount);
+    /* Create initial xen heap by finding non-reserved memory. */
+    pos = boot_of_mem_avail(0, &start, &end);
+    while (pos >= 0) {
+        if (end == ~0UL)
+            end = xenheap_phys_end;
+
+        /* Problem: the bitmap itself is not reserved. */
+        if ((start >= bitmap_start) && (start < bitmap_end)) {
+            /* Start is inside bitmap. */
+            start = bitmap_end;
+        }
+        if ((end > bitmap_start) && (end <= bitmap_end)) {
+            /* End is inside bitmap. */
+            end = bitmap_start;
+        }
+        if ((start < bitmap_start) && (end > bitmap_end)) {
+            /* Range encompasses bitmap. First free low part, then high. */
+            xenheap_size += bitmap_start - start;
+            DBG("xenheap: %016lx - %016lx\n", start, bitmap_start);
+            init_xenheap_pages(start, bitmap_start);
+            start = bitmap_end;
+        }
+
+        xenheap_size += end - start;
+        DBG("xenheap: %016lx - %016lx\n", start, end);
+        init_xenheap_pages(start, end);
+
+        pos = boot_of_mem_avail(pos, &start, &end);
+    }
+
     printk("Xen Heap: %luMiB (%luKiB)\n",
            xenheap_size >> 20, xenheap_size >> 10);
 
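The carve-out in the second loop above needs three cases because the boot-allocator bitmap itself is never marked used in boot_of.c's bitmap. Pulled out into a standalone helper purely for illustration (free_avoiding_bitmap() is a hypothetical name, written as if it lived in memory.c; it is not part of the changeset):

    /* Free the free range [start, end) into the xen heap while skipping the
     * boot-allocator bitmap at [bm_start, bm_end).  Assumes, as memory_init()
     * does, that the bitmap was placed inside a single free range, so no
     * range lies entirely within [bm_start, bm_end). */
    static void free_avoiding_bitmap(ulong start, ulong end,
                                     ulong bm_start, ulong bm_end)
    {
        if (start >= bm_start && start < bm_end)
            start = bm_end;      /* range begins inside the bitmap */
        if (end > bm_start && end <= bm_end)
            end = bm_start;      /* range ends inside the bitmap */
        if (start < bm_start && end > bm_end) {
            /* range straddles the bitmap: free the low piece now... */
            xenheap_size += bm_start - start;
            init_xenheap_pages(start, bm_start);
            start = bm_end;      /* ...then fall through to free the high piece */
        }
        xenheap_size += end - start;
        init_xenheap_pages(start, end);
    }
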
--- a/xen/include/asm-powerpc/mm.h	Wed Jan 17 19:37:20 2007 -0500
+++ b/xen/include/asm-powerpc/mm.h	Fri Jan 19 15:36:17 2007 -0600
@@ -13,7 +13,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
- * Copyright (C) IBM Corp. 2005, 2006
+ * Copyright IBM Corp. 2005, 2006, 2007
  *
  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
  *          Jimi Xenidis <jimix@watson.ibm.com>
@@ -35,6 +35,7 @@
 #define memguard_unguard_range(_p,_l)    ((void)0)
 
 extern unsigned long xenheap_phys_end;
+extern int boot_of_mem_avail(int pos, ulong *start, ulong *end);
 
 /*
  * Per-page-frame information.