ia64/xen-unstable

view extras/mini-os/mm.c @ 6191:430ce2bade9b

Trivial fixes for a couple of xenlinux compile warnings.

Signed-off-by: <andrew.warfield@cl.cam.ac.uk>
author akw27@arcadians.cl.cam.ac.uk
date Mon Aug 15 13:50:39 2005 +0000 (2005-08-15)
parents 7561a06348cf
children 98a6eb458c78 0610add7c3fe 8799d14bef77 a9873d384da4
/* -*- Mode:C; c-basic-offset:4; tab-width:4 -*-
 ****************************************************************************
 * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
 ****************************************************************************
 *
 * File: mm.c
 * Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
 * Changes:
 *
 * Date: Aug 2003
 *
 * Environment: Xen Minimal OS
 * Description: memory management related functions
 *              contains buddy page allocator from Xen.
 *
 ****************************************************************************
 * $Id: c-insert.c,v 1.7 2002/11/08 16:04:34 rn Exp $
 ****************************************************************************
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <os.h>
#include <hypervisor.h>
#include <mm.h>
#include <types.h>
#include <lib.h>

unsigned long *phys_to_machine_mapping;
extern char *stack;
extern char _text, _etext, _edata, _end;

static void init_page_allocator(unsigned long min, unsigned long max);

void init_mm(void)
{

    unsigned long start_pfn, max_pfn, max_free_pfn;

    unsigned long *pgd = (unsigned long *)start_info.pt_base;

    printk("MM: Init\n");

    printk("  _text:       %p\n", &_text);
    printk("  _etext:      %p\n", &_etext);
    printk("  _edata:      %p\n", &_edata);
    printk("  stack start: %p\n", &stack);
    printk("  _end:        %p\n", &_end);

    /* set up minimal memory info */
    start_pfn = PFN_UP(to_phys(&_end));
    max_pfn   = start_info.nr_pages;

    printk("  start_pfn: %lx\n", start_pfn);
    printk("  max_pfn:   %lx\n", max_pfn);

    /*
     * We know where free memory starts (start_pfn) and how many pages
     * we have (max_pfn).
     *
     * Currently the hypervisor stores the page tables it provides in
     * the high region of this memory range.
     *
     * Next we work out how far down this goes (max_free_pfn).
     *
     * XXX This assumes the hypervisor-provided page tables sit in the
     * upper region of our initial memory. I don't know if this is
     * always true.
     */

    max_free_pfn = PFN_DOWN(to_phys(pgd));
#ifdef __i386__
    {
        unsigned long *pgd = (unsigned long *)start_info.pt_base;
        unsigned long pte;
        int i;
        printk("  pgd(pa(pgd)): %lx(%lx)", (u_long)pgd, to_phys(pgd));

        for ( i = 0; i < (HYPERVISOR_VIRT_START>>22); i++ )
        {
            unsigned long pgde = *pgd++;
            if ( !(pgde & 1) ) continue;
            pte = machine_to_phys(pgde & PAGE_MASK);
            printk("  PT(%x): %lx(%lx)", i, (u_long)to_virt(pte), pte);
            if ( PFN_DOWN(pte) <= max_free_pfn )
                max_free_pfn = PFN_DOWN(pte);
        }
    }
    max_free_pfn--;
    printk("  max_free_pfn: %lx\n", max_free_pfn);

    /*
     * now we can initialise the page allocator
     */
    printk("MM: Initialise page allocator for %lx(%lx)-%lx(%lx)\n",
           (u_long)to_virt(PFN_PHYS(start_pfn)), PFN_PHYS(start_pfn),
           (u_long)to_virt(PFN_PHYS(max_free_pfn)), PFN_PHYS(max_free_pfn));
    init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_free_pfn));
#endif

    /* Now initialise the physical->machine mapping table. */

    printk("MM: done\n");
}
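
/*
 * Worked example for the bookkeeping above (illustrative numbers only):
 * with 4kB pages and the image mapped at VIRT_START = 0xC0000000, an
 * _end of 0xC0120ABC gives to_phys(&_end) = 0x120ABC, so
 * start_pfn = PFN_UP(0x120ABC) = 0x121: the first wholly free page
 * above the loaded image. Everything from there up to max_free_pfn is
 * handed to the page allocator below.
 */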


/*********************
 * ALLOCATION BITMAP
 *  One bit per page of memory. Bit set => page is allocated.
 */

static unsigned long *alloc_bitmap;
#define PAGES_PER_MAPWORD (sizeof(unsigned long) * 8)

/* 1UL keeps the shift at map-word width (a plain 1 is only an int). */
#define allocated_in_map(_pn) \
    (alloc_bitmap[(_pn)/PAGES_PER_MAPWORD] & (1UL<<((_pn)&(PAGES_PER_MAPWORD-1))))
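
/*
 * Example: with 32-bit map words (PAGES_PER_MAPWORD == 32), page 70
 * lives in alloc_bitmap[2] (70/32) at bit offset 6 (70%32), so
 * allocated_in_map(70) tests alloc_bitmap[2] & (1UL<<6).
 */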

/*
 * Hint regarding bitwise arithmetic in map_{alloc,free}:
 *  -(1<<n)  sets all bits >= n.
 *  (1<<n)-1 sets all bits <  n.
 * Variable names in map_{alloc,free}:
 *  *_idx == Index into `alloc_bitmap' array.
 *  *_off == Bit offset within an element of the `alloc_bitmap' array.
 */
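
/*
 * Worked example (8-bit words for brevity): to mark pages [3,8) within
 * one word, map_alloc ORs in ((1<<8)-1) & -(1<<3) = 0xff & 0xf8 = 0xf8,
 * i.e. bits 3-7. For the same range map_free ANDs with
 * -(1<<8) | ((1<<3)-1) = 0x00 | 0x07, clearing exactly those bits.
 */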

static void map_alloc(unsigned long first_page, unsigned long nr_pages)
{
    unsigned long start_off, end_off, curr_idx, end_idx;

    curr_idx  = first_page / PAGES_PER_MAPWORD;
    start_off = first_page & (PAGES_PER_MAPWORD-1);
    end_idx   = (first_page + nr_pages) / PAGES_PER_MAPWORD;
    end_off   = (first_page + nr_pages) & (PAGES_PER_MAPWORD-1);

    if ( curr_idx == end_idx )
    {
        alloc_bitmap[curr_idx] |= ((1UL<<end_off)-1) & -(1UL<<start_off);
    }
    else
    {
        alloc_bitmap[curr_idx] |= -(1UL<<start_off);
        while ( ++curr_idx < end_idx ) alloc_bitmap[curr_idx] = ~0UL;
        alloc_bitmap[curr_idx] |= (1UL<<end_off)-1;
    }
}
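
/*
 * Example of the multi-word case: with 32-bit map words,
 * map_alloc(30, 10) sets bits 30-31 of word 0 (-(1UL<<30)) and bits
 * 0-7 of word 1 ((1UL<<8)-1), marking pages 30-39; map_free(30, 10)
 * clears exactly the same bits.
 */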

static void map_free(unsigned long first_page, unsigned long nr_pages)
{
    unsigned long start_off, end_off, curr_idx, end_idx;

    curr_idx  = first_page / PAGES_PER_MAPWORD;
    start_off = first_page & (PAGES_PER_MAPWORD-1);
    end_idx   = (first_page + nr_pages) / PAGES_PER_MAPWORD;
    end_off   = (first_page + nr_pages) & (PAGES_PER_MAPWORD-1);

    if ( curr_idx == end_idx )
    {
        alloc_bitmap[curr_idx] &= -(1UL<<end_off) | ((1UL<<start_off)-1);
    }
    else
    {
        alloc_bitmap[curr_idx] &= (1UL<<start_off)-1;
        while ( ++curr_idx != end_idx ) alloc_bitmap[curr_idx] = 0;
        alloc_bitmap[curr_idx] &= -(1UL<<end_off);
    }
}


/*************************
 * BINARY BUDDY ALLOCATOR
 */

typedef struct chunk_head_st chunk_head_t;
typedef struct chunk_tail_st chunk_tail_t;

struct chunk_head_st {
    chunk_head_t  *next;
    chunk_head_t **pprev;
    int            level;
};

struct chunk_tail_st {
    int level;
};
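
/*
 * A free chunk of order i spans 2^(i+PAGE_SHIFT) bytes:
 *
 *   +--------------+--------- ... ---------+--------------+
 *   | chunk_head_t |       free space      | chunk_tail_t |
 *   +--------------+--------- ... ---------+--------------+
 *
 * The head carries the freelist links and the order (`level'); the
 * tail repeats the order so the chunk ending just below a given
 * address can be recognised, which is what buddy coalescing needs.
 */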

/* Linked lists of free chunks of different powers-of-two in size. */
#define FREELIST_SIZE ((sizeof(void*)<<3)-PAGE_SHIFT)
static chunk_head_t *free_head[FREELIST_SIZE];
static chunk_head_t  free_tail[FREELIST_SIZE];
#define FREELIST_EMPTY(_l) ((_l)->next == NULL)
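
/*
 * free_tail[] is declared chunk_head_t so each entry can serve as a
 * list sentinel: an empty list is free_head[i] == &free_tail[i], whose
 * next pointer stays NULL, hence the FREELIST_EMPTY test. pprev always
 * points at the previous element's next field (or at free_head[i]
 * itself), so unlinking needs no list-head special case.
 */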

#define round_pgdown(_p) ((_p)&PAGE_MASK)
#define round_pgup(_p)   (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
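
/*
 * Example with 4kB pages: round_pgup(0x1801) == 0x2000 and
 * round_pgdown(0x1801) == 0x1000; page-aligned values map to
 * themselves under both macros.
 */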


/*
 * Initialise allocator, placing addresses [@min,@max] in free pool.
 * @min and @max are PHYSICAL addresses.
 */
static void init_page_allocator(unsigned long min, unsigned long max)
{
    int i;
    unsigned long range, bitmap_size;
    chunk_head_t *ch;
    chunk_tail_t *ct;

    for ( i = 0; i < FREELIST_SIZE; i++ )
    {
        free_head[i]       = &free_tail[i];
        free_tail[i].pprev = &free_head[i];
        free_tail[i].next  = NULL;
    }

    min = round_pgup (min);
    max = round_pgdown(max);

    /* Allocate space for the allocation bitmap. */
    bitmap_size  = (max+1) >> (PAGE_SHIFT+3);
    bitmap_size  = round_pgup(bitmap_size);
    alloc_bitmap = (unsigned long *)to_virt(min);
    min         += bitmap_size;
    range        = max - min;

    /* All allocated by default. */
    memset(alloc_bitmap, ~0, bitmap_size);
    /* Free up the memory we've been given to play with. */
    map_free(min>>PAGE_SHIFT, range>>PAGE_SHIFT);

    /* The buddy lists are addressed in high memory. */
    min += VIRT_START;
    max += VIRT_START;

    while ( range != 0 )
    {
        /*
         * Next chunk is limited by alignment of min, but also
         * must not be bigger than remaining range.
         */
        for ( i = PAGE_SHIFT; (1UL<<(i+1)) <= range; i++ )
            if ( min & (1UL<<i) ) break;

        ch = (chunk_head_t *)min;
        min   += (1UL<<i);
        range -= (1UL<<i);
        ct = (chunk_tail_t *)min-1;
        i -= PAGE_SHIFT;
        ch->level       = i;
        ch->next        = free_head[i];
        ch->pprev       = &free_head[i];
        ch->next->pprev = &ch->next;
        free_head[i]    = ch;
        ct->level       = i;
    }
}
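
/*
 * Worked example of the carving loop above (4kB pages): with
 * min = 0x5000 and range = 0xB000 it emits a 4kB chunk at 0x5000
 * (bit 12 of min is set), then an 8kB chunk at 0x6000, then a 32kB
 * chunk at 0x8000, exhausting the range. Each chunk is the largest
 * power of two that fits the remaining range while keeping its start
 * address naturally aligned.
 */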


/* Allocate 2^@order contiguous pages. Returns a VIRTUAL address. */
unsigned long alloc_pages(int order)
{
    int i;
    chunk_head_t *alloc_ch, *spare_ch;
    chunk_tail_t *spare_ct;

    /* Find smallest order which can satisfy the request. */
    for ( i = order; i < FREELIST_SIZE; i++ ) {
        if ( !FREELIST_EMPTY(free_head[i]) )
            break;
    }

    if ( i == FREELIST_SIZE ) goto no_memory;

    /* Unlink a chunk. */
    alloc_ch = free_head[i];
    free_head[i] = alloc_ch->next;
    alloc_ch->next->pprev = alloc_ch->pprev;

    /* We may have to break the chunk a number of times. */
    while ( i != order )
    {
        /* Split into two equal parts. */
        i--;
        spare_ch = (chunk_head_t *)((char *)alloc_ch + (1UL<<(i+PAGE_SHIFT)));
        spare_ct = (chunk_tail_t *)((char *)spare_ch + (1UL<<(i+PAGE_SHIFT)))-1;

        /* Create new header for spare chunk. */
        spare_ch->level = i;
        spare_ch->next  = free_head[i];
        spare_ch->pprev = &free_head[i];
        spare_ct->level = i;

        /* Link in the spare chunk. */
        spare_ch->next->pprev = &spare_ch->next;
        free_head[i] = spare_ch;
    }

    map_alloc(to_phys(alloc_ch)>>PAGE_SHIFT, 1UL<<order);

    return((unsigned long)alloc_ch);

 no_memory:

    printk("Cannot handle page request order %d!\n", order);

    return 0;
}
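

/*
 * Not part of the original file: the listing above has no free path at
 * this changeset. The sketch below is a hypothetical buddy-coalescing
 * free_pages(), written against the structures above purely for
 * illustration; its name and exact behaviour are assumptions. It relies
 * on the invariants that a free chunk's bitmap bits are clear and that
 * its head and tail both record its order.
 */
void free_pages(void *pointer, int order)
{
    chunk_head_t *freed_ch, *to_merge_ch;
    chunk_tail_t *freed_ct;
    unsigned long mask;

    /* Return the pages to the allocation bitmap. */
    map_free(to_phys(pointer)>>PAGE_SHIFT, 1UL<<order);

    /* Re-create head and tail for the freed chunk. */
    freed_ch = (chunk_head_t *)pointer;
    freed_ct = (chunk_tail_t *)((char *)pointer + (1UL<<(order+PAGE_SHIFT))) - 1;

    /* Coalesce with the buddy while it is free and of the same order. */
    while ( order + 1 < FREELIST_SIZE )
    {
        mask = 1UL << (order + PAGE_SHIFT);
        if ( (unsigned long)freed_ch & mask )
        {
            /* The buddy is the equally sized chunk just below us. */
            to_merge_ch = (chunk_head_t *)((char *)freed_ch - mask);
            if ( allocated_in_map(to_phys(to_merge_ch)>>PAGE_SHIFT) ||
                 to_merge_ch->level != order )
                break;
            freed_ch = to_merge_ch;     /* merged chunk starts lower */
        }
        else
        {
            /* The buddy is the equally sized chunk just above us. */
            to_merge_ch = (chunk_head_t *)((char *)freed_ch + mask);
            if ( allocated_in_map(to_phys(to_merge_ch)>>PAGE_SHIFT) ||
                 to_merge_ch->level != order )
                break;
            freed_ct = (chunk_tail_t *)((char *)to_merge_ch + mask) - 1;
        }

        /* Committed to merging: unlink the buddy from its freelist. */
        *(to_merge_ch->pprev) = to_merge_ch->next;
        to_merge_ch->next->pprev = to_merge_ch->pprev;

        order++;
    }

    /* Link the (possibly merged) chunk into the matching freelist. */
    freed_ch->level = order;
    freed_ch->next  = free_head[order];
    freed_ch->pprev = &free_head[order];
    freed_ct->level = order;

    freed_ch->next->pprev = &freed_ch->next;
    free_head[order] = freed_ch;
}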