extras/mini-os/arch/x86/mm.c @ 18884:4ffd935c08a3 (ia64/xen-unstable)

minios: Clip memory not usable by Mini-OS (above 1GB)

Signed-off-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
Author:   Keir Fraser <keir.fraser@citrix.com>
Date:     Fri Dec 05 13:06:57 2008 +0000
Parents:  d85714c0a742
Children: 1d4ce9e31fa0
/*
 ****************************************************************************
 * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
 * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
 ****************************************************************************
 *
 *        File: mm.c
 *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
 *     Changes: Grzegorz Milos
 *
 *        Date: Aug 2003, changes Aug 2005
 *
 * Environment: Xen Minimal OS
 * Description: memory management related functions
 *              contains buddy page allocator from Xen.
 *
 ****************************************************************************
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <os.h>
#include <hypervisor.h>
#include <mm.h>
#include <types.h>
#include <lib.h>
#include <xmalloc.h>
#include <xen/memory.h>

#ifdef MM_DEBUG
#define DEBUG(_f, _a...) \
    printk("MINI_OS(file=mm.c, line=%d) " _f "\n", __LINE__, ## _a)
#else
#define DEBUG(_f, _a...)    ((void)0)
#endif

unsigned long *phys_to_machine_mapping;
unsigned long mfn_zero;
extern char stack[];
extern void page_walk(unsigned long virt_addr);
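
/*
 * Turn the frame at *pt_pfn into a page table page: clear it, re-map it
 * read-only at its current virtual address, and hook it into the parent
 * table (machine frame prev_l_mfn) at the given offset with the protection
 * bits for the requested level.  *pt_pfn is advanced to the next free frame.
 */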
void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn,
                  unsigned long offset, unsigned long level)
{
    pgentry_t *tab = (pgentry_t *)start_info.pt_base;
    unsigned long pt_page = (unsigned long)pfn_to_virt(*pt_pfn);
    pgentry_t prot_e, prot_t;
    mmu_update_t mmu_updates[1];

    prot_e = prot_t = 0;
    DEBUG("Allocating new L%lu pt frame for pt_pfn=%lx, "
          "prev_l_mfn=%lx, offset=%lx",
          level, *pt_pfn, prev_l_mfn, offset);

    /* We need to clear the page, otherwise we might fail to map it
       as a page table page */
    memset((void*) pt_page, 0, PAGE_SIZE);

    switch ( level )
    {
    case L1_FRAME:
        prot_e = L1_PROT;
        prot_t = L2_PROT;
        break;
    case L2_FRAME:
        prot_e = L2_PROT;
        prot_t = L3_PROT;
        break;
#if defined(__x86_64__)
    case L3_FRAME:
        prot_e = L3_PROT;
        prot_t = L4_PROT;
        break;
#endif
    default:
        printk("new_pt_frame() called with invalid level number %lu\n", level);
        do_exit();
        break;
    }

    /* Update the entry */
#if defined(__x86_64__)
    tab = pte_to_virt(tab[l4_table_offset(pt_page)]);
#endif
    tab = pte_to_virt(tab[l3_table_offset(pt_page)]);

    mmu_updates[0].ptr = (tab[l2_table_offset(pt_page)] & PAGE_MASK) +
                         sizeof(pgentry_t) * l1_table_offset(pt_page);
    mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT |
                         (prot_e & ~_PAGE_RW);
    if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
    {
        printk("PTE for new page table page could not be updated\n");
        do_exit();
    }

    /* Now fill the new page table page with entries.
       Update the page directory as well. */
    mmu_updates[0].ptr = ((pgentry_t)prev_l_mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
    mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT | prot_t;
    if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
    {
        printk("ERROR: mmu_update failed\n");
        do_exit();
    }

    *pt_pfn += 1;
}

/* Checks if a pagetable frame is needed (i.e. if it wasn't already allocated by Xen) */
static int need_pt_frame(unsigned long virt_address, int level)
{
    unsigned long hyp_virt_start = HYPERVISOR_VIRT_START;
#if defined(__x86_64__)
    unsigned long hyp_virt_end = HYPERVISOR_VIRT_END;
#else
    unsigned long hyp_virt_end = 0xffffffff;
#endif

    /* In general frames will _not_ be needed if they were already
       allocated to map the hypervisor into our VA space */
#if defined(__x86_64__)
    if(level == L3_FRAME)
    {
        if(l4_table_offset(virt_address) >=
           l4_table_offset(hyp_virt_start) &&
           l4_table_offset(virt_address) <=
           l4_table_offset(hyp_virt_end))
            return 0;
        return 1;
    } else
#endif

    if(level == L2_FRAME)
    {
#if defined(__x86_64__)
        if(l4_table_offset(virt_address) >=
           l4_table_offset(hyp_virt_start) &&
           l4_table_offset(virt_address) <=
           l4_table_offset(hyp_virt_end))
#endif
            if(l3_table_offset(virt_address) >=
               l3_table_offset(hyp_virt_start) &&
               l3_table_offset(virt_address) <=
               l3_table_offset(hyp_virt_end))
                return 0;

        return 1;
    } else

    /* Always need l1 frames */
    if(level == L1_FRAME)
        return 1;

    printk("ERROR: Unknown frame level %d, hypervisor %lx,%lx\n",
           level, hyp_virt_start, hyp_virt_end);
    return -1;
}
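
/*
 * Build the initial page tables: map every pfn from the end of the area
 * already mapped by the domain builder up to *max_pfn, allocating fresh
 * page table frames from *start_pfn as needed.  *max_pfn is clipped so the
 * mapping never reaches the Xen virtual range; on return *start_pfn points
 * past the last frame consumed for page tables.
 */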
void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
{
    unsigned long start_address, end_address;
    unsigned long pfn_to_map, pt_pfn = *start_pfn;
    static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
    pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
    unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
    unsigned long offset;
    int count = 0;

    pfn_to_map = (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES;

    if (*max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START))
    {
        printk("WARNING: Mini-OS trying to use Xen virtual space. "
               "Truncating memory from %ldMB to ",
               ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
        *max_pfn = virt_to_pfn(HYPERVISOR_VIRT_START - PAGE_SIZE);
        printk("%ldMB\n",
               ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
    }

    start_address = (unsigned long)pfn_to_virt(pfn_to_map);
    end_address = (unsigned long)pfn_to_virt(*max_pfn);

    /* We worked out the virtual memory range to map, now mapping loop */
    printk("Mapping memory range 0x%lx - 0x%lx\n", start_address, end_address);

    while(start_address < end_address)
    {
        tab = (pgentry_t *)start_info.pt_base;
        mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));

#if defined(__x86_64__)
        offset = l4_table_offset(start_address);
        /* Need new L3 pt frame */
        if(!(start_address & L3_MASK))
            if(need_pt_frame(start_address, L3_FRAME))
                new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);

        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
#endif
        offset = l3_table_offset(start_address);
        /* Need new L2 pt frame */
        if(!(start_address & L2_MASK))
            if(need_pt_frame(start_address, L2_FRAME))
                new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);

        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
        offset = l2_table_offset(start_address);
        /* Need new L1 pt frame */
        if(!(start_address & L1_MASK))
            if(need_pt_frame(start_address, L1_FRAME))
                new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);

        page = tab[offset];
        mfn = pte_to_mfn(page);
        offset = l1_table_offset(start_address);

        mmu_updates[count].ptr = ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
        mmu_updates[count].val = (pgentry_t)pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT;
        count++;
        if (count == L1_PAGETABLE_ENTRIES || pfn_to_map == *max_pfn)
        {
            if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0)
            {
                printk("PTE could not be updated\n");
                do_exit();
            }
            count = 0;
        }
        start_address += PAGE_SIZE;
    }

    *start_pfn = pt_pfn;
}

extern void shared_info;
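/*
 * Mark the pages covering [text, etext) read-only in the page tables,
 * leaving the shared info page writable, and flush the TLB afterwards.
 */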
static void set_readonly(void *text, void *etext)
{
    unsigned long start_address = ((unsigned long) text + PAGE_SIZE - 1) & PAGE_MASK;
    unsigned long end_address = (unsigned long) etext;
    static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
    pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
    unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
    unsigned long offset;
    int count = 0;

    printk("setting %p-%p readonly\n", text, etext);

    while (start_address + PAGE_SIZE <= end_address) {
        tab = (pgentry_t *)start_info.pt_base;
        mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));

#if defined(__x86_64__)
        offset = l4_table_offset(start_address);
        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
#endif
        offset = l3_table_offset(start_address);
        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
        offset = l2_table_offset(start_address);
        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);

        offset = l1_table_offset(start_address);

        if (start_address != (unsigned long)&shared_info) {
            mmu_updates[count].ptr = ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
            mmu_updates[count].val = tab[offset] & ~_PAGE_RW;
            count++;
        } else
            printk("skipped %lx\n", start_address);

        start_address += PAGE_SIZE;

        if (count == L1_PAGETABLE_ENTRIES || start_address + PAGE_SIZE > end_address)
        {
            if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0)
            {
                printk("PTE could not be updated\n");
                do_exit();
            }
            count = 0;
        }
    }

    {
        mmuext_op_t op = {
            .cmd = MMUEXT_TLB_FLUSH_ALL,
        };
        int count;
        HYPERVISOR_mmuext_op(&op, 1, &count, DOMID_SELF);
    }
}
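
/*
 * Simple memory test helper: write an address-derived pattern over
 * [start_add, end_add) and read it back, reporting any mismatches.
 */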
void mem_test(unsigned long *start_add, unsigned long *end_add)
{
    unsigned long mask = 0x10000;
    unsigned long *pointer;

    for(pointer = start_add; pointer < end_add; pointer++)
    {
        if(!(((unsigned long)pointer) & 0xfffff))
        {
            printk("Writing to %lx\n", (unsigned long)pointer);
            page_walk((unsigned long)pointer);
        }
        *pointer = (unsigned long)pointer & ~mask;
    }

    for(pointer = start_add; pointer < end_add; pointer++)
    {
        if(((unsigned long)pointer & ~mask) != *pointer)
            printk("Read error at 0x%lx. Read: 0x%lx, should read 0x%lx\n",
                   (unsigned long)pointer,
                   *pointer,
                   ((unsigned long)pointer & ~mask));
    }

}
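
/*
 * Walk the page tables and return a pointer to the L1 entry (PTE) for addr,
 * or NULL if an intermediate table is not present.
 */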
static pgentry_t *get_pgt(unsigned long addr)
{
    unsigned long mfn;
    pgentry_t *tab;
    unsigned offset;

    tab = (pgentry_t *)start_info.pt_base;
    mfn = virt_to_mfn(start_info.pt_base);

#if defined(__x86_64__)
    offset = l4_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT))
        return NULL;
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
#endif
    offset = l3_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT))
        return NULL;
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
    offset = l2_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT))
        return NULL;
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
    offset = l1_table_offset(addr);
    return &tab[offset];
}
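
/*
 * As get_pgt(), but allocate and hook in any missing intermediate page
 * table frames so that a valid L1 entry pointer can always be returned.
 */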
pgentry_t *need_pgt(unsigned long addr)
{
    unsigned long mfn;
    pgentry_t *tab;
    unsigned long pt_pfn;
    unsigned offset;

    tab = (pgentry_t *)start_info.pt_base;
    mfn = virt_to_mfn(start_info.pt_base);

#if defined(__x86_64__)
    offset = l4_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT)) {
        pt_pfn = virt_to_pfn(alloc_page());
        new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
    }
    ASSERT(tab[offset] & _PAGE_PRESENT);
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
#endif
    offset = l3_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT)) {
        pt_pfn = virt_to_pfn(alloc_page());
        new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
    }
    ASSERT(tab[offset] & _PAGE_PRESENT);
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
    offset = l2_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT)) {
        pt_pfn = virt_to_pfn(alloc_page());
        new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
    }
    ASSERT(tab[offset] & _PAGE_PRESENT);
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);

    offset = l1_table_offset(addr);
    return &tab[offset];
}

static unsigned long demand_map_area_start;
#ifdef __x86_64__
#define DEMAND_MAP_PAGES ((128ULL << 30) / PAGE_SIZE)
#else
#define DEMAND_MAP_PAGES ((2ULL << 30) / PAGE_SIZE)
#endif

#ifndef HAVE_LIBC
#define HEAP_PAGES 0
#else
unsigned long heap, brk, heap_mapped, heap_end;
#ifdef __x86_64__
#define HEAP_PAGES ((128ULL << 30) / PAGE_SIZE)
#else
#define HEAP_PAGES ((1ULL << 30) / PAGE_SIZE)
#endif
#endif
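
/*
 * Record the virtual address ranges reserved above cur_pfn for the
 * on-demand mapping area and, when libc support is enabled, for the heap.
 */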
void arch_init_demand_mapping_area(unsigned long cur_pfn)
{
    cur_pfn++;

    demand_map_area_start = (unsigned long) pfn_to_virt(cur_pfn);
    cur_pfn += DEMAND_MAP_PAGES;
    printk("Demand map pfns at %lx-%lx.\n", demand_map_area_start, pfn_to_virt(cur_pfn));

#ifdef HAVE_LIBC
    cur_pfn++;
    heap_mapped = brk = heap = (unsigned long) pfn_to_virt(cur_pfn);
    cur_pfn += HEAP_PAGES;
    heap_end = (unsigned long) pfn_to_virt(cur_pfn);
    printk("Heap resides at %lx-%lx.\n", brk, heap_end);
#endif
}

#define MAP_BATCH ((STACK_SIZE / 2) / sizeof(mmu_update_t))
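/*
 * Map n frames at virtual address addr.  The machine frames are taken from
 * f (indexed by stride, with increment added per frame) and mapped in
 * batches of at most MAP_BATCH updates; if may_fail is set, frames are
 * mapped one at a time and a failure is recorded in f instead of being fatal.
 */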
void do_map_frames(unsigned long addr,
                   unsigned long *f, unsigned long n, unsigned long stride,
                   unsigned long increment, domid_t id, int may_fail, unsigned long prot)
{
    pgentry_t *pgt = NULL;
    unsigned long done = 0;
    unsigned long i;
    int rc;

    while (done < n) {
        unsigned long todo;

        if (may_fail)
            todo = 1;
        else
            todo = n - done;

        if (todo > MAP_BATCH)
            todo = MAP_BATCH;

        {
            mmu_update_t mmu_updates[todo];

            for (i = 0; i < todo; i++, addr += PAGE_SIZE, pgt++) {
                if (!pgt || !(addr & L1_MASK))
                    pgt = need_pgt(addr);
                mmu_updates[i].ptr = virt_to_mach(pgt);
                mmu_updates[i].val = ((pgentry_t)(f[(done + i) * stride] + (done + i) * increment) << PAGE_SHIFT) | prot;
            }

            rc = HYPERVISOR_mmu_update(mmu_updates, todo, NULL, id);
            if (rc < 0) {
                if (may_fail)
                    f[done * stride] |= 0xF0000000;
                else {
                    printk("Map %ld (%lx, ...) at %p failed: %d.\n", todo, f[done * stride] + done * increment, addr, rc);
                    do_exit();
                }
            }
        }

        done += todo;
    }
}
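
/*
 * Find an aligned run of n unmapped pages in the demand mapping area and
 * return its virtual address, or 0 if no such run exists.
 */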
unsigned long allocate_ondemand(unsigned long n, unsigned long alignment)
{
    unsigned long x;
    unsigned long y = 0;

    /* Find a properly aligned run of n contiguous frames */
    for (x = 0; x <= DEMAND_MAP_PAGES - n; x = (x + y + 1 + alignment - 1) & ~(alignment - 1)) {
        unsigned long addr = demand_map_area_start + x * PAGE_SIZE;
        pgentry_t *pgt = get_pgt(addr);
        for (y = 0; y < n; y++, addr += PAGE_SIZE) {
            if (!(addr & L1_MASK))
                pgt = get_pgt(addr);
            if (pgt) {
                if (*pgt & _PAGE_PRESENT)
                    break;
                pgt++;
            }
        }
        if (y == n)
            break;
    }
    if (y != n) {
        printk("Failed to find %ld frames!\n", n);
        return 0;
    }
    return demand_map_area_start + x * PAGE_SIZE;
}
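
/*
 * Reserve a range of the demand mapping area and map the given frames into
 * it; returns the chosen virtual address, or NULL if no space was found.
 */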
void *map_frames_ex(unsigned long *f, unsigned long n, unsigned long stride,
                    unsigned long increment, unsigned long alignment, domid_t id,
                    int may_fail, unsigned long prot)
{
    unsigned long addr = allocate_ondemand(n, alignment);

    if (!addr)
        return NULL;

    /* Found it at x. Map it in. */
    do_map_frames(addr, f, n, stride, increment, id, may_fail, prot);

    return (void *)addr;
}
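
/*
 * Finish with the bootstrap mappings: zero the first text page for use as
 * the CoW zero page and unmap virtual address 0 so NULL dereferences fault.
 */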
static void clear_bootstrap(void)
{
    pte_t nullpte = { };

    /* Use first page as the CoW zero page */
    memset(&_text, 0, PAGE_SIZE);
    mfn_zero = virt_to_mfn((unsigned long) &_text);
    if (HYPERVISOR_update_va_mapping(0, nullpte, UVMF_INVLPG))
        printk("Unable to unmap NULL page\n");
}
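
/*
 * Build the 3-level physical-to-machine frame list (one mfn recorded per pfn
 * up to max_pfn) and advertise it via the shared info page.
 */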
void arch_init_p2m(unsigned long max_pfn)
{
#define L1_P2M_SHIFT    9
#define L2_P2M_SHIFT    18
#define L3_P2M_SHIFT    27
#define L1_P2M_ENTRIES  (1 << L1_P2M_SHIFT)
#define L2_P2M_ENTRIES  (1 << (L2_P2M_SHIFT - L1_P2M_SHIFT))
#define L3_P2M_ENTRIES  (1 << (L3_P2M_SHIFT - L2_P2M_SHIFT))
#define L1_P2M_MASK     (L1_P2M_ENTRIES - 1)
#define L2_P2M_MASK     (L2_P2M_ENTRIES - 1)
#define L3_P2M_MASK     (L3_P2M_ENTRIES - 1)

    unsigned long *l1_list = NULL, *l2_list = NULL, *l3_list;
    unsigned long pfn;

    l3_list = (unsigned long *)alloc_page();
    for(pfn=0; pfn<max_pfn; pfn++)
    {
        if(!(pfn % (L1_P2M_ENTRIES * L2_P2M_ENTRIES)))
        {
            l2_list = (unsigned long*)alloc_page();
            if((pfn >> L3_P2M_SHIFT) > 0)
            {
                printk("Error: Too many pfns.\n");
                do_exit();
            }
            l3_list[(pfn >> L2_P2M_SHIFT)] = virt_to_mfn(l2_list);
        }
        if(!(pfn % (L1_P2M_ENTRIES)))
        {
            l1_list = (unsigned long*)alloc_page();
            l2_list[(pfn >> L1_P2M_SHIFT) & L2_P2M_MASK] =
                virt_to_mfn(l1_list);
        }

        l1_list[pfn & L1_P2M_MASK] = pfn_to_mfn(pfn);
    }
    HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
        virt_to_mfn(l3_list);
    HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
}
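
/*
 * Top-level x86 memory setup: work out the usable pfn range, clip it so the
 * demand mapping area and heap still fit in the virtual address space, build
 * the page tables, drop the bootstrap mappings and write-protect text/rodata.
 */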
void arch_init_mm(unsigned long* start_pfn_p, unsigned long* max_pfn_p)
{

    unsigned long start_pfn, max_pfn, virt_pfns;

    printk("  _text: %p\n", &_text);
    printk("  _etext: %p\n", &_etext);
    printk("  _erodata: %p\n", &_erodata);
    printk("  _edata: %p\n", &_edata);
    printk("  stack start: %p\n", stack);
    printk("  _end: %p\n", &_end);

    /* First page follows page table pages and 3 more pages (store page etc) */
    start_pfn = PFN_UP(to_phys(start_info.pt_base)) +
                start_info.nr_pt_frames + 3;
    max_pfn = start_info.nr_pages;

    /* We need room for demand mapping and heap, clip available memory */
    virt_pfns = DEMAND_MAP_PAGES + HEAP_PAGES;
    if (max_pfn + virt_pfns + 1 < max_pfn)
        max_pfn = -(virt_pfns + 1);

    printk("  start_pfn: %lx\n", start_pfn);
    printk("  max_pfn: %lx\n", max_pfn);

    build_pagetable(&start_pfn, &max_pfn);
    clear_bootstrap();
    set_readonly(&_text, &_erodata);

    *start_pfn_p = start_pfn;
    *max_pfn_p = max_pfn;
}