ia64/xen-unstable: extras/mini-os/arch/x86/mm.c @ 17829:cc4e471bbc08

minios: Fix >4GB machine addresses

Signed-off-by: Samuel Thibault <samuel.thibault@eu.citrix.com>
Author: Keir Fraser <keir.fraser@citrix.com>
Date:   Tue Jun 10 16:59:24 2008 +0100
/*
 ****************************************************************************
 * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
 * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
 ****************************************************************************
 *
 *        File: mm.c
 *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
 *     Changes: Grzegorz Milos
 *
 *        Date: Aug 2003, changes Aug 2005
 *
 * Environment: Xen Minimal OS
 * Description: memory management related functions
 *              contains buddy page allocator from Xen.
 *
 ****************************************************************************
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <os.h>
#include <hypervisor.h>
#include <mm.h>
#include <types.h>
#include <lib.h>
#include <xmalloc.h>
#include <xen/memory.h>

#ifdef MM_DEBUG
#define DEBUG(_f, _a...) \
    printk("MINI_OS(file=mm.c, line=%d) " _f "\n", __LINE__, ## _a)
#else
#define DEBUG(_f, _a...)    ((void)0)
#endif
unsigned long *phys_to_machine_mapping;
unsigned long mfn_zero;
extern char stack[];
extern void page_walk(unsigned long virt_addr);
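
/*
 * Turn the page at *pt_pfn into an L'level' page table frame and hook it
 * into the hierarchy at entry 'offset' of the parent table whose machine
 * frame is 'prev_l_mfn'.  The frame is cleared and first remapped read-only
 * (a requirement for page table pages under Xen), then linked into the
 * parent table.  *pt_pfn is advanced to the next free pfn.
 */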
void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn,
                  unsigned long offset, unsigned long level)
{
    pgentry_t *tab = (pgentry_t *)start_info.pt_base;
    unsigned long pt_page = (unsigned long)pfn_to_virt(*pt_pfn);
    pgentry_t prot_e, prot_t;
    mmu_update_t mmu_updates[1];

    prot_e = prot_t = 0;
    DEBUG("Allocating new L%lu pt frame for pt_pfn=%lx, "
          "prev_l_mfn=%lx, offset=%lx",
          level, *pt_pfn, prev_l_mfn, offset);

    /* We need to clear the page, otherwise we might fail to map it
       as a page table page. */
    memset((void*) pt_page, 0, PAGE_SIZE);

    switch ( level )
    {
    case L1_FRAME:
        prot_e = L1_PROT;
        prot_t = L2_PROT;
        break;
    case L2_FRAME:
        prot_e = L2_PROT;
        prot_t = L3_PROT;
        break;
#if defined(__x86_64__)
    case L3_FRAME:
        prot_e = L3_PROT;
        prot_t = L4_PROT;
        break;
#endif
    default:
        printk("new_pt_frame() called with invalid level number %lu\n", level);
        do_exit();
        break;
    }

    /* Update the entry. */
#if defined(__x86_64__)
    tab = pte_to_virt(tab[l4_table_offset(pt_page)]);
#endif
    tab = pte_to_virt(tab[l3_table_offset(pt_page)]);

    mmu_updates[0].ptr = (tab[l2_table_offset(pt_page)] & PAGE_MASK) +
                         sizeof(pgentry_t) * l1_table_offset(pt_page);
    mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT |
                         (prot_e & ~_PAGE_RW);
    if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
    {
        printk("PTE for new page table page could not be updated\n");
        do_exit();
    }

    /* Now fill the new page table page with entries.
       Update the page directory as well. */
    mmu_updates[0].ptr = ((pgentry_t)prev_l_mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
    mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT | prot_t;
    if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
    {
        printk("ERROR: mmu_update failed\n");
        do_exit();
    }

    *pt_pfn += 1;
}
/* Checks whether a page table frame is needed at 'level' for
   'virt_address' (one may already have been allocated by Xen to map the
   hypervisor into our VA space). */
static int need_pt_frame(unsigned long virt_address, int level)
{
    unsigned long hyp_virt_start = HYPERVISOR_VIRT_START;
#if defined(__x86_64__)
    unsigned long hyp_virt_end = HYPERVISOR_VIRT_END;
#else
    unsigned long hyp_virt_end = 0xffffffff;
#endif

    /* In general frames will _not_ be needed if they were already
       allocated to map the hypervisor into our VA space. */
#if defined(__x86_64__)
    if(level == L3_FRAME)
    {
        if(l4_table_offset(virt_address) >=
           l4_table_offset(hyp_virt_start) &&
           l4_table_offset(virt_address) <=
           l4_table_offset(hyp_virt_end))
            return 0;
        return 1;
    } else
#endif

    if(level == L2_FRAME)
    {
#if defined(__x86_64__)
        if(l4_table_offset(virt_address) >=
           l4_table_offset(hyp_virt_start) &&
           l4_table_offset(virt_address) <=
           l4_table_offset(hyp_virt_end))
#endif
            if(l3_table_offset(virt_address) >=
               l3_table_offset(hyp_virt_start) &&
               l3_table_offset(virt_address) <=
               l3_table_offset(hyp_virt_end))
                return 0;

        return 1;
    } else

    /* Always need L1 frames. */
    if(level == L1_FRAME)
        return 1;

    printk("ERROR: Unknown frame level %d, hypervisor %lx,%lx\n",
           level, hyp_virt_start, hyp_virt_end);
    return -1;
}
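
/*
 * Build the initial page tables: extend the tables provided by Xen so that
 * every pfn up to *max_pfn is mapped at its pfn_to_virt() address.  New
 * page table frames are taken from *start_pfn onwards; on return *start_pfn
 * points past the last frame consumed for page tables.
 */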
void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
{
    unsigned long start_address, end_address;
    unsigned long pfn_to_map, pt_pfn = *start_pfn;
    static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
    pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
    unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
    unsigned long offset;
    int count = 0;

    pfn_to_map = (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES;

    if (*max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START))
    {
        printk("WARNING: Mini-OS trying to use Xen virtual space. "
               "Truncating memory from %luMB to ",
               ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
        *max_pfn = virt_to_pfn(HYPERVISOR_VIRT_START - PAGE_SIZE);
        printk("%luMB\n",
               ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
    }

    start_address = (unsigned long)pfn_to_virt(pfn_to_map);
    end_address = (unsigned long)pfn_to_virt(*max_pfn);

    /* We have worked out the virtual memory range to map; now do the mapping loop. */
    printk("Mapping memory range 0x%lx - 0x%lx\n", start_address, end_address);

    while(start_address < end_address)
    {
        tab = (pgentry_t *)start_info.pt_base;
        mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));

#if defined(__x86_64__)
        offset = l4_table_offset(start_address);
        /* Need new L3 pt frame */
        if(!(start_address & L3_MASK))
            if(need_pt_frame(start_address, L3_FRAME))
                new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);

        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
#endif
        offset = l3_table_offset(start_address);
        /* Need new L2 pt frame */
        if(!(start_address & L2_MASK))
            if(need_pt_frame(start_address, L2_FRAME))
                new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);

        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
        offset = l2_table_offset(start_address);
        /* Need new L1 pt frame */
        if(!(start_address & L1_MASK))
            if(need_pt_frame(start_address, L1_FRAME))
                new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);

        page = tab[offset];
        mfn = pte_to_mfn(page);
        offset = l1_table_offset(start_address);

        mmu_updates[count].ptr = ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
        mmu_updates[count].val = (pgentry_t)pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT;
        count++;
        if (count == L1_PAGETABLE_ENTRIES || pfn_to_map == *max_pfn)
        {
            if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0)
            {
                printk("PTE could not be updated\n");
                do_exit();
            }
            count = 0;
        }
        start_address += PAGE_SIZE;
    }

    *start_pfn = pt_pfn;
}
extern void shared_info;
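
/*
 * Remap the range [text, etext) read-only by clearing _PAGE_RW in every L1
 * entry covering it (the page holding shared_info is skipped), then flush
 * the TLB.
 */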
static void set_readonly(void *text, void *etext)
{
    unsigned long start_address = ((unsigned long) text + PAGE_SIZE - 1) & PAGE_MASK;
    unsigned long end_address = (unsigned long) etext;
    static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
    pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
    unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
    unsigned long offset;
    int count = 0;

    printk("setting %p-%p readonly\n", text, etext);

    while (start_address + PAGE_SIZE <= end_address) {
        tab = (pgentry_t *)start_info.pt_base;
        mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));

#if defined(__x86_64__)
        offset = l4_table_offset(start_address);
        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
#endif
        offset = l3_table_offset(start_address);
        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
        offset = l2_table_offset(start_address);
        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);

        offset = l1_table_offset(start_address);

        if (start_address != (unsigned long)&shared_info) {
            mmu_updates[count].ptr = ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
            mmu_updates[count].val = tab[offset] & ~_PAGE_RW;
            count++;
        } else
            printk("skipped %lx\n", start_address);

        start_address += PAGE_SIZE;

        if (count == L1_PAGETABLE_ENTRIES || start_address + PAGE_SIZE > end_address)
        {
            if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0)
            {
                printk("PTE could not be updated\n");
                do_exit();
            }
            count = 0;
        }
    }

    {
        mmuext_op_t op = {
            .cmd = MMUEXT_TLB_FLUSH_ALL,
        };
        int count;
        HYPERVISOR_mmuext_op(&op, 1, &count, DOMID_SELF);
    }
}
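
/*
 * Simple memory self-test: write an address-derived pattern into
 * [start_add, end_add) and read it back, reporting any mismatch.
 */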
void mem_test(unsigned long *start_add, unsigned long *end_add)
{
    unsigned long mask = 0x10000;
    unsigned long *pointer;

    for(pointer = start_add; pointer < end_add; pointer++)
    {
        if(!(((unsigned long)pointer) & 0xfffff))
        {
            printk("Writing to %lx\n", (unsigned long)pointer);
            page_walk((unsigned long)pointer);
        }
        *pointer = (unsigned long)pointer & ~mask;
    }

    for(pointer = start_add; pointer < end_add; pointer++)
    {
        if(((unsigned long)pointer & ~mask) != *pointer)
            printk("Read error at 0x%lx. Read: 0x%lx, should read 0x%lx\n",
                   (unsigned long)pointer,
                   *pointer,
                   ((unsigned long)pointer & ~mask));
    }

}
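
/*
 * Walk the page tables for 'addr' and return a pointer to its L1 entry,
 * or NULL if any intermediate level is not present.
 */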
static pgentry_t *get_pgt(unsigned long addr)
{
    unsigned long mfn;
    pgentry_t *tab;
    unsigned offset;

    tab = (pgentry_t *)start_info.pt_base;
    mfn = virt_to_mfn(start_info.pt_base);

#if defined(__x86_64__)
    offset = l4_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT))
        return NULL;
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
#endif
    offset = l3_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT))
        return NULL;
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
    offset = l2_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT))
        return NULL;
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
    offset = l1_table_offset(addr);
    return &tab[offset];
}
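
/*
 * Like get_pgt(), but allocate and hook in any missing intermediate page
 * table frames so that a valid L1 entry pointer can always be returned.
 */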
static pgentry_t *need_pgt(unsigned long addr)
{
    unsigned long mfn;
    pgentry_t *tab;
    unsigned long pt_pfn;
    unsigned offset;

    tab = (pgentry_t *)start_info.pt_base;
    mfn = virt_to_mfn(start_info.pt_base);

#if defined(__x86_64__)
    offset = l4_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT)) {
        pt_pfn = virt_to_pfn(alloc_page());
        new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
    }
    ASSERT(tab[offset] & _PAGE_PRESENT);
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
#endif
    offset = l3_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT)) {
        pt_pfn = virt_to_pfn(alloc_page());
        new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
    }
    ASSERT(tab[offset] & _PAGE_PRESENT);
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
    offset = l2_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT)) {
        pt_pfn = virt_to_pfn(alloc_page());
        new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
    }
    ASSERT(tab[offset] & _PAGE_PRESENT);
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);

    offset = l1_table_offset(addr);
    return &tab[offset];
}
static unsigned long demand_map_area_start;
#ifdef __x86_64__
#define DEMAND_MAP_PAGES ((128ULL << 30) / PAGE_SIZE)
#else
#define DEMAND_MAP_PAGES ((2ULL << 30) / PAGE_SIZE)
#endif

#ifdef HAVE_LIBC
unsigned long heap, brk, heap_mapped, heap_end;
#ifdef __x86_64__
#define HEAP_PAGES ((128ULL << 30) / PAGE_SIZE)
#else
#define HEAP_PAGES ((1ULL << 30) / PAGE_SIZE)
#endif
#endif
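
/*
 * Reserve virtual address ranges just above cur_pfn's direct mapping for
 * the demand-mapping area and, when built with libc support, for the heap.
 */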
void arch_init_demand_mapping_area(unsigned long cur_pfn)
{
    cur_pfn++;

    demand_map_area_start = (unsigned long) pfn_to_virt(cur_pfn);
    cur_pfn += DEMAND_MAP_PAGES;
    printk("Demand map pfns at %lx-%lx.\n", demand_map_area_start,
           (unsigned long) pfn_to_virt(cur_pfn));

#ifdef HAVE_LIBC
    cur_pfn++;
    heap_mapped = brk = heap = (unsigned long) pfn_to_virt(cur_pfn);
    cur_pfn += HEAP_PAGES;
    heap_end = (unsigned long) pfn_to_virt(cur_pfn);
    printk("Heap resides at %lx-%lx.\n", brk, heap_end);
#endif
}
#define MAP_BATCH ((STACK_SIZE / 2) / sizeof(mmu_update_t))
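/*
 * Map 'n' frames at virtual address 'addr'.  The machine frame for slot i
 * is f[i * stride] + i * increment, mapped with protection 'prot' on behalf
 * of domain 'id'.  Updates are batched (at most MAP_BATCH at a time, or one
 * at a time if may_fail is set, in which case failed frames are flagged in
 * f[] instead of aborting).
 */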
void do_map_frames(unsigned long addr,
                   unsigned long *f, unsigned long n, unsigned long stride,
                   unsigned long increment, domid_t id, int may_fail, unsigned long prot)
{
    pgentry_t *pgt = NULL;
    unsigned long done = 0;
    unsigned long i;
    int rc;

    while (done < n) {
        unsigned long todo;

        if (may_fail)
            todo = 1;
        else
            todo = n - done;

        if (todo > MAP_BATCH)
            todo = MAP_BATCH;

        {
            mmu_update_t mmu_updates[todo];

            for (i = 0; i < todo; i++, addr += PAGE_SIZE, pgt++) {
                if (!pgt || !(addr & L1_MASK))
                    pgt = need_pgt(addr);
                mmu_updates[i].ptr = virt_to_mach(pgt);
                mmu_updates[i].val = ((pgentry_t)(f[(done + i) * stride] +
                                      (done + i) * increment) << PAGE_SHIFT) | prot;
            }

            rc = HYPERVISOR_mmu_update(mmu_updates, todo, NULL, id);
            if (rc < 0) {
                if (may_fail)
                    f[done * stride] |= 0xF0000000;
                else {
                    printk("Map %ld (%lx, ...) at %lx failed: %d.\n",
                           todo, f[done * stride] + done * increment, addr, rc);
                    do_exit();
                }
            }
        }

        done += todo;
    }
}
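
/*
 * Find a free, suitably aligned run of 'n' pages in the demand-mapping
 * area, map the given frames there with do_map_frames(), and return the
 * chosen virtual address (or NULL if no free run was found).
 */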
void *map_frames_ex(unsigned long *f, unsigned long n, unsigned long stride,
                    unsigned long increment, unsigned long alignment, domid_t id,
                    int may_fail, unsigned long prot)
{
    unsigned long x;
    unsigned long y = 0;

    /* Find a properly aligned run of n contiguous frames */
    for (x = 0; x <= DEMAND_MAP_PAGES - n;
         x = (x + y + 1 + alignment - 1) & ~(alignment - 1)) {
        unsigned long addr = demand_map_area_start + x * PAGE_SIZE;
        pgentry_t *pgt = get_pgt(addr);
        for (y = 0; y < n; y++, addr += PAGE_SIZE) {
            if (!(addr & L1_MASK))
                pgt = get_pgt(addr);
            if (pgt) {
                if (*pgt & _PAGE_PRESENT)
                    break;
                pgt++;
            }
        }
        if (y == n)
            break;
    }
    if (y != n) {
        printk("Failed to find %ld frames!\n", n);
        return NULL;
    }

    /* Found it at x.  Map it in. */
    do_map_frames(demand_map_area_start + x * PAGE_SIZE, f, n, stride,
                  increment, id, may_fail, prot);

    return (void *)(unsigned long)(demand_map_area_start + x * PAGE_SIZE);
}
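
/*
 * Release bootstrap state: turn the first text page into the CoW zero page
 * and unmap it, and hand the machine frame behind the shared_info
 * placeholder page back to Xen.
 */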
static void clear_bootstrap(void)
{
    xen_pfn_t mfns[] = { virt_to_mfn(&shared_info) };
    int n = sizeof(mfns)/sizeof(*mfns);
    pte_t nullpte = { };

    /* Use first page as the CoW zero page */
    memset(&_text, 0, PAGE_SIZE);
    mfn_zero = pfn_to_mfn((unsigned long) &_text);
    if (HYPERVISOR_update_va_mapping((unsigned long) &_text, nullpte, UVMF_INVLPG))
        printk("Unable to unmap first page\n");

    if (free_physical_pages(mfns, n) != n)
        printk("Unable to free bootstrap pages\n");
}
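
/*
 * Build the three-level physical-to-machine (p2m) frame list and advertise
 * its root frame and max_pfn in the shared info page, so that Xen and the
 * toolstack can translate this domain's pfns to machine frames.
 */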
void arch_init_p2m(unsigned long max_pfn)
{
#define L1_P2M_SHIFT    9
#define L2_P2M_SHIFT    18
#define L3_P2M_SHIFT    27
#define L1_P2M_ENTRIES  (1 << L1_P2M_SHIFT)
#define L2_P2M_ENTRIES  (1 << (L2_P2M_SHIFT - L1_P2M_SHIFT))
#define L3_P2M_ENTRIES  (1 << (L3_P2M_SHIFT - L2_P2M_SHIFT))
#define L1_P2M_MASK     (L1_P2M_ENTRIES - 1)
#define L2_P2M_MASK     (L2_P2M_ENTRIES - 1)
#define L3_P2M_MASK     (L3_P2M_ENTRIES - 1)

    unsigned long *l1_list, *l2_list, *l3_list;
    unsigned long pfn;

    l3_list = (unsigned long *)alloc_page();
    for(pfn=0; pfn<max_pfn; pfn++)
    {
        if(!(pfn % (L1_P2M_ENTRIES * L2_P2M_ENTRIES)))
        {
            l2_list = (unsigned long*)alloc_page();
            if((pfn >> L3_P2M_SHIFT) > 0)
            {
                printk("Error: Too many pfns.\n");
                do_exit();
            }
            l3_list[(pfn >> L2_P2M_SHIFT)] = virt_to_mfn(l2_list);
        }
        if(!(pfn % (L1_P2M_ENTRIES)))
        {
            l1_list = (unsigned long*)alloc_page();
            l2_list[(pfn >> L1_P2M_SHIFT) & L2_P2M_MASK] =
                virt_to_mfn(l1_list);
        }

        l1_list[pfn & L1_P2M_MASK] = pfn_to_mfn(pfn);
    }
    HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
        virt_to_mfn(l3_list);
    HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
}
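
/*
 * Architecture-specific memory setup: report the kernel layout, compute the
 * first free pfn after the bootstrap page tables and reserved pages, build
 * page tables for all available memory, drop the bootstrap mappings and
 * write-protect text/rodata.  The usable pfn range is returned via the out
 * pointers.
 */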
void arch_init_mm(unsigned long* start_pfn_p, unsigned long* max_pfn_p)
{
    unsigned long start_pfn, max_pfn;

    printk("  _text: %p\n", &_text);
    printk("  _etext: %p\n", &_etext);
    printk("  _erodata: %p\n", &_erodata);
    printk("  _edata: %p\n", &_edata);
    printk("  stack start: %p\n", stack);
    printk("  _end: %p\n", &_end);

    /* The first free page follows the page table pages and 3 more pages
       (store page etc.). */
    start_pfn = PFN_UP(to_phys(start_info.pt_base)) +
                start_info.nr_pt_frames + 3;
    max_pfn = start_info.nr_pages;

    printk("  start_pfn: %lx\n", start_pfn);
    printk("  max_pfn: %lx\n", max_pfn);

    build_pagetable(&start_pfn, &max_pfn);
    clear_bootstrap();
    set_readonly(&_text, &_erodata);

    *start_pfn_p = start_pfn;
    *max_pfn_p = max_pfn;
}