ia64/xen-unstable

view extras/mini-os/arch/x86/mm.c @ 17042:a905c582a406

Add stubdomain support. See stubdom/README for usage details.

- Move PAGE_SIZE and STACK_SIZE into __PAGE_SIZE and __STACK_SIZE in
arch_limits.h so that they can be obtained from there without pulling in
all the internal Mini-OS defines.
- Setup a xen-elf cross-compilation environment in stubdom/cross-root
- Add a POSIX layer on top of Mini-OS by linking against the newlib C
library and lwIP, and implementing the Unixish part in mini-os/lib/sys.c
- Cross-compile zlib and libpci too.
- Add an xs.h-compatible layer on top of Mini-OS' xenbus.
- Cross-compile libxc with an additional xc_minios.c and a few things
disabled.
- Cross-compile ioemu with an additional block-vbd, but without sound,
tpm and other details. A few hacks are needed:
- Align ide and scsi buffers at least on sector size to permit
direct transmission to the block backend. While we are at it, just
page-align them to possibly save a segment. Also, limit the scsi
buffer size because of the limitations of the block paravirtualization
protocol.
- Allocate big tables dynamically rather than letting them go to
bss: when Mini-OS gets installed in memory, bss is not lazily
allocated, and doing so in Mini-OS is unnecessarily tricky when we can
simply use malloc (see the sketch below).
- Had to change the Mini-OS compilation somewhat, so as to export the
Mini-OS compilation flags to the Makefiles of libxc and ioemu.
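
As a minimal, hypothetical C sketch of the two allocation changes above
(this is not the actual ioemu patch; the buffer/table names and sizes are
invented), page-aligning an I/O buffer and moving a big table from bss to
the heap could look like:

    #include <stdlib.h>

    #define PAGE_SIZE 4096
    #define IDE_BUF_SIZE (128 * 1024)       /* illustrative size */
    #define BIG_TABLE_ENTRIES 65536         /* illustrative size */

    static unsigned char *ide_buf;   /* was: static unsigned char ide_buf[IDE_BUF_SIZE]; */
    static unsigned long *big_table; /* was: static unsigned long big_table[BIG_TABLE_ENTRIES]; */

    static int init_buffers(void)
    {
        /* Over-allocate by one page and round up so the buffer starts on a
           page (and hence sector) boundary. */
        void *raw = malloc(IDE_BUF_SIZE + PAGE_SIZE);
        if (!raw)
            return -1;
        ide_buf = (unsigned char *)(((unsigned long)raw + PAGE_SIZE - 1)
                                    & ~((unsigned long)PAGE_SIZE - 1));

        /* Allocate the big table at run time instead of letting it go to bss. */
        big_table = calloc(BIG_TABLE_ENTRIES, sizeof(*big_table));
        if (!big_table)
            return -1;
        return 0;
    }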

Signed-off-by: Samuel Thibault <samuel.thibault@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Feb 12 14:35:39 2008 +0000 (2008-02-12)
parents 56307d5809cc
children 681cfd0eda78
line source
/*
 ****************************************************************************
 * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
 * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
 ****************************************************************************
 *
 * File: mm.c
 * Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
 * Changes: Grzegorz Milos
 *
 * Date: Aug 2003, changes Aug 2005
 *
 * Environment: Xen Minimal OS
 * Description: memory management related functions
 *              contains buddy page allocator from Xen.
 *
 ****************************************************************************
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <os.h>
#include <hypervisor.h>
#include <mm.h>
#include <types.h>
#include <lib.h>
#include <xmalloc.h>
#include <xen/memory.h>

#ifdef MM_DEBUG
#define DEBUG(_f, _a...) \
    printk("MINI_OS(file=mm.c, line=%d) " _f "\n", __LINE__, ## _a)
#else
#define DEBUG(_f, _a...) ((void)0)
#endif

unsigned long *phys_to_machine_mapping;
unsigned long mfn_zero;
extern char stack[];
extern void page_walk(unsigned long virt_addr);
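
/*
 * Allocate and hook in a new page-table frame at the given level: the page
 * at *pt_pfn is cleared, mapped read-only, pinned as an L1/L2/L3 table and
 * linked into the parent table entry identified by (prev_l_mfn, offset).
 * *pt_pfn is then advanced to the next free bootstrap frame.
 */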
void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn,
                  unsigned long offset, unsigned long level)
{
    pgentry_t *tab = (pgentry_t *)start_info.pt_base;
    unsigned long pt_page = (unsigned long)pfn_to_virt(*pt_pfn);
    unsigned long prot_e, prot_t, pincmd;
    mmu_update_t mmu_updates[1];
    struct mmuext_op pin_request;

    prot_e = prot_t = pincmd = 0;
    DEBUG("Allocating new L%d pt frame for pt_pfn=%lx, "
          "prev_l_mfn=%lx, offset=%lx",
          level, *pt_pfn, prev_l_mfn, offset);

    /* We need to clear the page, otherwise we might fail to map it
       as a page table page */
    memset((unsigned long*)pfn_to_virt(*pt_pfn), 0, PAGE_SIZE);

    switch ( level )
    {
    case L1_FRAME:
        prot_e = L1_PROT;
        prot_t = L2_PROT;
        pincmd = MMUEXT_PIN_L1_TABLE;
        break;
#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
    case L2_FRAME:
        prot_e = L2_PROT;
        prot_t = L3_PROT;
        pincmd = MMUEXT_PIN_L2_TABLE;
        break;
#endif
#if defined(__x86_64__)
    case L3_FRAME:
        prot_e = L3_PROT;
        prot_t = L4_PROT;
        pincmd = MMUEXT_PIN_L3_TABLE;
        break;
#endif
    default:
        printk("new_pt_frame() called with invalid level number %d\n", level);
        do_exit();
        break;
    }

    /* Update the entry */
#if defined(__x86_64__)
    tab = pte_to_virt(tab[l4_table_offset(pt_page)]);
    tab = pte_to_virt(tab[l3_table_offset(pt_page)]);
#endif
#if defined(CONFIG_X86_PAE)
    tab = pte_to_virt(tab[l3_table_offset(pt_page)]);
#endif

    mmu_updates[0].ptr = ((pgentry_t)tab[l2_table_offset(pt_page)] & PAGE_MASK) +
                         sizeof(pgentry_t) * l1_table_offset(pt_page);
    mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT |
                         (prot_e & ~_PAGE_RW);
    if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
    {
        printk("PTE for new page table page could not be updated\n");
        do_exit();
    }

    /* Pin the page to provide correct protection */
    pin_request.cmd = pincmd;
    pin_request.arg1.mfn = pfn_to_mfn(*pt_pfn);
    if(HYPERVISOR_mmuext_op(&pin_request, 1, NULL, DOMID_SELF) < 0)
    {
        printk("ERROR: pinning failed\n");
        do_exit();
    }

    /* Now fill the new page table page with entries.
       Update the page directory as well. */
    mmu_updates[0].ptr = ((pgentry_t)prev_l_mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
    mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT | prot_t;
    if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
    {
        printk("ERROR: mmu_update failed\n");
        do_exit();
    }

    *pt_pfn += 1;
}

/* Checks if a pagetable frame is needed (i.e. if it wasn't already
   allocated by Xen to map the hypervisor area) */
static int need_pt_frame(unsigned long virt_address, int level)
{
    unsigned long hyp_virt_start = HYPERVISOR_VIRT_START;
#if defined(__x86_64__)
    unsigned long hyp_virt_end = HYPERVISOR_VIRT_END;
#else
    unsigned long hyp_virt_end = 0xffffffff;
#endif

    /* In general frames will _not_ be needed if they were already
       allocated to map the hypervisor into our VA space */
#if defined(__x86_64__)
    if(level == L3_FRAME)
    {
        if(l4_table_offset(virt_address) >=
           l4_table_offset(hyp_virt_start) &&
           l4_table_offset(virt_address) <=
           l4_table_offset(hyp_virt_end))
            return 0;
        return 1;
    } else
#endif

#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
    if(level == L2_FRAME)
    {
#if defined(__x86_64__)
        if(l4_table_offset(virt_address) >=
           l4_table_offset(hyp_virt_start) &&
           l4_table_offset(virt_address) <=
           l4_table_offset(hyp_virt_end))
#endif
            if(l3_table_offset(virt_address) >=
               l3_table_offset(hyp_virt_start) &&
               l3_table_offset(virt_address) <=
               l3_table_offset(hyp_virt_end))
                return 0;

        return 1;
    } else
#endif /* defined(__x86_64__) || defined(CONFIG_X86_PAE) */

    /* Always need l1 frames */
    if(level == L1_FRAME)
        return 1;

    printk("ERROR: Unknown frame level %d, hypervisor %llx,%llx\n",
           level, hyp_virt_start, hyp_virt_end);
    return -1;
}
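
/*
 * Build the initial page tables: map physical memory from the area already
 * mapped by the domain builder up to *max_pfn, allocating any page-table
 * frames that are still missing from the bootstrap frames at *start_pfn.
 * PTE writes are batched into one mmu_update hypercall per
 * L1_PAGETABLE_ENTRIES entries. On return *start_pfn points past the last
 * page-table frame consumed.
 */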
void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
{
    unsigned long start_address, end_address;
    unsigned long pfn_to_map, pt_pfn = *start_pfn;
    static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
    pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
    unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
    unsigned long offset;
    int count = 0;

    pfn_to_map = (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES;

    if (*max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START))
    {
        printk("WARNING: Mini-OS trying to use Xen virtual space. "
               "Truncating memory from %dMB to ",
               ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
        *max_pfn = virt_to_pfn(HYPERVISOR_VIRT_START - PAGE_SIZE);
        printk("%dMB\n",
               ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
    }

    start_address = (unsigned long)pfn_to_virt(pfn_to_map);
    end_address = (unsigned long)pfn_to_virt(*max_pfn);

    /* We worked out the virtual memory range to map, now mapping loop */
    printk("Mapping memory range 0x%lx - 0x%lx\n", start_address, end_address);

    while(start_address < end_address)
    {
        tab = (pgentry_t *)start_info.pt_base;
        mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));

#if defined(__x86_64__)
        offset = l4_table_offset(start_address);
        /* Need new L3 pt frame */
        if(!(start_address & L3_MASK))
            if(need_pt_frame(start_address, L3_FRAME))
                new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);

        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
#endif
#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
        offset = l3_table_offset(start_address);
        /* Need new L2 pt frame */
        if(!(start_address & L2_MASK))
            if(need_pt_frame(start_address, L2_FRAME))
                new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);

        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
#endif
        offset = l2_table_offset(start_address);
        /* Need new L1 pt frame */
        if(!(start_address & L1_MASK))
            if(need_pt_frame(start_address, L1_FRAME))
                new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);

        page = tab[offset];
        mfn = pte_to_mfn(page);
        offset = l1_table_offset(start_address);

        mmu_updates[count].ptr = ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
        mmu_updates[count].val = (pgentry_t)pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT;
        count++;
        if (count == L1_PAGETABLE_ENTRIES || pfn_to_map == *max_pfn)
        {
            if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0)
            {
                printk("PTE could not be updated\n");
                do_exit();
            }
            count = 0;
        }
        start_address += PAGE_SIZE;
    }

    *start_pfn = pt_pfn;
}

extern void shared_info;
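/*
 * Mark the range [text, etext) read-only, one L1 entry at a time, batching
 * the PTE updates into mmu_update hypercalls. The page containing
 * shared_info is skipped. Finishes with a full TLB flush so the new
 * protections take effect.
 */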
static void set_readonly(void *text, void *etext)
{
    unsigned long start_address = ((unsigned long) text + PAGE_SIZE - 1) & PAGE_MASK;
    unsigned long end_address = (unsigned long) etext;
    static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
    pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
    unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
    unsigned long offset;
    int count = 0;

    printk("setting %p-%p readonly\n", text, etext);

    while (start_address + PAGE_SIZE <= end_address) {
        tab = (pgentry_t *)start_info.pt_base;
        mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));

#if defined(__x86_64__)
        offset = l4_table_offset(start_address);
        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
#endif
#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
        offset = l3_table_offset(start_address);
        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
#endif
        offset = l2_table_offset(start_address);
        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);

        offset = l1_table_offset(start_address);

        if (start_address != (unsigned long)&shared_info) {
            mmu_updates[count].ptr = ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
            mmu_updates[count].val = tab[offset] & ~_PAGE_RW;
            count++;
        } else
            printk("skipped %p\n", start_address);

        start_address += PAGE_SIZE;

        if (count == L1_PAGETABLE_ENTRIES || start_address + PAGE_SIZE > end_address)
        {
            if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0)
            {
                printk("PTE could not be updated\n");
                do_exit();
            }
            count = 0;
        }
    }

    {
        mmuext_op_t op = {
            .cmd = MMUEXT_TLB_FLUSH_ALL,
        };
        int count;
        HYPERVISOR_mmuext_op(&op, 1, &count, DOMID_SELF);
    }
}
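
/*
 * Simple memory tester: write an address-derived pattern to every word in
 * [start_add, end_add) and then read it back, reporting any mismatch.
 */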
void mem_test(unsigned long *start_add, unsigned long *end_add)
{
    unsigned long mask = 0x10000;
    unsigned long *pointer;

    for(pointer = start_add; pointer < end_add; pointer++)
    {
        if(!(((unsigned long)pointer) & 0xfffff))
        {
            printk("Writing to %lx\n", pointer);
            page_walk((unsigned long)pointer);
        }
        *pointer = (unsigned long)pointer & ~mask;
    }

    for(pointer = start_add; pointer < end_add; pointer++)
    {
        if(((unsigned long)pointer & ~mask) != *pointer)
            printk("Read error at 0x%lx. Read: 0x%lx, should read 0x%lx\n",
                   (unsigned long)pointer,
                   *pointer,
                   ((unsigned long)pointer & ~mask));
    }

}
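
/*
 * Return a pointer to the L1 page-table entry that maps 'addr', or NULL if
 * any of the intermediate page-table levels is not present.
 */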
static pgentry_t *get_pgt(unsigned long addr)
{
    unsigned long mfn;
    pgentry_t *tab;
    unsigned offset;

    tab = (pgentry_t *)start_info.pt_base;
    mfn = virt_to_mfn(start_info.pt_base);

#if defined(__x86_64__)
    offset = l4_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT))
        return NULL;
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
#endif
#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
    offset = l3_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT))
        return NULL;
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
#endif
    offset = l2_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT))
        return NULL;
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
    offset = l1_table_offset(addr);
    return &tab[offset];
}
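
/*
 * Like get_pgt(), but allocate and hook in any missing intermediate
 * page-table frames (via new_pt_frame()), so a valid L1 entry pointer for
 * 'addr' can always be returned.
 */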
static pgentry_t *need_pgt(unsigned long addr)
{
    unsigned long mfn;
    pgentry_t *tab;
    unsigned long pt_pfn;
    unsigned offset;

    tab = (pgentry_t *)start_info.pt_base;
    mfn = virt_to_mfn(start_info.pt_base);

#if defined(__x86_64__)
    offset = l4_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT)) {
        pt_pfn = virt_to_pfn(alloc_page());
        new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
    }
    ASSERT(tab[offset] & _PAGE_PRESENT);
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
#endif
#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
    offset = l3_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT)) {
        pt_pfn = virt_to_pfn(alloc_page());
        new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
    }
    ASSERT(tab[offset] & _PAGE_PRESENT);
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
#endif
    offset = l2_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT)) {
        pt_pfn = virt_to_pfn(alloc_page());
        new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
    }
    ASSERT(tab[offset] & _PAGE_PRESENT);
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);

    offset = l1_table_offset(addr);
    return &tab[offset];
}

static unsigned long demand_map_area_start;
#ifdef __x86_64__
#define DEMAND_MAP_PAGES ((128ULL << 30) / PAGE_SIZE)
#else
#define DEMAND_MAP_PAGES ((2ULL << 30) / PAGE_SIZE)
#endif

#ifdef HAVE_LIBC
unsigned long heap, brk, heap_mapped, heap_end;
#ifdef __x86_64__
#define HEAP_PAGES ((128ULL << 30) / PAGE_SIZE)
#else
#define HEAP_PAGES ((1ULL << 30) / PAGE_SIZE)
#endif
#endif
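
/*
 * Reserve virtual address space, starting just after cur_pfn, for the
 * on-demand mapping area and (with HAVE_LIBC) for the heap used by the
 * POSIX layer. Only addresses are reserved here; page-table entries are
 * created later, on demand.
 */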
void arch_init_demand_mapping_area(unsigned long cur_pfn)
{
    cur_pfn++;

    demand_map_area_start = (unsigned long) pfn_to_virt(cur_pfn);
    cur_pfn += DEMAND_MAP_PAGES;
    printk("Demand map pfns at %lx-%lx.\n", demand_map_area_start, pfn_to_virt(cur_pfn));

#ifdef HAVE_LIBC
    cur_pfn++;
    heap_mapped = brk = heap = (unsigned long) pfn_to_virt(cur_pfn);
    cur_pfn += HEAP_PAGES;
    heap_end = (unsigned long) pfn_to_virt(cur_pfn);
    printk("Heap resides at %lx-%lx.\n", brk, heap_end);
#endif
}

#define MAP_BATCH ((STACK_SIZE / 2) / sizeof(mmu_update_t))
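/*
 * Map 'n' frames at virtual address 'addr': frame numbers are taken from
 * f[i * stride] plus i * increment, owned by domain 'id', with protection
 * 'prot'. Updates are issued in batches of at most MAP_BATCH entries, sized
 * so the on-stack mmu_update array uses at most half the stack. If
 * 'may_fail' is set, a failed update is recorded by tagging the frame entry
 * instead of terminating.
 */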
void do_map_frames(unsigned long addr,
                   unsigned long *f, unsigned long n, unsigned long stride,
                   unsigned long increment, domid_t id, int may_fail, unsigned long prot)
{
    pgentry_t *pgt = NULL;
    unsigned long done = 0;
    unsigned long i;
    int rc;

    while (done < n) {
        unsigned long todo;

        if (may_fail)
            todo = 1;
        else
            todo = n - done;

        if (todo > MAP_BATCH)
            todo = MAP_BATCH;

        {
            mmu_update_t mmu_updates[todo];

            for (i = 0; i < todo; i++, addr += PAGE_SIZE, pgt++) {
                if (!pgt || !(addr & L1_MASK))
                    pgt = need_pgt(addr);
                mmu_updates[i].ptr = virt_to_mach(pgt);
                mmu_updates[i].val = ((f[(done + i) * stride] + (done + i) * increment) << PAGE_SHIFT) | prot;
            }

            rc = HYPERVISOR_mmu_update(mmu_updates, todo, NULL, id);
            if (rc < 0) {
                if (may_fail)
                    f[done * stride] |= 0xF0000000;
                else {
                    printk("Map %ld (%lx, ...) at %p failed: %d.\n", todo, f[done * stride] + done * increment, addr, rc);
                    do_exit();
                }
            }
        }

        done += todo;
    }
}
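
/*
 * Find a free, suitably aligned run of 'n' pages in the demand-map area and
 * map the given frames there with do_map_frames(). Returns the chosen
 * virtual address, or NULL if no free run could be found.
 */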
void *map_frames_ex(unsigned long *f, unsigned long n, unsigned long stride,
                    unsigned long increment, unsigned long alignment, domid_t id,
                    int may_fail, unsigned long prot)
{
    unsigned long x;
    unsigned long y = 0;

    /* Find a properly aligned run of n contiguous frames */
    for (x = 0; x <= DEMAND_MAP_PAGES - n; x = (x + y + 1 + alignment - 1) & ~(alignment - 1)) {
        unsigned long addr = demand_map_area_start + x * PAGE_SIZE;
        pgentry_t *pgt = get_pgt(addr);
        for (y = 0; y < n; y++, addr += PAGE_SIZE) {
            if (!(addr & L1_MASK))
                pgt = get_pgt(addr);
            if (pgt) {
                if (*pgt & _PAGE_PRESENT)
                    break;
                pgt++;
            }
        }
        if (y == n)
            break;
    }
    if (y != n) {
        printk("Failed to find %ld frames!\n", n);
        return NULL;
    }

    /* Found it at x. Map it in. */
    do_map_frames(demand_map_area_start + x * PAGE_SIZE, f, n, stride, increment, id, may_fail, prot);

    return (void *)(unsigned long)(demand_map_area_start + x * PAGE_SIZE);
}
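
/*
 * Release bootstrap resources: the first page of the image becomes the
 * copy-on-write zero page (and is unmapped from its virtual address), and
 * the machine frame that originally backed the shared_info page is handed
 * back to Xen.
 */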
static void clear_bootstrap(void)
{
    struct xen_memory_reservation reservation;
    xen_pfn_t mfns[] = { virt_to_mfn(&shared_info) };
    int n = sizeof(mfns)/sizeof(*mfns);
    pte_t nullpte = { };

    /* Use first page as the CoW zero page */
    memset(&_text, 0, PAGE_SIZE);
    mfn_zero = pfn_to_mfn((unsigned long) &_text);
    if (HYPERVISOR_update_va_mapping((unsigned long) &_text, nullpte, UVMF_INVLPG))
        printk("Unable to unmap first page\n");

    set_xen_guest_handle(reservation.extent_start, mfns);
    reservation.nr_extents = n;
    reservation.extent_order = 0;
    reservation.domid = DOMID_SELF;
    if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) != n)
        printk("Unable to free bootstrap pages\n");
}
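
/*
 * Build the 3-level physical-to-machine (p2m) frame list for this domain
 * and publish it to Xen through arch.pfn_to_mfn_frame_list_list in the
 * shared info page (used e.g. for save/restore).
 */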
void arch_init_p2m(unsigned long max_pfn)
{
#define L1_P2M_SHIFT 9
#define L2_P2M_SHIFT 18
#define L3_P2M_SHIFT 27
#define L1_P2M_ENTRIES (1 << L1_P2M_SHIFT)
#define L2_P2M_ENTRIES (1 << (L2_P2M_SHIFT - L1_P2M_SHIFT))
#define L3_P2M_ENTRIES (1 << (L3_P2M_SHIFT - L2_P2M_SHIFT))
#define L1_P2M_MASK (L1_P2M_ENTRIES - 1)
#define L2_P2M_MASK (L2_P2M_ENTRIES - 1)
#define L3_P2M_MASK (L3_P2M_ENTRIES - 1)

    unsigned long *l1_list, *l2_list, *l3_list;
    unsigned long pfn;

    l3_list = (unsigned long *)alloc_page();
    for(pfn=0; pfn<max_pfn; pfn++)
    {
        if(!(pfn % (L1_P2M_ENTRIES * L2_P2M_ENTRIES)))
        {
            l2_list = (unsigned long*)alloc_page();
            if((pfn >> L3_P2M_SHIFT) > 0)
            {
                printk("Error: Too many pfns.\n");
                do_exit();
            }
            l3_list[(pfn >> L2_P2M_SHIFT)] = virt_to_mfn(l2_list);
        }
        if(!(pfn % (L1_P2M_ENTRIES)))
        {
            l1_list = (unsigned long*)alloc_page();
            l2_list[(pfn >> L1_P2M_SHIFT) & L2_P2M_MASK] =
                virt_to_mfn(l1_list);
        }

        l1_list[pfn & L1_P2M_MASK] = pfn_to_mfn(pfn);
    }
    HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
        virt_to_mfn(l3_list);
    HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
}
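
/*
 * Architecture-specific memory initialisation: report the kernel layout,
 * compute the first free pfn (after the bootstrap page tables plus three
 * reserved pages, store page etc.), build the page tables, release the
 * bootstrap mappings and write-protect the text/rodata region.
 */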
void arch_init_mm(unsigned long* start_pfn_p, unsigned long* max_pfn_p)
{

    unsigned long start_pfn, max_pfn;

    printk(" _text: %p\n", &_text);
    printk(" _etext: %p\n", &_etext);
    printk(" _erodata: %p\n", &_erodata);
    printk(" _edata: %p\n", &_edata);
    printk(" stack start: %p\n", stack);
    printk(" _end: %p\n", &_end);

    /* First page follows page table pages and 3 more pages (store page etc) */
    start_pfn = PFN_UP(to_phys(start_info.pt_base)) +
                start_info.nr_pt_frames + 3;
    max_pfn = start_info.nr_pages;

    printk(" start_pfn: %lx\n", start_pfn);
    printk(" max_pfn: %lx\n", max_pfn);

    build_pagetable(&start_pfn, &max_pfn);
    clear_bootstrap();
    set_readonly(&_text, &_erodata);

    *start_pfn_p = start_pfn;
    *max_pfn_p = max_pfn;
}