ia64/xen-unstable: extras/mini-os/arch/x86/mm.c @ 16490:3fdbdd131fc7

[Mini-OS] Catch NULL dereferences

Unmap page 0 (only used early at boot) so as to catch NULL dereferences.

Signed-off-by: Samuel Thibault <samuel.thibault@citrix.com>

author    Keir Fraser <keir.fraser@citrix.com>
date      Wed Nov 28 12:42:17 2007 +0000
parents   bf85b467ee89
children  8101b65014e8
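In essence, the change is the single hypercall near the end of build_pagetable()
below (quoted from this file):

    /* Replace the mapping of virtual address 0 with an empty (not-present)
       PTE and flush its TLB entry, so any later NULL dereference faults. */
    if (HYPERVISOR_update_va_mapping(0, (pte_t) {}, UVMF_INVLPG))
        printk("Unable to unmap page 0\n");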
/*
 ****************************************************************************
 * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
 * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
 ****************************************************************************
 *
 *        File: mm.c
 *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
 *     Changes: Grzegorz Milos
 *
 *        Date: Aug 2003, changes Aug 2005
 *
 * Environment: Xen Minimal OS
 * Description: memory management related functions
 *              contains buddy page allocator from Xen.
 *
 ****************************************************************************
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <os.h>
#include <hypervisor.h>
#include <mm.h>
#include <types.h>
#include <lib.h>
#include <xmalloc.h>

#ifdef MM_DEBUG
#define DEBUG(_f, _a...) \
    printk("MINI_OS(file=mm.c, line=%d) " _f "\n", __LINE__, ## _a)
#else
#define DEBUG(_f, _a...)    ((void)0)
#endif
unsigned long *phys_to_machine_mapping;
extern char stack[];
extern void page_walk(unsigned long virt_addr);
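
/*
 * Turn the page at *pt_pfn into a page-table page for the given level:
 * clear it, remap it read-only, pin it, then hook it into the parent table
 * (machine frame prev_l_mfn) at entry 'offset'.  *pt_pfn is advanced to the
 * next free page-table frame.
 */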
void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn,
                  unsigned long offset, unsigned long level)
{
    pgentry_t *tab = (pgentry_t *)start_info.pt_base;
    unsigned long pt_page = (unsigned long)pfn_to_virt(*pt_pfn);
    unsigned long prot_e, prot_t, pincmd;
    mmu_update_t mmu_updates[1];
    struct mmuext_op pin_request;

    prot_e = prot_t = pincmd = 0;
    DEBUG("Allocating new L%lu pt frame for pt_pfn=%lx, "
          "prev_l_mfn=%lx, offset=%lx",
          level, *pt_pfn, prev_l_mfn, offset);

    /* We need to clear the page, otherwise we might fail to map it
       as a page table page */
    memset((unsigned long*)pfn_to_virt(*pt_pfn), 0, PAGE_SIZE);

    switch ( level )
    {
    case L1_FRAME:
        prot_e = L1_PROT;
        prot_t = L2_PROT;
        pincmd = MMUEXT_PIN_L1_TABLE;
        break;
#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
    case L2_FRAME:
        prot_e = L2_PROT;
        prot_t = L3_PROT;
        pincmd = MMUEXT_PIN_L2_TABLE;
        break;
#endif
#if defined(__x86_64__)
    case L3_FRAME:
        prot_e = L3_PROT;
        prot_t = L4_PROT;
        pincmd = MMUEXT_PIN_L3_TABLE;
        break;
#endif
    default:
        printk("new_pt_frame() called with invalid level number %lu\n", level);
        do_exit();
        break;
    }

    /* Update the entry */
#if defined(__x86_64__)
    tab = pte_to_virt(tab[l4_table_offset(pt_page)]);
    tab = pte_to_virt(tab[l3_table_offset(pt_page)]);
#endif
#if defined(CONFIG_X86_PAE)
    tab = pte_to_virt(tab[l3_table_offset(pt_page)]);
#endif

    mmu_updates[0].ptr = ((pgentry_t)tab[l2_table_offset(pt_page)] & PAGE_MASK) +
                         sizeof(pgentry_t) * l1_table_offset(pt_page);
    mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT |
                         (prot_e & ~_PAGE_RW);
    if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
    {
        printk("PTE for new page table page could not be updated\n");
        do_exit();
    }

    /* Pin the page to provide correct protection */
    pin_request.cmd = pincmd;
    pin_request.arg1.mfn = pfn_to_mfn(*pt_pfn);
    if(HYPERVISOR_mmuext_op(&pin_request, 1, NULL, DOMID_SELF) < 0)
    {
        printk("ERROR: pinning failed\n");
        do_exit();
    }

    /* Now hook the new page table page into the hierarchy: update the
       entry for it in the previous-level table. */
    mmu_updates[0].ptr = ((pgentry_t)prev_l_mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
    mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT | prot_t;
    if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
    {
        printk("ERROR: mmu_update failed\n");
        do_exit();
    }

    *pt_pfn += 1;
}
/* Checks whether a pagetable frame is needed at 'level' (i.e. one was not
   already allocated by Xen) */
static int need_pt_frame(unsigned long virt_address, int level)
{
    unsigned long hyp_virt_start = HYPERVISOR_VIRT_START;
#if defined(__x86_64__)
    unsigned long hyp_virt_end = HYPERVISOR_VIRT_END;
#else
    unsigned long hyp_virt_end = 0xffffffff;
#endif

    /* In general frames will _not_ be needed if they were already
       allocated to map the hypervisor into our VA space */
#if defined(__x86_64__)
    if(level == L3_FRAME)
    {
        if(l4_table_offset(virt_address) >=
           l4_table_offset(hyp_virt_start) &&
           l4_table_offset(virt_address) <=
           l4_table_offset(hyp_virt_end))
            return 0;
        return 1;
    } else
#endif

#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
    if(level == L2_FRAME)
    {
#if defined(__x86_64__)
        if(l4_table_offset(virt_address) >=
           l4_table_offset(hyp_virt_start) &&
           l4_table_offset(virt_address) <=
           l4_table_offset(hyp_virt_end))
#endif
            if(l3_table_offset(virt_address) >=
               l3_table_offset(hyp_virt_start) &&
               l3_table_offset(virt_address) <=
               l3_table_offset(hyp_virt_end))
                return 0;

        return 1;
    } else
#endif /* defined(__x86_64__) || defined(CONFIG_X86_PAE) */

    /* Always need l1 frames */
    if(level == L1_FRAME)
        return 1;

    printk("ERROR: Unknown frame level %d, hypervisor %lx,%lx\n",
           level, hyp_virt_start, hyp_virt_end);
    return -1;
}
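
/*
 * Build the initial page tables: starting after the range Xen pre-mapped
 * (start_info.nr_pt_frames worth of L1 entries), map every remaining page
 * up to *max_pfn, allocating new page-table frames from *start_pfn as
 * needed and batching the PTE writes into one mmu_update hypercall per
 * L1_PAGETABLE_ENTRIES.  Finally unmap page 0 to catch NULL dereferences,
 * and return the first still-unused pfn in *start_pfn.
 */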
void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
{
    unsigned long start_address, end_address;
    unsigned long pfn_to_map, pt_pfn = *start_pfn;
    static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
    pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
    unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
    unsigned long offset;
    int count = 0;

    pfn_to_map = (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES;

    if (*max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START))
    {
        printk("WARNING: Mini-OS trying to use Xen virtual space. "
               "Truncating memory from %ldMB to ",
               ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
        *max_pfn = virt_to_pfn(HYPERVISOR_VIRT_START - PAGE_SIZE);
        printk("%ldMB\n",
               ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
    }

    start_address = (unsigned long)pfn_to_virt(pfn_to_map);
    end_address = (unsigned long)pfn_to_virt(*max_pfn);

    /* We have worked out the virtual memory range to map; now the mapping loop */
    printk("Mapping memory range 0x%lx - 0x%lx\n", start_address, end_address);

    while(start_address < end_address)
    {
        tab = (pgentry_t *)start_info.pt_base;
        mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));

#if defined(__x86_64__)
        offset = l4_table_offset(start_address);
        /* Need new L3 pt frame */
        if(!(start_address & L3_MASK))
            if(need_pt_frame(start_address, L3_FRAME))
                new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);

        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
#endif
#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
        offset = l3_table_offset(start_address);
        /* Need new L2 pt frame */
        if(!(start_address & L2_MASK))
            if(need_pt_frame(start_address, L2_FRAME))
                new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);

        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
#endif
        offset = l2_table_offset(start_address);
        /* Need new L1 pt frame */
        if(!(start_address & L1_MASK))
            if(need_pt_frame(start_address, L1_FRAME))
                new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);

        page = tab[offset];
        mfn = pte_to_mfn(page);
        offset = l1_table_offset(start_address);

        mmu_updates[count].ptr = ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
        mmu_updates[count].val = (pgentry_t)pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT;
        count++;
        if (count == L1_PAGETABLE_ENTRIES || pfn_to_map == *max_pfn)
        {
            if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0)
            {
                printk("PTE could not be updated\n");
                do_exit();
            }
            count = 0;
        }
        start_address += PAGE_SIZE;
    }

    /* Unmap page 0: it is only needed early at boot, and unmapping it lets
       us catch NULL pointer dereferences. */
    if (HYPERVISOR_update_va_mapping(0, (pte_t) {}, UVMF_INVLPG))
        printk("Unable to unmap page 0\n");

    *start_pfn = pt_pfn;
}
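
/*
 * Simple memory self-test: write a pattern derived from each word's
 * address over [start_add, end_add), then read it all back and report any
 * mismatch.
 */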
void mem_test(unsigned long *start_add, unsigned long *end_add)
{
    unsigned long mask = 0x10000;
    unsigned long *pointer;

    for(pointer = start_add; pointer < end_add; pointer++)
    {
        if(!(((unsigned long)pointer) & 0xfffff))
        {
            printk("Writing to %lx\n", (unsigned long)pointer);
            page_walk((unsigned long)pointer);
        }
        *pointer = (unsigned long)pointer & ~mask;
    }

    for(pointer = start_add; pointer < end_add; pointer++)
    {
        if(((unsigned long)pointer & ~mask) != *pointer)
            printk("Read error at 0x%lx. Read: 0x%lx, should read 0x%lx\n",
                   (unsigned long)pointer,
                   *pointer,
                   ((unsigned long)pointer & ~mask));
    }
}
static pgentry_t *demand_map_pgt;
static void *demand_map_area_start;
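
/*
 * Reserve a "demand map" hole just above the populated physical memory:
 * round max_pfn up to the next L1-table boundary and install a fresh,
 * empty L1 page table (demand_map_pgt) covering it, so that map_frames()
 * can later install mappings there on demand.
 */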
void arch_init_demand_mapping_area(unsigned long max_pfn)
{
    unsigned long mfn;
    pgentry_t *tab;
    unsigned long start_addr;
    unsigned long pt_pfn;
    unsigned offset;

    /* Round up to the next L1-page-table boundary (four megs with 1024
       4k entries).  Add L1_PAGETABLE_ENTRIES rather than
       L1_PAGETABLE_ENTRIES - 1 since we want to be sure we don't end up
       in the same place we started. */
    max_pfn = (max_pfn + L1_PAGETABLE_ENTRIES) & ~(L1_PAGETABLE_ENTRIES - 1);
    if (max_pfn == 0 ||
            (unsigned long)pfn_to_virt(max_pfn + L1_PAGETABLE_ENTRIES) >=
            HYPERVISOR_VIRT_START) {
        printk("Too much memory; no room for demand map hole.\n");
        do_exit();
    }

    demand_map_area_start = pfn_to_virt(max_pfn);
    printk("Demand map pfns start at %lx (%p).\n", max_pfn,
           demand_map_area_start);
    start_addr = (unsigned long)demand_map_area_start;

    tab = (pgentry_t *)start_info.pt_base;
    mfn = virt_to_mfn(start_info.pt_base);
    pt_pfn = virt_to_pfn(alloc_page());

#if defined(__x86_64__)
    offset = l4_table_offset(start_addr);
    if (!(tab[offset] & _PAGE_PRESENT)) {
        new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
        pt_pfn = virt_to_pfn(alloc_page());
    }
    ASSERT(tab[offset] & _PAGE_PRESENT);
    mfn = pte_to_mfn(tab[offset]);
    tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
#endif
#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
    offset = l3_table_offset(start_addr);
    if (!(tab[offset] & _PAGE_PRESENT)) {
        new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
        pt_pfn = virt_to_pfn(alloc_page());
    }
    ASSERT(tab[offset] & _PAGE_PRESENT);
    mfn = pte_to_mfn(tab[offset]);
    tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
#endif
    offset = l2_table_offset(start_addr);
    if (tab[offset] & _PAGE_PRESENT) {
        printk("Demand map area already has a page table covering it?\n");
        BUG();
    }
    demand_map_pgt = pfn_to_virt(pt_pfn);
    new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
    ASSERT(tab[offset] & _PAGE_PRESENT);
    printk("Initialised demand area.\n");
}
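
/*
 * Map up to 16 machine frames (f[0..n-1]) at consecutive virtual addresses
 * in the demand map area: scan demand_map_pgt for a run of n free
 * (not-present) entries and install the mappings with a single mmu_update
 * hypercall.  Returns the virtual address of the first frame, or NULL on
 * failure.
 */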
void *map_frames(unsigned long *f, unsigned long n)
{
    unsigned long x;
    unsigned long y = 0;
    mmu_update_t mmu_updates[16];
    int rc;

    if (n > 16) {
        printk("Tried to map too many (%ld) frames at once.\n", n);
        return NULL;
    }

    /* Find a run of n contiguous frames */
    for (x = 0; x <= 1024 - n; x += y + 1) {
        for (y = 0; y < n; y++)
            if (demand_map_pgt[x+y] & _PAGE_PRESENT)
                break;
        if (y == n)
            break;
    }
    if (y != n) {
        printk("Failed to map %ld frames!\n", n);
        return NULL;
    }

    /* Found it at x.  Map it in. */
    for (y = 0; y < n; y++) {
        mmu_updates[y].ptr = virt_to_mach(&demand_map_pgt[x + y]);
        mmu_updates[y].val = (f[y] << PAGE_SHIFT) | L1_PROT;
    }

    rc = HYPERVISOR_mmu_update(mmu_updates, n, NULL, DOMID_SELF);
    if (rc < 0) {
        printk("Map %ld failed: %d.\n", n, rc);
        return NULL;
    } else {
        return (void *)(unsigned long)((unsigned long)demand_map_area_start +
                                       x * PAGE_SIZE);
    }
}
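
/*
 * Publish the physical-to-machine (p2m) mapping to the hypervisor and
 * tools as a three-level frame list: L1 pages hold the actual pfn->mfn
 * entries, L2 pages list the L1 frames, and the single L3 page (recorded
 * as pfn_to_mfn_frame_list_list in shared_info) lists the L2 frames.
 */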
void arch_init_p2m(unsigned long max_pfn)
{
#define L1_P2M_SHIFT    9
#define L2_P2M_SHIFT    18
#define L3_P2M_SHIFT    27
#define L1_P2M_ENTRIES  (1 << L1_P2M_SHIFT)
#define L2_P2M_ENTRIES  (1 << (L2_P2M_SHIFT - L1_P2M_SHIFT))
#define L3_P2M_ENTRIES  (1 << (L3_P2M_SHIFT - L2_P2M_SHIFT))
#define L1_P2M_MASK     (L1_P2M_ENTRIES - 1)
#define L2_P2M_MASK     (L2_P2M_ENTRIES - 1)
#define L3_P2M_MASK     (L3_P2M_ENTRIES - 1)

    unsigned long *l1_list, *l2_list, *l3_list;
    unsigned long pfn;

    l3_list = (unsigned long *)alloc_page();
    for(pfn=0; pfn<max_pfn; pfn++)
    {
        if(!(pfn % (L1_P2M_ENTRIES * L2_P2M_ENTRIES)))
        {
            l2_list = (unsigned long*)alloc_page();
            if((pfn >> L3_P2M_SHIFT) > 0)
            {
                printk("Error: Too many pfns.\n");
                do_exit();
            }
            l3_list[(pfn >> L2_P2M_SHIFT)] = virt_to_mfn(l2_list);
        }
        if(!(pfn % (L1_P2M_ENTRIES)))
        {
            l1_list = (unsigned long*)alloc_page();
            l2_list[(pfn >> L1_P2M_SHIFT) & L2_P2M_MASK] =
                virt_to_mfn(l1_list);
        }

        l1_list[pfn & L1_P2M_MASK] = pfn_to_mfn(pfn);
    }
    HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
        virt_to_mfn(l3_list);
    HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
}
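
/*
 * Arch-specific memory setup: report the kernel layout, work out the first
 * free pfn (after the boot page tables plus the pages reserved for the
 * store page etc.) and the total number of pages, then build the initial
 * page tables.
 */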
void arch_init_mm(unsigned long* start_pfn_p, unsigned long* max_pfn_p)
{
    unsigned long start_pfn, max_pfn;

    printk(" _text: %p\n", &_text);
    printk(" _etext: %p\n", &_etext);
    printk(" _edata: %p\n", &_edata);
    printk(" stack start: %p\n", stack);
    printk(" _end: %p\n", &_end);

    /* First page follows page table pages and 3 more pages (store page etc) */
    start_pfn = PFN_UP(to_phys(start_info.pt_base)) +
                start_info.nr_pt_frames + 3;
    max_pfn = start_info.nr_pages;

    printk(" start_pfn: %lx\n", start_pfn);
    printk(" max_pfn: %lx\n", max_pfn);

    build_pagetable(&start_pfn, &max_pfn);

    *start_pfn_p = start_pfn;
    *max_pfn_p = max_pfn;
}