ia64/xen-unstable

view linux-2.6-xen-sparse/arch/xen/i386/mm/init.c @ 6552:a9873d384da4

Merge.
author adsharma@los-vmm.sc.intel.com
date Thu Aug 25 12:24:48 2005 -0700 (2005-08-25)
parents 112d44270733 fa0754a9f64f
children dfaf788ab18c
/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm-xen/hypervisor.h>

extern unsigned long *contiguous_bitmap;

#if defined(CONFIG_SWIOTLB)
extern void swiotlb_init(void);
int swiotlb;
EXPORT_SYMBOL(swiotlb);
#endif

unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);

/*
 * Creates a middle page table (pmd) and installs a pointer to it in
 * the given page global directory (pgd) entry. In non-PAE builds the
 * middle layer is folded away, so this simply returns the pgd entry.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
	make_page_readonly(pmd_table);
	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	pud = pud_offset(pgd, 0);
	if (pmd_table != pmd_offset(pud, 0))
		BUG();
#else
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}
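
/*
 * Illustrative sketch (added commentary, not part of the original
 * file): how a caller typically populates one pgd slot during early
 * boot. The variable names are hypothetical.
 */
#if 0
pgd_t *pgd = pgd_base + pgd_index(vaddr);

if (pgd_none(*pgd))
	one_md_table_init(pgd);		/* allocates a pmd page on PAE */
#endif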

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
		make_page_readonly(page_table);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		if (page_table != pte_offset_kernel(pmd, 0))
			BUG();

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}
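
/*
 * Note (added commentary): Xen requires that any page a guest uses as
 * a page table be mapped read-only in that guest, which is why both
 * allocators above call make_page_readonly() before hooking the new
 * page into the paging hierarchy.
 */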

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 */

/*
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around
 * without re-checking the pgd every time.
 */
static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		if (pgd_none(*pgd))
			one_md_table_init(pgd);
		pud = pud_offset(pgd, vaddr);
		pmd = pmd_offset(pud, vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
			if (vaddr < HYPERVISOR_VIRT_START && pmd_none(*pmd))
				one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}
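
/*
 * Worked example (added commentary, assuming the usual 32-bit layout
 * with PAGE_OFFSET = 0xC0000000): in a non-PAE build each pgd entry
 * covers 4MB, so pgd_index(0xC0000000) = 0xC0000000 >> 22 = 768, i.e.
 * the kernel mapping starts at slot 768 of the 1024-entry pgd.
 */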

static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx, pte_ofs;

	unsigned long max_ram_pfn = xen_start_info.nr_pages;
	if (max_ram_pfn > max_low_pfn)
		max_ram_pfn = max_low_pfn;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;
	pmd_idx = pmd_index(PAGE_OFFSET);
	pte_ofs = pte_index(PAGE_OFFSET);

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
#ifdef CONFIG_XEN
		/*
		 * Native Linux does not have PAE paging enabled yet at
		 * this point. When running as a Xen domain we are already
		 * in PAE mode, so we cannot simply hook in an empty pmd:
		 * that would kill the mappings we are currently using.
		 */
		pmd = pmd_offset(pud_offset(pgd, PAGE_OFFSET), PAGE_OFFSET);
#else
		pmd = one_md_table_init(pgd);
#endif
		if (pfn >= max_low_pfn)
			continue;
		pmd += pmd_idx;
		for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
			if (address >= HYPERVISOR_VIRT_START)
				continue;

			/* Map with big pages if possible, otherwise create normal page tables. */
			if (cpu_has_pse) {
				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(address) || is_kernel_text(address2))
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				else
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
				pfn += PTRS_PER_PTE;
			} else {
				pte = one_page_table_init(pmd);

				pte += pte_ofs;
				for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
					/* XEN: Only map initial RAM allocation. */
					if ((pfn >= max_ram_pfn) || pte_present(*pte))
						continue;
					if (is_kernel_text(address))
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
					else
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
				}
				pte_ofs = 0;
			}
		}
		pmd_idx = 0;
	}
}
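
/*
 * Added commentary: with PSE each pmd entry maps PTRS_PER_PTE small
 * pages in one go, i.e. 4MB on non-PAE (1024 * 4KB) or 2MB on PAE
 * (512 * 4KB). The two Xen-specific guards above are (a) never touch
 * the hypervisor hole at or above HYPERVISOR_VIRT_START, and (b) in
 * the pte loop, skip pfns beyond the domain's initial allocation and
 * leave alone any ptes the domain builder already installed.
 */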

static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}

extern int is_available_memory(efi_memory_desc_t *);

static inline int page_is_ram(unsigned long pagenr)
{
	int i;
	unsigned long addr, end;

	if (efi_enabled) {
		efi_memory_desc_t *md;

		for (i = 0; i < memmap.nr_map; i++) {
			md = &memmap.map[i];
			if (!is_available_memory(md))
				continue;
			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;

			if ((pagenr >= addr) && (pagenr < end))
				return 1;
		}
		return 0;
	}

	for (i = 0; i < e820.nr_map; i++) {

		if (e820.map[i].type != E820_RAM)	/* not usable memory */
			continue;
		/*
		 * !!!FIXME!!! Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area. We need a sanity
		 * check here.
		 */
		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
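
/*
 * Worked example (added commentary): the rounding deliberately drops
 * partial pages at the edges of a range. An e820 entry starting at
 * 0x9fc00 with size 0x400 gives addr = (0x9fc00 + 0xfff) >> 12 = 0xa0
 * and end = 0xa0000 >> 12 = 0xa0, so no pfn satisfies addr <= pfn <
 * end and the sub-page sliver is not treated as RAM.
 */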

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr)					\
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		set_bit(PG_highmem, &page->flags);
		set_page_count(page, 1);
		if (pfn < xen_start_info.nr_pages)
			__free_page(page);
		totalhigh_pages++;
	} else
		SetPageReserved(page);
}
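
/*
 * Added commentary: the pfn check against xen_start_info.nr_pages is
 * the Xen twist here. Only pages inside the domain's initial
 * allocation are backed by machine memory at this point, so only
 * those are released to the allocator; totalhigh_pages is counted
 * either way.
 */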

#ifndef CONFIG_DISCONTIGMEM
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;
	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
		one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	totalram_pages += totalhigh_pages;
}
#else
extern void set_highmem_pages_init(int);
#endif /* !CONFIG_DISCONTIGMEM */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */

unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifndef CONFIG_DISCONTIGMEM
#define remap_numa_kva() do {} while (0)
#else
extern void __init remap_numa_kva(void);
#endif

pgd_t *swapper_pg_dir;

static void __init pagetable_init (void)
{
	unsigned long vaddr;
	pgd_t *pgd_base = (pgd_t *)xen_start_info.pt_base;
	int i;

	swapper_pg_dir = pgd_base;
	init_mm.pgd = pgd_base;
	for (i = 0; i < NR_CPUS; i++)
		per_cpu(cur_pgd, i) = pgd_base;

	/* Enable PSE if available */
	if (cpu_has_pse) {
		set_in_cr4(X86_CR4_PSE);
	}

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	init_mm.context.pinned = 1;
	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	page_table_range_init(vaddr, 0, pgd_base);

	permanent_kmaps_init(pgd_base);
}
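
/*
 * Added commentary: unlike native i386, swapper_pg_dir is not a
 * statically allocated page here. The domain builder hands the guest
 * an already-live set of page tables whose root is advertised in
 * xen_start_info.pt_base, and the kernel simply adopts it. Passing 0
 * as 'end' to page_table_range_init() makes the fixmap setup run to
 * the top of the address space, since vaddr only equals 0 after
 * wrapping around.
 */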

#if defined(CONFIG_PM_DISK) || defined(CONFIG_SOFTWARE_SUSPEND)
/*
 * Swap suspend & friends need this for resume because things like the
 * intel-agp driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

void zap_low_mappings (void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	flush_tlb_all();
}
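
/*
 * Added commentary (a reading of the #if above, not authoritative):
 * on native PAE kernels the low pgd slots are pointed at the empty
 * zero page with the present bit set (the literal 1) instead of being
 * cleared outright, while non-PAE and Xen builds can simply write a
 * zero entry.
 */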

static int disable_nx __initdata = 0;
u64 __supported_pte_mask = ~_PAGE_NX;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
void __init noexec_setup(const char *str)
{
	if (!strncmp(str, "on",2) && cpu_has_nx) {
		__supported_pte_mask |= _PAGE_NX;
		disable_nx = 0;
	} else if (!strncmp(str,"off",3)) {
		disable_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	}
}
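
/*
 * Usage sketch (added commentary): this parses the kernel command
 * line, so booting with e.g. "noexec=off" forces NX off even on a
 * capable CPU, while "noexec=on" requests it explicitly.
 */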

int nx_enabled = 0;
#ifdef CONFIG_X86_PAE

static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}

/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
	pte_t *pte;
	int ret = 1;

	if (!nx_enabled)
		goto out;

	pte = lookup_address(vaddr);
	BUG_ON(!pte);

	if (!pte_exec_kernel(*pte))
		ret = 0;

	if (enable)
		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	else
		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
	__flush_tlb_all();
out:
	return ret;
}
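
/*
 * Worked example (added commentary): with PAE a pte is 64 bits wide,
 * split here into pte_low and pte_high. _PAGE_BIT_NX is bit 63 of the
 * full entry, so it lands at bit 63 - 32 = 31 of pte_high; clearing
 * that bit makes the page executable, setting it makes it no-exec.
 */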

#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_XEN_PHYSDEV_ACCESS
	int i;
#endif

#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk("NX (Execute Disable) protection: active\n");
#endif

	pagetable_init();

#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel. When running as a
	 * Xen domain we are already in PAE mode at this point.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif
	__flush_tlb_all();

	kmap_init();

	/* Switch to the real shared_info page, and clear the dummy page. */
	set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
	memset(empty_zero_page, 0, sizeof(empty_zero_page));

#ifdef CONFIG_XEN_PHYSDEV_ACCESS
	/* Set up mapping of the lower 1MB */
	for (i = 0; i < NR_FIX_ISAMAPS; i++)
		if (xen_start_info.flags & SIF_PRIVILEGED)
			set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
		else
			__set_fixmap(FIX_ISAMAP_BEGIN - i,
				     virt_to_machine(empty_zero_page),
				     PAGE_KERNEL_RO);
#endif
}
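
/*
 * Added commentary: until this point empty_zero_page has been doing
 * double duty as the dummy shared_info page, which is why it is wiped
 * once the fixmap is redirected at the real machine page named in
 * xen_start_info.shared_info. Privileged (dom0) guests get a 1:1
 * fixmap of the ISA region so legacy drivers can reach it;
 * unprivileged domains have those slots aliased read-only to the
 * zero page instead.
 */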

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */

static void __init test_wp_bit(void)
{
	printk("Checking if this processor honours the WP bit even in supervisor mode... ");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk("Ok.\n");
	}
}

static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
	num_physpages = highend_pfn;
#else
	num_physpages = max_low_pfn;
#endif
#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = num_physpages;
#endif
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	extern int ppro_with_ram_bug(void);
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	int bad_ppro;
	unsigned long pfn;

	contiguous_bitmap = alloc_bootmem_low_pages(
		(max_low_pfn + 2*BITS_PER_LONG) >> 3);
	BUG_ON(!contiguous_bitmap);
	memset(contiguous_bitmap, 0, (max_low_pfn + 2*BITS_PER_LONG) >> 3);

#if defined(CONFIG_SWIOTLB)
	swiotlb_init();
#endif

#ifndef CONFIG_DISCONTIGMEM
	if (!mem_map)
		BUG();
#endif

	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
			PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
		BUG();
	}
#endif

	set_max_mapnr_init();

#ifdef CONFIG_HIGHMEM
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
	printk("vmalloc area: %lx-%lx, maxmem %lx\n",
	       VMALLOC_START, VMALLOC_END, MAXMEM);
	BUG_ON(VMALLOC_START > VMALLOC_END);

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();
	/* XEN: init and count low-mem pages outside initial allocation. */
	for (pfn = xen_start_info.nr_pages; pfn < max_low_pfn; pfn++) {
		ClearPageReserved(&mem_map[pfn]);
		set_page_count(&mem_map[pfn], 1);
		totalram_pages++;
	}

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#ifdef CONFIG_X86_PAE
	if (!cpu_has_pae)
		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

kmem_cache_t *pgd_cache;
kmem_cache_t *pmd_cache;

void __init pgtable_cache_init(void)
{
	if (PTRS_PER_PMD > 1) {
		pmd_cache = kmem_cache_create("pmd",
					PTRS_PER_PMD*sizeof(pmd_t),
					PTRS_PER_PMD*sizeof(pmd_t),
					0,
					pmd_ctor,
					NULL);
		if (!pmd_cache)
			panic("pgtable_cache_init(): cannot create pmd cache");
	}
	pgd_cache = kmem_cache_create("pgd",
#if 0 /* How the heck _this_ works in native linux ??? */
				PTRS_PER_PGD*sizeof(pgd_t),
				PTRS_PER_PGD*sizeof(pgd_t),
#else
				PAGE_SIZE,
				PAGE_SIZE,
#endif
				0,
				pgd_ctor,
				pgd_dtor);
	if (!pgd_cache)
		panic("pgtable_cache_init(): Cannot create pgd cache");
}
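
/*
 * Added commentary (a best-effort reading, not authoritative): on
 * native PAE, PTRS_PER_PGD is only 4, so the pgd cache there packs
 * 32-byte objects. Under Xen the pgd must be handed to the hypervisor
 * as a page table, which has to be a whole, page-aligned page, hence
 * the PAGE_SIZE size and alignment in the #else branch.
 */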

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static int noinline do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0,%1	\n"
		"1:	movb %1,%0	\n"
		"	xorl %2,%2	\n"
		"2:			\n"
		".section __ex_table,\"a\"\n"
		"	.align 4	\n"
		"	.long 1b,2b	\n"
		".previous		\n"
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}
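
/*
 * Added commentary on the asm above: 'flag' starts out as 1 (the "2"
 * input constraint ties it to the literal 1). The write-back at label
 * 1 targets the read-only FIX_WP_TEST mapping; if the CPU honours WP
 * in supervisor mode the write faults, the __ex_table entry resumes
 * execution at label 2, the xorl is skipped, and 1 is returned. If
 * the write silently succeeds, xorl clears the flag and 0 is
 * returned.
 */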

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		memset((void *)addr, 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
}
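
/*
 * Added commentary: the 0xcc fill is the x86 'int3' opcode, so any
 * stale pointer that jumps into freed init code traps immediately
 * instead of executing garbage.
 */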

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
}
#endif