ia64/linux-2.6.18-xen.hg
arch/i386/mm/init-xen.c @ 708:e410857fd83c

Remove contiguous_bitmap[] as it's no longer needed.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>

author   Keir Fraser <keir.fraser@citrix.com>
date     Wed Oct 22 14:55:29 2008 +0100 (2008-10-22)
parents  f619448beab5

/*
 * linux/arch/i386/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/hypervisor.h>
#include <asm/swiotlb.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
	make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	pud = pud_offset(pgd, 0);
	if (pmd_table != pmd_offset(pud, 0))
		BUG();
#else
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
		make_lowmem_page_readonly(page_table,
					  XENFEAT_writable_page_tables);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		if (page_table != pte_offset_kernel(pmd, 0))
			BUG();

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 */

/*
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		if (pgd_none(*pgd))
			one_md_table_init(pgd);
		pud = pud_offset(pgd, vaddr);
		pmd = pmd_offset(pud, vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
			if (vaddr < hypervisor_virt_start && pmd_none(*pmd))
				one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx, pte_ofs;

	unsigned long max_ram_pfn = xen_start_info->nr_pages;
	if (max_ram_pfn > max_low_pfn)
		max_ram_pfn = max_low_pfn;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;
	pmd_idx = pmd_index(PAGE_OFFSET);
	pte_ofs = pte_index(PAGE_OFFSET);

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
#ifdef CONFIG_XEN
		/*
		 * Native Linux does not have PAE paging enabled yet at
		 * this point. When running as a Xen domain we are
		 * already in PAE mode, so we can't simply hook in an
		 * empty pmd. That would kill the mappings we are
		 * currently using ...
		 */
		pmd = pmd_offset(pud_offset(pgd, PAGE_OFFSET), PAGE_OFFSET);
#else
		pmd = one_md_table_init(pgd);
#endif
		if (pfn >= max_low_pfn)
			continue;
		pmd += pmd_idx;
		for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
			if (address >= hypervisor_virt_start)
				continue;

			/* Map with big pages if possible, otherwise create normal page tables. */
			if (cpu_has_pse) {
				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(address) || is_kernel_text(address2))
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				else
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
				pfn += PTRS_PER_PTE;
			} else {
				pte = one_page_table_init(pmd);

				pte += pte_ofs;
				for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
					/* XEN: Only map initial RAM allocation. */
					if ((pfn >= max_ram_pfn) || pte_present(*pte))
						continue;
					if (is_kernel_text(address))
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
					else
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
				}
				pte_ofs = 0;
			}
		}
		pmd_idx = 0;
	}
}

#ifndef CONFIG_XEN

static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}

#else

#define page_kills_ppro(p) 0

#endif

extern int is_available_memory(efi_memory_desc_t *);

int page_is_ram(unsigned long pagenr)
{
	int i;
	unsigned long addr, end;

	if (efi_enabled) {
		efi_memory_desc_t *md;
		void *p;

		for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
			md = p;
			if (!is_available_memory(md))
				continue;
			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;

			if ((pagenr >= addr) && (pagenr < end))
				return 1;
		}
		return 0;
	}

	for (i = 0; i < e820.nr_map; i++) {

		if (e820.map[i].type != E820_RAM) /* not usable memory */
			continue;
		/*
		 * !!!FIXME!!! Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area. We need a sanity
		 * check here.
		 */
		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr) \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}
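
/*
 * Build the page tables covering the persistent kmap (PKMAP) area and
 * remember a pointer to its first pte in pkmap_page_table.
 */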
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

static void __meminit free_new_highpage(struct page *page, int pfn)
{
	init_page_count(page);
	if (pfn < xen_start_info->nr_pages)
		__free_page(page);
	totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page, pfn);
	} else
		SetPageReserved(page);
}

static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
	free_new_highpage(page, pfn);
	totalram_pages++;
#ifdef CONFIG_FLATMEM
	max_mapnr = max(pfn, max_mapnr);
#endif
	num_physpages++;
	return 0;
}

/*
 * Not currently handling the NUMA case.
 * Assuming a single node, and that all memory that has been added
 * dynamically and would be onlined here is in HIGHMEM.
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	add_one_highpage_hotplug(page, page_to_pfn(page));
}

#ifdef CONFIG_NUMA
extern void set_highmem_pages_init(int);
#else
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;
	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
		add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	totalram_pages += totalhigh_pages;
}
#endif /* CONFIG_NUMA */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */

unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
#else
#define remap_numa_kva() do {} while (0)
#endif

pgd_t *swapper_pg_dir;
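
/*
 * Build the kernel page tables: enable PSE/PGE where available, map low
 * physical memory, create the fixmap page tables and set up the
 * permanent kmap area.
 */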
static void __init pagetable_init (void)
{
	unsigned long vaddr;
	pgd_t *pgd_base = (pgd_t *)xen_start_info->pt_base;

	/* Enable PSE if available */
	if (cpu_has_pse) {
		set_in_cr4(X86_CR4_PSE);
	}

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	page_table_range_init(vaddr, hypervisor_virt_start, pgd_base);

	permanent_kmaps_init(pgd_base);
}

#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
/*
 * Swap suspend & friends need this for resume because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

void zap_low_mappings (void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	flush_tlb_all();
}

static int disable_nx __initdata = 0;
u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL(__supported_pte_mask);

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
void __init noexec_setup(const char *str)
{
	if (!strncmp(str, "on",2) && cpu_has_nx) {
		__supported_pte_mask |= _PAGE_NX;
		disable_nx = 0;
	} else if (!strncmp(str,"off",3)) {
		disable_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	}
}

int nx_enabled = 0;
#ifdef CONFIG_X86_PAE
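
/*
 * Check CPUID for the NX bit and, unless "noexec=off" was given, enable
 * it in EFER and add it to the supported PTE mask.
 */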
static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}

/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
	pte_t *pte;
	int ret = 1;

	if (!nx_enabled)
		goto out;

	pte = lookup_address(vaddr);
	BUG_ON(!pte);

	if (!pte_exec_kernel(*pte))
		ret = 0;

	if (enable)
		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	else
		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
	__flush_tlb_all();
out:
	return ret;
}

#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	int i;

#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk("NX (Execute Disable) protection: active\n");
#endif

	pagetable_init();

#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel.
	 * When running as a Xen domain we are already in PAE mode at
	 * this point.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif
	__flush_tlb_all();

	kmap_init();

	/* Switch to the real shared_info page, and clear the
	 * dummy page. */
	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
	memset(empty_zero_page, 0, sizeof(empty_zero_page));

	/* Setup mapping of lower 1st MB */
	for (i = 0; i < NR_FIX_ISAMAPS; i++)
		if (is_initial_xendomain())
			set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
		else
			__set_fixmap(FIX_ISAMAP_BEGIN - i,
				     virt_to_machine(empty_zero_page),
				     PAGE_KERNEL_RO);
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */

static void __init test_wp_bit(void)
{
	printk("Checking if this processor honours the WP bit even in supervisor mode... ");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk("Ok.\n");
	}
}

static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
	num_physpages = highend_pfn;
#else
	num_physpages = max_low_pfn;
#endif
#ifdef CONFIG_FLATMEM
	max_mapnr = num_physpages;
#endif
}

static struct kcore_list kcore_mem, kcore_vmalloc;
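
/*
 * Final memory setup: hand the bootmem pages to the page allocator,
 * account reserved and highmem pages, print the memory banner, test the
 * WP bit and pin the init_mm page directory.
 */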
void __init mem_init(void)
{
	extern int ppro_with_ram_bug(void);
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	int bad_ppro;
	unsigned long pfn;

#if defined(CONFIG_SWIOTLB)
	swiotlb_init();
#endif

#ifdef CONFIG_FLATMEM
	if (!mem_map)
		BUG();
#endif

	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
			PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
		BUG();
	}
#endif

	set_max_mapnr_init();

#ifdef CONFIG_HIGHMEM
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
	printk("vmalloc area: %lx-%lx, maxmem %lx\n",
		VMALLOC_START,VMALLOC_END,MAXMEM);
	BUG_ON(VMALLOC_START > VMALLOC_END);

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();
	/* XEN: init and count low-mem pages outside initial allocation. */
	for (pfn = xen_start_info->nr_pages; pfn < max_low_pfn; pfn++) {
		ClearPageReserved(pfn_to_page(pfn));
		init_page_count(pfn_to_page(pfn));
		totalram_pages++;
	}

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
		);

#ifdef CONFIG_X86_PAE
	if (!cpu_has_pae)
		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif

	set_bit(PG_pinned, &virt_to_page(init_mm.pgd)->flags);
}

/*
 * this is for the non-NUMA, single node SMP system case.
 * Specifically, in the case of x86, we will always add
 * memory to the highmem for now.
 */
#ifdef CONFIG_MEMORY_HOTPLUG
#ifndef CONFIG_NEED_MULTIPLE_NODES
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = &contig_page_data;
	struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
#endif
#endif

kmem_cache_t *pgd_cache;
kmem_cache_t *pmd_cache;
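
/*
 * Create the slab caches used for pmd (PAE only) and pgd allocation.
 * Under Xen the pgd cache objects are page sized and page aligned.
 */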
void __init pgtable_cache_init(void)
{
	if (PTRS_PER_PMD > 1) {
		pmd_cache = kmem_cache_create("pmd",
					PTRS_PER_PMD*sizeof(pmd_t),
					PTRS_PER_PMD*sizeof(pmd_t),
					0,
					pmd_ctor,
					NULL);
		if (!pmd_cache)
			panic("pgtable_cache_init(): cannot create pmd cache");
	}
	pgd_cache = kmem_cache_create("pgd",
#ifndef CONFIG_XEN
				PTRS_PER_PGD*sizeof(pgd_t),
				PTRS_PER_PGD*sizeof(pgd_t),
#else
				PAGE_SIZE,
				PAGE_SIZE,
#endif
				0,
				pgd_ctor,
				PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
	if (!pgd_cache)
		panic("pgtable_cache_init(): Cannot create pgd cache");
}

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static int noinline do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		" movb %0,%1 \n"
		"1: movb %1,%0 \n"
		" xorl %2,%2 \n"
		"2: \n"
		".section __ex_table,\"a\"\n"
		" .align 4 \n"
		" .long 1b,2b \n"
		".previous \n"
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)__start_rodata;

	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
		change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %uk\n",
		(__end_rodata - __start_rodata) >> 10);

	/*
	 * change_page_attr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif
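
/* Poison a range of init-time pages and return them to the page allocator. */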
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif