ia64/xen-unstable

view linux-2.4-xen-sparse/arch/xen/mm/init.c @ 5853:9b713b8d1100

Fix the path to qemu-dm
author kaf24@firebug.cl.cam.ac.uk
date Mon Jul 25 21:02:24 2005 +0000 (2005-07-25)
parents 56a63f9f378f
children 8799d14bef77 9312a3e8a6f8 dfaf788ab18c
/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/apic.h>
#include <asm/tlb.h>
/* XEN: We *cannot* use mmx_clear_page() this early. Force dumb memset(). */
#undef clear_page
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
mmu_gather_t mmu_gathers[NR_CPUS];
unsigned long highstart_pfn, highend_pfn;
static unsigned long totalram_pages;
static unsigned long totalhigh_pages;
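
/*
 * Trim the page-table quicklists: once the cache grows past 'high'
 * entries, free pgd/pte pages back to the allocator until it drops
 * to 'low'.  Returns the number of pages freed.
 */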
int do_check_pgt_cache(int low, int high)
{
        int freed = 0;
        if (pgtable_cache_size > high) {
                do {
                        if (!QUICKLIST_EMPTY(pgd_quicklist)) {
                                free_pgd_slow(get_pgd_fast());
                                freed++;
                        }
                        if (!QUICKLIST_EMPTY(pte_quicklist)) {
                                pte_free_slow(pte_alloc_one_fast(NULL, 0));
                                freed++;
                        }
                } while (pgtable_cache_size > low);
        }
        return freed;
}
/*
 * NOTE: pagetable_init() allocates all the fixmap page tables contiguously
 * in physical space, so we can cache the location of the first one and move
 * around without checking the pgd every time.
 */
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr) \
        pte_offset(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))

void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}
#endif /* CONFIG_HIGHMEM */
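
/*
 * Print a summary of memory usage to the console: free areas, free swap,
 * and a per-page tally of highmem/reserved/shared/swap-cached pages.
 */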
void show_mem(void)
{
        int i, total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap: %6dkB\n", nr_swap_pages << (PAGE_SHIFT-10));
        i = max_mapnr;
        while (i-- > 0) {
                total++;
                if (PageHighMem(mem_map+i))
                        highmem++;
                if (PageReserved(mem_map+i))
                        reserved++;
                else if (PageSwapCache(mem_map+i))
                        cached++;
                else if (page_count(mem_map+i))
                        shared += page_count(mem_map+i) - 1;
        }
        printk("%d pages of RAM\n", total);
        printk("%d pages of HIGHMEM\n", highmem);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
        printk("%ld pages in page table cache\n", pgtable_cache_size);
        show_buffers();
}
/* References to section boundaries */

extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;
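
/*
 * Install a single kernel PTE mapping 'phys' at 'vaddr' with protection
 * 'prot'.  The pgd/pmd entries covering vaddr must already exist; the
 * "PAE BUG" printks flag the case where they do not.
 */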
static inline void set_pte_phys(unsigned long vaddr,
                                unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;

        pgd = init_mm.pgd + __pgd_offset(vaddr);
        if (pgd_none(*pgd)) {
                printk("PAE BUG #00!\n");
                return;
        }
        pmd = pmd_offset(pgd, vaddr);
        if (pmd_none(*pmd)) {
                printk("PAE BUG #01!\n");
                return;
        }
        pte = pte_offset(pmd, vaddr);

        set_pte(pte, (pte_t) { phys | pgprot_val(prot) });

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}
void __set_fixmap(enum fixed_addresses idx, unsigned long phys,
                  pgprot_t flags)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                printk("Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, flags);
}
void clear_fixmap(enum fixed_addresses idx)
{
        set_pte_phys(__fix_to_virt(idx), 0, __pgprot(0));
}
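
/*
 * Allocate the pmd/pte pages covering the virtual range [start, end).
 * Only the page-table structure is built here; the individual PTEs are
 * filled in later (e.g. by set_fixmap()).  XEN: each freshly allocated
 * pte page is write-protected in the kernel linear map before it is
 * hooked into its pmd, since the hypervisor requires all page-table
 * pages to be mapped read-only.
 */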
static void __init fixrange_init(unsigned long start,
                                 unsigned long end, pgd_t *pgd_base)
{
        pgd_t *pgd, *kpgd;
        pmd_t *pmd, *kpmd;
        pte_t *pte, *kpte;
        int i, j;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
#ifdef CONFIG_X86_PAE
                if (pgd_none(*pgd)) {
                        pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                        set_pgd(pgd, __pgd(__pa(pmd) + 0x1)); /* present */
                        if (pmd != pmd_offset(pgd, 0))
                                printk("PAE BUG #02!\n");
                }
                pmd = pmd_offset(pgd, vaddr);
#else
                pmd = (pmd_t *)pgd;
#endif
                for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
                        if (pmd_none(*pmd)) {
                                pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                clear_page(pte);
                                /* XEN: make the new pte page read-only before hooking it in. */
                                kpgd = pgd_offset_k((unsigned long)pte);
                                kpmd = pmd_offset(kpgd, (unsigned long)pte);
                                kpte = pte_offset(kpmd, (unsigned long)pte);
                                set_pte(kpte, pte_wrprotect(*kpte));
                                set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
                        }
                        vaddr += PMD_SIZE;
                }
                j = 0;
        }
}
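
/*
 * Build the kernel's linear mapping of low memory above PAGE_OFFSET.
 * Under Xen the domain builder has already mapped part of this range,
 * so pmds that are already present are skipped; PTEs are populated only
 * up to ram_end, i.e. only for the xen_start_info.nr_pages
 * pseudo-physical pages the domain actually owns.  Finally the fixmap
 * and (with CONFIG_HIGHMEM) the kmap page tables are created.
 */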
static void __init pagetable_init(void)
{
        unsigned long vaddr, end, ram_end;
        pgd_t *kpgd, *pgd, *pgd_base;
        int i, j, k;
        pmd_t *kpmd, *pmd;
        pte_t *kpte, *pte, *pte_base;

        ram_end = end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
        if ( xen_start_info.nr_pages < max_low_pfn )
                ram_end = (unsigned long)__va(xen_start_info.nr_pages * PAGE_SIZE);

        pgd_base = init_mm.pgd;
        i = __pgd_offset(PAGE_OFFSET);
        pgd = pgd_base + i;

        for (; i < PTRS_PER_PGD; pgd++, i++) {
                vaddr = i*PGDIR_SIZE;
                if (vaddr >= end)
                        break;
                pmd = (pmd_t *)pgd;
                for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
                        vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
                        if (vaddr >= end)
                                break;

                        /* Filled in for us already? */
                        if ( pmd_val(*pmd) & _PAGE_PRESENT )
                                continue;

                        pte_base = pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                        clear_page(pte_base);

                        for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
                                vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
                                if (vaddr >= ram_end)
                                        break;
                                *pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
                        }
                        kpgd = pgd_offset_k((unsigned long)pte_base);
                        kpmd = pmd_offset(kpgd, (unsigned long)pte_base);
                        kpte = pte_offset(kpmd, (unsigned long)pte_base);
                        set_pte(kpte, pte_wrprotect(*kpte));
                        set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));
                }
        }

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, HYPERVISOR_VIRT_START, init_mm.pgd);

#ifdef CONFIG_HIGHMEM
        /*
         * Permanent kmaps:
         */
        vaddr = PKMAP_BASE;
        fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, init_mm.pgd);

        pgd = init_mm.pgd + __pgd_offset(vaddr);
        pmd = pmd_offset(pgd, vaddr);
        pte = pte_offset(pmd, vaddr);
        pkmap_page_table = pte;
#endif
}
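
/*
 * Split the available pages into zones: ZONE_DMA below MAX_DMA_ADDRESS
 * (the ISA 16MB limit), ZONE_NORMAL up to max_low_pfn, and, with
 * CONFIG_HIGHMEM, ZONE_HIGHMEM for the rest.
 */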
static void __init zone_sizes_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
        unsigned int max_dma, high, low;

        max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        low = max_low_pfn;
        high = highend_pfn;

        if (low < max_dma)
                zones_size[ZONE_DMA] = low;
        else {
                zones_size[ZONE_DMA] = max_dma;
                zones_size[ZONE_NORMAL] = low - max_dma;
#ifdef CONFIG_HIGHMEM
                zones_size[ZONE_HIGHMEM] = high - low;
#endif
        }
        free_area_init(zones_size);
}
void __init paging_init(void)
{
        pagetable_init();

        zone_sizes_init();

        /* Switch to the real shared_info page, and clear the dummy page. */
        set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
        HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
        memset(empty_zero_page, 0, sizeof(empty_zero_page));

#ifdef CONFIG_HIGHMEM
        kmap_init();
#endif
}
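
/*
 * XEN: the domain's pseudo-physical memory is presented as a single
 * contiguous range with no BIOS/e820 holes, so every pfn is treated
 * as RAM here.
 */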
static inline int page_is_ram(unsigned long pagenr)
{
        return 1;
}
#ifdef CONFIG_HIGHMEM
void __init one_highpage_init(struct page *page, int free_page)
{
        ClearPageReserved(page);
        set_bit(PG_highmem, &page->flags);
        atomic_set(&page->count, 1);
        if ( free_page )
                __free_page(page);
        totalhigh_pages++;
}
#endif /* CONFIG_HIGHMEM */
static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
        highmem_start_page = mem_map + highstart_pfn;
        max_mapnr = num_physpages = highend_pfn;
        num_mappedpages = max_low_pfn;
#else
        max_mapnr = num_mappedpages = num_physpages = max_low_pfn;
#endif
}
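
/*
 * Hand all usable pages over to the buddy allocator.  Low-memory pages
 * between xen_start_info.nr_pages and max_low_pfn have no machine frame
 * behind them yet; they are still initialised and counted here,
 * presumably so they can be populated later (e.g. by the balloon
 * driver).  Returns the number of reserved low-memory pages.
 */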
static int __init free_pages_init(void)
{
#ifdef CONFIG_HIGHMEM
        int bad_ppro = 0;
#endif
        int reservedpages, pfn;

        /*
         * Add only boot_pfn pages of low memory to the free list:
         * max_low_pfn may cover pages yet to be allocated from the
         * hypervisor, or may have been set to override the amount of
         * memory reported in xen_start_info.
         */
        int boot_pfn = min(xen_start_info.nr_pages, max_low_pfn);

        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();
        /* XEN: init and count low-mem pages outside initial allocation. */
        for (pfn = boot_pfn; pfn < max_low_pfn; pfn++) {
                ClearPageReserved(&mem_map[pfn]);
                atomic_set(&mem_map[pfn].count, 1);
                totalram_pages++;
        }

        reservedpages = 0;
        for (pfn = 0; pfn < boot_pfn; pfn++) {
                /*
                 * Only count reserved RAM pages
                 */
                if (page_is_ram(pfn) && PageReserved(mem_map+pfn))
                        reservedpages++;
        }
#ifdef CONFIG_HIGHMEM
        for (pfn = highend_pfn-1; pfn >= highstart_pfn; pfn--)
                one_highpage_init((struct page *) (mem_map + pfn),
                                  (pfn < xen_start_info.nr_pages));
        totalram_pages += totalhigh_pages;
#endif
        return reservedpages;
}
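
/*
 * Final memory accounting at boot: sanity-check the kmap layout, release
 * the boot-mem pages, and print the "Memory: ..." summary line.
 */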
void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;

        if (!mem_map)
                BUG();

#ifdef CONFIG_HIGHMEM
        /* check that fixmap and pkmap do not overlap */
        if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
                printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
                printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
                       PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
                BUG();
        }
#endif

        set_max_mapnr_init();

        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        reservedpages = free_pages_init();

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
               max_mapnr << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10,
               (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

        /* XEN: skip the usual WP-bit test; assume write protection works. */
        boot_cpu_data.wp_works_ok = 1;
}
void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                set_page_count(virt_to_page(addr), 1);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing unused kernel memory: %dk freed\n",
               (&__init_end - &__init_begin) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
                       (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
}
#endif
void si_meminfo(struct sysinfo *val)
{
        val->totalram = max_pfn;
        val->sharedram = 0;
        val->freeram = nr_free_pages();
        val->bufferram = atomic_read(&buffermem_pages);
        val->totalhigh = max_pfn - max_low_pfn;
        val->freehigh = nr_free_highpages();
        val->mem_unit = PAGE_SIZE;
        return;
}
#if defined(CONFIG_X86_PAE)
struct kmem_cache_s *pae_pgd_cachep;

void __init pgtable_cache_init(void)
{
        /*
         * PAE pgds must be 16-byte aligned:
         */
        pae_pgd_cachep = kmem_cache_create("pae_pgd", 32, 0,
                SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
        if (!pae_pgd_cachep)
                panic("init_pae(): Cannot alloc pae_pgd SLAB cache");
}
#endif /* CONFIG_X86_PAE */