linux-2.4.29-xen-sparse/arch/xen/mm/init.c @ 3516:1a4f61d36171 (ia64/xen-unstable)

bitkeeper revision 1.1159.223.31 (41f599bcklevTYwPtWQUZ7QK-azDbg)

Fix a recent patch that changed the way the version string is generated.
Signed-off-by: ian.pratt@cl.cam.ac.uk

author    iap10@freefall.cl.cam.ac.uk
date      Tue Jan 25 00:58:36 2005 +0000
parents   ed0d4ce83995
children  d126cac32f08 0dc6a70c0a02

/*
 * linux/arch/i386/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/apic.h>
#include <asm/tlb.h>

/* XEN: We *cannot* use mmx_clear_page() this early. Force dumb memset(). */
#undef clear_page
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
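
/*
 * mmx_clear_page() presumably depends on FPU/MMX state that is not yet
 * initialised this early in boot, so the override above forces every
 * early page clear through a plain memset().
 */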

mmu_gather_t mmu_gathers[NR_CPUS];
unsigned long highstart_pfn, highend_pfn;
static unsigned long totalram_pages;
static unsigned long totalhigh_pages;
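
/*
 * Trim the page-table quicklists: called by the VM when the cache of
 * spare pgd/pte pages grows past 'high' entries, freeing cached pages
 * back to the system until the cache drops to 'low'.
 */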
int do_check_pgt_cache(int low, int high)
{
        int freed = 0;
        if(pgtable_cache_size > high) {
                do {
                        if (!QUICKLIST_EMPTY(pgd_quicklist)) {
                                free_pgd_slow(get_pgd_fast());
                                freed++;
                        }
                        if (!QUICKLIST_EMPTY(pte_quicklist)) {
                                pte_free_slow(pte_alloc_one_fast(NULL, 0));
                                freed++;
                        }
                } while(pgtable_cache_size > low);
        }
        return freed;
}

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

#if CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr) \
        pte_offset(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))

void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}
#endif /* CONFIG_HIGHMEM */

void show_mem(void)
{
        int i, total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
        i = max_mapnr;
        while (i-- > 0) {
                total++;
                if (PageHighMem(mem_map+i))
                        highmem++;
                if (PageReserved(mem_map+i))
                        reserved++;
                else if (PageSwapCache(mem_map+i))
                        cached++;
                else if (page_count(mem_map+i))
                        shared += page_count(mem_map+i) - 1;
        }
        printk("%d pages of RAM\n", total);
        printk("%d pages of HIGHMEM\n",highmem);
        printk("%d reserved pages\n",reserved);
        printk("%d pages shared\n",shared);
        printk("%d pages swap cached\n",cached);
        printk("%ld pages in page table cache\n",pgtable_cache_size);
        show_buffers();
}

/* References to section boundaries */

extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;
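
/*
 * XEN: page tables are write-protected by the hypervisor, so a pte
 * cannot simply be written in place; queue_l1_entry_update() queues an
 * L1 (pte-level) update for Xen to validate and apply through its
 * mmu_update interface.
 */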
static inline void set_pte_phys (unsigned long vaddr,
                        unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;

        pgd = init_mm.pgd + __pgd_offset(vaddr);
        if (pgd_none(*pgd)) {
                printk("PAE BUG #00!\n");
                return;
        }
        pmd = pmd_offset(pgd, vaddr);
        if (pmd_none(*pmd)) {
                printk("PAE BUG #01!\n");
                return;
        }
        pte = pte_offset(pmd, vaddr);

        queue_l1_entry_update(pte, phys | pgprot_val(prot));

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys,
                  pgprot_t flags)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                printk("Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, flags);
}

void clear_fixmap(enum fixed_addresses idx)
{
        set_pte_phys(__fix_to_virt(idx), 0, __pgprot(0));
}
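
/*
 * XEN: before a freshly allocated page may be hooked in as a page table,
 * its mapping in the kernel address space must be made read-only (the
 * _PAGE_RW clearing below); the hypervisor refuses writable mappings of
 * live page tables. The queued updates are applied in one batch by
 * XEN_flush_page_update_queue() to keep the hypercall count down.
 */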
static void __init fixrange_init (unsigned long start,
                                  unsigned long end, pgd_t *pgd_base)
{
        pgd_t *pgd, *kpgd;
        pmd_t *pmd, *kpmd;
        pte_t *pte, *kpte;
        int i, j;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
#if CONFIG_X86_PAE
                if (pgd_none(*pgd)) {
                        pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                        set_pgd(pgd, __pgd(__pa(pmd) + 0x1));
                        if (pmd != pmd_offset(pgd, 0))
                                printk("PAE BUG #02!\n");
                }
                pmd = pmd_offset(pgd, vaddr);
#else
                pmd = (pmd_t *)pgd;
#endif
                for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
                        if (pmd_none(*pmd)) {
                                pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                clear_page(pte);
                                kpgd = pgd_offset_k((unsigned long)pte);
                                kpmd = pmd_offset(kpgd, (unsigned long)pte);
                                kpte = pte_offset(kpmd, (unsigned long)pte);
                                queue_l1_entry_update(kpte,
                                        (*(unsigned long *)kpte)&~_PAGE_RW);

                                set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
                        }
                        vaddr += PMD_SIZE;
                }
                j = 0;
        }

        XEN_flush_page_update_queue();
}

static void __init pagetable_init (void)
{
        unsigned long vaddr, end, ram_end;
        pgd_t *kpgd, *pgd, *pgd_base;
        int i, j, k;
        pmd_t *kpmd, *pmd;
        pte_t *kpte, *pte, *pte_base;

        end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
        ram_end = (unsigned long)__va(xen_start_info.nr_pages * PAGE_SIZE);
        if ( ram_end > end )
                ram_end = end;

        pgd_base = init_mm.pgd;
        i = __pgd_offset(PAGE_OFFSET);
        pgd = pgd_base + i;

        for (; i < PTRS_PER_PGD; pgd++, i++) {
                vaddr = i*PGDIR_SIZE;
                if (vaddr >= end)
                        break;
                pmd = (pmd_t *)pgd;
                for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
                        vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
                        if (vaddr >= end)
                                break;
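
                        /*
                         * XEN: the domain builder maps the kernel image
                         * and initial allocation before the guest runs,
                         * so some of these pmd entries are populated
                         * already; native i386 builds all of them from
                         * scratch.
                         */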
                        /* Filled in for us already? */
                        if ( pmd_val(*pmd) & _PAGE_PRESENT )
                                continue;

                        pte_base = pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                        clear_page(pte_base);

                        for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
                                vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
                                if (vaddr >= ram_end)
                                        break;
                                *pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
                        }
                        kpgd = pgd_offset_k((unsigned long)pte_base);
                        kpmd = pmd_offset(kpgd, (unsigned long)pte_base);
                        kpte = pte_offset(kpmd, (unsigned long)pte_base);
                        queue_l1_entry_update(kpte,
                                (*(unsigned long *)kpte)&~_PAGE_RW);
                        set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));
                        XEN_flush_page_update_queue();
                }
        }

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, HYPERVISOR_VIRT_START, init_mm.pgd);

#if CONFIG_HIGHMEM
        /*
         * Permanent kmaps:
         */
        vaddr = PKMAP_BASE;
        fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, init_mm.pgd);

        pgd = init_mm.pgd + __pgd_offset(vaddr);
        pmd = pmd_offset(pgd, vaddr);
        pte = pte_offset(pmd, vaddr);
        pkmap_page_table = pte;
#endif
}

static void __init zone_sizes_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
        unsigned int max_dma, high, low;

        max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        low = max_low_pfn;
        high = highend_pfn;

        if (low < max_dma)
                zones_size[ZONE_DMA] = low;
        else {
                zones_size[ZONE_DMA] = max_dma;
                zones_size[ZONE_NORMAL] = low - max_dma;
#ifdef CONFIG_HIGHMEM
                zones_size[ZONE_HIGHMEM] = high - low;
#endif
        }
        free_area_init(zones_size);
}

void __init paging_init(void)
{
        pagetable_init();

        zone_sizes_init();
        /* Switch to the real shared_info page, and clear the dummy page. */
        set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
        HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
        memset(empty_zero_page, 0, sizeof(empty_zero_page));

#ifdef CONFIG_HIGHMEM
        kmap_init();
#endif
}
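
/*
 * XEN: the hypervisor presents the guest's pseudo-physical address space
 * as one contiguous block of RAM, so every pfn counts as RAM here; native
 * i386 has to consult the e820 map instead.
 */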
static inline int page_is_ram (unsigned long pagenr)
{
        return 1;
}

#ifdef CONFIG_HIGHMEM
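/*
 * Under Xen only the first xen_start_info.nr_pages pfns are backed by
 * machine memory, so the caller passes free_page == 0 for pfns beyond
 * that: their struct pages are initialised but withheld from the free
 * lists (memory for them can be supplied later, e.g. by ballooning).
 */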
void __init one_highpage_init(struct page *page, int free_page)
{
        ClearPageReserved(page);
        set_bit(PG_highmem, &page->flags);
        atomic_set(&page->count, 1);
        if ( free_page )
                __free_page(page);
        totalhigh_pages++;
}
#endif /* CONFIG_HIGHMEM */

static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
        highmem_start_page = mem_map + highstart_pfn;
        max_mapnr = num_physpages = highend_pfn;
        num_mappedpages = max_low_pfn;
#else
        max_mapnr = num_mappedpages = num_physpages = max_low_pfn;
#endif
}

static int __init free_pages_init(void)
{
#ifdef CONFIG_HIGHMEM
        int bad_ppro = 0;
#endif
        int reservedpages, pfn;

        /*
         * Add only boot_pfn pages of low memory to the free list:
         * max_low_pfn may be sized to include pages yet to be allocated
         * from the hypervisor, or it may have been set to override the
         * amount of memory reported in xen_start_info.
         */
        int boot_pfn = min(xen_start_info.nr_pages, max_low_pfn);

        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (pfn = 0; pfn < boot_pfn; pfn++) {
                /*
                 * Only count reserved RAM pages
                 */
                if (page_is_ram(pfn) && PageReserved(mem_map+pfn))
                        reservedpages++;
        }
#ifdef CONFIG_HIGHMEM
        for (pfn = highend_pfn-1; pfn >= highstart_pfn; pfn--)
                one_highpage_init((struct page *) (mem_map + pfn),
                                  (pfn < xen_start_info.nr_pages));
        totalram_pages += totalhigh_pages;
#endif
        return reservedpages;
}

void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;

        if (!mem_map)
                BUG();

#ifdef CONFIG_HIGHMEM
        /* check that fixmap and pkmap do not overlap */
        if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
                printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
                printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
                       PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
                BUG();
        }
#endif

        set_max_mapnr_init();

        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        reservedpages = free_pages_init();

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
               max_mapnr << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10,
               (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );
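
        /*
         * Native i386 probes at boot whether the CPU honours the WP bit
         * in supervisor mode; the probe is skipped here, presumably
         * because WP support is a given on any processor Xen runs on.
         */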
        boot_cpu_data.wp_works_ok = 1;
}

void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                set_page_count(virt_to_page(addr), 1);
                free_page(addr);
                totalram_pages++;
        }
        printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
}
#endif

void si_meminfo(struct sysinfo *val)
{
        val->totalram = max_pfn;
        val->sharedram = 0;
        val->freeram = nr_free_pages();
        val->bufferram = atomic_read(&buffermem_pages);
        val->totalhigh = max_pfn-max_low_pfn;
        val->freehigh = nr_free_highpages();
        val->mem_unit = PAGE_SIZE;
        return;
}
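
/*
 * A PAE pgd holds only four 8-byte entries, so rather than spending a
 * whole page on each, they are carved from a dedicated 32-byte slab
 * cache; the SLAB flags below enforce the alignment constraint noted in
 * the comment.
 */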
#if defined(CONFIG_X86_PAE)
struct kmem_cache_s *pae_pgd_cachep;
void __init pgtable_cache_init(void)
{
        /*
         * PAE pgds must be 16-byte aligned:
         */
        pae_pgd_cachep = kmem_cache_create("pae_pgd", 32, 0,
                SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
        if (!pae_pgd_cachep)
                panic("init_pae(): Cannot alloc pae_pgd SLAB cache");
}
#endif /* CONFIG_X86_PAE */