ia64/xen-unstable

view linux-2.4.26-xen-sparse/arch/xen/mm/init.c @ 1774:131c48baa117

bitkeeper revision 1.1071.1.5 (40f41ae00utn5d2f3tlNLcvG_QhiBA)

Fairly major fixes to the network frontend driver.
Much saner now.
author kaf24@scramble.cl.cam.ac.uk
date Tue Jul 13 17:24:48 2004 +0000 (2004-07-13)
parents f3123052268f
children b9edbe5d4952 6c0df1212e95
line source
/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/apic.h>
#include <asm/tlb.h>

mmu_gather_t mmu_gathers[NR_CPUS];
unsigned long highstart_pfn, highend_pfn;
static unsigned long totalram_pages;
static unsigned long totalhigh_pages;

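/*
 * Trim the pgd/pte quicklists: once the cache grows beyond 'high'
 * entries, free page tables back to the allocator until it drops
 * below 'low'. Returns the number of pages freed.
 */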
int do_check_pgt_cache(int low, int high)
{
        int freed = 0;
        if (pgtable_cache_size > high) {
                do {
                        if (!QUICKLIST_EMPTY(pgd_quicklist)) {
                                free_pgd_slow(get_pgd_fast());
                                freed++;
                        }
                        if (!QUICKLIST_EMPTY(pte_quicklist)) {
                                pte_free_slow(pte_alloc_one_fast(NULL, 0));
                                freed++;
                        }
                } while (pgtable_cache_size > low);
        }
        return freed;
}

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr) \
        pte_offset(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))

void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}
#endif /* CONFIG_HIGHMEM */

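/*
 * Dump a memory-usage summary to the console: free areas, swap, and
 * per-page counts of reserved, swap-cached and shared pages.
 */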
void show_mem(void)
{
        int i, total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap: %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        i = max_mapnr;
        while (i-- > 0) {
                total++;
                if (PageHighMem(mem_map+i))
                        highmem++;
                if (PageReserved(mem_map+i))
                        reserved++;
                else if (PageSwapCache(mem_map+i))
                        cached++;
                else if (page_count(mem_map+i))
                        shared += page_count(mem_map+i) - 1;
        }
        printk("%d pages of RAM\n", total);
        printk("%d pages of HIGHMEM\n", highmem);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
        printk("%ld pages in page table cache\n", pgtable_cache_size);
        show_buffers();
}

/* References to section boundaries */

extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;

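/*
 * Note on the Xen pattern used throughout this file: a guest kernel
 * must not write page table entries directly. Updates are queued to
 * the hypervisor for validation, e.g.
 *
 *     queue_l1_entry_update(pte, new_val);   queue a PTE write
 *     XEN_flush_page_update_queue();         apply queued updates
 *
 * (illustrative sketch; the flush may also happen implicitly, e.g. on
 * a TLB flush).
 */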
static inline void set_pte_phys(unsigned long vaddr,
                                unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;

        pgd = init_mm.pgd + __pgd_offset(vaddr);
        if (pgd_none(*pgd)) {
                printk("PAE BUG #00!\n");
                return;
        }
        pmd = pmd_offset(pgd, vaddr);
        if (pmd_none(*pmd)) {
                printk("PAE BUG #01!\n");
                return;
        }
        pte = pte_offset(pmd, vaddr);

        queue_l1_entry_update(pte, phys | pgprot_val(prot));

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys,
                  pgprot_t flags)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                printk("Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, flags);
}

void clear_fixmap(enum fixed_addresses idx)
{
        set_pte_phys(__fix_to_virt(idx), 0, __pgprot(0));
}

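/*
 * Build the pgd/pmd skeleton for the virtual range [start, end).
 * Newly allocated pte pages are made read-only in the kernel's linear
 * mapping before being hooked into a pmd, since Xen refuses writable
 * mappings of page-table pages.
 */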
static void __init fixrange_init(unsigned long start,
                                 unsigned long end, pgd_t *pgd_base)
{
        pgd_t *pgd, *kpgd;
        pmd_t *pmd, *kpmd;
        pte_t *pte, *kpte;
        int i, j;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
#ifdef CONFIG_X86_PAE
                if (pgd_none(*pgd)) {
                        pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                        set_pgd(pgd, __pgd(__pa(pmd) + 0x1));
                        if (pmd != pmd_offset(pgd, 0))
                                printk("PAE BUG #02!\n");
                }
                pmd = pmd_offset(pgd, vaddr);
#else
                pmd = (pmd_t *)pgd;
#endif
                for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
                        if (pmd_none(*pmd)) {
                                pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                clear_page(pte);

                                /* Write-protect the new pte page before installing it. */
                                kpgd = pgd_offset_k((unsigned long)pte);
                                kpmd = pmd_offset(kpgd, (unsigned long)pte);
                                kpte = pte_offset(kpmd, (unsigned long)pte);
                                queue_l1_entry_update(kpte,
                                        (*(unsigned long *)kpte) & ~_PAGE_RW);

                                set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
                        }
                        vaddr += PMD_SIZE;
                }
                j = 0;
        }

        XEN_flush_page_update_queue();
}

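/*
 * Create the kernel's 1:1 mapping of low (pseudo-)physical memory up
 * to max_low_pfn, then the skeleton for the fixmap region below
 * HYPERVISOR_VIRT_START and, with CONFIG_HIGHMEM, the permanent kmap
 * area. Each new pte page is write-protected as in fixrange_init().
 */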
static void __init pagetable_init(void)
{
        unsigned long vaddr, end;
        pgd_t *kpgd, *pgd, *pgd_base;
        int i, j, k;
        pmd_t *kpmd, *pmd;
        pte_t *kpte, *pte, *pte_base;

        /*
         * This can be zero as well - no problem, in that case we exit
         * the loops anyway due to the PTRS_PER_* conditions.
         */
        end = (unsigned long)__va(max_low_pfn*PAGE_SIZE);

        pgd_base = init_mm.pgd;
        i = __pgd_offset(PAGE_OFFSET);
        pgd = pgd_base + i;

        for (; i < PTRS_PER_PGD; pgd++, i++) {
                vaddr = i*PGDIR_SIZE;
                if (end && (vaddr >= end))
                        break;
                pmd = (pmd_t *)pgd;
                for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
                        vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
                        if (end && (vaddr >= end))
                                break;

                        /* Filled in for us already? */
                        if ( pmd_val(*pmd) & _PAGE_PRESENT )
                                continue;

                        pte_base = pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);

                        for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
                                vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
                                if (end && (vaddr >= end))
                                        break;
                                *pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
                        }

                        /* Write-protect the new pte page before installing it. */
                        kpgd = pgd_offset_k((unsigned long)pte_base);
                        kpmd = pmd_offset(kpgd, (unsigned long)pte_base);
                        kpte = pte_offset(kpmd, (unsigned long)pte_base);
                        queue_l1_entry_update(kpte,
                                (*(unsigned long *)kpte) & ~_PAGE_RW);
                        set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));
                        XEN_flush_page_update_queue();
                }
        }

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, HYPERVISOR_VIRT_START, init_mm.pgd);

#ifdef CONFIG_HIGHMEM
        /*
         * Permanent kmaps:
         */
        vaddr = PKMAP_BASE;
        fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, init_mm.pgd);

        pgd = init_mm.pgd + __pgd_offset(vaddr);
        pmd = pmd_offset(pgd, vaddr);
        pte = pte_offset(pmd, vaddr);
        pkmap_page_table = pte;
#endif
}

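/*
 * Carve low/high memory into the DMA, NORMAL and (optionally) HIGHMEM
 * zones and hand the sizes to the buddy allocator.
 */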
static void __init zone_sizes_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
        unsigned int max_dma, high, low;

        max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        low = max_low_pfn;
        high = highend_pfn;

        if (low < max_dma)
                zones_size[ZONE_DMA] = low;
        else {
                zones_size[ZONE_DMA] = max_dma;
                zones_size[ZONE_NORMAL] = low - max_dma;
#ifdef CONFIG_HIGHMEM
                zones_size[ZONE_HIGHMEM] = high - low;
#endif
        }
        free_area_init(zones_size);
}

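/*
 * Top-level paging setup. Once the page tables and zones exist, map
 * the real shared_info page supplied by Xen at its fixmap slot,
 * replacing the dummy page used during early boot.
 */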
void __init paging_init(void)
{
        pagetable_init();

        zone_sizes_init();

        /* Switch to the real shared_info page, and clear the dummy page. */
        set_fixmap(FIX_SHARED_INFO, start_info.shared_info);
        HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
        memset(empty_zero_page, 0, sizeof(empty_zero_page));

#ifdef CONFIG_HIGHMEM
        kmap_init();
#endif
}

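/*
 * Stubs: every pseudo-physical page a Xen guest sees is RAM, and the
 * native Pentium Pro erratum workaround does not apply here.
 */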
static inline int page_is_ram(unsigned long pagenr)
{
        return 1;
}

static inline int page_kills_ppro(unsigned long pagenr)
{
        return 0;
}

#ifdef CONFIG_HIGHMEM
void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
        if (!page_is_ram(pfn)) {
                SetPageReserved(page);
                return;
        }

        if (bad_ppro && page_kills_ppro(pfn)) {
                SetPageReserved(page);
                return;
        }

        ClearPageReserved(page);
        set_bit(PG_highmem, &page->flags);
        atomic_set(&page->count, 1);
        __free_page(page);
        totalhigh_pages++;
}
#endif /* CONFIG_HIGHMEM */

static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
        highmem_start_page = mem_map + highstart_pfn;
        max_mapnr = num_physpages = highend_pfn;
        num_mappedpages = max_low_pfn;
#else
        max_mapnr = num_mappedpages = num_physpages = max_low_pfn;
#endif
}

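/*
 * Hand all bootmem pages to the page allocator, count the reserved
 * low-memory pages, and (with CONFIG_HIGHMEM) release the highmem
 * pages as well. Returns the number of reserved pages.
 */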
static int __init free_pages_init(void)
{
#ifdef CONFIG_HIGHMEM
        int bad_ppro = 0;
#endif
        int reservedpages, pfn;

        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (pfn = 0; pfn < max_low_pfn; pfn++) {
                /*
                 * Only count reserved RAM pages
                 */
                if (page_is_ram(pfn) && PageReserved(mem_map+pfn))
                        reservedpages++;
        }
#ifdef CONFIG_HIGHMEM
        for (pfn = highend_pfn-1; pfn >= highstart_pfn; pfn--)
                one_highpage_init((struct page *) (mem_map + pfn), pfn, bad_ppro);
        totalram_pages += totalhigh_pages;
#endif
        return reservedpages;
}

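/*
 * Final memory bring-up: sanity-check the pkmap/fixmap layout, set the
 * mapnr limits, free boot memory and print the "Memory: ..." banner.
 */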
void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;

        if (!mem_map)
                BUG();

#ifdef CONFIG_HIGHMEM
        /* check that fixmap and pkmap do not overlap */
        if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
                printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
                printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
                       PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
                BUG();
        }
#endif

        set_max_mapnr_init();

        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        reservedpages = free_pages_init();

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
               max_mapnr << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10,
               (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

        boot_cpu_data.wp_works_ok = 1;
}

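/*
 * Free the pages holding __init code and data once boot-time
 * initialisation has finished.
 */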
void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                set_page_count(virt_to_page(addr), 1);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing unused kernel memory: %dk freed\n",
               (&__init_end - &__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
                       (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
}
#endif

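/* Fill in the memory statistics reported by sysinfo(2). */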
void si_meminfo(struct sysinfo *val)
{
        val->totalram = totalram_pages;
        val->sharedram = 0;
        val->freeram = nr_free_pages();
        val->bufferram = atomic_read(&buffermem_pages);
        val->totalhigh = totalhigh_pages;
        val->freehigh = nr_free_highpages();
        val->mem_unit = PAGE_SIZE;
        return;
}

#if defined(CONFIG_X86_PAE)
struct kmem_cache_s *pae_pgd_cachep;

void __init pgtable_cache_init(void)
{
        /*
         * PAE pgds must be 16-byte aligned:
         */
        pae_pgd_cachep = kmem_cache_create("pae_pgd", 32, 0,
                SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
        if (!pae_pgd_cachep)
                panic("init_pae(): Cannot alloc pae_pgd SLAB cache");
}
#endif /* CONFIG_X86_PAE */