ia64/xen-unstable

view old/xenolinux-2.4.16-sparse/arch/xeno/mm/init.c @ 235:d7d0a23b2e07

bitkeeper revision 1.93 (3e5a4e6bkPheUp3x1uufN2MS3LAB7A)

Latest and Greatest version of XenoLinux based on the Linux-2.4.21-pre4
kernel.
author iap10@labyrinth.cl.cam.ac.uk
date Mon Feb 24 16:55:07 2003 +0000 (2003-02-24)
parents
children
line source
1 /*
2 * linux/arch/i386/mm/init.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 *
6 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
7 */
9 #include <linux/config.h>
10 #include <linux/signal.h>
11 #include <linux/sched.h>
12 #include <linux/kernel.h>
13 #include <linux/errno.h>
14 #include <linux/string.h>
15 #include <linux/types.h>
16 #include <linux/ptrace.h>
17 #include <linux/mman.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/smp.h>
21 #include <linux/init.h>
22 #ifdef CONFIG_BLK_DEV_INITRD
23 #include <linux/blk.h>
24 #endif
25 #include <linux/highmem.h>
26 #include <linux/pagemap.h>
27 #include <linux/bootmem.h>
29 #include <asm/processor.h>
30 #include <asm/system.h>
31 #include <asm/uaccess.h>
32 #include <asm/pgtable.h>
33 #include <asm/pgalloc.h>
34 #include <asm/dma.h>
35 #include <asm/apic.h>
36 #include <asm/tlb.h>
38 mmu_gather_t mmu_gathers[NR_CPUS];
39 unsigned long highstart_pfn, highend_pfn;
40 static unsigned long totalram_pages;
41 static unsigned long totalhigh_pages;
43 int do_check_pgt_cache(int low, int high)
44 {
45 int freed = 0;
46 if(pgtable_cache_size > high) {
47 do {
48 if (!QUICKLIST_EMPTY(pgd_quicklist)) {
49 free_pgd_slow(get_pgd_fast());
50 freed++;
51 }
52 if (!QUICKLIST_EMPTY(pte_quicklist)) {
53 pte_free_slow(pte_alloc_one_fast(NULL, 0));
54 freed++;
55 }
56 } while(pgtable_cache_size > low);
57 }
58 return freed;
59 }
61 void show_mem(void)
62 {
63 int i, total = 0, reserved = 0;
64 int shared = 0, cached = 0;
65 int highmem = 0;
67 printk("Mem-info:\n");
68 show_free_areas();
69 printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
70 i = max_mapnr;
71 while (i-- > 0) {
72 total++;
73 if (PageHighMem(mem_map+i))
74 highmem++;
75 if (PageReserved(mem_map+i))
76 reserved++;
77 else if (PageSwapCache(mem_map+i))
78 cached++;
79 else if (page_count(mem_map+i))
80 shared += page_count(mem_map+i) - 1;
81 }
82 printk("%d pages of RAM\n", total);
83 printk("%d pages of HIGHMEM\n",highmem);
84 printk("%d reserved pages\n",reserved);
85 printk("%d pages shared\n",shared);
86 printk("%d pages swap cached\n",cached);
87 printk("%ld pages in page table cache\n",pgtable_cache_size);
88 show_buffers();
89 }
/* Section boundary symbols supplied by the linker script. */
extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;
96 static inline void set_pte_phys (unsigned long vaddr,
97 unsigned long phys, pgprot_t flags)
98 {
99 pgprot_t prot;
100 pgd_t *pgd;
101 pmd_t *pmd;
102 pte_t *pte;
104 pgd = init_mm.pgd + __pgd_offset(vaddr);
105 if (pgd_none(*pgd)) {
106 printk("PAE BUG #00!\n");
107 return;
108 }
109 pmd = pmd_offset(pgd, vaddr);
110 if (pmd_none(*pmd)) {
111 printk("PAE BUG #01!\n");
112 return;
113 }
114 pte = pte_offset(pmd, vaddr);
115 if (pte_val(*pte))
116 pte_ERROR(*pte);
117 pgprot_val(prot) = pgprot_val(PAGE_KERNEL) | pgprot_val(flags);
119 /* We queue directly, avoiding hidden phys->machine translation. */
120 queue_l1_entry_update(__pa(pte), phys | pgprot_val(prot));
122 /*
123 * It's enough to flush this one mapping.
124 * (PGE mappings get flushed as well)
125 */
126 __flush_tlb_one(vaddr);
127 }
129 void __set_fixmap (enum fixed_addresses idx, unsigned long phys,
130 pgprot_t flags)
131 {
132 unsigned long address = __fix_to_virt(idx);
134 if (idx >= __end_of_fixed_addresses) {
135 printk("Invalid __set_fixmap\n");
136 return;
137 }
138 set_pte_phys(address, phys, flags);
139 }
141 static void __init fixrange_init (unsigned long start,
142 unsigned long end, pgd_t *pgd_base)
143 {
144 pgd_t *pgd, *kpgd;
145 pmd_t *pmd, *kpmd;
146 pte_t *pte, *kpte;
147 int i, j;
148 unsigned long vaddr;
150 vaddr = start;
151 i = __pgd_offset(vaddr);
152 j = __pmd_offset(vaddr);
153 pgd = pgd_base + i;
155 for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
156 pmd = (pmd_t *)pgd;
157 for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
158 if (pmd_none(*pmd)) {
159 pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
160 clear_page(pte);
161 kpgd = pgd_offset_k((unsigned long)pte);
162 kpmd = pmd_offset(kpgd, (unsigned long)pte);
163 kpte = pte_offset(kpmd, (unsigned long)pte);
164 queue_l1_entry_update(__pa(kpte),
165 (*(unsigned long *)kpte)&~_PAGE_RW);
166 set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
167 }
168 vaddr += PMD_SIZE;
169 }
170 j = 0;
171 }
173 XENO_flush_page_update_queue();
174 }
176 void __init paging_init(void)
177 {
178 unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
179 unsigned int max_dma, high, low;
180 unsigned long vaddr;
182 max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
183 low = max_low_pfn;
184 high = highend_pfn;
186 if (low < max_dma)
187 {
188 zones_size[ZONE_DMA] = low;
189 }
190 else
191 {
192 zones_size[ZONE_DMA] = max_dma;
193 zones_size[ZONE_NORMAL] = low - max_dma;
194 }
195 free_area_init(zones_size);
197 /*
198 * Fixed mappings, only the page table structure has to be created -
199 * mappings will be set by set_fixmap():
200 */
201 vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
202 fixrange_init(vaddr, HYPERVISOR_VIRT_START, init_mm.pgd);
204 /*
205 * XXX We do this conversion early, so that all other page tables
206 * will automatically get this mapping.
207 */
208 set_fixmap(FIX_BLKRING_BASE, start_info.blk_ring);
209 }
/*
 * Under Xen the guest's pseudo-physical address space is all RAM:
 * there are no BIOS/ROM holes to special-case, so every page frame
 * number counts as RAM.
 */
static inline int page_is_ram (unsigned long pagenr)
{
	return 1;
}
217 void __init mem_init(void)
218 {
219 int codesize, reservedpages, datasize, initsize;
220 int tmp;
222 max_mapnr = num_physpages = max_low_pfn;
223 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
225 /* clear the zero-page */
226 memset(empty_zero_page, 0, PAGE_SIZE);
228 /* this will put all low memory onto the freelists */
229 totalram_pages += free_all_bootmem();
231 reservedpages = 0;
232 for (tmp = 0; tmp < max_low_pfn; tmp++)
233 /*
234 * Only count reserved RAM pages
235 */
236 if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
237 reservedpages++;
238 codesize = (unsigned long) &_etext - (unsigned long) &_text;
239 datasize = (unsigned long) &_edata - (unsigned long) &_etext;
240 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
242 printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
243 (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
244 max_mapnr << (PAGE_SHIFT-10),
245 codesize >> 10,
246 reservedpages << (PAGE_SHIFT-10),
247 datasize >> 10,
248 initsize >> 10,
249 (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
250 );
252 boot_cpu_data.wp_works_ok = 1;
253 }
255 void free_initmem(void)
256 {
257 unsigned long addr;
259 addr = (unsigned long)(&__init_begin);
260 for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
261 ClearPageReserved(virt_to_page(addr));
262 set_page_count(virt_to_page(addr), 1);
263 free_page(addr);
264 totalram_pages++;
265 }
266 printk ("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
267 }
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Release the pages occupied by the initial ramdisk image, in the
 * range [start, end), back to the page allocator.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	while (start < end) {
		/* Un-reserve, restore a refcount of 1, then free. */
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
		start += PAGE_SIZE;
	}
}
#endif
283 void si_meminfo(struct sysinfo *val)
284 {
285 val->totalram = totalram_pages;
286 val->sharedram = 0;
287 val->freeram = nr_free_pages();
288 val->bufferram = atomic_read(&buffermem_pages);
289 val->totalhigh = totalhigh_pages;
290 val->freehigh = nr_free_highpages();
291 val->mem_unit = PAGE_SIZE;
292 return;
293 }