ia64/xen-unstable: linux-2.4.29-xen-sparse/mm/vmalloc.c @ 3602:9a9c5a491401

bitkeeper revision 1.1159.235.1 (42000d3dwcPyT8aY4VIPYGCfCAJuQQ)

More x86/64 work. Status: traps.c is now included in the build, but actual
building of the IDT doesn't happen yet, and we need some sort of entry.S.
More page-table building is required so that arch_init_memory() can work.
And there is something odd with MP-table parsing; I currently suspect that
__init sections are causing problems.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@viper.(none)
date Tue Feb 01 23:14:05 2005 +0000 (2005-02-01)
parents 610068179f96
children 0a4b76b6b5a0
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>

rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct * vmlist;
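
/*
 * Teardown path: free_area_pte() and free_area_pmd() walk the kernel page
 * tables (init_mm) over a virtually contiguous region, clear the entries
 * and release the backing pages; vmfree_area_pages() drives the walk one
 * pgd entry at a time and flushes caches/TLBs around it.
 */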

static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
{
        pte_t * pte;
        unsigned long end;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return;
        }
        pte = pte_offset(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t page;
                page = ptep_get_and_clear(pte);
                address += PAGE_SIZE;
                pte++;
                if (pte_none(page))
                        continue;
                if (pte_present(page)) {
                        struct page *ptpage = pte_page(page);
                        if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
                                __free_page(ptpage);
                        continue;
                }
                printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
        } while (address < end);
}

static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
{
        pmd_t * pmd;
        unsigned long end;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                pgd_ERROR(*dir);
                pgd_clear(dir);
                return;
        }
        pmd = pmd_offset(dir, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                free_area_pte(pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
}

void vmfree_area_pages(unsigned long address, unsigned long size)
{
        pgd_t * dir;
        unsigned long end = address + size;

        dir = pgd_offset_k(address);
        flush_cache_all();
        do {
                free_area_pmd(dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        flush_tlb_all();
}
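
/*
 * Allocation path: alloc_area_pte() fills in one pte at a time.  When no
 * page array is supplied (the vmalloc case) it allocates fresh pages,
 * dropping init_mm.page_table_lock around alloc_page(); when a page array
 * is supplied (the vmap case) it maps the caller's pages and takes an
 * extra reference so they can later be released by vfree().
 */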

static inline int alloc_area_pte (pte_t * pte, unsigned long address,
                        unsigned long size, int gfp_mask,
                        pgprot_t prot, struct page ***pages)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                struct page * page;

                if (!pages) {
                        spin_unlock(&init_mm.page_table_lock);
                        page = alloc_page(gfp_mask);
                        spin_lock(&init_mm.page_table_lock);
                } else {
                        page = (**pages);
                        (*pages)++;

                        /* Add a reference to the page so we can free later */
                        if (page)
                                atomic_inc(&page->count);
                }
                if (!pte_none(*pte))
                        printk(KERN_ERR "alloc_area_pte: page already exists\n");
                if (!page)
                        return -ENOMEM;
                set_pte(pte, mk_pte(page, prot));
                address += PAGE_SIZE;
                pte++;
        } while (address < end);
        return 0;
}

static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address,
                        unsigned long size, int gfp_mask,
                        pgprot_t prot, struct page ***pages)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                pte_t * pte = pte_alloc(&init_mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                if (alloc_area_pte(pte, address, end - address,
                                   gfp_mask, prot, pages))
                        return -ENOMEM;
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return 0;
}

/*static inline*/ int __vmalloc_area_pages (unsigned long address,
                                            unsigned long size,
                                            int gfp_mask,
                                            pgprot_t prot,
                                            struct page ***pages)
{
        pgd_t * dir;
        unsigned long start = address;
        unsigned long end = address + size;

        dir = pgd_offset_k(address);
        spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd;

                pmd = pmd_alloc(&init_mm, dir, address);
                if (!pmd)
                        goto err;

                if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot, pages))
                        goto err;       // The kernel NEVER reclaims pmds, so no need to undo pmd_alloc() here

                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        spin_unlock(&init_mm.page_table_lock);
        flush_cache_all();
        /* Xen: flush any queued page-table updates to the hypervisor. */
        XEN_flush_page_update_queue();
        return 0;
err:
        spin_unlock(&init_mm.page_table_lock);
        flush_cache_all();
        if (address > start)
                vmfree_area_pages(start, address - start);
        return -ENOMEM;
}

int vmalloc_area_pages(unsigned long address, unsigned long size,
                       int gfp_mask, pgprot_t prot)
{
        return __vmalloc_area_pages(address, size, gfp_mask, prot, NULL);
}
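
/*
 * get_vm_area() reserves a hole in the vmalloc address range.  The vmlist
 * is kept sorted by address, and the extra PAGE_SIZE added to each area
 * acts as an unmapped guard page between allocations.
 */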

struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
{
        unsigned long addr, next;
        struct vm_struct **p, *tmp, *area;

        area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
        if (!area)
                return NULL;

        size += PAGE_SIZE;
        if (!size) {
                kfree (area);
                return NULL;
        }

        addr = VMALLOC_START;
        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
                if ((size + addr) < addr)
                        goto out;
                if (size + addr <= (unsigned long) tmp->addr)
                        break;
                next = tmp->size + (unsigned long) tmp->addr;
                if (next > addr)
                        addr = next;
                if (addr > VMALLOC_END-size)
                        goto out;
        }
        area->flags = flags;
        area->addr = (void *)addr;
        area->size = size;
        area->next = *p;
        *p = area;
        write_unlock(&vmlist_lock);
        return area;

out:
        write_unlock(&vmlist_lock);
        kfree(area);
        return NULL;
}

void __vfree(void * addr, int free_area_pages)
{
        struct vm_struct **p, *tmp;

        if (!addr)
                return;
        if ((PAGE_SIZE-1) & (unsigned long) addr) {
                printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
                return;
        }
        write_lock(&vmlist_lock);
        for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;
                        if (free_area_pages)
                                vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
                        write_unlock(&vmlist_lock);
                        kfree(tmp);
                        return;
                }
        }
        write_unlock(&vmlist_lock);
        printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
}

void vfree(void * addr)
{
        __vfree(addr,1);
}

void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot)
{
        void * addr;
        struct vm_struct *area;

        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > num_physpages)
                return NULL;
        area = get_vm_area(size, VM_ALLOC);
        if (!area)
                return NULL;
        addr = area->addr;
        if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask,
                                 prot, NULL)) {
                __vfree(addr, 0);
                return NULL;
        }
        return addr;
}

void * vmap(struct page **pages, int count,
            unsigned long flags, pgprot_t prot)
{
        void * addr;
        struct vm_struct *area;
        unsigned long size = count << PAGE_SHIFT;

        if (!size || size > (max_mapnr << PAGE_SHIFT))
                return NULL;
        area = get_vm_area(size, flags);
        if (!area) {
                return NULL;
        }
        addr = area->addr;
        if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, 0,
                                 prot, &pages)) {
                __vfree(addr, 0);
                return NULL;
        }
        return addr;
}
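
/*
 * vread()/vwrite() copy data between a caller-supplied buffer and the
 * vmalloc region, walking the vmlist under vmlist_lock.  Gaps between
 * areas (and each area's trailing guard page) read back as zeroes in
 * vread() and are simply skipped in vwrite().
 */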

long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        *buf = '\0';
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *buf = *addr;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *addr = *buf;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}
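
For reference, below is a minimal sketch of how a 2.4-era module might use the interface this file implements. It is illustrative only and not part of vmalloc.c; the names example_init, example_exit and example_buf, and the 64 KB buffer size, are made up for the example.

/* Illustrative use of vmalloc()/vfree() from a simple kernel module. */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

static void *example_buf;   /* hypothetical buffer pointer */

static int __init example_init(void)
{
        /* vmalloc() may sleep and returns NULL on failure; the returned
         * memory is virtually contiguous but not physically contiguous. */
        example_buf = vmalloc(64 * 1024);
        if (!example_buf)
                return -ENOMEM;
        memset(example_buf, 0, 64 * 1024);
        return 0;
}

static void __exit example_exit(void)
{
        /* vfree() unmaps the area and releases the backing pages. */
        vfree(example_buf);
}

module_init(example_init);
module_exit(example_exit);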