ia64/xen-unstable: view linux-2.4.29-xen-sparse/mm/mremap.c @ 3602:9a9c5a491401

bitkeeper revision 1.1159.235.1 (42000d3dwcPyT8aY4VIPYGCfCAJuQQ)

More x86/64 work. Status: traps.c is now included in the build, but the
IDT is not actually built yet, and we still need some sort of entry.S.
More page-table setup is required before arch_init_memory() can work.
There is also something odd with MP-table parsing; I currently suspect
that __init sections are causing problems.

Signed-off-by: keir.fraser@cl.cam.ac.uk

author    kaf24@viper.(none)
date      Tue Feb 01 23:14:05 2005 +0000 (2005-02-01)
parents   610068179f96
children  0a4b76b6b5a0
/*
 * linux/mm/mremap.c
 *
 * (C) Copyright 1996 Linus Torvalds
 */

#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>

extern int vm_enough_memory(long pages);
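
/*
 * Look up the pte mapping 'addr', walking pgd -> pmd -> pte.  Bad
 * upper-level entries are reported and cleared.  Returns NULL if
 * nothing is mapped at 'addr'.
 */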
static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t * pgd;
        pmd_t * pmd;
        pte_t * pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                goto end;
        if (pgd_bad(*pgd)) {
                pgd_ERROR(*pgd);
                pgd_clear(pgd);
                goto end;
        }

        pmd = pmd_offset(pgd, addr);
        if (pmd_none(*pmd))
                goto end;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                goto end;
        }

        pte = pte_offset(pmd, addr);
        if (pte_none(*pte))
                pte = NULL;
end:
        return pte;
}
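
/*
 * Make sure a pte exists for 'addr' in the destination, allocating a
 * pmd and a pte page if necessary.  Note that pte_alloc() may drop and
 * re-take mm->page_table_lock while allocating.
 */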
static inline pte_t *alloc_one_pte(struct mm_struct *mm, unsigned long addr)
{
        pmd_t * pmd;
        pte_t * pte = NULL;

        pmd = pmd_alloc(mm, pgd_offset(mm, addr), addr);
        if (pmd)
                pte = pte_alloc(mm, pmd, addr);
        return pte;
}
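
/*
 * Move the pte from 'src' to 'dst'.  If no destination pte could be
 * allocated, the entry is put back at 'src' and the failure is
 * reported so the caller can undo the partial move.
 */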
static inline int copy_one_pte(struct mm_struct *mm, pte_t * src, pte_t * dst)
{
        int error = 0;
        pte_t pte;

        if (!pte_none(*src)) {
                pte = ptep_get_and_clear(src);
                if (!dst) {
                        /* No dest? We must put it back. */
                        dst = src;
                        error++;
                }
                set_pte(dst, pte);
        }
        return error;
}
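
/*
 * Move a single pte.  'src' is looked up a second time because
 * alloc_one_pte() may have dropped mm->page_table_lock while
 * allocating, leaving the first lookup stale.
 */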
static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
{
        int error = 0;
        pte_t * src, * dst;

        spin_lock(&mm->page_table_lock);
        src = get_one_pte(mm, old_addr);
        if (src) {
                dst = alloc_one_pte(mm, new_addr);
                src = get_one_pte(mm, old_addr);
                if (src)
                        error = copy_one_pte(mm, src, dst);
        }
        spin_unlock(&mm->page_table_lock);
        return error;
}
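
/*
 * Move 'len' bytes of ptes from 'old_addr' to 'new_addr', one page at
 * a time, walking backwards from the end of the range.
 */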
static int move_page_tables(struct mm_struct * mm,
        unsigned long new_addr, unsigned long old_addr, unsigned long len)
{
        unsigned long offset = len;

        flush_cache_range(mm, old_addr, old_addr + len);

        /*
         * This is not the clever way to do this, but we're taking the
         * easy way out on the assumption that most remappings will be
         * only a few pages.. This also makes error recovery easier.
         */
        while (offset) {
                offset -= PAGE_SIZE;
                if (move_one_page(mm, old_addr + offset, new_addr + offset))
                        goto oops_we_failed;
        }
        flush_tlb_range(mm, old_addr, old_addr + len);
        return 0;

        /*
         * Ok, the move failed because we didn't have enough pages for
         * the new page table tree. This is unlikely, but we have to
         * take the possibility into account. In that case we just move
         * all the pages back (this will work, because we still have
         * the old page tables)
         */
oops_we_failed:
        XEN_flush_page_update_queue();
        flush_cache_range(mm, new_addr, new_addr + len);
        while ((offset += PAGE_SIZE) < len)
                move_one_page(mm, new_addr + offset, old_addr + offset);
        XEN_flush_page_update_queue();
        zap_page_range(mm, new_addr, len);
        return -1;
}
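
/*
 * Relocate 'vma' to 'new_addr'.  Try to merge with a neighbouring vma
 * at the destination first; only allocate a fresh vm_area_struct if no
 * merge is possible.  Returns the new address, or -ENOMEM.
 */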
static inline unsigned long move_vma(struct vm_area_struct * vma,
        unsigned long addr, unsigned long old_len, unsigned long new_len,
        unsigned long new_addr)
{
        struct mm_struct * mm = vma->vm_mm;
        struct vm_area_struct * new_vma, * next, * prev;
        int allocated_vma;

        new_vma = NULL;
        next = find_vma_prev(mm, new_addr, &prev);
        if (next) {
                if (prev && prev->vm_end == new_addr &&
                    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
                        spin_lock(&mm->page_table_lock);
                        prev->vm_end = new_addr + new_len;
                        spin_unlock(&mm->page_table_lock);
                        new_vma = prev;
                        if (next != prev->vm_next)
                                BUG();
                        if (prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags)) {
                                spin_lock(&mm->page_table_lock);
                                prev->vm_end = next->vm_end;
                                __vma_unlink(mm, next, prev);
                                spin_unlock(&mm->page_table_lock);

                                mm->map_count--;
                                kmem_cache_free(vm_area_cachep, next);
                        }
                } else if (next->vm_start == new_addr + new_len &&
                           can_vma_merge(next, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
                        spin_lock(&mm->page_table_lock);
                        next->vm_start = new_addr;
                        spin_unlock(&mm->page_table_lock);
                        new_vma = next;
                }
        } else {
                prev = find_vma(mm, new_addr-1);
                if (prev && prev->vm_end == new_addr &&
                    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
                        spin_lock(&mm->page_table_lock);
                        prev->vm_end = new_addr + new_len;
                        spin_unlock(&mm->page_table_lock);
                        new_vma = prev;
                }
        }

        allocated_vma = 0;
        if (!new_vma) {
                new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
                if (!new_vma)
                        goto out;
                allocated_vma = 1;
        }

        if (!move_page_tables(current->mm, new_addr, addr, old_len)) {
                unsigned long vm_locked = vma->vm_flags & VM_LOCKED;

                if (allocated_vma) {
                        *new_vma = *vma;
                        new_vma->vm_start = new_addr;
                        new_vma->vm_end = new_addr+new_len;
                        new_vma->vm_pgoff += (addr-vma->vm_start) >> PAGE_SHIFT;
                        new_vma->vm_raend = 0;
                        if (new_vma->vm_file)
                                get_file(new_vma->vm_file);
                        if (new_vma->vm_ops && new_vma->vm_ops->open)
                                new_vma->vm_ops->open(new_vma);
                        insert_vm_struct(current->mm, new_vma);
                }

                /* XXX: possible errors masked, mapping might remain */
                do_munmap(current->mm, addr, old_len);

                current->mm->total_vm += new_len >> PAGE_SHIFT;
                if (vm_locked) {
                        current->mm->locked_vm += new_len >> PAGE_SHIFT;
                        if (new_len > old_len)
                                make_pages_present(new_addr + old_len,
                                                   new_addr + new_len);
                }
                return new_addr;
        }
        if (allocated_vma)
                kmem_cache_free(vm_area_cachep, new_vma);
out:
        return -ENOMEM;
}
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
unsigned long do_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;

        if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
                goto out;

        if (addr & ~PAGE_MASK)
                goto out;

        old_len = PAGE_ALIGN(old_len);
        new_len = PAGE_ALIGN(new_len);

        if (old_len > TASK_SIZE || addr > TASK_SIZE - old_len)
                goto out;

        if (addr >= TASK_SIZE)
                goto out;

        /* new_addr is only valid if MREMAP_FIXED is specified */
        if (flags & MREMAP_FIXED) {
                if (new_addr & ~PAGE_MASK)
                        goto out;
                if (!(flags & MREMAP_MAYMOVE))
                        goto out;

                if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
                        goto out;

                if (new_addr >= TASK_SIZE)
                        goto out;

                /*
                 * Allow new_len == 0 only if new_addr == addr
                 * to preserve truncation in place (that was working
                 * safe and some app may depend on it).
                 */
                if (unlikely(!new_len && new_addr != addr))
                        goto out;

                /* Check if the location we're moving into overlaps the
                 * old location at all, and fail if it does.
                 */
                if ((new_addr <= addr) && (new_addr+new_len) > addr)
                        goto out;

                if ((addr <= new_addr) && (addr+old_len) > new_addr)
                        goto out;

                ret = do_munmap(current->mm, new_addr, new_len);
                if (ret && new_len)
                        goto out;
        }

        /*
         * Always allow a shrinking remap: that just unmaps
         * the unnecessary pages..
         */
        if (old_len >= new_len) {
                ret = do_munmap(current->mm, addr+new_len, old_len - new_len);
                if (ret && old_len != new_len)
                        goto out;
                ret = addr;
                if (!(flags & MREMAP_FIXED) || (new_addr == addr))
                        goto out;
        }

        /*
         * Ok, we need to grow.. or relocate.
         */
        ret = -EFAULT;
        vma = find_vma(current->mm, addr);
        if (!vma || vma->vm_start > addr)
                goto out;
        /* We can't remap across vm area boundaries */
        if (old_len > vma->vm_end - addr)
                goto out;
        if (vma->vm_flags & VM_DONTEXPAND) {
                if (new_len > old_len)
                        goto out;
        }
        if (vma->vm_flags & VM_LOCKED) {
                unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
                locked += new_len - old_len;
                ret = -EAGAIN;
                if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
                        goto out;
        }
        ret = -ENOMEM;
        if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len)
            > current->rlim[RLIMIT_AS].rlim_cur)
                goto out;
        /* Private writable mapping? Check memory availability.. */
        if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
            !(flags & MAP_NORESERVE) &&
            !vm_enough_memory((new_len - old_len) >> PAGE_SHIFT))
                goto out;

#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
        /* mremap() unsupported for I/O mappings in Xenolinux. */
        ret = -EINVAL;
        if (vma->vm_flags & VM_IO)
                goto out;
#endif

        /* old_len exactly to the end of the area..
         * And we're not relocating the area.
         */
        if (old_len == vma->vm_end - addr &&
            !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
            (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
                unsigned long max_addr = TASK_SIZE;
                if (vma->vm_next)
                        max_addr = vma->vm_next->vm_start;
                /* can we just expand the current mapping? */
                if (max_addr - addr >= new_len) {
                        int pages = (new_len - old_len) >> PAGE_SHIFT;
                        spin_lock(&vma->vm_mm->page_table_lock);
                        vma->vm_end = addr + new_len;
                        spin_unlock(&vma->vm_mm->page_table_lock);
                        current->mm->total_vm += pages;
                        if (vma->vm_flags & VM_LOCKED) {
                                current->mm->locked_vm += pages;
                                make_pages_present(addr + old_len,
                                                   addr + new_len);
                        }
                        ret = addr;
                        goto out;
                }
        }

        /*
         * We weren't able to just expand or shrink the area,
         * we need to create a new one and move it..
         */
        ret = -ENOMEM;
        if (flags & MREMAP_MAYMOVE) {
                if (!(flags & MREMAP_FIXED)) {
                        unsigned long map_flags = 0;
                        if (vma->vm_flags & VM_SHARED)
                                map_flags |= MAP_SHARED;

                        new_addr = get_unmapped_area(vma->vm_file, 0, new_len, vma->vm_pgoff, map_flags);
                        ret = new_addr;
                        if (new_addr & ~PAGE_MASK)
                                goto out;
                }
                ret = move_vma(vma, addr, old_len, new_len, new_addr);
        }
out:
        return ret;
}
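
/*
 * Syscall entry point: all the real work is done by do_mremap(),
 * under the write side of mmap_sem.
 */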
asmlinkage unsigned long sys_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        unsigned long ret;

        down_write(&current->mm->mmap_sem);
        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
        up_write(&current->mm->mmap_sem);
        return ret;
}
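
For context, sys_mremap() above is the kernel service routine for the
mremap(2) system call. The program below is a minimal userspace sketch
(not part of this file, illustrative only) of the path the code above
handles: an anonymous private mapping grown with MREMAP_MAYMOVE, so
do_mremap() may either expand the vma in place or fall through to
move_vma().

#define _GNU_SOURCE             /* for mremap() and MREMAP_MAYMOVE in glibc */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t old_len = 4096, new_len = 8192;

        /* Anonymous private mapping: the VM_WRITE/!VM_SHARED case above. */
        char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;
        strcpy(p, "contents survive the move");

        /* MREMAP_MAYMOVE lets the kernel relocate the mapping if it
         * cannot simply be extended in place. */
        char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
        if (q == MAP_FAILED)
                return 1;

        printf("%s (moved: %s)\n", q, q == p ? "no" : "yes");
        munmap(q, new_len);
        return 0;
}

Without MREMAP_MAYMOVE, the call fails with ENOMEM when the next vma
blocks in-place expansion, which is exactly the max_addr check in
do_mremap() above.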