view linux-2.4.30-xen-sparse/mm/mremap.c @ 5517:10e9028c8e3d

bitkeeper revision 1.1718.1.10 (42b7b19aqOS_1M8I4pIOFjiTPYWV-g)

Merge bk://xenbits.xensource.com/xen-unstable.bk
into spot.cl.cam.ac.uk:C:/Documents and Settings/iap10/xen-unstable.bk
author iap10@spot.cl.cam.ac.uk
date Tue Jun 21 06:20:10 2005 +0000 (2005-06-21)
parents 85fcf3b1b7a5
children 56a63f9f378f

/*
 *  linux/mm/mremap.c
 *
 *  (C) Copyright 1996 Linus Torvalds
 */

#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>

extern int vm_enough_memory(long pages);

static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t * pgd;
        pmd_t * pmd;
        pte_t * pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                goto end;
        if (pgd_bad(*pgd)) {
                pgd_ERROR(*pgd);
                pgd_clear(pgd);
                goto end;
        }

        pmd = pmd_offset(pgd, addr);
        if (pmd_none(*pmd))
                goto end;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                goto end;
        }

        pte = pte_offset(pmd, addr);
        if (pte_none(*pte))
                pte = NULL;
end:
        return pte;
}

static inline pte_t *alloc_one_pte(struct mm_struct *mm, unsigned long addr)
{
        pmd_t * pmd;
        pte_t * pte = NULL;

        pmd = pmd_alloc(mm, pgd_offset(mm, addr), addr);
        if (pmd)
                pte = pte_alloc(mm, pmd, addr);
        return pte;
}

static inline int copy_one_pte(struct mm_struct *mm, pte_t * src, pte_t * dst)
{
        int error = 0;
        pte_t pte;

        if (!pte_none(*src)) {
                pte = ptep_get_and_clear(src);
                if (!dst) {
                        /* No dest?  We must put it back. */
                        dst = src;
                        error++;
                }
                set_pte(dst, pte);
        }
        return error;
}

static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
{
        int error = 0;
        pte_t * src, * dst;

        spin_lock(&mm->page_table_lock);
        src = get_one_pte(mm, old_addr);
        if (src) {
                dst = alloc_one_pte(mm, new_addr);
                /* alloc_one_pte() may drop and retake page_table_lock
                 * while allocating, so the source pte must be looked
                 * up again before it is used. */
                src = get_one_pte(mm, old_addr);
                if (src)
                        error = copy_one_pte(mm, src, dst);
        }
        spin_unlock(&mm->page_table_lock);
        return error;
}

static int move_page_tables(struct mm_struct * mm,
        unsigned long new_addr, unsigned long old_addr, unsigned long len)
{
        unsigned long offset = len;

        flush_cache_range(mm, old_addr, old_addr + len);

        /*
         * This is not the clever way to do this, but we're taking the
         * easy way out on the assumption that most remappings will be
         * only a few pages.. This also makes error recovery easier.
         */
        while (offset) {
                offset -= PAGE_SIZE;
                if (move_one_page(mm, old_addr + offset, new_addr + offset))
                        goto oops_we_failed;
        }
        flush_tlb_range(mm, old_addr, old_addr + len);
        return 0;

        /*
         * Ok, the move failed because we didn't have enough pages for
         * the new page table tree. This is unlikely, but we have to
         * take the possibility into account. In that case we just move
         * all the pages back (this will work, because we still have
         * the old page tables)
         */
oops_we_failed:
        flush_cache_range(mm, new_addr, new_addr + len);
        while ((offset += PAGE_SIZE) < len)
                move_one_page(mm, new_addr + offset, old_addr + offset);
        zap_page_range(mm, new_addr, len);
        return -1;
}

static inline unsigned long move_vma(struct vm_area_struct * vma,
        unsigned long addr, unsigned long old_len, unsigned long new_len,
        unsigned long new_addr)
{
        struct mm_struct * mm = vma->vm_mm;
        struct vm_area_struct * new_vma, * next, * prev;
        int allocated_vma;

        new_vma = NULL;
        next = find_vma_prev(mm, new_addr, &prev);
        if (next) {
                if (prev && prev->vm_end == new_addr &&
                    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
                        spin_lock(&mm->page_table_lock);
                        prev->vm_end = new_addr + new_len;
                        spin_unlock(&mm->page_table_lock);
                        new_vma = prev;
                        if (next != prev->vm_next)
                                BUG();
                        if (prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags)) {
                                spin_lock(&mm->page_table_lock);
                                prev->vm_end = next->vm_end;
                                __vma_unlink(mm, next, prev);
                                spin_unlock(&mm->page_table_lock);

                                mm->map_count--;
                                kmem_cache_free(vm_area_cachep, next);
                        }
                } else if (next->vm_start == new_addr + new_len &&
                           can_vma_merge(next, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
                        spin_lock(&mm->page_table_lock);
                        next->vm_start = new_addr;
                        spin_unlock(&mm->page_table_lock);
                        new_vma = next;
                }
        } else {
                prev = find_vma(mm, new_addr-1);
                if (prev && prev->vm_end == new_addr &&
                    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
                        spin_lock(&mm->page_table_lock);
                        prev->vm_end = new_addr + new_len;
                        spin_unlock(&mm->page_table_lock);
                        new_vma = prev;
                }
        }

        allocated_vma = 0;
        if (!new_vma) {
                new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
                if (!new_vma)
                        goto out;
                allocated_vma = 1;
        }

        if (!move_page_tables(current->mm, new_addr, addr, old_len)) {
                unsigned long vm_locked = vma->vm_flags & VM_LOCKED;

                if (allocated_vma) {
                        *new_vma = *vma;
                        new_vma->vm_start = new_addr;
                        new_vma->vm_end = new_addr+new_len;
                        new_vma->vm_pgoff += (addr-vma->vm_start) >> PAGE_SHIFT;
                        new_vma->vm_raend = 0;
                        if (new_vma->vm_file)
                                get_file(new_vma->vm_file);
                        if (new_vma->vm_ops && new_vma->vm_ops->open)
                                new_vma->vm_ops->open(new_vma);
                        insert_vm_struct(current->mm, new_vma);
                }

                /* XXX: possible errors masked, mapping might remain */
                do_munmap(current->mm, addr, old_len);

                current->mm->total_vm += new_len >> PAGE_SHIFT;
                if (vm_locked) {
                        current->mm->locked_vm += new_len >> PAGE_SHIFT;
                        if (new_len > old_len)
                                make_pages_present(new_addr + old_len,
                                                   new_addr + new_len);
                }
                return new_addr;
        }
        if (allocated_vma)
                kmem_cache_free(vm_area_cachep, new_vma);
out:
        return -ENOMEM;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
unsigned long do_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;

        if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
                goto out;

        if (addr & ~PAGE_MASK)
                goto out;

        old_len = PAGE_ALIGN(old_len);
        new_len = PAGE_ALIGN(new_len);

        if (old_len > TASK_SIZE || addr > TASK_SIZE - old_len)
                goto out;

        if (addr >= TASK_SIZE)
                goto out;

        /* new_addr is only valid if MREMAP_FIXED is specified */
        if (flags & MREMAP_FIXED) {
                if (new_addr & ~PAGE_MASK)
                        goto out;
                if (!(flags & MREMAP_MAYMOVE))
                        goto out;

                if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
                        goto out;

                if (new_addr >= TASK_SIZE)
                        goto out;

                /*
                 * Allow new_len == 0 only if new_addr == addr
                 * to preserve truncation in place (that was working
                 * safe and some app may depend on it).
                 */
                if (unlikely(!new_len && new_addr != addr))
                        goto out;

                /* Check if the location we're moving into overlaps the
                 * old location at all, and fail if it does.
                 */
                if ((new_addr <= addr) && (new_addr+new_len) > addr)
                        goto out;

                if ((addr <= new_addr) && (addr+old_len) > new_addr)
                        goto out;

                ret = do_munmap(current->mm, new_addr, new_len);
                if (ret && new_len)
                        goto out;
        }

        /*
         * Always allow a shrinking remap: that just unmaps
         * the unnecessary pages..
         */
        if (old_len >= new_len) {
                ret = do_munmap(current->mm, addr+new_len, old_len - new_len);
                if (ret && old_len != new_len)
                        goto out;
                ret = addr;
                if (!(flags & MREMAP_FIXED) || (new_addr == addr))
                        goto out;
        }

        /*
         * Ok, we need to grow..  or relocate.
         */
        ret = -EFAULT;
        vma = find_vma(current->mm, addr);
        if (!vma || vma->vm_start > addr)
                goto out;
        /* We can't remap across vm area boundaries */
        if (old_len > vma->vm_end - addr)
                goto out;
        if (vma->vm_flags & VM_DONTEXPAND) {
                if (new_len > old_len)
                        goto out;
        }
        if (vma->vm_flags & VM_LOCKED) {
                unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
                locked += new_len - old_len;
                ret = -EAGAIN;
                if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
                        goto out;
        }
        ret = -ENOMEM;
        if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len)
            > current->rlim[RLIMIT_AS].rlim_cur)
                goto out;
        /* Private writable mapping? Check memory availability.. */
        if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
            !(flags & MAP_NORESERVE) &&
            !vm_enough_memory((new_len - old_len) >> PAGE_SHIFT))
                goto out;

#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
        /* mremap() unsupported for I/O mappings in Xenolinux. */
        ret = -EINVAL;
        if (vma->vm_flags & VM_IO)
                goto out;
#endif

        /* old_len exactly to the end of the area..
         * And we're not relocating the area.
         */
        if (old_len == vma->vm_end - addr &&
            !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
            (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
                unsigned long max_addr = TASK_SIZE;
                if (vma->vm_next)
                        max_addr = vma->vm_next->vm_start;
                /* can we just expand the current mapping? */
                if (max_addr - addr >= new_len) {
                        int pages = (new_len - old_len) >> PAGE_SHIFT;
                        spin_lock(&vma->vm_mm->page_table_lock);
                        vma->vm_end = addr + new_len;
                        spin_unlock(&vma->vm_mm->page_table_lock);
                        current->mm->total_vm += pages;
                        if (vma->vm_flags & VM_LOCKED) {
                                current->mm->locked_vm += pages;
                                make_pages_present(addr + old_len,
                                                   addr + new_len);
                        }
                        ret = addr;
                        goto out;
                }
        }

        /*
         * We weren't able to just expand or shrink the area,
         * we need to create a new one and move it..
         */
        ret = -ENOMEM;
        if (flags & MREMAP_MAYMOVE) {
                if (!(flags & MREMAP_FIXED)) {
                        unsigned long map_flags = 0;
                        if (vma->vm_flags & VM_SHARED)
                                map_flags |= MAP_SHARED;

                        new_addr = get_unmapped_area(vma->vm_file, 0, new_len, vma->vm_pgoff, map_flags);
                        ret = new_addr;
                        if (new_addr & ~PAGE_MASK)
                                goto out;
                }
                ret = move_vma(vma, addr, old_len, new_len, new_addr);
        }
out:
        return ret;
}

asmlinkage unsigned long sys_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        unsigned long ret;

        down_write(&current->mm->mmap_sem);
        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
        up_write(&current->mm->mmap_sem);
        return ret;
}
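
For context, a minimal user-space sketch of the path this file implements: sys_mremap() is reached through the mremap(2) wrapper, and passing MREMAP_MAYMOVE lets do_mremap() fall back to the move_vma() relocation path above when the mapping cannot be expanded in place. The program below is illustrative only and is not part of the repository file; the buffer sizes and contents are arbitrary.

/* Grow a one-page anonymous mapping to two pages, allowing a move. */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        void *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        strcpy(p, "hello");

        /* MREMAP_MAYMOVE permits relocation (the move_vma() path) when
         * the adjacent address space is already occupied. */
        void *q = mremap(p, page, 2 * page, MREMAP_MAYMOVE);
        if (q == MAP_FAILED) {
                perror("mremap");
                return 1;
        }
        printf("mapping now at %p (was %p), contents: %s\n", q, p, (char *)q);
        munmap(q, 2 * page);
        return 0;
}

Note that the contents survive even when the mapping moves: the kernel relocates the page tables themselves (move_page_tables() above) rather than copying the data.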