ia64/xen-unstable
view linux-2.4-xen-sparse/mm/memory.c @ 5853:9b713b8d1100

changeset: Fix the path to qemu-dm
author:    kaf24@firebug.cl.cam.ac.uk
date:      Mon Jul 25 21:02:24 2005 +0000 (2005-07-25)
parents:   56a63f9f378f
children:  8799d14bef77 9312a3e8a6f8 dfaf788ab18c

line source
1 /*
2 * linux/mm/memory.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 */
7 /*
8 * demand-loading started 01.12.91 - seems it is high on the list of
9 * things wanted, and it should be easy to implement. - Linus
10 */
12 /*
13 * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
14 * pages started 02.12.91, seems to work. - Linus.
15 *
16 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
17 * would have taken more than the 6M I have free, but it worked well as
18 * far as I could see.
19 *
20 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
21 */
23 /*
24 * Real VM (paging to/from disk) started 18.12.91. Much more work and
25 * thought has to go into this. Oh, well..
26 * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
27 * Found it. Everything seems to work now.
28 * 20.12.91 - Ok, making the swap-device changeable like the root.
29 */
31 /*
32 * 05.04.94 - Multi-page memory management added for v1.1.
33 * Idea by Alex Bligh (alex@cconcepts.co.uk)
34 *
35 * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
36 * (Gerhard.Wichert@pdb.siemens.de)
37 */
39 #include <linux/mm.h>
40 #include <linux/mman.h>
41 #include <linux/swap.h>
42 #include <linux/smp_lock.h>
43 #include <linux/swapctl.h>
44 #include <linux/iobuf.h>
45 #include <linux/highmem.h>
46 #include <linux/pagemap.h>
47 #include <linux/module.h>
49 #include <asm/pgalloc.h>
50 #include <asm/uaccess.h>
51 #include <asm/tlb.h>
53 unsigned long max_mapnr;
54 unsigned long num_physpages;
55 unsigned long num_mappedpages;
56 void * high_memory;
57 struct page *highmem_start_page;
59 /*
60 * We special-case the C-O-W ZERO_PAGE, because it's such
61 * a common occurrence (no need to read the page to know
62 * that it's zero - better for the cache and memory subsystem).
63 */
64 static inline void copy_cow_page(struct page * from, struct page * to, unsigned long address)
65 {
66 if (from == ZERO_PAGE(address)) {
67 clear_user_highpage(to, address);
68 return;
69 }
70 copy_user_highpage(to, from, address);
71 }
73 mem_map_t * mem_map;
75 /*
76 * Called by TLB shootdown
77 */
78 void __free_pte(pte_t pte)
79 {
80 struct page *page = pte_page(pte);
81 if ((!VALID_PAGE(page)) || PageReserved(page))
82 return;
83 if (pte_dirty(pte))
84 set_page_dirty(page);
85 free_page_and_swap_cache(page);
86 }
89 /*
90 * Note: this doesn't free the actual pages themselves. That
91 * has been handled earlier when unmapping all the memory regions.
92 */
93 static inline void free_one_pmd(pmd_t * dir)
94 {
95 pte_t * pte;
97 if (pmd_none(*dir))
98 return;
99 if (pmd_bad(*dir)) {
100 pmd_ERROR(*dir);
101 pmd_clear(dir);
102 return;
103 }
104 pte = pte_offset(dir, 0);
105 pmd_clear(dir);
106 pte_free(pte);
107 }
109 static inline void free_one_pgd(pgd_t * dir)
110 {
111 int j;
112 pmd_t * pmd;
114 if (pgd_none(*dir))
115 return;
116 if (pgd_bad(*dir)) {
117 pgd_ERROR(*dir);
118 pgd_clear(dir);
119 return;
120 }
121 pmd = pmd_offset(dir, 0);
122 pgd_clear(dir);
123 for (j = 0; j < PTRS_PER_PMD ; j++) {
124 prefetchw(pmd+j+(PREFETCH_STRIDE/16));
125 free_one_pmd(pmd+j);
126 }
127 pmd_free(pmd);
128 }
130 /* Low and high watermarks for page table cache.
131 The system should try to have pgt_water[0] <= cache elements <= pgt_water[1]
132 */
133 int pgt_cache_water[2] = { 25, 50 };
135 /* Returns the number of pages freed */
136 int check_pgt_cache(void)
137 {
138 return do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
139 }
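/*
 * Editor's note (not part of the original file): check_pgt_cache() simply
 * forwards the two watermarks above to the per-architecture helper.  A
 * hedged sketch of what do_check_pgt_cache() typically does on the i386-
 * style quicklist ports (the primitive names below are the usual 2.4 ones
 * and may differ per architecture):
 *
 *	int do_check_pgt_cache(int low, int high)
 *	{
 *		int freed = 0;
 *		if (pgtable_cache_size > high) {
 *			do {
 *				if (pgd_quicklist)
 *					free_pgd_slow(get_pgd_fast()), freed++;
 *				if (pte_quicklist)
 *					pte_free_slow(pte_alloc_one_fast(NULL, 0)), freed++;
 *			} while (pgtable_cache_size > low);
 *		}
 *		return freed;
 *	}
 *
 * i.e. nothing is trimmed until the cache exceeds pgt_cache_water[1], and it
 * is then shrunk back down to pgt_cache_water[0].
 */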
142 /*
143 * This function clears all user-level page tables of a process - this
144 * is needed by execve(), so that old pages aren't in the way.
145 */
146 void clear_page_tables(struct mm_struct *mm, unsigned long first, int nr)
147 {
148 pgd_t * page_dir = mm->pgd;
150 spin_lock(&mm->page_table_lock);
151 page_dir += first;
152 do {
153 free_one_pgd(page_dir);
154 page_dir++;
155 } while (--nr);
156 spin_unlock(&mm->page_table_lock);
158 /* keep the page table cache within bounds */
159 check_pgt_cache();
160 }
162 #define PTE_TABLE_MASK ((PTRS_PER_PTE-1) * sizeof(pte_t))
163 #define PMD_TABLE_MASK ((PTRS_PER_PMD-1) * sizeof(pmd_t))
165 /*
166 * copy one vm_area from one task to the other. Assumes the page tables
167 * already present in the new task to be cleared in the whole range
168 * covered by this vma.
169 *
170 * 08Jan98 Merged into one routine from several inline routines to reduce
171 * variable count and make things faster. -jj
172 *
173 * dst->page_table_lock is held on entry and exit,
174 * but may be dropped within pmd_alloc() and pte_alloc().
175 */
176 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
177 struct vm_area_struct *vma)
178 {
179 pgd_t * src_pgd, * dst_pgd;
180 unsigned long address = vma->vm_start;
181 unsigned long end = vma->vm_end;
182 unsigned long cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
184 src_pgd = pgd_offset(src, address)-1;
185 dst_pgd = pgd_offset(dst, address)-1;
187 for (;;) {
188 pmd_t * src_pmd, * dst_pmd;
190 src_pgd++; dst_pgd++;
192 /* copy_pmd_range */
194 if (pgd_none(*src_pgd))
195 goto skip_copy_pmd_range;
196 if (pgd_bad(*src_pgd)) {
197 pgd_ERROR(*src_pgd);
198 pgd_clear(src_pgd);
199 skip_copy_pmd_range: address = (address + PGDIR_SIZE) & PGDIR_MASK;
200 if (!address || (address >= end))
201 goto out;
202 continue;
203 }
205 src_pmd = pmd_offset(src_pgd, address);
206 dst_pmd = pmd_alloc(dst, dst_pgd, address);
207 if (!dst_pmd)
208 goto nomem;
210 do {
211 pte_t * src_pte, * dst_pte;
213 /* copy_pte_range */
215 if (pmd_none(*src_pmd))
216 goto skip_copy_pte_range;
217 if (pmd_bad(*src_pmd)) {
218 pmd_ERROR(*src_pmd);
219 pmd_clear(src_pmd);
220 skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
221 if (address >= end)
222 goto out;
223 goto cont_copy_pmd_range;
224 }
226 src_pte = pte_offset(src_pmd, address);
227 dst_pte = pte_alloc(dst, dst_pmd, address);
228 if (!dst_pte)
229 goto nomem;
231 spin_lock(&src->page_table_lock);
232 do {
233 pte_t pte = *src_pte;
234 struct page *ptepage;
236 /* copy_one_pte */
238 if (pte_none(pte))
239 goto cont_copy_pte_range_noset;
240 if (!pte_present(pte)) {
241 swap_duplicate(pte_to_swp_entry(pte));
242 goto cont_copy_pte_range;
243 }
244 ptepage = pte_page(pte);
245 if ((!VALID_PAGE(ptepage)) ||
246 PageReserved(ptepage))
247 goto cont_copy_pte_range;
249 /* If it's a COW mapping, write protect it both in the parent and the child */
250 if (cow && pte_write(pte)) {
251 ptep_set_wrprotect(src_pte);
252 pte = *src_pte;
253 }
255 /* If it's a shared mapping, mark it clean in the child */
256 if (vma->vm_flags & VM_SHARED)
257 pte = pte_mkclean(pte);
258 pte = pte_mkold(pte);
259 get_page(ptepage);
260 dst->rss++;
262 cont_copy_pte_range: set_pte(dst_pte, pte);
263 cont_copy_pte_range_noset: address += PAGE_SIZE;
264 if (address >= end)
265 goto out_unlock;
266 src_pte++;
267 dst_pte++;
268 } while ((unsigned long)src_pte & PTE_TABLE_MASK);
269 spin_unlock(&src->page_table_lock);
271 cont_copy_pmd_range: src_pmd++;
272 dst_pmd++;
273 } while ((unsigned long)src_pmd & PMD_TABLE_MASK);
274 }
275 out_unlock:
276 spin_unlock(&src->page_table_lock);
277 out:
278 return 0;
279 nomem:
280 return -ENOMEM;
281 }
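/*
 * Editor's note (not part of the original file): copy_page_range() is the
 * fork()-time helper.  A minimal sketch of the usual caller, modelled on
 * 2.4's dup_mmap() in kernel/fork.c (locals are illustrative):
 *
 *	struct vm_area_struct *vma;
 *
 *	for (vma = src->mmap; vma; vma = vma->vm_next) {
 *		...
 *		if (copy_page_range(dst, src, vma))
 *			goto fail_nomem;	(the caller then tears down dst)
 *		...
 *	}
 *
 * Note that private writable pages are not duplicated here: the 'cow' path
 * write-protects the PTE in both parent and child, and the actual copy is
 * deferred to do_wp_page() on the first write fault.
 */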
283 /*
284 * Return indicates whether a page was freed so caller can adjust rss
285 */
286 static inline void forget_pte(pte_t page)
287 {
288 if (!pte_none(page)) {
289 printk("forget_pte: old mapping existed!\n");
290 BUG();
291 }
292 }
294 static inline int zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address, unsigned long size)
295 {
296 unsigned long offset;
297 pte_t * ptep;
298 int freed = 0;
300 if (pmd_none(*pmd))
301 return 0;
302 if (pmd_bad(*pmd)) {
303 pmd_ERROR(*pmd);
304 pmd_clear(pmd);
305 return 0;
306 }
307 ptep = pte_offset(pmd, address);
308 offset = address & ~PMD_MASK;
309 if (offset + size > PMD_SIZE)
310 size = PMD_SIZE - offset;
311 size &= PAGE_MASK;
312 for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
313 pte_t pte = *ptep;
314 if (pte_none(pte))
315 continue;
316 if (pte_present(pte)) {
317 struct page *page = pte_page(pte);
318 if (VALID_PAGE(page) && !PageReserved(page))
319 freed ++;
320 /* This will eventually call __free_pte on the pte. */
321 tlb_remove_page(tlb, ptep, address + offset);
322 } else {
323 free_swap_and_cache(pte_to_swp_entry(pte));
324 pte_clear(ptep);
325 }
326 }
328 return freed;
329 }
331 static inline int zap_pmd_range(mmu_gather_t *tlb, pgd_t * dir, unsigned long address, unsigned long size)
332 {
333 pmd_t * pmd;
334 unsigned long end;
335 int freed;
337 if (pgd_none(*dir))
338 return 0;
339 if (pgd_bad(*dir)) {
340 pgd_ERROR(*dir);
341 pgd_clear(dir);
342 return 0;
343 }
344 pmd = pmd_offset(dir, address);
345 end = address + size;
346 if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
347 end = ((address + PGDIR_SIZE) & PGDIR_MASK);
348 freed = 0;
349 do {
350 freed += zap_pte_range(tlb, pmd, address, end - address);
351 address = (address + PMD_SIZE) & PMD_MASK;
352 pmd++;
353 } while (address < end);
354 return freed;
355 }
357 /*
358 * remove user pages in a given range.
359 */
360 void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
361 {
362 mmu_gather_t *tlb;
363 pgd_t * dir;
364 unsigned long start = address, end = address + size;
365 int freed = 0;
367 dir = pgd_offset(mm, address);
369 /*
370 * This is a long-lived spinlock. That's fine.
371 * There's no contention, because the page table
372 * lock only protects against kswapd anyway, and
373 * even if kswapd happened to be looking at this
374 * process we _want_ it to get stuck.
375 */
376 if (address >= end)
377 BUG();
378 spin_lock(&mm->page_table_lock);
379 flush_cache_range(mm, address, end);
380 tlb = tlb_gather_mmu(mm);
382 do {
383 freed += zap_pmd_range(tlb, dir, address, end - address);
384 address = (address + PGDIR_SIZE) & PGDIR_MASK;
385 dir++;
386 } while (address && (address < end));
388 /* this will flush any remaining tlb entries */
389 tlb_finish_mmu(tlb, start, end);
391 /*
392 * Update rss for the mm_struct (not necessarily current->mm)
393 * Notice that rss is an unsigned long.
394 */
395 if (mm->rss > freed)
396 mm->rss -= freed;
397 else
398 mm->rss = 0;
399 spin_unlock(&mm->page_table_lock);
400 }
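/*
 * Editor's note (not part of the original file): zap_page_range() is the
 * teardown counterpart of the copy/map routines above; the munmap(), exit
 * and truncate paths (see vmtruncate_list() below) drop a region's user
 * PTEs with a call of the form
 *
 *	zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start);
 *
 * leaving the freeing of the page-table pages themselves to
 * clear_page_tables().
 */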
402 /*
403 * Do a quick page-table lookup for a single page.
404 */
405 static struct page * follow_page(struct mm_struct *mm, unsigned long address, int write)
406 {
407 pgd_t *pgd;
408 pmd_t *pmd;
409 pte_t *ptep, pte;
411 pgd = pgd_offset(mm, address);
412 if (pgd_none(*pgd) || pgd_bad(*pgd))
413 goto out;
415 pmd = pmd_offset(pgd, address);
416 if (pmd_none(*pmd) || pmd_bad(*pmd))
417 goto out;
419 ptep = pte_offset(pmd, address);
420 if (!ptep)
421 goto out;
423 pte = *ptep;
424 if (pte_present(pte)) {
425 if (!write ||
426 (pte_write(pte) && pte_dirty(pte)))
427 return pte_page(pte);
428 }
430 out:
431 return 0;
432 }
434 /*
435 * Given a physical address, is there a useful struct page pointing to
436 * it? This may become more complex in the future if we start dealing
437 * with IO-aperture pages in kiobufs.
438 */
440 static inline struct page * get_page_map(struct page *page)
441 {
442 if (!VALID_PAGE(page))
443 return 0;
444 return page;
445 }
447 /*
448 * Please read Documentation/cachetlb.txt before using this function,
449 * accessing foreign memory spaces can cause cache coherency problems.
450 *
451 * Accessing a VM_IO area is even more dangerous, therefore the function
452 * fails if pages is != NULL and a VM_IO area is found.
453 */
454 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
455 int len, int write, int force, struct page **pages, struct vm_area_struct **vmas)
456 {
457 int i;
458 unsigned int flags;
460 /*
461 * Require read or write permissions.
462 * If 'force' is set, we only require the "MAY" flags.
463 */
464 flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
465 flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
466 i = 0;
468 do {
469 struct vm_area_struct * vma;
471 vma = find_extend_vma(mm, start);
473 if ( !vma || (pages && vma->vm_flags & VM_IO) || !(flags & vma->vm_flags) )
474 return i ? : -EFAULT;
476 spin_lock(&mm->page_table_lock);
477 do {
478 struct page *map;
479 while (!(map = follow_page(mm, start, write))) {
480 spin_unlock(&mm->page_table_lock);
481 switch (handle_mm_fault(mm, vma, start, write)) {
482 case 1:
483 tsk->min_flt++;
484 break;
485 case 2:
486 tsk->maj_flt++;
487 break;
488 case 0:
489 if (i) return i;
490 return -EFAULT;
491 default:
492 if (i) return i;
493 return -ENOMEM;
494 }
495 spin_lock(&mm->page_table_lock);
496 }
497 if (pages) {
498 pages[i] = get_page_map(map);
499 /* FIXME: call the correct function,
500 * depending on the type of the found page
501 */
502 if (!pages[i] || PageReserved(pages[i])) {
503 if (pages[i] != ZERO_PAGE(start))
504 goto bad_page;
505 } else
506 page_cache_get(pages[i]);
507 }
508 if (vmas)
509 vmas[i] = vma;
510 i++;
511 start += PAGE_SIZE;
512 len--;
513 } while(len && start < vma->vm_end);
514 spin_unlock(&mm->page_table_lock);
515 } while(len);
516 out:
517 return i;
519 /*
520 * We found an invalid page in the VMA. Release all we have
521 * so far and fail.
522 */
523 bad_page:
524 spin_unlock(&mm->page_table_lock);
525 while (i--)
526 page_cache_release(pages[i]);
527 i = -EFAULT;
528 goto out;
529 }
531 EXPORT_SYMBOL(get_user_pages);
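/*
 * Editor's note (not part of the original file): a minimal usage sketch for
 * get_user_pages() under these 2.4 semantics.  The caller holds mm->mmap_sem
 * for reading around the call and drops every pinned page afterwards with
 * page_cache_release(); the function name and variables below are
 * illustrative only.
 *
 *	static int pin_user_buffer(unsigned long uaddr, int nr, int write,
 *				   struct page **pages)
 *	{
 *		int got;
 *
 *		down_read(&current->mm->mmap_sem);
 *		got = get_user_pages(current, current->mm, uaddr, nr,
 *				     write, 0, pages, NULL);
 *		up_read(&current->mm->mmap_sem);
 *
 *		if (got < 0)
 *			return got;
 *		if (got < nr) {
 *			while (got--)
 *				page_cache_release(pages[got]);
 *			return -EFAULT;
 *		}
 *		return 0;
 *	}
 *
 * map_user_kiobuf() below follows exactly this pattern.
 */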
533 /*
534 * Force in an entire range of pages from the current process's user VA,
535 * and pin them in physical memory.
536 */
537 #define dprintk(x...)
539 int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
540 {
541 int pgcount, err;
542 struct mm_struct * mm;
544 /* Make sure the iobuf is not already mapped somewhere. */
545 if (iobuf->nr_pages)
546 return -EINVAL;
548 mm = current->mm;
549 dprintk ("map_user_kiobuf: begin\n");
551 pgcount = (va + len + PAGE_SIZE - 1)/PAGE_SIZE - va/PAGE_SIZE;
552 /* mapping 0 bytes is not permitted */
553 if (!pgcount) BUG();
554 err = expand_kiobuf(iobuf, pgcount);
555 if (err)
556 return err;
558 iobuf->locked = 0;
559 iobuf->offset = va & (PAGE_SIZE-1);
560 iobuf->length = len;
562 /* Try to fault in all of the necessary pages */
563 down_read(&mm->mmap_sem);
564 /* rw==READ means read from disk, write into memory area */
565 err = get_user_pages(current, mm, va, pgcount,
566 (rw==READ), 0, iobuf->maplist, NULL);
567 up_read(&mm->mmap_sem);
568 if (err < 0) {
569 unmap_kiobuf(iobuf);
570 dprintk ("map_user_kiobuf: end %d\n", err);
571 return err;
572 }
573 iobuf->nr_pages = err;
574 while (pgcount--) {
575 /* FIXME: flush superflous for rw==READ,
576 * probably wrong function for rw==WRITE
577 */
578 flush_dcache_page(iobuf->maplist[pgcount]);
579 }
580 dprintk ("map_user_kiobuf: end OK\n");
581 return 0;
582 }
584 /*
585 * Mark all of the pages in a kiobuf as dirty
586 *
587 * We need to be able to deal with short reads from disk: if an IO error
588 * occurs, the number of bytes read into memory may be less than the
589 * size of the kiobuf, so we have to stop marking pages dirty once the
590 * requested byte count has been reached.
591 *
592 * Must be called from process context - set_page_dirty() takes VFS locks.
593 */
595 void mark_dirty_kiobuf(struct kiobuf *iobuf, int bytes)
596 {
597 int index, offset, remaining;
598 struct page *page;
600 index = iobuf->offset >> PAGE_SHIFT;
601 offset = iobuf->offset & ~PAGE_MASK;
602 remaining = bytes;
603 if (remaining > iobuf->length)
604 remaining = iobuf->length;
606 while (remaining > 0 && index < iobuf->nr_pages) {
607 page = iobuf->maplist[index];
609 if (!PageReserved(page))
610 set_page_dirty(page);
612 remaining -= (PAGE_SIZE - offset);
613 offset = 0;
614 index++;
615 }
616 }
618 /*
619 * Unmap all of the pages referenced by a kiobuf. We release the pages,
620 * and unlock them if they were locked.
621 */
623 void unmap_kiobuf (struct kiobuf *iobuf)
624 {
625 int i;
626 struct page *map;
628 for (i = 0; i < iobuf->nr_pages; i++) {
629 map = iobuf->maplist[i];
630 if (map) {
631 if (iobuf->locked)
632 UnlockPage(map);
633 /* FIXME: cache flush missing for rw==READ
634 * FIXME: call the correct reference counting function
635 */
636 page_cache_release(map);
637 }
638 }
640 iobuf->nr_pages = 0;
641 iobuf->locked = 0;
642 }
645 /*
646 * Lock down all of the pages of a kiovec for IO.
647 *
648 * If any page is mapped twice in the kiovec, we return the error -EINVAL.
649 *
650 * The optional wait parameter causes the lock call to block until all
651 * pages can be locked if set. If wait==0, the lock operation is
652 * aborted if any locked pages are found and -EAGAIN is returned.
653 */
655 int lock_kiovec(int nr, struct kiobuf *iovec[], int wait)
656 {
657 struct kiobuf *iobuf;
658 int i, j;
659 struct page *page, **ppage;
660 int doublepage = 0;
661 int repeat = 0;
663 repeat:
665 for (i = 0; i < nr; i++) {
666 iobuf = iovec[i];
668 if (iobuf->locked)
669 continue;
671 ppage = iobuf->maplist;
672 for (j = 0; j < iobuf->nr_pages; ppage++, j++) {
673 page = *ppage;
674 if (!page)
675 continue;
677 if (TryLockPage(page)) {
678 while (j--) {
679 struct page *tmp = *--ppage;
680 if (tmp)
681 UnlockPage(tmp);
682 }
683 goto retry;
684 }
685 }
686 iobuf->locked = 1;
687 }
689 return 0;
691 retry:
693 /*
694 * We couldn't lock one of the pages. Undo the locking so far,
695 * wait on the page we got to, and try again.
696 */
698 unlock_kiovec(nr, iovec);
699 if (!wait)
700 return -EAGAIN;
702 /*
703 * Did the release also unlock the page we got stuck on?
704 */
705 if (!PageLocked(page)) {
706 /*
707 * If so, we may well have the page mapped twice
708 * in the IO address range. Bad news. Of
709 * course, it _might_ just be a coincidence,
710 * but if it happens more than once, chances
711 * are we have a double-mapped page.
712 */
713 if (++doublepage >= 3)
714 return -EINVAL;
716 /* Try again... */
717 wait_on_page(page);
718 }
720 if (++repeat < 16)
721 goto repeat;
722 return -EAGAIN;
723 }
725 /*
726 * Unlock all of the pages of a kiovec after IO.
727 */
729 int unlock_kiovec(int nr, struct kiobuf *iovec[])
730 {
731 struct kiobuf *iobuf;
732 int i, j;
733 struct page *page, **ppage;
735 for (i = 0; i < nr; i++) {
736 iobuf = iovec[i];
738 if (!iobuf->locked)
739 continue;
740 iobuf->locked = 0;
742 ppage = iobuf->maplist;
743 for (j = 0; j < iobuf->nr_pages; ppage++, j++) {
744 page = *ppage;
745 if (!page)
746 continue;
747 UnlockPage(page);
748 }
749 }
750 return 0;
751 }
753 static inline void zeromap_pte_range(pte_t * pte, unsigned long address,
754 unsigned long size, pgprot_t prot)
755 {
756 unsigned long end;
758 address &= ~PMD_MASK;
759 end = address + size;
760 if (end > PMD_SIZE)
761 end = PMD_SIZE;
762 do {
763 pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
764 pte_t oldpage = ptep_get_and_clear(pte);
765 set_pte(pte, zero_pte);
766 forget_pte(oldpage);
767 address += PAGE_SIZE;
768 pte++;
769 } while (address && (address < end));
770 }
772 static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address,
773 unsigned long size, pgprot_t prot)
774 {
775 unsigned long end;
777 address &= ~PGDIR_MASK;
778 end = address + size;
779 if (end > PGDIR_SIZE)
780 end = PGDIR_SIZE;
781 do {
782 pte_t * pte = pte_alloc(mm, pmd, address);
783 if (!pte)
784 return -ENOMEM;
785 zeromap_pte_range(pte, address, end - address, prot);
786 address = (address + PMD_SIZE) & PMD_MASK;
787 pmd++;
788 } while (address && (address < end));
789 return 0;
790 }
792 int zeromap_page_range(unsigned long address, unsigned long size, pgprot_t prot)
793 {
794 int error = 0;
795 pgd_t * dir;
796 unsigned long beg = address;
797 unsigned long end = address + size;
798 struct mm_struct *mm = current->mm;
800 dir = pgd_offset(mm, address);
801 flush_cache_range(mm, beg, end);
802 if (address >= end)
803 BUG();
805 spin_lock(&mm->page_table_lock);
806 do {
807 pmd_t *pmd = pmd_alloc(mm, dir, address);
808 error = -ENOMEM;
809 if (!pmd)
810 break;
811 error = zeromap_pmd_range(mm, pmd, address, end - address, prot);
812 if (error)
813 break;
814 address = (address + PGDIR_SIZE) & PGDIR_MASK;
815 dir++;
816 } while (address && (address < end));
817 spin_unlock(&mm->page_table_lock);
818 flush_tlb_range(mm, beg, end);
819 return error;
820 }
822 /*
823 * maps a range of physical memory into the requested pages. the old
824 * mappings are removed. any references to nonexistent pages results
825 * in null mappings (currently treated as "copy-on-access")
826 */
827 static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
828 unsigned long phys_addr, pgprot_t prot)
829 {
830 unsigned long end;
832 address &= ~PMD_MASK;
833 end = address + size;
834 if (end > PMD_SIZE)
835 end = PMD_SIZE;
836 do {
837 struct page *page;
838 pte_t oldpage;
839 oldpage = ptep_get_and_clear(pte);
841 page = virt_to_page(__va(phys_addr));
842 if ((!VALID_PAGE(page)) || PageReserved(page))
843 set_pte(pte, mk_pte_phys(phys_addr, prot));
844 forget_pte(oldpage);
845 address += PAGE_SIZE;
846 phys_addr += PAGE_SIZE;
847 pte++;
848 } while (address && (address < end));
849 }
851 static inline int remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
852 unsigned long phys_addr, pgprot_t prot)
853 {
854 unsigned long end;
856 address &= ~PGDIR_MASK;
857 end = address + size;
858 if (end > PGDIR_SIZE)
859 end = PGDIR_SIZE;
860 phys_addr -= address;
861 do {
862 pte_t * pte = pte_alloc(mm, pmd, address);
863 if (!pte)
864 return -ENOMEM;
865 remap_pte_range(pte, address, end - address, address + phys_addr, prot);
866 address = (address + PMD_SIZE) & PMD_MASK;
867 pmd++;
868 } while (address && (address < end));
869 return 0;
870 }
872 /* Note: this is only safe if the mm semaphore is held when called. */
873 int remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
874 {
875 int error = 0;
876 pgd_t * dir;
877 unsigned long beg = from;
878 unsigned long end = from + size;
879 struct mm_struct *mm = current->mm;
881 phys_addr -= from;
882 dir = pgd_offset(mm, from);
883 flush_cache_range(mm, beg, end);
884 if (from >= end)
885 BUG();
887 spin_lock(&mm->page_table_lock);
888 do {
889 pmd_t *pmd = pmd_alloc(mm, dir, from);
890 error = -ENOMEM;
891 if (!pmd)
892 break;
893 error = remap_pmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
894 if (error)
895 break;
896 from = (from + PGDIR_SIZE) & PGDIR_MASK;
897 dir++;
898 } while (from && (from < end));
899 spin_unlock(&mm->page_table_lock);
900 flush_tlb_range(mm, beg, end);
901 return error;
902 }
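/*
 * Editor's note (not part of the original file): remap_page_range() is the
 * classic 2.4 way for a driver to expose device or reserved memory to
 * userspace from its file_operations mmap handler, e.g.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		if (remap_page_range(vma->vm_start, MYDRV_PHYS_BASE,
 *				     size, vma->vm_page_prot))
 *			return -EAGAIN;
 *		return 0;
 *	}
 *
 * MYDRV_PHYS_BASE is a placeholder physical address.  The mmap() path
 * already holds mm->mmap_sem, satisfying the locking note above; drivers
 * normally also mark such VMAs VM_IO/VM_RESERVED so the pages are left
 * alone by swapping and core dumps.
 */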
904 /*
905 * Establish a new mapping:
906 * - flush the old one
907 * - update the page tables
908 * - inform the TLB about the new one
909 *
910 * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
911 */
912 static inline void establish_pte(struct vm_area_struct * vma, unsigned long address, pte_t *page_table, pte_t entry)
913 {
914 #ifdef CONFIG_XEN
915 if ( likely(vma->vm_mm == current->mm) ) {
916 HYPERVISOR_update_va_mapping(address, entry, UVMF_INVLPG|UVMF_LOCAL);
917 } else {
918 set_pte(page_table, entry);
919 flush_tlb_page(vma, address);
920 }
921 #else
922 set_pte(page_table, entry);
923 flush_tlb_page(vma, address);
924 #endif
925 update_mmu_cache(vma, address, entry);
926 }
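/*
 * Editor's note (not part of the original file): the CONFIG_XEN branch above
 * is the Xen-specific change in this sparse tree.  When the faulting address
 * space is the one currently loaded (vma->vm_mm == current->mm), the PTE
 * write and the flush of that one virtual address are folded into a single
 * HYPERVISOR_update_va_mapping() hypercall (UVMF_INVLPG | UVMF_LOCAL), which
 * also lets the hypervisor validate the new entry.  For a foreign mm the
 * code falls back to the generic set_pte() + flush_tlb_page() pair, because
 * the hypercall only operates on the currently installed page tables.
 */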
928 /*
929 * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
930 */
931 static inline void break_cow(struct vm_area_struct * vma, struct page * new_page, unsigned long address,
932 pte_t *page_table)
933 {
934 flush_page_to_ram(new_page);
935 flush_cache_page(vma, address);
936 establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
937 }
939 /*
940 * This routine handles present pages, when users try to write
941 * to a shared page. It is done by copying the page to a new address
942 * and decrementing the shared-page counter for the old page.
943 *
944 * Goto-purists beware: the only reason for goto's here is that it results
945 * in better assembly code.. The "default" path will see no jumps at all.
946 *
947 * Note that this routine assumes that the protection checks have been
948 * done by the caller (the low-level page fault routine in most cases).
949 * Thus we can safely just mark it writable once we've done any necessary
950 * COW.
951 *
952 * We also mark the page dirty at this point even though the page will
953 * change only once the write actually happens. This avoids a few races,
954 * and potentially makes it more efficient.
955 *
956 * We hold the mm semaphore and the page_table_lock on entry and exit
957 * with the page_table_lock released.
958 */
959 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
960 unsigned long address, pte_t *page_table, pte_t pte)
961 {
962 struct page *old_page, *new_page;
964 old_page = pte_page(pte);
965 if (!VALID_PAGE(old_page))
966 goto bad_wp_page;
968 if (!TryLockPage(old_page)) {
969 int reuse = can_share_swap_page(old_page);
970 unlock_page(old_page);
971 if (reuse) {
972 flush_cache_page(vma, address);
973 establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
974 spin_unlock(&mm->page_table_lock);
975 return 1; /* Minor fault */
976 }
977 }
979 /*
980 * Ok, we need to copy. Oh, well..
981 */
982 page_cache_get(old_page);
983 spin_unlock(&mm->page_table_lock);
985 new_page = alloc_page(GFP_HIGHUSER);
986 if (!new_page)
987 goto no_mem;
988 copy_cow_page(old_page,new_page,address);
990 /*
991 * Re-check the pte - we dropped the lock
992 */
993 spin_lock(&mm->page_table_lock);
994 if (pte_same(*page_table, pte)) {
995 if (PageReserved(old_page))
996 ++mm->rss;
997 break_cow(vma, new_page, address, page_table);
998 if (vm_anon_lru)
999 lru_cache_add(new_page);
1001 /* Free the old page.. */
1002 new_page = old_page;
1003 }
1004 spin_unlock(&mm->page_table_lock);
1005 page_cache_release(new_page);
1006 page_cache_release(old_page);
1007 return 1; /* Minor fault */
1009 bad_wp_page:
1010 spin_unlock(&mm->page_table_lock);
1011 printk("do_wp_page: bogus page at address %08lx (page 0x%lx)\n",address,(unsigned long)old_page);
1012 return -1;
1013 no_mem:
1014 page_cache_release(old_page);
1015 return -1;
1016 }
1018 static void vmtruncate_list(struct vm_area_struct *mpnt, unsigned long pgoff)
1019 {
1020 do {
1021 struct mm_struct *mm = mpnt->vm_mm;
1022 unsigned long start = mpnt->vm_start;
1023 unsigned long end = mpnt->vm_end;
1024 unsigned long len = end - start;
1025 unsigned long diff;
1027 /* mapping wholly truncated? */
1028 if (mpnt->vm_pgoff >= pgoff) {
1029 zap_page_range(mm, start, len);
1030 continue;
1031 }
1033 /* mapping wholly unaffected? */
1034 len = len >> PAGE_SHIFT;
1035 diff = pgoff - mpnt->vm_pgoff;
1036 if (diff >= len)
1037 continue;
1039 /* Ok, partially affected.. */
1040 start += diff << PAGE_SHIFT;
1041 len = (len - diff) << PAGE_SHIFT;
1042 zap_page_range(mm, start, len);
1043 } while ((mpnt = mpnt->vm_next_share) != NULL);
1044 }
1046 /*
1047 * Handle all mappings that got truncated by a "truncate()"
1048 * system call.
1050 * NOTE! We have to be ready to update the memory sharing
1051 * between the file and the memory map for a potential last
1052 * incomplete page. Ugly, but necessary.
1053 */
1054 int vmtruncate(struct inode * inode, loff_t offset)
1055 {
1056 unsigned long pgoff;
1057 struct address_space *mapping = inode->i_mapping;
1058 unsigned long limit;
1060 if (inode->i_size < offset)
1061 goto do_expand;
1062 inode->i_size = offset;
1063 spin_lock(&mapping->i_shared_lock);
1064 if (!mapping->i_mmap && !mapping->i_mmap_shared)
1065 goto out_unlock;
1067 pgoff = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1068 if (mapping->i_mmap != NULL)
1069 vmtruncate_list(mapping->i_mmap, pgoff);
1070 if (mapping->i_mmap_shared != NULL)
1071 vmtruncate_list(mapping->i_mmap_shared, pgoff);
1073 out_unlock:
1074 spin_unlock(&mapping->i_shared_lock);
1075 truncate_inode_pages(mapping, offset);
1076 goto out_truncate;
1078 do_expand:
1079 limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
1080 if (limit != RLIM_INFINITY && offset > limit)
1081 goto out_sig;
1082 if (offset > inode->i_sb->s_maxbytes)
1083 goto out;
1084 inode->i_size = offset;
1086 out_truncate:
1087 if (inode->i_op && inode->i_op->truncate) {
1088 lock_kernel();
1089 inode->i_op->truncate(inode);
1090 unlock_kernel();
1091 }
1092 return 0;
1093 out_sig:
1094 send_sig(SIGXFSZ, current, 0);
1095 out:
1096 return -EFBIG;
1097 }
1099 /*
1100 * Primitive swap readahead code. We simply read an aligned block of
1101 * (1 << page_cluster) entries in the swap area. This method is chosen
1102 * because it doesn't cost us any seek time. We also make sure to queue
1103 * the 'original' request together with the readahead ones...
1104 */
1105 void swapin_readahead(swp_entry_t entry)
1106 {
1107 int i, num;
1108 struct page *new_page;
1109 unsigned long offset;
1111 /*
1112 * Get the number of handles we should do readahead io to.
1113 */
1114 num = valid_swaphandles(entry, &offset);
1115 for (i = 0; i < num; offset++, i++) {
1116 /* Ok, do the async read-ahead now */
1117 new_page = read_swap_cache_async(SWP_ENTRY(SWP_TYPE(entry), offset));
1118 if (!new_page)
1119 break;
1120 page_cache_release(new_page);
1121 }
1122 return;
1123 }
1125 /*
1126 * We hold the mm semaphore and the page_table_lock on entry and
1127 * should release the pagetable lock on exit..
1128 */
1129 static int do_swap_page(struct mm_struct * mm,
1130 struct vm_area_struct * vma, unsigned long address,
1131 pte_t * page_table, pte_t orig_pte, int write_access)
1132 {
1133 struct page *page;
1134 swp_entry_t entry = pte_to_swp_entry(orig_pte);
1135 pte_t pte;
1136 int ret = 1;
1138 spin_unlock(&mm->page_table_lock);
1139 page = lookup_swap_cache(entry);
1140 if (!page) {
1141 swapin_readahead(entry);
1142 page = read_swap_cache_async(entry);
1143 if (!page) {
1144 /*
1145 * Back out if somebody else faulted in this pte while
1146 * we released the page table lock.
1147 */
1148 int retval;
1149 spin_lock(&mm->page_table_lock);
1150 retval = pte_same(*page_table, orig_pte) ? -1 : 1;
1151 spin_unlock(&mm->page_table_lock);
1152 return retval;
1153 }
1155 /* Had to read the page from swap area: Major fault */
1156 ret = 2;
1157 }
1159 mark_page_accessed(page);
1161 lock_page(page);
1163 /*
1164 * Back out if somebody else faulted in this pte while we
1165 * released the page table lock.
1166 */
1167 spin_lock(&mm->page_table_lock);
1168 if (!pte_same(*page_table, orig_pte)) {
1169 spin_unlock(&mm->page_table_lock);
1170 unlock_page(page);
1171 page_cache_release(page);
1172 return 1;
1173 }
1175 /* The page isn't present yet, go ahead with the fault. */
1177 swap_free(entry);
1178 if (vm_swap_full())
1179 remove_exclusive_swap_page(page);
1181 mm->rss++;
1182 pte = mk_pte(page, vma->vm_page_prot);
1183 if (write_access && can_share_swap_page(page))
1184 pte = pte_mkdirty(pte_mkwrite(pte));
1185 unlock_page(page);
1187 flush_page_to_ram(page);
1188 flush_icache_page(vma, page);
1189 #ifdef CONFIG_XEN
1190 if ( likely(vma->vm_mm == current->mm) )
1191 HYPERVISOR_update_va_mapping(address, pte, 0);
1192 else
1193 set_pte(page_table, pte);
1194 #else
1195 set_pte(page_table, pte);
1196 #endif
1198 /* No need to invalidate - it was non-present before */
1199 update_mmu_cache(vma, address, pte);
1200 spin_unlock(&mm->page_table_lock);
1201 return ret;
1202 }
1204 /*
1205 * We are called with the MM semaphore and page_table_lock
1206 * spinlock held to protect against concurrent faults in
1207 * multithreaded programs.
1208 */
1209 static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma, pte_t *page_table, int write_access, unsigned long addr)
1210 {
1211 pte_t entry;
1213 /* Read-only mapping of ZERO_PAGE. */
1214 entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
1216 /* ..except if it's a write access */
1217 if (write_access) {
1218 struct page *page;
1220 /* Allocate our own private page. */
1221 spin_unlock(&mm->page_table_lock);
1223 page = alloc_page(GFP_HIGHUSER);
1224 if (!page)
1225 goto no_mem;
1226 clear_user_highpage(page, addr);
1228 spin_lock(&mm->page_table_lock);
1229 if (!pte_none(*page_table)) {
1230 page_cache_release(page);
1231 spin_unlock(&mm->page_table_lock);
1232 return 1;
1233 }
1234 mm->rss++;
1235 flush_page_to_ram(page);
1236 entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
1237 if (vm_anon_lru)
1238 lru_cache_add(page);
1239 mark_page_accessed(page);
1240 }
1242 #ifdef CONFIG_XEN
1243 if ( likely(vma->vm_mm == current->mm) )
1244 HYPERVISOR_update_va_mapping(addr, entry, 0);
1245 else
1246 set_pte(page_table, entry);
1247 #else
1248 set_pte(page_table, entry);
1249 #endif
1251 /* No need to invalidate - it was non-present before */
1252 update_mmu_cache(vma, addr, entry);
1253 spin_unlock(&mm->page_table_lock);
1254 return 1; /* Minor fault */
1256 no_mem:
1257 return -1;
1258 }
1260 /*
1261 * do_no_page() tries to create a new page mapping. It aggressively
1262 * tries to share with existing pages, but makes a separate copy if
1263 * the "write_access" parameter is true in order to avoid the next
1264 * page fault.
1266 * As this is called only for pages that do not currently exist, we
1267 * do not need to flush old virtual caches or the TLB.
1269 * This is called with the MM semaphore held and the page table
1270 * spinlock held. Exit with the spinlock released.
1271 */
1272 static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma,
1273 unsigned long address, int write_access, pte_t *page_table)
1274 {
1275 struct page * new_page;
1276 pte_t entry;
1278 if (!vma->vm_ops || !vma->vm_ops->nopage)
1279 return do_anonymous_page(mm, vma, page_table, write_access, address);
1280 spin_unlock(&mm->page_table_lock);
1282 new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, 0);
1284 if (new_page == NULL) /* no page was available -- SIGBUS */
1285 return 0;
1286 if (new_page == NOPAGE_OOM)
1287 return -1;
1289 /*
1290 * Should we do an early C-O-W break?
1291 */
1292 if (write_access && !(vma->vm_flags & VM_SHARED)) {
1293 struct page * page = alloc_page(GFP_HIGHUSER);
1294 if (!page) {
1295 page_cache_release(new_page);
1296 return -1;
1297 }
1298 copy_user_highpage(page, new_page, address);
1299 page_cache_release(new_page);
1300 if (vm_anon_lru)
1301 lru_cache_add(page);
1302 new_page = page;
1303 }
1305 spin_lock(&mm->page_table_lock);
1306 /*
1307 * This silly early PAGE_DIRTY setting removes a race
1308 * due to the bad i386 page protection. But it's valid
1309 * for other architectures too.
1311 * Note that if write_access is true, we either now have
1312 * an exclusive copy of the page, or this is a shared mapping,
1313 * so we can make it writable and dirty to avoid having to
1314 * handle that later.
1315 */
1316 /* Only go through if we didn't race with anybody else... */
1317 if (pte_none(*page_table)) {
1318 if (!PageReserved(new_page))
1319 ++mm->rss;
1320 flush_page_to_ram(new_page);
1321 flush_icache_page(vma, new_page);
1322 entry = mk_pte(new_page, vma->vm_page_prot);
1323 if (write_access)
1324 entry = pte_mkwrite(pte_mkdirty(entry));
1325 #ifdef CONFIG_XEN
1326 if ( likely(vma->vm_mm == current->mm) )
1327 HYPERVISOR_update_va_mapping(address, entry, 0);
1328 else
1329 set_pte(page_table, entry);
1330 #else
1331 set_pte(page_table, entry);
1332 #endif
1333 } else {
1334 /* One of our sibling threads was faster, back out. */
1335 page_cache_release(new_page);
1336 spin_unlock(&mm->page_table_lock);
1337 return 1;
1338 }
1340 /* no need to invalidate: a not-present page shouldn't be cached */
1341 update_mmu_cache(vma, address, entry);
1342 spin_unlock(&mm->page_table_lock);
1343 return 2; /* Major fault */
1344 }
1346 /*
1347 * These routines also need to handle stuff like marking pages dirty
1348 * and/or accessed for architectures that don't do it in hardware (most
1349 * RISC architectures). The early dirtying is also good on the i386.
1351 * There is also a hook called "update_mmu_cache()" that architectures
1352 * with external mmu caches can use to update those (ie the Sparc or
1353 * PowerPC hashed page tables that act as extended TLBs).
1355 * Note the "page_table_lock". It is to protect against kswapd removing
1356 * pages from under us. Note that kswapd only ever _removes_ pages, never
1357 * adds them. As such, once we have noticed that the page is not present,
1358 * we can drop the lock early.
1360 * The adding of pages is protected by the MM semaphore (which we hold),
1361 * so we don't need to worry about a page being suddenly been added into
1362 * our VM.
1364 * We enter with the pagetable spinlock held, we are supposed to
1365 * release it when done.
1366 */
1367 static inline int handle_pte_fault(struct mm_struct *mm,
1368 struct vm_area_struct * vma, unsigned long address,
1369 int write_access, pte_t * pte)
1370 {
1371 pte_t entry;
1373 entry = *pte;
1374 if (!pte_present(entry)) {
1375 /*
1376 * If it truly wasn't present, we know that kswapd
1377 * and the PTE updates will not touch it later. So
1378 * drop the lock.
1379 */
1380 if (pte_none(entry))
1381 return do_no_page(mm, vma, address, write_access, pte);
1382 return do_swap_page(mm, vma, address, pte, entry, write_access);
1383 }
1385 if (write_access) {
1386 if (!pte_write(entry))
1387 return do_wp_page(mm, vma, address, pte, entry);
1389 entry = pte_mkdirty(entry);
1390 }
1391 entry = pte_mkyoung(entry);
1392 establish_pte(vma, address, pte, entry);
1393 spin_unlock(&mm->page_table_lock);
1394 return 1;
1395 }
1397 /*
1398 * By the time we get here, we already hold the mm semaphore
1399 */
1400 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
1401 unsigned long address, int write_access)
1402 {
1403 pgd_t *pgd;
1404 pmd_t *pmd;
1406 current->state = TASK_RUNNING;
1407 pgd = pgd_offset(mm, address);
1409 /*
1410 * We need the page table lock to synchronize with kswapd
1411 * and the SMP-safe atomic PTE updates.
1412 */
1413 spin_lock(&mm->page_table_lock);
1414 pmd = pmd_alloc(mm, pgd, address);
1416 if (pmd) {
1417 pte_t * pte = pte_alloc(mm, pmd, address);
1418 if (pte)
1419 return handle_pte_fault(mm, vma, address, write_access, pte);
1420 }
1421 spin_unlock(&mm->page_table_lock);
1422 return -1;
1423 }
1425 /*
1426 * Allocate page middle directory.
1428 * We've already handled the fast-path in-line, and we own the
1429 * page table lock.
1431 * On a two-level page table, this ends up actually being entirely
1432 * optimized away.
1433 */
1434 pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
1435 {
1436 pmd_t *new;
1438 /* "fast" allocation can happen without dropping the lock.. */
1439 new = pmd_alloc_one_fast(mm, address);
1440 if (!new) {
1441 spin_unlock(&mm->page_table_lock);
1442 new = pmd_alloc_one(mm, address);
1443 spin_lock(&mm->page_table_lock);
1444 if (!new)
1445 return NULL;
1447 /*
1448 * Because we dropped the lock, we should re-check the
1449 * entry, as somebody else could have populated it..
1450 */
1451 if (!pgd_none(*pgd)) {
1452 pmd_free(new);
1453 check_pgt_cache();
1454 goto out;
1455 }
1456 }
1457 pgd_populate(mm, pgd, new);
1458 out:
1459 return pmd_offset(pgd, address);
1460 }
1462 /*
1463 * Allocate the page table directory.
1465 * We've already handled the fast-path in-line, and we own the
1466 * page table lock.
1467 */
1468 pte_t fastcall *pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
1469 {
1470 if (pmd_none(*pmd)) {
1471 pte_t *new;
1473 /* "fast" allocation can happen without dropping the lock.. */
1474 new = pte_alloc_one_fast(mm, address);
1475 if (!new) {
1476 spin_unlock(&mm->page_table_lock);
1477 new = pte_alloc_one(mm, address);
1478 spin_lock(&mm->page_table_lock);
1479 if (!new)
1480 return NULL;
1482 /*
1483 * Because we dropped the lock, we should re-check the
1484 * entry, as somebody else could have populated it..
1485 */
1486 if (!pmd_none(*pmd)) {
1487 pte_free(new);
1488 check_pgt_cache();
1489 goto out;
1490 }
1491 }
1492 pmd_populate(mm, pmd, new);
1493 }
1494 out:
1495 return pte_offset(pmd, address);
1496 }
1498 int make_pages_present(unsigned long addr, unsigned long end)
1499 {
1500 int ret, len, write;
1501 struct vm_area_struct * vma;
1503 vma = find_vma(current->mm, addr);
1504 write = (vma->vm_flags & VM_WRITE) != 0;
1505 if (addr >= end)
1506 BUG();
1507 if (end > vma->vm_end)
1508 BUG();
1509 len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
1510 ret = get_user_pages(current, current->mm, addr,
1511 len, write, 0, NULL, NULL);
1512 return ret == len ? 0 : -1;
1513 }
1515 struct page * vmalloc_to_page(void * vmalloc_addr)
1516 {
1517 unsigned long addr = (unsigned long) vmalloc_addr;
1518 struct page *page = NULL;
1519 pmd_t *pmd;
1520 pte_t *pte;
1521 pgd_t *pgd;
1523 pgd = pgd_offset_k(addr);
1524 if (!pgd_none(*pgd)) {
1525 pmd = pmd_offset(pgd, addr);
1526 if (!pmd_none(*pmd)) {
1527 pte = pte_offset(pmd, addr);
1528 if (pte_present(*pte)) {
1529 page = pte_page(*pte);
1530 }
1531 }
1532 }
1533 return page;
1534 }
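/*
 * Editor's note (not part of the original file): vmalloc_to_page() walks the
 * kernel page tables (pgd_offset_k) to recover the struct page behind a
 * vmalloc()ed address, one page at a time, e.g.
 *
 *	struct page *pg = vmalloc_to_page(buf + i * PAGE_SIZE);
 *
 * Drivers use this when a virtually contiguous vmalloc buffer has to be
 * handed to code that wants struct page pointers, such as when building
 * scatter-gather lists or implementing a nopage() handler for mmap of a
 * vmalloc area.
 */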