ia64/xen-unstable

linux-2.4.29-xen-sparse/mm/swapfile.c @ 3602:9a9c5a491401

bitkeeper revision 1.1159.235.1 (42000d3dwcPyT8aY4VIPYGCfCAJuQQ)

More x86/64. Status: traps.c now included in the build, but actual building
of IDT doesn't happen, and we need some sort of entry.S. More page-table
building required so that arch_init_memory() can work. And there is something
odd with MP-table parsing; I currently suspect that __init sections are
causing problems.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@viper.(none)
date Tue Feb 01 23:14:05 2005 +0000 (2005-02-01)
parents 610068179f96
children 0a4b76b6b5a0

/*
 * linux/mm/swapfile.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/blkdev.h> /* for blk_size */
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/shm.h>

#include <asm/pgtable.h>

spinlock_t swaplock = SPIN_LOCK_UNLOCKED;
unsigned int nr_swapfiles;
int total_swap_pages;
static int swap_overflow;

static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";

struct swap_list_t swap_list = {-1, -1};

struct swap_info_struct swap_info[MAX_SWAPFILES];

#define SWAPFILE_CLUSTER 256

static inline int scan_swap_map(struct swap_info_struct *si)
{
        unsigned long offset;
        /*
         * We try to cluster swap pages by allocating them
         * sequentially in swap.  Once we've allocated
         * SWAPFILE_CLUSTER pages this way, however, we resort to
         * first-free allocation, starting a new cluster.  This
         * prevents us from scattering swap pages all over the entire
         * swap partition, so that we reduce overall disk seek times
         * between swap pages. -- sct */
        if (si->cluster_nr) {
                while (si->cluster_next <= si->highest_bit) {
                        offset = si->cluster_next++;
                        if (si->swap_map[offset])
                                continue;
                        si->cluster_nr--;
                        goto got_page;
                }
        }
        si->cluster_nr = SWAPFILE_CLUSTER;

        /* try to find an empty (even not aligned) cluster. */
        offset = si->lowest_bit;
check_next_cluster:
        if (offset+SWAPFILE_CLUSTER-1 <= si->highest_bit)
        {
                int nr;
                for (nr = offset; nr < offset+SWAPFILE_CLUSTER; nr++)
                        if (si->swap_map[nr])
                        {
                                offset = nr+1;
                                goto check_next_cluster;
                        }
                /* We found a completely empty cluster, so start
                 * using it.
                 */
                goto got_page;
        }
        /* No luck, so now go fine-grained as usual. -Andrea */
        for (offset = si->lowest_bit; offset <= si->highest_bit ; offset++) {
                if (si->swap_map[offset])
                        continue;
                si->lowest_bit = offset+1;
        got_page:
                if (offset == si->lowest_bit)
                        si->lowest_bit++;
                if (offset == si->highest_bit)
                        si->highest_bit--;
                if (si->lowest_bit > si->highest_bit) {
                        si->lowest_bit = si->max;
                        si->highest_bit = 0;
                }
                si->swap_map[offset] = 1;
                nr_swap_pages--;
                si->cluster_next = offset+1;
                return offset;
        }
        si->lowest_bit = si->max;
        si->highest_bit = 0;
        return 0;
}
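
The clustering policy above is easier to see in isolation. Below is a minimal
userspace sketch of the same cluster-then-first-free idea; the struct and
names are illustrative stand-ins (not kernel API), and the empty-cluster
search and lowest/highest-bit bookkeeping are omitted.

#include <stddef.h>

#define TOY_CLUSTER 256

struct toy_swap {
        unsigned char map[4096];        /* 0 = free, nonzero = in use */
        size_t next;                    /* next slot in current cluster */
        size_t cluster_left;            /* allocations before a rescan */
        size_t max;
};

/* Return a free slot, preferring sequential slots within a cluster. */
static long toy_alloc(struct toy_swap *s)
{
        size_t off;

        if (s->cluster_left) {
                while (s->next < s->max) {
                        off = s->next++;
                        if (s->map[off])
                                continue;
                        s->cluster_left--;
                        s->map[off] = 1;
                        return (long)off;
                }
        }
        /* Cluster used up: first-free scan, then start a new cluster. */
        s->cluster_left = TOY_CLUSTER;
        for (off = 0; off < s->max; off++) {
                if (!s->map[off]) {
                        s->map[off] = 1;
                        s->next = off + 1;
                        return (long)off;
                }
        }
        return -1;      /* no space */
}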

swp_entry_t get_swap_page(void)
{
        struct swap_info_struct * p;
        unsigned long offset;
        swp_entry_t entry;
        int type, wrapped = 0;

        entry.val = 0;  /* Out of memory */
        swap_list_lock();
        type = swap_list.next;
        if (type < 0)
                goto out;
        if (nr_swap_pages <= 0)
                goto out;

        while (1) {
                p = &swap_info[type];
                if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
                        swap_device_lock(p);
                        offset = scan_swap_map(p);
                        swap_device_unlock(p);
                        if (offset) {
                                entry = SWP_ENTRY(type,offset);
                                type = swap_info[type].next;
                                if (type < 0 ||
                                        p->prio != swap_info[type].prio) {
                                                swap_list.next = swap_list.head;
                                } else {
                                        swap_list.next = type;
                                }
                                goto out;
                        }
                }
                type = p->next;
                if (!wrapped) {
                        if (type < 0 || p->prio != swap_info[type].prio) {
                                type = swap_list.head;
                                wrapped = 1;
                        }
                } else
                        if (type < 0)
                                goto out;       /* out of swap space */
        }
out:
        swap_list_unlock();
        return entry;
}
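
get_swap_page() returns a packed swp_entry_t. The real bit layout of
SWP_ENTRY/SWP_TYPE/SWP_OFFSET is architecture-defined in asm/pgtable.h; the
sketch below shows one plausible packing and is illustrative only.

typedef struct { unsigned long val; } toy_entry_t;

/* Illustrative layout: type in bits 1..6, offset in bits 8 and up. */
#define TOY_TYPE_SHIFT          1
#define TOY_TYPE_MASK           0x3f
#define TOY_OFFSET_SHIFT        8

static inline toy_entry_t toy_entry(unsigned long type, unsigned long offset)
{
        toy_entry_t e;
        e.val = (type << TOY_TYPE_SHIFT) | (offset << TOY_OFFSET_SHIFT);
        return e;
}

static inline unsigned long toy_type(toy_entry_t e)
{
        return (e.val >> TOY_TYPE_SHIFT) & TOY_TYPE_MASK;
}

static inline unsigned long toy_offset(toy_entry_t e)
{
        return e.val >> TOY_OFFSET_SHIFT;
}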

static struct swap_info_struct * swap_info_get(swp_entry_t entry)
{
        struct swap_info_struct * p;
        unsigned long offset, type;

        if (!entry.val)
                goto out;
        type = SWP_TYPE(entry);
        if (type >= nr_swapfiles)
                goto bad_nofile;
        p = & swap_info[type];
        if (!(p->flags & SWP_USED))
                goto bad_device;
        offset = SWP_OFFSET(entry);
        if (offset >= p->max)
                goto bad_offset;
        if (!p->swap_map[offset])
                goto bad_free;
        swap_list_lock();
        if (p->prio > swap_info[swap_list.next].prio)
                swap_list.next = type;
        swap_device_lock(p);
        return p;

bad_free:
        printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
        goto out;
bad_offset:
        printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
        goto out;
bad_device:
        printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
        goto out;
bad_nofile:
        printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
out:
        return NULL;
}

static void swap_info_put(struct swap_info_struct * p)
{
        swap_device_unlock(p);
        swap_list_unlock();
}

static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
{
        int count = p->swap_map[offset];

        if (count < SWAP_MAP_MAX) {
                count--;
                p->swap_map[offset] = count;
                if (!count) {
                        if (offset < p->lowest_bit)
                                p->lowest_bit = offset;
                        if (offset > p->highest_bit)
                                p->highest_bit = offset;
                        nr_swap_pages++;
                }
        }
        return count;
}
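
For orientation: swap_map[offset] is a per-slot reference count.
swap_duplicate() (later in this file) increments it, swap_entry_free()
decrements it, and a slot whose count reaches 0 becomes allocatable again; a
count that has saturated at SWAP_MAP_MAX stays pinned until swapoff. A minimal
model of the decrement side, with illustrative names:

enum { TOY_MAP_MAX = 0x7fff };  /* stand-in for SWAP_MAP_MAX */

/* Drop one reference to slot 'off'; return the remaining count. */
static int toy_entry_free(unsigned short *map, unsigned long off)
{
        int count = map[off];

        if (count < TOY_MAP_MAX)        /* saturated slots stay pinned */
                map[off] = --count;
        return count;
}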

/*
 * Caller has made sure that the swap device corresponding to entry
 * is still around or has not been recycled.
 */
void swap_free(swp_entry_t entry)
{
        struct swap_info_struct * p;

        p = swap_info_get(entry);
        if (p) {
                swap_entry_free(p, SWP_OFFSET(entry));
                swap_info_put(p);
        }
}

/*
 * Check if we're the only user of a swap page,
 * when the page is locked.
 */
static int exclusive_swap_page(struct page *page)
{
        int retval = 0;
        struct swap_info_struct * p;
        swp_entry_t entry;

        entry.val = page->index;
        p = swap_info_get(entry);
        if (p) {
                /* Is the only swap cache user the cache itself? */
                if (p->swap_map[SWP_OFFSET(entry)] == 1) {
                        /* Recheck the page count with the pagecache lock held.. */
                        spin_lock(&pagecache_lock);
                        if (page_count(page) - !!page->buffers == 2)
                                retval = 1;
                        spin_unlock(&pagecache_lock);
                }
                swap_info_put(p);
        }
        return retval;
}
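
A worked reading of the test above: "page_count(page) - !!page->buffers == 2"
asks whether the only references left are the caller's and the swap cache's.
A locked swap-cache page with buffer heads attached has page_count == 3 (us +
swap cache + buffers); subtracting !!page->buffers, which is 1, still yields
2, so the page counts as exclusive. Any user mapping pushes the adjusted count
past 2. The predicate in isolation, as a hypothetical helper:

/* Illustrative only: the exclusivity test as a standalone predicate. */
static inline int toy_only_swap_cache_and_us(int page_count, int has_buffers)
{
        return page_count - (has_buffers ? 1 : 0) == 2; /* us + swap cache */
}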

/*
 * We can use this swap cache entry directly
 * if there are no other references to it.
 *
 * Here "exclusive_swap_page()" does the real
 * work, but we opportunistically check whether
 * we need to get all the locks first..
 */
int fastcall can_share_swap_page(struct page *page)
{
        int retval = 0;

        if (!PageLocked(page))
                BUG();
        switch (page_count(page)) {
        case 3:
                if (!page->buffers)
                        break;
                /* Fallthrough */
        case 2:
                if (!PageSwapCache(page))
                        break;
                retval = exclusive_swap_page(page);
                break;
        case 1:
                if (PageReserved(page))
                        break;
                retval = 1;
        }
        return retval;
}

/*
 * Work out if there are any other processes sharing this
 * swap cache page. Free it if you can. Return success.
 */
int fastcall remove_exclusive_swap_page(struct page *page)
{
        int retval;
        struct swap_info_struct * p;
        swp_entry_t entry;

        if (!PageLocked(page))
                BUG();
        if (!PageSwapCache(page))
                return 0;
        if (page_count(page) - !!page->buffers != 2)    /* 2: us + cache */
                return 0;

        entry.val = page->index;
        p = swap_info_get(entry);
        if (!p)
                return 0;

        /* Is the only swap cache user the cache itself? */
        retval = 0;
        if (p->swap_map[SWP_OFFSET(entry)] == 1) {
                /* Recheck the page count with the pagecache lock held.. */
                spin_lock(&pagecache_lock);
                if (page_count(page) - !!page->buffers == 2) {
                        __delete_from_swap_cache(page);
                        SetPageDirty(page);
                        retval = 1;
                }
                spin_unlock(&pagecache_lock);
        }
        swap_info_put(p);

        if (retval) {
                block_flushpage(page, 0);
                swap_free(entry);
                page_cache_release(page);
        }

        return retval;
}

/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
 */
void free_swap_and_cache(swp_entry_t entry)
{
        struct swap_info_struct * p;
        struct page *page = NULL;

        p = swap_info_get(entry);
        if (p) {
                if (swap_entry_free(p, SWP_OFFSET(entry)) == 1)
                        page = find_trylock_page(&swapper_space, entry.val);
                swap_info_put(p);
        }
        if (page) {
                page_cache_get(page);
                /* Only cache user (+us), or swap space full? Free it! */
                if (page_count(page) - !!page->buffers == 2 || vm_swap_full()) {
                        delete_from_swap_cache(page);
                        SetPageDirty(page);
                }
                UnlockPage(page);
                page_cache_release(page);
        }
}

/*
 * The swap entry has been read in advance, and we return 1 to indicate
 * that the page has been used or is no longer needed.
 *
 * Always set the resulting pte to be nowrite (the same as COW pages
 * after one process has exited).  We don't know just how many PTEs will
 * share this swap entry, so be cautious and let do_wp_page work out
 * what to do if a write is requested later.
 */
/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pte(struct vm_area_struct * vma, unsigned long address,
        pte_t *dir, swp_entry_t entry, struct page* page)
{
        pte_t pte = *dir;

        if (likely(pte_to_swp_entry(pte).val != entry.val))
                return;
        if (unlikely(pte_none(pte) || pte_present(pte)))
                return;
        get_page(page);
        set_pte(dir, pte_mkold(mk_pte(page, vma->vm_page_prot)));
        swap_free(entry);
        ++vma->vm_mm->rss;
}

/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
        unsigned long address, unsigned long size, unsigned long offset,
        swp_entry_t entry, struct page* page)
{
        pte_t * pte;
        unsigned long end;

        if (pmd_none(*dir))
                return;
        if (pmd_bad(*dir)) {
                pmd_ERROR(*dir);
                pmd_clear(dir);
                return;
        }
        pte = pte_offset(dir, address);
        offset += address & PMD_MASK;
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                unuse_pte(vma, offset+address-vma->vm_start, pte, entry, page);
                address += PAGE_SIZE;
                pte++;
        } while (address && (address < end));
}

/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
        unsigned long address, unsigned long size,
        swp_entry_t entry, struct page* page)
{
        pmd_t * pmd;
        unsigned long offset, end;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                pgd_ERROR(*dir);
                pgd_clear(dir);
                return;
        }
        pmd = pmd_offset(dir, address);
        offset = address & PGDIR_MASK;
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        if (address >= end)
                BUG();
        do {
                unuse_pmd(vma, pmd, address, end - address, offset, entry,
                          page);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
}

/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static void unuse_vma(struct vm_area_struct * vma, pgd_t *pgdir,
                        swp_entry_t entry, struct page* page)
{
        unsigned long start = vma->vm_start, end = vma->vm_end;

        if (start >= end)
                BUG();
        do {
                unuse_pgd(vma, pgdir, start, end - start, entry, page);
                start = (start + PGDIR_SIZE) & PGDIR_MASK;
                pgdir++;
        } while (start && (start < end));
}
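
The unuse_vma() -> unuse_pgd() -> unuse_pmd() -> unuse_pte() chain above is
the standard 2.4 page-table walk: each level clamps [address, address+size)
to its own span and advances by its own step. A compressed sketch of that
range-clamping pattern, with illustrative i386-like sizes rather than the
real per-arch macros:

#define TOY_PGDIR_SIZE  (1UL << 22)     /* span covered by one pgd entry */
#define TOY_PAGE_SIZE   (1UL << 12)

/* Visit every page in [start, end), one pgd-sized chunk at a time. */
static void toy_walk(unsigned long start, unsigned long end,
                     void (*visit)(unsigned long address))
{
        unsigned long address = start, chunk_end;

        while (address < end) {
                /* Clamp to the end of the current pgd entry's span. */
                chunk_end = (address & ~(TOY_PGDIR_SIZE - 1)) + TOY_PGDIR_SIZE;
                if (chunk_end > end)
                        chunk_end = end;
                for (; address < chunk_end; address += TOY_PAGE_SIZE)
                        visit(address);         /* the unuse_pte() level */
        }
}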

static void unuse_process(struct mm_struct * mm,
                        swp_entry_t entry, struct page* page)
{
        struct vm_area_struct* vma;

        /*
         * Go through process' page directory.
         */
        spin_lock(&mm->page_table_lock);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                pgd_t * pgd = pgd_offset(mm, vma->vm_start);
                unuse_vma(vma, pgd, entry, page);
        }
        XEN_flush_page_update_queue();
        spin_unlock(&mm->page_table_lock);
        return;
}

/*
 * Scan swap_map from current position to next entry still in use.
 * Recycle to start on reaching the end, returning 0 when empty.
 */
static int find_next_to_unuse(struct swap_info_struct *si, int prev)
{
        int max = si->max;
        int i = prev;
        int count;

        /*
         * No need for swap_device_lock(si) here: we're just looking
         * for whether an entry is in use, not modifying it; false
         * hits are okay, and sys_swapoff() has already prevented new
         * allocations from this area (while holding swap_list_lock()).
         */
        for (;;) {
                if (++i >= max) {
                        if (!prev) {
                                i = 0;
                                break;
                        }
                        /*
                         * No entries in use at top of swap_map,
                         * loop back to start and recheck there.
                         */
                        max = prev + 1;
                        prev = 0;
                        i = 1;
                }
                count = si->swap_map[i];
                if (count && count != SWAP_MAP_BAD)
                        break;
        }
        return i;
}
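
In try_to_unuse() below this is driven as a feedback loop: each hit is passed
back in as prev, so the scan resumes just past it, and on reaching the top it
wraps to slot 1 and rechecks the region behind the previous hit, so slots that
came back into use behind the scan are not missed. In outline:

        int i = 0;
        while ((i = find_next_to_unuse(si, i))) {
                /* drain swap slot i; see try_to_unuse() below */
        }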

/*
 * We completely avoid races by reading each swap page in advance,
 * and then search for the process using it.  All the necessary
 * page table adjustments can then be made atomically.
 */
static int try_to_unuse(unsigned int type)
{
        struct swap_info_struct * si = &swap_info[type];
        struct mm_struct *start_mm;
        unsigned short *swap_map;
        unsigned short swcount;
        struct page *page;
        swp_entry_t entry;
        int i = 0;
        int retval = 0;
        int reset_overflow = 0;
        int shmem;

        /*
         * When searching mms for an entry, a good strategy is to
         * start at the first mm we freed the previous entry from
         * (though actually we don't notice whether we or coincidence
         * freed the entry).  Initialize this start_mm with a hold.
         *
         * A simpler strategy would be to start at the last mm we
         * freed the previous entry from; but that would take less
         * advantage of mmlist ordering (now preserved by swap_out()),
         * which clusters forked address spaces together, most recent
         * child immediately after parent.  If we race with dup_mmap(),
         * we very much want to resolve parent before child, otherwise
         * we may miss some entries: using last mm would invert that.
         */
        start_mm = &init_mm;
        atomic_inc(&init_mm.mm_users);

        /*
         * Keep on scanning until all entries have gone.  Usually,
         * one pass through swap_map is enough, but not necessarily:
         * mmput() removes mm from mmlist before exit_mmap() and its
         * zap_page_range().  That's not too bad, those entries are
         * on their way out, and handled faster there than here.
         * do_munmap() behaves similarly, taking the range out of mm's
         * vma list before zap_page_range().  But unfortunately, when
         * unmapping a part of a vma, it takes the whole out first,
         * then reinserts what's left after (might even reschedule if
         * open() method called) - so swap entries may be invisible
         * to swapoff for a while, then reappear - but that is rare.
         */
        while ((i = find_next_to_unuse(si, i))) {
                /*
                 * Get a page for the entry, using the existing swap
                 * cache page if there is one.  Otherwise, get a clean
                 * page and read the swap into it.
                 */
                swap_map = &si->swap_map[i];
                entry = SWP_ENTRY(type, i);
                page = read_swap_cache_async(entry);
                if (!page) {
                        /*
                         * Either swap_duplicate() failed because entry
                         * has been freed independently, and will not be
                         * reused since sys_swapoff() already disabled
                         * allocation from here, or alloc_page() failed.
                         */
                        if (!*swap_map)
                                continue;
                        retval = -ENOMEM;
                        break;
                }

                /*
                 * Don't hold on to start_mm if it looks like exiting.
                 */
                if (atomic_read(&start_mm->mm_users) == 1) {
                        mmput(start_mm);
                        start_mm = &init_mm;
                        atomic_inc(&init_mm.mm_users);
                }

                /*
                 * Wait for and lock page.  When do_swap_page races with
                 * try_to_unuse, do_swap_page can handle the fault much
                 * faster than try_to_unuse can locate the entry.  This
                 * apparently redundant "wait_on_page" lets try_to_unuse
                 * defer to do_swap_page in such a case - in some tests,
                 * do_swap_page and try_to_unuse repeatedly compete.
                 */
                wait_on_page(page);
                lock_page(page);

                /*
                 * Remove all references to entry, without blocking.
                 * Whenever we reach init_mm, there's no address space
                 * to search, but use it as a reminder to search shmem.
                 */
                shmem = 0;
                swcount = *swap_map;
                if (swcount > 1) {
                        flush_page_to_ram(page);
                        if (start_mm == &init_mm)
                                shmem = shmem_unuse(entry, page);
                        else
                                unuse_process(start_mm, entry, page);
                }
                if (*swap_map > 1) {
                        int set_start_mm = (*swap_map >= swcount);
                        struct list_head *p = &start_mm->mmlist;
                        struct mm_struct *new_start_mm = start_mm;
                        struct mm_struct *mm;

                        spin_lock(&mmlist_lock);
                        while (*swap_map > 1 &&
                                        (p = p->next) != &start_mm->mmlist) {
                                mm = list_entry(p, struct mm_struct, mmlist);
                                swcount = *swap_map;
                                if (mm == &init_mm) {
                                        set_start_mm = 1;
                                        spin_unlock(&mmlist_lock);
                                        shmem = shmem_unuse(entry, page);
                                        spin_lock(&mmlist_lock);
                                } else
                                        unuse_process(mm, entry, page);
                                if (set_start_mm && *swap_map < swcount) {
                                        new_start_mm = mm;
                                        set_start_mm = 0;
                                }
                        }
                        atomic_inc(&new_start_mm->mm_users);
                        spin_unlock(&mmlist_lock);
                        mmput(start_mm);
                        start_mm = new_start_mm;
                }

                /*
                 * How could swap count reach 0x7fff when the maximum
                 * pid is 0x7fff, and there's no way to repeat a swap
                 * page within an mm (except in shmem, where it's the
                 * shared object which takes the reference count)?
                 * We believe SWAP_MAP_MAX cannot occur in Linux 2.4.
                 *
                 * If that's wrong, then we should worry more about
                 * exit_mmap() and do_munmap() cases described above:
                 * we might be resetting SWAP_MAP_MAX too early here.
                 * We know "Undead"s can happen, they're okay, so don't
                 * report them; but do report if we reset SWAP_MAP_MAX.
                 */
                if (*swap_map == SWAP_MAP_MAX) {
                        swap_list_lock();
                        swap_device_lock(si);
                        nr_swap_pages++;
                        *swap_map = 1;
                        swap_device_unlock(si);
                        swap_list_unlock();
                        reset_overflow = 1;
                }

                /*
                 * If a reference remains (rare), we would like to leave
                 * the page in the swap cache; but try_to_swap_out could
                 * then re-duplicate the entry once we drop page lock,
                 * so we might loop indefinitely; also, that page could
                 * not be swapped out to other storage meanwhile.  So:
                 * delete from cache even if there's another reference,
                 * after ensuring that the data has been saved to disk -
                 * since if the reference remains (rarer), it will be
                 * read from disk into another page.  Splitting into two
                 * pages would be incorrect if swap supported "shared
                 * private" pages, but they are handled by tmpfs files.
                 *
                 * Note shmem_unuse already deleted swappage from cache,
                 * unless corresponding filepage found already in cache:
                 * in which case it left swappage in cache, lowered its
                 * swap count to pass quickly through the loops above,
                 * and now we must reincrement count to try again later.
                 */
                if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
                        rw_swap_page(WRITE, page);
                        lock_page(page);
                }
                if (PageSwapCache(page)) {
                        if (shmem)
                                swap_duplicate(entry);
                        else
                                delete_from_swap_cache(page);
                }

                /*
                 * So we could skip searching mms once swap count went
                 * to 1, we did not mark any present ptes as dirty: must
                 * mark page dirty so try_to_swap_out will preserve it.
                 */
                SetPageDirty(page);
                UnlockPage(page);
                page_cache_release(page);

                /*
                 * Make sure that we aren't completely killing
                 * interactive performance.  Interruptible check on
                 * signal_pending() would be nice, but changes the spec?
                 */
                if (current->need_resched)
                        schedule();
        }

        mmput(start_mm);
        if (reset_overflow) {
                printk(KERN_WARNING "swapoff: cleared swap entry overflow\n");
                swap_overflow = 0;
        }
        return retval;
}

asmlinkage long sys_swapoff(const char * specialfile)
{
        struct swap_info_struct * p = NULL;
        unsigned short *swap_map;
        struct nameidata nd;
        int i, type, prev;
        int err;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        err = user_path_walk(specialfile, &nd);
        if (err)
                goto out;

        lock_kernel();
        prev = -1;
        swap_list_lock();
        for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
                p = swap_info + type;
                if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
                        if (p->swap_file == nd.dentry)
                                break;
                }
                prev = type;
        }
        err = -EINVAL;
        if (type < 0) {
                swap_list_unlock();
                goto out_dput;
        }

        if (prev < 0) {
                swap_list.head = p->next;
        } else {
                swap_info[prev].next = p->next;
        }
        if (type == swap_list.next) {
                /* just pick something that's safe... */
                swap_list.next = swap_list.head;
        }
        nr_swap_pages -= p->pages;
        total_swap_pages -= p->pages;
        p->flags = SWP_USED;
        swap_list_unlock();
        unlock_kernel();
        err = try_to_unuse(type);
        lock_kernel();
        if (err) {
                /* re-insert swap space back into swap_list */
                swap_list_lock();
                for (prev = -1, i = swap_list.head; i >= 0; prev = i, i = swap_info[i].next)
                        if (p->prio >= swap_info[i].prio)
                                break;
                p->next = i;
                if (prev < 0)
                        swap_list.head = swap_list.next = p - swap_info;
                else
                        swap_info[prev].next = p - swap_info;
                nr_swap_pages += p->pages;
                total_swap_pages += p->pages;
                p->flags = SWP_WRITEOK;
                swap_list_unlock();
                goto out_dput;
        }
        if (p->swap_device)
                blkdev_put(p->swap_file->d_inode->i_bdev, BDEV_SWAP);
        path_release(&nd);

        swap_list_lock();
        swap_device_lock(p);
        nd.mnt = p->swap_vfsmnt;
        nd.dentry = p->swap_file;
        p->swap_vfsmnt = NULL;
        p->swap_file = NULL;
        p->swap_device = 0;
        p->max = 0;
        swap_map = p->swap_map;
        p->swap_map = NULL;
        p->flags = 0;
        swap_device_unlock(p);
        swap_list_unlock();
        vfree(swap_map);
        err = 0;

out_dput:
        unlock_kernel();
        path_release(&nd);
out:
        return err;
}
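
Both sys_swapoff() above (when re-inserting an area after a failed
try_to_unuse()) and sys_swapon() below link an area into the priority-ordered
list by hand. The idiom in isolation, written as a hypothetical helper over
the same swap_list/swap_info structures (-1 terminates the list):

/* Illustrative helper: link swap_info[idx] in by descending priority. */
static void toy_insert_by_prio(int idx)
{
        int i, prev = -1;

        for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
                if (swap_info[idx].prio >= swap_info[i].prio)
                        break;
                prev = i;
        }
        swap_info[idx].next = i;
        if (prev < 0)
                swap_list.head = swap_list.next = idx;
        else
                swap_info[prev].next = idx;
}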

int get_swaparea_info(char *buf)
{
        char * page = (char *) __get_free_page(GFP_KERNEL);
        struct swap_info_struct *ptr = swap_info;
        int i, j, len = 0, usedswap;

        if (!page)
                return -ENOMEM;

        len += sprintf(buf, "Filename\t\t\tType\t\tSize\tUsed\tPriority\n");
        for (i = 0 ; i < nr_swapfiles ; i++, ptr++) {
                if ((ptr->flags & SWP_USED) && ptr->swap_map) {
                        char * path = d_path(ptr->swap_file, ptr->swap_vfsmnt,
                                                page, PAGE_SIZE);

                        len += sprintf(buf + len, "%-31s ", path);

                        if (!ptr->swap_device)
                                len += sprintf(buf + len, "file\t\t");
                        else
                                len += sprintf(buf + len, "partition\t");

                        usedswap = 0;
                        for (j = 0; j < ptr->max; ++j)
                                switch (ptr->swap_map[j]) {
                                        case SWAP_MAP_BAD:
                                        case 0:
                                                continue;
                                        default:
                                                usedswap++;
                                }
                        len += sprintf(buf + len, "%d\t%d\t%d\n", ptr->pages << (PAGE_SHIFT - 10),
                                usedswap << (PAGE_SHIFT - 10), ptr->prio);
                }
        }
        free_page((unsigned long) page);
        return len;
}

int is_swap_partition(kdev_t dev) {
        struct swap_info_struct *ptr = swap_info;
        int i;

        for (i = 0 ; i < nr_swapfiles ; i++, ptr++) {
                if (ptr->flags & SWP_USED)
                        if (ptr->swap_device == dev)
                                return 1;
        }
        return 0;
}

/*
 * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
 *
 * The swapon system call
 */
asmlinkage long sys_swapon(const char * specialfile, int swap_flags)
{
        struct swap_info_struct * p;
        struct nameidata nd;
        struct inode * swap_inode;
        unsigned int type;
        int i, j, prev;
        int error;
        static int least_priority = 0;
        union swap_header *swap_header = 0;
        int swap_header_version;
        int nr_good_pages = 0;
        unsigned long maxpages = 1;
        int swapfilesize;
        struct block_device *bdev = NULL;
        unsigned short *swap_map;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        lock_kernel();
        swap_list_lock();
        p = swap_info;
        for (type = 0 ; type < nr_swapfiles ; type++,p++)
                if (!(p->flags & SWP_USED))
                        break;
        error = -EPERM;
        if (type >= MAX_SWAPFILES) {
                swap_list_unlock();
                goto out;
        }
        if (type >= nr_swapfiles)
                nr_swapfiles = type+1;
        p->flags = SWP_USED;
        p->swap_file = NULL;
        p->swap_vfsmnt = NULL;
        p->swap_device = 0;
        p->swap_map = NULL;
        p->lowest_bit = 0;
        p->highest_bit = 0;
        p->cluster_nr = 0;
        p->sdev_lock = SPIN_LOCK_UNLOCKED;
        p->next = -1;
        if (swap_flags & SWAP_FLAG_PREFER) {
                p->prio =
                        (swap_flags & SWAP_FLAG_PRIO_MASK)>>SWAP_FLAG_PRIO_SHIFT;
        } else {
                p->prio = --least_priority;
        }
        swap_list_unlock();
        error = user_path_walk(specialfile, &nd);
        if (error)
                goto bad_swap_2;

        p->swap_file = nd.dentry;
        p->swap_vfsmnt = nd.mnt;
        swap_inode = nd.dentry->d_inode;
        error = -EINVAL;

        if (S_ISBLK(swap_inode->i_mode)) {
                kdev_t dev = swap_inode->i_rdev;
                struct block_device_operations *bdops;
                devfs_handle_t de;

                if (is_mounted(dev)) {
                        error = -EBUSY;
                        goto bad_swap_2;
                }

                p->swap_device = dev;
                set_blocksize(dev, PAGE_SIZE);

                bd_acquire(swap_inode);
                bdev = swap_inode->i_bdev;
                de = devfs_get_handle_from_inode(swap_inode);
                bdops = devfs_get_ops(de); /* Increments module use count */
                if (bdops) bdev->bd_op = bdops;

                error = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, 0, BDEV_SWAP);
                devfs_put_ops(de);/*Decrement module use count now we're safe*/
                if (error)
                        goto bad_swap_2;
                set_blocksize(dev, PAGE_SIZE);
                error = -ENODEV;
                if (!dev || (blk_size[MAJOR(dev)] &&
                             !blk_size[MAJOR(dev)][MINOR(dev)]))
                        goto bad_swap;
                swapfilesize = 0;
                if (blk_size[MAJOR(dev)])
                        swapfilesize = blk_size[MAJOR(dev)][MINOR(dev)]
                                >> (PAGE_SHIFT - 10);
        } else if (S_ISREG(swap_inode->i_mode))
                swapfilesize = swap_inode->i_size >> PAGE_SHIFT;
        else
                goto bad_swap;

        error = -EBUSY;
        for (i = 0 ; i < nr_swapfiles ; i++) {
                struct swap_info_struct *q = &swap_info[i];
                if (i == type || !q->swap_file)
                        continue;
                if (swap_inode->i_mapping == q->swap_file->d_inode->i_mapping)
                        goto bad_swap;
        }

        swap_header = (void *) __get_free_page(GFP_USER);
        if (!swap_header) {
                printk("Unable to start swapping: out of memory :-)\n");
                error = -ENOMEM;
                goto bad_swap;
        }

        lock_page(virt_to_page(swap_header));
        rw_swap_page_nolock(READ, SWP_ENTRY(type,0), (char *) swap_header);

        if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10))
                swap_header_version = 1;
        else if (!memcmp("SWAPSPACE2",swap_header->magic.magic,10))
                swap_header_version = 2;
        else {
                printk("Unable to find swap-space signature\n");
                error = -EINVAL;
                goto bad_swap;
        }

        switch (swap_header_version) {
        case 1:
                memset(((char *) swap_header)+PAGE_SIZE-10,0,10);
                j = 0;
                p->lowest_bit = 0;
                p->highest_bit = 0;
                for (i = 1 ; i < 8*PAGE_SIZE ; i++) {
                        if (test_bit(i,(char *) swap_header)) {
                                if (!p->lowest_bit)
                                        p->lowest_bit = i;
                                p->highest_bit = i;
                                maxpages = i+1;
                                j++;
                        }
                }
                nr_good_pages = j;
                p->swap_map = vmalloc(maxpages * sizeof(short));
                if (!p->swap_map) {
                        error = -ENOMEM;
                        goto bad_swap;
                }
                for (i = 1 ; i < maxpages ; i++) {
                        if (test_bit(i,(char *) swap_header))
                                p->swap_map[i] = 0;
                        else
                                p->swap_map[i] = SWAP_MAP_BAD;
                }
                break;

        case 2:
                /* Check the swap header's sub-version and the size of
                   the swap file and bad block lists */
                if (swap_header->info.version != 1) {
                        printk(KERN_WARNING
                               "Unable to handle swap header version %d\n",
                               swap_header->info.version);
                        error = -EINVAL;
                        goto bad_swap;
                }

                p->lowest_bit = 1;
                maxpages = SWP_OFFSET(SWP_ENTRY(0,~0UL)) - 1;
                if (maxpages > swap_header->info.last_page)
                        maxpages = swap_header->info.last_page;
                p->highest_bit = maxpages - 1;

                error = -EINVAL;
                if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
                        goto bad_swap;

                /* OK, set up the swap map and apply the bad block list */
                if (!(p->swap_map = vmalloc(maxpages * sizeof(short)))) {
                        error = -ENOMEM;
                        goto bad_swap;
                }

                error = 0;
                memset(p->swap_map, 0, maxpages * sizeof(short));
                for (i=0; i<swap_header->info.nr_badpages; i++) {
                        int page = swap_header->info.badpages[i];
                        if (page <= 0 || page >= swap_header->info.last_page)
                                error = -EINVAL;
                        else
                                p->swap_map[page] = SWAP_MAP_BAD;
                }
                nr_good_pages = swap_header->info.last_page -
                                swap_header->info.nr_badpages -
                                1 /* header page */;
                if (error)
                        goto bad_swap;
        }

        if (swapfilesize && maxpages > swapfilesize) {
                printk(KERN_WARNING
                       "Swap area shorter than signature indicates\n");
                error = -EINVAL;
                goto bad_swap;
        }
        if (!nr_good_pages) {
                printk(KERN_WARNING "Empty swap-file\n");
                error = -EINVAL;
                goto bad_swap;
        }
        p->swap_map[0] = SWAP_MAP_BAD;
        swap_list_lock();
        swap_device_lock(p);
        p->max = maxpages;
        p->flags = SWP_WRITEOK;
        p->pages = nr_good_pages;
        nr_swap_pages += nr_good_pages;
        total_swap_pages += nr_good_pages;
        printk(KERN_INFO "Adding Swap: %dk swap-space (priority %d)\n",
               nr_good_pages<<(PAGE_SHIFT-10), p->prio);

        /* insert swap space into swap_list: */
        prev = -1;
        for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
                if (p->prio >= swap_info[i].prio) {
                        break;
                }
                prev = i;
        }
        p->next = i;
        if (prev < 0) {
                swap_list.head = swap_list.next = p - swap_info;
        } else {
                swap_info[prev].next = p - swap_info;
        }
        swap_device_unlock(p);
        swap_list_unlock();
        error = 0;
        goto out;
bad_swap:
        if (bdev)
                blkdev_put(bdev, BDEV_SWAP);
bad_swap_2:
        swap_list_lock();
        swap_map = p->swap_map;
        nd.mnt = p->swap_vfsmnt;
        nd.dentry = p->swap_file;
        p->swap_device = 0;
        p->swap_file = NULL;
        p->swap_vfsmnt = NULL;
        p->swap_map = NULL;
        p->flags = 0;
        if (!(swap_flags & SWAP_FLAG_PREFER))
                ++least_priority;
        swap_list_unlock();
        if (swap_map)
                vfree(swap_map);
        path_release(&nd);
out:
        if (swap_header)
                free_page((long) swap_header);
        unlock_kernel();
        return error;
}

void si_swapinfo(struct sysinfo *val)
{
        unsigned int i;
        unsigned long nr_to_be_unused = 0;

        swap_list_lock();
        for (i = 0; i < nr_swapfiles; i++) {
                unsigned int j;
                if (swap_info[i].flags != SWP_USED)
                        continue;
                for (j = 0; j < swap_info[i].max; ++j) {
                        switch (swap_info[i].swap_map[j]) {
                                case 0:
                                case SWAP_MAP_BAD:
                                        continue;
                                default:
                                        nr_to_be_unused++;
                        }
                }
        }
        val->freeswap = nr_swap_pages + nr_to_be_unused;
        val->totalswap = total_swap_pages + nr_to_be_unused;
        swap_list_unlock();
}

/*
 * Verify that a swap entry is valid and increment its swap map count.
 *
 * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as
 * "permanent", but will be reclaimed by the next swapoff.
 */
int swap_duplicate(swp_entry_t entry)
{
        struct swap_info_struct * p;
        unsigned long offset, type;
        int result = 0;

        type = SWP_TYPE(entry);
        if (type >= nr_swapfiles)
                goto bad_file;
        p = type + swap_info;
        offset = SWP_OFFSET(entry);

        swap_device_lock(p);
        if (offset < p->max && p->swap_map[offset]) {
                if (p->swap_map[offset] < SWAP_MAP_MAX - 1) {
                        p->swap_map[offset]++;
                        result = 1;
                } else if (p->swap_map[offset] <= SWAP_MAP_MAX) {
                        if (swap_overflow++ < 5)
                                printk(KERN_WARNING "swap_dup: swap entry overflow\n");
                        p->swap_map[offset] = SWAP_MAP_MAX;
                        result = 1;
                }
        }
        swap_device_unlock(p);
out:
        return result;

bad_file:
        printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
        goto out;
}

/*
 * Prior swap_duplicate protects against swap device deletion.
 */
void get_swaphandle_info(swp_entry_t entry, unsigned long *offset,
                        kdev_t *dev, struct inode **swapf)
{
        unsigned long type;
        struct swap_info_struct *p;

        type = SWP_TYPE(entry);
        if (type >= nr_swapfiles) {
                printk(KERN_ERR "rw_swap_page: %s%08lx\n", Bad_file, entry.val);
                return;
        }

        p = &swap_info[type];
        *offset = SWP_OFFSET(entry);
        if (*offset >= p->max && *offset != 0) {
                printk(KERN_ERR "rw_swap_page: %s%08lx\n", Bad_offset, entry.val);
                return;
        }
        if (p->swap_map && !p->swap_map[*offset]) {
                printk(KERN_ERR "rw_swap_page: %s%08lx\n", Unused_offset, entry.val);
                return;
        }
        if (!(p->flags & SWP_USED)) {
                printk(KERN_ERR "rw_swap_page: %s%08lx\n", Unused_file, entry.val);
                return;
        }

        if (p->swap_device) {
                *dev = p->swap_device;
        } else if (p->swap_file) {
                *swapf = p->swap_file->d_inode;
        } else {
                printk(KERN_ERR "rw_swap_page: no swap file or device\n");
        }
        return;
}

/*
 * swap_device_lock prevents swap_map being freed. Don't grab an extra
 * reference on the swaphandle, it doesn't matter if it becomes unused.
 */
int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
{
        int ret = 0, i = 1 << page_cluster;
        unsigned long toff;
        struct swap_info_struct *swapdev = SWP_TYPE(entry) + swap_info;

        if (!page_cluster)      /* no readahead */
                return 0;
        toff = (SWP_OFFSET(entry) >> page_cluster) << page_cluster;
        if (!toff)              /* first page is swap header */
                toff++, i--;
        *offset = toff;

        swap_device_lock(swapdev);
        do {
                /* Don't read-ahead past the end of the swap area */
                if (toff >= swapdev->max)
                        break;
                /* Don't read in free or bad pages */
                if (!swapdev->swap_map[toff])
                        break;
                if (swapdev->swap_map[toff] == SWAP_MAP_BAD)
                        break;
                toff++;
                ret++;
        } while (--i);
        swap_device_unlock(swapdev);
        return ret;
}
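
A worked example of the window arithmetic above, assuming page_cluster == 3
(an 8-page window): for SWP_OFFSET(entry) == 13, toff = (13 >> 3) << 3 = 8, so
the readahead window is offsets 8..15 with i counting down from 8. For an
entry at offset 5 the window would start at offset 0, the header page, so
toff is bumped to 1 and the window shrinks to offsets 1..7.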