xen/common/page_alloc.c @ 9776:72f9c751d3ea (ia64/xen-unstable)

Replace &foo[0] with foo where the latter seems cleaner (which is usually,
and particularly when it's an argument to one of the bitops functions).

Signed-off-by: Keir Fraser <keir@xensource.com>
Author:   kaf24@firebug.cl.cam.ac.uk
Date:     Wed Apr 19 18:32:20 2006 +0100
Parents:  bb316b4df46f
Children: 808430428622

/******************************************************************************
 * page_alloc.c
 *
 * Simple buddy heap allocator for Xen.
 *
 * Copyright (c) 2002-2004 K A Fraser
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#include <xen/spinlock.h>
#include <xen/mm.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/shadow.h>
#include <xen/domain_page.h>
#include <xen/keyhandler.h>
#include <asm/page.h>

/*
 * Comma-separated list of hexadecimal page numbers containing bad bytes.
 * e.g. 'badpage=0x3f45,0x8a321'.
 */
static char opt_badpage[100] = "";
string_param("badpage", opt_badpage);

/*
 * Amount of memory to reserve in a low-memory (<4GB) pool for specific
 * allocation requests. Ordinary requests will not fall back to the
 * lowmem emergency pool.
 */
static unsigned long lowmem_emergency_pool_pages;
static void parse_lowmem_emergency_pool(char *s)
{
    unsigned long long bytes;
    bytes = parse_size_and_unit(s);
    lowmem_emergency_pool_pages = bytes >> PAGE_SHIFT;
}
custom_param("lowmem_emergency_pool", parse_lowmem_emergency_pool);

#define round_pgdown(_p)  ((_p)&PAGE_MASK)
#define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)

static spinlock_t page_scrub_lock = SPIN_LOCK_UNLOCKED;
LIST_HEAD(page_scrub_list);


/*********************
 * ALLOCATION BITMAP
 *  One bit per page of memory. Bit set => page is allocated.
 */

static unsigned long *alloc_bitmap;
#define PAGES_PER_MAPWORD (sizeof(unsigned long) * 8)

#define allocated_in_map(_pn)                          \
    ( !! (alloc_bitmap[(_pn)/PAGES_PER_MAPWORD] &      \
          (1UL<<((_pn)&(PAGES_PER_MAPWORD-1)))) )

/*
 * Hint regarding bitwise arithmetic in map_{alloc,free}:
 *  -(1<<n)  sets all bits >= n.
 *  (1<<n)-1 sets all bits < n.
 * Variable names in map_{alloc,free}:
 *  *_idx == Index into `alloc_bitmap' array.
 *  *_off == Bit offset within an element of the `alloc_bitmap' array.
 */
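
/*
 * Worked example (illustration only): with 64-bit map words, allocating
 * pages 3..5 within a single word gives start_off = 3, end_off = 6, so
 * map_alloc() ORs in
 *     ((1UL<<6)-1) & -(1UL<<3)  ==  0x3f & ~0x07  ==  0x38,
 * i.e. exactly bits 3, 4 and 5 of that word.
 */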

static void map_alloc(unsigned long first_page, unsigned long nr_pages)
{
    unsigned long start_off, end_off, curr_idx, end_idx;

#ifndef NDEBUG
    unsigned long i;
    /* Check that the block isn't already allocated. */
    for ( i = 0; i < nr_pages; i++ )
        ASSERT(!allocated_in_map(first_page + i));
#endif

    curr_idx  = first_page / PAGES_PER_MAPWORD;
    start_off = first_page & (PAGES_PER_MAPWORD-1);
    end_idx   = (first_page + nr_pages) / PAGES_PER_MAPWORD;
    end_off   = (first_page + nr_pages) & (PAGES_PER_MAPWORD-1);

    if ( curr_idx == end_idx )
    {
        alloc_bitmap[curr_idx] |= ((1UL<<end_off)-1) & -(1UL<<start_off);
    }
    else
    {
        alloc_bitmap[curr_idx] |= -(1UL<<start_off);
        while ( ++curr_idx < end_idx ) alloc_bitmap[curr_idx] = ~0UL;
        alloc_bitmap[curr_idx] |= (1UL<<end_off)-1;
    }
}

static void map_free(unsigned long first_page, unsigned long nr_pages)
{
    unsigned long start_off, end_off, curr_idx, end_idx;

#ifndef NDEBUG
    unsigned long i;
    /* Check that the block isn't already freed. */
    for ( i = 0; i < nr_pages; i++ )
        ASSERT(allocated_in_map(first_page + i));
#endif

    curr_idx  = first_page / PAGES_PER_MAPWORD;
    start_off = first_page & (PAGES_PER_MAPWORD-1);
    end_idx   = (first_page + nr_pages) / PAGES_PER_MAPWORD;
    end_off   = (first_page + nr_pages) & (PAGES_PER_MAPWORD-1);

    if ( curr_idx == end_idx )
    {
        alloc_bitmap[curr_idx] &= -(1UL<<end_off) | ((1UL<<start_off)-1);
    }
    else
    {
        alloc_bitmap[curr_idx] &= (1UL<<start_off)-1;
        while ( ++curr_idx != end_idx ) alloc_bitmap[curr_idx] = 0;
        alloc_bitmap[curr_idx] &= -(1UL<<end_off);
    }
}
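
/*
 * Note: when (first_page + nr_pages) is an exact multiple of
 * PAGES_PER_MAPWORD, end_off is zero and the else-branches above perform a
 * harmless no-op access to alloc_bitmap[end_idx], a word holding no bits of
 * the range. When the range ends at the top of the map this is the extra
 * padding longword that init_boot_allocator() allocates for this overrun.
 */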


/*************************
 * BOOT-TIME ALLOCATOR
 */

/* Initialise allocator to handle up to @max_page pages. */
paddr_t init_boot_allocator(paddr_t bitmap_start)
{
    unsigned long bitmap_size;

    bitmap_start = round_pgup(bitmap_start);

    /*
     * Allocate space for the allocation bitmap. Include an extra longword
     * of padding for possible overrun in map_alloc and map_free.
     */
    bitmap_size  = max_page / 8;
    bitmap_size += sizeof(unsigned long);
    bitmap_size  = round_pgup(bitmap_size);
    alloc_bitmap = (unsigned long *)maddr_to_virt(bitmap_start);

    /* All allocated by default. */
    memset(alloc_bitmap, ~0, bitmap_size);

    return bitmap_start + bitmap_size;
}

void init_boot_pages(paddr_t ps, paddr_t pe)
{
    unsigned long bad_spfn, bad_epfn, i;
    char *p;

    ps = round_pgup(ps);
    pe = round_pgdown(pe);
    if ( pe <= ps )
        return;

    map_free(ps >> PAGE_SHIFT, (pe - ps) >> PAGE_SHIFT);

    /* Check new pages against the bad-page list. */
    p = opt_badpage;
    while ( *p != '\0' )
    {
        bad_spfn = simple_strtoul(p, &p, 0);
        bad_epfn = bad_spfn;

        if ( *p == '-' )
        {
            p++;
            bad_epfn = simple_strtoul(p, &p, 0);
            if ( bad_epfn < bad_spfn )
                bad_epfn = bad_spfn;
        }

        if ( *p == ',' )
            p++;
        else if ( *p != '\0' )
            break;

        if ( bad_epfn == bad_spfn )
            printk("Marking page %lx as bad\n", bad_spfn);
        else
            printk("Marking pages %lx through %lx as bad\n",
                   bad_spfn, bad_epfn);

        for ( i = bad_spfn; i <= bad_epfn; i++ )
            if ( (i < max_page) && !allocated_in_map(i) )
                map_alloc(i, 1);
    }
}
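
/*
 * Find a pfn_align-aligned run of nr_pfns pages that are all still free,
 * mark the run allocated and return its first page number; 0 is returned
 * when no such run exists below max_page.
 */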
unsigned long alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align)
{
    unsigned long pg, i;

    for ( pg = 0; (pg + nr_pfns) < max_page; pg += pfn_align )
    {
        for ( i = 0; i < nr_pfns; i++ )
            if ( allocated_in_map(pg + i) )
                break;

        if ( i == nr_pfns )
        {
            map_alloc(pg, nr_pfns);
            return pg;
        }
    }

    return 0;
}



/*************************
 * BINARY BUDDY ALLOCATOR
 */

#define MEMZONE_XEN    0
#define MEMZONE_DOM    1
#define MEMZONE_DMADOM 2
#define NR_ZONES       3

#define pfn_dom_zone_type(_pfn)                                 \
    (((_pfn) <= MAX_DMADOM_PFN) ? MEMZONE_DMADOM : MEMZONE_DOM)
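
/*
 * Zone usage: MEMZONE_XEN backs Xen's own heap, MEMZONE_DMADOM holds domain
 * memory at or below MAX_DMADOM_PFN (kept aside for callers that need low,
 * DMA-able pages), and MEMZONE_DOM holds the rest of domain memory.
 */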

static struct list_head heap[NR_ZONES][MAX_ORDER+1];

static unsigned long avail[NR_ZONES];

static spinlock_t heap_lock = SPIN_LOCK_UNLOCKED;

void end_boot_allocator(void)
{
    unsigned long i, j;
    int curr_free = 0, next_free = 0;

    memset(avail, 0, sizeof(avail));

    for ( i = 0; i < NR_ZONES; i++ )
        for ( j = 0; j <= MAX_ORDER; j++ )
            INIT_LIST_HEAD(&heap[i][j]);
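
    /*
     * Each free page is released one step behind the scan: page i+1 is
     * first marked allocated so that free_heap_pages() cannot try to merge
     * page i with a buddy that has not itself been handed to the heap yet.
     */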
    /* Pages that are free now go to the domain sub-allocator. */
    for ( i = 0; i < max_page; i++ )
    {
        curr_free = next_free;
        next_free = !allocated_in_map(i+1);
        if ( next_free )
            map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */
        if ( curr_free )
            free_heap_pages(pfn_dom_zone_type(i), mfn_to_page(i), 0);
    }
}

/* Hand the specified arbitrary page range to the specified heap zone. */
void init_heap_pages(
    unsigned int zone, struct page_info *pg, unsigned long nr_pages)
{
    unsigned long i;

    ASSERT(zone < NR_ZONES);

    for ( i = 0; i < nr_pages; i++ )
        free_heap_pages(zone, pg+i, 0);
}


/* Allocate 2^@order contiguous pages. */
struct page_info *alloc_heap_pages(unsigned int zone, unsigned int order)
{
    int i;
    struct page_info *pg;

    ASSERT(zone < NR_ZONES);

    if ( unlikely(order > MAX_ORDER) )
        return NULL;

    spin_lock(&heap_lock);

    /* Find smallest order which can satisfy the request. */
    for ( i = order; i <= MAX_ORDER; i++ )
        if ( !list_empty(&heap[zone][i]) )
            goto found;

    /* No suitable memory blocks. Fail the request. */
    spin_unlock(&heap_lock);
    return NULL;

 found:
    pg = list_entry(heap[zone][i].next, struct page_info, list);
    list_del(&pg->list);
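
    /*
     * Worked example (illustration only): an order-1 request served from an
     * order-3 block returns 2 pages to the caller after splitting an order-2
     * buddy and then an order-1 buddy back onto the free lists below.
     */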
    /* We may have to halve the chunk a number of times. */
    while ( i != order )
    {
        PFN_ORDER(pg) = --i;
        list_add_tail(&pg->list, &heap[zone][i]);
        pg += 1 << i;
    }

    map_alloc(page_to_mfn(pg), 1 << order);
    avail[zone] -= 1 << order;

    spin_unlock(&heap_lock);

    return pg;
}


/* Free 2^@order set of pages. */
void free_heap_pages(
    unsigned int zone, struct page_info *pg, unsigned int order)
{
    unsigned long mask;

    ASSERT(zone < NR_ZONES);
    ASSERT(order <= MAX_ORDER);

    spin_lock(&heap_lock);

    map_free(page_to_mfn(pg), 1 << order);
    avail[zone] += 1 << order;
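
    /*
     * A block whose first mfn has the `mask' bit set is the upper half of
     * its order+1 buddy pair, so its buddy starts `mask' pages below it;
     * otherwise the buddy starts `mask' pages above. Merging stops when the
     * buddy is allocated or is currently split to a different order.
     */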
    /* Merge chunks as far as possible. */
    while ( order < MAX_ORDER )
    {
        mask = 1 << order;

        if ( (page_to_mfn(pg) & mask) )
        {
            /* Merge with predecessor block? */
            if ( allocated_in_map(page_to_mfn(pg)-mask) ||
                 (PFN_ORDER(pg-mask) != order) )
                break;
            list_del(&(pg-mask)->list);
            pg -= mask;
        }
        else
        {
            /* Merge with successor block? */
            if ( allocated_in_map(page_to_mfn(pg)+mask) ||
                 (PFN_ORDER(pg+mask) != order) )
                break;
            list_del(&(pg+mask)->list);
        }

        order++;
    }

    PFN_ORDER(pg) = order;
    list_add_tail(&pg->list, &heap[zone][order]);

    spin_unlock(&heap_lock);
}


/*
 * Scrub all unallocated pages in all heap zones. This function is more
 * convoluted than appears necessary because we do not want to continuously
 * hold the lock or disable interrupts while scrubbing very large memory areas.
 */
void scrub_heap_pages(void)
{
    void *p;
    unsigned long pfn;
    int cpu = smp_processor_id();

    printk("Scrubbing Free RAM: ");

    for ( pfn = 0; pfn < max_page; pfn++ )
    {
        /* Every 100MB, print a progress dot. */
        if ( (pfn % ((100*1024*1024)/PAGE_SIZE)) == 0 )
            printk(".");

        if ( unlikely(softirq_pending(cpu)) )
            do_softirq();

        /* Quick lock-free check. */
        if ( allocated_in_map(pfn) )
            continue;

        spin_lock_irq(&heap_lock);

        /* Re-check page status with lock held. */
        if ( !allocated_in_map(pfn) )
        {
            if ( IS_XEN_HEAP_FRAME(mfn_to_page(pfn)) )
            {
                p = page_to_virt(mfn_to_page(pfn));
                memguard_unguard_range(p, PAGE_SIZE);
                clear_page(p);
                memguard_guard_range(p, PAGE_SIZE);
            }
            else
            {
                p = map_domain_page(pfn);
                clear_page(p);
                unmap_domain_page(p);
            }
        }

        spin_unlock_irq(&heap_lock);
    }

    printk("done.\n");
}



/*************************
 * XEN-HEAP SUB-ALLOCATOR
 */

void init_xenheap_pages(paddr_t ps, paddr_t pe)
{
    unsigned long flags;

    ps = round_pgup(ps);
    pe = round_pgdown(pe);
    if ( pe <= ps )
        return;

    memguard_guard_range(maddr_to_virt(ps), pe - ps);

    /*
     * Yuk! Ensure there is a one-page buffer between Xen and Dom zones, to
     * prevent merging of power-of-two blocks across the zone boundary.
     */
    if ( !IS_XEN_HEAP_FRAME(maddr_to_page(pe)) )
        pe -= PAGE_SIZE;

    local_irq_save(flags);
    init_heap_pages(MEMZONE_XEN, maddr_to_page(ps), (pe - ps) >> PAGE_SHIFT);
    local_irq_restore(flags);
}


void *alloc_xenheap_pages(unsigned int order)
{
    unsigned long flags;
    struct page_info *pg;
    int i;

    local_irq_save(flags);
    pg = alloc_heap_pages(MEMZONE_XEN, order);
    local_irq_restore(flags);

    if ( unlikely(pg == NULL) )
        goto no_memory;

    memguard_unguard_range(page_to_virt(pg), 1 << (order + PAGE_SHIFT));

    for ( i = 0; i < (1 << order); i++ )
    {
        pg[i].count_info        = 0;
        pg[i].u.inuse._domain   = 0;
        pg[i].u.inuse.type_info = 0;
    }

    return page_to_virt(pg);

 no_memory:
    printk("Cannot handle page request order %d!\n", order);
    return NULL;
}


void free_xenheap_pages(void *v, unsigned int order)
{
    unsigned long flags;

    if ( v == NULL )
        return;

    memguard_guard_range(v, 1 << (order + PAGE_SHIFT));

    local_irq_save(flags);
    free_heap_pages(MEMZONE_XEN, virt_to_page(v), order);
    local_irq_restore(flags);
}



/*************************
 * DOMAIN-HEAP SUB-ALLOCATOR
 */

void init_domheap_pages(paddr_t ps, paddr_t pe)
{
    unsigned long s_tot, e_tot, s_dma, e_dma, s_nrm, e_nrm;

    ASSERT(!in_irq());

    s_tot = round_pgup(ps) >> PAGE_SHIFT;
    e_tot = round_pgdown(pe) >> PAGE_SHIFT;
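
    /*
     * Split the range at MAX_DMADOM_PFN+1: the portion at or below it feeds
     * MEMZONE_DMADOM, anything above feeds MEMZONE_DOM.
     */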
    s_dma = min(s_tot, MAX_DMADOM_PFN + 1);
    e_dma = min(e_tot, MAX_DMADOM_PFN + 1);
    if ( s_dma < e_dma )
        init_heap_pages(MEMZONE_DMADOM, mfn_to_page(s_dma), e_dma - s_dma);

    s_nrm = max(s_tot, MAX_DMADOM_PFN + 1);
    e_nrm = max(e_tot, MAX_DMADOM_PFN + 1);
    if ( s_nrm < e_nrm )
        init_heap_pages(MEMZONE_DOM, mfn_to_page(s_nrm), e_nrm - s_nrm);
}


struct page_info *alloc_domheap_pages(
    struct domain *d, unsigned int order, unsigned int flags)
{
    struct page_info *pg = NULL;
    cpumask_t mask;
    int i;

    ASSERT(!in_irq());

    if ( !(flags & ALLOC_DOM_DMA) )
    {
        pg = alloc_heap_pages(MEMZONE_DOM, order);
        /* Failure? Then check if we can fall back to the DMA pool. */
        if ( unlikely(pg == NULL) &&
             ((order > MAX_ORDER) ||
              (avail[MEMZONE_DMADOM] <
               (lowmem_emergency_pool_pages + (1UL << order)))) )
            return NULL;
    }
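
    /*
     * Fall back to the DMA zone. For ordinary requests the avail[] check
     * above keeps (modulo races) at least lowmem_emergency_pool_pages in
     * MEMZONE_DMADOM for explicit ALLOC_DOM_DMA allocations.
     */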
    if ( pg == NULL )
        if ( (pg = alloc_heap_pages(MEMZONE_DMADOM, order)) == NULL )
            return NULL;
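
    /*
     * The pages may still be referenced by stale TLB entries on CPUs that
     * have not flushed since the pages were freed; accumulate those CPUs
     * (filtered by tlbflush_filter() against each page's timestamp) and
     * flush them before the pages change owner.
     */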
    mask = pg->u.free.cpumask;
    tlbflush_filter(mask, pg->tlbflush_timestamp);

    pg->count_info        = 0;
    pg->u.inuse._domain   = 0;
    pg->u.inuse.type_info = 0;

    for ( i = 1; i < (1 << order); i++ )
    {
        /* Add in any extra CPUs that need flushing because of this page. */
        cpumask_t extra_cpus_mask;
        cpus_andnot(extra_cpus_mask, pg[i].u.free.cpumask, mask);
        tlbflush_filter(extra_cpus_mask, pg[i].tlbflush_timestamp);
        cpus_or(mask, mask, extra_cpus_mask);

        pg[i].count_info        = 0;
        pg[i].u.inuse._domain   = 0;
        pg[i].u.inuse.type_info = 0;
        page_set_owner(&pg[i], NULL);
    }

    if ( unlikely(!cpus_empty(mask)) )
    {
        perfc_incrc(need_flush_tlb_flush);
        flush_tlb_mask(mask);
    }

    if ( d == NULL )
        return pg;

    spin_lock(&d->page_alloc_lock);

    if ( unlikely(test_bit(_DOMF_dying, &d->domain_flags)) ||
         unlikely((d->tot_pages + (1 << order)) > d->max_pages) )
    {
        DPRINTK("Over-allocation for domain %u: %u > %u\n",
                d->domain_id, d->tot_pages + (1 << order), d->max_pages);
        DPRINTK("...or the domain is dying (%d)\n",
                !!test_bit(_DOMF_dying, &d->domain_flags));
        spin_unlock(&d->page_alloc_lock);
        free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
        return NULL;
    }

    if ( unlikely(d->tot_pages == 0) )
        get_knownalive_domain(d);

    d->tot_pages += 1 << order;

    for ( i = 0; i < (1 << order); i++ )
    {
        page_set_owner(&pg[i], d);
        wmb(); /* Domain pointer must be visible before updating refcnt. */
        pg[i].count_info |= PGC_allocated | 1;
        list_add_tail(&pg[i].list, &d->page_list);
    }

    spin_unlock(&d->page_alloc_lock);

    return pg;
}


void free_domheap_pages(struct page_info *pg, unsigned int order)
{
    int i, drop_dom_ref;
    struct domain *d = page_get_owner(pg);

    ASSERT(!in_irq());

    if ( unlikely(IS_XEN_HEAP_FRAME(pg)) )
    {
        /* NB. May recursively lock from relinquish_memory(). */
        spin_lock_recursive(&d->page_alloc_lock);

        for ( i = 0; i < (1 << order); i++ )
            list_del(&pg[i].list);

        d->xenheap_pages -= 1 << order;
        drop_dom_ref = (d->xenheap_pages == 0);

        spin_unlock_recursive(&d->page_alloc_lock);
    }
    else if ( likely(d != NULL) )
    {
        /* NB. May recursively lock from relinquish_memory(). */
        spin_lock_recursive(&d->page_alloc_lock);

        for ( i = 0; i < (1 << order); i++ )
        {
            shadow_drop_references(d, &pg[i]);
            ASSERT((pg[i].u.inuse.type_info & PGT_count_mask) == 0);
            pg[i].tlbflush_timestamp = tlbflush_current_time();
            pg[i].u.free.cpumask     = d->domain_dirty_cpumask;
            list_del(&pg[i].list);
        }

        d->tot_pages -= 1 << order;
        drop_dom_ref = (d->tot_pages == 0);

        spin_unlock_recursive(&d->page_alloc_lock);

        if ( likely(!test_bit(_DOMF_dying, &d->domain_flags)) )
        {
            free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
        }
        else
        {
            /*
             * Normally we expect a domain to clear pages before freeing them,
             * if it cares about the secrecy of their contents. However, after
             * a domain has died we assume responsibility for erasure.
             */
            for ( i = 0; i < (1 << order); i++ )
            {
                spin_lock(&page_scrub_lock);
                list_add(&pg[i].list, &page_scrub_list);
                spin_unlock(&page_scrub_lock);
            }
        }
    }
    else
    {
        /* Freeing anonymous domain-heap pages. */
        for ( i = 0; i < (1 << order); i++ )
            pg[i].u.free.cpumask = CPU_MASK_NONE;
        free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
        drop_dom_ref = 0;
    }
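
    /*
     * Releasing the domain's last page drops the reference taken when it
     * acquired its first one (e.g. in alloc_domheap_pages()), which may
     * allow a dying domain to be finally destroyed.
     */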
    if ( drop_dom_ref )
        put_domain(d);
}


unsigned long avail_domheap_pages(void)
{
    unsigned long avail_nrm, avail_dma;

    avail_nrm = avail[MEMZONE_DOM];

    avail_dma = avail[MEMZONE_DMADOM];
    if ( avail_dma > lowmem_emergency_pool_pages )
        avail_dma -= lowmem_emergency_pool_pages;
    else
        avail_dma = 0;

    return avail_nrm + avail_dma;
}


static void pagealloc_keyhandler(unsigned char key)
{
    printk("Physical memory information:\n");
    printk("    Xen heap: %lukB free\n"
           "    DMA heap: %lukB free\n"
           "    Dom heap: %lukB free\n",
           avail[MEMZONE_XEN]<<(PAGE_SHIFT-10),
           avail[MEMZONE_DMADOM]<<(PAGE_SHIFT-10),
           avail[MEMZONE_DOM]<<(PAGE_SHIFT-10));
}


static __init int pagealloc_keyhandler_init(void)
{
    register_keyhandler('m', pagealloc_keyhandler, "memory info");
    return 0;
}
__initcall(pagealloc_keyhandler_init);



/*************************
 * PAGE SCRUBBING
 */
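
/*
 * Pages queued on page_scrub_list (by free_domheap_pages(), on behalf of
 * dying domains) are cleared here in small batches and then returned to
 * the heap.
 */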
static void page_scrub_softirq(void)
{
    struct list_head *ent;
    struct page_info *pg;
    void *p;
    int i;
    s_time_t start = NOW();

    /* Aim to do 1ms of work (ten percent of a 10ms jiffy). */
    do {
        spin_lock(&page_scrub_lock);

        if ( unlikely((ent = page_scrub_list.next) == &page_scrub_list) )
        {
            spin_unlock(&page_scrub_lock);
            return;
        }

        /* Peel up to 16 pages from the list. */
        for ( i = 0; i < 16; i++ )
        {
            if ( ent->next == &page_scrub_list )
                break;
            ent = ent->next;
        }

        /* Remove peeled pages from the list. */
        ent->next->prev = &page_scrub_list;
        page_scrub_list.next = ent->next;

        spin_unlock(&page_scrub_lock);

        /* Working backwards, scrub each page in turn. */
        while ( ent != &page_scrub_list )
        {
            pg = list_entry(ent, struct page_info, list);
            ent = ent->prev;
            p = map_domain_page(page_to_mfn(pg));
            clear_page(p);
            unmap_domain_page(p);
            free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, 0);
        }
    } while ( (NOW() - start) < MILLISECS(1) );
}

static __init int page_scrub_init(void)
{
    open_softirq(PAGE_SCRUB_SOFTIRQ, page_scrub_softirq);
    return 0;
}
__initcall(page_scrub_init);

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */