ia64/xen-unstable

view xen/common/page_alloc.c @ 10570:8dc4af3f192c

[IA64] Implement and use DOM0_DOMAIN_SETUP.

DOM0_GETMEMLIST now reads PTEs and uses gpfns.
Domain builder reworked: it calls DOMAIN_SETUP and sets up the start_info page.
SAL data are now in domain memory.
is_vti field added to domain.arch.
Many cleanups (indentation, static, warnings).

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Wed Jul 05 09:28:32 2006 -0600 (2006-07-05)
parents 234939c0ec3a
children 53f552ad4042
/******************************************************************************
 * page_alloc.c
 *
 * Simple buddy heap allocator for Xen.
 *
 * Copyright (c) 2002-2004 K A Fraser
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#include <xen/spinlock.h>
#include <xen/mm.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/shadow.h>
#include <xen/domain_page.h>
#include <xen/keyhandler.h>
#include <asm/page.h>

/*
 * Comma-separated list of hexadecimal page numbers containing bad bytes.
 * e.g. 'badpage=0x3f45,0x8a321'.
 */
static char opt_badpage[100] = "";
string_param("badpage", opt_badpage);

/*
 * Amount of memory to reserve in a low-memory (<4GB) pool for specific
 * allocation requests. Ordinary requests will not fall back to the
 * lowmem emergency pool.
 */
static unsigned long lowmem_emergency_pool_pages;
static void parse_lowmem_emergency_pool(char *s)
{
    unsigned long long bytes;
    bytes = parse_size_and_unit(s);
    lowmem_emergency_pool_pages = bytes >> PAGE_SHIFT;
}
custom_param("lowmem_emergency_pool", parse_lowmem_emergency_pool);
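/*
 * For example, booting with 'lowmem_emergency_pool=16M' (assuming
 * parse_size_and_unit() accepts the usual K/M/G suffixes) keeps 16MB of
 * the <4GB zone back for explicit MEMF_dma allocations only.
 */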
#define round_pgdown(_p) ((_p)&PAGE_MASK)
#define round_pgup(_p)   (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
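/*
 * Worked example, assuming 4kB pages: round_pgdown(0x1234) == 0x1000 and
 * round_pgup(0x1234) == 0x2000.
 */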
static DEFINE_SPINLOCK(page_scrub_lock);
LIST_HEAD(page_scrub_list);
static unsigned long scrub_pages;

/*********************
 * ALLOCATION BITMAP
 *  One bit per page of memory. Bit set => page is allocated.
 */

static unsigned long *alloc_bitmap;
#define PAGES_PER_MAPWORD (sizeof(unsigned long) * 8)

#define allocated_in_map(_pn)                 \
( !! (alloc_bitmap[(_pn)/PAGES_PER_MAPWORD] & \
      (1UL<<((_pn)&(PAGES_PER_MAPWORD-1)))) )

/*
 * Hint regarding bitwise arithmetic in map_{alloc,free}:
 *  -(1<<n) sets all bits >= n.
 *  (1<<n)-1 sets all bits < n.
 * Variable names in map_{alloc,free}:
 *  *_idx == Index into `alloc_bitmap' array.
 *  *_off == Bit offset within an element of the `alloc_bitmap' array.
 */
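/*
 * Worked example (on a 64-bit build PAGES_PER_MAPWORD == 64): map_alloc(3, 7)
 * stays within word 0 and ORs in ((1UL<<10)-1) & -(1UL<<3), i.e. bits 3..9.
 */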
static void map_alloc(unsigned long first_page, unsigned long nr_pages)
{
    unsigned long start_off, end_off, curr_idx, end_idx;

#ifndef NDEBUG
    unsigned long i;
    /* Check that the block isn't already allocated. */
    for ( i = 0; i < nr_pages; i++ )
        ASSERT(!allocated_in_map(first_page + i));
#endif

    curr_idx  = first_page / PAGES_PER_MAPWORD;
    start_off = first_page & (PAGES_PER_MAPWORD-1);
    end_idx   = (first_page + nr_pages) / PAGES_PER_MAPWORD;
    end_off   = (first_page + nr_pages) & (PAGES_PER_MAPWORD-1);

    if ( curr_idx == end_idx )
    {
        alloc_bitmap[curr_idx] |= ((1UL<<end_off)-1) & -(1UL<<start_off);
    }
    else
    {
        alloc_bitmap[curr_idx] |= -(1UL<<start_off);
        while ( ++curr_idx < end_idx ) alloc_bitmap[curr_idx] = ~0UL;
        alloc_bitmap[curr_idx] |= (1UL<<end_off)-1;
    }
}


static void map_free(unsigned long first_page, unsigned long nr_pages)
{
    unsigned long start_off, end_off, curr_idx, end_idx;

#ifndef NDEBUG
    unsigned long i;
    /* Check that the block isn't already freed. */
    for ( i = 0; i < nr_pages; i++ )
        ASSERT(allocated_in_map(first_page + i));
#endif

    curr_idx  = first_page / PAGES_PER_MAPWORD;
    start_off = first_page & (PAGES_PER_MAPWORD-1);
    end_idx   = (first_page + nr_pages) / PAGES_PER_MAPWORD;
    end_off   = (first_page + nr_pages) & (PAGES_PER_MAPWORD-1);

    if ( curr_idx == end_idx )
    {
        alloc_bitmap[curr_idx] &= -(1UL<<end_off) | ((1UL<<start_off)-1);
    }
    else
    {
        alloc_bitmap[curr_idx] &= (1UL<<start_off)-1;
        while ( ++curr_idx != end_idx ) alloc_bitmap[curr_idx] = 0;
        alloc_bitmap[curr_idx] &= -(1UL<<end_off);
    }
}



/*************************
 * BOOT-TIME ALLOCATOR
 */

/* Initialise allocator to handle up to @max_page pages. */
paddr_t init_boot_allocator(paddr_t bitmap_start)
{
    unsigned long bitmap_size;

    bitmap_start = round_pgup(bitmap_start);

    /*
     * Allocate space for the allocation bitmap. Include an extra longword
     * of padding for possible overrun in map_alloc and map_free.
     */
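    /*
     * E.g. with 4kB pages, 4GB of memory is 2^20 pages, so the bitmap proper
     * needs 2^20 bits == 128kB before the padding and page rounding below.
     */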
    bitmap_size  = max_page / 8;
    bitmap_size += sizeof(unsigned long);
    bitmap_size  = round_pgup(bitmap_size);
    alloc_bitmap = (unsigned long *)maddr_to_virt(bitmap_start);

    /* All allocated by default. */
    memset(alloc_bitmap, ~0, bitmap_size);

    return bitmap_start + bitmap_size;
}

void init_boot_pages(paddr_t ps, paddr_t pe)
{
    unsigned long bad_spfn, bad_epfn, i;
    char *p;

    ps = round_pgup(ps);
    pe = round_pgdown(pe);
    if ( pe <= ps )
        return;

    map_free(ps >> PAGE_SHIFT, (pe - ps) >> PAGE_SHIFT);

    /* Check new pages against the bad-page list. */
    p = opt_badpage;
    while ( *p != '\0' )
    {
        bad_spfn = simple_strtoul(p, &p, 0);
        bad_epfn = bad_spfn;

        if ( *p == '-' )
        {
            p++;
            bad_epfn = simple_strtoul(p, &p, 0);
            if ( bad_epfn < bad_spfn )
                bad_epfn = bad_spfn;
        }

        if ( *p == ',' )
            p++;
        else if ( *p != '\0' )
            break;

        if ( bad_epfn == bad_spfn )
            printk("Marking page %lx as bad\n", bad_spfn);
        else
            printk("Marking pages %lx through %lx as bad\n",
                   bad_spfn, bad_epfn);

        for ( i = bad_spfn; i <= bad_epfn; i++ )
            if ( (i < max_page) && !allocated_in_map(i) )
                map_alloc(i, 1);
    }
}

unsigned long alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align)
{
    unsigned long pg, i;

    for ( pg = 0; (pg + nr_pfns) < max_page; pg += pfn_align )
    {
        for ( i = 0; i < nr_pfns; i++ )
            if ( allocated_in_map(pg + i) )
                break;

        if ( i == nr_pfns )
        {
            map_alloc(pg, nr_pfns);
            return pg;
        }
    }

    return 0;
}



/*************************
 * BINARY BUDDY ALLOCATOR
 */

#define MEMZONE_XEN 0
#define MEMZONE_DOM 1
#define MEMZONE_DMADOM 2
#define NR_ZONES 3

#define pfn_dom_zone_type(_pfn)                                 \
    (((_pfn) <= MAX_DMADOM_PFN) ? MEMZONE_DMADOM : MEMZONE_DOM)
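/*
 * Pages at or below MAX_DMADOM_PFN (the <4GB boundary referred to by the
 * lowmem_emergency_pool comment above) sit in MEMZONE_DMADOM; everything
 * above it sits in MEMZONE_DOM.
 */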
static struct list_head heap[NR_ZONES][MAX_ORDER+1];

static unsigned long avail[NR_ZONES];

static DEFINE_SPINLOCK(heap_lock);

void end_boot_allocator(void)
{
    unsigned long i, j;
    int curr_free = 0, next_free = 0;

    memset(avail, 0, sizeof(avail));

    for ( i = 0; i < NR_ZONES; i++ )
        for ( j = 0; j <= MAX_ORDER; j++ )
            INIT_LIST_HEAD(&heap[i][j]);

    /* Pages that are free now go to the domain sub-allocator. */
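    /*
     * The loop looks one page ahead: page i+1 is temporarily marked as
     * allocated before page i is freed, so free_heap_pages() cannot merge
     * page i with a neighbour that has not been handed over yet.
     */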
    for ( i = 0; i < max_page; i++ )
    {
        curr_free = next_free;
        next_free = !allocated_in_map(i+1);
        if ( next_free )
            map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */
        if ( curr_free )
            free_heap_pages(pfn_dom_zone_type(i), mfn_to_page(i), 0);
    }
}

/* Hand the specified arbitrary page range to the specified heap zone. */
void init_heap_pages(
    unsigned int zone, struct page_info *pg, unsigned long nr_pages)
{
    unsigned long i;

    ASSERT(zone < NR_ZONES);

    for ( i = 0; i < nr_pages; i++ )
        free_heap_pages(zone, pg+i, 0);
}


/* Allocate 2^@order contiguous pages. */
struct page_info *alloc_heap_pages(unsigned int zone, unsigned int order)
{
    int i;
    struct page_info *pg;

    ASSERT(zone < NR_ZONES);

    if ( unlikely(order > MAX_ORDER) )
        return NULL;

    spin_lock(&heap_lock);

    /* Find smallest order which can satisfy the request. */
    for ( i = order; i <= MAX_ORDER; i++ )
        if ( !list_empty(&heap[zone][i]) )
            goto found;

    /* No suitable memory blocks. Fail the request. */
    spin_unlock(&heap_lock);
    return NULL;

 found:
    pg = list_entry(heap[zone][i].next, struct page_info, list);
    list_del(&pg->list);

    /* We may have to halve the chunk a number of times. */
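    /*
     * E.g. an order-0 request served from an order-3 block returns the
     * order-2, order-1 and order-0 lower halves to their free lists and
     * hands out the remaining single page.
     */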
    while ( i != order )
    {
        PFN_ORDER(pg) = --i;
        list_add_tail(&pg->list, &heap[zone][i]);
        pg += 1 << i;
    }

    map_alloc(page_to_mfn(pg), 1 << order);
    avail[zone] -= 1 << order;

    spin_unlock(&heap_lock);

    return pg;
}


/* Free 2^@order set of pages. */
void free_heap_pages(
    unsigned int zone, struct page_info *pg, unsigned int order)
{
    unsigned long mask;

    ASSERT(zone < NR_ZONES);
    ASSERT(order <= MAX_ORDER);

    spin_lock(&heap_lock);

    map_free(page_to_mfn(pg), 1 << order);
    avail[zone] += 1 << order;

    /* Merge chunks as far as possible. */
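    /*
     * A chunk's buddy differs from it only in the bit selected by `mask',
     * so page_to_mfn(pg) & mask tells us whether the buddy lies immediately
     * below or immediately above this chunk.
     */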
    while ( order < MAX_ORDER )
    {
        mask = 1 << order;

        if ( (page_to_mfn(pg) & mask) )
        {
            /* Merge with predecessor block? */
            if ( allocated_in_map(page_to_mfn(pg)-mask) ||
                 (PFN_ORDER(pg-mask) != order) )
                break;
            list_del(&(pg-mask)->list);
            pg -= mask;
        }
        else
        {
            /* Merge with successor block? */
            if ( allocated_in_map(page_to_mfn(pg)+mask) ||
                 (PFN_ORDER(pg+mask) != order) )
                break;
            list_del(&(pg+mask)->list);
        }

        order++;
    }

    PFN_ORDER(pg) = order;
    list_add_tail(&pg->list, &heap[zone][order]);

    spin_unlock(&heap_lock);
}


/*
 * Scrub all unallocated pages in all heap zones. This function is more
 * convoluted than appears necessary because we do not want to continuously
 * hold the lock or disable interrupts while scrubbing very large memory areas.
 */
void scrub_heap_pages(void)
{
    void *p;
    unsigned long pfn;
    int cpu = smp_processor_id();

    printk("Scrubbing Free RAM: ");

    for ( pfn = 0; pfn < max_page; pfn++ )
    {
        /* Every 100MB, print a progress dot. */
        if ( (pfn % ((100*1024*1024)/PAGE_SIZE)) == 0 )
            printk(".");

        if ( unlikely(softirq_pending(cpu)) )
            do_softirq();

        /* Quick lock-free check. */
        if ( allocated_in_map(pfn) )
            continue;

        spin_lock_irq(&heap_lock);

        /* Re-check page status with lock held. */
        if ( !allocated_in_map(pfn) )
        {
            if ( IS_XEN_HEAP_FRAME(mfn_to_page(pfn)) )
            {
                p = page_to_virt(mfn_to_page(pfn));
                memguard_unguard_range(p, PAGE_SIZE);
                clear_page(p);
                memguard_guard_range(p, PAGE_SIZE);
            }
            else
            {
                p = map_domain_page(pfn);
                clear_page(p);
                unmap_domain_page(p);
            }
        }

        spin_unlock_irq(&heap_lock);
    }

    printk("done.\n");
}



/*************************
 * XEN-HEAP SUB-ALLOCATOR
 */

void init_xenheap_pages(paddr_t ps, paddr_t pe)
{
    unsigned long flags;

    ps = round_pgup(ps);
    pe = round_pgdown(pe);
    if ( pe <= ps )
        return;

    memguard_guard_range(maddr_to_virt(ps), pe - ps);

    /*
     * Yuk! Ensure there is a one-page buffer between Xen and Dom zones, to
     * prevent merging of power-of-two blocks across the zone boundary.
     */
    if ( !IS_XEN_HEAP_FRAME(maddr_to_page(pe)) )
        pe -= PAGE_SIZE;

    local_irq_save(flags);
    init_heap_pages(MEMZONE_XEN, maddr_to_page(ps), (pe - ps) >> PAGE_SHIFT);
    local_irq_restore(flags);
}


void *alloc_xenheap_pages(unsigned int order)
{
    unsigned long flags;
    struct page_info *pg;
    int i;

    local_irq_save(flags);
    pg = alloc_heap_pages(MEMZONE_XEN, order);
    local_irq_restore(flags);

    if ( unlikely(pg == NULL) )
        goto no_memory;

    memguard_unguard_range(page_to_virt(pg), 1 << (order + PAGE_SHIFT));

    for ( i = 0; i < (1 << order); i++ )
    {
        pg[i].count_info = 0;
        pg[i].u.inuse._domain = 0;
        pg[i].u.inuse.type_info = 0;
    }

    return page_to_virt(pg);

 no_memory:
    printk("Cannot handle page request order %d!\n", order);
    return NULL;
}


void free_xenheap_pages(void *v, unsigned int order)
{
    unsigned long flags;

    if ( v == NULL )
        return;

    memguard_guard_range(v, 1 << (order + PAGE_SHIFT));

    local_irq_save(flags);
    free_heap_pages(MEMZONE_XEN, virt_to_page(v), order);
    local_irq_restore(flags);
}



/*************************
 * DOMAIN-HEAP SUB-ALLOCATOR
 */
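/*
 * init_domheap_pages() splits each range at the MAX_DMADOM_PFN boundary:
 * the part below it (if any) feeds MEMZONE_DMADOM and the part above it
 * feeds MEMZONE_DOM; the min/max clamping handles ranges that lie entirely
 * on one side.
 */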
void init_domheap_pages(paddr_t ps, paddr_t pe)
{
    unsigned long s_tot, e_tot, s_dma, e_dma, s_nrm, e_nrm;

    ASSERT(!in_irq());

    s_tot = round_pgup(ps) >> PAGE_SHIFT;
    e_tot = round_pgdown(pe) >> PAGE_SHIFT;

    s_dma = min(s_tot, MAX_DMADOM_PFN + 1);
    e_dma = min(e_tot, MAX_DMADOM_PFN + 1);
    if ( s_dma < e_dma )
        init_heap_pages(MEMZONE_DMADOM, mfn_to_page(s_dma), e_dma - s_dma);

    s_nrm = max(s_tot, MAX_DMADOM_PFN + 1);
    e_nrm = max(e_tot, MAX_DMADOM_PFN + 1);
    if ( s_nrm < e_nrm )
        init_heap_pages(MEMZONE_DOM, mfn_to_page(s_nrm), e_nrm - s_nrm);
}


int assign_pages(
    struct domain *d,
    struct page_info *pg,
    unsigned int order,
    unsigned int memflags)
{
    unsigned long i;

    spin_lock(&d->page_alloc_lock);

    if ( unlikely(test_bit(_DOMF_dying, &d->domain_flags)) )
    {
        DPRINTK("Cannot assign page to domain%d -- dying.\n", d->domain_id);
        goto fail;
    }

    if ( !(memflags & MEMF_no_refcount) )
    {
        if ( unlikely((d->tot_pages + (1 << order)) > d->max_pages) )
        {
            DPRINTK("Over-allocation for domain %u: %u > %u\n",
                    d->domain_id, d->tot_pages + (1 << order), d->max_pages);
            goto fail;
        }

        if ( unlikely(d->tot_pages == 0) )
            get_knownalive_domain(d);

        d->tot_pages += 1 << order;
    }

    for ( i = 0; i < (1 << order); i++ )
    {
        ASSERT(page_get_owner(&pg[i]) == NULL);
        ASSERT((pg[i].count_info & ~(PGC_allocated | 1)) == 0);
        page_set_owner(&pg[i], d);
        wmb(); /* Domain pointer must be visible before updating refcnt. */
        pg[i].count_info = PGC_allocated | 1;
        list_add_tail(&pg[i].list, &d->page_list);
    }

    spin_unlock(&d->page_alloc_lock);
    return 0;

 fail:
    spin_unlock(&d->page_alloc_lock);
    return -1;
}


struct page_info *alloc_domheap_pages(
    struct domain *d, unsigned int order, unsigned int memflags)
{
    struct page_info *pg = NULL;
    cpumask_t mask;
    unsigned long i;

    ASSERT(!in_irq());

    if ( !(memflags & MEMF_dma) )
    {
        pg = alloc_heap_pages(MEMZONE_DOM, order);
        /* Failure? Then check if we can fall back to the DMA pool. */
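        /*
         * The fall-back is only permitted when, after this allocation, at
         * least lowmem_emergency_pool_pages would still be free in the DMA
         * zone; otherwise the request fails here.
         */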
        if ( unlikely(pg == NULL) &&
             ((order > MAX_ORDER) ||
              (avail[MEMZONE_DMADOM] <
               (lowmem_emergency_pool_pages + (1UL << order)))) )
            return NULL;
    }

    if ( pg == NULL )
        if ( (pg = alloc_heap_pages(MEMZONE_DMADOM, order)) == NULL )
            return NULL;

    mask = pg->u.free.cpumask;
    tlbflush_filter(mask, pg->tlbflush_timestamp);

    pg->count_info = 0;
    pg->u.inuse._domain = 0;
    pg->u.inuse.type_info = 0;

    for ( i = 1; i < (1 << order); i++ )
    {
        /* Add in any extra CPUs that need flushing because of this page. */
        cpumask_t extra_cpus_mask;
        cpus_andnot(extra_cpus_mask, pg[i].u.free.cpumask, mask);
        tlbflush_filter(extra_cpus_mask, pg[i].tlbflush_timestamp);
        cpus_or(mask, mask, extra_cpus_mask);

        pg[i].count_info = 0;
        pg[i].u.inuse._domain = 0;
        pg[i].u.inuse.type_info = 0;
        page_set_owner(&pg[i], NULL);
    }

    if ( unlikely(!cpus_empty(mask)) )
    {
        perfc_incrc(need_flush_tlb_flush);
        flush_tlb_mask(mask);
    }

    if ( (d != NULL) && assign_pages(d, pg, order, memflags) )
    {
        free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
        return NULL;
    }

    return pg;
}


void free_domheap_pages(struct page_info *pg, unsigned int order)
{
    int i, drop_dom_ref;
    struct domain *d = page_get_owner(pg);

    ASSERT(!in_irq());

    if ( unlikely(IS_XEN_HEAP_FRAME(pg)) )
    {
        /* NB. May recursively lock from relinquish_memory(). */
        spin_lock_recursive(&d->page_alloc_lock);

        for ( i = 0; i < (1 << order); i++ )
            list_del(&pg[i].list);

        d->xenheap_pages -= 1 << order;
        drop_dom_ref = (d->xenheap_pages == 0);

        spin_unlock_recursive(&d->page_alloc_lock);
    }
    else if ( likely(d != NULL) )
    {
        /* NB. May recursively lock from relinquish_memory(). */
        spin_lock_recursive(&d->page_alloc_lock);

        for ( i = 0; i < (1 << order); i++ )
        {
            shadow_drop_references(d, &pg[i]);
            ASSERT((pg[i].u.inuse.type_info & PGT_count_mask) == 0);
            pg[i].tlbflush_timestamp = tlbflush_current_time();
            pg[i].u.free.cpumask = d->domain_dirty_cpumask;
            list_del(&pg[i].list);
        }

        d->tot_pages -= 1 << order;
        drop_dom_ref = (d->tot_pages == 0);

        spin_unlock_recursive(&d->page_alloc_lock);

        if ( likely(!test_bit(_DOMF_dying, &d->domain_flags)) )
        {
            free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
        }
        else
        {
            /*
             * Normally we expect a domain to clear pages before freeing them,
             * if it cares about the secrecy of their contents. However, after
             * a domain has died we assume responsibility for erasure.
             */
            for ( i = 0; i < (1 << order); i++ )
            {
                spin_lock(&page_scrub_lock);
                list_add(&pg[i].list, &page_scrub_list);
                scrub_pages++;
                spin_unlock(&page_scrub_lock);
            }
        }
    }
    else
    {
        /* Freeing anonymous domain-heap pages. */
        for ( i = 0; i < (1 << order); i++ )
            pg[i].u.free.cpumask = CPU_MASK_NONE;
        free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
        drop_dom_ref = 0;
    }

    if ( drop_dom_ref )
        put_domain(d);
}


unsigned long avail_domheap_pages(void)
{
    unsigned long avail_nrm, avail_dma;

    avail_nrm = avail[MEMZONE_DOM];

    avail_dma = avail[MEMZONE_DMADOM];
    if ( avail_dma > lowmem_emergency_pool_pages )
        avail_dma -= lowmem_emergency_pool_pages;
    else
        avail_dma = 0;

    return avail_nrm + avail_dma;
}


static void pagealloc_keyhandler(unsigned char key)
{
    printk("Physical memory information:\n");
    printk("    Xen heap: %lukB free\n"
           "    DMA heap: %lukB free\n"
           "    Dom heap: %lukB free\n",
           avail[MEMZONE_XEN]<<(PAGE_SHIFT-10),
           avail[MEMZONE_DMADOM]<<(PAGE_SHIFT-10),
           avail[MEMZONE_DOM]<<(PAGE_SHIFT-10));
}


static __init int pagealloc_keyhandler_init(void)
{
    register_keyhandler('m', pagealloc_keyhandler, "memory info");
    return 0;
}
__initcall(pagealloc_keyhandler_init);



/*************************
 * PAGE SCRUBBING
 */

static void page_scrub_softirq(void)
{
    struct list_head *ent;
    struct page_info *pg;
    void *p;
    int i;
    s_time_t start = NOW();

    /* Aim to do 1ms of work (ten percent of a 10ms jiffy). */
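    /*
     * Each pass peels a small batch (up to 16 pages) off the scrub list so
     * that page_scrub_lock is never held across the actual page clearing.
     */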
    do {
        spin_lock(&page_scrub_lock);

        if ( unlikely((ent = page_scrub_list.next) == &page_scrub_list) )
        {
            spin_unlock(&page_scrub_lock);
            return;
        }

        /* Peel up to 16 pages from the list. */
        for ( i = 0; i < 16; i++ )
        {
            if ( ent->next == &page_scrub_list )
                break;
            ent = ent->next;
        }

        /* Remove peeled pages from the list. */
        ent->next->prev = &page_scrub_list;
        page_scrub_list.next = ent->next;
        scrub_pages -= (i+1);

        spin_unlock(&page_scrub_lock);

        /* Working backwards, scrub each page in turn. */
        while ( ent != &page_scrub_list )
        {
            pg = list_entry(ent, struct page_info, list);
            ent = ent->prev;
            p = map_domain_page(page_to_mfn(pg));
            clear_page(p);
            unmap_domain_page(p);
            free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, 0);
        }
    } while ( (NOW() - start) < MILLISECS(1) );
}

unsigned long avail_scrub_pages(void)
{
    return scrub_pages;
}

static __init int page_scrub_init(void)
{
    open_softirq(PAGE_SCRUB_SOFTIRQ, page_scrub_softirq);
    return 0;
}
__initcall(page_scrub_init);

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */