direct-io.hg

view xen/common/page_alloc.c @ 10734:9b7e1ea4c4d2

[HVM] Sync the p2m table across all vcpus on x86_32p Xen.
We found that VGA acceleration does not work for SMP VMX guests on x86_32p
Xen. This is caused by the way the p2m table is constructed today: only the
1st l2 page table slot that maps p2m table pages is copied into the monitor
page tables of the non-vcpu0 vcpus when the VMX domain is created. But VGA
acceleration creates p2m table entries beyond the 1st l2 page table slot
after the HVM domain is created, so only vcpu0 sees these p2m entries and
the other vcpus cannot do VGA acceleration. (A toy sketch of the problem
and fix follows the changeset metadata below.)

Signed-off-by: Xin Li <xin.b.li@intel.com>
author kfraser@localhost.localdomain
date Wed Jul 26 11:34:12 2006 +0100 (2006-07-26)
parents 53f552ad4042
children 7cde0d938ef4
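
The sketch below is a toy model of the problem and fix described in the message above; it is not part of this changeset, and every name in it (NR_VCPUS, L2_SLOTS, monitor_l2, set_p2m_l2_entry) is hypothetical. It only captures the idea: an l2 entry that maps a p2m table page must be installed in every vcpu's monitor table, not just vcpu0's, otherwise the remaining vcpus keep a stale view of the p2m table.

/* Toy model -- hypothetical names, not Xen code. */
#define NR_VCPUS 4
#define L2_SLOTS 1024

/* One monitor l2 table per vcpu. */
static unsigned long monitor_l2[NR_VCPUS][L2_SLOTS];

/* Install an l2 entry mapping a p2m table page.  Writing it into vcpu0's
 * table only reproduces the bug; the loop below is the essence of the fix:
 * propagate the entry to every vcpu's monitor table. */
static void set_p2m_l2_entry(unsigned int slot, unsigned long l2e)
{
    unsigned int v;

    for ( v = 0; v < NR_VCPUS; v++ )
        monitor_l2[v][slot] = l2e;
}
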
line source
1 /******************************************************************************
2 * page_alloc.c
3 *
4 * Simple buddy heap allocator for Xen.
5 *
6 * Copyright (c) 2002-2004 K A Fraser
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
23 #include <xen/config.h>
24 #include <xen/init.h>
25 #include <xen/types.h>
26 #include <xen/lib.h>
27 #include <xen/perfc.h>
28 #include <xen/sched.h>
29 #include <xen/spinlock.h>
30 #include <xen/mm.h>
31 #include <xen/irq.h>
32 #include <xen/softirq.h>
33 #include <xen/shadow.h>
34 #include <xen/domain_page.h>
35 #include <xen/keyhandler.h>
36 #include <asm/page.h>
38 /*
39 * Comma-separated list of hexadecimal page numbers containing bad bytes.
40 * e.g. 'badpage=0x3f45,0x8a321'.
41 */
42 static char opt_badpage[100] = "";
43 string_param("badpage", opt_badpage);
45 /*
46 * Amount of memory to reserve in a low-memory (<4GB) pool for specific
47 * allocation requests. Ordinary requests will not fall back to the
48 * lowmem emergency pool.
49 */
50 static unsigned long lowmem_emergency_pool_pages;
51 static void parse_lowmem_emergency_pool(char *s)
52 {
53 unsigned long long bytes;
54 bytes = parse_size_and_unit(s);
55 lowmem_emergency_pool_pages = bytes >> PAGE_SHIFT;
56 }
57 custom_param("lowmem_emergency_pool", parse_lowmem_emergency_pool);
59 #define round_pgdown(_p) ((_p)&PAGE_MASK)
60 #define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
62 static DEFINE_SPINLOCK(page_scrub_lock);
63 LIST_HEAD(page_scrub_list);
64 static unsigned long scrub_pages;
66 /*********************
67 * ALLOCATION BITMAP
68 * One bit per page of memory. Bit set => page is allocated.
69 */
71 static unsigned long *alloc_bitmap;
72 #define PAGES_PER_MAPWORD (sizeof(unsigned long) * 8)
74 #define allocated_in_map(_pn) \
75 ( !! (alloc_bitmap[(_pn)/PAGES_PER_MAPWORD] & \
76 (1UL<<((_pn)&(PAGES_PER_MAPWORD-1)))) )
78 /*
79 * Hint regarding bitwise arithmetic in map_{alloc,free}:
80 * -(1<<n) sets all bits >= n.
81 * (1<<n)-1 sets all bits < n.
82 * Variable names in map_{alloc,free}:
83 * *_idx == Index into `alloc_bitmap' array.
84 * *_off == Bit offset within an element of the `alloc_bitmap' array.
85 */
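/* Annotation (not part of the original file): a worked example of the masks
 * above, using an 8-bit word and n == 3:
 *     -(1<<3)  == 1111 1000b   (all bits >= 3 set)
 *     (1<<3)-1 == 0000 0111b   (all bits <  3 set)
 * map_alloc() ORs such masks in to set a run of bits; map_free() ANDs with
 * the complementary masks to clear the same run. */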
87 static void map_alloc(unsigned long first_page, unsigned long nr_pages)
88 {
89 unsigned long start_off, end_off, curr_idx, end_idx;
91 #ifndef NDEBUG
92 unsigned long i;
93 /* Check that the block isn't already allocated. */
94 for ( i = 0; i < nr_pages; i++ )
95 ASSERT(!allocated_in_map(first_page + i));
96 #endif
98 curr_idx = first_page / PAGES_PER_MAPWORD;
99 start_off = first_page & (PAGES_PER_MAPWORD-1);
100 end_idx = (first_page + nr_pages) / PAGES_PER_MAPWORD;
101 end_off = (first_page + nr_pages) & (PAGES_PER_MAPWORD-1);
103 if ( curr_idx == end_idx )
104 {
105 alloc_bitmap[curr_idx] |= ((1UL<<end_off)-1) & -(1UL<<start_off);
106 }
107 else
108 {
109 alloc_bitmap[curr_idx] |= -(1UL<<start_off);
110 while ( ++curr_idx < end_idx ) alloc_bitmap[curr_idx] = ~0UL;
111 alloc_bitmap[curr_idx] |= (1UL<<end_off)-1;
112 }
113 }
116 static void map_free(unsigned long first_page, unsigned long nr_pages)
117 {
118 unsigned long start_off, end_off, curr_idx, end_idx;
120 #ifndef NDEBUG
121 unsigned long i;
122 /* Check that the block isn't already freed. */
123 for ( i = 0; i < nr_pages; i++ )
124 ASSERT(allocated_in_map(first_page + i));
125 #endif
127 curr_idx = first_page / PAGES_PER_MAPWORD;
128 start_off = first_page & (PAGES_PER_MAPWORD-1);
129 end_idx = (first_page + nr_pages) / PAGES_PER_MAPWORD;
130 end_off = (first_page + nr_pages) & (PAGES_PER_MAPWORD-1);
132 if ( curr_idx == end_idx )
133 {
134 alloc_bitmap[curr_idx] &= -(1UL<<end_off) | ((1UL<<start_off)-1);
135 }
136 else
137 {
138 alloc_bitmap[curr_idx] &= (1UL<<start_off)-1;
139 while ( ++curr_idx != end_idx ) alloc_bitmap[curr_idx] = 0;
140 alloc_bitmap[curr_idx] &= -(1UL<<end_off);
141 }
142 }
146 /*************************
147 * BOOT-TIME ALLOCATOR
148 */
150 /* Initialise allocator to handle up to @max_page pages. */
151 paddr_t init_boot_allocator(paddr_t bitmap_start)
152 {
153 unsigned long bitmap_size;
155 bitmap_start = round_pgup(bitmap_start);
157 /*
158 * Allocate space for the allocation bitmap. Include an extra longword
159 * of padding for possible overrun in map_alloc and map_free.
160 */
161 bitmap_size = max_page / 8;
162 bitmap_size += sizeof(unsigned long);
163 bitmap_size = round_pgup(bitmap_size);
164 alloc_bitmap = (unsigned long *)maddr_to_virt(bitmap_start);
166 /* All allocated by default. */
167 memset(alloc_bitmap, ~0, bitmap_size);
169 return bitmap_start + bitmap_size;
170 }
172 void init_boot_pages(paddr_t ps, paddr_t pe)
173 {
174 unsigned long bad_spfn, bad_epfn, i;
175 char *p;
177 ps = round_pgup(ps);
178 pe = round_pgdown(pe);
179 if ( pe <= ps )
180 return;
182 map_free(ps >> PAGE_SHIFT, (pe - ps) >> PAGE_SHIFT);
184 /* Check new pages against the bad-page list. */
185 p = opt_badpage;
186 while ( *p != '\0' )
187 {
188 bad_spfn = simple_strtoul(p, &p, 0);
189 bad_epfn = bad_spfn;
191 if ( *p == '-' )
192 {
193 p++;
194 bad_epfn = simple_strtoul(p, &p, 0);
195 if ( bad_epfn < bad_spfn )
196 bad_epfn = bad_spfn;
197 }
199 if ( *p == ',' )
200 p++;
201 else if ( *p != '\0' )
202 break;
204 if ( bad_epfn == bad_spfn )
205 printk("Marking page %lx as bad\n", bad_spfn);
206 else
207 printk("Marking pages %lx through %lx as bad\n",
208 bad_spfn, bad_epfn);
210 for ( i = bad_spfn; i <= bad_epfn; i++ )
211 if ( (i < max_page) && !allocated_in_map(i) )
212 map_alloc(i, 1);
213 }
214 }
216 unsigned long alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align)
217 {
218 unsigned long pg, i;
220 for ( pg = 0; (pg + nr_pfns) < max_page; pg += pfn_align )
221 {
222 for ( i = 0; i < nr_pfns; i++ )
223 if ( allocated_in_map(pg + i) )
224 break;
226 if ( i == nr_pfns )
227 {
228 map_alloc(pg, nr_pfns);
229 return pg;
230 }
231 }
233 return 0;
234 }
238 /*************************
239 * BINARY BUDDY ALLOCATOR
240 */
242 #define MEMZONE_XEN 0
243 #define MEMZONE_DOM 1
244 #define MEMZONE_DMADOM 2
245 #define NR_ZONES 3
247 #define pfn_dom_zone_type(_pfn) \
248 (((_pfn) <= MAX_DMADOM_PFN) ? MEMZONE_DMADOM : MEMZONE_DOM)
250 static struct list_head heap[NR_ZONES][MAX_ORDER+1];
252 static unsigned long avail[NR_ZONES];
254 static DEFINE_SPINLOCK(heap_lock);
256 void end_boot_allocator(void)
257 {
258 unsigned long i, j;
259 int curr_free = 0, next_free = 0;
261 memset(avail, 0, sizeof(avail));
263 for ( i = 0; i < NR_ZONES; i++ )
264 for ( j = 0; j <= MAX_ORDER; j++ )
265 INIT_LIST_HEAD(&heap[i][j]);
267 /* Pages that are free now go to the domain sub-allocator. */
268 for ( i = 0; i < max_page; i++ )
269 {
270 curr_free = next_free;
271 next_free = !allocated_in_map(i+1);
272 if ( next_free )
273 map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */
274 if ( curr_free )
275 free_heap_pages(pfn_dom_zone_type(i), mfn_to_page(i), 0);
276 }
277 }
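/* Annotation (not part of the original file): the loop above frees page i
 * only after peeking at page i+1. A still-free page i+1 is temporarily
 * marked allocated so that free_heap_pages() cannot merge page i with a
 * neighbour that has not yet been handed to the heap (its PFN_ORDER is not
 * yet valid); page i+1 is then freed on the next iteration via the
 * remembered curr_free flag. */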
279 /* Hand the specified arbitrary page range to the specified heap zone. */
280 void init_heap_pages(
281 unsigned int zone, struct page_info *pg, unsigned long nr_pages)
282 {
283 unsigned long i;
285 ASSERT(zone < NR_ZONES);
287 for ( i = 0; i < nr_pages; i++ )
288 free_heap_pages(zone, pg+i, 0);
289 }
292 /* Allocate 2^@order contiguous pages. */
293 struct page_info *alloc_heap_pages(unsigned int zone, unsigned int order)
294 {
295 int i;
296 struct page_info *pg;
298 ASSERT(zone < NR_ZONES);
300 if ( unlikely(order > MAX_ORDER) )
301 return NULL;
303 spin_lock(&heap_lock);
305 /* Find smallest order which can satisfy the request. */
306 for ( i = order; i <= MAX_ORDER; i++ )
307 if ( !list_empty(&heap[zone][i]) )
308 goto found;
310 /* No suitable memory blocks. Fail the request. */
311 spin_unlock(&heap_lock);
312 return NULL;
314 found:
315 pg = list_entry(heap[zone][i].next, struct page_info, list);
316 list_del(&pg->list);
318 /* We may have to halve the chunk a number of times. */
319 while ( i != order )
320 {
321 PFN_ORDER(pg) = --i;
322 list_add_tail(&pg->list, &heap[zone][i]);
323 pg += 1 << i;
324 }
326 map_alloc(page_to_mfn(pg), 1 << order);
327 avail[zone] -= 1 << order;
329 spin_unlock(&heap_lock);
331 return pg;
332 }
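/* Annotation (not part of the original file): example of the halving loop
 * above. Serving an order-0 request from an order-2 block at mfn 0x100:
 * the order-1 half 0x100-0x101 goes back on the free list, then the
 * order-0 page 0x102, and page 0x103 is marked allocated and returned to
 * the caller. */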
335 /* Free 2^@order set of pages. */
336 void free_heap_pages(
337 unsigned int zone, struct page_info *pg, unsigned int order)
338 {
339 unsigned long mask;
341 ASSERT(zone < NR_ZONES);
342 ASSERT(order <= MAX_ORDER);
344 spin_lock(&heap_lock);
346 map_free(page_to_mfn(pg), 1 << order);
347 avail[zone] += 1 << order;
349 /* Merge chunks as far as possible. */
350 while ( order < MAX_ORDER )
351 {
352 mask = 1 << order;
354 if ( (page_to_mfn(pg) & mask) )
355 {
356 /* Merge with predecessor block? */
357 if ( allocated_in_map(page_to_mfn(pg)-mask) ||
358 (PFN_ORDER(pg-mask) != order) )
359 break;
360 list_del(&(pg-mask)->list);
361 pg -= mask;
362 }
363 else
364 {
365 /* Merge with successor block? */
366 if ( allocated_in_map(page_to_mfn(pg)+mask) ||
367 (PFN_ORDER(pg+mask) != order) )
368 break;
369 list_del(&(pg+mask)->list);
370 }
372 order++;
373 }
375 PFN_ORDER(pg) = order;
376 list_add_tail(&pg->list, &heap[zone][order]);
378 spin_unlock(&heap_lock);
379 }
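/* Annotation (not part of the original file): example of the merge loop
 * above. Freeing the order-0 page at mfn 0x103: bit 0 of the mfn is set,
 * so its buddy is the predecessor 0x102; if that page is free and of
 * order 0, the pair becomes an order-1 block at 0x102, whose buddy is
 * 0x100, and so on until a buddy is allocated, of a different order, or
 * MAX_ORDER is reached. */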
382 /*
383 * Scrub all unallocated pages in all heap zones. This function is more
384 * convoluted than appears necessary because we do not want to continuously
385 * hold the lock or disable interrupts while scrubbing very large memory areas.
386 */
387 void scrub_heap_pages(void)
388 {
389 void *p;
390 unsigned long pfn;
392 printk("Scrubbing Free RAM: ");
394 for ( pfn = 0; pfn < max_page; pfn++ )
395 {
396 /* Every 100MB, print a progress dot. */
397 if ( (pfn % ((100*1024*1024)/PAGE_SIZE)) == 0 )
398 printk(".");
400 process_pending_timers();
402 /* Quick lock-free check. */
403 if ( allocated_in_map(pfn) )
404 continue;
406 spin_lock_irq(&heap_lock);
408 /* Re-check page status with lock held. */
409 if ( !allocated_in_map(pfn) )
410 {
411 if ( IS_XEN_HEAP_FRAME(mfn_to_page(pfn)) )
412 {
413 p = page_to_virt(mfn_to_page(pfn));
414 memguard_unguard_range(p, PAGE_SIZE);
415 clear_page(p);
416 memguard_guard_range(p, PAGE_SIZE);
417 }
418 else
419 {
420 p = map_domain_page(pfn);
421 clear_page(p);
422 unmap_domain_page(p);
423 }
424 }
426 spin_unlock_irq(&heap_lock);
427 }
429 printk("done.\n");
430 }
434 /*************************
435 * XEN-HEAP SUB-ALLOCATOR
436 */
438 void init_xenheap_pages(paddr_t ps, paddr_t pe)
439 {
440 unsigned long flags;
442 ps = round_pgup(ps);
443 pe = round_pgdown(pe);
444 if ( pe <= ps )
445 return;
447 memguard_guard_range(maddr_to_virt(ps), pe - ps);
449 /*
450 * Yuk! Ensure there is a one-page buffer between Xen and Dom zones, to
451 * prevent merging of power-of-two blocks across the zone boundary.
452 */
453 if ( !IS_XEN_HEAP_FRAME(maddr_to_page(pe)) )
454 pe -= PAGE_SIZE;
456 local_irq_save(flags);
457 init_heap_pages(MEMZONE_XEN, maddr_to_page(ps), (pe - ps) >> PAGE_SHIFT);
458 local_irq_restore(flags);
459 }
462 void *alloc_xenheap_pages(unsigned int order)
463 {
464 unsigned long flags;
465 struct page_info *pg;
466 int i;
468 local_irq_save(flags);
469 pg = alloc_heap_pages(MEMZONE_XEN, order);
470 local_irq_restore(flags);
472 if ( unlikely(pg == NULL) )
473 goto no_memory;
475 memguard_unguard_range(page_to_virt(pg), 1 << (order + PAGE_SHIFT));
477 for ( i = 0; i < (1 << order); i++ )
478 {
479 pg[i].count_info = 0;
480 pg[i].u.inuse._domain = 0;
481 pg[i].u.inuse.type_info = 0;
482 }
484 return page_to_virt(pg);
486 no_memory:
487 printk("Cannot handle page request order %d!\n", order);
488 return NULL;
489 }
492 void free_xenheap_pages(void *v, unsigned int order)
493 {
494 unsigned long flags;
496 if ( v == NULL )
497 return;
499 memguard_guard_range(v, 1 << (order + PAGE_SHIFT));
501 local_irq_save(flags);
502 free_heap_pages(MEMZONE_XEN, virt_to_page(v), order);
503 local_irq_restore(flags);
504 }
508 /*************************
509 * DOMAIN-HEAP SUB-ALLOCATOR
510 */
512 void init_domheap_pages(paddr_t ps, paddr_t pe)
513 {
514 unsigned long s_tot, e_tot, s_dma, e_dma, s_nrm, e_nrm;
516 ASSERT(!in_irq());
518 s_tot = round_pgup(ps) >> PAGE_SHIFT;
519 e_tot = round_pgdown(pe) >> PAGE_SHIFT;
521 s_dma = min(s_tot, MAX_DMADOM_PFN + 1);
522 e_dma = min(e_tot, MAX_DMADOM_PFN + 1);
523 if ( s_dma < e_dma )
524 init_heap_pages(MEMZONE_DMADOM, mfn_to_page(s_dma), e_dma - s_dma);
526 s_nrm = max(s_tot, MAX_DMADOM_PFN + 1);
527 e_nrm = max(e_tot, MAX_DMADOM_PFN + 1);
528 if ( s_nrm < e_nrm )
529 init_heap_pages(MEMZONE_DOM, mfn_to_page(s_nrm), e_nrm - s_nrm);
530 }
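/* Annotation (not part of the original file): a range straddling the DMA
 * boundary is split in two. Assuming MAX_DMADOM_PFN is 0xFFFFF (pages
 * below 4GB), a range covering pfns 0xF0000-0x10FFFF is handed out as
 * 0xF0000-0xFFFFF to MEMZONE_DMADOM and 0x100000-0x10FFFF to MEMZONE_DOM. */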
533 int assign_pages(
534 struct domain *d,
535 struct page_info *pg,
536 unsigned int order,
537 unsigned int memflags)
538 {
539 unsigned long i;
541 spin_lock(&d->page_alloc_lock);
543 if ( unlikely(test_bit(_DOMF_dying, &d->domain_flags)) )
544 {
545 DPRINTK("Cannot assign page to domain%d -- dying.\n", d->domain_id);
546 goto fail;
547 }
549 if ( !(memflags & MEMF_no_refcount) )
550 {
551 if ( unlikely((d->tot_pages + (1 << order)) > d->max_pages) )
552 {
553 DPRINTK("Over-allocation for domain %u: %u > %u\n",
554 d->domain_id, d->tot_pages + (1 << order), d->max_pages);
555 goto fail;
556 }
558 if ( unlikely(d->tot_pages == 0) )
559 get_knownalive_domain(d);
561 d->tot_pages += 1 << order;
562 }
564 for ( i = 0; i < (1 << order); i++ )
565 {
566 ASSERT(page_get_owner(&pg[i]) == NULL);
567 ASSERT((pg[i].count_info & ~(PGC_allocated | 1)) == 0);
568 page_set_owner(&pg[i], d);
569 wmb(); /* Domain pointer must be visible before updating refcnt. */
570 pg[i].count_info = PGC_allocated | 1;
571 list_add_tail(&pg[i].list, &d->page_list);
572 }
574 spin_unlock(&d->page_alloc_lock);
575 return 0;
577 fail:
578 spin_unlock(&d->page_alloc_lock);
579 return -1;
580 }
583 struct page_info *alloc_domheap_pages(
584 struct domain *d, unsigned int order, unsigned int memflags)
585 {
586 struct page_info *pg = NULL;
587 cpumask_t mask;
588 unsigned long i;
590 ASSERT(!in_irq());
592 if ( !(memflags & MEMF_dma) )
593 {
594 pg = alloc_heap_pages(MEMZONE_DOM, order);
595 /* Failure? Then check if we can fall back to the DMA pool. */
596 if ( unlikely(pg == NULL) &&
597 ((order > MAX_ORDER) ||
598 (avail[MEMZONE_DMADOM] <
599 (lowmem_emergency_pool_pages + (1UL << order)))) )
600 return NULL;
601 }
603 if ( pg == NULL )
604 if ( (pg = alloc_heap_pages(MEMZONE_DMADOM, order)) == NULL )
605 return NULL;
607 mask = pg->u.free.cpumask;
608 tlbflush_filter(mask, pg->tlbflush_timestamp);
610 pg->count_info = 0;
611 pg->u.inuse._domain = 0;
612 pg->u.inuse.type_info = 0;
614 for ( i = 1; i < (1 << order); i++ )
615 {
616 /* Add in any extra CPUs that need flushing because of this page. */
617 cpumask_t extra_cpus_mask;
618 cpus_andnot(extra_cpus_mask, pg[i].u.free.cpumask, mask);
619 tlbflush_filter(extra_cpus_mask, pg[i].tlbflush_timestamp);
620 cpus_or(mask, mask, extra_cpus_mask);
622 pg[i].count_info = 0;
623 pg[i].u.inuse._domain = 0;
624 pg[i].u.inuse.type_info = 0;
625 page_set_owner(&pg[i], NULL);
626 }
628 if ( unlikely(!cpus_empty(mask)) )
629 {
630 perfc_incrc(need_flush_tlb_flush);
631 flush_tlb_mask(mask);
632 }
634 if ( (d != NULL) && assign_pages(d, pg, order, memflags) )
635 {
636 free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
637 return NULL;
638 }
640 return pg;
641 }
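/* Annotation (not part of the original file): each free page records which
 * CPUs may still hold stale TLB entries for it (u.free.cpumask) and when it
 * was freed (tlbflush_timestamp). tlbflush_filter() drops CPUs that have
 * flushed their TLBs since that time; any CPUs remaining in the accumulated
 * mask are flushed before the pages are handed out. */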
644 void free_domheap_pages(struct page_info *pg, unsigned int order)
645 {
646 int i, drop_dom_ref;
647 struct domain *d = page_get_owner(pg);
649 ASSERT(!in_irq());
651 if ( unlikely(IS_XEN_HEAP_FRAME(pg)) )
652 {
653 /* NB. May recursively lock from relinquish_memory(). */
654 spin_lock_recursive(&d->page_alloc_lock);
656 for ( i = 0; i < (1 << order); i++ )
657 list_del(&pg[i].list);
659 d->xenheap_pages -= 1 << order;
660 drop_dom_ref = (d->xenheap_pages == 0);
662 spin_unlock_recursive(&d->page_alloc_lock);
663 }
664 else if ( likely(d != NULL) )
665 {
666 /* NB. May recursively lock from relinquish_memory(). */
667 spin_lock_recursive(&d->page_alloc_lock);
669 for ( i = 0; i < (1 << order); i++ )
670 {
671 shadow_drop_references(d, &pg[i]);
672 ASSERT((pg[i].u.inuse.type_info & PGT_count_mask) == 0);
673 pg[i].tlbflush_timestamp = tlbflush_current_time();
674 pg[i].u.free.cpumask = d->domain_dirty_cpumask;
675 list_del(&pg[i].list);
676 }
678 d->tot_pages -= 1 << order;
679 drop_dom_ref = (d->tot_pages == 0);
681 spin_unlock_recursive(&d->page_alloc_lock);
683 if ( likely(!test_bit(_DOMF_dying, &d->domain_flags)) )
684 {
685 free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
686 }
687 else
688 {
689 /*
690 * Normally we expect a domain to clear pages before freeing them,
691 * if it cares about the secrecy of their contents. However, after
692 * a domain has died we assume responsibility for erasure.
693 */
694 for ( i = 0; i < (1 << order); i++ )
695 {
696 spin_lock(&page_scrub_lock);
697 list_add(&pg[i].list, &page_scrub_list);
698 scrub_pages++;
699 spin_unlock(&page_scrub_lock);
700 }
701 }
702 }
703 else
704 {
705 /* Freeing anonymous domain-heap pages. */
706 for ( i = 0; i < (1 << order); i++ )
707 pg[i].u.free.cpumask = CPU_MASK_NONE;
708 free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
709 drop_dom_ref = 0;
710 }
712 if ( drop_dom_ref )
713 put_domain(d);
714 }
717 unsigned long avail_domheap_pages(void)
718 {
719 unsigned long avail_nrm, avail_dma;
721 avail_nrm = avail[MEMZONE_DOM];
723 avail_dma = avail[MEMZONE_DMADOM];
724 if ( avail_dma > lowmem_emergency_pool_pages )
725 avail_dma -= lowmem_emergency_pool_pages;
726 else
727 avail_dma = 0;
729 return avail_nrm + avail_dma;
730 }
733 static void pagealloc_keyhandler(unsigned char key)
734 {
735 printk("Physical memory information:\n");
736 printk(" Xen heap: %lukB free\n"
737 " DMA heap: %lukB free\n"
738 " Dom heap: %lukB free\n",
739 avail[MEMZONE_XEN]<<(PAGE_SHIFT-10),
740 avail[MEMZONE_DMADOM]<<(PAGE_SHIFT-10),
741 avail[MEMZONE_DOM]<<(PAGE_SHIFT-10));
742 }
745 static __init int pagealloc_keyhandler_init(void)
746 {
747 register_keyhandler('m', pagealloc_keyhandler, "memory info");
748 return 0;
749 }
750 __initcall(pagealloc_keyhandler_init);
754 /*************************
755 * PAGE SCRUBBING
756 */
758 static void page_scrub_softirq(void)
759 {
760 struct list_head *ent;
761 struct page_info *pg;
762 void *p;
763 int i;
764 s_time_t start = NOW();
766 /* Aim to do 1ms of work (ten percent of a 10ms jiffy). */
767 do {
768 spin_lock(&page_scrub_lock);
770 if ( unlikely((ent = page_scrub_list.next) == &page_scrub_list) )
771 {
772 spin_unlock(&page_scrub_lock);
773 return;
774 }
776 /* Peel up to 16 pages from the list. */
777 for ( i = 0; i < 16; i++ )
778 {
779 if ( ent->next == &page_scrub_list )
780 break;
781 ent = ent->next;
782 }
784 /* Remove peeled pages from the list. */
785 ent->next->prev = &page_scrub_list;
786 page_scrub_list.next = ent->next;
787 scrub_pages -= (i+1);
789 spin_unlock(&page_scrub_lock);
791 /* Working backwards, scrub each page in turn. */
792 while ( ent != &page_scrub_list )
793 {
794 pg = list_entry(ent, struct page_info, list);
795 ent = ent->prev;
796 p = map_domain_page(page_to_mfn(pg));
797 clear_page(p);
798 unmap_domain_page(p);
799 free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, 0);
800 }
801 } while ( (NOW() - start) < MILLISECS(1) );
802 }
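/* Annotation (not part of the original file): the softirq detaches up to 16
 * list entries at a time so the scrub lock is never held while pages are
 * being zeroed, then walks the detached entries backwards, clearing each
 * page and returning it to the buddy allocator, and repeats until roughly
 * 1ms of work has been done or the list is empty. */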
804 unsigned long avail_scrub_pages(void)
805 {
806 return scrub_pages;
807 }
809 static __init int page_scrub_init(void)
810 {
811 open_softirq(PAGE_SCRUB_SOFTIRQ, page_scrub_softirq);
812 return 0;
813 }
814 __initcall(page_scrub_init);
816 /*
817 * Local variables:
818 * mode: C
819 * c-set-style: "BSD"
820 * c-basic-offset: 4
821 * tab-width: 4
822 * indent-tabs-mode: nil
823 * End:
824 */