ia64/xen-unstable

annotate xen/include/xen/mm.h @ 4267:3868fc815598

bitkeeper revision 1.1159.272.8 (4240716dixo5jLBihZPvbRrP21dn4g)

Schedule page scrubbing for dead domains off the per-CPU periodic
ticker. We take 10% of each busy CPU's time and all idle CPU time.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Mar 22 19:26:37 2005 +0000 (2005-03-22)
parents 04f50d583813
children 4a49af0cc9d9 2fa2e30bcbde
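
The description above implies a PAGE_SCRUB_SOFTIRQ handler that drains the
scrub list in small, time-bounded batches, so that busy CPUs give up only a
fraction of each tick while idle CPUs keep re-raising the softirq. What
follows is a minimal sketch of that idea, not the handler from this
changeset: SCRUB_BATCH, scrub_one_page(), MEMZONE_DOM, and the 'list' field
of struct pfn_info are assumed names used for illustration only.

#include <xen/mm.h>
#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/softirq.h>

#define SCRUB_BATCH 16  /* assumed batch size per softirq invocation */

/* Sketch: would be registered with open_softirq(PAGE_SCRUB_SOFTIRQ, ...). */
static void page_scrub_softirq(void)
{
    struct list_head *ent;
    struct pfn_info  *pg;
    int               i;

    for ( i = 0; i < SCRUB_BATCH; i++ )
    {
        /* Detach one queued page under the lock. */
        spin_lock(&page_scrub_lock);
        if ( list_empty(&page_scrub_list) )
        {
            spin_unlock(&page_scrub_lock);
            return;
        }
        ent = page_scrub_list.next;
        list_del(ent);
        spin_unlock(&page_scrub_lock);

        /* Zero the page and return it to the domain heap. */
        pg = list_entry(ent, struct pfn_info, list);
        scrub_one_page(pg);                  /* assumed helper */
        free_heap_pages(MEMZONE_DOM, pg, 0); /* assumed zone name */
    }

    /* Batch exhausted but work may remain: re-raise the softirq. */
    page_scrub_schedule_work();
}
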
source

#ifndef __XEN_MM_H__
#define __XEN_MM_H__

#include <xen/config.h>
#include <xen/list.h>
#include <xen/spinlock.h>

struct domain;
struct pfn_info;

/* Boot-time allocator. Turns into generic allocator after bootstrap. */
unsigned long init_boot_allocator(unsigned long bitmap_start);
void init_boot_pages(unsigned long ps, unsigned long pe);
unsigned long alloc_boot_pages(unsigned long size, unsigned long align);
void end_boot_allocator(void);

/* Generic allocator. These functions are *not* interrupt-safe. */
void init_heap_pages(
    unsigned int zone, struct pfn_info *pg, unsigned long nr_pages);
struct pfn_info *alloc_heap_pages(unsigned int zone, unsigned int order);
void free_heap_pages(
    unsigned int zone, struct pfn_info *pg, unsigned int order);
void scrub_heap_pages(void);

/* Xen suballocator. These functions are interrupt-safe. */
void init_xenheap_pages(unsigned long ps, unsigned long pe);
unsigned long alloc_xenheap_pages(unsigned int order);
void free_xenheap_pages(unsigned long p, unsigned int order);
#define alloc_xenheap_page() (alloc_xenheap_pages(0))
#define free_xenheap_page(_p) (free_xenheap_pages(_p,0))

/* Domain suballocator. These functions are *not* interrupt-safe. */
void init_domheap_pages(unsigned long ps, unsigned long pe);
struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order);
void free_domheap_pages(struct pfn_info *pg, unsigned int order);
unsigned long avail_domheap_pages(void);
#define alloc_domheap_page(_d) (alloc_domheap_pages(_d,0))
#define free_domheap_page(_p) (free_domheap_pages(_p,0))

/* Automatic page scrubbing for dead domains. */
extern spinlock_t page_scrub_lock;
extern struct list_head page_scrub_list;
#define page_scrub_schedule_work()              \
    do {                                        \
        if ( !list_empty(&page_scrub_list) )    \
            raise_softirq(PAGE_SCRUB_SOFTIRQ);  \
    } while ( 0 )

#include <asm/mm.h>

#endif /* __XEN_MM_H__ */
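
For context, a hedged sketch of the producer side: when a domain is torn
down, its pages can be queued on page_scrub_list and the softirq kicked via
page_scrub_schedule_work(). Only the lock, the list, and the macro come from
the header above; the helper name and the 'list' field of struct pfn_info
are assumptions for illustration.

/* Illustrative helper, not from this changeset. */
static void queue_page_for_scrub(struct pfn_info *pg)
{
    spin_lock(&page_scrub_lock);
    list_add(&pg->list, &page_scrub_list);
    spin_unlock(&page_scrub_lock);

    /* Raise PAGE_SCRUB_SOFTIRQ now that there is work pending. */
    page_scrub_schedule_work();
}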