ia64/xen-unstable

annotate xen/include/xen/mm.h @ 5398:fcbdfa6fe74d

bitkeeper revision 1.1699.1.1 (42a85f6955KSFCuD5KSRtCwU-dzakQ)

Clean up the page allocator interface a little. In particular,
physical addresses are now passed as physaddr_t rather than unsigned
long (required for 32-bit PAE mode).
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Jun 09 15:25:29 2005 +0000 (2005-06-09)
parents 79fea09c3b44
children 849b58da37b7
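
For context on the physaddr_t change described above: under 32-bit PAE, physical addresses can exceed 32 bits, so a 32-bit unsigned long can no longer hold them; this is presumably also why <xen/types.h> is newly included in this revision. A hypothetical sketch of the idea only — the actual typedef lives elsewhere in the tree, and the guard macro name here is illustrative:

    /* Illustrative only: a 64-bit physical-address type on 32-bit PAE
     * builds, native word size otherwise. Not the tree's real typedef. */
    #ifdef CONFIG_X86_PAE
    typedef unsigned long long physaddr_t;
    #else
    typedef unsigned long physaddr_t;
    #endif
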
rev   line source
kaf24@1210 1
kaf24@1211 2 #ifndef __XEN_MM_H__
kaf24@1211 3 #define __XEN_MM_H__
kaf24@1210 4
kaf24@4267 5 #include <xen/config.h>
kaf24@5398 6 #include <xen/types.h>
kaf24@4267 7 #include <xen/list.h>
kaf24@4267 8 #include <xen/spinlock.h>
kaf24@4267 9
kaf24@1941 10 struct domain;
kaf24@1941 11 struct pfn_info;
kaf24@1936 12
kaf24@3354 13 /* Boot-time allocator. Turns into generic allocator after bootstrap. */
kaf24@5398 14 physaddr_t init_boot_allocator(physaddr_t bitmap_start);
kaf24@5398 15 void init_boot_pages(physaddr_t ps, physaddr_t pe);
kaf24@5398 16 unsigned long alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align);
kaf24@3354 17 void end_boot_allocator(void);
kaf24@3354 18
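
A hedged sketch of the call order these declarations imply: initialise the boot allocator, register usable RAM ranges, satisfy early allocations, then let end_boot_allocator() hand the remaining pages over to the generic allocator. The address variables (bitmap_start, ram_end) are hypothetical:

    /* Illustrative bringup sequence; variable names are made up. */
    physaddr_t bitmap_end = init_boot_allocator(bitmap_start);
    init_boot_pages(bitmap_end, ram_end);      /* register usable RAM      */
    unsigned long pfn = alloc_boot_pages(16, 16); /* 16 pages, 16-pfn aligned */
    end_boot_allocator();                      /* switch to generic allocator */
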
kaf24@2806 19 /* Generic allocator. These functions are *not* interrupt-safe. */
kaf24@3461 20 void init_heap_pages(
kaf24@3461 21     unsigned int zone, struct pfn_info *pg, unsigned long nr_pages);
kaf24@3461 22 struct pfn_info *alloc_heap_pages(unsigned int zone, unsigned int order);
kaf24@3461 23 void free_heap_pages(
kaf24@3461 24     unsigned int zone, struct pfn_info *pg, unsigned int order);
kaf24@2772 25 void scrub_heap_pages(void);
kaf24@1936 26
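
The generic allocator is zone-indexed, with the order argument selecting a power-of-two run of pages. A minimal sketch, assuming a zone constant such as MEMZONE_DOM defined elsewhere in the tree:

    /* Grab and release an order-2 (four-page) run from one zone. */
    struct pfn_info *pg = alloc_heap_pages(MEMZONE_DOM, 2);
    if ( pg != NULL )
        free_heap_pages(MEMZONE_DOM, pg, 2);
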
kaf24@2806 27 /* Xen suballocator. These functions are interrupt-safe. */
kaf24@5398 28 void init_xenheap_pages(physaddr_t ps, physaddr_t pe);
kaf24@5398 29 void *alloc_xenheap_pages(unsigned int order);
kaf24@5398 30 void free_xenheap_pages(void *v, unsigned int order);
kaf24@1920 31 #define alloc_xenheap_page() (alloc_xenheap_pages(0))
kaf24@5398 32 #define free_xenheap_page(v) (free_xenheap_pages(v,0))
kaf24@1749 33
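
A sketch using the single-page convenience macros above. Note from the signatures that the Xen heap returns a usable virtual address (void *), unlike the domain heap below, which deals in struct pfn_info pointers:

    /* One Xen-heap page; safe from interrupt context per the comment above. */
    void *v = alloc_xenheap_page();
    if ( v != NULL )
        free_xenheap_page(v);
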
kaf24@2806 34 /* Domain suballocator. These functions are *not* interrupt-safe. */
kaf24@5398 35 void init_domheap_pages(physaddr_t ps, physaddr_t pe);
kaf24@3461 36 struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order);
kaf24@3461 37 void free_domheap_pages(struct pfn_info *pg, unsigned int order);
kaf24@1936 38 unsigned long avail_domheap_pages(void);
kaf24@5398 39 #define alloc_domheap_page(d) (alloc_domheap_pages(d,0))
kaf24@5398 40 #define free_domheap_page(p) (free_domheap_pages(p,0))
kaf24@1210 41
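
By contrast, the domain heap accounts pages to a domain and hands back a struct pfn_info *, with no virtual mapping implied by the interface. A minimal sketch, assuming a valid struct domain *d:

    /* One order-0 page charged to domain d. */
    struct pfn_info *pg = alloc_domheap_page(d);
    if ( pg != NULL )
        free_domheap_page(pg);
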
kaf24@4267 42 /* Automatic page scrubbing for dead domains. */
kaf24@4267 43 extern struct list_head page_scrub_list;
kaf24@4267 44 #define page_scrub_schedule_work()             \
kaf24@4267 45     do {                                       \
kaf24@4267 46         if ( !list_empty(&page_scrub_list) )   \
kaf24@4267 47             raise_softirq(PAGE_SCRUB_SOFTIRQ); \
kaf24@4267 48     } while ( 0 )
kaf24@4267 49
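
A hedged sketch of how a tear-down path might use this: queue a dead domain's pages on page_scrub_list, then poke the softirq. The list_head linkage inside struct pfn_info and any locking around the list are assumptions not shown in this header:

    /* Queue a page for scrubbing, then kick the scrubber if needed. */
    list_add(&pg->list, &page_scrub_list);
    page_scrub_schedule_work(); /* raises PAGE_SCRUB_SOFTIRQ if list non-empty */
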
kaf24@1941 50 #include <asm/mm.h>
kaf24@1941 51
kaf24@5256 52 #ifndef sync_pagetable_state
kaf24@5256 53 #define sync_pagetable_state(d) ((void)0)
kaf24@5256 54 #endif
kaf24@5256 55
kaf24@1211 56 #endif /* __XEN_MM_H__ */