ia64/xen-unstable

view xen/include/xen/mm.h @ 19107:696351cde9a4

Allow memflags to be specified to alloc_xenheap_pages().

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jan 28 16:58:41 2009 +0000 (2009-01-28)
parents 2f993fde1dc6
children 5848b49b74fc
line source
/******************************************************************************
 * include/xen/mm.h
 *
 * Definitions for memory pages, frame numbers, addresses, allocations, etc.
 *
 * Note that Xen must handle several different physical 'address spaces' and
 * there is a consistent terminology for these:
 *
 * 1. gpfn/gpaddr: A guest-specific pseudo-physical frame number or address.
 * 2. gmfn/gmaddr: A machine address from the p.o.v. of a particular guest.
 * 3. mfn/maddr:   A real machine frame number or address.
 * 4. pfn/paddr:   Used in 'polymorphic' functions that work across all
 *                 address spaces, depending on context. See the pagetable
 *                 conversion macros in asm-x86/page.h for examples.
 *                 Also 'paddr_t' is big enough to store any physical address.
 *
 * This scheme provides consistent function and variable names even when
 * different guests are running in different memory-management modes.
 * 1. A guest running in auto-translated mode (e.g., shadow_mode_translate())
 *    will have gpfn == gmfn and gmfn != mfn.
 * 2. A paravirtualised x86 guest will have gpfn != gmfn and gmfn == mfn.
 * 3. A paravirtualised guest with no pseudophysical overlay will have
 *    gpfn == gmfn == mfn.
 *
 * Copyright (c) 2002-2006, K A Fraser <keir@xensource.com>
 */
28 #ifndef __XEN_MM_H__
29 #define __XEN_MM_H__
31 #include <xen/config.h>
32 #include <xen/types.h>
33 #include <xen/list.h>
34 #include <xen/spinlock.h>
36 struct domain;
37 struct page_info;
39 /* Boot-time allocator. Turns into generic allocator after bootstrap. */
40 paddr_t init_boot_allocator(paddr_t bitmap_start);
41 void init_boot_pages(paddr_t ps, paddr_t pe);
42 unsigned long alloc_boot_pages(
43 unsigned long nr_pfns, unsigned long pfn_align);
44 void end_boot_allocator(void);
46 /* Xen suballocator. These functions are interrupt-safe. */
47 void init_xenheap_pages(paddr_t ps, paddr_t pe);
48 void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);
49 void free_xenheap_pages(void *v, unsigned int order);
50 #define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
51 #define free_xenheap_page(v) (free_xenheap_pages(v,0))
53 /* Domain suballocator. These functions are *not* interrupt-safe.*/
54 void init_domheap_pages(paddr_t ps, paddr_t pe);
55 struct page_info *alloc_domheap_pages(
56 struct domain *d, unsigned int order, unsigned int memflags);
57 void free_domheap_pages(struct page_info *pg, unsigned int order);
58 unsigned long avail_domheap_pages_region(
59 unsigned int node, unsigned int min_width, unsigned int max_width);
60 unsigned long avail_domheap_pages(void);
61 #define alloc_domheap_page(d,f) (alloc_domheap_pages(d,0,f))
62 #define free_domheap_page(p) (free_domheap_pages(p,0))
64 void scrub_heap_pages(void);
66 int assign_pages(
67 struct domain *d,
68 struct page_info *pg,
69 unsigned int order,
70 unsigned int memflags);
/* memflags: option bits accepted by the page allocators above. */
#define _MEMF_no_refcount 0
#define MEMF_no_refcount  (1U<<_MEMF_no_refcount)
#define _MEMF_populate_on_demand 1
#define MEMF_populate_on_demand (1U<<_MEMF_populate_on_demand)
/*
 * Bits 8-15 encode a NUMA node preference. The node number is stored
 * biased by +1, so an all-zero memflags word expresses "no node
 * specified" (NOTE(review): the +1 bias is visible here; confirm the
 * decode convention against the allocator implementation).
 */
#define _MEMF_node 8
#define MEMF_node(n) ((((n)+1)&0xff)<<_MEMF_node)
/* Bits 24+ encode a maximum address-width request for the allocation. */
#define _MEMF_bits 24
#define MEMF_bits(n) ((n)<<_MEMF_bits)

#ifdef CONFIG_PAGEALLOC_MAX_ORDER
#define MAX_ORDER CONFIG_PAGEALLOC_MAX_ORDER
#else
#define MAX_ORDER 20 /* 2^20 contiguous pages */
#endif
88 /* Automatic page scrubbing for dead domains. */
89 extern struct list_head page_scrub_list;
90 #define page_scrub_schedule_work() \
91 do { \
92 if ( !list_empty(&page_scrub_list) ) \
93 raise_softirq(PAGE_SCRUB_SOFTIRQ); \
94 } while ( 0 )
95 #define page_scrub_kick() \
96 do { \
97 if ( !list_empty(&page_scrub_list) ) \
98 cpumask_raise_softirq(cpu_online_map, PAGE_SCRUB_SOFTIRQ); \
99 } while ( 0 )
100 unsigned long avail_scrub_pages(void);
102 #include <asm/mm.h>
104 int guest_remove_page(struct domain *d, unsigned long gmfn);
106 /* Returns TRUE if the whole page at @mfn is ordinary RAM. */
107 int page_is_conventional_ram(unsigned long mfn);
109 extern unsigned long *alloc_bitmap; /* for vmcoreinfo */
111 #endif /* __XEN_MM_H__ */