ia64/xen-unstable

annotate xen/include/xen/mm.h @ 19259:507b264f0a21

vtd: boolean boot parameter to allow inclusive mapping of all memory below 4GB

Signed-off-by: Ross Philipson <ross.philipson@citrix.com>
Author: Keir Fraser <keir.fraser@citrix.com>
Date: Mon Mar 02 11:23:23 2009 +0000
Parents: 97ca3400d17c
Children: dd489125a2e7
/******************************************************************************
 * include/xen/mm.h
 *
 * Definitions for memory pages, frame numbers, addresses, allocations, etc.
 *
 * Note that Xen must handle several different physical 'address spaces' and
 * there is a consistent terminology for these:
 *
 * 1. gpfn/gpaddr: A guest-specific pseudo-physical frame number or address.
 * 2. gmfn/gmaddr: A machine address from the p.o.v. of a particular guest.
 * 3. mfn/maddr:   A real machine frame number or address.
 * 4. pfn/paddr:   Used in 'polymorphic' functions that work across all
 *                 address spaces, depending on context. See the pagetable
 *                 conversion macros in asm-x86/page.h for examples.
 *                 Also 'paddr_t' is big enough to store any physical address.
 *
 * This scheme provides consistent function and variable names even when
 * different guests are running in different memory-management modes.
 * 1. A guest running in auto-translated mode (e.g., shadow_mode_translate())
 *    will have gpfn == gmfn and gmfn != mfn.
 * 2. A paravirtualised x86 guest will have gpfn != gmfn and gmfn == mfn.
 * 3. A paravirtualised guest with no pseudophysical overlay will have
 *    gpfn == gmfn == mfn.
 *
 * Copyright (c) 2002-2006, K A Fraser <keir@xensource.com>
 */
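
/*
 * Illustrative sketch (not part of the original header): the frame-number to
 * physical-address relationship used by the conversion macros mentioned
 * above. PAGE_SHIFT comes from asm/page.h; the cast to paddr_t is what keeps
 * addresses above 4GB representable on 32-bit builds.
 *
 *     unsigned long pfn   = 0x12345;
 *     paddr_t       paddr = (paddr_t)pfn << PAGE_SHIFT;
 *     unsigned long back  = (unsigned long)(paddr >> PAGE_SHIFT);
 */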

#ifndef __XEN_MM_H__
#define __XEN_MM_H__

#include <xen/config.h>
#include <xen/types.h>
#include <xen/list.h>
#include <xen/spinlock.h>

struct domain;
struct page_info;

/* Boot-time allocator. Turns into generic allocator after bootstrap. */
paddr_t init_boot_allocator(paddr_t bitmap_start);
void init_boot_pages(paddr_t ps, paddr_t pe);
unsigned long alloc_boot_pages(
    unsigned long nr_pfns, unsigned long pfn_align);
void end_boot_allocator(void);

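/*
 * Illustrative sketch (assumed usage, not from the original header): the
 * boot allocator hands out page frames by frame number before the heap
 * allocators are initialised. Allocating one page frame with single-page
 * alignment:
 *
 *     unsigned long pfn = alloc_boot_pages(1, 1);
 */
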
/* Xen suballocator. These functions are interrupt-safe. */
void init_xenheap_pages(paddr_t ps, paddr_t pe);
void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);
void free_xenheap_pages(void *v, unsigned int order);
#define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
#define free_xenheap_page(v) (free_xenheap_pages(v,0))

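/*
 * Illustrative sketch (assumed usage): xenheap allocations come back as
 * directly usable virtual addresses; order is log2 of the number of
 * contiguous pages. Allocating and freeing four contiguous pages:
 *
 *     void *buf = alloc_xenheap_pages(2, 0);
 *     if ( buf != NULL )
 *         free_xenheap_pages(buf, 2);
 */
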
/* Domain suballocator. These functions are *not* interrupt-safe. */
void init_domheap_pages(paddr_t ps, paddr_t pe);
struct page_info *alloc_domheap_pages(
    struct domain *d, unsigned int order, unsigned int memflags);
void free_domheap_pages(struct page_info *pg, unsigned int order);
unsigned long avail_domheap_pages_region(
    unsigned int node, unsigned int min_width, unsigned int max_width);
unsigned long avail_domheap_pages(void);
#define alloc_domheap_page(d,f) (alloc_domheap_pages(d,0,f))
#define free_domheap_page(p) (free_domheap_pages(p,0))

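/*
 * Illustrative sketch (assumed usage): domheap pages are accounted to a
 * domain and returned as struct page_info pointers rather than mapped
 * addresses; passing NULL for d requests a page not assigned to any domain.
 *
 *     struct page_info *pg = alloc_domheap_page(d, 0);
 *     if ( pg != NULL )
 *         free_domheap_page(pg);
 */
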
void scrub_heap_pages(void);

int assign_pages(
    struct domain *d,
    struct page_info *pg,
    unsigned int order,
    unsigned int memflags);

/* memflags: */
#define _MEMF_no_refcount 0
#define  MEMF_no_refcount (1U<<_MEMF_no_refcount)
#define _MEMF_populate_on_demand 1
#define  MEMF_populate_on_demand (1U<<_MEMF_populate_on_demand)
#define _MEMF_node        8
#define  MEMF_node(n)     ((((n)+1)&0xff)<<_MEMF_node)
#define _MEMF_bits        24
#define  MEMF_bits(n)     ((n)<<_MEMF_bits)
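
/*
 * Illustrative sketch (assumed usage): memflags compose by OR. For example,
 * restricting an allocation to machine addresses below 2^32 (e.g. for a
 * 32-bit DMA-capable device) while preferring NUMA node n:
 *
 *     pg = alloc_domheap_pages(d, order, MEMF_bits(32) | MEMF_node(n));
 */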

#ifdef CONFIG_PAGEALLOC_MAX_ORDER
#define MAX_ORDER CONFIG_PAGEALLOC_MAX_ORDER
#else
#define MAX_ORDER 20 /* 2^20 contiguous pages */
#endif

/*
 * By default a page list entry is a plain list_head. An architecture's
 * asm/mm.h may #undef page_list_entry and supply its own entry type (e.g.
 * one storing MFNs instead of pointers), in which case the compact
 * implementation below is used instead of the <xen/list.h> one.
 */
#define page_list_entry list_head

#include <asm/mm.h>

#ifndef page_list_entry
struct page_list_head
{
    struct page_info *next, *tail;
};
/* These must only have instances in struct page_info. */
# define page_list_entry

# define PAGE_LIST_HEAD_INIT(name) { NULL, NULL }
# define PAGE_LIST_HEAD(name) \
    struct page_list_head name = PAGE_LIST_HEAD_INIT(name)
# define INIT_PAGE_LIST_HEAD(head) ((head)->tail = (head)->next = NULL)
# define INIT_PAGE_LIST_ENTRY(ent) ((ent)->prev = (ent)->next = ~0)

/*
 * Entries are linked by MFN rather than by pointer; an MFN of ~0 marks
 * either end of the list.
 */
static inline int
page_list_empty(const struct page_list_head *head)
{
    return !head->next;
}
static inline struct page_info *
page_list_first(const struct page_list_head *head)
{
    return head->next;
}
static inline struct page_info *
page_list_next(const struct page_info *page,
               const struct page_list_head *head)
{
    return page != head->tail ? mfn_to_page(page->list.next) : NULL;
}
static inline struct page_info *
page_list_prev(const struct page_info *page,
               const struct page_list_head *head)
{
    return page != head->next ? mfn_to_page(page->list.prev) : NULL;
}
static inline void
page_list_add(struct page_info *page, struct page_list_head *head)
{
    if ( head->next )
    {
        page->list.next = page_to_mfn(head->next);
        head->next->list.prev = page_to_mfn(page);
    }
    else
    {
        head->tail = page;
        page->list.next = ~0;
    }
    page->list.prev = ~0;
    head->next = page;
}
static inline void
page_list_add_tail(struct page_info *page, struct page_list_head *head)
{
    page->list.next = ~0;
    if ( head->next )
    {
        page->list.prev = page_to_mfn(head->tail);
        head->tail->list.next = page_to_mfn(page);
    }
    else
    {
        page->list.prev = ~0;
        head->next = page;
    }
    head->tail = page;
}
/* Unlink @page if it is the head or tail of @head; returns 1 if it was. */
static inline bool_t
__page_list_del_head(struct page_info *page, struct page_list_head *head,
                     struct page_info *next, struct page_info *prev)
{
    if ( head->next == page )
    {
        if ( head->tail != page )
        {
            next->list.prev = ~0;
            head->next = next;
        }
        else
            head->tail = head->next = NULL;
        return 1;
    }

    if ( head->tail == page )
    {
        prev->list.next = ~0;
        head->tail = prev;
        return 1;
    }

    return 0;
}
static inline void
page_list_del(struct page_info *page, struct page_list_head *head)
{
    struct page_info *next = mfn_to_page(page->list.next);
    struct page_info *prev = mfn_to_page(page->list.prev);

    if ( !__page_list_del_head(page, head, next, prev) )
    {
        next->list.prev = page->list.prev;
        prev->list.next = page->list.next;
    }
}
static inline void
page_list_del2(struct page_info *page, struct page_list_head *head1,
               struct page_list_head *head2)
{
    struct page_info *next = mfn_to_page(page->list.next);
    struct page_info *prev = mfn_to_page(page->list.prev);

    if ( !__page_list_del_head(page, head1, next, prev) &&
         !__page_list_del_head(page, head2, next, prev) )
    {
        next->list.prev = page->list.prev;
        prev->list.next = page->list.next;
    }
}
static inline struct page_info *
page_list_remove_head(struct page_list_head *head)
{
    struct page_info *page = head->next;

    if ( page )
        page_list_del(page, head);

    return page;
}

#define page_list_for_each(pos, head) \
    for ( pos = (head)->next; pos; pos = page_list_next(pos, head) )
#define page_list_for_each_safe(pos, tmp, head) \
    for ( pos = (head)->next;                             \
          pos ? (tmp = page_list_next(pos, head), 1) : 0; \
          pos = tmp )
#define page_list_for_each_safe_reverse(pos, tmp, head) \
    for ( pos = (head)->tail;                             \
          pos ? (tmp = page_list_prev(pos, head), 1) : 0; \
          pos = tmp )
#else
# define page_list_head                  list_head
# define PAGE_LIST_HEAD_INIT             LIST_HEAD_INIT
# define PAGE_LIST_HEAD                  LIST_HEAD
# define INIT_PAGE_LIST_HEAD             INIT_LIST_HEAD
# define INIT_PAGE_LIST_ENTRY            INIT_LIST_HEAD
# define page_list_empty                 list_empty
# define page_list_first(hd)             list_entry((hd)->next, \
                                                    struct page_info, list)
# define page_list_next(pg, hd)          list_entry((pg)->list.next, \
                                                    struct page_info, list)
# define page_list_add(pg, hd)           list_add(&(pg)->list, hd)
# define page_list_add_tail(pg, hd)      list_add_tail(&(pg)->list, hd)
# define page_list_del(pg, hd)           list_del(&(pg)->list)
# define page_list_del2(pg, hd1, hd2)    list_del(&(pg)->list)
# define page_list_remove_head(hd)       (!page_list_empty(hd) ? \
    ({ \
        struct page_info *__pg = page_list_first(hd); \
        list_del(&__pg->list); \
        __pg; \
    }) : NULL)
# define page_list_for_each(pos, head)   list_for_each_entry(pos, head, list)
# define page_list_for_each_safe(pos, tmp, head) \
    list_for_each_entry_safe(pos, tmp, head, list)
# define page_list_for_each_safe_reverse(pos, tmp, head) \
    list_for_each_entry_safe_reverse(pos, tmp, head, list)
#endif

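/*
 * Illustrative sketch (assumed usage): walking a page list with the safe
 * iterator, which permits removing the current entry; d->page_list is
 * assumed here for illustration.
 *
 *     struct page_info *pg, *tmp;
 *     page_list_for_each_safe ( pg, tmp, &d->page_list )
 *         page_list_del(pg, &d->page_list);
 */
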
/* Automatic page scrubbing for dead domains. */
extern struct page_list_head page_scrub_list;
#define page_scrub_schedule_work()                \
    do {                                          \
        if ( !page_list_empty(&page_scrub_list) ) \
            raise_softirq(PAGE_SCRUB_SOFTIRQ);    \
    } while ( 0 )
#define page_scrub_kick()                                               \
    do {                                                                \
        if ( !page_list_empty(&page_scrub_list) )                       \
            cpumask_raise_softirq(cpu_online_map, PAGE_SCRUB_SOFTIRQ);  \
    } while ( 0 )
unsigned long avail_scrub_pages(void);

int guest_remove_page(struct domain *d, unsigned long gmfn);

#define RAM_TYPE_CONVENTIONAL 0x00000001
#define RAM_TYPE_RESERVED     0x00000002
#define RAM_TYPE_UNUSABLE     0x00000004
#define RAM_TYPE_ACPI         0x00000008
/* Returns TRUE if the whole page at @mfn is of the requested RAM type(s). */
int page_is_ram_type(unsigned long mfn, unsigned long mem_type);
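
/*
 * Illustrative sketch (assumed usage): checking whether a frame is ordinary
 * or firmware-reserved RAM before mapping it, e.g. when building an
 * inclusive IOMMU mapping of memory below 4GB as in this changeset:
 *
 *     if ( page_is_ram_type(mfn, RAM_TYPE_CONVENTIONAL | RAM_TYPE_RESERVED) )
 *         ... map the page ...
 */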

extern unsigned long *alloc_bitmap; /* for vmcoreinfo */

#endif /* __XEN_MM_H__ */