
annotate xen/include/xen/mm.h @ 19134:5848b49b74fc

x86-64: use MFNs for linking together pages on lists

Unless more than 16TB of memory are ever going to be supported in Xen,
this allows reducing the linked-list entry in struct page_info from 16
bytes to 8: a 32-bit MFN can name any of 2^32 frames, which at 4KiB per
frame covers 16TB.

This doesn't modify struct shadow_page_info yet, so to meet the
constraints of that 'mirror' structure the list entry is artificially
padded to 16 bytes in size. That workaround will be removed in a
subsequent patch.
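
For illustration, a minimal sketch of the compacted entry, assuming the
32-bit field layout this patch introduces on the x86 side (struct and
field names are an assumption, not a quote of the final code):

    /* Sketch: link pages together by 32-bit MFN rather than by pointer. */
    struct page_list_entry
    {
        u32 next, prev;    /* MFNs of neighbouring pages; ~0 means "none" */
    };

Two 4-byte MFNs in place of two 8-byte pointers save 8 bytes per
struct page_info.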

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jan 30 11:03:28 2009 +0000 (2009-01-30)
parents 696351cde9a4
children 97ca3400d17c
rev   line source
kaf24@8726 1 /******************************************************************************
kaf24@8726 2 * include/xen/mm.h
kaf24@8726 3 *
kaf24@8726 4 * Definitions for memory pages, frame numbers, addresses, allocations, etc.
kaf24@8726 5 *
kaf24@8726 6 * Note that Xen must handle several different physical 'address spaces' and
kaf24@8726 7 * there is a consistent terminology for these:
kaf24@8726 8 *
kaf24@8726 9 * 1. gpfn/gpaddr: A guest-specific pseudo-physical frame number or address.
kaf24@8726 10 * 2. gmfn/gmaddr: A machine address from the p.o.v. of a particular guest.
kaf24@8726 11 * 3. mfn/maddr: A real machine frame number or address.
kaf24@8726 12 * 4. pfn/paddr: Used in 'polymorphic' functions that work across all
kaf24@8726 13 * address spaces, depending on context. See the pagetable
kaf24@8726 14 * conversion macros in asm-x86/page.h for examples.
kaf24@8726 15 * Also 'paddr_t' is big enough to store any physical address.
kaf24@8726 16 *
kaf24@8726 17 * This scheme provides consistent function and variable names even when
kaf24@8726 18 * different guests are running in different memory-management modes.
kaf24@8726 19 * 1. A guest running in auto-translated mode (e.g., shadow_mode_translate())
kaf24@8726 20 * will have gpfn == gmfn and gmfn != mfn.
kaf24@8726 21 * 2. A paravirtualised x86 guest will have gpfn != gmfn and gmfn == mfn.
kaf24@8726 22 * 3. A paravirtualised guest with no pseudophysical overlay will have
kaf24@8726 23 * gpfn == gmfn == mfn.
kaf24@8726 24 *
kaf24@8726 25 * Copyright (c) 2002-2006, K A Fraser <keir@xensource.com>
kaf24@8726 26 */
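/*
 * Illustrative example (helper names are an assumption; the exact macros
 * live in the per-arch headers): converting between these spaces is always
 * explicit, e.g. translating a guest frame through the p2m layer:
 *
 *     mfn = gmfn_to_mfn(d, gmfn);     // identity for non-translated PV
 *     page = mfn_to_page(mfn);        // struct page_info for that frame
 */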
kaf24@1210 27
kaf24@1211 28 #ifndef __XEN_MM_H__
kaf24@1211 29 #define __XEN_MM_H__
kaf24@1210 30
kaf24@4267 31 #include <xen/config.h>
kaf24@5398 32 #include <xen/types.h>
kaf24@4267 33 #include <xen/list.h>
kaf24@4267 34 #include <xen/spinlock.h>
kaf24@4267 35
kaf24@1941 36 struct domain;
kaf24@8726 37 struct page_info;
kaf24@1936 38
kaf24@3354 39 /* Boot-time allocator. Turns into generic allocator after bootstrap. */
kaf24@8726 40 paddr_t init_boot_allocator(paddr_t bitmap_start);
kaf24@8726 41 void init_boot_pages(paddr_t ps, paddr_t pe);
kfraser@14083 42 unsigned long alloc_boot_pages(
kfraser@14083 43 unsigned long nr_pfns, unsigned long pfn_align);
kaf24@3354 44 void end_boot_allocator(void);
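/*
 * Example (illustrative): request two naturally aligned contiguous frames
 * from the boot allocator, before end_boot_allocator() hands memory over
 * to the runtime allocators:
 *
 *     unsigned long pfn = alloc_boot_pages(2, 2);
 */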
kaf24@3354 45
kaf24@2806 46 /* Xen suballocator. These functions are interrupt-safe. */
kaf24@8726 47 void init_xenheap_pages(paddr_t ps, paddr_t pe);
keir@19107 48 void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);
kaf24@5398 49 void free_xenheap_pages(void *v, unsigned int order);
keir@19107 50 #define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
kaf24@5398 51 #define free_xenheap_page(v) (free_xenheap_pages(v,0))
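/*
 * Example (illustrative): an order-1 (two-page) Xen-heap allocation and
 * its release:
 *
 *     void *v = alloc_xenheap_pages(1, 0);
 *     if ( v != NULL )
 *         free_xenheap_pages(v, 1);
 */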
kaf24@1749 52
kaf24@2806 53 /* Domain suballocator. These functions are *not* interrupt-safe. */
kaf24@8726 54 void init_domheap_pages(paddr_t ps, paddr_t pe);
kaf24@8726 55 struct page_info *alloc_domheap_pages(
kfraser@10418 56 struct domain *d, unsigned int order, unsigned int memflags);
kaf24@8726 57 void free_domheap_pages(struct page_info *pg, unsigned int order);
kfraser@15580 58 unsigned long avail_domheap_pages_region(
kfraser@15580 59 unsigned int node, unsigned int min_width, unsigned int max_width);
kaf24@1936 60 unsigned long avail_domheap_pages(void);
keir@17385 61 #define alloc_domheap_page(d,f) (alloc_domheap_pages(d,0,f))
kaf24@5398 62 #define free_domheap_page(p) (free_domheap_pages(p,0))
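/*
 * Example (illustrative): an order-0 anonymous allocation (d == NULL means
 * the page is not assigned to any domain):
 *
 *     struct page_info *pg = alloc_domheap_pages(NULL, 0, 0);
 *     if ( pg != NULL )
 *         free_domheap_pages(pg, 0);
 */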
kaf24@1210 63
kfraser@14098 64 void scrub_heap_pages(void);
kfraser@14098 65
kfraser@10418 66 int assign_pages(
kfraser@10418 67 struct domain *d,
kfraser@10418 68 struct page_info *pg,
kfraser@10418 69 unsigned int order,
kfraser@10418 70 unsigned int memflags);
kfraser@10418 71
kfraser@10418 72 /* memflags: */
kfraser@14103 73 #define _MEMF_no_refcount 0
kfraser@10418 74 #define MEMF_no_refcount (1U<<_MEMF_no_refcount)
keir@18975 75 #define _MEMF_populate_on_demand 1
keir@18975 76 #define MEMF_populate_on_demand (1U<<_MEMF_populate_on_demand)
keir@17385 77 #define _MEMF_node 8
keir@17385 78 #define MEMF_node(n) ((((n)+1)&0xff)<<_MEMF_node)
kfraser@14103 79 #define _MEMF_bits 24
kfraser@14103 80 #define MEMF_bits(n) ((n)<<_MEMF_bits)
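/*
 * Example (illustrative): MEMF_node() stores node+1 in bits 8-15 and
 * MEMF_bits() bounds the machine address width, so a page for domain d
 * below 4GB on NUMA node 0 would be requested as:
 *
 *     pg = alloc_domheap_page(d, MEMF_bits(32) | MEMF_node(0));
 */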
kaf24@5929 81
kaf24@10340 82 #ifdef CONFIG_PAGEALLOC_MAX_ORDER
kaf24@10340 83 #define MAX_ORDER CONFIG_PAGEALLOC_MAX_ORDER
kaf24@10340 84 #else
kaf24@10341 85 #define MAX_ORDER 20 /* 2^20 contiguous pages */
kaf24@10340 86 #endif
kaf24@9456 87
keir@19134 88 #define page_list_entry list_head
keir@19134 89
keir@19134 90 #include <asm/mm.h>
keir@19134 91
keir@19134 92 #ifndef page_list_entry
keir@19134 93 struct page_list_head
keir@19134 94 {
keir@19134 95 struct page_info *next, *tail;
keir@19134 96 };
keir@19134 97 /* These must only have instances in struct page_info. */
keir@19134 98 # define page_list_entry
keir@19134 99
keir@19134 100 # define PAGE_LIST_HEAD_INIT(name) { NULL, NULL }
keir@19134 101 # define PAGE_LIST_HEAD(name) \
keir@19134 102 struct page_list_head name = PAGE_LIST_HEAD_INIT(name)
keir@19134 103 # define INIT_PAGE_LIST_HEAD(head) ((head)->tail = (head)->next = NULL)
keir@19134 104 # define INIT_PAGE_LIST_ENTRY(ent) ((ent)->prev = (ent)->next = ~0)
keir@19134 105
keir@19134 106 static inline int
keir@19134 107 page_list_empty(const struct page_list_head *head)
keir@19134 108 {
keir@19134 109 return !head->next;
keir@19134 110 }
keir@19134 111 static inline struct page_info *
keir@19134 112 page_list_first(const struct page_list_head *head)
keir@19134 113 {
keir@19134 114 return head->next;
keir@19134 115 }
keir@19134 116 static inline struct page_info *
keir@19134 117 page_list_next(const struct page_info *page,
keir@19134 118 const struct page_list_head *head)
keir@19134 119 {
keir@19134 120 return page != head->tail ? mfn_to_page(page->list.next) : NULL;
keir@19134 121 }
keir@19134 122 static inline struct page_info *
keir@19134 123 page_list_prev(const struct page_info *page,
keir@19134 124 const struct page_list_head *head)
keir@19134 125 {
keir@19134 126 return page != head->next ? mfn_to_page(page->list.prev) : NULL;
keir@19134 127 }
keir@19134 128 static inline int
keir@19134 129 page_list_is_eol(const struct page_info *page,
keir@19134 130 const struct page_list_head *head)
keir@19134 131 {
keir@19134 132 return !page;
keir@19134 133 }
keir@19134 134 static inline void
keir@19134 135 page_list_add(struct page_info *page, struct page_list_head *head)
keir@19134 136 {
keir@19134 137 if ( head->next )
keir@19134 138 {
keir@19134 139 page->list.next = page_to_mfn(head->next);
keir@19134 140 head->next->list.prev = page_to_mfn(page);
keir@19134 141 }
keir@19134 142 else
keir@19134 143 {
keir@19134 144 head->tail = page;
keir@19134 145 page->list.next = ~0;
keir@19134 146 }
keir@19134 147 page->list.prev = ~0;
keir@19134 148 head->next = page;
keir@19134 149 }
keir@19134 150 static inline void
keir@19134 151 page_list_add_tail(struct page_info *page, struct page_list_head *head)
keir@19134 152 {
keir@19134 153 page->list.next = ~0;
keir@19134 154 if ( head->next )
keir@19134 155 {
keir@19134 156 page->list.prev = page_to_mfn(head->tail);
keir@19134 157 head->tail->list.next = page_to_mfn(page);
keir@19134 158 }
keir@19134 159 else
keir@19134 160 {
keir@19134 161 page->list.prev = ~0;
keir@19134 162 head->next = page;
keir@19134 163 }
keir@19134 164 head->tail = page;
keir@19134 165 }
keir@19134 166 static inline bool_t
keir@19134 167 __page_list_del_head(struct page_info *page, struct page_list_head *head,
keir@19134 168 struct page_info *next, struct page_info *prev)
keir@19134 169 {
keir@19134 170 if ( head->next == page )
keir@19134 171 {
keir@19134 172 if ( head->tail != page )
keir@19134 173 {
keir@19134 174 next->list.prev = ~0;
keir@19134 175 head->next = next;
keir@19134 176 }
keir@19134 177 else
keir@19134 178 head->tail = head->next = NULL;
keir@19134 179 return 1;
keir@19134 180 }
keir@19134 181
keir@19134 182 if ( head->tail == page )
keir@19134 183 {
keir@19134 184 prev->list.next = ~0;
keir@19134 185 head->tail = prev;
keir@19134 186 return 1;
keir@19134 187 }
keir@19134 188
keir@19134 189 return 0;
keir@19134 190 }
keir@19134 191 static inline void
keir@19134 192 page_list_del(struct page_info *page, struct page_list_head *head)
keir@19134 193 {
keir@19134 194 struct page_info *next = mfn_to_page(page->list.next);
keir@19134 195 struct page_info *prev = mfn_to_page(page->list.prev);
keir@19134 196
keir@19134 197 if ( !__page_list_del_head(page, head, next, prev) )
keir@19134 198 {
keir@19134 199 next->list.prev = page->list.prev;
keir@19134 200 prev->list.next = page->list.next;
keir@19134 201 }
keir@19134 202 }
keir@19134 203 static inline void
keir@19134 204 page_list_del2(struct page_info *page, struct page_list_head *head1,
keir@19134 205 struct page_list_head *head2)
keir@19134 206 {
keir@19134 207 struct page_info *next = mfn_to_page(page->list.next);
keir@19134 208 struct page_info *prev = mfn_to_page(page->list.prev);
keir@19134 209
keir@19134 210 if ( !__page_list_del_head(page, head1, next, prev) &&
keir@19134 211 !__page_list_del_head(page, head2, next, prev) )
keir@19134 212 {
keir@19134 213 next->list.prev = page->list.prev;
keir@19134 214 prev->list.next = page->list.next;
keir@19134 215 }
keir@19134 216 }
keir@19134 217 static inline void
keir@19134 218 page_list_move_tail(struct page_info *page, struct page_list_head *list,
keir@19134 219 struct page_list_head *head)
keir@19134 220 {
keir@19134 221 page_list_del(page, list);
keir@19134 222 page_list_add_tail(page, head);
keir@19134 223 }
keir@19134 224 static inline struct page_info *
keir@19134 225 page_list_remove_head(struct page_list_head *head)
keir@19134 226 {
keir@19134 227 struct page_info *page = head->next;
keir@19134 228
keir@19134 229 if ( page )
keir@19134 230 page_list_del(page, head);
keir@19134 231
keir@19134 232 return page;
keir@19134 233 }
keir@19134 234 static inline void
keir@19134 235 page_list_splice_init(struct page_list_head *list, struct page_list_head *head)
keir@19134 236 {
keir@19134 237 if ( !page_list_empty(list) )
keir@19134 238 {
keir@19134 239 if ( head->next )
keir@19134 240 head->tail->list.next = page_to_mfn(list->next);
keir@19134 241 else
keir@19134 242 head->next = list->next;
keir@19134 243 head->tail = list->tail;
keir@19134 244 INIT_PAGE_LIST_HEAD(list);
keir@19134 245 }
keir@19134 246 }
keir@19134 247
keir@19134 248 #define page_list_for_each(pos, head) \
keir@19134 249 for ( pos = (head)->next; pos; pos = page_list_next(pos, head) )
keir@19134 250 #define page_list_for_each_safe(pos, tmp, head) \
keir@19134 251 for ( pos = (head)->next; \
keir@19134 252 pos ? (tmp = page_list_next(pos, head), 1) : 0; \
keir@19134 253 pos = tmp )
keir@19134 254 #define page_list_for_each_safe_reverse(pos, tmp, head) \
keir@19134 255 for ( pos = (head)->tail; \
keir@19134 256 pos ? (tmp = page_list_prev(pos, head), 1) : 0; \
keir@19134 257 pos = tmp )
keir@19134 258 #else
keir@19134 259 # define page_list_head list_head
keir@19134 260 # define PAGE_LIST_HEAD_INIT LIST_HEAD_INIT
keir@19134 261 # define PAGE_LIST_HEAD LIST_HEAD
keir@19134 262 # define INIT_PAGE_LIST_HEAD INIT_LIST_HEAD
keir@19134 263 # define INIT_PAGE_LIST_ENTRY INIT_LIST_HEAD
keir@19134 264 # define page_list_empty list_empty
keir@19134 265 # define page_list_first(hd) list_entry((hd)->next, \
keir@19134 266 struct page_info, list)
keir@19134 267 # define page_list_next(pg, hd) list_entry((pg)->list.next, \
keir@19134 268 struct page_info, list)
keir@19134 269 # define page_list_is_eol(pg, hd) (&(pg)->list == (hd))
keir@19134 270 # define page_list_add(pg, hd) list_add(&(pg)->list, hd)
keir@19134 271 # define page_list_add_tail(pg, hd) list_add_tail(&(pg)->list, hd)
keir@19134 272 # define page_list_del(pg, hd) list_del(&(pg)->list)
keir@19134 273 # define page_list_del2(pg, hd1, hd2) list_del(&(pg)->list)
keir@19134 274 # define page_list_move_tail(pg, o, n) list_move_tail(&(pg)->list, n)
keir@19134 275 # define page_list_remove_head(hd) (!page_list_empty(hd) ? \
keir@19134 276 ({ \
keir@19134 277 struct page_info *__pg = page_list_first(hd); \
keir@19134 278 list_del(&__pg->list); \
keir@19134 279 __pg; \
keir@19134 280 }) : NULL)
keir@19134 281 # define page_list_splice_init list_splice_init
keir@19134 282 # define page_list_for_each(pos, head) list_for_each_entry(pos, head, list)
keir@19134 283 # define page_list_for_each_safe(pos, tmp, head) \
keir@19134 284 list_for_each_entry_safe(pos, tmp, head, list)
keir@19134 285 # define page_list_for_each_safe_reverse(pos, tmp, head) \
keir@19134 286 list_for_each_entry_safe_reverse(pos, tmp, head, list)
keir@19134 287 #endif
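/*
 * Usage sketch (illustrative): both implementations above expose the same
 * API, so callers are written identically either way:
 *
 *     struct page_info *pos;
 *     PAGE_LIST_HEAD(mylist);            // hypothetical local list
 *     page_list_add_tail(pg, &mylist);
 *     page_list_for_each ( pos, &mylist )
 *         ...;                           // visit each page on the list
 */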
keir@19134 288
kaf24@4267 289 /* Automatic page scrubbing for dead domains. */
keir@19134 290 extern struct page_list_head page_scrub_list;
keir@19134 291 #define page_scrub_schedule_work() \
keir@19134 292 do { \
keir@19134 293 if ( !page_list_empty(&page_scrub_list) ) \
keir@19134 294 raise_softirq(PAGE_SCRUB_SOFTIRQ); \
kaf24@4267 295 } while ( 0 )
kfraser@14340 296 #define page_scrub_kick() \
kfraser@14340 297 do { \
keir@19134 298 if ( !page_list_empty(&page_scrub_list) ) \
kfraser@14340 299 cpumask_raise_softirq(cpu_online_map, PAGE_SCRUB_SOFTIRQ); \
kfraser@14340 300 } while ( 0 )
kaf24@10541 301 unsigned long avail_scrub_pages(void);
kaf24@4267 302
cl349@9211 303 int guest_remove_page(struct domain *d, unsigned long gmfn);
cl349@9211 304
keir@19086 305 /* Returns TRUE if the whole page at @mfn is ordinary RAM. */
keir@19086 306 int page_is_conventional_ram(unsigned long mfn);
kfraser@11177 307
keir@17853 308 extern unsigned long *alloc_bitmap; /* for vmcoreinfo */
keir@17853 309
kaf24@1211 310 #endif /* __XEN_MM_H__ */