
xen/include/asm-powerpc/mm.h @ 14107:1e5a83fb928b

xen memory allocator: Allow per-domain bitwidth restrictions.
Original patch by Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir@xensource.com>

author    Keir Fraser <keir@xensource.com>
date      Sat Feb 24 13:57:34 2007 +0000 (2007-02-24)
parents   741adb202b82
children  f56981f78d73
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2005, 2006, 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Jimi Xenidis <jimix@watson.ibm.com>
 */

#ifndef _ASM_MM_H_
#define _ASM_MM_H_

#include <public/xen.h>
#include <xen/list.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <asm/system.h>
#include <asm/flushtlb.h>
#include <asm/page.h>
#include <asm/debugger.h>

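/* Memory guarding is not implemented for PowerPC; both macros are no-ops. */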
#define memguard_guard_range(_p,_l)   ((void)0)
#define memguard_unguard_range(_p,_l) ((void)0)

extern unsigned long xenheap_phys_end;
extern int boot_of_mem_avail(int pos, ulong *start, ulong *end);

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 * 1. 'struct page_info' contains a 'struct list_head list'.
 * 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)

/* XXX copy-and-paste job; re-examine me */
struct page_info
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head list;

    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
    u32 tlbflush_timestamp;

    /* Reference count and various PGC_xxx flags and fields. */
    unsigned long count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Owner of this page (NULL if page is anonymous). */
            u32 _domain;
            /* Type reference count and various PGT_xxx flags and fields. */
            unsigned long type_info;
        } inuse;

        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        struct {
            /* Mask of possibly-tainted TLBs. */
            cpumask_t cpumask;
            /* Order-size of the free chunk this page is the head of. */
            u8 order;
        } free;

    } u;

};

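/*
 * Each page_extents entry records one allocated extent of domain memory:
 * the head page of the chunk and its allocation order.  See
 * allocate_extents()/free_extents() declared below.
 */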
struct page_extents {
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head pe_list;

    /* page extent */
    struct page_info *pg;
    uint order;
};

/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none            (0UL<<29) /* no special uses of this page */
#define PGT_RMA             (1UL<<29) /* This page is an RMA page? */
#define PGT_writable_page   (7UL<<29) /* has writable mappings of this page? */
#define PGT_type_mask       (7UL<<29) /* Bits 29-31. */

/* Owning guest has pinned this page to its current type? */
#define _PGT_pinned         28
#define PGT_pinned          (1UL<<_PGT_pinned)
/* Has this page been validated for use as its current type? */
#define _PGT_validated      27
#define PGT_validated       (1UL<<_PGT_validated)

/* 16-bit count of uses of this frame as its current type. */
#define PGT_count_mask      ((1UL<<16)-1)

/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated      31
#define PGC_allocated       (1UL<<_PGC_allocated)
/* Set on a *guest* page to mark it out-of-sync with its shadow */
#define _PGC_out_of_sync    30
#define PGC_out_of_sync     (1UL<<_PGC_out_of_sync)
/* Set when a page is in use as a page table */
#define _PGC_page_table     29
#define PGC_page_table      (1UL<<_PGC_page_table)
/* Set when a page is being used for the RMA */
#define _PGC_page_RMA       28
#define PGC_page_RMA        (1UL<<_PGC_page_RMA)
/* 28-bit count of references to this frame. */
#define PGC_count_mask      ((1UL<<28)-1)
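/*
 * Layout of count_info implied by the PGC_xxx definitions above:
 * bit 31 allocated, bit 30 out-of-sync, bit 29 page-table, bit 28 RMA,
 * bits 27-0 the general reference count.
 */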

#define IS_XEN_HEAP_FRAME(_pfn) (page_to_maddr(_pfn) < xenheap_phys_end)

static inline struct domain *unpickle_domptr(u32 _domain)
{ return ((_domain == 0) || (_domain & 1)) ? NULL : __va(_domain); }

static inline u32 pickle_domptr(struct domain *domain)
{ return (domain == NULL) ? 0 : (u32)__pa(domain); }
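/*
 * A page's owner is "pickled" into 32 bits by storing the physical address
 * of its struct domain; unpickle_domptr() maps that back to a pointer with
 * __va().  Zero and odd values decode to NULL, since no properly aligned
 * struct domain can sit at an odd physical address.
 */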

#define PRtype_info "016lx" /* should only be used in printk() format strings */

#define page_get_owner(_p)    (unpickle_domptr((_p)->u.inuse._domain))
#define page_set_owner(_p,_d) ((_p)->u.inuse._domain = pickle_domptr(_d))

#define XENSHARE_writable 0
#define XENSHARE_readonly 1
extern void share_xen_page_with_guest(
    struct page_info *page, struct domain *d, int readonly);
extern void share_xen_page_with_privileged_guests(
    struct page_info *page, int readonly);

extern struct page_info *frame_table;
extern unsigned long max_page;
extern unsigned long total_pages;
void init_frametable(void);
void free_rma_check(struct page_info *page);

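/*
 * Drop a general reference.  The count is decremented with a lock-free
 * cmpxchg() retry loop; when it reaches zero the page is returned to the
 * domain heap, after checking that it is not a live RMA page (RMA pages
 * may only be freed while their domain is dying).
 */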
static inline void put_page(struct page_info *page)
{
    u32 nx, x, y = page->count_info;

    do {
        x  = y;
        nx = x - 1;
    }
    while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );

    if ( unlikely((nx & PGC_count_mask) == 0) ) {
        /* RMA pages can only be released while the domain is dying */
        free_rma_check(page);
        free_domheap_page(page);
    }
}

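/*
 * Acquire a general reference, failing if the page is currently free, the
 * reference count would overflow, or the page is not owned by the expected
 * domain.  The increment uses the same lock-free cmpxchg() retry pattern
 * as put_page().
 */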
static inline int get_page(struct page_info *page,
                           struct domain *domain)
{
    u32 x, nx, y = page->count_info;
    u32 d, nd = page->u.inuse._domain;
    u32 _domain = pickle_domptr(domain);

    do {
        x  = y;
        nx = x + 1;
        d  = nd;
        if ( unlikely((x & PGC_count_mask) == 0) ||  /* Not allocated? */
             unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
             unlikely(d != _domain) )                /* Wrong owner? */
        {
            return 0;
        }
        y = cmpxchg(&page->count_info, x, nx);
    }
    while ( unlikely(y != x) );

    return 1;
}

extern void put_page_type(struct page_info *page);
extern int get_page_type(struct page_info *page, unsigned long type);

static inline void put_page_and_type(struct page_info *page)
{
    put_page_type(page);
    put_page(page);
}

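/*
 * Combined acquire: take a general reference, then a type reference.  If
 * the type reference cannot be obtained, the general reference is dropped
 * again, so the caller sees an all-or-nothing result.
 */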
static inline int get_page_and_type(struct page_info *page,
                                    struct domain *domain,
                                    unsigned long type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}

static inline int page_is_removable(struct page_info *page)
{
    return ((page->count_info & PGC_count_mask) == 1);
}

extern void synchronise_pagetables(unsigned long cpu_mask);

/* XXX don't know what this is for */
typedef struct {
    void (*enable)(struct domain *);
    void (*disable)(struct domain *);
} vm_assist_info_t;
extern vm_assist_info_t vm_assist_info[];

/* hope that accesses to this will fail spectacularly */
#undef machine_to_phys_mapping
#define INVALID_M2P_ENTRY (~0UL)

/* do nothing, it's all calculated */
#define set_gpfn_from_mfn(mfn, pfn) do { } while (0)
#define get_gpfn_from_mfn(mfn) (mfn)

extern unsigned long mfn_to_gmfn(struct domain *d, unsigned long mfn);

extern unsigned long paddr_to_maddr(unsigned long paddr);

#define INVALID_MFN       (~0UL)
#define PFN_TYPE_NONE     0
#define PFN_TYPE_RMA      1
#define PFN_TYPE_LOGICAL  2
#define PFN_TYPE_IO       3
#define PFN_TYPE_FOREIGN  4
#define PFN_TYPE_GNTTAB   5

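/*
 * pfn2mfn() translates a guest PFN to a machine frame and reports which
 * PFN_TYPE_xxx region it belongs to.  gmfn_to_mfn() accepts only RMA and
 * logical memory; any other type triggers WARN() and is treated as a
 * failed translation.
 */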
extern ulong pfn2mfn(struct domain *d, ulong pfn, int *type);
static inline unsigned long gmfn_to_mfn(struct domain *d, unsigned long gmfn)
{
    int mtype;
    ulong mfn;

    mfn = pfn2mfn(d, gmfn, &mtype);
    if (mfn != INVALID_MFN) {
        switch (mtype) {
        case PFN_TYPE_RMA:
        case PFN_TYPE_LOGICAL:
            break;
        default:
            WARN();
            mfn = INVALID_MFN;
            break;
        }
    }
    return mfn;
}

extern int update_grant_va_mapping(unsigned long va,
                                   unsigned long val,
                                   struct domain *,
                                   struct vcpu *);

/* Arch-specific portion of memory_op hypercall. */
long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);

extern int allocate_rma(struct domain *d, unsigned int order_pages);
extern uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages);
extern void free_extents(struct domain *d);

extern int arch_domain_add_extent(struct domain *d, struct page_info *page,
                                  int order);

extern int steal_page(struct domain *d, struct page_info *page,
                      unsigned int memflags);

/* XXX these just exist until we can stop #including x86 code */
#define access_ok(addr,size) 1
#define array_access_ok(addr,count,size) 1

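/*
 * The common memory allocator can clamp allocations to a per-domain
 * address width (the feature added by this changeset); PowerPC imposes
 * no clamp, so the requested bitsize is passed through unchanged.
 */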
#define domain_clamp_alloc_bitsize(d, b) (b)

#endif