xen/include/asm-powerpc/mm.h @ 14238:f56981f78d73 (ia64/xen-unstable)

[POWERPC][XEN] Implement guest_physmap_max_mem_pages().
- Create a p2m array large enough to cover d->max_pages.
- Free in domain_relinquish_resources().

Signed-off-by: Ryan Harper <ryanh@us.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>

author:   Hollis Blanchard <hollisb@us.ibm.com>
date:     Fri Mar 02 17:07:01 2007 -0600
parents:  1e5a83fb928b
children: b75609e1fa81

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2005, 2006, 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Jimi Xenidis <jimix@watson.ibm.com>
 */

#ifndef _ASM_MM_H_
#define _ASM_MM_H_

#include <public/xen.h>
#include <xen/list.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <asm/system.h>
#include <asm/flushtlb.h>
#include <asm/page.h>
#include <asm/debugger.h>
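
/* Memory guarding is not implemented on PowerPC; these macros are no-ops. */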
#define memguard_guard_range(_p,_l)    ((void)0)
#define memguard_unguard_range(_p,_l)  ((void)0)

extern unsigned long xenheap_phys_end;
extern int boot_of_mem_avail(int pos, ulong *start, ulong *end);

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 * 1. 'struct page_info' contains a 'struct list_head list'.
 * 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)

/* XXX copy-and-paste job; re-examine me */
struct page_info
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head list;

    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
    u32 tlbflush_timestamp;

    /* Reference count and various PGC_xxx flags and fields. */
    unsigned long count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Owner of this page (NULL if page is anonymous). */
            u32 _domain;
            /* Type reference count and various PGT_xxx flags and fields. */
            unsigned long type_info;
        } inuse;

        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        struct {
            /* Mask of possibly-tainted TLBs. */
            cpumask_t cpumask;
            /* Order-size of the free chunk this page is the head of. */
            u8 order;
        } free;

    } u;

};
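
/*
 * A page extent records one contiguous block of 2^order pages (headed
 * by 'pg') given to a domain in addition to its RMA; judging by
 * allocate_extents()/free_extents() below, extents are threaded onto a
 * per-domain list via 'pe_list'.
 */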
struct page_extents {
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head pe_list;

    /* page extent */
    struct page_info *pg;
    uint order;
};

/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none            (0UL<<29) /* no special uses of this page */
#define PGT_RMA             (1UL<<29) /* page is part of the RMA? */
#define PGT_writable_page   (7UL<<29) /* page has writable mappings? */
#define PGT_type_mask       (7UL<<29) /* Bits 29-31. */

/* Owning guest has pinned this page to its current type? */
#define _PGT_pinned         28
#define PGT_pinned          (1UL<<_PGT_pinned)
/* Has this page been validated for use as its current type? */
#define _PGT_validated      27
#define PGT_validated       (1UL<<_PGT_validated)

/* 16-bit count of uses of this frame as its current type. */
#define PGT_count_mask      ((1UL<<16)-1)

/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated      31
#define PGC_allocated       (1UL<<_PGC_allocated)
/* Set on a *guest* page to mark it out-of-sync with its shadow. */
#define _PGC_out_of_sync    30
#define PGC_out_of_sync     (1UL<<_PGC_out_of_sync)
/* Set when the page is in use as a page table. */
#define _PGC_page_table     29
#define PGC_page_table      (1UL<<_PGC_page_table)
/* Set when the page is in use as part of the RMA. */
#define _PGC_page_RMA       28
#define PGC_page_RMA        (1UL<<_PGC_page_RMA)
/* 28-bit count of references to this frame. */
#define PGC_count_mask      ((1UL<<28)-1)
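
/*
 * Summary of the bit layout defined above:
 *   count_info (PGC_*): 31 allocated | 30 out_of_sync | 29 page_table |
 *                       28 page_RMA  | 27-0 general reference count
 *   type_info  (PGT_*): 31-29 mutually-exclusive type | 28 pinned |
 *                       27 validated | 15-0 type reference count
 */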

#define IS_XEN_HEAP_FRAME(_pfn) (page_to_maddr(_pfn) < xenheap_phys_end)
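
/*
 * A domain pointer is "pickled" into 32 bits by storing its physical
 * address, which relies on __pa(domain) fitting in a u32 (i.e. the
 * xenheap presumably living below 4GiB on this platform).  Zero and odd
 * values unpickle as NULL: struct domain is at least word-aligned, so a
 * real pickled pointer is always even.
 */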
static inline struct domain *unpickle_domptr(u32 _domain)
{ return ((_domain == 0) || (_domain & 1)) ? NULL : __va(_domain); }

static inline u32 pickle_domptr(struct domain *domain)
{ return (domain == NULL) ? 0 : (u32)__pa(domain); }

#define PRtype_info "016lx" /* should only be used for printk's */

#define page_get_owner(_p)    (unpickle_domptr((_p)->u.inuse._domain))
#define page_set_owner(_p,_d) ((_p)->u.inuse._domain = pickle_domptr(_d))

#define XENSHARE_writable 0
#define XENSHARE_readonly 1
extern void share_xen_page_with_guest(
    struct page_info *page, struct domain *d, int readonly);
extern void share_xen_page_with_privileged_guests(
    struct page_info *page, int readonly);

extern struct page_info *frame_table;
extern unsigned long max_page;
extern unsigned long total_pages;
void init_frametable(void);
void free_rma_check(struct page_info *page);
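
/*
 * Drop one general reference to a page.  count_info is updated with a
 * lock-free cmpxchg loop; when the count reaches zero the page is
 * returned to the domain heap, with free_rma_check() catching the
 * illegal release of an RMA page from a domain that is not dying.
 */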
static inline void put_page(struct page_info *page)
{
    u32 nx, x, y = page->count_info;

    do {
        x  = y;
        nx = x - 1;
    }
    while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );

    if ( unlikely((nx & PGC_count_mask) == 0) ) {
        /* RMA pages can only be released while the domain is dying. */
        free_rma_check(page);
        free_domheap_page(page);
    }
}
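
/*
 * Take a general reference to a page on behalf of 'domain'.  Returns 0
 * without touching the count if the page is unallocated, the count
 * would overflow, or the page belongs to a different owner; otherwise
 * the count is raised atomically and 1 is returned.
 */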
static inline int get_page(struct page_info *page,
                           struct domain *domain)
{
    u32 x, nx, y = page->count_info;
    u32 d, nd = page->u.inuse._domain;
    u32 _domain = pickle_domptr(domain);

    do {
        x  = y;
        nx = x + 1;
        d  = nd;
        if ( unlikely((x & PGC_count_mask) == 0) ||  /* Not allocated? */
             unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
             unlikely(d != _domain) )                /* Wrong owner? */
        {
            return 0;
        }
        y = cmpxchg(&page->count_info, x, nx);
    }
    while ( unlikely(y != x) );

    return 1;
}

extern void put_page_type(struct page_info *page);
extern int  get_page_type(struct page_info *page, unsigned long type);

static inline void put_page_and_type(struct page_info *page)
{
    put_page_type(page);
    put_page(page);
}

static inline int get_page_and_type(struct page_info *page,
                                    struct domain *domain,
                                    unsigned long type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}
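
/*
 * Illustrative pairing (a sketch, not code from this file): a
 * successful get_page_and_type() must be balanced by exactly one
 * put_page_and_type() once the typed reference is no longer needed.
 *
 *     if ( get_page_and_type(page, d, PGT_writable_page) )
 *     {
 *         ... use the page ...
 *         put_page_and_type(page);
 *     }
 */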

static inline int page_is_removable(struct page_info *page)
{
    return ((page->count_info & PGC_count_mask) == 1);
}

extern void synchronise_pagetables(unsigned long cpu_mask);

/* XXX don't know what this is for */
typedef struct {
    void (*enable)(struct domain *);
    void (*disable)(struct domain *);
} vm_assist_info_t;
extern vm_assist_info_t vm_assist_info[];

/* Hope that accesses to this will fail spectacularly. */
#undef machine_to_phys_mapping
#define INVALID_M2P_ENTRY (~0UL)

/* Do nothing; it's all calculated. */
#define set_gpfn_from_mfn(mfn, pfn) do { } while (0)
#define get_gpfn_from_mfn(mfn) (mfn)

extern unsigned long mfn_to_gmfn(struct domain *d, unsigned long mfn);

extern unsigned long paddr_to_maddr(unsigned long paddr);

/* INVALID_MFN can be any value that fails mfn_valid(). */
#define INVALID_MFN (~0U)

#define PFN_TYPE_NONE    0
#define PFN_TYPE_RMA     1
#define PFN_TYPE_LOGICAL 2
#define PFN_TYPE_IO      3
#define PFN_TYPE_FOREIGN 4
#define PFN_TYPE_GNTTAB  5
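
/*
 * pfn2mfn() translates a guest PFN to a machine frame number and, via
 * *type, reports which of the regions above the PFN falls in (judging
 * by the names: the RMA, ordinary "logical" extent memory, I/O space,
 * a foreign domain's memory, or the grant table).  gmfn_to_mfn() below
 * accepts only real memory (RMA or LOGICAL) and warns otherwise.
 */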
extern ulong pfn2mfn(struct domain *d, ulong pfn, int *type);
static inline unsigned long gmfn_to_mfn(struct domain *d, unsigned long gmfn)
{
    int mtype;
    ulong mfn;

    mfn = pfn2mfn(d, gmfn, &mtype);
    if (mfn != INVALID_MFN) {
        switch (mtype) {
        case PFN_TYPE_RMA:
        case PFN_TYPE_LOGICAL:
            break;
        default:
            WARN();
            mfn = INVALID_MFN;
            break;
        }
    }
    return mfn;
}

extern int update_grant_va_mapping(unsigned long va,
                                   unsigned long val,
                                   struct domain *,
                                   struct vcpu *);

/* Arch-specific portion of memory_op hypercall. */
long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);

extern int allocate_rma(struct domain *d, unsigned int order_pages);
extern uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages);
extern void free_extents(struct domain *d);

extern int arch_domain_add_extent(struct domain *d, struct page_info *page,
                                  int order);

extern int steal_page(struct domain *d, struct page_info *page,
                      unsigned int memflags);

/* XXX these just exist until we can stop #including x86 code */
#define access_ok(addr,size) 1
#define array_access_ok(addr,count,size) 1

#define domain_clamp_alloc_bitsize(d, b) (b)

#endif