ia64/xen-unstable

view xen/include/asm-powerpc/mm.h @ 12932:0379ac3367b2

[XEN][POWERPC] Use gmfn_to_mfn() over pfn2mfn()
Should only use pfn2mfn() if you care about the type of memory.
Signed-off-by: Jimi Xenidis <jimix@watson.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author Jimi Xenidis <jimix@watson.ibm.com>
date Mon Oct 02 21:40:26 2006 -0400 (2006-10-02)
parents 4da585fb62f9
children 8515e163f1df
line source
1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 *
16 * Copyright (C) IBM Corp. 2005
17 *
18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19 */
21 #ifndef _ASM_MM_H_
22 #define _ASM_MM_H_
24 #include <public/xen.h>
25 #include <xen/list.h>
26 #include <xen/types.h>
27 #include <xen/mm.h>
28 #include <asm/system.h>
29 #include <asm/flushtlb.h>
30 #include <asm/uaccess.h>
/* Memory guarding is not implemented on this architecture: no-op stubs. */
#define memguard_guard_range(_p,_l) ((void)0)
#define memguard_unguard_range(_p,_l) ((void)0)

/* End of the Xen heap in machine-address space (see IS_XEN_HEAP_FRAME). */
extern unsigned long xenheap_phys_end;

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 *  1. 'struct page_info' contains a 'struct list_head list'.
 *  2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
/* XXX copy-and-paste job; re-examine me */
struct page_info
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head list;

    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
    u32 tlbflush_timestamp;

    /* Reference count and various PGC_xxx flags and fields. */
    unsigned long count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Owner of this page (NULL if page is anonymous).
             * Stored in pickled form -- see pickle_domptr()/unpickle_domptr()
             * and the page_get_owner()/page_set_owner() accessors below. */
            u32 _domain;
            /* Type reference count and various PGT_xxx flags and fields. */
            unsigned long type_info;
        } inuse;

        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        struct {
            /* Mask of possibly-tainted TLBs. */
            cpumask_t cpumask;
            /* Order-size of the free chunk this page is the head of. */
            u8 order;
        } free;

    } u;

};
/* Describes one extent of memory granted to a domain beyond its RMA
 * (managed by allocate_extents()/free_extents() below). */
struct page_extents {
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head pe_list;

    /* page extent */
    struct page_info *pg;       /* first frame of the extent */
    uint order;                 /* presumably 2^order pages -- TODO confirm */
    ulong pfn;                  /* guest pfn the extent is mapped at */
};
/*
 * PGT_* flags live in page_info.u.inuse.type_info;
 * PGC_* flags live in page_info.count_info.
 */

/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none            (0<<29) /* no special uses of this page */
#define PGT_RMA             (1<<29) /* This page is an RMA page? */
#define PGT_writable_page   (7<<29) /* has writable mappings of this page? */
#define PGT_type_mask       (7<<29) /* Bits 29-31. */

/* Owning guest has pinned this page to its current type? */
#define _PGT_pinned         28
#define PGT_pinned          (1U<<_PGT_pinned)
/* Has this page been validated for use as its current type? */
#define _PGT_validated      27
#define PGT_validated       (1U<<_PGT_validated)

/* 16-bit count of uses of this frame as its current type. */
#define PGT_count_mask      ((1U<<16)-1)

/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated      31
#define PGC_allocated       (1U<<_PGC_allocated)
/* Set on a *guest* page to mark it out-of-sync with its shadow */
#define _PGC_out_of_sync    30
#define PGC_out_of_sync     (1U<<_PGC_out_of_sync)
/* Set when is using a page as a page table */
#define _PGC_page_table     29
#define PGC_page_table      (1U<<_PGC_page_table)
/* Set when using page for RMA */
#define _PGC_page_RMA       28
#define PGC_page_RMA        (1U<<_PGC_page_RMA)
/* 28-bit count of references to this frame (bits 0-27). */
#define PGC_count_mask      ((1U<<28)-1)
122 #define IS_XEN_HEAP_FRAME(_pfn) (page_to_maddr(_pfn) < xenheap_phys_end)
124 static inline struct domain *unpickle_domptr(u32 _domain)
125 { return ((_domain == 0) || (_domain & 1)) ? NULL : __va(_domain); }
127 static inline u32 pickle_domptr(struct domain *domain)
128 { return (domain == NULL) ? 0 : (u32)__pa(domain); }
#define PRtype_info "016lx"/* should only be used for printk's */

/* Get/set the (pickled) owner of a page; see pickle_domptr() above. */
#define page_get_owner(_p) (unpickle_domptr((_p)->u.inuse._domain))
#define page_set_owner(_p,_d) ((_p)->u.inuse._domain = pickle_domptr(_d))
/* Global array of page_info, one entry per machine frame. */
extern struct page_info *frame_table;
extern unsigned long max_page;
extern unsigned long total_pages;
void init_frametable(void);

/* Check that an RMA page is only being freed while its domain is dying;
 * called from put_page() before the frame is returned to the heap. */
void free_rma_check(struct page_info *page);
141 static inline void put_page(struct page_info *page)
142 {
143 u32 nx, x, y = page->count_info;
145 do {
146 x = y;
147 nx = x - 1;
148 }
149 while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
151 if ( unlikely((nx & PGC_count_mask) == 0) ) {
152 /* RMA pages can only be released while the domain is dying */
153 free_rma_check(page);
154 free_domheap_page(page);
155 }
156 }
/*
 * Try to acquire one general reference to 'page' on behalf of 'domain'.
 * Returns 1 on success; returns 0 if the page is unallocated, its
 * reference count would overflow, or it is owned by a different domain.
 */
static inline int get_page(struct page_info *page,
                           struct domain *domain)
{
    u32 x, nx, y = page->count_info;
    /* Owner sampled once, in pickled form (see pickle_domptr()). */
    u32 d, nd = page->u.inuse._domain;
    u32 _domain = pickle_domptr(domain);

    do {
        x  = y;
        nx = x + 1;
        d  = nd;
        if ( unlikely((x & PGC_count_mask) == 0) ||  /* Not allocated? */
             unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
             unlikely(d != _domain) )                /* Wrong owner? */
        {
            return 0;
        }
        /* Atomically bump the count; loop if it changed under us. */
        y = cmpxchg(&page->count_info, x, nx);
    }
    while ( unlikely(y != x) );

    return 1;
}
/* Manipulate the type reference count (PGT_* bits in u.inuse.type_info). */
extern void put_page_type(struct page_info *page);
extern int get_page_type(struct page_info *page, unsigned long type);
/* Release one type reference and one general reference, in that order. */
static inline void put_page_and_type(struct page_info *page)
{
    put_page_type(page);
    put_page(page);
}
/*
 * Acquire both a general reference and a type reference to 'page'.
 * Returns 1 on success, 0 on failure; on failure no references are held.
 */
static inline int get_page_and_type(struct page_info *page,
                                    struct domain *domain,
                                    unsigned long type)
{
    if ( unlikely(!get_page(page, domain)) )
        return 0;

    if ( unlikely(!get_page_type(page, type)) )
    {
        /* Type claim failed: drop the general reference we just took. */
        put_page(page);
        return 0;
    }

    return 1;
}
206 static inline int page_is_removable(struct page_info *page)
207 {
208 return ((page->count_info & PGC_count_mask) == 1);
209 }
extern void synchronise_pagetables(unsigned long cpu_mask);

/* XXX don't know what this is for */
typedef struct {
    void (*enable)(struct domain *);
    void (*disable)(struct domain *);
} vm_assist_info_t;
extern vm_assist_info_t vm_assist_info[];

/* Sharing Xen pages with guests is not implemented here: no-op stubs. */
#define share_xen_page_with_guest(p, d, r) do { } while (0)
#define share_xen_page_with_privileged_guests(p, r) do { } while (0)

/* hope that accesses to this will fail spectacularly */
#define machine_to_phys_mapping ((u32 *)-1UL)
extern int update_grant_va_mapping(unsigned long va,
                                   unsigned long val,
                                   struct domain *,
                                   struct vcpu *);

#define INVALID_MFN (~0UL)

/* Memory types reported by pfn2mfn() through its 'type' out-parameter. */
#define PFN_TYPE_NONE 0
#define PFN_TYPE_RMA 1
#define PFN_TYPE_LOGICAL 2
#define PFN_TYPE_IO 3
#define PFN_TYPE_FOREIGN 4

/* Translate a guest pfn to a machine frame, reporting the memory type in
 * *type.  Only use this directly if the type matters; otherwise prefer
 * gmfn_to_mfn() below. */
extern ulong pfn2mfn(struct domain *d, ulong pfn, int *type);

/* Arch-specific portion of memory_op hypercall. */
long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);

/* XXX implement me? */
#define set_gpfn_from_mfn(mfn, pfn) do { } while (0)
/* XXX only used for debug print right now... */
#define get_gpfn_from_mfn(mfn) (mfn)
248 static inline unsigned long gmfn_to_mfn(struct domain *d, unsigned long gmfn)
249 {
250 int mtype;
251 ulong mfn;
253 mfn = pfn2mfn(d, gmfn, &mtype);
254 if (mfn != INVALID_MFN) {
255 switch (mtype) {
256 case PFN_TYPE_RMA:
257 case PFN_TYPE_LOGICAL:
258 break;
259 default:
260 WARN();
261 mfn = INVALID_MFN;
262 break;
263 }
264 }
265 return mfn;
266 }
/* Machine-to-guest frame translation is the identity mapping here. */
#define mfn_to_gmfn(_d, mfn) (mfn)

/* Allocate the Real Mode Area for domain 'd' (order_pages is a page
 * order -- presumably 2^order_pages frames; confirm against mm.c). */
extern int allocate_rma(struct domain *d, unsigned int order_pages);
extern uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages);
extern void free_extents(struct domain *d);

extern int steal_page(struct domain *d, struct page_info *page,
                      unsigned int memflags);

#endif