ia64/xen-unstable

view xen/include/asm-x86/mm.h @ 12599:93e657836d07

[XEN] Remove VALID_MFN(); replace uses with mfn_valid().
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Mon Nov 27 17:48:24 2006 +0000 (2006-11-27)
parents 6f0d8434d23f
children c75d6f2aad7a
#ifndef __ASM_X86_MM_H__
#define __ASM_X86_MM_H__

#include <xen/config.h>
#include <xen/cpumask.h>
#include <xen/list.h>
#include <asm/io.h>
#include <asm/uaccess.h>

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 * 1. 'struct page_info' contains a 'struct list_head list'.
 * 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)

struct page_info
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head list;

    /* Reference count and various PGC_xxx flags and fields. */
    u32 count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Owner of this page (NULL if page is anonymous). */
            u32 _domain; /* pickled format */
            /* Type reference count and various PGT_xxx flags and fields. */
            unsigned long type_info;
        } __attribute__ ((packed)) inuse;

        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        struct {
            /* Order-size of the free chunk this page is the head of. */
            u32 order;
            /* Mask of possibly-tainted TLBs. */
            cpumask_t cpumask;
        } __attribute__ ((packed)) free;

    } u;

    union {
        /*
         * Timestamp from 'TLB clock', used to avoid extra safety flushes.
         * Only valid for: a) free pages, and b) pages with zero type count
         * (except page table pages when the guest is in shadow mode).
         */
        u32 tlbflush_timestamp;

        /*
         * Guest pages with a shadow. This does not conflict with
         * tlbflush_timestamp since page table pages are explicitly not
         * tracked for TLB-flush avoidance when a guest runs in shadow mode.
         */
        unsigned long shadow_flags;
    };
};
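
/*
 * Illustrative example: the two arms of the 'u' union are discriminated
 * by the general reference count, so a caller can tell which fields are
 * live:
 *
 *     if ( (pg->count_info & PGC_count_mask) != 0 )
 *         d = page_get_owner(pg);    -- in use: 'u.inuse' fields are valid
 *     else
 *         order = PFN_ORDER(pg);     -- free: 'u.free' fields are valid
 */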

/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none            (0U<<29) /* no special uses of this page */
#define PGT_l1_page_table   (1U<<29) /* using this page as an L1 page table? */
#define PGT_l2_page_table   (2U<<29) /* using this page as an L2 page table? */
#define PGT_l3_page_table   (3U<<29) /* using this page as an L3 page table? */
#define PGT_l4_page_table   (4U<<29) /* using this page as an L4 page table? */
#define PGT_gdt_page        (5U<<29) /* using this page in a GDT? */
#define PGT_ldt_page        (6U<<29) /* using this page in an LDT? */
#define PGT_writable_page   (7U<<29) /* has writable mappings of this page? */
#define PGT_type_mask       (7U<<29) /* Bits 29-31. */

/* Owning guest has pinned this page to its current type? */
#define _PGT_pinned         28
#define PGT_pinned          (1U<<_PGT_pinned)
/* Has this page been validated for use as its current type? */
#define _PGT_validated      27
#define PGT_validated       (1U<<_PGT_validated)
/* PAE only: is this an L2 page directory containing Xen-private mappings? */
#define _PGT_pae_xen_l2     26
#define PGT_pae_xen_l2      (1U<<_PGT_pae_xen_l2)

/* 16-bit count of uses of this frame as its current type. */
#define PGT_count_mask      ((1U<<16)-1)
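
/*
 * Example: the mutually exclusive type lives in bits 29-31 of type_info,
 * alongside the 16-bit type use count, so a typical check reads:
 *
 *     if ( ((pg->u.inuse.type_info & PGT_type_mask) == PGT_l1_page_table) &&
 *          ((pg->u.inuse.type_info & PGT_count_mask) != 0) )
 *         ...the frame is currently validated and in use as an L1 table...
 */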

/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated      31
#define PGC_allocated       (1U<<_PGC_allocated)
/* Set on a *guest* page to mark it out-of-sync with its shadow. */
#define _PGC_out_of_sync    30
#define PGC_out_of_sync     (1U<<_PGC_out_of_sync)
/* Set when a page is in use as a page table. */
#define _PGC_page_table     29
#define PGC_page_table      (1U<<_PGC_page_table)
/* 29-bit count of references to this frame. */
#define PGC_count_mask      ((1U<<29)-1)
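
/*
 * Example: PGC_allocated is a flag, not part of the reference count, so
 * "still owned by its guest" and "still referenced" are separate tests:
 *
 *     owned      = !!(pg->count_info & PGC_allocated);
 *     referenced = (pg->count_info & PGC_count_mask) != 0;
 */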

/* We trust the slab allocator in slab.c, and our use of it. */
#define PageSlab(page)      (1)
#define PageSetSlab(page)   ((void)0)
#define PageClearSlab(page) ((void)0)

#define IS_XEN_HEAP_FRAME(_pfn) (page_to_maddr(_pfn) < xenheap_phys_end)

#if defined(__i386__)
#define pickle_domptr(_d)   ((u32)(unsigned long)(_d))
static inline struct domain *unpickle_domptr(u32 _domain)
{ return (_domain & 1) ? NULL : (void *)_domain; }
#define PRtype_info "08lx"  /* should only be used for printk()s */
#elif defined(__x86_64__)
static inline struct domain *unpickle_domptr(u32 _domain)
{ return ((_domain == 0) || (_domain & 1)) ? NULL : __va(_domain); }
static inline u32 pickle_domptr(struct domain *domain)
{ return (domain == NULL) ? 0 : (u32)__pa(domain); }
#define PRtype_info "016lx" /* should only be used for printk()s */
#endif
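
/*
 * Note on the pickled owner format: a 'struct domain' pointer is squeezed
 * into the 32-bit _domain field above. On i386 the virtual address itself
 * fits; on x86-64 the low 32 bits of the *physical* address are stored
 * (which assumes the domain structure lives below the 4GB machine-address
 * boundary), with 0 reserved for "no owner". Any odd value also unpickles
 * to NULL, so bit 0 never encodes a real owner. Round trip:
 *
 *     u32 pickled = pickle_domptr(d);
 *     ASSERT(unpickle_domptr(pickled) == d);
 */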

/* The order of the largest allocation unit we use for shadow pages. */
#if CONFIG_PAGING_LEVELS == 2
#define SHADOW_MAX_ORDER 0 /* Only ever need 4k allocations */
#else
#define SHADOW_MAX_ORDER 2 /* Need up to 16k allocs for 32-bit on PAE/64 */
#endif

#define page_get_owner(_p)    (unpickle_domptr((_p)->u.inuse._domain))
#define page_set_owner(_p,_d) ((_p)->u.inuse._domain = pickle_domptr(_d))

#define XENSHARE_writable 0
#define XENSHARE_readonly 1
extern void share_xen_page_with_guest(
    struct page_info *page, struct domain *d, int readonly);
extern void share_xen_page_with_privileged_guests(
    struct page_info *page, int readonly);

extern struct page_info *frame_table;
extern unsigned long max_page;
extern unsigned long total_pages;
void init_frametable(void);

int alloc_page_type(struct page_info *page, unsigned long type);
void free_page_type(struct page_info *page, unsigned long type);
void invalidate_shadow_ldt(struct vcpu *d);
int _shadow_mode_refcounts(struct domain *d);

static inline void put_page(struct page_info *page)
{
    u32 nx, x, y = page->count_info;

    do {
        x  = y;
        nx = x - 1;
    }
    while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );

    if ( unlikely((nx & PGC_count_mask) == 0) )
        free_domheap_page(page);
}
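
/*
 * put_page() drops one general reference via a lock-free cmpxchg loop:
 * each iteration retries with the freshly observed count_info until the
 * decrement applies without interference. Whichever caller removes the
 * last general reference is responsible for returning the page to the
 * domain heap.
 */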

static inline int get_page(struct page_info *page,
                           struct domain *domain)
{
    u32 x, nx, y = page->count_info;
    u32 d, nd = page->u.inuse._domain;
    u32 _domain = pickle_domptr(domain);

    do {
        x  = y;
        nx = x + 1;
        d  = nd;
        if ( unlikely((x & PGC_count_mask) == 0) ||  /* Not allocated? */
             unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
             unlikely(d != _domain) )                /* Wrong owner? */
        {
            if ( !_shadow_mode_refcounts(domain) )
                gdprintk(XENLOG_INFO,
                         "Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%"
                         PRtype_info "\n",
                         page_to_mfn(page), domain, unpickle_domptr(d),
                         x, page->u.inuse.type_info);
            return 0;
        }
        __asm__ __volatile__(
            LOCK_PREFIX "cmpxchg8b %3"
            : "=d" (nd), "=a" (y), "=c" (d),
              "=m" (*(volatile u64 *)(&page->count_info))
            : "0" (d), "1" (x), "c" (d), "b" (nx) );
    }
    while ( unlikely(nd != d) || unlikely(y != x) );

    return 1;
}
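
/*
 * Why cmpxchg8b: taking a reference is only safe if the page is still
 * owned by the expected domain, so the count and the owner must be
 * checked-and-updated as one unit. count_info and u.inuse._domain are
 * adjacent 32-bit words, letting the 8-byte compare-exchange update the
 * (count, owner) pair atomically; the loop repeats until both halves
 * match what was validated above.
 */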

void put_page_type(struct page_info *page);
int  get_page_type(struct page_info *page, unsigned long type);
int  get_page_from_l1e(l1_pgentry_t l1e, struct domain *d);
void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d);

static inline void put_page_and_type(struct page_info *page)
{
    put_page_type(page);
    put_page(page);
}

static inline int get_page_and_type(struct page_info *page,
                                    struct domain *domain,
                                    unsigned long type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}
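
/*
 * Example: the usual pairing when a frame must be held at a particular
 * type, e.g. while it is in use as an L1 page table:
 *
 *     if ( get_page_and_type(pg, d, PGT_l1_page_table) )
 *     {
 *         ...safe to treat the frame as an L1 table here...
 *         put_page_and_type(pg);
 *     }
 *
 * If the type reference cannot be taken, get_page_and_type() rolls back
 * the general reference it already holds, so the caller sees
 * all-or-nothing.
 */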

static inline int page_is_removable(struct page_info *page)
{
    return ((page->count_info & PGC_count_mask) == 1);
}

#define ASSERT_PAGE_IS_TYPE(_p, _t)                            \
    ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t)); \
    ASSERT(((_p)->u.inuse.type_info & PGT_count_mask) != 0)
#define ASSERT_PAGE_IS_DOMAIN(_p, _d)                          \
    ASSERT(((_p)->count_info & PGC_count_mask) != 0);          \
    ASSERT(page_get_owner(_p) == (_d))

// Quick test for whether a given page can be represented directly in CR3.
//
#if CONFIG_PAGING_LEVELS == 3
#define MFN_FITS_IN_CR3(_MFN) !(mfn_x(_MFN) >> 20)
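
/*
 * The shift explains the test: with 4kB frames, an MFN below 2^20 means a
 * machine address below 2^(20+12) = 4GB, which is the range a 32-bit PAE
 * CR3 value can point at.
 */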

/* Returns a lowmem machine address of the copied L3 root table. */
unsigned long
pae_copy_root(struct vcpu *v, l3_pgentry_t *l3tab);
#endif /* CONFIG_PAGING_LEVELS == 3 */

int check_descriptor(struct desc_struct *d);

/*
 * The MPT (machine->physical mapping table) is an array of word-sized
 * values, indexed on machine frame number. It is expected that guest OSes
 * will use it to store a "physical" frame number to give the appearance of
 * contiguous (or near contiguous) physical memory.
 */
#undef  machine_to_phys_mapping
#define machine_to_phys_mapping  ((unsigned long *)RDWR_MPT_VIRT_START)
#define INVALID_M2P_ENTRY        (~0UL)
#define VALID_M2P(_e)            (!((_e) & (1UL<<(BITS_PER_LONG-1))))

#define set_gpfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
#define get_gpfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])
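
/*
 * Example: maintaining and consulting the M2P when a frame is assigned a
 * guest pseudo-physical address:
 *
 *     set_gpfn_from_mfn(mfn, gpfn);       -- record mfn -> gpfn
 *     ...
 *     e = get_gpfn_from_mfn(mfn);
 *     if ( VALID_M2P(e) )                 -- top bit clear => valid entry
 *         gpfn = e;
 */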

#define mfn_to_gmfn(_d, mfn)             \
    ( (shadow_mode_translate(_d))        \
      ? get_gpfn_from_mfn(mfn)           \
      : (mfn) )

#define gmfn_to_mfn(_d, gpfn)  mfn_x(sh_gfn_to_mfn(_d, gpfn))

#define INVALID_MFN  (~0UL)

#ifdef MEMORY_GUARD
void memguard_init(void);
void memguard_guard_range(void *p, unsigned long l);
void memguard_unguard_range(void *p, unsigned long l);
#else
#define memguard_init()                ((void)0)
#define memguard_guard_range(_p,_l)    ((void)0)
#define memguard_unguard_range(_p,_l)  ((void)0)
#endif

void memguard_guard_stack(void *p);

int ptwr_do_page_fault(struct vcpu *, unsigned long,
                       struct cpu_user_regs *);

int audit_adjust_pgtables(struct domain *d, int dir, int noisy);

#ifndef NDEBUG

#define AUDIT_SHADOW_ALREADY_LOCKED ( 1u << 0 )
#define AUDIT_ERRORS_OK             ( 1u << 1 )
#define AUDIT_QUIET                 ( 1u << 2 )

void _audit_domain(struct domain *d, int flags);
#define audit_domain(_d) _audit_domain((_d), AUDIT_ERRORS_OK)
void audit_domains(void);

#else

#define _audit_domain(_d, _f) ((void)0)
#define audit_domain(_d)      ((void)0)
#define audit_domains()       ((void)0)

#endif

int new_guest_cr3(unsigned long pfn);
void make_cr3(struct vcpu *v, unsigned long mfn);

void propagate_page_fault(unsigned long addr, u16 error_code);

int __sync_lazy_execstate(void);

/* Arch-specific portion of memory_op hypercall. */
long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);
long subarch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);

int steal_page(
    struct domain *d, struct page_info *page, unsigned int memflags);

int map_ldt_shadow_page(unsigned int);

#endif /* __ASM_X86_MM_H__ */