view xen/include/asm-x86/page.h @ 18852:9a6153a89d66

x86: Fix mfn_to_virt() to cast MFN to address size.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author: Keir Fraser <keir.fraser@citrix.com>
date:   Wed Dec 03 11:38:36 2008 +0000 (2008-12-03)

#ifndef __X86_PAGE_H__
#define __X86_PAGE_H__

/*
 * It is important that the masks are signed quantities. This ensures that
 * the compiler sign-extends a 32-bit mask to 64 bits if that is required.
 */
#ifndef __ASSEMBLY__
#define PAGE_SIZE           (1L << PAGE_SHIFT)
#else
#define PAGE_SIZE           (1 << PAGE_SHIFT)
#endif
#define PAGE_MASK           (~(PAGE_SIZE-1))
#define PAGE_FLAG_MASK      (~0)

#ifndef __ASSEMBLY__
# include <asm/types.h>
# include <xen/lib.h>
#endif

#if defined(__i386__)
# include <asm/x86_32/page.h>
#elif defined(__x86_64__)
# include <asm/x86_64/page.h>
#endif

/* Read a pte atomically from memory. */
#define l1e_read_atomic(l1ep) \
    l1e_from_intpte(pte_read_atomic(&l1e_get_intpte(*(l1ep))))
#define l2e_read_atomic(l2ep) \
    l2e_from_intpte(pte_read_atomic(&l2e_get_intpte(*(l2ep))))
#define l3e_read_atomic(l3ep) \
    l3e_from_intpte(pte_read_atomic(&l3e_get_intpte(*(l3ep))))
#define l4e_read_atomic(l4ep) \
    l4e_from_intpte(pte_read_atomic(&l4e_get_intpte(*(l4ep))))

/* Write a pte atomically to memory. */
#define l1e_write_atomic(l1ep, l1e) \
    pte_write_atomic(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e))
#define l2e_write_atomic(l2ep, l2e) \
    pte_write_atomic(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e))
#define l3e_write_atomic(l3ep, l3e) \
    pte_write_atomic(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e))
#define l4e_write_atomic(l4ep, l4e) \
    pte_write_atomic(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))

/*
 * Write a pte safely but non-atomically to memory.
 * The PTE may become temporarily not-present during the update.
 */
#define l1e_write(l1ep, l1e) \
    pte_write(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e))
#define l2e_write(l2ep, l2e) \
    pte_write(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e))
#define l3e_write(l3ep, l3e) \
    pte_write(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e))
#define l4e_write(l4ep, l4e) \
    pte_write(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))
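
/*
 * Usage sketch (illustrative, not part of the original header; `l1ep' is a
 * hypothetical pointer into a mapped L1 table): read an entry atomically,
 * set a flag, and write it back so other CPUs never observe a torn entry.
 *
 *     l1_pgentry_t l1e = l1e_read_atomic(l1ep);
 *     l1e_add_flags(l1e, _PAGE_ACCESSED);
 *     l1e_write_atomic(l1ep, l1e);
 */
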
/* Get direct integer representation of a pte's contents (intpte_t). */
#define l1e_get_intpte(x)          ((x).l1)
#define l2e_get_intpte(x)          ((x).l2)
#define l3e_get_intpte(x)          ((x).l3)
#define l4e_get_intpte(x)          ((x).l4)

/* Get pfn mapped by pte (unsigned long). */
#define l1e_get_pfn(x)             \
    ((unsigned long)(((x).l1 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l2e_get_pfn(x)             \
    ((unsigned long)(((x).l2 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l3e_get_pfn(x)             \
    ((unsigned long)(((x).l3 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l4e_get_pfn(x)             \
    ((unsigned long)(((x).l4 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))

/* Get physical address of page mapped by pte (paddr_t). */
#define l1e_get_paddr(x)           \
    ((paddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
#define l2e_get_paddr(x)           \
    ((paddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK))))
#define l3e_get_paddr(x)           \
    ((paddr_t)(((x).l3 & (PADDR_MASK&PAGE_MASK))))
#define l4e_get_paddr(x)           \
    ((paddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK))))

/* Get pointer to info structure of page mapped by pte (struct page_info *). */
#define l1e_get_page(x)            (mfn_to_page(l1e_get_pfn(x)))
#define l2e_get_page(x)            (mfn_to_page(l2e_get_pfn(x)))
#define l3e_get_page(x)            (mfn_to_page(l3e_get_pfn(x)))
#define l4e_get_page(x)            (mfn_to_page(l4e_get_pfn(x)))

/* Get pte access flags (unsigned int). */
#define l1e_get_flags(x)           (get_pte_flags((x).l1))
#define l2e_get_flags(x)           (get_pte_flags((x).l2))
#define l3e_get_flags(x)           (get_pte_flags((x).l3))
#define l4e_get_flags(x)           (get_pte_flags((x).l4))
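
/*
 * Decomposition sketch (illustrative; `l1e' is a hypothetical entry): one
 * intpte_t carries both a frame address and flag bits, and the accessors
 * above pull out each view independently.
 *
 *     unsigned long mfn   = l1e_get_pfn(l1e);
 *     paddr_t       pa    = l1e_get_paddr(l1e);
 *     unsigned int  flags = l1e_get_flags(l1e);
 */
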
/* Construct an empty pte. */
#define l1e_empty() ((l1_pgentry_t) { 0 })
#define l2e_empty() ((l2_pgentry_t) { 0 })
#define l3e_empty() ((l3_pgentry_t) { 0 })
#define l4e_empty() ((l4_pgentry_t) { 0 })

/* Construct a pte from a pfn and access flags. */
#define l1e_from_pfn(pfn, flags)   \
    ((l1_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l2e_from_pfn(pfn, flags)   \
    ((l2_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l3e_from_pfn(pfn, flags)   \
    ((l3_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l4e_from_pfn(pfn, flags)   \
    ((l4_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })

/* Construct a pte from a physical address and access flags. */
#ifndef __ASSEMBLY__
static inline l1_pgentry_t l1e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l1_pgentry_t) { pa | put_pte_flags(flags) };
}
static inline l2_pgentry_t l2e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l2_pgentry_t) { pa | put_pte_flags(flags) };
}
static inline l3_pgentry_t l3e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l3_pgentry_t) { pa | put_pte_flags(flags) };
}
#if CONFIG_PAGING_LEVELS >= 4
static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l4_pgentry_t) { pa | put_pte_flags(flags) };
}
#endif
#endif /* !__ASSEMBLY__ */
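
/*
 * Construction sketch (illustrative; `mfn' is a hypothetical frame number):
 * the pfn is widened to intpte_t before shifting, so a 32-bit pfn cannot be
 * truncated, and l1e_from_paddr() asserts the address is page-aligned.
 *
 *     l1_pgentry_t a = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
 *     l1_pgentry_t b = l1e_from_paddr(pfn_to_paddr(mfn), __PAGE_HYPERVISOR);
 */
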
/* Construct a pte from its direct integer representation. */
#define l1e_from_intpte(intpte) ((l1_pgentry_t) { (intpte_t)(intpte) })
#define l2e_from_intpte(intpte) ((l2_pgentry_t) { (intpte_t)(intpte) })
#define l3e_from_intpte(intpte) ((l3_pgentry_t) { (intpte_t)(intpte) })
#define l4e_from_intpte(intpte) ((l4_pgentry_t) { (intpte_t)(intpte) })

/* Construct a pte from a page pointer and access flags. */
#define l1e_from_page(page, flags) (l1e_from_pfn(page_to_mfn(page),(flags)))
#define l2e_from_page(page, flags) (l2e_from_pfn(page_to_mfn(page),(flags)))
#define l3e_from_page(page, flags) (l3e_from_pfn(page_to_mfn(page),(flags)))
#define l4e_from_page(page, flags) (l4e_from_pfn(page_to_mfn(page),(flags)))

/* Add extra flags to an existing pte. */
#define l1e_add_flags(x, flags)    ((x).l1 |= put_pte_flags(flags))
#define l2e_add_flags(x, flags)    ((x).l2 |= put_pte_flags(flags))
#define l3e_add_flags(x, flags)    ((x).l3 |= put_pte_flags(flags))
#define l4e_add_flags(x, flags)    ((x).l4 |= put_pte_flags(flags))

/* Remove flags from an existing pte. */
#define l1e_remove_flags(x, flags) ((x).l1 &= ~put_pte_flags(flags))
#define l2e_remove_flags(x, flags) ((x).l2 &= ~put_pte_flags(flags))
#define l3e_remove_flags(x, flags) ((x).l3 &= ~put_pte_flags(flags))
#define l4e_remove_flags(x, flags) ((x).l4 &= ~put_pte_flags(flags))

/* Check if a pte's page mapping or significant access flags have changed. */
#define l1e_has_changed(x,y,flags) \
    ( !!(((x).l1 ^ (y).l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l2e_has_changed(x,y,flags) \
    ( !!(((x).l2 ^ (y).l2) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l3e_has_changed(x,y,flags) \
    ( !!(((x).l3 ^ (y).l3) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l4e_has_changed(x,y,flags) \
    ( !!(((x).l4 ^ (y).l4) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
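
/*
 * Comparison sketch (illustrative; `ol1e', `nl1e' and `flush' are
 * hypothetical locals): only the frame address and the caller-named flags
 * take part, so e.g. a bare _PAGE_ACCESSED update is not reported as a
 * change unless _PAGE_ACCESSED is passed in.
 *
 *     if ( l1e_has_changed(ol1e, nl1e, _PAGE_PRESENT|_PAGE_RW) )
 *         flush = 1;
 */
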
/* Pagetable walking. */
#define l2e_to_l1e(x) ((l1_pgentry_t *)__va(l2e_get_paddr(x)))
#define l3e_to_l2e(x) ((l2_pgentry_t *)__va(l3e_get_paddr(x)))
#define l4e_to_l3e(x) ((l3_pgentry_t *)__va(l4e_get_paddr(x)))

/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(a)         \
    (((a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
#define l2_table_offset(a)         \
    (((a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
#define l3_table_offset(a)         \
    (((a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
#define l4_table_offset(a)         \
    (((a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
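
/*
 * Worked example (illustrative, assuming the x86_64 values: 4kB pages,
 * 512-entry tables, level shifts 12/21/30/39): for a hypothetical
 * va == 0xffff830000403000,
 *
 *     l1_table_offset(va) == (va >> 12) & 511 ==   3
 *     l2_table_offset(va) == (va >> 21) & 511 ==   2
 *     l3_table_offset(va) == (va >> 30) & 511 ==   0
 *     l4_table_offset(va) == (va >> 39) & 511 == 262
 */
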
/* Convert a pointer to a page-table entry into pagetable slot index. */
#define pgentry_ptr_to_slot(_p)    \
    (((unsigned long)(_p) & ~PAGE_MASK) / sizeof(*(_p)))

#ifndef __ASSEMBLY__

/* Page-table type. */
#if CONFIG_PAGING_LEVELS == 3
/* x86_32 PAE */
typedef struct { u32 pfn; } pagetable_t;
#elif CONFIG_PAGING_LEVELS == 4
/* x86_64 */
typedef struct { u64 pfn; } pagetable_t;
#endif
#define pagetable_get_paddr(x)  ((paddr_t)(x).pfn << PAGE_SHIFT)
#define pagetable_get_page(x)   mfn_to_page((x).pfn)
#define pagetable_get_pfn(x)    ((x).pfn)
#define pagetable_get_mfn(x)    _mfn(((x).pfn))
#define pagetable_is_null(x)    ((x).pfn == 0)
#define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) })
#define pagetable_from_mfn(mfn) ((pagetable_t) { mfn_x(mfn) })
#define pagetable_from_page(pg) pagetable_from_pfn(page_to_mfn(pg))
#define pagetable_from_paddr(p) pagetable_from_pfn((p)>>PAGE_SHIFT)
#define pagetable_null()        pagetable_from_pfn(0)
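
/*
 * Usage sketch (illustrative; `mfn' is a hypothetical frame number): a
 * pagetable_t is just a wrapped root-table frame number; the macros above
 * convert it to other representations on demand.
 *
 *     pagetable_t pt = pagetable_from_mfn(_mfn(mfn));
 *     paddr_t     pa = pagetable_get_paddr(pt);
 *     ASSERT(pagetable_get_pfn(pt) == mfn);
 */
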
void clear_page_sse2(void *);
#define clear_page(_p)      (cpu_has_xmm2 ?                             \
                             clear_page_sse2((void *)(_p)) :            \
                             (void)memset((void *)(_p), 0, PAGE_SIZE))
void copy_page_sse2(void *, const void *);
#define copy_page(_t,_f)    (cpu_has_xmm2 ?                             \
                             copy_page_sse2(_t, _f) :                   \
                             (void)memcpy(_t, _f, PAGE_SIZE))

#define mfn_valid(mfn)      ((mfn) < max_page)

/* Convert between Xen-heap virtual addresses and machine addresses. */
#define __pa(x)             (virt_to_maddr(x))
#define __va(x)             (maddr_to_virt(x))

/* Convert between Xen-heap virtual addresses and machine frame numbers. */
#define virt_to_mfn(va)     (virt_to_maddr(va) >> PAGE_SHIFT)
#define mfn_to_virt(mfn)    (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))
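
/*
 * Worked example (illustrative, PAGE_SHIFT == 12): the (paddr_t) cast in
 * mfn_to_virt() is the point of this changeset.  For a frame above 4GB,
 * shifting an unwidened 32-bit MFN would truncate the machine address:
 *
 *     (paddr_t)0x110000 << 12 == 0x110000000   (correct)
 *     (u32)0x110000     << 12 == 0x10000000    (high bit lost)
 */
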
/* Convert between machine frame numbers and page-info structures. */
#define mfn_to_page(mfn)    (frame_table + (mfn))
#define page_to_mfn(pg)     ((unsigned long)((pg) - frame_table))

/* Convert between machine addresses and page-info structures. */
#define maddr_to_page(ma)   (frame_table + ((ma) >> PAGE_SHIFT))
#define page_to_maddr(pg)   ((paddr_t)((pg) - frame_table) << PAGE_SHIFT)

/* Convert between Xen-heap virtual addresses and page-info structures. */
#define virt_to_page(va)    (frame_table + (__pa(va) >> PAGE_SHIFT))
#define page_to_virt(pg)    (maddr_to_virt(page_to_maddr(pg)))

/* Convert between frame number and address formats. */
#define pfn_to_paddr(pfn)   ((paddr_t)(pfn) << PAGE_SHIFT)
#define paddr_to_pfn(pa)    ((unsigned long)((pa) >> PAGE_SHIFT))
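
/*
 * Round-trip sketch (illustrative, PAGE_SHIFT == 12): paddr_to_pfn()
 * discards the in-page offset that pfn_to_paddr() leaves as zero.
 *
 *     pfn_to_paddr(0x1234)    == 0x1234000
 *     paddr_to_pfn(0x1234fff) == 0x1234
 */
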
#endif /* !defined(__ASSEMBLY__) */

/* High table entries are reserved by the hypervisor. */
#define DOMAIN_ENTRIES_PER_L2_PAGETABLE     0
#define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE 0

#define DOMAIN_ENTRIES_PER_L4_PAGETABLE     \
    (l4_table_offset(HYPERVISOR_VIRT_START))
#define GUEST_ENTRIES_PER_L4_PAGETABLE      \
    (l4_table_offset(HYPERVISOR_VIRT_END))
#define HYPERVISOR_ENTRIES_PER_L4_PAGETABLE \
    (L4_PAGETABLE_ENTRIES - GUEST_ENTRIES_PER_L4_PAGETABLE  \
     + DOMAIN_ENTRIES_PER_L4_PAGETABLE)

/* Where to find each level of the linear mapping */
#define __linear_l1_table ((l1_pgentry_t *)(LINEAR_PT_VIRT_START))
#define __linear_l2_table \
 ((l2_pgentry_t *)(__linear_l1_table + l1_linear_offset(LINEAR_PT_VIRT_START)))
#define __linear_l3_table \
 ((l3_pgentry_t *)(__linear_l2_table + l2_linear_offset(LINEAR_PT_VIRT_START)))
#define __linear_l4_table \
 ((l4_pgentry_t *)(__linear_l3_table + l3_linear_offset(LINEAR_PT_VIRT_START)))

#ifndef __ASSEMBLY__
extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
#if CONFIG_PAGING_LEVELS == 3
extern l2_pgentry_t   idle_pg_table_l2[
    ROOT_PAGETABLE_ENTRIES * L2_PAGETABLE_ENTRIES];
#elif CONFIG_PAGING_LEVELS == 4
extern l2_pgentry_t  *compat_idle_pg_table_l2;
extern unsigned int   m2p_compat_vstart;
#endif
void paging_init(void);
void setup_idle_pagetable(void);
#endif /* !defined(__ASSEMBLY__) */

#define _PAGE_PRESENT  0x001U
#define _PAGE_RW       0x002U
#define _PAGE_USER     0x004U
#define _PAGE_PWT      0x008U
#define _PAGE_PCD      0x010U
#define _PAGE_ACCESSED 0x020U
#define _PAGE_DIRTY    0x040U
#define _PAGE_PAT      0x080U
#define _PAGE_PSE      0x080U
#define _PAGE_GLOBAL   0x100U
#define _PAGE_AVAIL0   0x200U
#define _PAGE_AVAIL1   0x400U
#define _PAGE_AVAIL2   0x800U
#define _PAGE_AVAIL    0xE00U
#define _PAGE_PSE_PAT 0x1000U

/*
 * Debug option: Ensure that granted mappings are not implicitly unmapped.
 * WARNING: This will need to be disabled to run OSes that use the spare PTE
 * bits themselves (e.g., *BSD).
 */
#ifdef NDEBUG
#undef _PAGE_GNTTAB
#endif
#ifndef _PAGE_GNTTAB
#define _PAGE_GNTTAB   0
#endif

#define __PAGE_HYPERVISOR \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_HYPERVISOR_NOCACHE \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)

#define GRANT_PTE_FLAGS \
    (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NX | _PAGE_GNTTAB)

#ifndef __ASSEMBLY__

static inline int get_order_from_bytes(paddr_t size)
{
    int order;
    size = (size-1) >> PAGE_SHIFT;
    for ( order = 0; size; order++ )
        size >>= 1;
    return order;
}

static inline int get_order_from_pages(unsigned long nr_pages)
{
    int order;
    nr_pages--;
    for ( order = 0; nr_pages; order++ )
        nr_pages >>= 1;
    return order;
}
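
/*
 * Worked example (illustrative, PAGE_SHIFT == 12): the order is the
 * smallest n such that (1 << n) pages cover the request, so one byte past
 * a power-of-two boundary rounds up to the next order.
 *
 *     get_order_from_bytes(4096) == 0     (1 page)
 *     get_order_from_bytes(8192) == 1     (2 pages)
 *     get_order_from_bytes(8193) == 2     (4 pages)
 *     get_order_from_pages(3)    == 2
 */
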
/* Allocator functions for Xen pagetables. */
void *alloc_xen_pagetable(void);
void free_xen_pagetable(void *v);
l2_pgentry_t *virt_to_xen_l2e(unsigned long v);
#ifdef __x86_64__
l3_pgentry_t *virt_to_xen_l3e(unsigned long v);
#endif

/* Map machine page range in Xen virtual address space. */
#define MAP_SMALL_PAGES _PAGE_AVAIL0 /* don't use superpages for the mapping */
int map_pages_to_xen(
    unsigned long virt,
    unsigned long mfn,
    unsigned long nr_mfns,
    unsigned int flags);
void destroy_xen_mappings(unsigned long v, unsigned long e);
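
/*
 * Usage sketch (illustrative; `virt' and `mfn' are hypothetical values
 * chosen by the caller, and the flags word is assumed to take PTE flags
 * as in this version of the tree): map 16 contiguous frames read/write
 * with 4kB mappings only, then tear the range down again.
 *
 *     map_pages_to_xen(virt, mfn, 16, __PAGE_HYPERVISOR | MAP_SMALL_PAGES);
 *     destroy_xen_mappings(virt, virt + (16UL << PAGE_SHIFT));
 */
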
/* Convert between PAT/PCD/PWT embedded in PTE flags and 3-bit cacheattr. */
static inline uint32_t pte_flags_to_cacheattr(uint32_t flags)
{
    return ((flags >> 5) & 4) | ((flags >> 3) & 3);
}
static inline uint32_t cacheattr_to_pte_flags(uint32_t cacheattr)
{
    return ((cacheattr & 4) << 5) | ((cacheattr & 3) << 3);
}
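
/*
 * Worked example (illustrative): the 3-bit cacheattr is {PAT,PCD,PWT};
 * in PTE flags PWT is bit 3 (0x008), PCD is bit 4 (0x010) and PAT is
 * bit 7 (0x080), hence the shifts by 3 and 5 above.
 *
 *     pte_flags_to_cacheattr(_PAGE_PCD | _PAGE_PWT) == 3
 *     cacheattr_to_pte_flags(3) == 0x018
 */
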
#endif /* !__ASSEMBLY__ */

#define PFN_DOWN(x)   ((x) >> PAGE_SHIFT)
#define PFN_UP(x)     (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
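
/*
 * Worked example (illustrative, PAGE_SHIFT == 12): PFN_DOWN() truncates to
 * the containing frame; PFN_UP() rounds up to the next frame boundary.
 *
 *     PFN_DOWN(0x3fff) == 3
 *     PFN_UP(0x3001)   == 4
 */
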
#endif /* __X86_PAGE_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */