
xen/include/asm-x86/page.h @ 13915:a00b8d3800a8

[XEN] Snapshot PAE l3es when they are shadowed.
We don't update the shadows so we mustn't look at the guest l3es
or we'll be confused by them if they change.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>

author    Tim Deegan <Tim.Deegan@xensource.com>
date      Wed Feb 14 14:46:18 2007 +0000
parents   6daa91dc9247
children  9e5e94942045
#ifndef __X86_PAGE_H__
#define __X86_PAGE_H__

/*
 * It is important that the masks are signed quantities. This ensures that
 * the compiler sign-extends a 32-bit mask to 64 bits if that is required.
 */
#ifndef __ASSEMBLY__
#define PAGE_SIZE (1L << PAGE_SHIFT)
#else
#define PAGE_SIZE (1 << PAGE_SHIFT)
#endif
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PAGE_FLAG_MASK (~0)
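
/*
 * Worked example (assuming the usual 4kB pages, i.e. PAGE_SHIFT == 12):
 * PAGE_SIZE-1 is 0xfff, so on a 64-bit build PAGE_MASK sign-extends to
 * 0xfffffffffffff000 instead of being truncated to 32 bits, and
 * "addr & PAGE_MASK" preserves the full page-aligned address.
 */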

#ifndef __ASSEMBLY__
# include <asm/types.h>
# include <xen/lib.h>
#endif

#if defined(__i386__)
# include <asm/x86_32/page.h>
#elif defined(__x86_64__)
# include <asm/x86_64/page.h>
#endif

/* Read a pte atomically from memory. */
#define l1e_read_atomic(l1ep) \
    l1e_from_intpte(pte_read_atomic(&l1e_get_intpte(*(l1ep))))
#define l2e_read_atomic(l2ep) \
    l2e_from_intpte(pte_read_atomic(&l2e_get_intpte(*(l2ep))))
#define l3e_read_atomic(l3ep) \
    l3e_from_intpte(pte_read_atomic(&l3e_get_intpte(*(l3ep))))
#define l4e_read_atomic(l4ep) \
    l4e_from_intpte(pte_read_atomic(&l4e_get_intpte(*(l4ep))))

/* Write a pte atomically to memory. */
#define l1e_write_atomic(l1ep, l1e) \
    pte_write_atomic(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e))
#define l2e_write_atomic(l2ep, l2e) \
    pte_write_atomic(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e))
#define l3e_write_atomic(l3ep, l3e) \
    pte_write_atomic(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e))
#define l4e_write_atomic(l4ep, l4e) \
    pte_write_atomic(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))
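
/*
 * These wrappers peel the raw intpte_t out of the typed l?_pgentry_t and
 * defer to pte_read_atomic()/pte_write_atomic(), which the subarch headers
 * included above are expected to supply (a single load/store where a pte
 * fits in one machine word, a cmpxchg-based sequence otherwise).
 * An illustrative sketch, assuming pl1e points at a live pagetable entry:
 *
 *     l1_pgentry_t old = l1e_read_atomic(pl1e);
 *     l1e_write_atomic(pl1e, l1e_empty());
 */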

/*
 * Write a pte safely but non-atomically to memory.
 * The PTE may become temporarily not-present during the update.
 */
#define l1e_write(l1ep, l1e) \
    pte_write(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e))
#define l2e_write(l2ep, l2e) \
    pte_write(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e))
#define l3e_write(l3ep, l3e) \
    pte_write(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e))
#define l4e_write(l4ep, l4e) \
    pte_write(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))
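
/*
 * The non-atomic l?e_write() variants are only suitable where a transient
 * not-present entry cannot be observed by another CPU or by a hardware
 * walk (e.g. while the containing table is not yet, or no longer,
 * reachable); otherwise use the atomic variants above.
 */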

/* Get direct integer representation of a pte's contents (intpte_t). */
#define l1e_get_intpte(x) ((x).l1)
#define l2e_get_intpte(x) ((x).l2)
#define l3e_get_intpte(x) ((x).l3)
#define l4e_get_intpte(x) ((x).l4)

/* Get pfn mapped by pte (unsigned long). */
#define l1e_get_pfn(x) \
    ((unsigned long)(((x).l1 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l2e_get_pfn(x) \
    ((unsigned long)(((x).l2 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l3e_get_pfn(x) \
    ((unsigned long)(((x).l3 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l4e_get_pfn(x) \
    ((unsigned long)(((x).l4 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
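
/*
 * (PADDR_MASK & PAGE_MASK) keeps only the physical-frame bits of an entry,
 * discarding the low flag bits and any high attribute bits (such as NX on
 * builds that have it), so the shift by PAGE_SHIFT yields a plain frame
 * number.
 */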

/* Get physical address of page mapped by pte (paddr_t). */
#define l1e_get_paddr(x) \
    ((paddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
#define l2e_get_paddr(x) \
    ((paddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK))))
#define l3e_get_paddr(x) \
    ((paddr_t)(((x).l3 & (PADDR_MASK&PAGE_MASK))))
#define l4e_get_paddr(x) \
    ((paddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK))))

/* Get pointer to info structure of page mapped by pte (struct page_info *). */
#define l1e_get_page(x) (mfn_to_page(l1e_get_pfn(x)))
#define l2e_get_page(x) (mfn_to_page(l2e_get_pfn(x)))
#define l3e_get_page(x) (mfn_to_page(l3e_get_pfn(x)))
#define l4e_get_page(x) (mfn_to_page(l4e_get_pfn(x)))

/* Get pte access flags (unsigned int). */
#define l1e_get_flags(x) (get_pte_flags((x).l1))
#define l2e_get_flags(x) (get_pte_flags((x).l2))
#define l3e_get_flags(x) (get_pte_flags((x).l3))
#define l4e_get_flags(x) (get_pte_flags((x).l4))

/* Construct an empty pte. */
#define l1e_empty() ((l1_pgentry_t) { 0 })
#define l2e_empty() ((l2_pgentry_t) { 0 })
#define l3e_empty() ((l3_pgentry_t) { 0 })
#define l4e_empty() ((l4_pgentry_t) { 0 })

/* Construct a pte from a pfn and access flags. */
#define l1e_from_pfn(pfn, flags) \
    ((l1_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l2e_from_pfn(pfn, flags) \
    ((l2_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l3e_from_pfn(pfn, flags) \
    ((l3_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l4e_from_pfn(pfn, flags) \
    ((l4_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
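
/*
 * An illustrative use: mapping machine frame mfn read/write for Xen itself
 * would build the entry as
 *
 *     l1_pgentry_t l1e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
 *
 * put_pte_flags()/get_pte_flags() (from the subarch headers) convert between
 * this packed flags argument and the layout actually stored in the entry,
 * chiefly so that a high NX bit can be folded into the flags word on builds
 * that support it.
 */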

/* Construct a pte from a physical address and access flags. */
#ifndef __ASSEMBLY__
static inline l1_pgentry_t l1e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l1_pgentry_t) { pa | put_pte_flags(flags) };
}
static inline l2_pgentry_t l2e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l2_pgentry_t) { pa | put_pte_flags(flags) };
}
#if CONFIG_PAGING_LEVELS >= 3
static inline l3_pgentry_t l3e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l3_pgentry_t) { pa | put_pte_flags(flags) };
}
#endif
#if CONFIG_PAGING_LEVELS >= 4
static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l4_pgentry_t) { pa | put_pte_flags(flags) };
}
#endif
#endif /* !__ASSEMBLY__ */

/* Construct a pte from its direct integer representation. */
#define l1e_from_intpte(intpte) ((l1_pgentry_t) { (intpte_t)(intpte) })
#define l2e_from_intpte(intpte) ((l2_pgentry_t) { (intpte_t)(intpte) })
#define l3e_from_intpte(intpte) ((l3_pgentry_t) { (intpte_t)(intpte) })
#define l4e_from_intpte(intpte) ((l4_pgentry_t) { (intpte_t)(intpte) })

/* Construct a pte from a page pointer and access flags. */
#define l1e_from_page(page, flags) (l1e_from_pfn(page_to_mfn(page),(flags)))
#define l2e_from_page(page, flags) (l2e_from_pfn(page_to_mfn(page),(flags)))
#define l3e_from_page(page, flags) (l3e_from_pfn(page_to_mfn(page),(flags)))
#define l4e_from_page(page, flags) (l4e_from_pfn(page_to_mfn(page),(flags)))

/* Add extra flags to an existing pte. */
#define l1e_add_flags(x, flags) ((x).l1 |= put_pte_flags(flags))
#define l2e_add_flags(x, flags) ((x).l2 |= put_pte_flags(flags))
#define l3e_add_flags(x, flags) ((x).l3 |= put_pte_flags(flags))
#define l4e_add_flags(x, flags) ((x).l4 |= put_pte_flags(flags))

/* Remove flags from an existing pte. */
#define l1e_remove_flags(x, flags) ((x).l1 &= ~put_pte_flags(flags))
#define l2e_remove_flags(x, flags) ((x).l2 &= ~put_pte_flags(flags))
#define l3e_remove_flags(x, flags) ((x).l3 &= ~put_pte_flags(flags))
#define l4e_remove_flags(x, flags) ((x).l4 &= ~put_pte_flags(flags))

/* Check if a pte's page mapping or significant access flags have changed. */
#define l1e_has_changed(x,y,flags) \
    ( !!(((x).l1 ^ (y).l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l2e_has_changed(x,y,flags) \
    ( !!(((x).l2 ^ (y).l2) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l3e_has_changed(x,y,flags) \
    ( !!(((x).l3 ^ (y).l3) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l4e_has_changed(x,y,flags) \
    ( !!(((x).l4 ^ (y).l4) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
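
/*
 * The XOR isolates the bits that differ between the two entries; the result
 * is non-zero only if a difference falls inside the frame-address field or
 * inside the caller-supplied set of significant flags. For example,
 * l1e_has_changed(x, y, _PAGE_PRESENT) ignores a flipped accessed or dirty
 * bit but reports any change of target frame or of the present bit.
 */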

/* Pagetable walking. */
#define l2e_to_l1e(x) ((l1_pgentry_t *)__va(l2e_get_paddr(x)))
#define l3e_to_l2e(x) ((l2_pgentry_t *)__va(l3e_get_paddr(x)))
#define l4e_to_l3e(x) ((l3_pgentry_t *)__va(l4e_get_paddr(x)))

/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(a) \
    (((a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
#define l2_table_offset(a) \
    (((a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
#define l3_table_offset(a) \
    (((a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
#define l4_table_offset(a) \
    (((a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))

/* Convert a pointer to a page-table entry into pagetable slot index. */
#define pgentry_ptr_to_slot(_p) \
    (((unsigned long)(_p) & ~PAGE_MASK) / sizeof(*(_p)))
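
/*
 * An illustrative walk step (assuming the next-level table is covered by
 * the __va() direct mapping):
 *
 *     l2_pgentry_t *pl2e = l3e_to_l2e(l3e) + l2_table_offset(vaddr);
 *
 * pgentry_ptr_to_slot() recovers the same index from a pointer into a
 * table: the byte offset within the page divided by the entry size.
 */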

/* Page-table type. */
#ifndef __ASSEMBLY__
#if CONFIG_PAGING_LEVELS == 2
/* x86_32 default */
typedef struct { u32 pfn; } pagetable_t;
#elif CONFIG_PAGING_LEVELS == 3
/* x86_32 PAE */
typedef struct { u32 pfn; } pagetable_t;
#elif CONFIG_PAGING_LEVELS == 4
/* x86_64 */
typedef struct { u64 pfn; } pagetable_t;
#endif
#define pagetable_get_paddr(x) ((paddr_t)(x).pfn << PAGE_SHIFT)
#define pagetable_get_page(x) mfn_to_page((x).pfn)
#define pagetable_get_pfn(x) ((x).pfn)
#define pagetable_get_mfn(x) _mfn(((x).pfn))
#define pagetable_is_null(x) ((x).pfn == 0)
#define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) })
#define pagetable_from_mfn(mfn) ((pagetable_t) { mfn_x(mfn) })
#define pagetable_from_page(pg) pagetable_from_pfn(page_to_mfn(pg))
#define pagetable_from_paddr(p) pagetable_from_pfn((p)>>PAGE_SHIFT)
#define pagetable_null() pagetable_from_pfn(0)
#endif
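
/*
 * pagetable_t wraps nothing more than the frame number of a root table, so
 * a domain's top-level table can be passed around and compared cheaply;
 * e.g. the physical address to load into %cr3 comes from
 * pagetable_get_paddr(), and pagetable_is_null() is the conventional
 * "no table yet" test.
 */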

#define clear_page(_p) memset((void *)(_p), 0, PAGE_SIZE)
#define copy_page(_t,_f) memcpy((void *)(_t), (void *)(_f), PAGE_SIZE)

#define mfn_valid(mfn) ((mfn) < max_page)

/* Convert between Xen-heap virtual addresses and machine addresses. */
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define virt_to_maddr(va) ((unsigned long)(va)-PAGE_OFFSET)
#define maddr_to_virt(ma) ((void *)((unsigned long)(ma)+PAGE_OFFSET))
/* Shorthand versions of the above functions. */
#define __pa(x) (virt_to_maddr(x))
#define __va(x) (maddr_to_virt(x))

/* Convert between Xen-heap virtual addresses and machine frame numbers. */
#define virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT)
#define mfn_to_virt(mfn) (maddr_to_virt(mfn << PAGE_SHIFT))

/* Convert between machine frame numbers and page-info structures. */
#define mfn_to_page(mfn) (frame_table + (mfn))
#define page_to_mfn(pg) ((unsigned long)((pg) - frame_table))

/* Convert between machine addresses and page-info structures. */
#define maddr_to_page(ma) (frame_table + ((ma) >> PAGE_SHIFT))
#define page_to_maddr(pg) ((paddr_t)((pg) - frame_table) << PAGE_SHIFT)

/* Convert between Xen-heap virtual addresses and page-info structures. */
#define virt_to_page(va) (frame_table + (__pa(va) >> PAGE_SHIFT))
#define page_to_virt(pg) (maddr_to_virt(page_to_maddr(pg)))

/* Convert between frame number and address formats. */
#define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
#define paddr_to_pfn(pa) ((unsigned long)((pa) >> PAGE_SHIFT))
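
/*
 * All of these assume the address lies inside Xen's 1:1 mapping at
 * PAGE_OFFSET, so each conversion is plain offset or shift arithmetic. An
 * illustrative round trip for a Xen-heap pointer p:
 *
 *     struct page_info *pg = virt_to_page(p);
 *     void *q = page_to_virt(pg);        // q points back into p's page
 *
 * Addresses outside that mapping must not be fed through the virt_to_*
 * macros.
 */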

/* High table entries are reserved by the hypervisor. */
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
#define DOMAIN_ENTRIES_PER_L2_PAGETABLE \
    (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT)
#define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE \
    (L2_PAGETABLE_ENTRIES - DOMAIN_ENTRIES_PER_L2_PAGETABLE)
#else
#define DOMAIN_ENTRIES_PER_L2_PAGETABLE 0
#define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE 0

#define DOMAIN_ENTRIES_PER_L4_PAGETABLE \
    (l4_table_offset(HYPERVISOR_VIRT_START))
#define GUEST_ENTRIES_PER_L4_PAGETABLE \
    (l4_table_offset(HYPERVISOR_VIRT_END))
#define HYPERVISOR_ENTRIES_PER_L4_PAGETABLE \
    (L4_PAGETABLE_ENTRIES - GUEST_ENTRIES_PER_L4_PAGETABLE \
     + DOMAIN_ENTRIES_PER_L4_PAGETABLE)
#endif

/* Where to find each level of the linear mapping */
#define __linear_l1_table ((l1_pgentry_t *)(LINEAR_PT_VIRT_START))
#define __linear_l2_table \
    ((l2_pgentry_t *)(__linear_l1_table + l1_linear_offset(LINEAR_PT_VIRT_START)))
#define __linear_l3_table \
    ((l3_pgentry_t *)(__linear_l2_table + l2_linear_offset(LINEAR_PT_VIRT_START)))
#define __linear_l4_table \
    ((l4_pgentry_t *)(__linear_l3_table + l3_linear_offset(LINEAR_PT_VIRT_START)))
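
/*
 * These rely on the recursive ("linear") pagetable trick: one slot of the
 * top-level table points back at the top-level table itself, so every
 * pagetable page of the current address space shows up in a fixed virtual
 * window starting at LINEAR_PT_VIRT_START. Indexing that window by
 * l1_linear_offset(LINEAR_PT_VIRT_START) lands on the next level up, which
 * is why each level here is expressed in terms of the one below it.
 */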

#ifndef __ASSEMBLY__
#if CONFIG_PAGING_LEVELS == 3
extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
extern l3_pgentry_t idle_pg_table_l3[ROOT_PAGETABLE_ENTRIES];
extern l2_pgentry_t idle_pg_table_l2[ROOT_PAGETABLE_ENTRIES*L2_PAGETABLE_ENTRIES];
#else
extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
extern l2_pgentry_t idle_pg_table_l2[ROOT_PAGETABLE_ENTRIES];
#ifdef CONFIG_COMPAT
extern l2_pgentry_t *compat_idle_pg_table_l2;
extern unsigned int m2p_compat_vstart;
#endif
#endif
void paging_init(void);
void setup_idle_pagetable(void);
#endif

#define __pge_off() \
    do { \
        __asm__ __volatile__( \
            "mov %0, %%cr4; # turn off PGE " \
            : : "r" (mmu_cr4_features & ~X86_CR4_PGE) ); \
    } while ( 0 )

#define __pge_on() \
    do { \
        __asm__ __volatile__( \
            "mov %0, %%cr4; # turn on PGE " \
            : : "r" (mmu_cr4_features) ); \
    } while ( 0 )

#define _PAGE_PRESENT  0x001U
#define _PAGE_RW       0x002U
#define _PAGE_USER     0x004U
#define _PAGE_PWT      0x008U
#define _PAGE_PCD      0x010U
#define _PAGE_ACCESSED 0x020U
#define _PAGE_DIRTY    0x040U
#define _PAGE_PAT      0x080U
#define _PAGE_PSE      0x080U
#define _PAGE_GLOBAL   0x100U
#define _PAGE_AVAIL0   0x200U
#define _PAGE_AVAIL1   0x400U
#define _PAGE_AVAIL2   0x800U
#define _PAGE_AVAIL    0xE00U
#define _PAGE_PSE_PAT  0x1000U
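
/*
 * Note that _PAGE_PAT and _PAGE_PSE share bit 7: in an L1 entry that bit
 * selects the PAT entry, while in L2 and higher entries it marks a
 * superpage, in which case the PAT selector moves to bit 12
 * (_PAGE_PSE_PAT).
 */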

/*
 * Debug option: Ensure that granted mappings are not implicitly unmapped.
 * WARNING: This will need to be disabled to run OSes that use the spare PTE
 * bits themselves (e.g., *BSD).
 */
#ifndef NDEBUG
#define _PAGE_GNTTAB _PAGE_AVAIL2
#else
#define _PAGE_GNTTAB 0
#endif

#define __PAGE_HYPERVISOR \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_HYPERVISOR_NOCACHE \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)

#ifndef __ASSEMBLY__

static inline int get_order_from_bytes(paddr_t size)
{
    int order;
    size = (size-1) >> PAGE_SHIFT;
    for ( order = 0; size; order++ )
        size >>= 1;
    return order;
}

static inline int get_order_from_pages(unsigned long nr_pages)
{
    int order;
    nr_pages--;
    for ( order = 0; nr_pages; order++ )
        nr_pages >>= 1;
    return order;
}
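
/*
 * Both helpers round up to the next power-of-two allocation order. With
 * 4kB pages, get_order_from_bytes(PAGE_SIZE) == 0,
 * get_order_from_bytes(8192) == 1, and get_order_from_bytes(12288) == 2
 * (three pages round up to four); get_order_from_pages() is the same
 * computation starting from a page count.
 */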

/* Allocator functions for Xen pagetables. */
struct page_info *alloc_xen_pagetable(void);
void free_xen_pagetable(struct page_info *pg);
l2_pgentry_t *virt_to_xen_l2e(unsigned long v);

/* Map machine page range in Xen virtual address space. */
#define MAP_SMALL_PAGES (1UL<<16) /* don't use superpages for the mapping */
int
map_pages_to_xen(
    unsigned long virt,
    unsigned long mfn,
    unsigned long nr_mfns,
    unsigned long flags);
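
/*
 * An illustrative call, mapping nr machine frames starting at mfn as
 * uncacheable hypervisor mappings at virtual address va, without
 * superpages:
 *
 *     map_pages_to_xen(va, mfn, nr, __PAGE_HYPERVISOR_NOCACHE | MAP_SMALL_PAGES);
 */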

#endif /* !__ASSEMBLY__ */

#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
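
/*
 * Round an address or byte count down/up to a frame count. With 4kB pages,
 * PFN_DOWN(0x1234) == 1 while PFN_UP(0x1234) == 2, so PFN_UP() is the usual
 * way to ask how many frames are needed to cover a byte length.
 */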

#endif /* __X86_PAGE_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */