ia64/xen-unstable: xen/include/asm-x86/page.h @ 15812:86a154e1ef5d

[HVM] Shadow: don't shadow the p2m table.
For HVM vcpus with paging disabled, we used to shadow the p2m table,
and skip the p2m lookup to go from gfn to mfn. Instead, we now
provide a simple pagetable that gives a one-to-one mapping of 4GB, and
shadow that, making the translations from gfn to mfn via the p2m.
This removes the paging-disabled special-case code from the shadow
fault handler, and allows us to expand the p2m interface, since all HVM
translations now go through the same p2m lookups.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Fri Aug 31 11:06:22 2007 +0100
parents e704430b5b32
children ca2984b17fcf

#ifndef __X86_PAGE_H__
#define __X86_PAGE_H__

/*
 * It is important that the masks are signed quantities. This ensures that
 * the compiler sign-extends a 32-bit mask to 64 bits if that is required.
 */
#ifndef __ASSEMBLY__
#define PAGE_SIZE           (1L << PAGE_SHIFT)
#else
#define PAGE_SIZE           (1 << PAGE_SHIFT)
#endif
#define PAGE_MASK           (~(PAGE_SIZE-1))
#define PAGE_FLAG_MASK      (~0)

#ifndef __ASSEMBLY__
# include <asm/types.h>
# include <xen/lib.h>
#endif

#if defined(__i386__)
# include <asm/x86_32/page.h>
#elif defined(__x86_64__)
# include <asm/x86_64/page.h>
#endif

/* Read a pte atomically from memory. */
#define l1e_read_atomic(l1ep) \
    l1e_from_intpte(pte_read_atomic(&l1e_get_intpte(*(l1ep))))
#define l2e_read_atomic(l2ep) \
    l2e_from_intpte(pte_read_atomic(&l2e_get_intpte(*(l2ep))))
#define l3e_read_atomic(l3ep) \
    l3e_from_intpte(pte_read_atomic(&l3e_get_intpte(*(l3ep))))
#define l4e_read_atomic(l4ep) \
    l4e_from_intpte(pte_read_atomic(&l4e_get_intpte(*(l4ep))))

/* Write a pte atomically to memory. */
#define l1e_write_atomic(l1ep, l1e) \
    pte_write_atomic(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e))
#define l2e_write_atomic(l2ep, l2e) \
    pte_write_atomic(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e))
#define l3e_write_atomic(l3ep, l3e) \
    pte_write_atomic(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e))
#define l4e_write_atomic(l4ep, l4e) \
    pte_write_atomic(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))

/*
 * Write a pte safely but non-atomically to memory.
 * The PTE may become temporarily not-present during the update.
 */
#define l1e_write(l1ep, l1e) \
    pte_write(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e))
#define l2e_write(l2ep, l2e) \
    pte_write(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e))
#define l3e_write(l3ep, l3e) \
    pte_write(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e))
#define l4e_write(l4ep, l4e) \
    pte_write(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))
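
/*
 * Usage sketch (editor's illustration, not part of the original header):
 * replacing a live l1e.  The atomic variant keeps the entry valid at
 * every instant; plain l1e_write() may let a concurrent hardware walk
 * see a transient not-present entry, so it is only suitable where that
 * is known to be harmless.
 *
 *     l1_pgentry_t nl1e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
 *     l1e_write_atomic(pl1e, nl1e);   // pl1e: pointer to the mapped pte
 */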

/* Get direct integer representation of a pte's contents (intpte_t). */
#define l1e_get_intpte(x)          ((x).l1)
#define l2e_get_intpte(x)          ((x).l2)
#define l3e_get_intpte(x)          ((x).l3)
#define l4e_get_intpte(x)          ((x).l4)

/* Get pfn mapped by pte (unsigned long). */
#define l1e_get_pfn(x)             \
    ((unsigned long)(((x).l1 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l2e_get_pfn(x)             \
    ((unsigned long)(((x).l2 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l3e_get_pfn(x)             \
    ((unsigned long)(((x).l3 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l4e_get_pfn(x)             \
    ((unsigned long)(((x).l4 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))

/* Get physical address of page mapped by pte (paddr_t). */
#define l1e_get_paddr(x)           \
    ((paddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
#define l2e_get_paddr(x)           \
    ((paddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK))))
#define l3e_get_paddr(x)           \
    ((paddr_t)(((x).l3 & (PADDR_MASK&PAGE_MASK))))
#define l4e_get_paddr(x)           \
    ((paddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK))))

/* Get pointer to info structure of page mapped by pte (struct page_info *). */
#define l1e_get_page(x)            (mfn_to_page(l1e_get_pfn(x)))
#define l2e_get_page(x)            (mfn_to_page(l2e_get_pfn(x)))
#define l3e_get_page(x)            (mfn_to_page(l3e_get_pfn(x)))
#define l4e_get_page(x)            (mfn_to_page(l4e_get_pfn(x)))

/* Get pte access flags (unsigned int). */
#define l1e_get_flags(x)           (get_pte_flags((x).l1))
#define l2e_get_flags(x)           (get_pte_flags((x).l2))
#define l3e_get_flags(x)           (get_pte_flags((x).l3))
#define l4e_get_flags(x)           (get_pte_flags((x).l4))
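
/*
 * Decoding sketch (editor's illustration): pulling one l1e apart with
 * the accessors above.
 *
 *     unsigned long mfn   = l1e_get_pfn(l1e);    // frame number
 *     paddr_t       pa    = l1e_get_paddr(l1e);  // == mfn << PAGE_SHIFT
 *     unsigned int  flags = l1e_get_flags(l1e);
 *     if ( flags & _PAGE_PRESENT )
 *         pg = l1e_get_page(l1e);                // frame's page_info
 */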

/* Construct an empty pte. */
#define l1e_empty() ((l1_pgentry_t) { 0 })
#define l2e_empty() ((l2_pgentry_t) { 0 })
#define l3e_empty() ((l3_pgentry_t) { 0 })
#define l4e_empty() ((l4_pgentry_t) { 0 })

/* Construct a pte from a pfn and access flags. */
#define l1e_from_pfn(pfn, flags)   \
    ((l1_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l2e_from_pfn(pfn, flags)   \
    ((l2_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l3e_from_pfn(pfn, flags)   \
    ((l3_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l4e_from_pfn(pfn, flags)   \
    ((l4_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
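
/*
 * Sketch (editor's illustration, assuming a PAE-style layout where
 * PAGETABLE_ORDER comes from the per-arch header included above): a
 * one-to-one superpage mapping of the kind this changeset introduces
 * for paging-disabled HVM vcpus could be built from these constructors,
 * one 2MB frame run per L2 slot, 4GB in total:
 *
 *     for ( i = 0; i < 4 * L2_PAGETABLE_ENTRIES; i++ )
 *         l2e[i] = l2e_from_pfn(i << PAGETABLE_ORDER,
 *                               __PAGE_HYPERVISOR | _PAGE_USER | _PAGE_PSE);
 */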

/* Construct a pte from a physical address and access flags. */
#ifndef __ASSEMBLY__
static inline l1_pgentry_t l1e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l1_pgentry_t) { pa | put_pte_flags(flags) };
}
static inline l2_pgentry_t l2e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l2_pgentry_t) { pa | put_pte_flags(flags) };
}
#if CONFIG_PAGING_LEVELS >= 3
static inline l3_pgentry_t l3e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l3_pgentry_t) { pa | put_pte_flags(flags) };
}
#endif
#if CONFIG_PAGING_LEVELS >= 4
static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l4_pgentry_t) { pa | put_pte_flags(flags) };
}
#endif
#endif /* !__ASSEMBLY__ */

/* Construct a pte from its direct integer representation. */
#define l1e_from_intpte(intpte)    ((l1_pgentry_t) { (intpte_t)(intpte) })
#define l2e_from_intpte(intpte)    ((l2_pgentry_t) { (intpte_t)(intpte) })
#define l3e_from_intpte(intpte)    ((l3_pgentry_t) { (intpte_t)(intpte) })
#define l4e_from_intpte(intpte)    ((l4_pgentry_t) { (intpte_t)(intpte) })

/* Construct a pte from a page pointer and access flags. */
#define l1e_from_page(page, flags) (l1e_from_pfn(page_to_mfn(page),(flags)))
#define l2e_from_page(page, flags) (l2e_from_pfn(page_to_mfn(page),(flags)))
#define l3e_from_page(page, flags) (l3e_from_pfn(page_to_mfn(page),(flags)))
#define l4e_from_page(page, flags) (l4e_from_pfn(page_to_mfn(page),(flags)))

/* Add extra flags to an existing pte. */
#define l1e_add_flags(x, flags)    ((x).l1 |= put_pte_flags(flags))
#define l2e_add_flags(x, flags)    ((x).l2 |= put_pte_flags(flags))
#define l3e_add_flags(x, flags)    ((x).l3 |= put_pte_flags(flags))
#define l4e_add_flags(x, flags)    ((x).l4 |= put_pte_flags(flags))

/* Remove flags from an existing pte. */
#define l1e_remove_flags(x, flags) ((x).l1 &= ~put_pte_flags(flags))
#define l2e_remove_flags(x, flags) ((x).l2 &= ~put_pte_flags(flags))
#define l3e_remove_flags(x, flags) ((x).l3 &= ~put_pte_flags(flags))
#define l4e_remove_flags(x, flags) ((x).l4 &= ~put_pte_flags(flags))

/* Check if a pte's page mapping or significant access flags have changed. */
#define l1e_has_changed(x,y,flags) \
    ( !!(((x).l1 ^ (y).l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l2e_has_changed(x,y,flags) \
    ( !!(((x).l2 ^ (y).l2) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l3e_has_changed(x,y,flags) \
    ( !!(((x).l3 ^ (y).l3) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l4e_has_changed(x,y,flags) \
    ( !!(((x).l4 ^ (y).l4) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
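
/*
 * Usage sketch (editor's illustration): deciding whether a pte update
 * actually changed anything the caller cares about, ignoring noise in
 * unrelated flag bits such as _PAGE_ACCESSED:
 *
 *     if ( l1e_has_changed(ol1e, nl1e, _PAGE_PRESENT | _PAGE_RW) )
 *         ...;  // mapping, presence or writability differs: propagate
 */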

/* Pagetable walking. */
#define l2e_to_l1e(x)              ((l1_pgentry_t *)__va(l2e_get_paddr(x)))
#define l3e_to_l2e(x)              ((l2_pgentry_t *)__va(l3e_get_paddr(x)))
#define l4e_to_l3e(x)              ((l3_pgentry_t *)__va(l4e_get_paddr(x)))

/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(a)         \
    (((a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
#define l2_table_offset(a)         \
    (((a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
#define l3_table_offset(a)         \
    (((a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
#define l4_table_offset(a)         \
    (((a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))

/* Convert a pointer to a page-table entry into pagetable slot index. */
#define pgentry_ptr_to_slot(_p)    \
    (((unsigned long)(_p) & ~PAGE_MASK) / sizeof(*(_p)))
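
/*
 * Walk sketch (editor's illustration, 4-level case): descending from a
 * root table to the l1e covering virtual address va.  Each step assumes
 * the entry just read is present.
 *
 *     l4_pgentry_t l4e = l4_table[l4_table_offset(va)];
 *     l3_pgentry_t l3e = l4e_to_l3e(l4e)[l3_table_offset(va)];
 *     l2_pgentry_t l2e = l3e_to_l2e(l3e)[l2_table_offset(va)];
 *     l1_pgentry_t l1e = l2e_to_l1e(l2e)[l1_table_offset(va)];
 */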

#ifndef __ASSEMBLY__

/* Page-table type. */
#if CONFIG_PAGING_LEVELS == 2
/* x86_32 default */
typedef struct { u32 pfn; } pagetable_t;
#elif CONFIG_PAGING_LEVELS == 3
/* x86_32 PAE */
typedef struct { u32 pfn; } pagetable_t;
#elif CONFIG_PAGING_LEVELS == 4
/* x86_64 */
typedef struct { u64 pfn; } pagetable_t;
#endif
#define pagetable_get_paddr(x)  ((paddr_t)(x).pfn << PAGE_SHIFT)
#define pagetable_get_page(x)   mfn_to_page((x).pfn)
#define pagetable_get_pfn(x)    ((x).pfn)
#define pagetable_get_mfn(x)    _mfn(((x).pfn))
#define pagetable_is_null(x)    ((x).pfn == 0)
#define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) })
#define pagetable_from_mfn(mfn) ((pagetable_t) { mfn_x(mfn) })
#define pagetable_from_page(pg) pagetable_from_pfn(page_to_mfn(pg))
#define pagetable_from_paddr(p) pagetable_from_pfn((p)>>PAGE_SHIFT)
#define pagetable_null()        pagetable_from_pfn(0)
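
/*
 * Usage sketch (editor's illustration): a top-level pagetable travels
 * around as an opaque pagetable_t; pointing hardware at it means
 * recovering the physical address for CR3.
 *
 *     pagetable_t pt = pagetable_from_mfn(mfn);
 *     if ( !pagetable_is_null(pt) )
 *         write_cr3(pagetable_get_paddr(pt));
 */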

void clear_page_sse2(void *);
#define clear_page(_p)      (cpu_has_xmm2 ?                             \
                             clear_page_sse2((void *)(_p)) :            \
                             (void)memset((void *)(_p), 0, PAGE_SIZE))
#define copy_page(_t,_f)    memcpy((void *)(_t), (void *)(_f), PAGE_SIZE)

#define mfn_valid(mfn)      ((mfn) < max_page)

/* Convert between Xen-heap virtual addresses and machine addresses. */
#define __pa(x)             (virt_to_maddr(x))
#define __va(x)             (maddr_to_virt(x))

/* Convert between Xen-heap virtual addresses and machine frame numbers. */
#define virt_to_mfn(va)     (virt_to_maddr(va) >> PAGE_SHIFT)
#define mfn_to_virt(mfn)    (maddr_to_virt(mfn << PAGE_SHIFT))

/* Convert between machine frame numbers and page-info structures. */
#define mfn_to_page(mfn)    (frame_table + (mfn))
#define page_to_mfn(pg)     ((unsigned long)((pg) - frame_table))

/* Convert between machine addresses and page-info structures. */
#define maddr_to_page(ma)   (frame_table + ((ma) >> PAGE_SHIFT))
#define page_to_maddr(pg)   ((paddr_t)((pg) - frame_table) << PAGE_SHIFT)

/* Convert between Xen-heap virtual addresses and page-info structures. */
#define virt_to_page(va)    (frame_table + (__pa(va) >> PAGE_SHIFT))
#define page_to_virt(pg)    (maddr_to_virt(page_to_maddr(pg)))

/* Convert between frame number and address formats. */
#define pfn_to_paddr(pfn)   ((paddr_t)(pfn) << PAGE_SHIFT)
#define paddr_to_pfn(pa)    ((unsigned long)((pa) >> PAGE_SHIFT))
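
/*
 * Round-trip sketch (editor's illustration): for a Xen-heap address va
 * the conversions above compose, so all of these name the same frame.
 *
 *     ASSERT(virt_to_page(va) == mfn_to_page(virt_to_mfn(va)));
 *     ASSERT(page_to_maddr(virt_to_page(va)) == (__pa(va) & PAGE_MASK));
 */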

#endif /* !defined(__ASSEMBLY__) */

/* High table entries are reserved by the hypervisor. */
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
#define DOMAIN_ENTRIES_PER_L2_PAGETABLE     \
    (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT)
#define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE \
    (L2_PAGETABLE_ENTRIES - DOMAIN_ENTRIES_PER_L2_PAGETABLE)
#else
#define DOMAIN_ENTRIES_PER_L2_PAGETABLE     0
#define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE 0

#define DOMAIN_ENTRIES_PER_L4_PAGETABLE     \
    (l4_table_offset(HYPERVISOR_VIRT_START))
#define GUEST_ENTRIES_PER_L4_PAGETABLE      \
    (l4_table_offset(HYPERVISOR_VIRT_END))
#define HYPERVISOR_ENTRIES_PER_L4_PAGETABLE \
    (L4_PAGETABLE_ENTRIES - GUEST_ENTRIES_PER_L4_PAGETABLE \
     + DOMAIN_ENTRIES_PER_L4_PAGETABLE)
#endif

/* Where to find each level of the linear mapping */
#define __linear_l1_table ((l1_pgentry_t *)(LINEAR_PT_VIRT_START))
#define __linear_l2_table \
 ((l2_pgentry_t *)(__linear_l1_table + l1_linear_offset(LINEAR_PT_VIRT_START)))
#define __linear_l3_table \
 ((l3_pgentry_t *)(__linear_l2_table + l2_linear_offset(LINEAR_PT_VIRT_START)))
#define __linear_l4_table \
 ((l4_pgentry_t *)(__linear_l3_table + l3_linear_offset(LINEAR_PT_VIRT_START)))

#ifndef __ASSEMBLY__
extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
#if CONFIG_PAGING_LEVELS == 3
extern l2_pgentry_t   idle_pg_table_l2[
    ROOT_PAGETABLE_ENTRIES * L2_PAGETABLE_ENTRIES];
#elif CONFIG_PAGING_LEVELS == 2
#define idle_pg_table_l2 idle_pg_table
#elif CONFIG_PAGING_LEVELS == 4
extern l2_pgentry_t  *compat_idle_pg_table_l2;
extern unsigned int   m2p_compat_vstart;
#endif
void paging_init(void);
void setup_idle_pagetable(void);
#endif /* !defined(__ASSEMBLY__) */

#define __pge_off() write_cr4(mmu_cr4_features & ~X86_CR4_PGE)
#define __pge_on()  write_cr4(mmu_cr4_features)

#define _PAGE_PRESENT  0x001U
#define _PAGE_RW       0x002U
#define _PAGE_USER     0x004U
#define _PAGE_PWT      0x008U
#define _PAGE_PCD      0x010U
#define _PAGE_ACCESSED 0x020U
#define _PAGE_DIRTY    0x040U
#define _PAGE_PAT      0x080U
#define _PAGE_PSE      0x080U
#define _PAGE_GLOBAL   0x100U
#define _PAGE_AVAIL0   0x200U
#define _PAGE_AVAIL1   0x400U
#define _PAGE_AVAIL2   0x800U
#define _PAGE_AVAIL    0xE00U
#define _PAGE_PSE_PAT  0x1000U

/*
 * Debug option: Ensure that granted mappings are not implicitly unmapped.
 * WARNING: This will need to be disabled to run OSes that use the spare PTE
 * bits themselves (e.g., *BSD).
 */
#ifndef NDEBUG
#define _PAGE_GNTTAB   _PAGE_AVAIL2
#else
#define _PAGE_GNTTAB   0
#endif

#define __PAGE_HYPERVISOR \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_HYPERVISOR_NOCACHE \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)

#ifndef __ASSEMBLY__

static inline int get_order_from_bytes(paddr_t size)
{
    int order;
    size = (size-1) >> PAGE_SHIFT;
    for ( order = 0; size; order++ )
        size >>= 1;
    return order;
}

static inline int get_order_from_pages(unsigned long nr_pages)
{
    int order;
    nr_pages--;
    for ( order = 0; nr_pages; order++ )
        nr_pages >>= 1;
    return order;
}
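
/*
 * Worked examples (editor's illustration): the returned "order" is the
 * log2 of the smallest power-of-two page count covering the request,
 * so with 4kB pages:
 *
 *     get_order_from_bytes(PAGE_SIZE)     == 0   (one page)
 *     get_order_from_bytes(PAGE_SIZE + 1) == 1   (rounds up to two)
 *     get_order_from_pages(3)             == 2   (rounds up to four)
 */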

/* Allocator functions for Xen pagetables. */
void *alloc_xen_pagetable(void);
void free_xen_pagetable(void *v);
l2_pgentry_t *virt_to_xen_l2e(unsigned long v);

/* Map machine page range in Xen virtual address space. */
#define MAP_SMALL_PAGES (1UL<<16) /* don't use superpages for the mapping */
int
map_pages_to_xen(
    unsigned long virt,
    unsigned long mfn,
    unsigned long nr_mfns,
    unsigned long flags);
void destroy_xen_mappings(unsigned long v, unsigned long e);
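
/*
 * Usage sketch (editor's illustration): mapping a run of machine frames
 * uncached at a Xen virtual address, forcing 4kB mappings, and tearing
 * the range down again.
 *
 *     rc = map_pages_to_xen(virt, mfn, nr_mfns,
 *                           __PAGE_HYPERVISOR_NOCACHE | MAP_SMALL_PAGES);
 *     ...
 *     destroy_xen_mappings(virt, virt + (nr_mfns << PAGE_SHIFT));
 */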

#endif /* !__ASSEMBLY__ */

#define PFN_DOWN(x)   ((x) >> PAGE_SHIFT)
#define PFN_UP(x)     (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)

#endif /* __X86_PAGE_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */