xen/include/asm-x86/page.h @ 19848:5839491bbf20
#ifndef __X86_PAGE_H__
#define __X86_PAGE_H__

/*
 * It is important that the masks are signed quantities. This ensures that
 * the compiler sign-extends a 32-bit mask to 64 bits if that is required.
 */
#ifndef __ASSEMBLY__
#define PAGE_SIZE (1L << PAGE_SHIFT)
#else
#define PAGE_SIZE (1 << PAGE_SHIFT)
#endif
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PAGE_FLAG_MASK (~0)

#ifndef __ASSEMBLY__
# include <asm/types.h>
# include <xen/lib.h>
#endif

#if defined(__i386__)
# include <asm/x86_32/page.h>
#elif defined(__x86_64__)
# include <asm/x86_64/page.h>
#endif

/* Read a pte atomically from memory. */
#define l1e_read_atomic(l1ep) \
    l1e_from_intpte(pte_read_atomic(&l1e_get_intpte(*(l1ep))))
#define l2e_read_atomic(l2ep) \
    l2e_from_intpte(pte_read_atomic(&l2e_get_intpte(*(l2ep))))
#define l3e_read_atomic(l3ep) \
    l3e_from_intpte(pte_read_atomic(&l3e_get_intpte(*(l3ep))))
#define l4e_read_atomic(l4ep) \
    l4e_from_intpte(pte_read_atomic(&l4e_get_intpte(*(l4ep))))

/* Write a pte atomically to memory. */
#define l1e_write_atomic(l1ep, l1e) \
    pte_write_atomic(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e))
#define l2e_write_atomic(l2ep, l2e) \
    pte_write_atomic(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e))
#define l3e_write_atomic(l3ep, l3e) \
    pte_write_atomic(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e))
#define l4e_write_atomic(l4ep, l4e) \
    pte_write_atomic(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))

/*
 * Write a pte safely but non-atomically to memory.
 * The PTE may become temporarily not-present during the update.
 */
#define l1e_write(l1ep, l1e) \
    pte_write(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e))
#define l2e_write(l2ep, l2e) \
    pte_write(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e))
#define l3e_write(l3ep, l3e) \
    pte_write(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e))
#define l4e_write(l4ep, l4e) \
    pte_write(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))

/* Get direct integer representation of a pte's contents (intpte_t). */
#define l1e_get_intpte(x) ((x).l1)
#define l2e_get_intpte(x) ((x).l2)
#define l3e_get_intpte(x) ((x).l3)
#define l4e_get_intpte(x) ((x).l4)

/* Get pfn mapped by pte (unsigned long). */
#define l1e_get_pfn(x) \
    ((unsigned long)(((x).l1 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l2e_get_pfn(x) \
    ((unsigned long)(((x).l2 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l3e_get_pfn(x) \
    ((unsigned long)(((x).l3 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l4e_get_pfn(x) \
    ((unsigned long)(((x).l4 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))

/* Get physical address of page mapped by pte (paddr_t). */
#define l1e_get_paddr(x) \
    ((paddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
#define l2e_get_paddr(x) \
    ((paddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK))))
#define l3e_get_paddr(x) \
    ((paddr_t)(((x).l3 & (PADDR_MASK&PAGE_MASK))))
#define l4e_get_paddr(x) \
    ((paddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK))))

/* Get pointer to info structure of page mapped by pte (struct page_info *). */
#define l1e_get_page(x) (mfn_to_page(l1e_get_pfn(x)))
#define l2e_get_page(x) (mfn_to_page(l2e_get_pfn(x)))
#define l3e_get_page(x) (mfn_to_page(l3e_get_pfn(x)))
#define l4e_get_page(x) (mfn_to_page(l4e_get_pfn(x)))

/* Get pte access flags (unsigned int). */
#define l1e_get_flags(x) (get_pte_flags((x).l1))
#define l2e_get_flags(x) (get_pte_flags((x).l2))
#define l3e_get_flags(x) (get_pte_flags((x).l3))
#define l4e_get_flags(x) (get_pte_flags((x).l4))

/* Construct an empty pte. */
#define l1e_empty() ((l1_pgentry_t) { 0 })
#define l2e_empty() ((l2_pgentry_t) { 0 })
#define l3e_empty() ((l3_pgentry_t) { 0 })
#define l4e_empty() ((l4_pgentry_t) { 0 })

/* Construct a pte from a pfn and access flags. */
#define l1e_from_pfn(pfn, flags) \
    ((l1_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l2e_from_pfn(pfn, flags) \
    ((l2_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l3e_from_pfn(pfn, flags) \
    ((l3_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l4e_from_pfn(pfn, flags) \
    ((l4_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
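/*
 * Illustrative sketch of how the accessors and constructors above compose.
 * This is not code from this header; 'pl1e', 'mfn' and 'flags' are assumed
 * locals of a hypothetical caller, and all locking/refcounting is omitted:
 *
 *     l1_pgentry_t ol1e = l1e_read_atomic(pl1e);
 *     l1_pgentry_t nl1e = l1e_from_pfn(mfn, flags);
 *     if ( l1e_get_flags(ol1e) & _PAGE_PRESENT )
 *         ... the slot currently maps frame l1e_get_pfn(ol1e) ...
 *     l1e_write_atomic(pl1e, nl1e);   (single store; never transiently empty)
 */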

/* Construct a pte from a physical address and access flags. */
#ifndef __ASSEMBLY__
static inline l1_pgentry_t l1e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l1_pgentry_t) { pa | put_pte_flags(flags) };
}
static inline l2_pgentry_t l2e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l2_pgentry_t) { pa | put_pte_flags(flags) };
}
static inline l3_pgentry_t l3e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l3_pgentry_t) { pa | put_pte_flags(flags) };
}
#if CONFIG_PAGING_LEVELS >= 4
static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l4_pgentry_t) { pa | put_pte_flags(flags) };
}
#endif
#endif /* !__ASSEMBLY__ */

/* Construct a pte from its direct integer representation. */
#define l1e_from_intpte(intpte) ((l1_pgentry_t) { (intpte_t)(intpte) })
#define l2e_from_intpte(intpte) ((l2_pgentry_t) { (intpte_t)(intpte) })
#define l3e_from_intpte(intpte) ((l3_pgentry_t) { (intpte_t)(intpte) })
#define l4e_from_intpte(intpte) ((l4_pgentry_t) { (intpte_t)(intpte) })

/* Construct a pte from a page pointer and access flags. */
#define l1e_from_page(page, flags) (l1e_from_pfn(page_to_mfn(page),(flags)))
#define l2e_from_page(page, flags) (l2e_from_pfn(page_to_mfn(page),(flags)))
#define l3e_from_page(page, flags) (l3e_from_pfn(page_to_mfn(page),(flags)))
#define l4e_from_page(page, flags) (l4e_from_pfn(page_to_mfn(page),(flags)))

/* Add extra flags to an existing pte. */
#define l1e_add_flags(x, flags) ((x).l1 |= put_pte_flags(flags))
#define l2e_add_flags(x, flags) ((x).l2 |= put_pte_flags(flags))
#define l3e_add_flags(x, flags) ((x).l3 |= put_pte_flags(flags))
#define l4e_add_flags(x, flags) ((x).l4 |= put_pte_flags(flags))

/* Remove flags from an existing pte. */
#define l1e_remove_flags(x, flags) ((x).l1 &= ~put_pte_flags(flags))
#define l2e_remove_flags(x, flags) ((x).l2 &= ~put_pte_flags(flags))
#define l3e_remove_flags(x, flags) ((x).l3 &= ~put_pte_flags(flags))
#define l4e_remove_flags(x, flags) ((x).l4 &= ~put_pte_flags(flags))

/* Check if a pte's page mapping or significant access flags have changed. */
#define l1e_has_changed(x,y,flags) \
    ( !!(((x).l1 ^ (y).l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l2e_has_changed(x,y,flags) \
    ( !!(((x).l2 ^ (y).l2) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l3e_has_changed(x,y,flags) \
    ( !!(((x).l3 ^ (y).l3) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l4e_has_changed(x,y,flags) \
    ( !!(((x).l4 ^ (y).l4) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )

/* Pagetable walking. */
#define l2e_to_l1e(x) ((l1_pgentry_t *)__va(l2e_get_paddr(x)))
#define l3e_to_l2e(x) ((l2_pgentry_t *)__va(l3e_get_paddr(x)))
#define l4e_to_l3e(x) ((l3_pgentry_t *)__va(l4e_get_paddr(x)))

/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(a) \
    (((a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
#define l2_table_offset(a) \
    (((a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
#define l3_table_offset(a) \
    (((a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
#define l4_table_offset(a) \
    (((a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
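/*
 * Taken together, the *_table_offset() and l?e_to_l?e() helpers support a
 * plain software walk of a 4-level table.  A sketch only, under the
 * assumption that the root table is already mapped at 'l4t' and that the
 * walked tables are reachable through the direct map (l4e_to_l3e() and
 * friends go through __va()); real callers also validate and take
 * references on intermediate pages:
 *
 *     l4_pgentry_t l4e = l4t[l4_table_offset(va)];
 *     if ( l4e_get_flags(l4e) & _PAGE_PRESENT )
 *     {
 *         l3_pgentry_t *l3t = l4e_to_l3e(l4e);
 *         l3_pgentry_t  l3e = l3t[l3_table_offset(va)];
 *         ... and so on down to the L1 entry ...
 *     }
 */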

/* Convert a pointer to a page-table entry into pagetable slot index. */
#define pgentry_ptr_to_slot(_p) \
    (((unsigned long)(_p) & ~PAGE_MASK) / sizeof(*(_p)))

#ifndef __ASSEMBLY__

/* Page-table type. */
#if CONFIG_PAGING_LEVELS == 3
/* x86_32 PAE */
typedef struct { u32 pfn; } pagetable_t;
#elif CONFIG_PAGING_LEVELS == 4
/* x86_64 */
typedef struct { u64 pfn; } pagetable_t;
#endif
#define pagetable_get_paddr(x) ((paddr_t)(x).pfn << PAGE_SHIFT)
#define pagetable_get_page(x) mfn_to_page((x).pfn)
#define pagetable_get_pfn(x) ((x).pfn)
#define pagetable_get_mfn(x) _mfn(((x).pfn))
#define pagetable_is_null(x) ((x).pfn == 0)
#define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) })
#define pagetable_from_mfn(mfn) ((pagetable_t) { mfn_x(mfn) })
#define pagetable_from_page(pg) pagetable_from_pfn(page_to_mfn(pg))
#define pagetable_from_paddr(p) pagetable_from_pfn((p)>>PAGE_SHIFT)
#define pagetable_null() pagetable_from_pfn(0)
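/*
 * pagetable_t is deliberately opaque: it records only the frame number of a
 * top-level table.  A sketch of typical conversions (hypothetical 'mfn' and
 * 'pt' values, not taken from this header):
 *
 *     pagetable_t pt = pagetable_from_mfn(_mfn(mfn));
 *     paddr_t     pa = pagetable_get_paddr(pt);   (equals mfn << PAGE_SHIFT)
 *     if ( !pagetable_is_null(pt) )
 *         ... pagetable_get_page(pt) reaches its struct page_info ...
 */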

void clear_page_sse2(void *);
#define clear_page(_p) (cpu_has_xmm2 ? \
    clear_page_sse2((void *)(_p)) : \
    (void)memset((void *)(_p), 0, PAGE_SIZE))
void copy_page_sse2(void *, const void *);
#define copy_page(_t,_f) (cpu_has_xmm2 ? \
    copy_page_sse2(_t, _f) : \
    (void)memcpy(_t, _f, PAGE_SIZE))

#define __mfn_valid(mfn) ((mfn) < max_page)

/* Convert between Xen-heap virtual addresses and machine addresses. */
#define __pa(x) (virt_to_maddr(x))
#define __va(x) (maddr_to_virt(x))

/* Convert between Xen-heap virtual addresses and machine frame numbers. */
#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT)
#define __mfn_to_virt(mfn) (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))

/* Convert between machine frame numbers and page-info structures. */
#define __mfn_to_page(mfn) (frame_table + (mfn))
#define __page_to_mfn(pg) ((unsigned long)((pg) - frame_table))

/* Convert between machine addresses and page-info structures. */
#define __maddr_to_page(ma) (frame_table + ((ma) >> PAGE_SHIFT))
#define __page_to_maddr(pg) ((paddr_t)((pg) - frame_table) << PAGE_SHIFT)

/* Convert between Xen-heap virtual addresses and page-info structures. */
#define __virt_to_page(va) (frame_table + (__pa(va) >> PAGE_SHIFT))
#define __page_to_virt(pg) (maddr_to_virt(page_to_maddr(pg)))

/* Convert between frame number and address formats. */
#define __pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
#define __paddr_to_pfn(pa) ((unsigned long)((pa) >> PAGE_SHIFT))

/*
 * We define non-underscored wrappers for above conversion functions. These are
 * overridden in various source files while underscored versions remain intact.
 */
#define mfn_valid(mfn) __mfn_valid(mfn)
#define virt_to_mfn(va) __virt_to_mfn(va)
#define mfn_to_virt(mfn) __mfn_to_virt(mfn)
#define mfn_to_page(mfn) __mfn_to_page(mfn)
#define page_to_mfn(pg) __page_to_mfn(pg)
#define maddr_to_page(ma) __maddr_to_page(ma)
#define page_to_maddr(pg) __page_to_maddr(pg)
#define virt_to_page(va) __virt_to_page(va)
#define page_to_virt(pg) __page_to_virt(pg)
#define pfn_to_paddr(pfn) __pfn_to_paddr(pfn)
#define paddr_to_pfn(pa) __paddr_to_pfn(pa)
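/*
 * The conversions above compose into round trips.  A hedged sketch, assuming
 * 'p' points into the Xen heap (e.g. the return of alloc_xenheap_page(),
 * which is declared elsewhere):
 *
 *     unsigned long     mfn = virt_to_mfn(p);     (machine address >> PAGE_SHIFT)
 *     struct page_info *pg  = virt_to_page(p);    (frame_table + mfn)
 *     ASSERT(page_to_mfn(pg) == mfn);
 *     ASSERT(mfn_to_virt(mfn) == (void *)((unsigned long)p & PAGE_MASK));
 */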

#endif /* !defined(__ASSEMBLY__) */

/* High table entries are reserved by the hypervisor. */
#define DOMAIN_ENTRIES_PER_L2_PAGETABLE 0
#define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE 0

#define DOMAIN_ENTRIES_PER_L4_PAGETABLE \
    (l4_table_offset(HYPERVISOR_VIRT_START))
#define GUEST_ENTRIES_PER_L4_PAGETABLE \
    (l4_table_offset(HYPERVISOR_VIRT_END))
#define HYPERVISOR_ENTRIES_PER_L4_PAGETABLE \
    (L4_PAGETABLE_ENTRIES - GUEST_ENTRIES_PER_L4_PAGETABLE \
     + DOMAIN_ENTRIES_PER_L4_PAGETABLE)

/* Where to find each level of the linear mapping */
#define __linear_l1_table ((l1_pgentry_t *)(LINEAR_PT_VIRT_START))
#define __linear_l2_table \
 ((l2_pgentry_t *)(__linear_l1_table + l1_linear_offset(LINEAR_PT_VIRT_START)))
#define __linear_l3_table \
 ((l3_pgentry_t *)(__linear_l2_table + l2_linear_offset(LINEAR_PT_VIRT_START)))
#define __linear_l4_table \
 ((l4_pgentry_t *)(__linear_l3_table + l3_linear_offset(LINEAR_PT_VIRT_START)))

#ifndef __ASSEMBLY__
extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
#if CONFIG_PAGING_LEVELS == 3
extern l2_pgentry_t idle_pg_table_l2[
    ROOT_PAGETABLE_ENTRIES * L2_PAGETABLE_ENTRIES];
#elif CONFIG_PAGING_LEVELS == 4
extern l2_pgentry_t *compat_idle_pg_table_l2;
extern unsigned int m2p_compat_vstart;
#endif
void paging_init(void);
void setup_idle_pagetable(void);
#endif /* !defined(__ASSEMBLY__) */

#define _PAGE_PRESENT 0x001U
#define _PAGE_RW 0x002U
#define _PAGE_USER 0x004U
#define _PAGE_PWT 0x008U
#define _PAGE_PCD 0x010U
#define _PAGE_ACCESSED 0x020U
#define _PAGE_DIRTY 0x040U
#define _PAGE_PAT 0x080U
#define _PAGE_PSE 0x080U
#define _PAGE_GLOBAL 0x100U
#define _PAGE_AVAIL0 0x200U
#define _PAGE_AVAIL1 0x400U
#define _PAGE_AVAIL2 0x800U
#define _PAGE_AVAIL 0xE00U
#define _PAGE_PSE_PAT 0x1000U

/*
 * Debug option: Ensure that granted mappings are not implicitly unmapped.
 * WARNING: This will need to be disabled to run OSes that use the spare PTE
 * bits themselves (e.g., *BSD).
 */
#ifdef NDEBUG
#undef _PAGE_GNTTAB
#endif
#ifndef _PAGE_GNTTAB
#define _PAGE_GNTTAB 0
#endif

#define __PAGE_HYPERVISOR \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_HYPERVISOR_NOCACHE \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)

#define GRANT_PTE_FLAGS \
    (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NX | _PAGE_GNTTAB)

#ifndef __ASSEMBLY__

static inline int get_order_from_bytes(paddr_t size)
{
    int order;
    size = (size-1) >> PAGE_SHIFT;
    for ( order = 0; size; order++ )
        size >>= 1;
    return order;
}

static inline int get_order_from_pages(unsigned long nr_pages)
{
    int order;
    nr_pages--;
    for ( order = 0; nr_pages; order++ )
        nr_pages >>= 1;
    return order;
}
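/*
 * Both helpers return the smallest order such that (1 << order) pages cover
 * the request, i.e. they round up to the next power of two.  Worked examples:
 *
 *     get_order_from_bytes(PAGE_SIZE)     returns 0   (1 page)
 *     get_order_from_bytes(3 * PAGE_SIZE) returns 2   (4 pages)
 *     get_order_from_pages(1)             returns 0
 *     get_order_from_pages(5)             returns 3   (8 pages)
 */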

/* Allocator functions for Xen pagetables. */
void *alloc_xen_pagetable(void);
void free_xen_pagetable(void *v);
l2_pgentry_t *virt_to_xen_l2e(unsigned long v);
#ifdef __x86_64__
l3_pgentry_t *virt_to_xen_l3e(unsigned long v);
#endif

/* Map machine page range in Xen virtual address space. */
#define MAP_SMALL_PAGES _PAGE_AVAIL0 /* don't use superpages for the mapping */
int map_pages_to_xen(
    unsigned long virt,
    unsigned long mfn,
    unsigned long nr_mfns,
    unsigned int flags);
void destroy_xen_mappings(unsigned long v, unsigned long e);

/* Convert between PAT/PCD/PWT embedded in PTE flags and 3-bit cacheattr. */
static inline uint32_t pte_flags_to_cacheattr(uint32_t flags)
{
    return ((flags >> 5) & 4) | ((flags >> 3) & 3);
}
static inline uint32_t cacheattr_to_pte_flags(uint32_t cacheattr)
{
    return ((cacheattr & 4) << 5) | ((cacheattr & 3) << 3);
}
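/*
 * The 3-bit cacheattr value packs the PAT-index bits: bit 0 = PWT, bit 1 =
 * PCD, bit 2 = PAT.  The shifts above simply move those bits between their
 * PTE positions (_PAGE_PWT = bit 3, _PAGE_PCD = bit 4, _PAGE_PAT = bit 7)
 * and the packed index.  For example:
 *
 *     pte_flags_to_cacheattr(_PAGE_PCD | _PAGE_PWT) == 3
 *     cacheattr_to_pte_flags(4) == _PAGE_PAT   (0x080)
 */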

#endif /* !__ASSEMBLY__ */

#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
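/*
 * PFN_DOWN() rounds an address or size down to a frame number; PFN_UP()
 * rounds up.  E.g. with 4KiB pages, PFN_DOWN(0x1800) == 1 and
 * PFN_UP(0x1800) == 2.
 */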

#endif /* __X86_PAGE_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */