ia64/xen-unstable

view xen/include/asm-x86/page.h @ 6707:3bde4219c681

manual merge
author iap10@freefall.cl.cam.ac.uk
date Thu Sep 08 17:40:37 2005 +0000 (2005-09-08)
parents 1f4863861d18 e3fd0fa58364
children aa0990ef260f
#ifndef __X86_PAGE_H__
#define __X86_PAGE_H__

/*
 * It is important that the masks are signed quantities. This ensures that
 * the compiler sign-extends a 32-bit mask to 64 bits if that is required.
 */
#ifndef __ASSEMBLY__
#define PAGE_SIZE (1L << PAGE_SHIFT)
#else
#define PAGE_SIZE (1 << PAGE_SHIFT)
#endif
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PAGE_FLAG_MASK (~0)

#ifndef __ASSEMBLY__
# include <asm/types.h>
# include <xen/lib.h>
#endif

#if defined(__i386__)
# include <asm/x86_32/page.h>
#elif defined(__x86_64__)
# include <asm/x86_64/page.h>
#endif
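
/*
 * Illustrative sketch of the sign-extension point above (a hypothetical
 * helper, not one the rest of Xen relies on): on x86_32 PAE a pte is 64
 * bits wide while PAGE_MASK is a 32-bit quantity.  Because PAGE_MASK is
 * signed, it promotes to 0xfffffffffffff000 rather than 0x00000000fffff000,
 * so masking a 64-bit pte value clears only the low PAGE_SHIFT bits.
 */
#ifndef __ASSEMBLY__
static inline u64 example_mask_keeps_high_bits(u64 pte_val)
{
    return pte_val & PAGE_MASK;  /* upper 32 bits survive the mask */
}
#endif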

/* Get direct integer representation of a pte's contents (intpte_t). */
#define l1e_get_intpte(x) ((x).l1)
#define l2e_get_intpte(x) ((x).l2)
#define l3e_get_intpte(x) ((x).l3)
#define l4e_get_intpte(x) ((x).l4)

/* Get pfn mapped by pte (unsigned long). */
#define l1e_get_pfn(x) \
    ((unsigned long)(((x).l1 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l2e_get_pfn(x) \
    ((unsigned long)(((x).l2 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l3e_get_pfn(x) \
    ((unsigned long)(((x).l3 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l4e_get_pfn(x) \
    ((unsigned long)(((x).l4 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))

/* Get physical address of page mapped by pte (physaddr_t). */
#define l1e_get_paddr(x) \
    ((physaddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
#define l2e_get_paddr(x) \
    ((physaddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK))))
#define l3e_get_paddr(x) \
    ((physaddr_t)(((x).l3 & (PADDR_MASK&PAGE_MASK))))
#define l4e_get_paddr(x) \
    ((physaddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK))))

/* Get pointer to info structure of page mapped by pte (struct pfn_info *). */
#define l1e_get_page(x) (pfn_to_page(l1e_get_pfn(x)))
#define l2e_get_page(x) (pfn_to_page(l2e_get_pfn(x)))
#define l3e_get_page(x) (pfn_to_page(l3e_get_pfn(x)))
#define l4e_get_page(x) (pfn_to_page(l4e_get_pfn(x)))

/* Get pte access flags (unsigned int). */
#define l1e_get_flags(x) (get_pte_flags((x).l1))
#define l2e_get_flags(x) (get_pte_flags((x).l2))
#define l3e_get_flags(x) (get_pte_flags((x).l3))
#define l4e_get_flags(x) (get_pte_flags((x).l4))

/* Construct an empty pte. */
#define l1e_empty() ((l1_pgentry_t) { 0 })
#define l2e_empty() ((l2_pgentry_t) { 0 })
#define l3e_empty() ((l3_pgentry_t) { 0 })
#define l4e_empty() ((l4_pgentry_t) { 0 })

/* Construct a pte from a pfn and access flags. */
#define l1e_from_pfn(pfn, flags) \
    ((l1_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l2e_from_pfn(pfn, flags) \
    ((l2_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l3e_from_pfn(pfn, flags) \
    ((l3_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l4e_from_pfn(pfn, flags) \
    ((l4_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })

/* Construct a pte from a physical address and access flags. */
#ifndef __ASSEMBLY__
static inline l1_pgentry_t l1e_from_paddr(physaddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l1_pgentry_t) { pa | put_pte_flags(flags) };
}
static inline l2_pgentry_t l2e_from_paddr(physaddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l2_pgentry_t) { pa | put_pte_flags(flags) };
}
#if CONFIG_PAGING_LEVELS >= 3
static inline l3_pgentry_t l3e_from_paddr(physaddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l3_pgentry_t) { pa | put_pte_flags(flags) };
}
#endif
#if CONFIG_PAGING_LEVELS >= 4
static inline l4_pgentry_t l4e_from_paddr(physaddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l4_pgentry_t) { pa | put_pte_flags(flags) };
}
#endif
#endif /* !__ASSEMBLY__ */

/* Construct a pte from its direct integer representation. */
#define l1e_from_intpte(intpte) ((l1_pgentry_t) { (intpte_t)(intpte) })
#define l2e_from_intpte(intpte) ((l2_pgentry_t) { (intpte_t)(intpte) })
#define l3e_from_intpte(intpte) ((l3_pgentry_t) { (intpte_t)(intpte) })
#define l4e_from_intpte(intpte) ((l4_pgentry_t) { (intpte_t)(intpte) })

/* Construct a pte from a page pointer and access flags. */
#define l1e_from_page(page, flags) (l1e_from_pfn(page_to_pfn(page),(flags)))
#define l2e_from_page(page, flags) (l2e_from_pfn(page_to_pfn(page),(flags)))
#define l3e_from_page(page, flags) (l3e_from_pfn(page_to_pfn(page),(flags)))
#define l4e_from_page(page, flags) (l4e_from_pfn(page_to_pfn(page),(flags)))

/* Add extra flags to an existing pte. */
#define l1e_add_flags(x, flags) ((x).l1 |= put_pte_flags(flags))
#define l2e_add_flags(x, flags) ((x).l2 |= put_pte_flags(flags))
#define l3e_add_flags(x, flags) ((x).l3 |= put_pte_flags(flags))
#define l4e_add_flags(x, flags) ((x).l4 |= put_pte_flags(flags))

/* Remove flags from an existing pte. */
#define l1e_remove_flags(x, flags) ((x).l1 &= ~put_pte_flags(flags))
#define l2e_remove_flags(x, flags) ((x).l2 &= ~put_pte_flags(flags))
#define l3e_remove_flags(x, flags) ((x).l3 &= ~put_pte_flags(flags))
#define l4e_remove_flags(x, flags) ((x).l4 &= ~put_pte_flags(flags))

/* Check if a pte's page mapping or significant access flags have changed. */
#define l1e_has_changed(x,y,flags) \
    ( !!(((x).l1 ^ (y).l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l2e_has_changed(x,y,flags) \
    ( !!(((x).l2 ^ (y).l2) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l3e_has_changed(x,y,flags) \
    ( !!(((x).l3 ^ (y).l3) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l4e_has_changed(x,y,flags) \
    ( !!(((x).l4 ^ (y).l4) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
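
/*
 * Illustrative sketch (hypothetical helper): the accessors above invert the
 * constructors, so a pfn/flags pair round-trips through an L1 entry,
 * assuming the pfn fits under PADDR_MASK and the flags are ones the
 * architecture's put_pte_flags()/get_pte_flags() pair preserves.
 */
#ifndef __ASSEMBLY__
static inline void example_l1e_round_trip(unsigned long pfn, unsigned int flags)
{
    l1_pgentry_t e = l1e_from_pfn(pfn, flags);
    ASSERT(l1e_get_pfn(e) == pfn);
    ASSERT(l1e_get_flags(e) == flags);
}
#endif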

/* Pagetable walking. */
#define l2e_to_l1e(x) ((l1_pgentry_t *)__va(l2e_get_paddr(x)))
#define l3e_to_l2e(x) ((l2_pgentry_t *)__va(l3e_get_paddr(x)))
#define l4e_to_l3e(x) ((l3_pgentry_t *)__va(l4e_get_paddr(x)))

/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(a) \
    (((a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
#define l2_table_offset(a) \
    (((a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
#define l3_table_offset(a) \
    (((a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
#define l4_table_offset(a) \
    (((a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
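
/*
 * Illustrative sketch (hypothetical helper): the offset macros above pick
 * out one level's slot index from a virtual address; e.g. on 2-level
 * x86_32, va 0x00403000 sits in L2 slot 1 and L1 slot 3.
 */
#ifndef __ASSEMBLY__
static inline unsigned int example_l1_slot(unsigned long va)
{
    return l1_table_offset(va);
}
#endif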

/* Convert a pointer to a page-table entry into pagetable slot index. */
#define pgentry_ptr_to_slot(_p) \
    (((unsigned long)(_p) & ~PAGE_MASK) / sizeof(*(_p)))

/* Page-table type. */
#ifndef __ASSEMBLY__
#if CONFIG_PAGING_LEVELS == 2
/* x86_32 default */
typedef struct { u32 pfn; } pagetable_t;
#elif CONFIG_PAGING_LEVELS == 3
/* x86_32 PAE */
typedef struct { u32 pfn; } pagetable_t;
#elif CONFIG_PAGING_LEVELS == 4
/* x86_64 */
typedef struct { u64 pfn; } pagetable_t;
#endif
#define pagetable_get_paddr(x) ((physaddr_t)(x).pfn << PAGE_SHIFT)
#define pagetable_get_pfn(x) ((x).pfn)
#define mk_pagetable(pa) \
    ({ pagetable_t __p; __p.pfn = (pa) >> PAGE_SHIFT; __p; })
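
/*
 * Illustrative sketch (hypothetical helper): mk_pagetable() packs a
 * page-aligned physical address into a pagetable_t; the two getters
 * recover the frame number and the address.  0x00200000 is an arbitrary
 * example value.
 */
static inline void example_pagetable_round_trip(void)
{
    pagetable_t pt = mk_pagetable(0x00200000);
    ASSERT(pagetable_get_pfn(pt) == 0x200);
    ASSERT(pagetable_get_paddr(pt) == 0x00200000);
}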
#endif

#define clear_page(_p) memset((void *)(_p), 0, PAGE_SIZE)
#define copy_page(_t,_f) memcpy((void *)(_t), (void *)(_f), PAGE_SIZE)

#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define pfn_to_page(_pfn) (frame_table + (_pfn))
#define phys_to_page(kaddr) (frame_table + ((kaddr) >> PAGE_SHIFT))
#define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT))
#define pfn_valid(_pfn) ((_pfn) < max_page)

#define pfn_to_phys(pfn) ((physaddr_t)(pfn) << PAGE_SHIFT)
#define phys_to_pfn(pa) ((unsigned long)((pa) >> PAGE_SHIFT))
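
/*
 * Illustrative sketch (hypothetical helper): __pa()/__va() translate between
 * Xen's direct-mapped virtual addresses and physical addresses, and
 * pfn_to_phys()/phys_to_pfn() convert between addresses and frame numbers.
 * The round trip below yields the virtual base of the frame containing p,
 * assuming p lies in the direct-mapped region.
 */
#ifndef __ASSEMBLY__
static inline void *example_frame_base(void *p)
{
    unsigned long pfn = phys_to_pfn(__pa(p));
    return __va(pfn_to_phys(pfn));
}
#endif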

/* High table entries are reserved by the hypervisor. */
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
#define DOMAIN_ENTRIES_PER_L2_PAGETABLE \
    (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT)
#define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE \
    (L2_PAGETABLE_ENTRIES - DOMAIN_ENTRIES_PER_L2_PAGETABLE)
#else
#define DOMAIN_ENTRIES_PER_L2_PAGETABLE 0
#define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE 0

#define DOMAIN_ENTRIES_PER_L4_PAGETABLE \
    (l4_table_offset(HYPERVISOR_VIRT_START))
#define GUEST_ENTRIES_PER_L4_PAGETABLE \
    (l4_table_offset(HYPERVISOR_VIRT_END))
#define HYPERVISOR_ENTRIES_PER_L4_PAGETABLE \
    (L4_PAGETABLE_ENTRIES - GUEST_ENTRIES_PER_L4_PAGETABLE \
     + DOMAIN_ENTRIES_PER_L4_PAGETABLE)
#endif

#define LINEAR_PT_OFFSET (LINEAR_PT_VIRT_START & VADDR_MASK)
#define linear_l1_table \
    ((l1_pgentry_t *)(LINEAR_PT_VIRT_START))
#define __linear_l2_table \
    ((l2_pgentry_t *)(LINEAR_PT_VIRT_START + \
                      (LINEAR_PT_OFFSET >> (PAGETABLE_ORDER<<0))))
#define __linear_l3_table \
    ((l3_pgentry_t *)(LINEAR_PT_VIRT_START + \
                      (LINEAR_PT_OFFSET >> (PAGETABLE_ORDER<<0)) + \
                      (LINEAR_PT_OFFSET >> (PAGETABLE_ORDER<<1))))
#define __linear_l4_table \
    ((l4_pgentry_t *)(LINEAR_PT_VIRT_START + \
                      (LINEAR_PT_OFFSET >> (PAGETABLE_ORDER<<0)) + \
                      (LINEAR_PT_OFFSET >> (PAGETABLE_ORDER<<1)) + \
                      (LINEAR_PT_OFFSET >> (PAGETABLE_ORDER<<2))))

#define linear_pg_table linear_l1_table
#define linear_l2_table(_ed) ((_ed)->arch.guest_vtable)
#define linear_l3_table(_ed) ((_ed)->arch.guest_vl3table)
#define linear_l4_table(_ed) ((_ed)->arch.guest_vl4table)

#define va_to_l1mfn(_ed, _va) \
    (l2e_get_pfn(linear_l2_table(_ed)[_va>>L2_PAGETABLE_SHIFT]))
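
/*
 * Illustrative sketch (hypothetical helper): because the current L1 tables
 * are recursively mapped at LINEAR_PT_VIRT_START, the pte covering a
 * virtual address can be read by indexing linear_pg_table with that
 * address's (masked) page number, provided the covering page-table
 * mappings are present.
 */
#ifndef __ASSEMBLY__
static inline l1_pgentry_t example_read_linear_l1e(unsigned long va)
{
    return linear_pg_table[(va & VADDR_MASK) >> L1_PAGETABLE_SHIFT];
}
#endif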

#ifndef __ASSEMBLY__
#if CONFIG_PAGING_LEVELS == 3
extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
extern l3_pgentry_t idle_pg_table_l3[ROOT_PAGETABLE_ENTRIES];
extern l2_pgentry_t idle_pg_table_l2[ROOT_PAGETABLE_ENTRIES*L2_PAGETABLE_ENTRIES];
#else
extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
extern l2_pgentry_t idle_pg_table_l2[ROOT_PAGETABLE_ENTRIES];
#endif
extern void paging_init(void);
#endif

#define __pge_off() \
    do { \
        __asm__ __volatile__( \
            "mov %0, %%cr4;  # turn off PGE " \
            : : "r" (mmu_cr4_features & ~X86_CR4_PGE) ); \
    } while ( 0 )

#define __pge_on() \
    do { \
        __asm__ __volatile__( \
            "mov %0, %%cr4;  # turn on PGE " \
            : : "r" (mmu_cr4_features) ); \
    } while ( 0 )
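
/*
 * Illustrative sketch (hypothetical macro): toggling CR4.PGE off and back
 * on invalidates global TLB entries as well as ordinary ones, which is the
 * usual reason to pair the two helpers above.
 */
#define example_flush_tlb_all_pge() \
    do { __pge_off(); __pge_on(); } while ( 0 )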

#define _PAGE_PRESENT  0x001U
#define _PAGE_RW       0x002U
#define _PAGE_USER     0x004U
#define _PAGE_PWT      0x008U
#define _PAGE_PCD      0x010U
#define _PAGE_ACCESSED 0x020U
#define _PAGE_DIRTY    0x040U
#define _PAGE_PAT      0x080U
#define _PAGE_PSE      0x080U
#define _PAGE_GLOBAL   0x100U
#define _PAGE_AVAIL    0xE00U

#define __PAGE_HYPERVISOR \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_HYPERVISOR_NOCACHE \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)

#ifndef __ASSEMBLY__
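
/*
 * Illustrative sketch (hypothetical helper): a typical hypervisor L1
 * mapping combines a frame number with one of the flag sets above.
 */
static inline l1_pgentry_t example_hypervisor_l1e(unsigned long pfn)
{
    return l1e_from_pfn(pfn, __PAGE_HYPERVISOR);
}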

static inline int get_order_from_bytes(physaddr_t size)
{
    int order;
    size = (size-1) >> PAGE_SHIFT;
    for ( order = 0; size; order++ )
        size >>= 1;
    return order;
}

static inline int get_order_from_pages(unsigned long nr_pages)
{
    int order;
    nr_pages--;
    for ( order = 0; nr_pages; order++ )
        nr_pages >>= 1;
    return order;
}
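
/*
 * Illustrative sketch (hypothetical helper): both helpers above round up
 * to the next power-of-two allocation order, where order n covers 2^n
 * pages.
 */
static inline void example_order_rounding(void)
{
    ASSERT(get_order_from_bytes(PAGE_SIZE) == 0);
    ASSERT(get_order_from_bytes(PAGE_SIZE + 1) == 1); /* rounds up to two pages  */
    ASSERT(get_order_from_pages(3) == 2);             /* rounds up to four pages */
}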

/* Allocator functions for Xen pagetables. */
struct pfn_info *alloc_xen_pagetable(void);
void free_xen_pagetable(struct pfn_info *pg);
l2_pgentry_t *virt_to_xen_l2e(unsigned long v);

/* Map physical page range in Xen virtual address space. */
#define MAP_SMALL_PAGES (1UL<<16) /* don't use superpages for the mapping */
int
map_pages_to_xen(
    unsigned long virt,
    unsigned long pfn,
    unsigned long nr_pfns,
    unsigned long flags);
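
/*
 * Illustrative sketch (hypothetical values): map 16 frames starting at
 * frame 0x100 into Xen's address space at va, read/write and uncacheable,
 * forbidding superpage mappings.  The frame number and flag choice are
 * arbitrary examples, not values this header prescribes.
 */
static inline int example_map_device_frames(unsigned long va)
{
    return map_pages_to_xen(va, 0x100, 16,
                            __PAGE_HYPERVISOR_NOCACHE | MAP_SMALL_PAGES);
}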

#endif /* !__ASSEMBLY__ */

#endif /* __X86_PAGE_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */