ia64/xen-unstable: view of xen/include/asm-x86/page.h @ 5282:209e087e9ba2

bitkeeper revision 1.1646 (429f647ejz5YEoIPEm_Y2iLwylO84A)

page.h:
Keep it simple.
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>

author    cl349@firebug.cl.cam.ac.uk
date      Thu Jun 02 19:56:46 2005 +0000 (2005-06-02)
parents   ac04979dce3a
children  b3907ddefba4
#ifndef __X86_PAGE_H__
#define __X86_PAGE_H__

#ifndef __ASSEMBLY__
#define PAGE_SIZE           (1UL << PAGE_SHIFT)
#else
#define PAGE_SIZE           (1 << PAGE_SHIFT)
#endif
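/*
 * PAGE_SIZE is defined twice because the assembler cannot parse C
 * integer suffixes: C code gets the unsigned-long "1UL" form and
 * assembly the plain "1"; both expand to the same value.
 */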
#define PAGE_MASK           (~(PAGE_SIZE-1))
#define PAGE_FLAG_MASK      (~0U)

#ifndef __ASSEMBLY__
# include <asm/types.h>
#endif

#if defined(__i386__)
# include <asm/x86_32/page.h>
#elif defined(__x86_64__)
# include <asm/x86_64/page.h>
#endif

/* Get direct integer representation of a pte's contents (intpte_t). */
#define l1e_get_intpte(x)          ((x).l1)
#define l2e_get_intpte(x)          ((x).l2)
#define l3e_get_intpte(x)          ((x).l3)
#define l4e_get_intpte(x)          ((x).l4)

/* Get pfn mapped by pte (unsigned long). */
#define l1e_get_pfn(x)             \
    ((unsigned long)(((x).l1 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l2e_get_pfn(x)             \
    ((unsigned long)(((x).l2 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l3e_get_pfn(x)             \
    ((unsigned long)(((x).l3 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l4e_get_pfn(x)             \
    ((unsigned long)(((x).l4 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
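/*
 * Masking with (PADDR_MASK & PAGE_MASK) strips both the low flag bits
 * and any bits above the supported physical address width, leaving only
 * the frame address before it is shifted down to a frame number.
 */
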
/* Get physical address of page mapped by pte (physaddr_t). */
#define l1e_get_paddr(x)           \
    ((physaddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
#define l2e_get_paddr(x)           \
    ((physaddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK))))
#define l3e_get_paddr(x)           \
    ((physaddr_t)(((x).l3 & (PADDR_MASK&PAGE_MASK))))
#define l4e_get_paddr(x)           \
    ((physaddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK))))

/* Get pointer to info structure of page mapped by pte (struct pfn_info *). */
#define l1e_get_page(x)            (pfn_to_page(l1e_get_pfn(x)))
#define l2e_get_page(x)            (pfn_to_page(l2e_get_pfn(x)))
#define l3e_get_page(x)            (pfn_to_page(l3e_get_pfn(x)))
#define l4e_get_page(x)            (pfn_to_page(l4e_get_pfn(x)))

/* Get pte access flags (unsigned int). */
#define l1e_get_flags(x)           (get_pte_flags((x).l1))
#define l2e_get_flags(x)           (get_pte_flags((x).l2))
#define l3e_get_flags(x)           (get_pte_flags((x).l3))
#define l4e_get_flags(x)           (get_pte_flags((x).l4))

/* Construct an empty pte. */
#define l1e_empty()                ((l1_pgentry_t) { 0 })
#define l2e_empty()                ((l2_pgentry_t) { 0 })
#define l3e_empty()                ((l3_pgentry_t) { 0 })
#define l4e_empty()                ((l4_pgentry_t) { 0 })

/* Construct a pte from a pfn and access flags. */
#define l1e_from_pfn(pfn, flags)   \
    ((l1_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l2e_from_pfn(pfn, flags)   \
    ((l2_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l3e_from_pfn(pfn, flags)   \
    ((l3_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l4e_from_pfn(pfn, flags)   \
    ((l4_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
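/*
 * Example (illustrative only): build a present, writable, kernel-only
 * L1 entry for frame 0x1234 using the flag set defined later in this
 * header:
 *
 *     l1_pgentry_t pte = l1e_from_pfn(0x1234, __PAGE_HYPERVISOR);
 */
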
/* Construct a pte from a physical address and access flags. */
#ifndef __ASSEMBLY__
static inline l1_pgentry_t l1e_from_paddr(physaddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l1_pgentry_t) { pa | put_pte_flags(flags) };
}
static inline l2_pgentry_t l2e_from_paddr(physaddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l2_pgentry_t) { pa | put_pte_flags(flags) };
}
#if CONFIG_PAGING_LEVELS >= 3
static inline l3_pgentry_t l3e_from_paddr(physaddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l3_pgentry_t) { pa | put_pte_flags(flags) };
}
#endif
#if CONFIG_PAGING_LEVELS >= 4
static inline l4_pgentry_t l4e_from_paddr(physaddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l4_pgentry_t) { pa | put_pte_flags(flags) };
}
#endif
#endif /* !__ASSEMBLY__ */
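/*
 * Unlike the pfn-based constructors above, these are inline functions
 * rather than macros so that each call site can ASSERT that the given
 * address is page-aligned and carries no bits outside PADDR_MASK.
 */
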
/* Construct a pte from its direct integer representation. */
#define l1e_from_intpte(intpte)    ((l1_pgentry_t) { (intpte_t)(intpte) })
#define l2e_from_intpte(intpte)    ((l2_pgentry_t) { (intpte_t)(intpte) })
#define l3e_from_intpte(intpte)    ((l3_pgentry_t) { (intpte_t)(intpte) })
#define l4e_from_intpte(intpte)    ((l4_pgentry_t) { (intpte_t)(intpte) })

/* Construct a pte from a page pointer and access flags. */
#define l1e_from_page(page, flags) (l1e_from_pfn(page_to_pfn(page),(flags)))
#define l2e_from_page(page, flags) (l2e_from_pfn(page_to_pfn(page),(flags)))
#define l3e_from_page(page, flags) (l3e_from_pfn(page_to_pfn(page),(flags)))
#define l4e_from_page(page, flags) (l4e_from_pfn(page_to_pfn(page),(flags)))

/* Add extra flags to an existing pte. */
#define l1e_add_flags(x, flags)    ((x).l1 |= put_pte_flags(flags))
#define l2e_add_flags(x, flags)    ((x).l2 |= put_pte_flags(flags))
#define l3e_add_flags(x, flags)    ((x).l3 |= put_pte_flags(flags))
#define l4e_add_flags(x, flags)    ((x).l4 |= put_pte_flags(flags))

/* Remove flags from an existing pte. */
#define l1e_remove_flags(x, flags) ((x).l1 &= ~put_pte_flags(flags))
#define l2e_remove_flags(x, flags) ((x).l2 &= ~put_pte_flags(flags))
#define l3e_remove_flags(x, flags) ((x).l3 &= ~put_pte_flags(flags))
#define l4e_remove_flags(x, flags) ((x).l4 &= ~put_pte_flags(flags))

/* Check if a pte's page mapping or significant access flags have changed. */
#define l1e_has_changed(x,y,flags) \
    ( !!(((x).l1 ^ (y).l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l2e_has_changed(x,y,flags) \
    ( !!(((x).l2 ^ (y).l2) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l3e_has_changed(x,y,flags) \
    ( !!(((x).l3 ^ (y).l3) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l4e_has_changed(x,y,flags) \
    ( !!(((x).l4 ^ (y).l4) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
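/*
 * The XOR exposes every differing bit; the mask then keeps only the
 * address bits plus the caller's flags of interest. For example,
 * l1e_has_changed(x, y, _PAGE_RW) is 1 if the mapped frame or the
 * writable bit differs, but ignores churn in e.g. _PAGE_ACCESSED.
 */
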
/* Pagetable walking. */
#define l2e_to_l1e(x)              ((l1_pgentry_t *)__va(l2e_get_paddr(x)))
#define l3e_to_l2e(x)              ((l2_pgentry_t *)__va(l3e_get_paddr(x)))
#define l4e_to_l3e(x)              ((l3_pgentry_t *)__va(l4e_get_paddr(x)))

/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(a)         \
    (((a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
#define l2_table_offset(a)         \
    (((a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
#define l3_table_offset(a)         \
    (((a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
#define l4_table_offset(a)         \
    (((a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
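/*
 * Worked example for the 2-level (non-PAE x86_32) layout, where
 * L1_PAGETABLE_SHIFT is 12 and both levels hold 1024 entries:
 * l1_table_offset(va) extracts virtual address bits 12-21, and
 * l2_table_offset(va) bits 22-31.
 */
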
/* Convert a pointer to a page-table entry into pagetable slot index. */
#define pgentry_ptr_to_slot(_p)    \
    (((unsigned long)(_p) & ~PAGE_MASK) / sizeof(*(_p)))

/* Page-table type. */
#ifndef __ASSEMBLY__
#if CONFIG_PAGING_LEVELS == 2
/* x86_32 default */
typedef struct { u32 pfn; } pagetable_t;
#elif CONFIG_PAGING_LEVELS == 3
/* x86_32 PAE */
typedef struct { u32 pfn; } pagetable_t;
#elif CONFIG_PAGING_LEVELS == 4
/* x86_64 */
typedef struct { u64 pfn; } pagetable_t;
#endif
#define pagetable_get_paddr(x) ((physaddr_t)(x).pfn << PAGE_SHIFT)
#define pagetable_get_pfn(x)   ((x).pfn)
#define mk_pagetable(pa)       \
    ({ pagetable_t __p; __p.pfn = (pa) >> PAGE_SHIFT; __p; })
#endif
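/*
 * mk_pagetable() and pagetable_get_paddr() round-trip a physical
 * address: for any PAGE_SIZE-aligned pa,
 * pagetable_get_paddr(mk_pagetable(pa)) == pa; the low offset bits of
 * an unaligned pa are silently discarded.
 */
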
#define clear_page(_p)      memset((void *)(_p), 0, PAGE_SIZE)
#define copy_page(_t,_f)    memcpy((void *)(_t), (void *)(_f), PAGE_SIZE)

#define PAGE_OFFSET         ((unsigned long)__PAGE_OFFSET)
#define __pa(x)             ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x)             ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define pfn_to_page(_pfn)   (frame_table + (_pfn))
#define phys_to_page(kaddr) (frame_table + ((kaddr) >> PAGE_SHIFT))
#define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT))
#define pfn_valid(_pfn)     ((_pfn) < max_page)
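/*
 * __pa()/__va() convert between Xen virtual and physical addresses by
 * subtracting or adding PAGE_OFFSET; they are only meaningful for
 * addresses inside Xen's direct 1:1 mapping of physical memory.
 */
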
/* High table entries are reserved by the hypervisor. */
/* FIXME: this breaks with PAE -- kraxel */
#define DOMAIN_ENTRIES_PER_L2_PAGETABLE     \
    (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT)
#define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE \
    (L2_PAGETABLE_ENTRIES - DOMAIN_ENTRIES_PER_L2_PAGETABLE)

#define linear_l1_table                                                 \
    ((l1_pgentry_t *)(LINEAR_PT_VIRT_START))
#define __linear_l2_table                                               \
    ((l2_pgentry_t *)(LINEAR_PT_VIRT_START +                            \
                      (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0))))
#define __linear_l3_table                                               \
    ((l3_pgentry_t *)(LINEAR_PT_VIRT_START +                            \
                      (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0)) +  \
                      (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<1))))
#define __linear_l4_table                                               \
    ((l4_pgentry_t *)(LINEAR_PT_VIRT_START +                            \
                      (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0)) +  \
                      (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<1)) +  \
                      (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<2))))
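/*
 * These bases rely on the recursive ("linear") mapping: a slot in the
 * top-level page table points back at that table itself, so the page
 * tables of every level appear at fixed virtual addresses derived from
 * LINEAR_PT_VIRT_START.
 */
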
#define linear_pg_table linear_l1_table
#define linear_l2_table(_ed) ((_ed)->arch.guest_vtable)
#define linear_l3_table(_ed) ((_ed)->arch.guest_vl3table)
#define linear_l4_table(_ed) ((_ed)->arch.guest_vl4table)

#define va_to_l1mfn(_ed, _va) \
    (l2e_get_pfn(linear_l2_table(_ed)[_va>>L2_PAGETABLE_SHIFT]))

#ifndef __ASSEMBLY__
#if CONFIG_PAGING_LEVELS == 3
extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
extern l3_pgentry_t   idle_pg_table_l3[ROOT_PAGETABLE_ENTRIES];
extern l2_pgentry_t   idle_pg_table_l2[ROOT_PAGETABLE_ENTRIES*L2_PAGETABLE_ENTRIES];
#else
extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
extern l2_pgentry_t   idle_pg_table_l2[ROOT_PAGETABLE_ENTRIES];
#endif
extern void paging_init(void);
#endif

#define __pge_off()                                             \
    do {                                                        \
        __asm__ __volatile__(                                   \
            "mov %0, %%cr4;  # turn off PGE     "               \
            : : "r" (mmu_cr4_features & ~X86_CR4_PGE) );        \
    } while ( 0 )

#define __pge_on()                                              \
    do {                                                        \
        __asm__ __volatile__(                                   \
            "mov %0, %%cr4;  # turn on PGE      "               \
            : : "r" (mmu_cr4_features) );                       \
    } while ( 0 )
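/*
 * Clearing CR4.PGE invalidates all TLB entries, including global ones,
 * so a __pge_off()/__pge_on() pair performs a full TLB flush that also
 * covers _PAGE_GLOBAL mappings.
 */
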
#define _PAGE_PRESENT  0x001U
#define _PAGE_RW       0x002U
#define _PAGE_USER     0x004U
#define _PAGE_PWT      0x008U
#define _PAGE_PCD      0x010U
#define _PAGE_ACCESSED 0x020U
#define _PAGE_DIRTY    0x040U
#define _PAGE_PAT      0x080U
#define _PAGE_PSE      0x080U
#define _PAGE_GLOBAL   0x100U
#define _PAGE_AVAIL    0xE00U
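/*
 * _PAGE_PAT and _PAGE_PSE deliberately share bit 7: in an L1 entry the
 * hardware interprets it as the PAT bit, while in L2 and higher entries
 * it marks a superpage mapping.
 */
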
#define __PAGE_HYPERVISOR \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_HYPERVISOR_NOCACHE \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)

#define MAKE_GLOBAL(_x) ((_x) | _PAGE_GLOBAL)

#define PAGE_HYPERVISOR MAKE_GLOBAL(__PAGE_HYPERVISOR)
#define PAGE_HYPERVISOR_NOCACHE MAKE_GLOBAL(__PAGE_HYPERVISOR_NOCACHE)

#ifndef __ASSEMBLY__

static __inline__ int get_order(unsigned long size)
{
    int order;

    size = (size-1) >> (PAGE_SHIFT-1);
    order = -1;
    do {
        size >>= 1;
        order++;
    } while (size);
    return order;
}
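/*
 * get_order() returns the smallest n such that (PAGE_SIZE << n) >= size,
 * e.g. get_order(PAGE_SIZE) == 0 and get_order(PAGE_SIZE + 1) == 1.
 */
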
/* Allocator functions for Xen pagetables. */
struct pfn_info *alloc_xen_pagetable(void);
void free_xen_pagetable(struct pfn_info *pg);
l2_pgentry_t *virt_to_xen_l2e(unsigned long v);

/* Map physical page range in Xen virtual address space. */
#define MAP_SMALL_PAGES (1UL<<16) /* don't use superpages for the mapping */
int
map_pages_to_xen(
    unsigned long virt,
    unsigned long pfn,
    unsigned long nr_pfns,
    unsigned long flags);
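/*
 * Example (illustrative only): map a single frame at a Xen virtual
 * address with hypervisor permissions, forbidding superpage mappings:
 *
 *     map_pages_to_xen(vaddr, pfn, 1, PAGE_HYPERVISOR | MAP_SMALL_PAGES);
 */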

#endif /* !__ASSEMBLY__ */

#endif /* __X86_PAGE_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */