linux-2.4.29-xen-sparse/include/asm-xen/pgtable.h (ia64/xen-unstable, changeset 3516:1a4f61d36171)

bitkeeper revision 1.1159.223.31 (41f599bcklevTYwPtWQUZ7QK-azDbg)

Fix recent patch to change the way the version string is generated.
Signed-off-by: ian.pratt@cl.cam.ac.uk

author   iap10@freefall.cl.cam.ac.uk
date     Tue Jan 25 00:58:36 2005 +0000
parents  ed0d4ce83995
children d126cac32f08

#ifndef _I386_PGTABLE_H
#define _I386_PGTABLE_H

#include <linux/config.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/hypervisor.h>
#include <linux/threads.h>
#include <asm/fixmap.h>

#ifndef _I386_BITOPS_H
#include <asm/bitops.h>
#endif

#define swapper_pg_dir 0
extern void paging_init(void);

/* Caches aren't brain-dead on the Intel. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(mm, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)

extern unsigned long pgkern_mask;

#define __flush_tlb() ({ queue_tlb_flush(); XEN_flush_page_update_queue(); })
#define __flush_tlb_global() __flush_tlb()
#define __flush_tlb_all() __flush_tlb_global()
#define __flush_tlb_one(addr) ({ queue_invlpg(addr); XEN_flush_page_update_queue(); })
#define __flush_tlb_single(addr) ({ queue_invlpg(addr); XEN_flush_page_update_queue(); })
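
/*
 * Editor's illustrative sketch (not part of the original header): the
 * macros above show the Xen paravirtualised flush model -- MMU requests
 * are queued and only pushed to the hypervisor when the page-update
 * queue is flushed.  A caller can therefore batch PTE changes with the
 * flush that makes them visible.  Hypothetical helper, for illustration
 * only:
 */
#if 0
static inline void example_wrprotect_and_flush(pte_t *ptep, unsigned long addr)
{
	unsigned long pteval = *(unsigned long *)ptep;
	queue_l1_entry_update(ptep, pteval & ~_PAGE_RW); /* queue the PTE change */
	__flush_tlb_one(addr);                           /* invlpg + queue flush  */
}
#endif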

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

/*
 * The Linux x86 paging architecture is 'compile-time dual-mode': it
 * implements both the traditional 2-level x86 page tables and the
 * newer 3-level PAE-mode page tables.
 */
#ifndef __ASSEMBLY__
#if CONFIG_X86_PAE
# include <asm/pgtable-3level.h>

/*
 * Need to initialise the X86 PAE caches
 */
extern void pgtable_cache_init(void);

#else
# include <asm/pgtable-2level.h>

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init() do { } while (0)

#endif
#endif

#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_PGD_NR 0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT 22
#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
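
/*
 * Editor's note (worked example, not in the original header): with the
 * usual PAGE_OFFSET/TASK_SIZE of 0xC0000000 and the non-PAE PGDIR_SIZE
 * of 4MB, USER_PTRS_PER_PGD = 0xC0000000 / 0x400000 = 768, leaving
 * KERNEL_PGD_PTRS = 1024 - 768 = 256 directory slots for the kernel.
 * BOOT_USER_PGD_PTRS/BOOT_KERNEL_PGD_PTRS give the same 768/256 split
 * independent of PAE, since TWOLEVEL_PGDIR_SHIFT is fixed at 22.
 */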

#ifndef __ASSEMBLY__

/* 4MB is just a nice "safety zone". Also, we align to a fresh pde. */
#define VMALLOC_OFFSET (4*1024*1024)
extern void * high_memory;
#define VMALLOC_START (((unsigned long) high_memory + 2*VMALLOC_OFFSET-1) & \
			~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#if CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#endif

#define _PAGE_BIT_PRESENT 0
#define _PAGE_BIT_RW 1
#define _PAGE_BIT_USER 2
#define _PAGE_BIT_PWT 3
#define _PAGE_BIT_PCD 4
#define _PAGE_BIT_ACCESSED 5
#define _PAGE_BIT_DIRTY 6
#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */

#define _PAGE_PRESENT 0x001
#define _PAGE_RW 0x002
#define _PAGE_USER 0x004
#define _PAGE_PWT 0x008
#define _PAGE_PCD 0x010
#define _PAGE_ACCESSED 0x020
#define _PAGE_DIRTY 0x040
#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */

#define _PAGE_PROTNONE 0x080 /* If not present */

#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)

#define __PAGE_KERNEL \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_KERNEL_NOCACHE \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)
#define __PAGE_KERNEL_RO \
	(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)

#if 0
#define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)
#else
#define MAKE_GLOBAL(x) __pgprot(x)
#endif

#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)

/*
 * The i386 can't do page protection for execute, and considers that
 * the same as a read. Also, write permissions imply read permissions.
 * This is the closest we can get.
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
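
/*
 * Editor's note (worked example, not in the original header): the
 * __Pxxx/__Sxxx tables are indexed by the mmap protection bits in "xwr"
 * order -- __P for MAP_PRIVATE, __S for MAP_SHARED.  A PROT_READ|PROT_WRITE
 * private mapping therefore selects __P011 = PAGE_COPY (a read-only PTE;
 * the write is granted later by the copy-on-write path in the fault
 * handler), while the same protection on a shared mapping selects
 * __S011 = PAGE_SHARED (a genuinely writable PTE).  Execute permission
 * changes nothing because i386 cannot distinguish execute from read.
 */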

#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(xp) queue_l1_entry_update(xp, 0)

#define pmd_none(x) (!(x).pmd)
#define pmd_present(x) ((x).pmd & _PAGE_PRESENT)
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) (((x).pmd & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
static inline int pte_exec(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }

static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }

static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
	unsigned long pteval = *(unsigned long *)ptep;
	int ret = pteval & _PAGE_DIRTY;
	if ( ret ) queue_l1_entry_update(ptep, pteval & ~_PAGE_DIRTY);
	return ret;
}
static inline int ptep_test_and_clear_young(pte_t *ptep)
{
	unsigned long pteval = *(unsigned long *)ptep;
	int ret = pteval & _PAGE_ACCESSED;
	if ( ret ) queue_l1_entry_update(ptep, pteval & ~_PAGE_ACCESSED);
	return ret;
}
static inline void ptep_set_wrprotect(pte_t *ptep)
{
	unsigned long pteval = *(unsigned long *)ptep;
	if ( (pteval & _PAGE_RW) )
		queue_l1_entry_update(ptep, pteval & ~_PAGE_RW);
}
static inline void ptep_mkdirty(pte_t *ptep)
{
	unsigned long pteval = *(unsigned long *)ptep;
	if ( !(pteval & _PAGE_DIRTY) )
		queue_l1_entry_update(ptep, pteval | _PAGE_DIRTY);
}
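
/*
 * Editor's illustrative sketch (not in the original header): unlike the
 * native i386 versions, the ptep_* helpers above do not write the PTE
 * directly -- they queue an update for the hypervisor.  A caller that
 * scans many PTEs (e.g. page ageing) can let the queue batch the writes
 * and issue one flush at the end.  Hypothetical helper, for illustration
 * only:
 */
#if 0
static inline int example_count_young(pte_t *ptes, int n)
{
	int i, young = 0;
	for ( i = 0; i < n; i++ )
		young += !!ptep_test_and_clear_young(&ptes[i]); /* queues updates */
	__flush_tlb(); /* push the queued updates and flush the TLB once */
	return young;
}
#endif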

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot) __mk_pte((page) - mem_map, (pgprot))

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) __mk_pte((physpage) >> PAGE_SHIFT, pgprot)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low &= _PAGE_CHG_MASK;
	pte.pte_low |= pgprot_val(newprot);
	return pte;
}

#define page_pte(page) page_pte_prot(page, __pgprot(0))

#define pmd_page(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/* to find an entry in a page-table-directory. */
#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

#define __pgd_offset(address) pgd_index(address)

#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define __pmd_offset(address) \
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* Find an entry in the third-level page table.. */
#define __pte_offset(address) \
	((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address) ((pte_t *) pmd_page(*(dir)) + \
	__pte_offset(address))

/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma,address,pte) do { } while (0)

/* Encode and de-code a swap entry */
#define SWP_TYPE(x) (((x).val >> 1) & 0x3f)
#define SWP_OFFSET(x) ((x).val >> 8)
#define SWP_ENTRY(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
#define swp_entry_to_pte(x) ((pte_t) { (x).val })
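
/*
 * Editor's note (worked example, not in the original header): a swap
 * entry packs the swap type into bits 1-6 and the offset from bit 8 up,
 * leaving bit 0 clear so the entry is never _PAGE_PRESENT.  For example,
 * SWP_ENTRY(2, 0x1234) yields (2 << 1) | (0x1234 << 8) = 0x123404, from
 * which SWP_TYPE() recovers 2 and SWP_OFFSET() recovers 0x1234.
 */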

struct page;
int change_page_attr(struct page *, int, pgprot_t prot);

static inline void __make_page_readonly(void *va)
{
	pgd_t *pgd = pgd_offset_k((unsigned long)va);
	pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
	pte_t *pte = pte_offset(pmd, (unsigned long)va);
	queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
}

static inline void __make_page_writable(void *va)
{
	pgd_t *pgd = pgd_offset_k((unsigned long)va);
	pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
	pte_t *pte = pte_offset(pmd, (unsigned long)va);
	queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
}

static inline void make_page_readonly(void *va)
{
	pgd_t *pgd = pgd_offset_k((unsigned long)va);
	pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
	pte_t *pte = pte_offset(pmd, (unsigned long)va);
	queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
	if ( (unsigned long)va >= VMALLOC_START )
		__make_page_readonly(machine_to_virt(
			*(unsigned long *)pte&PAGE_MASK));
}

static inline void make_page_writable(void *va)
{
	pgd_t *pgd = pgd_offset_k((unsigned long)va);
	pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
	pte_t *pte = pte_offset(pmd, (unsigned long)va);
	queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
	if ( (unsigned long)va >= VMALLOC_START )
		__make_page_writable(machine_to_virt(
			*(unsigned long *)pte&PAGE_MASK));
}

static inline void make_pages_readonly(void *va, unsigned int nr)
{
	while ( nr-- != 0 )
	{
		make_page_readonly(va);
		va = (void *)((unsigned long)va + PAGE_SIZE);
	}
}

static inline void make_pages_writable(void *va, unsigned int nr)
{
	while ( nr-- != 0 )
	{
		make_page_writable(va);
		va = (void *)((unsigned long)va + PAGE_SIZE);
	}
}
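
/*
 * Editor's illustrative sketch (not in the original header): Xen requires
 * that any page the guest hands over as a page table be mapped read-only
 * in the guest's own address space, which is what the helpers above
 * arrange (including the underlying page behind a vmalloc alias).  A
 * caller preparing such pages would typically do something like the
 * following (hypothetical helper, for illustration only):
 */
#if 0
static inline void example_prepare_pagetable_pages(void *va, unsigned int nr)
{
	make_pages_readonly(va, nr);   /* queue RW-bit clears          */
	XEN_flush_page_update_queue(); /* push them to the hypervisor  */
}
#endif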

static inline unsigned long arbitrary_virt_to_machine(void *va)
{
	pgd_t *pgd = pgd_offset_k((unsigned long)va);
	pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
	pte_t *pte = pte_offset(pmd, (unsigned long)va);
	unsigned long pa = (*(unsigned long *)pte) & PAGE_MASK;
	return pa | ((unsigned long)va & (PAGE_SIZE-1));
}
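
/*
 * Editor's note (not in the original header): under Xen the kernel's PTEs
 * hold machine (host-physical) frame addresses, so reading the PTE as
 * above yields a machine address directly.  This works for any mapped
 * kernel virtual address, including vmalloc space, whereas a simple
 * direct-map conversion only covers lowmem.  For example, code passing a
 * buffer address to the hypervisor might use
 * arbitrary_virt_to_machine(buf) rather than assuming the buffer lies in
 * the direct map.
 */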

#endif /* !__ASSEMBLY__ */

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page) (0)
#define kern_addr_valid(addr) (1)

#define io_remap_page_range remap_page_range

#endif /* _I386_PGTABLE_H */