ia64/xen-unstable

view linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h @ 6190:4ec947baae75

Add generic_page_range() -- generic page table operation.

Linux has several instances of repeated code to do updates to a range
of PTEs. Mapping memory between domains in Xen also tends to need to
do this quite frequently, to ensure page tables have been constructed
and to look up PTE addresses when making mapping-related hypercalls.
This patch adds a generic PTE walk-and-fill operation that takes a
function pointer to call on leaf entries. direct_remap_area_pages()
is updated to use the new call, as are abuses of
__direct_remap_area_pages.
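
For illustration, here is a minimal sketch of how such a walk-and-fill helper
might be used. The prototype and the pte_fn_t callback type below are
assumptions for this example (the actual definitions live in the patched mm
code, not in the header shown further down), and the counting callback is
hypothetical.

/* Assumed prototype, for illustration only. */
typedef int (*pte_fn_t)(pte_t *pte, struct page *pte_page,
                        unsigned long addr, void *data);
int generic_page_range(struct mm_struct *mm, unsigned long address,
                       unsigned long size, pte_fn_t fn, void *data);

/* Hypothetical leaf callback: count the PTEs visited in the range. */
static int count_pte(pte_t *pte, struct page *pte_page,
                     unsigned long addr, void *data)
{
        (*(unsigned long *)data)++;
        return 0;
}

/* Walk [addr, addr + len), allocating intermediate tables as needed. */
static unsigned long count_ptes(struct mm_struct *mm,
                                unsigned long addr, unsigned long len)
{
        unsigned long n = 0;

        if (generic_page_range(mm, addr, len, count_pte, &n))
                return 0;
        return n;
}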

This patch also introduces two new helper functions for working with
page tables when mapping memory between domains:
create_lookup_pte_addr() returns the machine address of a PTE,
allocating intermediate page tables as necessary. touch_pte_range()
ensures that page tables exist for a virtual address range.
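
A hedged usage sketch of these two helpers follows; the surrounding function,
its arguments, and the error handling are illustrative assumptions rather than
code from this changeset.

/*
 * Hypothetical caller: a driver preparing a VMA for mapping foreign
 * memory might populate the page tables up front, then fetch the
 * machine address of one PTE to hand to a mapping-related hypercall.
 */
static int prepare_foreign_mapping(struct vm_area_struct *vma,
                                   unsigned long vaddr,
                                   unsigned long *pte_maddr)
{
        int rc;

        /* Ensure page tables exist for the whole VMA. */
        rc = touch_pte_range(vma->vm_mm, vma->vm_start,
                             vma->vm_end - vma->vm_start);
        if (rc)
                return rc;

        /*
         * Machine address of the PTE covering vaddr; intermediate
         * page tables are allocated by the helper if still missing.
         */
        return create_lookup_pte_addr(vma->vm_mm, vaddr, pte_maddr);
}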

Many of the existing linux page table operations (e.g. zap/remap/etc)
could be modified to use this interface, which would potentially
shorten up mm/memory.c a bit.
author akw27@arcadians.cl.cam.ac.uk
date Mon Aug 15 13:16:04 2005 +0000 (2005-08-15)
parents dc27fd3392b1
children 1a0723cd37f1 29aab159846c 1ae656509f02 23979fb12c49 84ee014ebd41 99914b54f7bf
line source
#ifndef _I386_PGTABLE_H
#define _I386_PGTABLE_H

#include <linux/config.h>
#include <asm-xen/hypervisor.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>

#ifndef _I386_BITOPS_H
#include <asm/bitops.h>
#endif

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern unsigned long empty_zero_page[1024];
extern pgd_t swapper_pg_dir[1024];
extern kmem_cache_t *pgd_cache;
extern kmem_cache_t *pmd_cache;
extern spinlock_t pgd_lock;
extern struct page *pgd_list;

void pmd_ctor(void *, kmem_cache_t *, unsigned long);
void pgd_ctor(void *, kmem_cache_t *, unsigned long);
void pgd_dtor(void *, kmem_cache_t *, unsigned long);
void pgtable_cache_init(void);
void paging_init(void);

/*
 * The Linux x86 paging architecture is 'compile-time dual-mode', it
 * implements both the traditional 2-level x86 page tables and the
 * newer 3-level PAE-mode page tables.
 */
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level-defs.h>
# define PMD_SIZE (1UL << PMD_SHIFT)
# define PMD_MASK (~(PMD_SIZE-1))
#else
# include <asm/pgtable-2level-defs.h>
#endif

#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT 22
#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \
                        2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#endif

/*
 * The 4MB page is guessing.. Detailed in the infamous "Chapter H"
 * of the Pentium details, but assuming intel did the straightforward
 * thing, this bit set in the page directory entry just means that
 * the page directory entry points directly to a 4MB-aligned block of
 * memory.
 */
#define _PAGE_BIT_PRESENT 0
#define _PAGE_BIT_RW 1
#define _PAGE_BIT_USER 2
#define _PAGE_BIT_PWT 3
#define _PAGE_BIT_PCD 4
#define _PAGE_BIT_ACCESSED 5
#define _PAGE_BIT_DIRTY 6
#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
#define _PAGE_BIT_UNUSED2 10
#define _PAGE_BIT_UNUSED3 11
#define _PAGE_BIT_NX 63

#define _PAGE_PRESENT 0x001
#define _PAGE_RW 0x002
#define _PAGE_USER 0x004
#define _PAGE_PWT 0x008
#define _PAGE_PCD 0x010
#define _PAGE_ACCESSED 0x020
#define _PAGE_DIRTY 0x040
#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */
#define _PAGE_UNUSED1 0x200 /* available for programmer */
#define _PAGE_UNUSED2 0x400
#define _PAGE_UNUSED3 0x800

#define _PAGE_FILE 0x040 /* set:pagecache unset:swap */
#define _PAGE_PROTNONE 0x080 /* If not present */
#ifdef CONFIG_X86_PAE
#define _PAGE_NX (1ULL<<_PAGE_BIT_NX)
#else
#define _PAGE_NX 0
#endif

#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE \
        __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED \
        __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)

#define PAGE_SHARED_EXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY \
        PAGE_COPY_NOEXEC
#define PAGE_READONLY \
        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)

#define _PAGE_KERNEL \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
#define _PAGE_KERNEL_EXEC \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)

extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)

/*
 * The i386 can't do page protection for execute, and considers that
 * the same as read. Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY_EXEC
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY_EXEC
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_ACCESS_OK

/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];

#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

#define pmd_none(x) (!pmd_val(x))
/* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
   can temporarily clear it. */
#define pmd_present(x) (pmd_val(x))
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; }

static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }

#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif

static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        if (!pte_dirty(*ptep))
                return 0;
        return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        if (!pte_young(*ptep))
                return 0;
        return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        if (pte_write(*ptep))
                clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
}

/*
 * Macro to mark a page protection value as "uncacheable". On processors which do not support
 * it, this is a no-op.
 */
#define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \
        ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
#define mk_pte_huge(entry) ((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte.pte_low &= _PAGE_CHG_MASK;
        pte.pte_low |= pgprot_val(newprot);
#ifdef CONFIG_X86_PAE
        /*
         * Chop off the NX bit (if present), and add the NX portion of
         * the newprot (if present):
         */
        pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
        pte.pte_high |= (pgprot_val(newprot) >> 32) & \
                        (__supported_pte_mask >> 32);
#endif
        return pte;
}

#define page_pte(page) page_pte_prot(page, __pgprot(0))

#define pmd_large(pmd) \
        ((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_index_k(addr) pgd_index(addr)

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) \
        (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
        ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address);

/*
 * Make a given kernel text page executable/non-executable.
 * Returns the previous executability setting of that page (which
 * is used to restore the previous state). Used by the SMP bootup code.
 * NOTE: this is an __init function for security reasons.
 */
#ifdef CONFIG_X86_PAE
extern int set_kernel_exec(unsigned long vaddr, int enable);
#else
static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
#endif

extern void noexec_setup(const char *str);

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
        ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
         pte_index(address))
#define pte_offset_map_nested(dir, address) \
        ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \
         pte_index(address))
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
#define pte_offset_map(dir, address) \
        ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif

/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 *
 * Also, we only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
#define update_mmu_cache(vma,address,pte) do { } while (0)
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
        do { \
                if (__dirty) { \
                        if ( likely((__vma)->vm_mm == current->mm) ) { \
                                BUG_ON(HYPERVISOR_update_va_mapping((__address), (__entry), UVMF_INVLPG|UVMF_MULTI|(unsigned long)((__vma)->vm_mm->cpu_vm_mask.bits))); \
                        } else { \
                                xen_l1_entry_update((__ptep), (__entry)); \
                                flush_tlb_page((__vma), (__address)); \
                        } \
                } \
        } while (0)

#define __HAVE_ARCH_PTEP_ESTABLISH
#define ptep_establish(__vma, __address, __ptep, __entry) \
        do { \
                ptep_set_access_flags(__vma, __address, __ptep, __entry, 1); \
        } while (0)

#define __HAVE_ARCH_PTEP_ESTABLISH_NEW
#define ptep_establish_new(__vma, __address, __ptep, __entry) \
        do { \
                if (likely((__vma)->vm_mm == current->mm)) { \
                        BUG_ON(HYPERVISOR_update_va_mapping((__address), \
                                __entry, 0)); \
                } else { \
                        xen_l1_entry_update((__ptep), (__entry)); \
                } \
        } while (0)

#ifndef CONFIG_XEN_SHADOW_MODE
void make_lowmem_page_readonly(void *va);
void make_lowmem_page_writable(void *va);
void make_page_readonly(void *va);
void make_page_writable(void *va);
void make_pages_readonly(void *va, unsigned int nr);
void make_pages_writable(void *va, unsigned int nr);
#else
#define make_lowmem_page_readonly(_va) ((void)0)
#define make_lowmem_page_writable(_va) ((void)0)
#define make_page_readonly(_va) ((void)0)
#define make_page_writable(_va) ((void)0)
#define make_pages_readonly(_va, _nr) ((void)0)
#define make_pages_writable(_va, _nr) ((void)0)
#endif

#define virt_to_ptep(__va) \
({ \
        pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \
        pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va)); \
        pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va)); \
        pte_offset_kernel(__pmd, (unsigned long)(__va)); \
})

#define arbitrary_virt_to_machine(__va) \
({ \
        pte_t *__pte = virt_to_ptep(__va); \
        unsigned long __pa = (*(unsigned long *)__pte) & PAGE_MASK; \
        __pa | ((unsigned long)(__va) & (PAGE_SIZE-1)); \
})

#endif /* !__ASSEMBLY__ */

#ifndef CONFIG_DISCONTIGMEM
#define kern_addr_valid(addr) (1)
#endif /* !CONFIG_DISCONTIGMEM */

int direct_remap_area_pages(struct mm_struct *mm,
                            unsigned long address,
                            unsigned long machine_addr,
                            unsigned long size,
                            pgprot_t prot,
                            domid_t domid);
int create_lookup_pte_addr(struct mm_struct *mm,
                           unsigned long address,
                           unsigned long *ptep);
int touch_pte_range(struct mm_struct *mm,
                    unsigned long address,
                    unsigned long size);

#define io_remap_page_range(vma,from,phys,size,prot) \
        direct_remap_area_pages(vma->vm_mm,from,phys,size,prot,DOMID_IO)

#define io_remap_pfn_range(vma,from,pfn,size,prot) \
        direct_remap_area_pages(vma->vm_mm,from,pfn<<PAGE_SHIFT,size,prot,DOMID_IO)

#define MK_IOSPACE_PFN(space, pfn) (pfn)
#define GET_IOSPACE(pfn) 0
#define GET_PFN(pfn) (pfn)

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* _I386_PGTABLE_H */