
linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/pgtable.h @ 14019:4b9680c58d73

linux/x86: Clean up page table handling headers

- remove dead code
- fix line breaking and space vs. tab usage
- remove redundant parentheses

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author kfraser@localhost.localdomain
date Tue Feb 20 09:41:32 2007 +0000 (2007-02-20)
parents d2dff286994d
children 56760c1de102

#ifndef _X86_64_PGTABLE_H
#define _X86_64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/bitops.h>
#include <linux/threads.h>
#include <linux/sched.h>
#include <asm/pda.h>
#ifdef CONFIG_XEN
#include <asm/hypervisor.h>

extern pud_t level3_user_pgt[512];
extern pud_t init_level4_user_pgt[];

extern void xen_init_pt(void);
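
/*
 * virt_to_ptep() walks the kernel page tables (pgd -> pud -> pmd) to the L1
 * entry mapping a kernel virtual address; arbitrary_virt_to_machine() then
 * combines the machine frame number from that entry with the in-page offset
 * to form the full machine address.
 */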
#define virt_to_ptep(__va) \
({ \
        pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \
        pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va)); \
        pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va)); \
        pte_offset_kernel(__pmd, (unsigned long)(__va)); \
})

#define arbitrary_virt_to_machine(__va) \
({ \
        maddr_t m = (maddr_t)pte_mfn(*virt_to_ptep(__va)) << PAGE_SHIFT;\
        m | ((unsigned long)(__va) & (PAGE_SIZE-1)); \
})
#endif

extern pud_t level3_kernel_pgt[512];
extern pud_t level3_physmem_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pgd_t init_level4_pgt[];
extern pgd_t boot_level4_pgt[];
extern unsigned long __supported_pte_mask;

#define swapper_pg_dir init_level4_pgt

extern int nonx_setup(char *str);
extern void paging_init(void);
extern void clear_kernel_mapping(unsigned long addr, unsigned long size);

extern unsigned long pgkern_mask;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * PGDIR_SHIFT determines what a top-level page table entry can map
 */
#define PGDIR_SHIFT 39
#define PTRS_PER_PGD 512

/*
 * 3rd level page
 */
#define PUD_SHIFT 30
#define PTRS_PER_PUD 512

/*
 * PMD_SHIFT determines the size of the area a middle-level
 * page table can map
 */
#define PMD_SHIFT 21
#define PTRS_PER_PMD 512

/*
 * entries per page directory level
 */
#define PTRS_PER_PTE 512

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e) \
        printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))

#define pgd_none(x) (!pgd_val(x))
#define pud_none(x) (!pud_val(x))

#define set_pte_batched(pteptr, pteval) \
        queue_l1_entry_update(pteptr, (pteval))

extern inline int pud_present(pud_t pud) { return !pud_none(pud); }
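
/*
 * A PV guest's page tables are validated and write-protected by Xen, so
 * entries above the PTE level are changed through the explicit
 * xen_l{2,3,4}_entry_update() hooks below.  PTE-level writes (set_pte) are
 * plain stores here, relying on Xen's writable page-table handling, and
 * set_pte_batched() above can queue them for a batched update instead.
 */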
static inline void set_pte(pte_t *dst, pte_t val)
{
        *dst = val;
}

#define set_pmd(pmdptr, pmdval) xen_l2_entry_update(pmdptr, (pmdval))
#define set_pud(pudptr, pudval) xen_l3_entry_update(pudptr, (pudval))
#define set_pgd(pgdptr, pgdval) xen_l4_entry_update(pgdptr, (pgdval))

static inline void pud_clear (pud_t * pud)
{
        set_pud(pud, __pud(0));
}
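
/*
 * A second, "user" top-level page table sits directly after the kernel one
 * (cf. init_level4_user_pgt above); __user_pgd() returns the matching slot
 * in it, and pgd_clear() must clear both copies.
 */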
#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)

static inline void pgd_clear (pgd_t * pgd)
{
        set_pgd(pgd, __pgd(0));
        set_pgd(__user_pgd(pgd), __pgd(0));
}

#define pud_page(pud) \
        ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))

/*
 * A note on implementation of this atomic 'get-and-clear' operation.
 * This is actually very simple because Xen Linux can only run on a single
 * processor. Therefore, we cannot race other processors setting the 'accessed'
 * or 'dirty' bits on a page-table entry.
 * Even if pages are shared between domains, that is not a problem because
 * each domain will have separate page tables, with their own versions of
 * accessed & dirty state.
 */
#define ptep_get_and_clear(mm,addr,xp) __pte_ma(xchg(&(xp)->pte, 0))

#if 0
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp)
{
        pte_t pte = *xp;
        if (pte.pte)
                set_pte(xp, __pte_ma(0));
        return pte;
}
#endif

struct mm_struct;
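
/*
 * ptep_get_and_clear_full(): when 'full' is set the whole address space is
 * being torn down, so the entry can be read and cleared with plain stores;
 * otherwise fall back to the regular ptep_get_and_clear() path above.
 */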
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
{
        pte_t pte;
        if (full) {
                pte = *ptep;
                *ptep = __pte(0);
        } else {
                pte = ptep_get_and_clear(mm, addr, ptep);
        }
        return pte;
}

#define pte_same(a, b) ((a).pte == (b).pte)

#define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))

#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
#define FIRST_USER_ADDRESS 0

#ifndef __ASSEMBLY__
#define MAXMEM 0x3fffffffffffUL
#define VMALLOC_START 0xffffc20000000000UL
#define VMALLOC_END 0xffffe1ffffffffffUL
#define MODULES_VADDR 0xffffffff88000000UL
#define MODULES_END 0xfffffffffff00000UL
#define MODULES_LEN (MODULES_END - MODULES_VADDR)

#define _PAGE_BIT_PRESENT 0
#define _PAGE_BIT_RW 1
#define _PAGE_BIT_USER 2
#define _PAGE_BIT_PWT 3
#define _PAGE_BIT_PCD 4
#define _PAGE_BIT_ACCESSED 5
#define _PAGE_BIT_DIRTY 6
#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */

#define _PAGE_PRESENT 0x001
#define _PAGE_RW 0x002
#define _PAGE_USER 0x004
#define _PAGE_PWT 0x008
#define _PAGE_PCD 0x010
#define _PAGE_ACCESSED 0x020
#define _PAGE_DIRTY 0x040
#define _PAGE_PSE 0x080 /* 2MB page */
#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_GLOBAL 0x100 /* Global TLB entry */

#define _PAGE_PROTNONE 0x080 /* If not present */
#define _PAGE_NX (1UL<<_PAGE_BIT_NX)
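
/*
 * Kernel mappings include __kernel_page_user.  When built for compatibility
 * with Xen 3.0.2 (CONFIG_XEN_COMPAT_030002) its value is determined at run
 * time, since x86-64 PV kernels execute in ring 3 and whether kernel pages
 * need _PAGE_USER depends on the hypervisor; otherwise it is always 0.
 */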
#ifdef CONFIG_XEN_COMPAT_030002
extern unsigned int __kernel_page_user;
#else
#define __kernel_page_user 0
#endif

#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY | __kernel_page_user)

#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY PAGE_COPY_NOEXEC
#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define __PAGE_KERNEL \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
#define __PAGE_KERNEL_EXEC \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | __kernel_page_user)
#define __PAGE_KERNEL_NOCACHE \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
#define __PAGE_KERNEL_RO \
        (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
#define __PAGE_KERNEL_VSYSCALL \
        (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
        (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
#define __PAGE_KERNEL_LARGE \
        (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC \
        (__PAGE_KERNEL_EXEC | _PAGE_PSE)

/*
 * We don't support GLOBAL page in xenolinux64
 */
#define MAKE_GLOBAL(x) __pgprot((x))

#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
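
/*
 * Protection map: the __P### entries are used for private (copy-on-write)
 * mappings and the __S### entries for shared ones, indexed by the
 * exec/write/read ("xwr") permission bits of the mapping.
 */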
/* xwr */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY_EXEC
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY_EXEC
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC

static inline unsigned long pgd_bad(pgd_t pgd)
{
        unsigned long val = pgd_val(pgd);
        val &= ~PTE_MASK;
        val &= ~(_PAGE_USER | _PAGE_DIRTY);
        return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
}

static inline unsigned long pud_bad(pud_t pud)
{
        unsigned long val = pud_val(pud);
        val &= ~PTE_MASK;
        val &= ~(_PAGE_USER | _PAGE_DIRTY);
        return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
}
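
/*
 * set_pte_at(): for the current (or init) address space, first try the
 * HYPERVISOR_update_va_mapping() hypercall, which updates the entry by
 * virtual address; fall back to a direct set_pte() for other address
 * spaces or if the hypercall fails.
 */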
#define set_pte_at(_mm,addr,ptep,pteval) do { \
        if (((_mm) != current->mm && (_mm) != &init_mm) || \
            HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \
                set_pte((ptep), (pteval)); \
} while (0)

#define pte_none(x) (!(x).pte)
#define pte_present(x) ((x).pte & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
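
/*
 * Present PTEs hold machine frame numbers, so pte_pfn() translates back to a
 * pseudo-physical frame with mfn_to_local_pfn(); non-present entries already
 * hold pseudo-physical frame numbers, which pte_mfn() converts the other way
 * with pfn_to_mfn().
 */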
#define __pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT)
#define pte_mfn(_pte) ((_pte).pte & _PAGE_PRESENT ? \
        __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
#define pte_pfn(_pte) ((_pte).pte & _PAGE_PRESENT ? \
        mfn_to_local_pfn(__pte_mfn(_pte)) : __pte_mfn(_pte))

#define pte_page(x) pfn_to_page(pte_pfn(x))

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
        unsigned long pte = page_nr << PAGE_SHIFT;
        pte |= pgprot_val(pgprot);
        pte &= __supported_pte_mask;
        return __pte(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define __pte_val(x) ((x).pte)

#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
static inline int pte_user(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
static inline int pte_read(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
static inline int pte_exec(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
static inline int pte_dirty(pte_t pte) { return __pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return __pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return __pte_val(pte) & _PAGE_RW; }
static inline int pte_file(pte_t pte) { return __pte_val(pte) & _PAGE_FILE; }
static inline int pte_huge(pte_t pte) { return __pte_val(pte) & _PAGE_PSE; }

static inline pte_t pte_rdprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_mkclean(pte_t pte) { __pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte) { __pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_RW; return pte; }
static inline pte_t pte_mkread(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { __pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { __pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { __pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) { __pte_val(pte) |= _PAGE_PSE; return pte; }

struct vm_area_struct;

static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;
        int ret = pte_dirty(pte);
        if (ret)
                set_pte(ptep, pte_mkclean(pte));
        return ret;
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;
        int ret = pte_young(pte);
        if (ret)
                set_pte(ptep, pte_mkold(pte));
        return ret;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;
        if (pte_write(pte))
                set_pte(ptep, pte_wrprotect(pte));
}

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))

static inline int pmd_large(pmd_t pte) {
        return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 * Never use these in the common code.
 */
#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & PTE_MASK))
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
#define pgd_offset_k(address) (pgd_t *)(init_level4_pgt + pgd_index(address))
#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)

/* PUD - Level3 access */
/* to find an entry in a page-table-directory. */
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
static inline pud_t *__pud_offset_k(pud_t *pud, unsigned long address)
{
        return pud + pud_index(address);
}

/* Find correct pud via the hidden fourth level page level: */

/* This accesses the reference page table of the boot cpu.
   Other CPUs get synced lazily via the page fault handler. */
static inline pud_t *pud_offset_k(pgd_t *pgd, unsigned long address)
{
        return pud_offset(pgd_offset_k(address), address);
}

/* PMD - Level 2 access */
#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pmd_offset(dir, address) ((pmd_t *) pud_page(*(dir)) + \
                                  pmd_index(address))
#define pmd_none(x) (!pmd_val(x))
/* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
   can temporarily clear it. */
#define pmd_present(x) (pmd_val(x))
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) ((pmd_val(x) & ~(PTE_MASK | _PAGE_USER | _PAGE_PRESENT)) \
                    != (_KERNPG_TABLE & ~(_PAGE_USER | _PAGE_PRESENT)))
#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
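
/*
 * Nonlinear file mappings stash the file offset in the frame-number field of
 * a not-present PTE, with _PAGE_FILE set to tell such entries apart from
 * swap entries.
 */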
#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT

/* PTE - Level 1 access. */

/* page, protection -> pte */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
#define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)

/* physical address -> PTE */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
        unsigned long pteval;
        pteval = physpage | pgprot_val(pgprot);
        return __pte(pteval);
}

/* Change flags of a PTE */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        /*
         * Since this might change the present bit (which controls whether
         * a pte_t object has undergone p2m translation), we must use
         * pte_val() on the input pte and __pte() for the return value.
         */
        unsigned long pteval = pte_val(pte);

        pteval &= _PAGE_CHG_MASK;
        pteval |= pgprot_val(newprot);
        pteval &= __supported_pte_mask;
        return __pte(pteval);
}

#define pte_index(address) \
                (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
                                         pte_index(address))

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
#define pte_unmap(pte) /* NOP */
#define pte_unmap_nested(pte) /* NOP */

#define update_mmu_cache(vma,address,pte) do { } while (0)

/* We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time. */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
        do { \
                if (__dirty) { \
                        if ( likely((__vma)->vm_mm == current->mm) ) { \
                                BUG_ON(HYPERVISOR_update_va_mapping(__address, \
                                        __entry, \
                                        (unsigned long)(__vma)->vm_mm->cpu_vm_mask.bits| \
                                                UVMF_INVLPG|UVMF_MULTI)); \
                        } else { \
                                xen_l1_entry_update(__ptep, __entry); \
                                flush_tlb_page(__vma, __address); \
                        } \
                } \
        } while (0)

/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 1) & 0x3f)
#define __swp_offset(x) ((x).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
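
/*
 * A swap entry keeps the swap type in bits 1-6 and the swap offset in bits 8
 * and up, leaving bit 0 (_PAGE_PRESENT) clear so it can never look like a
 * valid, present PTE.
 */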
extern spinlock_t pgd_lock;
extern struct page *pgd_list;
void vmalloc_sync_all(void);

#endif /* !__ASSEMBLY__ */

extern int kern_addr_valid(unsigned long addr);

#define DOMID_LOCAL (0xFFFFU)
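
/*
 * Helpers for mapping ranges of machine frames (possibly owned by another
 * domain, selected by 'domid') into a VMA or directly into kernel space;
 * io_remap_pfn_range() below is implemented on top of
 * direct_remap_pfn_range() with DOMID_IO.
 */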
int direct_remap_pfn_range(struct vm_area_struct *vma,
                           unsigned long address,
                           unsigned long mfn,
                           unsigned long size,
                           pgprot_t prot,
                           domid_t domid);

int direct_kernel_remap_pfn_range(unsigned long address,
                                  unsigned long mfn,
                                  unsigned long size,
                                  pgprot_t prot,
                                  domid_t domid);

int create_lookup_pte_addr(struct mm_struct *mm,
                           unsigned long address,
                           uint64_t *ptep);

int touch_pte_range(struct mm_struct *mm,
                    unsigned long address,
                    unsigned long size);

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
        direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO)

#define MK_IOSPACE_PFN(space, pfn) (pfn)
#define GET_IOSPACE(pfn) 0
#define GET_PFN(pfn) (pfn)

#define HAVE_ARCH_UNMAPPED_AREA

#define pgtable_cache_init() do { } while (0)
#define check_pgt_cache() do { } while (0)

#define PAGE_AGP PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o) \
        (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* _X86_64_PGTABLE_H */