ia64/linux-2.6.18-xen.hg

include/asm-x86_64/mach-xen/asm/pgtable.h @ 761:5e1269aa5c29

blktap, gntdev: fix highpte handling

In the highpte case a pte page may live in highmem, so virt_to_machine()
cannot be used on a pte pointer. Introduce ptep_to_machine() and use it;
this also simplifies xen_l1_entry_update().
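For context, a rough and purely illustrative sketch (not from this changeset;
"pte_page" is a hypothetical struct page * for the pte page) of the pattern
that breaks on i386 with CONFIG_HIGHPTE, where a pte page can sit in highmem
and is only reachable through a transient kmap:

        pte_t *ptep = (pte_t *)kmap_atomic(pte_page, KM_PTE0) + pte_index(addr);
        maddr_t maddr = virt_to_machine(ptep); /* wrong: translates the
                                                   transient kmap address,
                                                   not the pte page itself */

ptep_to_machine() gives each configuration a correct way to translate a pte
pointer; in the x86-64 header below it simply falls back to virt_to_machine(),
since x86-64 has no highpte.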

Original patch from: Isaku Yamahata <yamahata@valinux.co.jp>
Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Dec 10 13:32:32 2008 +0000
#ifndef _X86_64_PGTABLE_H
#define _X86_64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/bitops.h>
#include <linux/threads.h>
#include <linux/sched.h>
#include <asm/pda.h>
#ifdef CONFIG_XEN
#include <asm/hypervisor.h>

extern pud_t level3_user_pgt[512];

extern void xen_init_pt(void);

extern pte_t *lookup_address(unsigned long address);
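
/*
 * Find the pte currently mapping 'va' in the kernel page tables, and
 * from it compute the machine address backing 'va'.  Only valid for
 * mapped addresses: virt_to_ptep() BUG()s on anything unmapped.
 */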
#define virt_to_ptep(va)                                        \
({                                                              \
        pte_t *__ptep = lookup_address((unsigned long)(va));    \
        BUG_ON(!__ptep || !pte_present(*__ptep));               \
        __ptep;                                                 \
})

#define arbitrary_virt_to_machine(va)                           \
        (((maddr_t)pte_mfn(*virt_to_ptep(va)) << PAGE_SHIFT)    \
         | ((unsigned long)(va) & (PAGE_SIZE - 1)))
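
/*
 * x86-64 has no highpte: page-table pages always live in the direct
 * mapping, so a pte pointer can be translated like any other kernel
 * virtual address.  (The i386 highpte case is what made this hook
 * necessary; see the changeset description above.)
 */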
#define ptep_to_machine(ptep) virt_to_machine(ptep)
#endif

extern pud_t level3_kernel_pgt[512];
extern pud_t level3_physmem_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pgd_t init_level4_pgt[];
extern pgd_t boot_level4_pgt[];
extern unsigned long __supported_pte_mask;

#define swapper_pg_dir init_level4_pgt

extern int nonx_setup(char *str);
extern void paging_init(void);
extern void clear_kernel_mapping(unsigned long addr, unsigned long size);

extern unsigned long pgkern_mask;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * PGDIR_SHIFT determines what a top-level page table entry can map
 */
#define PGDIR_SHIFT     39
#define PTRS_PER_PGD    512

/*
 * 3rd level page
 */
#define PUD_SHIFT       30
#define PTRS_PER_PUD    512

/*
 * PMD_SHIFT determines the size of the area a middle-level
 * page table can map
 */
#define PMD_SHIFT       21
#define PTRS_PER_PMD    512

/*
 * entries per page directory level
 */
#define PTRS_PER_PTE    512

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
               &(e), __pte_val(e), pte_pfn(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
               &(e), __pmd_val(e), pmd_pfn(e))
#define pud_ERROR(e) \
        printk("%s:%d: bad pud %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
               &(e), __pud_val(e), (pud_val(e) & __PHYSICAL_MASK) >> PAGE_SHIFT)
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
               &(e), __pgd_val(e), (pgd_val(e) & __PHYSICAL_MASK) >> PAGE_SHIFT)

#define pgd_none(x)     (!__pgd_val(x))
#define pud_none(x)     (!__pud_val(x))
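
/*
 * L1 (pte) entries may be written directly: set_pte_at() below decides
 * whether a plain store is safe or the update must go through the
 * hypervisor.  L2..L4 entries are always updated via the
 * xen_lN_entry_update() hypercall wrappers.
 */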
static inline void set_pte(pte_t *dst, pte_t val)
{
        *dst = val;
}

#define set_pmd(pmdptr, pmdval) xen_l2_entry_update(pmdptr, (pmdval))
#define set_pud(pudptr, pudval) xen_l3_entry_update(pudptr, (pudval))
#define set_pgd(pgdptr, pgdval) xen_l4_entry_update(pgdptr, (pgdval))

static inline void pud_clear(pud_t *pud)
{
        set_pud(pud, __pud(0));
}
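
/*
 * Under Xen, x86-64 keeps a second L4 page table for user mode on the
 * page immediately following the kernel one, hence the PTRS_PER_PGD
 * offset; pgd_clear() must clear both.
 */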
#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)

static inline void pgd_clear(pgd_t *pgd)
{
        set_pgd(pgd, __pgd(0));
        set_pgd(__user_pgd(pgd), __pgd(0));
}

#define pud_page(pud) \
        ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))

#define pte_same(a, b)  ((a).pte == (b).pte)

#define pte_pgprot(a)   (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))

#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PUD_SIZE        (1UL << PUD_SHIFT)
#define PUD_MASK        (~(PUD_SIZE-1))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD       ((TASK_SIZE-1)/PGDIR_SIZE+1)
#define FIRST_USER_ADDRESS      0

#ifndef __ASSEMBLY__
#define MAXMEM          0x3fffffffffffUL
#define VMALLOC_START   0xffffc20000000000UL
#define VMALLOC_END     0xffffe1ffffffffffUL
#define MODULES_VADDR   0xffffffff88000000UL
#define MODULES_END     0xfffffffffff00000UL
#define MODULES_LEN     (MODULES_END - MODULES_VADDR)

#define _PAGE_BIT_PRESENT       0
#define _PAGE_BIT_RW            1
#define _PAGE_BIT_USER          2
#define _PAGE_BIT_PWT           3
#define _PAGE_BIT_PCD           4
#define _PAGE_BIT_ACCESSED      5
#define _PAGE_BIT_DIRTY         6
#define _PAGE_BIT_PSE           7       /* 4 MB (or 2MB) page */
#define _PAGE_BIT_GLOBAL        8       /* Global TLB entry PPro+ */
#define _PAGE_BIT_NX            63      /* No execute: only valid after cpuid check */

#define _PAGE_PRESENT   0x001
#define _PAGE_RW        0x002
#define _PAGE_USER      0x004
#define _PAGE_PWT       0x008
#define _PAGE_PCD       0x010
#define _PAGE_ACCESSED  0x020
#define _PAGE_DIRTY     0x040
#define _PAGE_PSE       0x080   /* 2MB page */
#define _PAGE_FILE      0x040   /* nonlinear file mapping, saved PTE; unset: swap */
#define _PAGE_GLOBAL    0x100   /* Global TLB entry */

#define _PAGE_PROTNONE  0x080   /* If not present */
#define _PAGE_NX        (1UL<<_PAGE_BIT_NX)

/* Mapped page is I/O or foreign and has no associated page struct. */
#define _PAGE_IO        0x200
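
/*
 * On hypervisors predating Xen 3.0.3, whether kernel mappings need
 * _PAGE_USER is only known at run time (the x86-64 kernel runs in
 * ring 3 under Xen), so __kernel_page_user is a variable there; on
 * newer ABIs it is always 0.
 */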
#if CONFIG_XEN_COMPAT <= 0x030002
extern unsigned int __kernel_page_user;
#else
#define __kernel_page_user 0
#endif

#define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY | __kernel_page_user)

#define _PAGE_CHG_MASK  (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_IO)

#define PAGE_NONE               __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED             __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_SHARED_EXEC        __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY               PAGE_COPY_NOEXEC
#define PAGE_COPY_EXEC          __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY           __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC      __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define __PAGE_KERNEL \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
#define __PAGE_KERNEL_EXEC \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | __kernel_page_user)
#define __PAGE_KERNEL_NOCACHE \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
#define __PAGE_KERNEL_RO \
        (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
#define __PAGE_KERNEL_VSYSCALL \
        (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
        (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
#define __PAGE_KERNEL_LARGE \
        (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC \
        (__PAGE_KERNEL_EXEC | _PAGE_PSE)

/*
 * We don't support GLOBAL pages in xenolinux64
 */
#define MAKE_GLOBAL(x) __pgprot((x))

#define PAGE_KERNEL                     MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_EXEC                MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RO                  MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_NOCACHE             MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_VSYSCALL32          __pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL            MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_LARGE               MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_VSYSCALL_NOCACHE    MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)

/* xwr */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY_EXEC
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY_EXEC
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC

static inline unsigned long pgd_bad(pgd_t pgd)
{
        unsigned long val = __pgd_val(pgd);
        val &= ~PTE_MASK;
        val &= ~(_PAGE_USER | _PAGE_DIRTY);
        return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
}

static inline unsigned long pud_bad(pud_t pud)
{
        unsigned long val = __pud_val(pud);
        val &= ~PTE_MASK;
        val &= ~(_PAGE_USER | _PAGE_DIRTY);
        return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
}
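
/*
 * update_va_mapping operates on the currently loaded page tables, so it
 * is only attempted for the current mm or init_mm; it also handles
 * pinned (hypervisor read-only) page tables.  Otherwise, or if the
 * hypercall fails, fall back to a direct pte write.
 */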
#define set_pte_at(_mm,addr,ptep,pteval) do {                   \
        if (((_mm) != current->mm && (_mm) != &init_mm) ||      \
            HYPERVISOR_update_va_mapping((addr), (pteval), 0))  \
                set_pte((ptep), (pteval));                      \
} while (0)

#define pte_none(x)     (!(x).pte)
#define pte_present(x)  ((x).pte & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,xp)   do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
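
/*
 * A present pte carries a machine frame number; a non-present pte
 * carries a pseudo-physical frame number, hence the p2m/m2p
 * conversions below.  _PAGE_IO mappings have no struct page, so
 * pte_pfn() returns the (invalid) end_pfn for them.
 */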
#define __pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT)
#define pte_mfn(_pte)   ((_pte).pte & _PAGE_PRESENT ?           \
                         __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
#define pte_pfn(_pte)   ((_pte).pte & _PAGE_IO ? end_pfn :      \
                         (_pte).pte & _PAGE_PRESENT ?           \
                         mfn_to_local_pfn(__pte_mfn(_pte)) :    \
                         __pte_mfn(_pte))

#define pte_page(x)     pfn_to_page(pte_pfn(x))

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
        unsigned long pte = page_nr << PAGE_SHIFT;
        pte |= pgprot_val(pgprot);
        pte &= __supported_pte_mask;
        return __pte(pte);
}

static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;
        if (!pte_none(pte)) {
                if ((mm != &init_mm) ||
                    HYPERVISOR_update_va_mapping(addr, __pte(0), 0))
                        pte = __pte_ma(xchg(&ptep->pte, 0));
        }
        return pte;
}
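
/*
 * 'full' means the whole mm is being torn down, so no other CPU can be
 * using these page tables: an ordinary (non-atomic) clear is enough.
 * Pinned page tables are read-only to the kernel and still require the
 * hypervisor to perform the write.
 */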
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
{
        if (full) {
                pte_t pte = *ptep;
                if (mm->context.pinned)
                        xen_l1_entry_update(ptep, __pte(0));
                else
                        *ptep = __pte(0);
                return pte;
        }
        return ptep_get_and_clear(mm, addr, ptep);
}

#define ptep_clear_flush(vma, addr, ptep)                       \
({                                                              \
        pte_t *__ptep = (ptep);                                 \
        pte_t __res = *__ptep;                                  \
        if (!pte_none(__res) &&                                 \
            ((vma)->vm_mm != current->mm ||                     \
             HYPERVISOR_update_va_mapping(addr, __pte(0),       \
                (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits|  \
                UVMF_INVLPG|UVMF_MULTI))) {                     \
                __ptep->pte = 0;                                \
                flush_tlb_page(vma, addr);                      \
        }                                                       \
        __res;                                                  \
})

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
static inline int pte_user(pte_t pte)   { return __pte_val(pte) & _PAGE_USER; }
static inline int pte_read(pte_t pte)   { return __pte_val(pte) & _PAGE_USER; }
static inline int pte_exec(pte_t pte)   { return __pte_val(pte) & _PAGE_USER; }
static inline int pte_dirty(pte_t pte)  { return __pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)  { return __pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)  { return __pte_val(pte) & _PAGE_RW; }
static inline int pte_file(pte_t pte)   { return __pte_val(pte) & _PAGE_FILE; }
static inline int pte_huge(pte_t pte)   { return __pte_val(pte) & _PAGE_PSE; }

static inline pte_t pte_rdprotect(pte_t pte)    { __pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte)    { __pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_mkclean(pte_t pte)      { __pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)        { __pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)    { __pte_val(pte) &= ~_PAGE_RW; return pte; }
static inline pte_t pte_mkread(pte_t pte)       { __pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte)       { __pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)      { __pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)      { __pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)      { __pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkhuge(pte_t pte)       { __pte_val(pte) |= _PAGE_PSE; return pte; }

#define ptep_test_and_clear_dirty(vma, addr, ptep)              \
({                                                              \
        pte_t __pte = *(ptep);                                  \
        int __ret = pte_dirty(__pte);                           \
        if (__ret)                                              \
                set_pte_at((vma)->vm_mm, addr, ptep, pte_mkclean(__pte)); \
        __ret;                                                  \
})

#define ptep_test_and_clear_young(vma, addr, ptep)              \
({                                                              \
        pte_t __pte = *(ptep);                                  \
        int __ret = pte_young(__pte);                           \
        if (__ret)                                              \
                set_pte_at((vma)->vm_mm, addr, ptep, pte_mkold(__pte)); \
        __ret;                                                  \
})

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;
        if (pte_write(pte))
                set_pte_at(mm, addr, ptep, pte_wrprotect(pte));
}

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)  (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))

static inline int pmd_large(pmd_t pte) {
        return (__pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 * Never use these in the common code.
 */
#define pgd_page(pgd)           ((unsigned long) __va(pgd_val(pgd) & PTE_MASK))
#define pgd_index(address)      (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, addr)    ((mm)->pgd + pgd_index(addr))
#define pgd_offset_k(address)   (init_level4_pgt + pgd_index(address))
#define pgd_present(pgd)        (__pgd_val(pgd) & _PAGE_PRESENT)
#define mk_kernel_pgd(address)  __pgd((address) | _KERNPG_TABLE)

/* PUD - Level3 access */
/* to find an entry in a page-table-directory. */
#define pud_index(address)      (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
#define pud_present(pud)        (__pud_val(pud) & _PAGE_PRESENT)

/* PMD - Level 2 access */
#define pmd_page_kernel(pmd)    ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
#define pmd_page(pmd)           (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

#define pmd_index(address)      (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pmd_offset(dir, address) ((pmd_t *) pud_page(*(dir)) + \
                                  pmd_index(address))
#define pmd_none(x)     (!__pmd_val(x))
#if CONFIG_XEN_COMPAT <= 0x030002
/* pmd_present doesn't just test the _PAGE_PRESENT bit since the
   writable-page-tables mode can temporarily clear it. */
#define pmd_present(x)  (__pmd_val(x))
#else
#define pmd_present(x)  (__pmd_val(x) & _PAGE_PRESENT)
#endif
#define pmd_clear(xp)   do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x)      ((__pmd_val(x) & ~(PTE_MASK | _PAGE_USER | _PAGE_PRESENT)) \
                         != (_KERNPG_TABLE & ~(_PAGE_USER | _PAGE_PRESENT)))
#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
#define pmd_pfn(x)      ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)

#define pte_to_pgoff(pte) ((__pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT

/* PTE - Level 1 access. */

/* page, protection -> pte */
#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))
#define mk_pte_huge(entry)      (__pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)

/* physical address -> PTE */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
        unsigned long pteval;
        pteval = physpage | pgprot_val(pgprot);
        return __pte(pteval);
}

/* Change flags of a PTE */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        /*
         * Since this might change the present bit (which controls whether
         * a pte_t object has undergone p2m translation), we must use
         * pte_val() on the input pte and __pte() for the return value.
         */
        unsigned long pteval = pte_val(pte);

        pteval &= _PAGE_CHG_MASK;
        pteval |= pgprot_val(newprot);
        pteval &= __supported_pte_mask;
        return __pte(pteval);
}

#define pte_index(address) \
        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
                                         pte_index(address))

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
#define pte_unmap(pte) /* NOP */
#define pte_unmap_nested(pte) /* NOP */

#define update_mmu_cache(vma,address,pte) do { } while (0)

/*
 * Rules for using ptep_establish: the pte MUST be a user pte, and
 * must be a present->present transition.
 */
#define __HAVE_ARCH_PTEP_ESTABLISH
#define ptep_establish(vma, address, ptep, pteval)              \
        do {                                                    \
                if (likely((vma)->vm_mm == current->mm)) {      \
                        BUG_ON(HYPERVISOR_update_va_mapping(address, \
                                pteval,                         \
                                (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
                                UVMF_INVLPG|UVMF_MULTI));       \
                } else {                                        \
                        xen_l1_entry_update(ptep, pteval);      \
                        flush_tlb_page(vma, address);           \
                }                                               \
        } while (0)
/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
        do {                                                    \
                if (dirty)                                      \
                        ptep_establish(vma, address, ptep, entry); \
        } while (0)

/* Encode and de-code a swap entry */
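/* The type lives in bits 1..6 and the offset in bits 8 and up; bit 0
   (_PAGE_PRESENT) stays clear, so a swap pte is never seen as present. */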
#define __swp_type(x)           (((x).val >> 1) & 0x3f)
#define __swp_offset(x)         ((x).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { __pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })

extern spinlock_t pgd_lock;
extern struct page *pgd_list;
void vmalloc_sync_all(void);

#endif /* !__ASSEMBLY__ */

extern int kern_addr_valid(unsigned long addr);

#define DOMID_LOCAL (0xFFFFU)

struct vm_area_struct;

int direct_remap_pfn_range(struct vm_area_struct *vma,
                           unsigned long address,
                           unsigned long mfn,
                           unsigned long size,
                           pgprot_t prot,
                           domid_t domid);

int direct_kernel_remap_pfn_range(unsigned long address,
                                  unsigned long mfn,
                                  unsigned long size,
                                  pgprot_t prot,
                                  domid_t domid);

int create_lookup_pte_addr(struct mm_struct *mm,
                           unsigned long address,
                           uint64_t *ptep);

int touch_pte_range(struct mm_struct *mm,
                    unsigned long address,
                    unsigned long size);

int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd,
                         unsigned long addr, unsigned long end, pgprot_t newprot);

#define arch_change_pte_range(mm, pmd, addr, end, newprot) \
        xen_change_pte_range(mm, pmd, addr, end, newprot)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
        direct_remap_pfn_range(vma, vaddr, pfn, size, prot, DOMID_IO)

#define MK_IOSPACE_PFN(space, pfn)      (pfn)
#define GET_IOSPACE(pfn)                0
#define GET_PFN(pfn)                    (pfn)

#define HAVE_ARCH_UNMAPPED_AREA

#define pgtable_cache_init()    do { } while (0)
#define check_pgt_cache()       do { } while (0)

#define PAGE_AGP        PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP   1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o) \
        (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* _X86_64_PGTABLE_H */