ia64/xen-unstable

xenolinux-2.4.21-pre4-sparse/include/asm-xeno/pgtable-2level.h @ 235:d7d0a23b2e07

bitkeeper revision 1.93 (3e5a4e6bkPheUp3x1uufN2MS3LAB7A)

Latest and Greatest version of XenoLinux based on the Linux-2.4.21-pre4
kernel.
author iap10@labyrinth.cl.cam.ac.uk
date Mon Feb 24 16:55:07 2003 +0000 (2003-02-24)
line source
#ifndef _I386_PGTABLE_2LEVEL_H
#define _I386_PGTABLE_2LEVEL_H

/*
 * traditional i386 two-level paging structure:
 */

#define PGDIR_SHIFT 22
#define PTRS_PER_PGD 1024

/*
 * the i386 is two-level, so we don't really have any
 * PMD directory physically.
 */
#define PMD_SHIFT 22
#define PTRS_PER_PMD 1

#define PTRS_PER_PTE 1024

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }
static inline int pgd_present(pgd_t pgd) { return 1; }
#define pgd_clear(xp) do { } while (0)

#define set_pte(pteptr, pteval) queue_l1_entry_update(__pa(pteptr), (pteval).pte_low)
#define set_pte_atomic(pteptr, pteval) queue_l1_entry_update(__pa(pteptr), (pteval).pte_low)
#define set_pmd(pmdptr, pmdval) queue_l2_entry_update(__pa(pmdptr), (pmdval).pmd)
#define set_pgd(pgdptr, pgdval) ((void)0)
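
/*
 * For comparison: in the stock Linux 2.4 i386 header, set_pte() is roughly a
 * direct store, "#define set_pte(pteptr, pteval) (*(pteptr) = pteval)".
 * Here the new value is instead queued, keyed by the entry's physical address
 * (hence the __pa()), so that Xen can validate and apply the update when the
 * update queue is flushed; the guest never writes its live page tables
 * directly.
 */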

#define pgd_page(pgd) \
        ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))

static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
        return (pmd_t *) dir;
}

/*
 * A note on implementation of this atomic 'get-and-clear' operation.
 * This is actually very simple because XenoLinux can only run on a single
 * processor. Therefore, we cannot race other processors setting the 'accessed'
 * or 'dirty' bits on a page-table entry.
 * Even if pages are shared between domains, that is not a problem because
 * each domain will have separate page tables, with their own versions of
 * accessed & dirty state.
 */
static inline pte_t ptep_get_and_clear(pte_t *xp)
{
        pte_t pte = *xp;
        queue_l1_entry_update(__pa(xp), 0);
        return pte;
}
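
/*
 * Usage sketch (hypothetical caller, not part of this file): code tearing
 * down a mapping might do
 *
 *     pte_t old = ptep_get_and_clear(ptep);
 *     if (pte_dirty(old))
 *             set_page_dirty(pte_page(old));
 *
 * The returned pte still carries the old accessed/dirty bits, while the
 * queued update clears the entry itself.
 */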

#define pte_same(a, b) ((a).pte_low == (b).pte_low)
#define pte_page(x) (mem_map+((unsigned long)((pte_val(x) >> PAGE_SHIFT))))
#define pte_none(x) (!(x).pte_low)
#define __mk_pte(page_nr,pgprot) __pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))

#endif /* _I386_PGTABLE_2LEVEL_H */
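
A worked illustration of how the pieces above fit together (the helper and its
names below are assumptions for the example, not part of XenoLinux): __mk_pte()
builds a PTE value by shifting the page frame number left by PAGE_SHIFT (12 on
i386, so frame 0x12345 becomes 0x12345000) and OR-ing in the protection bits,
and set_pte() then queues that value as an l1 update for the hypervisor instead
of storing it directly.

static inline void example_install_page(pte_t *ptep, unsigned long page_nr,
                                        pgprot_t prot)
{
        pte_t pte = __mk_pte(page_nr, prot);  /* (page_nr << PAGE_SHIFT) | prot bits */
        set_pte(ptep, pte);                   /* queued l1 update, applied by Xen */
}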