ia64/xen-unstable: linux-2.4.29-xen-sparse/include/asm-xen/pgtable-2level.h @ 3887:4385894c52ae

bitkeeper revision 1.1230.2.4 (421a95cepOZORm0EbZfqBeZ6PZ8MwA)

Merge freefall.cl.cam.ac.uk:/auto/groups/xeno/users/cl349/BK/xen-unstable.bk
into freefall.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk

author    iap10@freefall.cl.cam.ac.uk
date      Tue Feb 22 02:15:42 2005 +0000 (2005-02-22)
parents   9ee09144e830
children  db5a30a327e6 f234096eb41e a01199a95070

line source
#ifndef _I386_PGTABLE_2LEVEL_H
#define _I386_PGTABLE_2LEVEL_H

/*
 * traditional i386 two-level paging structure:
 */

#define PGDIR_SHIFT	22
#define PTRS_PER_PGD	1024

/*
 * the i386 is two-level, so we don't really have any
 * PMD directory physically.
 */
#define PMD_SHIFT	22
#define PTRS_PER_PMD	1

#define PTRS_PER_PTE	1024
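
/*
 * Illustrative sketch (not part of the original header): how the
 * constants above split a 32-bit virtual address.  Bits 31..22 index
 * the 1024-entry pgd, bits 21..12 index the 1024-entry pte table, and
 * bits 11..0 are the byte offset within the 4KB page.  For example,
 * 0xC0101234 decomposes as pgd index 0x300, pte index 0x101 and
 * offset 0x234.  PAGE_SHIFT (12) is assumed from asm/page.h.
 */
#if 0	/* illustration only */
#define EXAMPLE_PGD_INDEX(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define EXAMPLE_PTE_INDEX(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#endif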
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
#define pgd_clear(xp)				do { } while (0)

#define set_pte(pteptr, pteval) queue_l1_entry_update(pteptr, (pteval).pte_low)
#define set_pte_atomic(pteptr, pteval) queue_l1_entry_update(pteptr, (pteval).pte_low)
#define set_pmd(pmdptr, pmdval) queue_l2_entry_update((pmdptr), (pmdval).pmd)
#define set_pgd(pgdptr, pgdval) ((void)0)
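
/*
 * Note (illustration, not part of the original file): unlike native i386,
 * set_pte()/set_pmd() above do not write the page table directly -- the
 * tables are read-only to the guest, so updates are queued for the Xen
 * hypervisor to validate and apply.  A caller is therefore expected to
 * flush the update queue before relying on the new mapping.  The sketch
 * below assumes the flush_page_update_queue() helper from the companion
 * asm-xen/hypervisor.h; names are indicative only.
 */
#if 0	/* illustration only */
static inline void example_install_pte(pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);		/* queue the L1 update for Xen */
	flush_page_update_queue();	/* push queued updates to the hypervisor */
}
#endif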
#define pgd_page(pgd) \
	((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))

static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) dir;
}
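
/*
 * Illustrative walk (not part of the original file): because the pmd
 * level is folded, pmd_offset() simply reinterprets the pgd entry, so a
 * two-level lookup still goes through the generic three-step walk.
 * pgd_offset() and pte_offset() are assumed from the companion
 * asm-xen/pgtable.h, as in a standard 2.4 tree.
 */
#if 0	/* illustration only */
static inline pte_t *example_lookup_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);	/* bits 31..22 */
	pmd_t *pmd = pmd_offset(pgd, address);	/* folded: same entry */
	return pte_offset(pmd, address);	/* bits 21..12 */
}
#endif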
#define pte_same(a, b)		((a).pte_low == (b).pte_low)

/*
 * We detect special mappings in one of two ways:
 *  1. If the MFN is an I/O page then Xen will set the m2p entry
 *     to be outside our maximum possible pseudophys range.
 *  2. If the MFN belongs to a different domain then we will certainly
 *     not have MFN in our p2m table. Conversely, if the page is ours,
 *     then we'll have p2m(m2p(MFN))==MFN.
 * If we detect a special mapping then it doesn't have a 'struct page'.
 * We force !VALID_PAGE() by returning an out-of-range pointer.
 *
 * NB. These checks require that, for any MFN that is not in our reservation,
 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
 *
 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
 * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
 * require. In all the cases we care about, the high bit gets shifted out
 * (e.g., phys_to_machine()) so behaviour there is correct.
 * (An illustrative sketch follows the definitions below.)
 */
#define INVALID_P2M_ENTRY (~0U)
#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
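
/*
 * Illustrative sketch (not part of the original file): when a driver
 * maps a frame belonging to another domain, the p2m slot for the pfn it
 * chose must hold FOREIGN_FRAME(mfn) rather than the raw mfn, so the
 * p2m(m2p(MFN))==MFN ownership test above can never pass by accident
 * and pte_pfn()/pte_page() reject the entry.  The
 * phys_to_machine_mapping[] name is assumed from the companion
 * XenoLinux headers.
 */
#if 0	/* illustration only */
static inline void example_map_foreign_frame(unsigned long pfn, unsigned long mfn)
{
	/* High bit set => never equal to any real mfn returned by m2p. */
	phys_to_machine_mapping[pfn] = FOREIGN_FRAME(mfn);
}
#endif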
#define pte_page(_pte) \
({ \
	unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT; \
	unsigned long pfn = mfn_to_pfn(mfn); \
	if ( (pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn) ) \
		pfn = max_mapnr; /* special: force !VALID_PAGE() */ \
	&mem_map[pfn]; \
})
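
/*
 * Illustrative caller pattern (not part of the original file): because
 * pte_page() returns &mem_map[max_mapnr] for special mappings, callers
 * are expected to test VALID_PAGE() (from asm/page.h in 2.4) before
 * touching the struct page.
 */
#if 0	/* illustration only */
static inline struct page *example_pte_to_page(pte_t pte)
{
	struct page *page = pte_page(pte);
	if (!VALID_PAGE(page))
		return NULL;	/* I/O or foreign frame: no struct page */
	return page;
}
#endif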
#define pte_none(x)		(!(x).pte_low)
#define __mk_pte(page_nr,pgprot) __pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))

/*
 * A note on implementation of this atomic 'get-and-clear' operation.
 * This is actually very simple because XenoLinux can only run on a single
 * processor. Therefore, we cannot race other processors setting the 'accessed'
 * or 'dirty' bits on a page-table entry.
 * Even if pages are shared between domains, that is not a problem because
 * each domain will have separate page tables, with their own versions of
 * accessed & dirty state.
 */
static inline pte_t ptep_get_and_clear(pte_t *xp)
{
	pte_t pte = *xp;
	if ( !pte_none(pte) )
		queue_l1_entry_update(xp, 0);
	return pte;
}
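
/*
 * Illustrative caller (not part of the original file): the generic mm
 * code uses ptep_get_and_clear() when tearing down a mapping, then
 * inspects the returned (pre-clear) pte for accessed/dirty state.
 * pte_dirty() is assumed from the companion asm-xen/pgtable.h.
 */
#if 0	/* illustration only */
static inline int example_zap_pte(pte_t *ptep)
{
	pte_t old = ptep_get_and_clear(ptep);	/* queues the L1 clear for Xen */
	return pte_dirty(old);			/* safe: no SMP race, see note above */
}
#endif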

#endif /* _I386_PGTABLE_2LEVEL_H */