ia64/xen-unstable

linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h @ 3000:c60cac51f45e

bitkeeper revision 1.1159.1.427 (419a4dacYXoRnTKw9jlnxnjLIcPbAg)

Disable writable pagetables for CONFIG_SMP.
author cl349@freefall.cl.cam.ac.uk
date Tue Nov 16 18:57:48 2004 +0000 (2004-11-16)
parents 145b7783c604
children f0fe276ae088
line source
#ifndef _I386_PGTABLE_2LEVEL_H
#define _I386_PGTABLE_2LEVEL_H

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
#define pgd_clear(xp)				do { } while (0)
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
#ifdef CONFIG_SMP
#define set_pte(pteptr, pteval) xen_l1_entry_update(pteptr, (pteval).pte_low)
#if 0
        do {                                            \
                (*(pteptr) = pteval);                   \
                HYPERVISOR_xen_version(0);              \
        } while (0)
#endif
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval)
#else
#ifdef CONFIG_XEN_WRITABLE_PAGETABLES
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#define set_pte_atomic(pteptr, pteval) (*(pteptr) = pteval)
#else
#define set_pte(pteptr, pteval) xen_l1_entry_update(pteptr, (pteval).pte_low)
#define set_pte_atomic(pteptr, pteval) xen_l1_entry_update(pteptr, (pteval).pte_low)
#endif
#endif
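/*
 * NB. As of this changeset, CONFIG_SMP always routes PTE updates through
 * xen_l1_entry_update() (i.e. writable pagetables are disabled for SMP);
 * the #if 0 block above is presumably an older direct-write variant that
 * used a dummy HYPERVISOR_xen_version() hypercall as a flush.
 */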
/*
 * (pmds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval).pmd)
#define set_pgd(pgdptr, pgdval) ((void)0)

#define pgd_page(pgd) \
        ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))

static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
        return (pmd_t *) dir;
}
/*
 * A note on implementation of this atomic 'get-and-clear' operation.
 * This is actually very simple because Xen Linux can only run on a single
 * processor. Therefore, we cannot race other processors setting the 'accessed'
 * or 'dirty' bits on a page-table entry.
 * Even if pages are shared between domains, that is not a problem because
 * each domain will have separate page tables, with their own versions of
 * accessed & dirty state.
 */
static inline pte_t ptep_get_and_clear(pte_t *xp)
{
        pte_t pte = *xp;
        if (pte.pte_low)
                set_pte(xp, __pte_ma(0));
        return pte;
}
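/*
 * NB. The clear is issued through set_pte() with __pte_ma(0), so in the
 * configurations where set_pte() is the hypercall-backed variant the zero
 * entry is installed via xen_l1_entry_update() rather than a direct store.
 */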
#define pte_same(a, b)	((a).pte_low == (b).pte_low)

/*
 * We detect special mappings in one of two ways:
 *  1. If the MFN is an I/O page then Xen will set the m2p entry
 *     to be outside our maximum possible pseudophys range.
 *  2. If the MFN belongs to a different domain then we will certainly
 *     not have MFN in our p2m table. Conversely, if the page is ours,
 *     then we'll have p2m(m2p(MFN))==MFN.
 * If we detect a special mapping then it doesn't have a 'struct page'.
 * We force !pfn_valid() by returning an out-of-range pointer.
 *
 * NB. These checks require that, for any MFN that is not in our reservation,
 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
 *
 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
 *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
 *      require. In all the cases we care about, the high bit gets shifted out
 *      (e.g., phys_to_machine()) so behaviour there is correct.
 */
#define INVALID_P2M_ENTRY	(~0UL)
#define FOREIGN_FRAME(_m)	((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
#define pte_pfn(_pte)                                                   \
({                                                                      \
        unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT;               \
        unsigned long pfn = mfn_to_pfn(mfn);                            \
        if ( (pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn) )           \
                pfn = max_mapnr; /* special: force !pfn_valid() */      \
        pfn;                                                            \
})
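/*
 * Example (informal): for an I/O or foreign MFN, mfn_to_pfn(mfn) either falls
 * outside [0, max_mapnr) or names a PFN whose own pfn_to_mfn() differs, so
 * pte_pfn() returns max_mapnr and pfn_valid()/pte_page() treat the mapping as
 * special, exactly as described in the comment above.
 */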
#define pte_page(_pte) pfn_to_page(pte_pfn(_pte))

#define pte_none(x)		(!(x).pte_low)

#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pte_ma(pfn, prot)	__pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
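/*
 * NB. pfn_pte() builds the entry via __pte(), whereas pfn_pte_ma() uses
 * __pte_ma(); the "_ma" variant is presumably for callers that already hold
 * a machine frame number and must skip the usual pseudophys-to-machine
 * translation.
 */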
/*
 * All present user pages are user-executable:
 */
static inline int pte_exec(pte_t pte)
{
        return pte_user(pte);
}

/*
 * All present pages are kernel-executable:
 */
static inline int pte_exec_kernel(pte_t pte)
{
        return 1;
}
/*
 * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
 * into this range:
 */
#define PTE_FILE_MAX_BITS	29

#define pte_to_pgoff(pte) \
        ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))

#define pgoff_to_pte(off) \
        ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
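/*
 * I.e. pgoff_to_pte() packs the low 5 bits of the offset into pte bits 1-5
 * and the remaining 24 bits into pte bits 8-31 (5 + 24 = 29, matching
 * PTE_FILE_MAX_BITS), keeping the reserved bits 0, 6 and 7 clear apart from
 * _PAGE_FILE; pte_to_pgoff() reverses the split.
 */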
/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val >> 1) & 0x1f)
#define __swp_offset(x)			((x).val >> 8)
#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#endif /* _I386_PGTABLE_2LEVEL_H */