ia64/xen-unstable

view linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-3level.h @ 8534:da7873110bbb

Tiny bootstrap cleanup.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Mon Jan 09 19:46:46 2006 +0100 (2006-01-09)
parents 99a1f5dc75a9
children 990c009015e8
#ifndef _I386_PGTABLE_3LEVEL_H
#define _I386_PGTABLE_3LEVEL_H

#include <asm-generic/pgtable-nopud.h>

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))

#define pud_none(pud)           0
#define pud_bad(pud)            0
#define pud_present(pud)        1

/*
 * Is the pte executable?
 */
static inline int pte_x(pte_t pte)
{
        return !(pte_val(pte) & _PAGE_NX);
}

/*
 * All present user-pages with !NX bit are user-executable:
 */
static inline int pte_exec(pte_t pte)
{
        return pte_user(pte) && pte_x(pte);
}
/*
 * All present pages with !NX bit are kernel-executable:
 */
static inline int pte_exec_kernel(pte_t pte)
{
        return pte_x(pte);
}

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use pte_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
#define __HAVE_ARCH_SET_PTE_ATOMIC

#if 1
/* use writable pagetables */
static inline void set_pte(pte_t *ptep, pte_t pte)
{
        ptep->pte_high = pte.pte_high;
        smp_wmb();
        ptep->pte_low = pte.pte_low;
}
# define set_pte_atomic(pteptr,pteval) \
        set_64bit((unsigned long long *)(pteptr),pte_val_ma(pteval))
#else
/* no writable pagetables */
# define set_pte(pteptr,pteval) \
        xen_l1_entry_update((pteptr), (pteval))
# define set_pte_atomic(pteptr,pteval) set_pte(pteptr,pteval)
#endif
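
/*
 * A minimal sketch of the pattern the rule above implies (using the
 * helpers defined in this file): a live, present pte should not be
 * rewritten in place with set_pte(), because its two 32-bit halves are
 * written separately and the hardware could walk a half-updated entry.
 * Roughly:
 *
 *      pte_t old_pte = ptep_get_and_clear(mm, addr, ptep);
 *      set_pte(ptep, new_pte);
 *
 * The smp_wmb() in set_pte() orders the high-half store before the low
 * half, so the entry only becomes present once both halves are valid.
 */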

#define set_pte_at(_mm,addr,ptep,pteval) do {                           \
        if (((_mm) != current->mm && (_mm) != &init_mm) ||              \
            HYPERVISOR_update_va_mapping((addr), (pteval), 0))          \
                set_pte((ptep), (pteval));                              \
} while (0)

#define set_pte_at_sync(_mm,addr,ptep,pteval) do {                      \
        if (((_mm) != current->mm && (_mm) != &init_mm) ||              \
            HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
                set_pte((ptep), (pteval));                              \
                xen_invlpg((addr));                                     \
        }                                                               \
} while (0)
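
/*
 * How the two macros above behave, as written: for the current or
 * kernel mm the pte is updated via HYPERVISOR_update_va_mapping()
 * (the _sync variant also requests a TLB flush with UVMF_INVLPG); for
 * any other mm, or if the hypercall fails, the update falls back to a
 * direct set_pte(), plus an explicit xen_invlpg() in the _sync case.
 */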

#ifdef CONFIG_XEN_SHADOW_MODE
# define set_pmd(pmdptr,pmdval) \
        set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval))
# define set_pud(pudptr,pudval) \
        set_64bit((unsigned long long *)(pudptr),pud_val(pudval))
#else
# define set_pmd(pmdptr,pmdval) \
        xen_l2_entry_update((pmdptr), (pmdval))
# define set_pud(pudptr,pudval) \
        xen_l3_entry_update((pudptr), (pudval))
#endif

/*
 * Pentium-II erratum A13: in PAE mode we explicitly have to flush
 * the TLB via cr3 if the top-level pgd is changed...
 * We do not let the generic code free and clear pgd entries due to
 * this erratum.
 */
static inline void pud_clear (pud_t * pud) { }

#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

#define pmd_page_kernel(pmd) \
        ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

#define pud_page(pud) \
        ((struct page *) __va(pud_val(pud) & PAGE_MASK))

#define pud_page_kernel(pud) \
        ((unsigned long) __va(pud_val(pud) & PAGE_MASK))

/* Find an entry in the second-level page table.. */
#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
        pmd_index(address))
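
/*
 * For reference, assuming the standard PAE split (PMD_SHIFT == 21,
 * 512 pmd entries): pmd_index() selects an entry from bits 21..29 of
 * the address, so a full lookup is roughly
 *
 *      pmd_t *pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
 */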

static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t res;

        /* xchg acts as a barrier before the setting of the high bits */
        res.pte_low = xchg(&ptep->pte_low, 0);
        res.pte_high = ptep->pte_high;
        ptep->pte_high = 0;

        return res;
}
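
/*
 * Why the low half is cleared first with xchg(): clearing pte_low
 * atomically removes the present bit, so once the xchg returns no
 * hardware walker can still be using the entry, and the high half can
 * then be read and cleared non-atomically without exposing a
 * half-valid pte.
 */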

static inline int pte_same(pte_t a, pte_t b)
{
        return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}

#define pte_page(x)     pfn_to_page(pte_pfn(x))

static inline int pte_none(pte_t pte)
{
        return !pte.pte_low && !pte.pte_high;
}

#define pte_mfn(_pte) ( ((_pte).pte_low >> PAGE_SHIFT) |        \
        (((_pte).pte_high & 0xfff) << (32-PAGE_SHIFT)) )
#define pte_pfn(_pte)                                                   \
({                                                                      \
        unsigned long mfn = pte_mfn(_pte);                              \
        unsigned long pfn = mfn_to_pfn(mfn);                            \
        if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\
                pfn = max_mapnr; /* special: force !pfn_valid() */      \
        pfn;                                                            \
})
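
/*
 * Note on pte_mfn()/pte_pfn() above: the pte stores a machine frame
 * number, so pte_pfn() translates it back to a pseudo-physical frame
 * with mfn_to_pfn() and then checks the result against
 * phys_to_machine_mapping[]; a frame this domain does not own (e.g. a
 * foreign mapping) fails that check and is forced to max_mapnr so that
 * pfn_valid() is false for it.
 */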

extern unsigned long long __supported_pte_mask;

static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot)
{
        pte_t pte;

        pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
                        (pgprot_val(pgprot) >> 32);
        pte.pte_high &= (__supported_pte_mask >> 32);
        pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
                        __supported_pte_mask;
        return pte;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
        return pfn_pte_ma(pfn_to_mfn(page_nr), pgprot);
}
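
/*
 * pfn_pte() vs pfn_pte_ma(): the former takes a pseudo-physical frame
 * and converts it with pfn_to_mfn() before building the entry; the
 * latter takes a machine frame directly.  A rough usage sketch,
 * assuming a pfn owned by this domain:
 *
 *      pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 */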

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
        BUG(); panic("needs review");
        return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) | \
                        pgprot_val(pgprot)) & __supported_pte_mask);
}

/*
 * Bits 0, 6 and 7 are taken in the low part of the pte,
 * put the 32 bits of offset into the high part.
 */
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
#define PTE_FILE_MAX_BITS       32
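
/*
 * Worked example of the file-pte encoding above: pte_low carries only
 * _PAGE_FILE (bits 0, 6 and 7 are reserved there) and the full 32-bit
 * file offset lives in pte_high, so pgoff_to_pte(5) yields
 * { .pte_low = _PAGE_FILE, .pte_high = 5 } and pte_to_pgoff() reads
 * the 5 back out of pte_high.
 */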

/* Encode and de-code a swap entry */
#define __swp_type(x)                   (((x).val) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 5)
#define __swp_entry(type, offset)       ((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)         ((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)           ((pte_t){ 0, (x).val })
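
/*
 * Swap-entry layout spelled out: the swap type occupies the low 5 bits
 * of the stored value and the swap offset the bits above, so e.g.
 * __swp_entry(2, 100) gives val == (100 << 5) | 2, and
 * __swp_entry_to_pte() places that value in pte_high with pte_low == 0,
 * so a swapped-out pte is never seen as present.
 */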

#define __pmd_free_tlb(tlb, x)          do { } while (0)

#endif /* _I386_PGTABLE_3LEVEL_H */