linux-2.4.29-xen-sparse/include/asm-xen/pgalloc.h @ 3516:1a4f61d36171 (ia64/xen-unstable)

bitkeeper revision 1.1159.223.31 (41f599bcklevTYwPtWQUZ7QK-azDbg)

Fix recent patch to change the way the version string is generated.
Signed-off-by: ian.pratt@cl.cam.ac.uk

author    iap10@freefall.cl.cam.ac.uk
date      Tue Jan 25 00:58:36 2005 +0000
parents   ed0d4ce83995
children  d126cac32f08
#ifndef _I386_PGALLOC_H
#define _I386_PGALLOC_H

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/hypervisor.h>
#include <linux/threads.h>

/*
 * Quick lists are aligned so that least significant bits of array pointer
 * are all zero when list is empty, and all one when list is full.
 */
#define QUICKLIST_ENTRIES 256
#define QUICKLIST_EMPTY(_l) !((unsigned long)(_l) & ((QUICKLIST_ENTRIES*4)-1))
#define QUICKLIST_FULL(_l)  QUICKLIST_EMPTY((_l)+1)
#define pgd_quicklist (current_cpu_data.pgd_quick)
#define pmd_quicklist (current_cpu_data.pmd_quick)
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
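
/*
 * Worked example of the alignment trick above (illustrative): with
 * QUICKLIST_ENTRIES == 256 and 4-byte slots, each quicklist is a 1KB
 * (256 x 4-byte) array aligned on a 1KB boundary, and the list pointer
 * walks that array.  Its offset inside the 1KB window therefore encodes
 * the fill level:
 *
 *   empty: pointer at the array base  -> (ptr & 1023) == 0
 *   full:  pointer at the final slot  -> ptr+1 lands on the next 1KB boundary
 *
 * which is exactly what QUICKLIST_EMPTY() and QUICKLIST_FULL() test with
 * the mask (QUICKLIST_ENTRIES*4)-1 == 0x3ff.
 */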
#define pmd_populate(mm, pmd, pte)                      \
    do {                                                \
        set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)));   \
        XEN_flush_page_update_queue();                  \
    } while ( 0 )
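
/*
 * A sketch of what the flush above achieves (hedged summary; the exact
 * plumbing lives in the hypervisor support code, not in this header):
 * page-table writes such as set_pmd() are queued as mmu_update_t requests
 * rather than applied directly, and XEN_flush_page_update_queue() pushes
 * the pending queue to Xen so the new pmd entry is in effect before any
 * code walks or maps through it.
 */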
/*
 * Allocate and free page tables.
 */

#if defined (CONFIG_X86_PAE)

#error "no PAE support as yet"

/*
 * We can't include <linux/slab.h> here, thus these uglinesses.
 */
struct kmem_cache_s;

extern struct kmem_cache_s *pae_pgd_cachep;
extern void *kmem_cache_alloc(struct kmem_cache_s *, int);
extern void kmem_cache_free(struct kmem_cache_s *, void *);

static inline pgd_t *get_pgd_slow(void)
{
    int i;
    pgd_t *pgd = kmem_cache_alloc(pae_pgd_cachep, GFP_KERNEL);

    if (pgd) {
        for (i = 0; i < USER_PTRS_PER_PGD; i++) {
            unsigned long pmd = __get_free_page(GFP_KERNEL);
            if (!pmd)
                goto out_oom;
            clear_page(pmd);
            set_pgd(pgd + i, __pgd(1 + __pa(pmd)));
        }
        memcpy(pgd + USER_PTRS_PER_PGD,
               init_mm.pgd + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
    }
    return pgd;
out_oom:
    for (i--; i >= 0; i--)
        free_page((unsigned long)__va(pgd_val(pgd[i])-1));
    kmem_cache_free(pae_pgd_cachep, pgd);
    return NULL;
}

#else

static inline pgd_t *get_pgd_slow(void)
{
    pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

    if (pgd) {
        memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
        memcpy(pgd + USER_PTRS_PER_PGD,
               init_mm.pgd + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        __make_page_readonly(pgd);
        queue_pgd_pin(__pa(pgd));
    }
    return pgd;
}

#endif /* CONFIG_X86_PAE */
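
/*
 * A note on the read-only/pin pairs used above and in pte_alloc_one()
 * below (hedged summary of the paravirtual rules this port follows): a
 * guest may not keep writable mappings of its own page tables, so a
 * freshly built pgd or pte page is first made read-only with
 * __make_page_readonly() and then pinned via queue_pgd_pin() or
 * queue_pte_pin() so the hypervisor can validate and track it.
 * free_pgd_slow() and pte_free_slow() undo the two steps in reverse
 * order before handing the page back to the allocator.
 */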
static inline pgd_t *get_pgd_fast(void)
{
    unsigned long ret;

    if ( !QUICKLIST_EMPTY(pgd_quicklist) ) {
        ret = *(--pgd_quicklist);
        pgtable_cache_size--;
    } else
        ret = (unsigned long)get_pgd_slow();
    return (pgd_t *)ret;
}

static inline void free_pgd_slow(pgd_t *pgd)
{
#if defined(CONFIG_X86_PAE)
#error
    int i;

    for (i = 0; i < USER_PTRS_PER_PGD; i++)
        free_page((unsigned long)__va(pgd_val(pgd[i])-1));
    kmem_cache_free(pae_pgd_cachep, pgd);
#else
    queue_pgd_unpin(__pa(pgd));
    __make_page_writable(pgd);
    free_page((unsigned long)pgd);
#endif
}

static inline void free_pgd_fast(pgd_t *pgd)
{
    if ( !QUICKLIST_FULL(pgd_quicklist) ) {
        *(pgd_quicklist++) = (unsigned long)pgd;
        pgtable_cache_size++;
    } else
        free_pgd_slow(pgd);
}

static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
    pte_t *pte;

    pte = (pte_t *) __get_free_page(GFP_KERNEL);
    if (pte)
    {
        clear_page(pte);
        __make_page_readonly(pte);
        queue_pte_pin(__pa(pte));
    }
    return pte;
}

static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
                                        unsigned long address)
{
    unsigned long ret = 0;
    if ( !QUICKLIST_EMPTY(pte_quicklist) ) {
        ret = *(--pte_quicklist);
        pgtable_cache_size--;
    }
    return (pte_t *)ret;
}

static __inline__ void pte_free_slow(pte_t *pte)
{
    queue_pte_unpin(__pa(pte));
    __make_page_writable(pte);
    free_page((unsigned long)pte);
}

static inline void pte_free_fast(pte_t *pte)
{
    if ( !QUICKLIST_FULL(pte_quicklist) ) {
        *(pte_quicklist++) = (unsigned long)pte;
        pgtable_cache_size++;
    } else
        pte_free_slow(pte);
}

#define pte_free(pte)  pte_free_fast(pte)
#define pgd_free(pgd)  free_pgd_fast(pgd)
#define pgd_alloc(mm)  get_pgd_fast()

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 * (In the PAE case we free the pmds as part of the pgd.)
 */
#define pmd_alloc_one_fast(mm, addr) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, addr)      ({ BUG(); ((pmd_t *)2); })
#define pmd_free_slow(x)             do { } while (0)
#define pmd_free_fast(x)             do { } while (0)
#define pmd_free(x)                  do { } while (0)
#define pgd_populate(mm, pmd, pte)   BUG()
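
/*
 * Put differently (two-level, non-PAE case): the pmd level collapses into
 * the pgd entry itself, so the generic pmd allocation hooks above should
 * never actually be reached; hence the BUG() placeholders rather than
 * real allocators.
 */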
extern int do_check_pgt_cache(int, int);

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
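
/*
 * Usage sketch (illustrative; set_pte/pte_wrprotect come from the pgtable
 * headers): a caller that narrows a single mapping flushes just that
 * address afterwards, e.g.
 *
 *     set_pte(ptep, pte_wrprotect(*ptep));
 *     flush_tlb_page(vma, address);
 *
 * In this port the flush is queued and handed to Xen via
 * XEN_flush_page_update_queue() rather than issued as a native invlpg,
 * as the inline versions below show.
 */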
#ifndef CONFIG_SMP

#define flush_tlb()       __flush_tlb()
#define flush_tlb_all()   __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
    if (mm == current->active_mm) queue_tlb_flush();
    XEN_flush_page_update_queue();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
    if (vma->vm_mm == current->active_mm) queue_invlpg(addr);
    XEN_flush_page_update_queue();
}

static inline void flush_tlb_range(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
{
    if (mm == current->active_mm) queue_tlb_flush();
    XEN_flush_page_update_queue();
}

#else
#error no guestos SMP support yet...
#include <asm/smp.h>

#define local_flush_tlb() \
    __flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb() flush_tlb_current_task()

static inline void flush_tlb_range(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
{
    flush_tlb_mm(mm);
}

#define TLBSTATE_OK   1
#define TLBSTATE_LAZY 2

struct tlb_state
{
    struct mm_struct *active_mm;
    int state;
} ____cacheline_aligned;
extern struct tlb_state cpu_tlbstate[NR_CPUS];

#endif /* CONFIG_SMP */

static inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
    /* i386 does not keep any page table caches in TLB */
    XEN_flush_page_update_queue();
}

/*
 * NB. The 'domid' field should be zero if mapping I/O space (non RAM).
 * Otherwise it identifies the owner of the memory that is being mapped.
 */
extern int direct_remap_area_pages(struct mm_struct *mm,
                                   unsigned long address,
                                   unsigned long machine_addr,
                                   unsigned long size,
                                   pgprot_t prot,
                                   domid_t domid);
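
/*
 * Usage sketch (parameter values are illustrative only): to map a
 * machine-address range of I/O space (non-RAM) into a vma, a caller
 * would pass domid 0 as described above, e.g.
 *
 *     err = direct_remap_area_pages(vma->vm_mm, vma->vm_start,
 *                                   machine_addr,
 *                                   vma->vm_end - vma->vm_start,
 *                                   vma->vm_page_prot, 0);
 *
 * A non-zero domid instead names the domain that owns the memory being
 * mapped.
 */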
extern int __direct_remap_area_pages(struct mm_struct *mm,
                                     unsigned long address,
                                     unsigned long size,
                                     mmu_update_t *v);

#endif /* _I386_PGALLOC_H */