ia64/xen-unstable: linux-2.4-xen-sparse/include/asm-xen/pgalloc.h @ 6538:84ee014ebd41

Merge xen-vtx-unstable.hg
author   adsharma@los-vmm.sc.intel.com
date     Wed Aug 17 12:34:38 2005 -0800 (2005-08-17)
parents  23979fb12c49 f294acb25858
children 99914b54f7bf
#ifndef _I386_PGALLOC_H
#define _I386_PGALLOC_H

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/hypervisor.h>
#include <linux/threads.h>
/*
 * Quick lists are aligned so that least significant bits of array pointer
 * are all zero when list is empty, and all one when list is full.
 */
#define QUICKLIST_ENTRIES 256
#define QUICKLIST_EMPTY(_l) !((unsigned long)(_l) & ((QUICKLIST_ENTRIES*4)-1))
#define QUICKLIST_FULL(_l)  QUICKLIST_EMPTY((_l)+1)
#define pgd_quicklist (current_cpu_data.pgd_quick)
#define pmd_quicklist (current_cpu_data.pmd_quick)
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
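
/*
 * Added note (not in the original source): with QUICKLIST_ENTRIES == 256
 * and 4-byte entries on a 32-bit build, each quicklist is (as the comment
 * above implies) a 1024-byte, 1024-byte-aligned array of page addresses,
 * so the free pointer's low 10 bits encode the fill level directly:
 *
 *     QUICKLIST_EMPTY(l)  - true when 'l' sits at the array base
 *                           (the low 10 bits of the pointer are all zero)
 *     QUICKLIST_FULL(l)   - true when 'l' points at the last slot,
 *                           i.e. 'l + 1' lands on the next 1024-byte boundary
 *
 * A push is therefore "*(l++) = page" and a pop "page = *(--l)", with no
 * per-list count; the shared pgtable_cache_size counter is only consulted
 * by do_check_pgt_cache() when deciding to trim the caches.
 */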
#define pmd_populate(mm, pmd, pte) \
    do { \
        set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \
    } while ( 0 )
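
/*
 * Added note: _PAGE_TABLE is the usual i386 flag combination
 * (present/rw/user/accessed/dirty), so the pmd entry is simply the physical
 * address of the pte page plus those bits.  In this tree set_pmd() is not a
 * plain store: it is routed through Xen's validated page-table update path.
 */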
/*
 * Allocate and free page tables.
 */
#if defined (CONFIG_X86_PAE)

#error "no PAE support as yet"

/*
 * We can't include <linux/slab.h> here, thus these uglinesses.
 */
struct kmem_cache_s;

extern struct kmem_cache_s *pae_pgd_cachep;
extern void *kmem_cache_alloc(struct kmem_cache_s *, int);
extern void kmem_cache_free(struct kmem_cache_s *, void *);
static inline pgd_t *get_pgd_slow(void)
{
    int i;
    pgd_t *pgd = kmem_cache_alloc(pae_pgd_cachep, GFP_KERNEL);

    if (pgd) {
        for (i = 0; i < USER_PTRS_PER_PGD; i++) {
            unsigned long pmd = __get_free_page(GFP_KERNEL);
            if (!pmd)
                goto out_oom;
            clear_page(pmd);
            set_pgd(pgd + i, __pgd(1 + __pa(pmd)));
        }
        memcpy(pgd + USER_PTRS_PER_PGD,
               init_mm.pgd + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
    }
    return pgd;
out_oom:
    for (i--; i >= 0; i--)
        free_page((unsigned long)__va(pgd_val(pgd[i])-1));
    kmem_cache_free(pae_pgd_cachep, pgd);
    return NULL;
}
#else

static inline pgd_t *get_pgd_slow(void)
{
    pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

    if (pgd) {
        memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
        memcpy(pgd + USER_PTRS_PER_PGD,
               init_mm.pgd + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        __make_page_readonly(pgd);
        xen_pgd_pin(__pa(pgd));
    }
    return pgd;
}
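
/*
 * Added note: the read-only remapping and xen_pgd_pin() call above are the
 * Xen-specific part.  A paravirtualised guest may not keep writable
 * mappings of its own page tables; pinning asks the hypervisor to validate
 * the page as a page directory and keep it type-locked until it is
 * unpinned again in free_pgd_slow().  The same pattern is applied to pte
 * pages in pte_alloc_one()/pte_free_slow() below.
 */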

#endif /* CONFIG_X86_PAE */
static inline pgd_t *get_pgd_fast(void)
{
    unsigned long ret;

    if ( !QUICKLIST_EMPTY(pgd_quicklist) ) {
        ret = *(--pgd_quicklist);
        pgtable_cache_size--;
    } else
        ret = (unsigned long)get_pgd_slow();
    return (pgd_t *)ret;
}
static inline void free_pgd_slow(pgd_t *pgd)
{
#if defined(CONFIG_X86_PAE)
#error
    int i;

    for (i = 0; i < USER_PTRS_PER_PGD; i++)
        free_page((unsigned long)__va(pgd_val(pgd[i])-1));
    kmem_cache_free(pae_pgd_cachep, pgd);
#else
    xen_pgd_unpin(__pa(pgd));
    __make_page_writable(pgd);
    free_page((unsigned long)pgd);
#endif
}
static inline void free_pgd_fast(pgd_t *pgd)
{
    if ( !QUICKLIST_FULL(pgd_quicklist) ) {
        *(pgd_quicklist++) = (unsigned long)pgd;
        pgtable_cache_size++;
    } else
        free_pgd_slow(pgd);
}
static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
    pte_t *pte;

    pte = (pte_t *) __get_free_page(GFP_KERNEL);
    if (pte)
    {
        clear_page(pte);
        __make_page_readonly(pte);
        xen_pte_pin(__pa(pte));
    }
    return pte;
}
static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
                                        unsigned long address)
{
    unsigned long ret = 0;
    if ( !QUICKLIST_EMPTY(pte_quicklist) ) {
        ret = *(--pte_quicklist);
        pgtable_cache_size--;
    }
    return (pte_t *)ret;
}
static __inline__ void pte_free_slow(pte_t *pte)
{
    xen_pte_unpin(__pa(pte));
    __make_page_writable(pte);
    free_page((unsigned long)pte);
}
static inline void pte_free_fast(pte_t *pte)
{
    if ( !QUICKLIST_FULL(pte_quicklist) ) {
        *(pte_quicklist++) = (unsigned long)pte;
        pgtable_cache_size++;
    } else
        pte_free_slow(pte);
}
#define pte_free(pte)   pte_free_fast(pte)
#define pgd_free(pgd)   free_pgd_fast(pgd)
#define pgd_alloc(mm)   get_pgd_fast()
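
/*
 * Added note (illustrative only): these are the entry points the generic
 * 2.4 mm code uses.  It allocates a pgd at mm-creation time with
 * pgd_alloc(mm) and fills in pte pages on demand, roughly:
 *
 *     pte = pte_alloc_one_fast(mm, addr);   - try the quicklist first
 *     if (!pte)
 *         pte = pte_alloc_one(mm, addr);    - fresh, zeroed, pinned page
 *     pmd_populate(mm, pmd, pte);
 *     ...
 *     pte_free(pte);                        - back onto the quicklist
 *
 * do_check_pgt_cache() (declared below) trims the quicklists when
 * pgtable_cache_size grows too large.
 */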
/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 * (In the PAE case we free the pmds as part of the pgd.)
 */
#define pmd_alloc_one_fast(mm, addr)    ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, addr)         ({ BUG(); ((pmd_t *)2); })
#define pmd_free_slow(x)                do { } while (0)
#define pmd_free_fast(x)                do { } while (0)
#define pmd_free(x)                     do { } while (0)
#define pgd_populate(mm, pmd, pte)      BUG()
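
/*
 * Added note: on non-PAE i386 the three-level generic interface is folded
 * down to two levels -- the "pmd" is just a view of a pgd entry -- so the
 * generic code reaches the pmd via pmd_offset() and must never try to
 * allocate or populate one separately.  Hence the BUG() placeholders and
 * the no-op free macros above.
 */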
extern int do_check_pgt_cache(int, int);
/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
#ifndef CONFIG_SMP

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
    if (mm == current->active_mm) xen_tlb_flush();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
    if (vma->vm_mm == current->active_mm) xen_invlpg(addr);
}

static inline void flush_tlb_range(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
{
    if (mm == current->active_mm) xen_tlb_flush();
}
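
/*
 * Added note: in this paravirtualised tree the flushes above are not
 * issued with native invlpg/cr3 writes; xen_tlb_flush() and xen_invlpg()
 * ask the hypervisor to perform the flush through its MMU hypercall
 * interface.  As on native i386, a range flush simply falls back to a
 * full flush of the current address space.
 */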
#else
#error no kernel SMP support yet...
#include <asm/smp.h>

#define local_flush_tlb() \
    __flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb() flush_tlb_current_task()

static inline void flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
    flush_tlb_mm(mm);
}
#define TLBSTATE_OK    1
#define TLBSTATE_LAZY  2

struct tlb_state
{
    struct mm_struct *active_mm;
    int state;
} ____cacheline_aligned;
extern struct tlb_state cpu_tlbstate[NR_CPUS];

#endif /* CONFIG_SMP */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
    /* i386 does not keep any page table caches in TLB */
}
/*
 * NB. The 'domid' field should be zero if mapping I/O space (non-RAM).
 * Otherwise it identifies the owner of the memory that is being mapped.
 */
extern int direct_remap_area_pages(struct mm_struct *mm,
                                   unsigned long address,
                                   unsigned long machine_addr,
                                   unsigned long size,
                                   pgprot_t prot,
                                   domid_t domid);
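
/*
 * Added note (illustrative, not from the original header): this is the
 * hook privileged code uses to map raw machine frames -- its own, another
 * domain's, or real I/O memory -- into a process address space, roughly:
 *
 *     rc = direct_remap_area_pages(current->mm, vaddr, maddr, size,
 *                                  PAGE_SHARED, dom);
 *
 * where 'dom' is zero for real I/O space (per the comment above) and the
 * owning domain's id otherwise; 'vaddr', 'maddr', 'size' and the
 * PAGE_SHARED protection are placeholder values.
 * __direct_remap_area_pages() below appears to be the lower-level helper
 * that works from a pre-built array of mmu_update_t requests.
 */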
extern int __direct_remap_area_pages(struct mm_struct *mm,
                                     unsigned long address,
                                     unsigned long size,
                                     mmu_update_t *v);

#endif /* _I386_PGALLOC_H */