old/xenolinux-2.4.16-sparse/include/asm-xeno/pgalloc.h @ 235:d7d0a23b2e07

bitkeeper revision 1.93 (3e5a4e6bkPheUp3x1uufN2MS3LAB7A)

Latest and Greatest version of XenoLinux based on the Linux-2.4.21-pre4 kernel.

author  iap10@labyrinth.cl.cam.ac.uk
date    Mon Feb 24 16:55:07 2003 +0000

#ifndef _I386_PGALLOC_H
#define _I386_PGALLOC_H

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
#include <linux/threads.h>

/*
 * Quick lists are aligned so that least significant bits of array pointer
 * are all zero when list is empty, and all one when list is full.
 */
#define QUICKLIST_ENTRIES 256
#define QUICKLIST_EMPTY(_l) !((unsigned long)(_l) & ((QUICKLIST_ENTRIES*4)-1))
#define QUICKLIST_FULL(_l)  QUICKLIST_EMPTY((_l)+1)
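
/*
 * Each quicklist is an array of QUICKLIST_ENTRIES (256) four-byte entries,
 * i.e. 1KB, kept 1KB-aligned.  The list pointer (pgd_quicklist etc. below)
 * walks up the array as entries are pushed, so an empty list leaves it on
 * the 1KB boundary (low ten bits clear), and QUICKLIST_FULL() triggers as
 * soon as one more push would carry the pointer onto the next boundary,
 * where it would look empty again.
 */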
#define pgd_quicklist       (current_cpu_data.pgd_quick)
#define pmd_quicklist       (current_cpu_data.pmd_quick)
#define pte_quicklist       (current_cpu_data.pte_quick)
#define pgtable_cache_size  (current_cpu_data.pgtable_cache_sz)
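
/*
 * pmd_populate() installs a freshly allocated pte page in the page
 * directory.  On XenoLinux the write is not performed directly: set_pmd()
 * is assumed to append the new entry to Xen's page-update queue, and
 * XENO_flush_page_update_queue() then hands the queued updates to the
 * hypervisor so the mapping takes effect before the caller continues.
 */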
#define pmd_populate(mm, pmd, pte)                        \
    do {                                                  \
        set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)));     \
        XENO_flush_page_update_queue();                   \
    } while ( 0 )
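
/*
 * Allocating a pgd the slow way: the user half of the new page is zeroed,
 * the kernel half is copied from init_mm, the kernel pte mapping the page
 * has _PAGE_RW cleared via a queued l1 update, and the page is then pinned
 * as a page directory with queue_pgd_pin().  Xen validates pinned page
 * tables and will not accept them while the guest still holds a writable
 * mapping, so all later modifications have to go through the update queue.
 */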
static __inline__ pgd_t *get_pgd_slow(void)
{
    pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
    pgd_t *kpgd;
    pmd_t *kpmd;
    pte_t *kpte;

    if (pgd) {
        memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
        memcpy(pgd + USER_PTRS_PER_PGD,
               init_mm.pgd + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        kpgd = pgd_offset_k((unsigned long)pgd);
        kpmd = pmd_offset(kpgd, (unsigned long)pgd);
        kpte = pte_offset(kpmd, (unsigned long)pgd);
        queue_l1_entry_update(__pa(kpte), (*(unsigned long *)kpte)&~_PAGE_RW);
        queue_pgd_pin(__pa(pgd));
    }

    return pgd;
}
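
/*
 * Freeing a pgd reverses the sequence above: unpin the page directory
 * first, then make the kernel mapping of it writable again, and only then
 * return the page to the allocator.
 */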
static __inline__ void free_pgd_slow(pgd_t *pgd)
{
    pgd_t *kpgd;
    pmd_t *kpmd;
    pte_t *kpte;

    queue_pgd_unpin(__pa(pgd));
    kpgd = pgd_offset_k((unsigned long)pgd);
    kpmd = pmd_offset(kpgd, (unsigned long)pgd);
    kpte = pte_offset(kpmd, (unsigned long)pgd);
    queue_l1_entry_update(__pa(kpte), (*(unsigned long *)kpte)|_PAGE_RW);
    free_page((unsigned long)pgd);
}
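
/*
 * The fast paths below recycle page-table pages through the per-CPU
 * quicklists.  A page sitting on a quicklist has not been unpinned and its
 * kernel mapping is still read-only, so reusing it avoids another round
 * trip through the hypervisor.
 */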
static __inline__ pgd_t *get_pgd_fast(void)
{
    unsigned long ret;

    if ( !QUICKLIST_EMPTY(pgd_quicklist) ) {
        ret = *(--pgd_quicklist);
        pgtable_cache_size--;
    } else
        ret = (unsigned long)get_pgd_slow();
    return (pgd_t *)ret;
}

static __inline__ void free_pgd_fast(pgd_t *pgd)
{
    if ( !QUICKLIST_FULL(pgd_quicklist) ) {
        *(pgd_quicklist++) = (unsigned long)pgd;
        pgtable_cache_size++;
    } else
        free_pgd_slow(pgd);
}
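
/*
 * pte pages follow the same protocol as page directories: clear the page,
 * write-protect the kernel mapping of it via a queued l1 update, then pin
 * it with queue_pte_pin() so the hypervisor treats it as an L1 page table.
 */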
static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
    pte_t *pte;
    pgd_t *kpgd;
    pmd_t *kpmd;
    pte_t *kpte;

    pte = (pte_t *)__get_free_page(GFP_KERNEL);
    if (pte)
    {
        clear_page(pte);
        kpgd = pgd_offset_k((unsigned long)pte);
        kpmd = pmd_offset(kpgd, (unsigned long)pte);
        kpte = pte_offset(kpmd, (unsigned long)pte);
        queue_l1_entry_update(__pa(kpte), (*(unsigned long *)kpte)&~_PAGE_RW);
        queue_pte_pin(__pa(pte));
    }
    return pte;
}
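
/*
 * pte_free_slow() undoes the above: unpin the page, restore _PAGE_RW on
 * the kernel pte that maps it, and release the page.
 */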
static __inline__ void pte_free_slow(pte_t *pte)
{
    pgd_t *kpgd;
    pmd_t *kpmd;
    pte_t *kpte;

    queue_pte_unpin(__pa(pte));
    kpgd = pgd_offset_k((unsigned long)pte);
    kpmd = pmd_offset(kpgd, (unsigned long)pte);
    kpte = pte_offset(kpmd, (unsigned long)pte);
    queue_l1_entry_update(__pa(kpte), (*(unsigned long *)kpte)|_PAGE_RW);
    free_page((unsigned long)pte);
}
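
/*
 * pte_alloc_one_fast() returns NULL when the quicklist is empty; the
 * generic mm code is then expected to fall back to pte_alloc_one().
 */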
static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
    unsigned long ret = 0;

    if ( !QUICKLIST_EMPTY(pte_quicklist) ) {
        ret = *(--pte_quicklist);
        pgtable_cache_size--;
    }
    return (pte_t *)ret;
}

static __inline__ void pte_free_fast(pte_t *pte)
{
    if ( !QUICKLIST_FULL(pte_quicklist) ) {
        *(pte_quicklist++) = (unsigned long)pte;
        pgtable_cache_size++;
    } else
        pte_free_slow(pte);
}

#define pte_free(pte)       pte_free_fast(pte)
#define pgd_alloc(mm)       get_pgd_fast()
#define pgd_free(pgd)       free_pgd_fast(pgd)

#define pmd_alloc_one_fast(mm, addr)  ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, addr)       ({ BUG(); ((pmd_t *)2); })
#define pmd_free_slow(x)              do { } while (0)
#define pmd_free_fast(x)              do { } while (0)
#define pmd_free(x)                   do { } while (0)
#define pgd_populate(mm, pmd, pte)    BUG()
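
/*
 * Without PAE the i386 uses a two-level page table, so the pmd level is
 * folded into the pgd and there is nothing to allocate or free here.  The
 * BUG() stubs and dummy return values above only catch callers that reach
 * them by mistake.
 */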

extern int do_check_pgt_cache(int, int);

/*
 * - flush_tlb() flushes the current mm struct TLBs
 * - flush_tlb_all() flushes all processes' TLBs
 * - flush_tlb_mm(mm) flushes the specified mm context TLBs
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(mm, start, end) flushes a range of pages
 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */

#define flush_tlb()       __flush_tlb()
#define flush_tlb_all()   __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()
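
/*
 * The helpers below only request a TLB flush (or a single-page invlpg)
 * when the mm being operated on is the currently active one, and in every
 * case they flush the page-update queue so that queued page-table changes
 * reach the hypervisor before the caller continues.  Note that
 * flush_tlb_range() simply flushes the whole TLB rather than iterating
 * over the range.
 */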
static inline void flush_tlb_mm(struct mm_struct *mm)
{
    if ( mm == current->active_mm ) queue_tlb_flush();
    XENO_flush_page_update_queue();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
    if ( vma->vm_mm == current->active_mm ) queue_invlpg(addr);
    XENO_flush_page_update_queue();
}

static inline void flush_tlb_range(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
{
    if ( mm == current->active_mm ) queue_tlb_flush();
    XENO_flush_page_update_queue();
}

static inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
    /* i386 does not keep any page table caches in TLB */
    XENO_flush_page_update_queue();
}

#endif /* _I386_PGALLOC_H */