linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgalloc.h @ 8534:da7873110bbb

Tiny bootstrap cleanup.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Mon Jan 09 19:46:46 2006 +0100 (2006-01-09)
parents 06d84bf87159
children 4be553bcfb0f

#ifndef _X86_64_PGALLOC_H
#define _X86_64_PGALLOC_H

#include <asm/fixmap.h>
#include <asm/pda.h>
#include <linux/threads.h>
#include <linux/mm.h>
#include <asm/io.h>        /* for phys_to_virt and page_to_pseudophys */
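
/*
 * On a Xen PV guest, page-table pages that are in active use must not
 * be mapped writable anywhere.  These helpers flip the writable bit on
 * the kernel mapping of one or more pages accordingly.
 */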
void make_page_readonly(void *va);
void make_page_writable(void *va);
void make_pages_readonly(void *va, unsigned int nr);
void make_pages_writable(void *va, unsigned int nr);
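
/*
 * x86-64 Xen keeps separate top-level page tables for kernel and user
 * mode.  pgd_alloc() below allocates two contiguous pages; __user_pgd()
 * addresses the user-mode PGD in the page that follows the kernel PGD.
 */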
#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
        set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
}
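
/*
 * For an mm whose page tables are pinned (already registered with Xen
 * as page tables), a newly added page-table page must first have its
 * kernel mapping made read-only before it is hooked in, since Xen does
 * not allow writable mappings of active page tables.  Unpinned mms can
 * be updated with a plain store.  The same pattern is used by
 * pud_populate() and pgd_populate() below.
 */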
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
        if (unlikely((mm)->context.pinned)) {
                BUG_ON(HYPERVISOR_update_va_mapping(
                               (unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT),
                               pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));
                set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
        } else {
                *(pmd) = __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT));
        }
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
        if (unlikely((mm)->context.pinned)) {
                BUG_ON(HYPERVISOR_update_va_mapping(
                               (unsigned long)pmd,
                               pfn_pte(virt_to_phys(pmd) >> PAGE_SHIFT,
                                       PAGE_KERNEL_RO), 0));
                set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
        } else {
                *(pud) = __pud(_PAGE_TABLE | __pa(pmd));
        }
}

/*
 * We would need batch mode to make this efficient, but pgd_populate()
 * won't be called frequently.  Note that the pud is installed in both
 * the kernel and the user PGD.
 */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
        if (unlikely((mm)->context.pinned)) {
                BUG_ON(HYPERVISOR_update_va_mapping(
                               (unsigned long)pud,
                               pfn_pte(virt_to_phys(pud) >> PAGE_SHIFT,
                                       PAGE_KERNEL_RO), 0));
                set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
                set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud)));
        } else {
                *(pgd) = __pgd(_PAGE_TABLE | __pa(pud));
                *(__user_pgd(pgd)) = *(pgd);
        }
}
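
/*
 * When a page-table page is freed, its kernel mapping may still be
 * read-only (it belonged to a pinned address space).  pte_write() on
 * that mapping tells us whether it has to be made writable again
 * before the page is returned to the allocator.
 */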
extern __inline__ void pmd_free(pmd_t *pmd)
{
        pte_t *ptep = virt_to_ptep(pmd);

        if (!pte_write(*ptep)) {
                BUG_ON(HYPERVISOR_update_va_mapping(
                               (unsigned long)pmd,
                               pfn_pte(virt_to_phys(pmd) >> PAGE_SHIFT, PAGE_KERNEL),
                               0));
        }
        free_page((unsigned long)pmd);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
        return pmd;
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        pud_t *pud = (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
        return pud;
}

static inline void pud_free(pud_t *pud)
{
        pte_t *ptep = virt_to_ptep(pud);

        if (!pte_write(*ptep)) {
                BUG_ON(HYPERVISOR_update_va_mapping(
                               (unsigned long)pud,
                               pfn_pte(virt_to_phys(pud) >> PAGE_SHIFT, PAGE_KERNEL),
                               0));
        }
        free_page((unsigned long)pud);
}
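
/*
 * pgd_alloc() returns the kernel PGD; the user PGD occupies the page
 * immediately after it.  Kernel entries are copied from
 * init_level4_pgt, while the user PGD starts out empty apart from the
 * vsyscall mapping.
 */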
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        /*
         * We allocate two contiguous pages for kernel and user.
         */
        unsigned boundary;
        pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);

        if (!pgd)
                return NULL;
        /*
         * Copy kernel pointers in from init.
         * Could keep a freelist or slab cache of those because the kernel
         * part never changes.
         */
        boundary = pgd_index(__PAGE_OFFSET);
        memset(pgd, 0, boundary * sizeof(pgd_t));
        memcpy(pgd + boundary,
               init_level4_pgt + boundary,
               (PTRS_PER_PGD - boundary) * sizeof(pgd_t));

        memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
        /*
         * Set level3_user_pgt for vsyscall area
         */
        set_pgd(__user_pgd(pgd) + pgd_index(VSYSCALL_START),
                mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
        return pgd;
}
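
/*
 * A PGD whose kernel mapping is read-only is still pinned: unpin both
 * the kernel and the user half from Xen and restore them to writable
 * before releasing the two-page allocation.
 */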
static inline void pgd_free(pgd_t *pgd)
{
        pte_t *ptep = virt_to_ptep(pgd);

        if (!pte_write(*ptep)) {
                xen_pgd_unpin(__pa(pgd));
                BUG_ON(HYPERVISOR_update_va_mapping(
                               (unsigned long)pgd,
                               pfn_pte(virt_to_phys(pgd) >> PAGE_SHIFT, PAGE_KERNEL),
                               0));
        }

        ptep = virt_to_ptep(__user_pgd(pgd));

        if (!pte_write(*ptep)) {
                xen_pgd_unpin(__pa(__user_pgd(pgd)));
                BUG_ON(HYPERVISOR_update_va_mapping(
                               (unsigned long)__user_pgd(pgd),
                               pfn_pte(virt_to_phys(__user_pgd(pgd)) >> PAGE_SHIFT,
                                       PAGE_KERNEL),
                               0));
        }

        free_pages((unsigned long)pgd, 1);
}
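
/*
 * Kernel PTE pages are made read-only as soon as they are allocated;
 * user PTE pages (pte_alloc_one() below) are left writable here and
 * are converted later, e.g. when pmd_populate() hooks them into a
 * pinned mm.
 */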
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
        if (pte)
                make_page_readonly(pte);

        return pte;
}

static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
        return pte;
}

/* Should really implement gc for free page table pages. This could be
   done with a reference count in struct page. */

extern __inline__ void pte_free_kernel(pte_t *pte)
{
        BUG_ON((unsigned long)pte & (PAGE_SIZE - 1));
        xen_pte_unpin(__pa(pte));
        make_page_writable(pte);
        free_page((unsigned long)pte);
}

extern void pte_free(struct page *pte);
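
/*
 * The generic tlb_remove_page() batching (the disabled definitions
 * below) is bypassed: page-table pages go straight through the
 * pte/pmd/pud_free() routines so that their mappings can be made
 * writable again before the pages are released.
 */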
//#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
//#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
//#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))

#define __pte_free_tlb(tlb,x)   pte_free((x))
#define __pmd_free_tlb(tlb,x)   pmd_free((x))
#define __pud_free_tlb(tlb,x)   pud_free((x))

#endif /* _X86_64_PGALLOC_H */