ia64/xen-unstable

linux-2.6-xen-sparse/arch/x86_64/mm/pageattr-xen.c @ 8785:aefd8b8c6b1f

Since USER_PTRS_PER_PGD depends on TASK_SIZE and thus on the current
thread, it must not be used in code that can be called in the context
switch path (otherwise the 32-bitness of the outgoing task is used
here rather than the 32-bitness of the incoming one, possibly
resulting in large parts of the page tables not getting converted to
read-only). A short sketch of the difference follows the changeset
metadata below.

Signed-off-by: Jan Beulich <JBeulich@novell.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Feb 07 19:16:11 2006 +0100 (2006-02-07)
parents 2494b4e00cbb
children f00e257d200c
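
To make the difference concrete, here is a small stand-alone sketch of the
two loop bounds. It only models the relevant macros: the constants, the
task_size() helper and the bound formulas below are illustrative assumptions
approximating the x86_64 headers of this era, not the kernel's actual
definitions.

/* Stand-alone illustration; constants and definitions are assumptions, not kernel code. */
#include <stdio.h>

#define PGDIR_SHIFT       39
#define PGDIR_SIZE        (1ULL << PGDIR_SHIFT)
#define TASK_SIZE64       ((1ULL << 47) - 4096)  /* 64-bit user VA limit (approx.) */
#define IA32_PAGE_OFFSET  0xc0000000ULL          /* 32-bit compat user VA limit (approx.) */

/* Models how TASK_SIZE follows the 32-bitness of whichever task is 'current'. */
static unsigned long long task_size(int current_is_32bit)
{
        return current_is_32bit ? IA32_PAGE_OFFSET : TASK_SIZE64;
}

int main(void)
{
        /*
         * A USER_PTRS_PER_PGD-style bound, derived from the current thread:
         * walking a 64-bit task's pagetables while a 32-bit task is current
         * would cover only the first PGD entry.
         */
        unsigned long long per_thread_64 = (task_size(0) - 1) / PGDIR_SIZE + 1;
        unsigned long long per_thread_32 = (task_size(1) - 1) / PGDIR_SIZE + 1;

        /* The bound mm_walk() uses after this change: fixed, thread independent. */
        unsigned long long fixed = (TASK_SIZE64 - 1) / PGDIR_SIZE + 1;

        printf("PGD entries walked, 64-bit current: %llu\n", per_thread_64); /* 256 */
        printf("PGD entries walked, 32-bit current: %llu\n", per_thread_32); /*   1 */
        printf("PGD entries walked, fixed bound:    %llu\n", fixed);         /* 256 */
        return 0;
}

With a 32-bit task current, a TASK_SIZE-derived bound covers only the first
PGD entry of a 64-bit task's pagetables; the TASK_SIZE64-based bound used by
mm_walk() below always covers the full 64-bit user range.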
line source
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#ifdef CONFIG_XEN
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

/* All mms whose pagetables are not currently pinned by Xen. */
LIST_HEAD(mm_unpinned);
DEFINE_SPINLOCK(mm_unpinned_lock);

/* Remap a single pagetable page in the direct mapping with 'flags'. */
static inline void mm_walk_set_prot(void *pt, pgprot_t flags)
{
        struct page *page = virt_to_page(pt);
        unsigned long pfn = page_to_pfn(page);

        BUG_ON(HYPERVISOR_update_va_mapping(
                (unsigned long)__va(pfn << PAGE_SHIFT),
                pfn_pte(pfn, flags), 0));
}
/*
 * Walk every pagetable page of 'mm' (PUD, PMD and PTE levels) and remap it
 * in the direct mapping with 'flags' (PAGE_KERNEL_RO before pinning,
 * PAGE_KERNEL after unpinning).
 */
static void mm_walk(struct mm_struct *mm, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int g, u, m;

        pgd = mm->pgd;
        /*
         * Cannot iterate up to USER_PTRS_PER_PGD as these pagetables may not
         * be the 'current' task's pagetables (e.g., current may be 32-bit,
         * but the pagetables may be for a 64-bit task).
         * Subtracting 1 from TASK_SIZE64 means the loop limit is correct
         * regardless of whether TASK_SIZE64 is a multiple of PGDIR_SIZE.
         */
        for (g = 0; g <= ((TASK_SIZE64 - 1) / PGDIR_SIZE); g++, pgd++) {
                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, 0);
                if (PTRS_PER_PUD > 1) /* not folded */
                        mm_walk_set_prot(pud, flags);
                for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
                        if (pud_none(*pud))
                                continue;
                        pmd = pmd_offset(pud, 0);
                        if (PTRS_PER_PMD > 1) /* not folded */
                                mm_walk_set_prot(pmd, flags);
                        for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
                                if (pmd_none(*pmd))
                                        continue;
                                pte = pte_offset_kernel(pmd, 0);
                                mm_walk_set_prot(pte, flags);
                        }
                }
        }
}
/*
 * Make all of 'mm's pagetables read-only and ask Xen to pin them, so the
 * hypervisor validates them and polices every further update.
 */
void mm_pin(struct mm_struct *mm)
{
        spin_lock(&mm->page_table_lock);

        mm_walk(mm, PAGE_KERNEL_RO);
        BUG_ON(HYPERVISOR_update_va_mapping(
                (unsigned long)mm->pgd,
                pfn_pte(virt_to_phys(mm->pgd) >> PAGE_SHIFT, PAGE_KERNEL_RO),
                UVMF_TLB_FLUSH));
        BUG_ON(HYPERVISOR_update_va_mapping(
                (unsigned long)__user_pgd(mm->pgd),
                pfn_pte(virt_to_phys(__user_pgd(mm->pgd)) >> PAGE_SHIFT,
                        PAGE_KERNEL_RO),
                UVMF_TLB_FLUSH));
        xen_pgd_pin(__pa(mm->pgd));             /* kernel */
        xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
        mm->context.pinned = 1;
        spin_lock(&mm_unpinned_lock);
        list_del(&mm->context.unpinned);
        spin_unlock(&mm_unpinned_lock);

        spin_unlock(&mm->page_table_lock);
}
/* Undo mm_pin(): unpin the PGDs and make the pagetables writable again. */
void mm_unpin(struct mm_struct *mm)
{
        spin_lock(&mm->page_table_lock);

        xen_pgd_unpin(__pa(mm->pgd));
        xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
        BUG_ON(HYPERVISOR_update_va_mapping(
                (unsigned long)mm->pgd,
                pfn_pte(virt_to_phys(mm->pgd) >> PAGE_SHIFT, PAGE_KERNEL), 0));
        BUG_ON(HYPERVISOR_update_va_mapping(
                (unsigned long)__user_pgd(mm->pgd),
                pfn_pte(virt_to_phys(__user_pgd(mm->pgd)) >> PAGE_SHIFT,
                        PAGE_KERNEL), 0));
        mm_walk(mm, PAGE_KERNEL);
        xen_tlb_flush();
        mm->context.pinned = 0;
        spin_lock(&mm_unpinned_lock);
        list_add(&mm->context.unpinned, &mm_unpinned);
        spin_unlock(&mm_unpinned_lock);

        spin_unlock(&mm->page_table_lock);
}

/* Pin every mm that is still on the unpinned list. */
void mm_pin_all(void)
{
        while (!list_empty(&mm_unpinned))
                mm_pin(list_entry(mm_unpinned.next, struct mm_struct,
                                  context.unpinned));
}
void _arch_exit_mmap(struct mm_struct *mm)
{
        struct task_struct *tsk = current;

        task_lock(tsk);

        /*
         * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
         * *much* faster this way, as no tlb flushes means bigger wrpt batches.
         */
        if (tsk->active_mm == mm) {
                tsk->active_mm = &init_mm;
                atomic_inc(&init_mm.mm_count);

                switch_mm(mm, &init_mm, tsk);

                atomic_dec(&mm->mm_count);
                BUG_ON(atomic_read(&mm->mm_count) == 0);
        }

        task_unlock(tsk);

        if (mm->context.pinned && (atomic_read(&mm->mm_count) == 1))
                mm_unpin(mm);
}
/*
 * Free a pte page, first restoring it to writable in the direct mapping
 * if it had been made read-only for pinning.
 */
void pte_free(struct page *pte)
{
        unsigned long va = (unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT);

        if (!pte_write(*virt_to_ptep(va)))
                BUG_ON(HYPERVISOR_update_va_mapping(
                        va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
        __free_page(pte);
}
#endif /* CONFIG_XEN */
/*
 * Find the pte for a kernel virtual address; for a 2MB mapping the PMD
 * entry itself is returned (cast to pte_t *).
 */
static inline pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        pte = pte_offset_kernel(pmd, address);
        if (pte && !pte_present(*pte))
                pte = NULL;
        return pte;
}
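
/*
 * Illustrative sketch only: a hypothetical helper (not in the original file)
 * showing how lookup_address() above would typically be consumed, including
 * its convention of returning the PMD entry for a 2MB mapping.
 */
static inline int example_addr_uses_large_page(unsigned long address)
{
        pte_t *pte = lookup_address(address);

        /* For a 2MB mapping the returned entry carries the PSE bit. */
        return pte && (pte_val(*pte) & _PAGE_PSE);
}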
/*
 * Allocate a pte page covering the 2/4MB region around 'address', giving
 * the target page 'prot' and every other page 'ref_prot'.
 */
static struct page *split_large_page(unsigned long address, pgprot_t prot,
                                     pgprot_t ref_prot)
{
        int i;
        unsigned long addr;
        struct page *base = alloc_pages(GFP_KERNEL, 0);
        pte_t *pbase;

        if (!base)
                return NULL;
        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : ref_prot);
        }
        return base;
}
static void flush_kernel_map(void *address)
{
        if (0 && address && cpu_has_clflush) {
                /* is this worth it? */
                int i;
                for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                        asm volatile("clflush (%0)" :: "r" (address + i));
        } else
                asm volatile("wbinvd" ::: "memory");
        if (address)
                __flush_tlb_one(address);
        else
                __flush_tlb_all();
}

static inline void flush_map(unsigned long address)
{
        on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
}
/*
 * Pages freed by revert_page() cannot be released until the TLBs have been
 * flushed; they are queued here until global_flush_tlb().
 */
struct deferred_page {
        struct deferred_page *next;
        struct page *fpage;
        unsigned long address;
};
static struct deferred_page *df_list; /* protected by init_mm.mmap_sem */

static inline void save_page(unsigned long address, struct page *fpage)
{
        struct deferred_page *df;

        df = kmalloc(sizeof(struct deferred_page), GFP_KERNEL);
        if (!df) {
                /* No memory to defer: flush and free immediately. */
                flush_map(address);
                __free_page(fpage);
        } else {
                df->next = df_list;
                df->fpage = fpage;
                df->address = address;
                df_list = df;
        }
}
/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t large_pte;

        pgd = pgd_offset_k(address);
        BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, address);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
        BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
        pgprot_val(ref_prot) |= _PAGE_PSE;
        large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
        set_pte((pte_t *)pmd, large_pte);
}
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                   pgprot_t ref_prot)
{
        pte_t *kpte;
        struct page *kpte_page;
        unsigned kpte_flags;
        pgprot_t ref_prot2;

        kpte = lookup_address(address);
        if (!kpte)
                return 0;
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        kpte_flags = pte_val(*kpte);
        if (pgprot_val(prot) != pgprot_val(ref_prot)) {
                if ((kpte_flags & _PAGE_PSE) == 0) {
                        set_pte(kpte, pfn_pte(pfn, prot));
                } else {
                        /*
                         * split_large_page will take the reference for this
                         * change_page_attr on the split page.
                         */
                        struct page *split;

                        ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address)))
                                             & ~(1 << _PAGE_BIT_PSE));
                        split = split_large_page(address, prot, ref_prot2);
                        if (!split)
                                return -ENOMEM;
                        set_pte(kpte, mk_pte(split, ref_prot2));
                        kpte_page = split;
                }
                get_page(kpte_page);
        } else if ((kpte_flags & _PAGE_PSE) == 0) {
                set_pte(kpte, pfn_pte(pfn, ref_prot));
                __put_page(kpte_page);
        } else
                BUG();

        /* on x86-64 the direct mapping set at boot is not using 4k pages */
        /*
         * ..., but the XEN guest kernels (currently) do:
         * If the pte was reserved, it means it was created at boot
         * time (not via split_large_page) and in turn we must not
         * replace it with a large page.
         */
#ifndef CONFIG_XEN
        BUG_ON(PageReserved(kpte_page));
#else
        if (!PageReserved(kpte_page))
#endif
                switch (page_count(kpte_page)) {
                case 1:
                        save_page(address, kpte_page);
                        revert_page(address, ref_prot);
                        break;
                case 0:
                        BUG(); /* memleak and failed 2M page regeneration */
                }
        return 0;
}
/*
 * Change the page attributes of a page in the kernel linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
        int err = 0;
        int i;

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;

                err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
                if (err)
                        break;
                /*
                 * Handle the kernel text mapping too, which aliases part of
                 * lowmem.
                 */
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2 = prot;

                        addr2 = __START_KERNEL_map + __pa(address);
                        pgprot_val(prot2) &= ~_PAGE_NX;
                        err = __change_page_attr(addr2, pfn, prot2,
                                                 PAGE_KERNEL_EXEC);
                }
        }
        up_write(&init_mm.mmap_sem);
        return err;
}
/* Don't call this for MMIO areas that may not have a mem_map entry. */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long addr = (unsigned long)page_address(page);

        return change_page_attr_addr(addr, numpages, prot);
}

/*
 * Flush the caches and TLBs on all CPUs and release any pte pages whose
 * freeing was deferred by __change_page_attr().
 */
void global_flush_tlb(void)
{
        struct deferred_page *df, *next_df;

        down_read(&init_mm.mmap_sem);
        df = xchg(&df_list, NULL);
        up_read(&init_mm.mmap_sem);
        flush_map((df && !df->next) ? df->address : 0);
        for (; df; df = next_df) {
                next_df = df->next;
                if (df->fpage)
                        __free_page(df->fpage);
                kfree(df);
        }
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);
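
For reference, a minimal usage sketch of the interface exported above,
honouring the "caller must call global_flush_tlb()" rule documented on
change_page_attr_addr(). The helper names, the choice of PAGE_KERNEL_NOCACHE,
the asm/cacheflush.h include and the error handling are illustrative
assumptions, not code from this tree.

/* Hypothetical caller, sketched for illustration only. */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/cacheflush.h>     /* change_page_attr(), global_flush_tlb() */

static struct page *example_alloc_uncached_page(void)
{
        struct page *page = alloc_page(GFP_KERNEL);

        if (!page)
                return NULL;

        /* Remap the page uncached in the kernel linear mapping. */
        if (change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)) {
                __free_page(page);
                return NULL;
        }
        /* Required after change_page_attr(); flushes all CPUs. */
        global_flush_tlb();
        return page;
}

static void example_free_uncached_page(struct page *page)
{
        /* Restore the default attributes before the page is reused. */
        change_page_attr(page, 1, PAGE_KERNEL);
        global_flush_tlb();
        __free_page(page);
}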