direct-io.hg: linux-2.6-xen-sparse/arch/x86_64/mm/pageattr-xen.c @ 11443:af50fb41612c

[IA64] Expand memory_op for PV-on-HVM on IPF

Signed-off-by: Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com>
Signed-off-by: Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>

author:   awilliam@xenbuild.aw
date:     Fri Sep 01 08:46:02 2006 -0600
parents:  e351aace191e
children: 7c5c72a0283f 4fad820a2233
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#ifdef CONFIG_XEN
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

LIST_HEAD(mm_unpinned);
DEFINE_SPINLOCK(mm_unpinned_lock);
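
/*
 * Remap a single pagetable page in the 1:1 direct mapping with the given
 * protection, via a Xen update_va_mapping hypercall.
 */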
static inline void mm_walk_set_prot(void *pt, pgprot_t flags)
{
        struct page *page = virt_to_page(pt);
        unsigned long pfn = page_to_pfn(page);

        BUG_ON(HYPERVISOR_update_va_mapping(
                (unsigned long)__va(pfn << PAGE_SHIFT),
                pfn_pte(pfn, flags), 0));
}
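
/*
 * Walk every pagetable page (PUD, PMD and PTE pages) reachable from
 * mm->pgd and change its direct-mapping protection to 'flags'.
 */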
static void mm_walk(struct mm_struct *mm, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int    g, u, m;

        pgd = mm->pgd;
        /*
         * Cannot iterate up to USER_PTRS_PER_PGD as these pagetables may not
         * be the 'current' task's pagetables (e.g., current may be 32-bit,
         * but the pagetables may be for a 64-bit task).
         * Subtracting 1 from TASK_SIZE64 means the loop limit is correct
         * regardless of whether TASK_SIZE64 is a multiple of PGDIR_SIZE.
         */
        for (g = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, 0);
                if (PTRS_PER_PUD > 1) /* not folded */
                        mm_walk_set_prot(pud, flags);
                for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
                        if (pud_none(*pud))
                                continue;
                        pmd = pmd_offset(pud, 0);
                        if (PTRS_PER_PMD > 1) /* not folded */
                                mm_walk_set_prot(pmd, flags);
                        for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
                                if (pmd_none(*pmd))
                                        continue;
                                pte = pte_offset_kernel(pmd, 0);
                                mm_walk_set_prot(pte, flags);
                        }
                }
        }
}
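
/*
 * Pin an mm's pagetables: make every pagetable page read-only in the
 * direct mapping, ask Xen to pin the kernel and user PGDs, and take the
 * mm off the unpinned list.  mm_unpin() below reverses each step.
 */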
void mm_pin(struct mm_struct *mm)
{
        if (xen_feature(XENFEAT_writable_page_tables))
                return;

        spin_lock(&mm->page_table_lock);

        mm_walk(mm, PAGE_KERNEL_RO);
        BUG_ON(HYPERVISOR_update_va_mapping(
                (unsigned long)mm->pgd,
                pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO),
                UVMF_TLB_FLUSH));
        BUG_ON(HYPERVISOR_update_va_mapping(
                (unsigned long)__user_pgd(mm->pgd),
                pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT,
                        PAGE_KERNEL_RO),
                UVMF_TLB_FLUSH));
        xen_pgd_pin(__pa(mm->pgd));             /* kernel */
        xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
        mm->context.pinned = 1;
        spin_lock(&mm_unpinned_lock);
        list_del(&mm->context.unpinned);
        spin_unlock(&mm_unpinned_lock);

        spin_unlock(&mm->page_table_lock);
}

void mm_unpin(struct mm_struct *mm)
{
        if (xen_feature(XENFEAT_writable_page_tables))
                return;

        spin_lock(&mm->page_table_lock);

        xen_pgd_unpin(__pa(mm->pgd));
        xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
        BUG_ON(HYPERVISOR_update_va_mapping(
                (unsigned long)mm->pgd,
                pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0));
        BUG_ON(HYPERVISOR_update_va_mapping(
                (unsigned long)__user_pgd(mm->pgd),
                pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT,
                        PAGE_KERNEL), 0));
        mm_walk(mm, PAGE_KERNEL);
        xen_tlb_flush();
        mm->context.pinned = 0;
        spin_lock(&mm_unpinned_lock);
        list_add(&mm->context.unpinned, &mm_unpinned);
        spin_unlock(&mm_unpinned_lock);

        spin_unlock(&mm->page_table_lock);
}
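
/* Pin every mm still on the unpinned list. */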
void mm_pin_all(void)
{
        if (xen_feature(XENFEAT_writable_page_tables))
                return;

        while (!list_empty(&mm_unpinned))
                mm_pin(list_entry(mm_unpinned.next, struct mm_struct,
                                  context.unpinned));
}
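
/* Called when an mm is duplicated (fork): pin it if not already pinned. */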
void _arch_dup_mmap(struct mm_struct *mm)
{
        if (!mm->context.pinned)
                mm_pin(mm);
}

void _arch_exit_mmap(struct mm_struct *mm)
{
        struct task_struct *tsk = current;

        task_lock(tsk);

        /*
         * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
         * *much* faster this way, as no tlb flushes means bigger wrpt batches.
         */
        if (tsk->active_mm == mm) {
                tsk->active_mm = &init_mm;
                atomic_inc(&init_mm.mm_count);

                switch_mm(mm, &init_mm, tsk);

                atomic_dec(&mm->mm_count);
                BUG_ON(atomic_read(&mm->mm_count) == 0);
        }

        task_unlock(tsk);

        if (mm->context.pinned && (atomic_read(&mm->mm_count) == 1) &&
            !mm->context.has_foreign_mappings)
                mm_unpin(mm);
}
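
/*
 * Free a pagetable page.  If its direct mapping was left read-only by
 * pinning, restore it to read-write first.
 */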
void pte_free(struct page *pte)
{
        unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);

        if (!pte_write(*virt_to_ptep(va)))
                BUG_ON(HYPERVISOR_update_va_mapping(
                        va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
        __free_page(pte);
}
#endif /* CONFIG_XEN */
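
/*
 * Walk the kernel pagetables for 'address'.  Returns the pte mapping it,
 * the pmd entry itself if the address is covered by a 2MB large page, or
 * NULL if nothing is mapped there.
 */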
static inline pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        pte = pte_offset_kernel(pmd, address);
        if (pte && !pte_present(*pte))
                pte = NULL;
        return pte;
}
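
/*
 * Allocate a pagetable page that replicates a 2MB mapping at 4KB
 * granularity: every pte gets 'ref_prot' except the one covering
 * 'address', which gets 'prot'.
 */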
static struct page *split_large_page(unsigned long address, pgprot_t prot,
                                     pgprot_t ref_prot)
{
        int i;
        unsigned long addr;
        struct page *base = alloc_pages(GFP_KERNEL, 0);
        pte_t *pbase;

        if (!base)
                return NULL;
        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : ref_prot);
        }
        return base;
}
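
/*
 * Per-CPU flush run via on_each_cpu(): write back and invalidate the
 * caches, then flush either the single TLB entry or the whole TLB.
 */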
static void flush_kernel_map(void *address)
{
        if (0 && address && cpu_has_clflush) {
                /* is this worth it? */
                int i;
                for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                        asm volatile("clflush (%0)" :: "r" (address + i));
        } else
                asm volatile("wbinvd":::"memory");
        if (address)
                __flush_tlb_one(address);
        else
                __flush_tlb_all();
}

static inline void flush_map(unsigned long address)
{
        on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
}
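
/*
 * Pagetable pages freed by reverting to a large page are queued here and
 * only released in global_flush_tlb(), after the mappings have been
 * flushed on all CPUs.
 */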
struct deferred_page {
        struct deferred_page *next;
        struct page *fpage;
        unsigned long address;
};
static struct deferred_page *df_list; /* protected by init_mm.mmap_sem */

static inline void save_page(unsigned long address, struct page *fpage)
{
        struct deferred_page *df;

        df = kmalloc(sizeof(struct deferred_page), GFP_KERNEL);
        if (!df) {
                flush_map(address);
                __free_page(fpage);
        } else {
                df->next = df_list;
                df->fpage = fpage;
                df->address = address;
                df_list = df;
        }
}
/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t large_pte;

        pgd = pgd_offset_k(address);
        BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, address);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
        BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
        pgprot_val(ref_prot) |= _PAGE_PSE;
        large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
        set_pte((pte_t *)pmd, large_pte);
}
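
/*
 * Set the attributes of a single kernel-mapped page: either rewrite its
 * pte in place or split the enclosing 2MB page first.  Reverting the last
 * non-default pte of a split page merges it back into a large page.
 */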
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                   pgprot_t ref_prot)
{
        pte_t *kpte;
        struct page *kpte_page;
        unsigned kpte_flags;
        pgprot_t ref_prot2;

        kpte = lookup_address(address);
        if (!kpte)
                return 0;
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        kpte_flags = pte_val(*kpte);
        if (pgprot_val(prot) != pgprot_val(ref_prot)) {
                if ((kpte_flags & _PAGE_PSE) == 0) {
                        set_pte(kpte, pfn_pte(pfn, prot));
                } else {
                        /*
                         * split_large_page will take the reference for this
                         * change_page_attr on the split page.
                         */
                        struct page *split;

                        ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address)))
                                             & ~(1<<_PAGE_BIT_PSE));
                        split = split_large_page(address, prot, ref_prot2);
                        if (!split)
                                return -ENOMEM;
                        set_pte(kpte, mk_pte(split, ref_prot2));
                        kpte_page = split;
                }
                get_page(kpte_page);
        } else if ((kpte_flags & _PAGE_PSE) == 0) {
                set_pte(kpte, pfn_pte(pfn, ref_prot));
                __put_page(kpte_page);
        } else
                BUG();

        /* on x86-64 the direct mapping set at boot is not using 4k pages */
        /*
         * ..., but the XEN guest kernels (currently) do:
         * If the pte was reserved, it means it was created at boot
         * time (not via split_large_page) and in turn we must not
         * replace it with a large page.
         */
#ifndef CONFIG_XEN
        BUG_ON(PageReserved(kpte_page));
#else
        if (!PageReserved(kpte_page))
#endif
                switch (page_count(kpte_page)) {
                case 1:
                        save_page(address, kpte_page);
                        revert_page(address, ref_prot);
                        break;
                case 0:
                        BUG(); /* memleak and failed 2M page regeneration */
                }
        return 0;
}
/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
        int err = 0;
        int i;

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;

                err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
                if (err)
                        break;
                /* Handle the kernel mapping too, which aliases part of
                 * the lowmem. */
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2 = prot;

                        addr2 = __START_KERNEL_map + __pa(address);
                        pgprot_val(prot2) &= ~_PAGE_NX;
                        err = __change_page_attr(addr2, pfn, prot2,
                                                 PAGE_KERNEL_EXEC);
                }
        }
        up_write(&init_mm.mmap_sem);
        return err;
}
/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long addr = (unsigned long)page_address(page);

        return change_page_attr_addr(addr, numpages, prot);
}
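
/*
 * Flush the deferred work: atomically detach df_list, flush caches and
 * TLBs on all CPUs, then free the pagetable pages queued by save_page().
 */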
void global_flush_tlb(void)
{
        struct deferred_page *df, *next_df;

        down_read(&init_mm.mmap_sem);
        df = xchg(&df_list, NULL);
        up_read(&init_mm.mmap_sem);
        flush_map((df && !df->next) ? df->address : 0);
        for (; df; df = next_df) {
                next_df = df->next;
                if (df->fpage)
                        __free_page(df->fpage);
                kfree(df);
        }
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);
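
/*
 * Usage sketch (illustrative only, not part of this file): as the comment
 * above change_page_attr_addr() notes, a caller changing the caching
 * attributes of kernel pages pairs change_page_attr() with a subsequent
 * global_flush_tlb().  make_pages_uncached() is a hypothetical helper:
 *
 *      static int make_pages_uncached(struct page *pg, int n)
 *      {
 *              int err = change_page_attr(pg, n, PAGE_KERNEL_NOCACHE);
 *              global_flush_tlb();
 *              return err;
 *      }
 */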