ia64/xen-unstable

view linux-2.6-xen-sparse/arch/i386/mm/highmem-xen.c @ 10584:4260eb8c0874

kunmap_atomic() must zap the PTE to avoid dangling references
when attempting to free memory back to Xen. We can implement
something more efficient in future.

Also add a debug print message if a guest tries to free 'in use'
memory. We'll make it a real guest-visible error in future.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@dhcp93.uk.xensource.com
date Wed Jun 28 18:17:41 2006 +0100 (2006-06-28)
parents 4b06313b9790
children cb9443bfdff8
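
The dangling-reference hazard the changeset describes shows up when the
balloon driver returns a page to the hypervisor. Below is a minimal
sketch, assuming the balloon-style XENMEM_decrease_reservation interface
of this era; the helper name give_page_back_to_xen is hypothetical and
for illustration only:

#include <linux/mm.h>
#include <asm/hypervisor.h>
#include <xen/interface/memory.h>

/* Hypothetical helper: hand one page back to Xen. If a kmap_atomic()
 * fixmap PTE still mapped this page, Xen would see an outstanding
 * pagetable reference to the frame and refuse to reclaim it -- which
 * is why kunmap_atomic() in the file below zaps the PTE eagerly. */
static int give_page_back_to_xen(struct page *page)
{
	unsigned long mfn = pfn_to_mfn(page_to_pfn(page));
	struct xen_memory_reservation reservation = {
		.nr_extents   = 1,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};
	long rc;

	set_xen_guest_handle(reservation.extent_start, &mfn);

	/* decrease_reservation returns the number of extents released. */
	rc = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
				  &reservation);
	return (rc == 1) ? 0 : -EBUSY;
}

The hypercall succeeds only if the guest holds no remaining pagetable
references to the frame, so any mapping left behind by kunmap_atomic()
would make the release fail.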
line source
#include <linux/highmem.h>
#include <linux/module.h>

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	inc_preempt_count();
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	if (!pte_none(*(kmap_pte-idx)))
		BUG();
#endif
	set_pte_at_sync(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));

	return (void *)vaddr;
}
void *kmap_atomic(struct page *page, enum km_type type)
{
	return __kmap_atomic(page, type, kmap_prot);
}

/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
void *kmap_atomic_pte(struct page *page, enum km_type type)
{
	return __kmap_atomic(page, type, PAGE_KERNEL_RO);
}
void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		dec_preempt_count();
		preempt_check_resched();
		return;
	}

	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
		BUG();

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	__flush_tlb_one(vaddr);
#elif defined(CONFIG_XEN)
	/*
	 * We must ensure there are no dangling pagetable references when
	 * returning memory to Xen (decrease_reservation).
	 * XXX TODO: We could make this faster by only zapping when
	 * kmap_flush_unused is called, but that is trickier and more invasive.
	 */
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
#endif

	dec_preempt_count();
	preempt_check_resched();
}
/* This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	inc_preempt_count();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
	__flush_tlb_one(vaddr);

	return (void *)vaddr;
}
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_to_page);
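
For reference, a typical caller pairs kmap_atomic() with kunmap_atomic()
on the same km_type slot and, as the comment at the top of the file
requires, must not sleep while the mapping is held. A minimal sketch
against this 2.6.16-era API; copy_highpage_to_buf is a hypothetical
helper:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: copy one (possibly highmem) page into a buffer.
 * KM_USER0 is one of the per-CPU fixmap slots; the pte installed by
 * kmap_atomic() is valid only until the matching kunmap_atomic(). */
static void copy_highpage_to_buf(struct page *page, void *buf)
{
	void *vaddr = kmap_atomic(page, KM_USER0);
	memcpy(buf, vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);
}

On this Xen tree the kunmap_atomic() call also clears the fixmap PTE in
non-debug builds, so once the copy completes the page carries no stray
pagetable reference and can be safely handed back to the hypervisor.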