direct-io.hg: xen/arch/x86/x86_32/domain_page.c @ 11216:f71f17e64e3a

Fix assertion.

Signed-off-by: Steven Hand <steven@xensource.com>

author    Steven Hand <steven@xensource.com>
date      Mon Aug 21 13:21:51 2006 +0100 (2006-08-21)
parents   36220033c67f
children  7b5115221dfc
/******************************************************************************
 * domain_page.c
 *
 * Allow temporary mapping of domain pages.
 *
 * Copyright (c) 2003-2006, Keir Fraser <keir@xensource.com>
 */

#include <xen/config.h>
#include <xen/sched.h>
#include <xen/mm.h>
#include <xen/perfc.h>
#include <xen/domain_page.h>
#include <xen/shadow.h>
#include <asm/current.h>
#include <asm/flushtlb.h>
#include <asm/hardirq.h>
#include <asm/hvm/support.h>

static inline struct vcpu *mapcache_current_vcpu(void)
{
    struct vcpu *v;

    /* In the common case we use the mapcache of the running VCPU. */
    v = current;

    /*
     * If guest_table is NULL, and we are running a paravirtualised guest,
     * then it means we are running on the idle domain's page table and must
     * therefore use its mapcache.
     */
    if ( unlikely(!pagetable_get_pfn(v->arch.guest_table)) && !hvm_guest(v) )
    {
        /* If we really are idling, perform lazy context switch now. */
        if ( (v = idle_vcpu[smp_processor_id()]) == current )
            __sync_lazy_execstate();
        /* We must now be running on the idle page table. */
        ASSERT(read_cr3() == __pa(idle_pg_table));
    }

    return v;
}
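
/*
 * Map the given page frame into the per-VCPU mapcache, returning a virtual
 * address that remains valid within the current VCPU until a matching
 * unmap_domain_page(). Must not be called from IRQ context.
 */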
void *map_domain_page(unsigned long mfn)
{
    unsigned long va;
    unsigned int idx, i, vcpu;
    struct vcpu *v;
    struct mapcache *cache;
    struct vcpu_maphash_entry *hashent;

    ASSERT(!in_irq());

    perfc_incrc(map_domain_page_count);

    v = mapcache_current_vcpu();

    vcpu  = v->vcpu_id;
    cache = &v->domain->arch.mapcache;

    hashent = &cache->vcpu_maphash[vcpu].hash[MAPHASH_HASHFN(mfn)];
    if ( hashent->mfn == mfn )
    {
        idx = hashent->idx;
        hashent->refcnt++;
        ASSERT(idx < MAPCACHE_ENTRIES);
        ASSERT(hashent->refcnt != 0);
        ASSERT(l1e_get_pfn(cache->l1tab[idx]) == mfn);
        goto out;
    }

    spin_lock(&cache->lock);

    /* Has some other CPU caused a wrap? We must flush if so. */
    if ( unlikely(cache->epoch != cache->shadow_epoch[vcpu]) )
    {
        cache->shadow_epoch[vcpu] = cache->epoch;
        if ( NEED_FLUSH(this_cpu(tlbflush_time), cache->tlbflush_timestamp) )
        {
            perfc_incrc(domain_page_tlb_flush);
            local_flush_tlb();
        }
    }

    idx = find_next_zero_bit(cache->inuse, MAPCACHE_ENTRIES, cache->cursor);
    if ( unlikely(idx >= MAPCACHE_ENTRIES) )
    {
        /* /First/, clean the garbage map and update the inuse list. */
        for ( i = 0; i < ARRAY_SIZE(cache->garbage); i++ )
        {
            unsigned long x = xchg(&cache->garbage[i], 0);
            cache->inuse[i] &= ~x;
        }

        /* /Second/, flush TLBs. */
        perfc_incrc(domain_page_tlb_flush);
        local_flush_tlb();
        cache->shadow_epoch[vcpu] = ++cache->epoch;
        cache->tlbflush_timestamp = tlbflush_current_time();

        idx = find_first_zero_bit(cache->inuse, MAPCACHE_ENTRIES);
        ASSERT(idx < MAPCACHE_ENTRIES);
    }

    set_bit(idx, cache->inuse);
    cache->cursor = idx + 1;

    spin_unlock(&cache->lock);

    cache->l1tab[idx] = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);

 out:
    va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT);
    return (void *)va;
}
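
/*
 * Release a mapping obtained from map_domain_page(). The slot is either
 * recorded in the per-VCPU maphash for quick reuse, or marked as garbage to
 * be reclaimed (with a TLB flush) when the mapcache next wraps.
 */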
void unmap_domain_page(void *va)
{
    unsigned int idx;
    struct vcpu *v;
    struct mapcache *cache;
    unsigned long mfn;
    struct vcpu_maphash_entry *hashent;

    ASSERT(!in_irq());

    ASSERT((void *)MAPCACHE_VIRT_START <= va);
    ASSERT(va < (void *)MAPCACHE_VIRT_END);

    v = mapcache_current_vcpu();

    cache = &v->domain->arch.mapcache;

    idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
    mfn = l1e_get_pfn(cache->l1tab[idx]);
    hashent = &cache->vcpu_maphash[v->vcpu_id].hash[MAPHASH_HASHFN(mfn)];

    if ( hashent->idx == idx )
    {
        ASSERT(hashent->mfn == mfn);
        ASSERT(hashent->refcnt != 0);
        hashent->refcnt--;
    }
    else if ( hashent->refcnt == 0 )
    {
        if ( hashent->idx != MAPHASHENT_NOTINUSE )
        {
            /* /First/, zap the PTE. */
            ASSERT(l1e_get_pfn(cache->l1tab[hashent->idx]) == hashent->mfn);
            cache->l1tab[hashent->idx] = l1e_empty();
            /* /Second/, mark as garbage. */
            set_bit(hashent->idx, cache->garbage);
        }

        /* Add newly-freed mapping to the maphash. */
        hashent->mfn = mfn;
        hashent->idx = idx;
    }
    else
    {
        /* /First/, zap the PTE. */
        cache->l1tab[idx] = l1e_empty();
        /* /Second/, mark as garbage. */
        set_bit(idx, cache->garbage);
    }
}
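
/*
 * Illustrative sketch only (the helper below, and its offset/len parameters,
 * are hypothetical and not part of this file): the typical map/access/unmap
 * pattern used by callers of the two routines above.
 */
#if 0
static void copy_to_domain_frame(unsigned long mfn, unsigned int offset,
                                 const void *src, unsigned int len)
{
    char *va = map_domain_page(mfn);   /* transient, per-VCPU mapping */

    memcpy(va + offset, src, len);     /* access the frame through va */
    unmap_domain_page(va);             /* release the mapcache slot   */
}
#endif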

void mapcache_init(struct domain *d)
{
    unsigned int i, j;
    struct vcpu_maphash_entry *hashent;

    d->arch.mapcache.l1tab = d->arch.mm_perdomain_pt +
        (GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
    spin_lock_init(&d->arch.mapcache.lock);

    /* Mark all maphash entries as not in use. */
    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
    {
        for ( j = 0; j < MAPHASH_ENTRIES; j++ )
        {
            hashent = &d->arch.mapcache.vcpu_maphash[i].hash[j];
            hashent->mfn = ~0UL; /* never valid to map */
            hashent->idx = MAPHASHENT_NOTINUSE;
        }
    }
}
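
/*
 * Bookkeeping for global mappings. These are allocated from the ioremap
 * region and are visible in every address space, unlike the per-VCPU
 * mapcache slots above.
 */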
#define GLOBALMAP_BITS (IOREMAP_MBYTES << (20 - PAGE_SHIFT))
static unsigned long inuse[BITS_TO_LONGS(GLOBALMAP_BITS)];
static unsigned long garbage[BITS_TO_LONGS(GLOBALMAP_BITS)];
static unsigned int inuse_cursor;
static DEFINE_SPINLOCK(globalmap_lock);
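
/*
 * Like map_domain_page(), but the returned mapping is visible in all address
 * spaces, not only to the VCPU that created it, and it persists until a
 * matching unmap_domain_page_global().
 */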
void *map_domain_page_global(unsigned long mfn)
{
    l2_pgentry_t *pl2e;
    l1_pgentry_t *pl1e;
    unsigned int idx, i;
    unsigned long va;

    ASSERT(!in_irq() && local_irq_is_enabled());

    spin_lock(&globalmap_lock);

    idx = find_next_zero_bit(inuse, GLOBALMAP_BITS, inuse_cursor);
    va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
    if ( unlikely(va >= FIXADDR_START) )
    {
        /* /First/, clean the garbage map and update the inuse list. */
        for ( i = 0; i < ARRAY_SIZE(garbage); i++ )
        {
            unsigned long x = xchg(&garbage[i], 0);
            inuse[i] &= ~x;
        }

        /* /Second/, flush all TLBs to get rid of stale garbage mappings. */
        flush_tlb_all();

        idx = find_first_zero_bit(inuse, GLOBALMAP_BITS);
        va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
        ASSERT(va < FIXADDR_START);
    }

    set_bit(idx, inuse);
    inuse_cursor = idx + 1;

    spin_unlock(&globalmap_lock);

    pl2e = virt_to_xen_l2e(va);
    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
    *pl1e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);

    return (void *)va;
}
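
/*
 * Release a mapping created by map_domain_page_global(). The PTE is zapped
 * immediately; the slot is marked as garbage and reclaimed, after a global
 * TLB flush, when the allocator next wraps.
 */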
void unmap_domain_page_global(void *va)
{
    unsigned long __va = (unsigned long)va;
    l2_pgentry_t *pl2e;
    l1_pgentry_t *pl1e;
    unsigned int idx;

    ASSERT(__va >= IOREMAP_VIRT_START);

    /* /First/, we zap the PTE. */
    pl2e = virt_to_xen_l2e(__va);
    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(__va);
    *pl1e = l1e_empty();

    /* /Second/, we add to the garbage map. */
    idx = (__va - IOREMAP_VIRT_START) >> PAGE_SHIFT;
    set_bit(idx, garbage);
}
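
/*
 * Translate a virtual address obtained from map_domain_page() or
 * map_domain_page_global() back into the machine address of the underlying
 * frame, preserving the offset within the page.
 */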
paddr_t maddr_from_mapped_domain_page(void *va)
{
    unsigned long __va = (unsigned long)va;
    l2_pgentry_t *pl2e;
    l1_pgentry_t *pl1e;
    unsigned int idx;
    struct mapcache *cache;
    unsigned long mfn;

    if ( (__va >= MAPCACHE_VIRT_START) && (__va < MAPCACHE_VIRT_END) )
    {
        cache = &mapcache_current_vcpu()->domain->arch.mapcache;
        idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
        mfn = l1e_get_pfn(cache->l1tab[idx]);
    }
    else
    {
        ASSERT(__va >= IOREMAP_VIRT_START);
        pl2e = virt_to_xen_l2e(__va);
        pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(__va);
        mfn = l1e_get_pfn(*pl1e);
    }

    return ((paddr_t)mfn << PAGE_SHIFT) | ((unsigned long)va & ~PAGE_MASK);
}
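
/*
 * Illustrative sketch only (the helper below is hypothetical and not part of
 * this file): a long-lived global mapping whose machine address is also
 * needed, e.g. to install in a page-table entry.
 */
#if 0
static void global_map_example(unsigned long mfn)
{
    void *va = map_domain_page_global(mfn);  /* visible in all address spaces */
    paddr_t maddr = maddr_from_mapped_domain_page(va);

    /* ... use va for CPU accesses, maddr wherever a machine address is needed ... */

    unmap_domain_page_global(va);            /* PTE zapped, slot marked garbage */
}
#endif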