ia64/xen-unstable: xen/arch/x86/x86_32/domain_page.c @ 9086:e0f66dbe4b13

map_domain_page() now handles running on idle page tables.

Signed-off-by: Keir Fraser <keir@xensource.com>

author   kaf24@firebug.cl.cam.ac.uk
date     Wed Mar 01 18:34:06 2006 +0100 (2006-03-01)
parents  9368d2ba43f2
children 94b10faa7577
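For context, a minimal usage sketch of the interface this file implements (the helper name, buffer, and pfn argument are illustrative placeholders, not part of this changeset):

    /* Copy one page of domain memory out through a transient mapping. */
    static void copy_domain_frame(unsigned long pfn, void *buf)
    {
        void *p = map_domain_page(pfn);   /* transient mapcache mapping */
        memcpy(buf, p, PAGE_SIZE);
        unmap_domain_page(p);             /* release as soon as possible */
    }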
/******************************************************************************
 * domain_page.c
 *
 * Allow temporary mapping of domain pages.
 *
 * Copyright (c) 2003-2006, Keir Fraser <keir@xensource.com>
 */

#include <xen/config.h>
#include <xen/sched.h>
#include <xen/mm.h>
#include <xen/perfc.h>
#include <xen/domain_page.h>
#include <xen/shadow.h>
#include <asm/current.h>
#include <asm/flushtlb.h>
#include <asm/hardirq.h>

static inline struct vcpu *mapcache_current_vcpu(void)
{
    struct vcpu *v;

    /* In the common case we use the mapcache of the running VCPU. */
    v = current;

    /*
     * If guest_table is NULL, and we are running a paravirtualised guest,
     * then it means we are running on the idle domain's page table and must
     * therefore use its mapcache.
     */
    if ( unlikely(!pagetable_get_pfn(v->arch.guest_table)) && !HVM_DOMAIN(v) )
    {
        /* If we really are idling, perform lazy context switch now. */
        if ( (v = idle_vcpu[smp_processor_id()]) == current )
            __sync_lazy_execstate();
        /* We must now be running on the idle page table. */
        ASSERT(read_cr3() == __pa(idle_pg_table));
    }

    return v;
}

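/*
 * map_domain_page(): map a page frame into the transient per-domain
 * mapcache.  The returned virtual address is only guaranteed to remain
 * usable until the matching unmap_domain_page(); callers needing a
 * long-lived, CPU-global mapping should use map_domain_page_global().
 */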
void *map_domain_page(unsigned long pfn)
{
    unsigned long va;
    unsigned int idx, i, vcpu;
    struct vcpu *v;
    struct mapcache *cache;
    struct vcpu_maphash_entry *hashent;

    ASSERT(!in_irq());

    perfc_incrc(map_domain_page_count);

    v = mapcache_current_vcpu();

    vcpu = v->vcpu_id;
    cache = &v->domain->arch.mapcache;
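    /*
     * Fast path: if this pfn is already cached in the per-VCPU maphash,
     * reuse that slot and just take another reference.
     */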
    hashent = &cache->vcpu_maphash[vcpu].hash[MAPHASH_HASHFN(pfn)];
    if ( hashent->pfn == pfn )
    {
        idx = hashent->idx;
        hashent->refcnt++;
        ASSERT(hashent->refcnt != 0);
        ASSERT(l1e_get_pfn(cache->l1tab[idx]) == pfn);
        goto out;
    }

    spin_lock(&cache->lock);

    /* Has some other CPU caused a wrap? We must flush if so. */
    if ( unlikely(cache->epoch != cache->shadow_epoch[vcpu]) )
    {
        cache->shadow_epoch[vcpu] = cache->epoch;
        if ( NEED_FLUSH(tlbflush_time[smp_processor_id()],
                        cache->tlbflush_timestamp) )
        {
            perfc_incrc(domain_page_tlb_flush);
            local_flush_tlb();
        }
    }

    idx = find_next_zero_bit(cache->inuse, MAPCACHE_ENTRIES, cache->cursor);
    if ( unlikely(idx >= MAPCACHE_ENTRIES) )
    {
        /* /First/, clean the garbage map and update the inuse list. */
        for ( i = 0; i < ARRAY_SIZE(cache->garbage); i++ )
        {
            unsigned long x = xchg(&cache->garbage[i], 0);
            cache->inuse[i] &= ~x;
        }

        /* /Second/, flush TLBs. */
        perfc_incrc(domain_page_tlb_flush);
        local_flush_tlb();
        cache->shadow_epoch[vcpu] = ++cache->epoch;
        cache->tlbflush_timestamp = tlbflush_current_time();

        idx = find_first_zero_bit(cache->inuse, MAPCACHE_ENTRIES);
        ASSERT(idx < MAPCACHE_ENTRIES);
    }

    set_bit(idx, cache->inuse);
    cache->cursor = idx + 1;

    spin_unlock(&cache->lock);
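    /*
     * The inuse bit reserves this slot for us, so the PTE can safely be
     * written after the mapcache lock has been dropped.
     */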
    cache->l1tab[idx] = l1e_from_pfn(pfn, __PAGE_HYPERVISOR);

 out:
    va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT);
    return (void *)va;
}

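/*
 * unmap_domain_page(): release a mapping obtained from map_domain_page().
 * Recently used mappings are kept in the per-VCPU maphash so they can be
 * reused cheaply; anything else is marked as garbage and reclaimed lazily
 * when the mapcache wraps.
 */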
void unmap_domain_page(void *va)
{
    unsigned int idx;
    struct vcpu *v;
    struct mapcache *cache;
    unsigned long pfn;
    struct vcpu_maphash_entry *hashent;

    ASSERT(!in_irq());

    ASSERT((void *)MAPCACHE_VIRT_START <= va);
    ASSERT(va < (void *)MAPCACHE_VIRT_END);

    v = mapcache_current_vcpu();

    cache = &v->domain->arch.mapcache;

    idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
    pfn = l1e_get_pfn(cache->l1tab[idx]);
    hashent = &cache->vcpu_maphash[v->vcpu_id].hash[MAPHASH_HASHFN(pfn)];
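    /*
     * Three cases: the mapping is already cached in the maphash (just drop
     * a reference); the hash slot is free (cache this mapping there,
     * retiring any previous occupant); or the slot is busy (retire this
     * mapping immediately as garbage).
     */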
    if ( hashent->idx == idx )
    {
        ASSERT(hashent->pfn == pfn);
        ASSERT(hashent->refcnt != 0);
        hashent->refcnt--;
    }
    else if ( hashent->refcnt == 0 )
    {
        if ( hashent->idx != MAPHASHENT_NOTINUSE )
        {
            /* /First/, zap the PTE. */
            ASSERT(l1e_get_pfn(cache->l1tab[hashent->idx]) == hashent->pfn);
            cache->l1tab[hashent->idx] = l1e_empty();
            /* /Second/, mark as garbage. */
            set_bit(hashent->idx, cache->garbage);
        }

        /* Add newly-freed mapping to the maphash. */
        hashent->pfn = pfn;
        hashent->idx = idx;
    }
    else
    {
        /* /First/, zap the PTE. */
        cache->l1tab[idx] = l1e_empty();
        /* /Second/, mark as garbage. */
        set_bit(idx, cache->garbage);
    }
}

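/*
 * mapcache_init(): initialise a domain's mapcache.  The mapcache l1 table
 * lives in the per-domain page-table area, after the GDT/LDT mapping slots.
 */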
void mapcache_init(struct domain *d)
{
    unsigned int i, j;

    d->arch.mapcache.l1tab = d->arch.mm_perdomain_pt +
        (GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
    spin_lock_init(&d->arch.mapcache.lock);

    /* Mark all maphash entries as not in use. */
    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
        for ( j = 0; j < MAPHASH_ENTRIES; j++ )
            d->arch.mapcache.vcpu_maphash[i].hash[j].idx =
                MAPHASHENT_NOTINUSE;
}

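/*
 * Global mappings are carved out of the ioremap virtual-address range.
 * They are visible on every CPU and persist until unmap_domain_page_global();
 * stale entries accumulate in the garbage bitmap and are reclaimed, with a
 * full TLB flush, when the allocation cursor wraps.
 */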
#define GLOBALMAP_BITS (IOREMAP_MBYTES << (20 - PAGE_SHIFT))
static unsigned long inuse[BITS_TO_LONGS(GLOBALMAP_BITS)];
static unsigned long garbage[BITS_TO_LONGS(GLOBALMAP_BITS)];
static unsigned int inuse_cursor;
static spinlock_t globalmap_lock = SPIN_LOCK_UNLOCKED;

void *map_domain_page_global(unsigned long pfn)
{
    l2_pgentry_t *pl2e;
    l1_pgentry_t *pl1e;
    unsigned int idx, i;
    unsigned long va;

    ASSERT(!in_irq() && local_irq_is_enabled());

    spin_lock(&globalmap_lock);

    idx = find_next_zero_bit(inuse, GLOBALMAP_BITS, inuse_cursor);
    va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
    if ( unlikely(va >= FIXADDR_START) )
    {
        /* /First/, clean the garbage map and update the inuse list. */
        for ( i = 0; i < ARRAY_SIZE(garbage); i++ )
        {
            unsigned long x = xchg(&garbage[i], 0);
            inuse[i] &= ~x;
        }

        /* /Second/, flush all TLBs to get rid of stale garbage mappings. */
        flush_tlb_all();

        idx = find_first_zero_bit(inuse, GLOBALMAP_BITS);
        va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
        ASSERT(va < FIXADDR_START);
    }

    set_bit(idx, inuse);
    inuse_cursor = idx + 1;

    spin_unlock(&globalmap_lock);

    pl2e = virt_to_xen_l2e(va);
    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
    *pl1e = l1e_from_pfn(pfn, __PAGE_HYPERVISOR);

    return (void *)va;
}

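/*
 * unmap_domain_page_global(): zap the PTE for a global mapping and defer
 * reuse of its slot (and the TLB flush) to the next wrap of the global map.
 */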
void unmap_domain_page_global(void *va)
{
    unsigned long __va = (unsigned long)va;
    l2_pgentry_t *pl2e;
    l1_pgentry_t *pl1e;
    unsigned int idx;

    /* /First/, we zap the PTE. */
    pl2e = virt_to_xen_l2e(__va);
    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(__va);
    *pl1e = l1e_empty();

    /* /Second/, we add to the garbage map. */
    idx = (__va - IOREMAP_VIRT_START) >> PAGE_SHIFT;
    set_bit(idx, garbage);
}