ia64/xen-unstable

xen/arch/x86/x86_32/domain_page.c @ 8572:1c186b28289b

map_domain_page per-vcpu cache still seems broken. Disable
while I fix it.

Signed-off-by: Keir Fraser <keir@xensource.com>

author    kaf24@firebug.cl.cam.ac.uk
date      Wed Jan 11 20:03:53 2006 +0100 (2006-01-11)
parents   57c50578414d
children  87a97054b469
/******************************************************************************
 * domain_page.c
 *
 * Allow temporary mapping of domain pages. Based on ideas from the
 * Linux PKMAP code -- the copyrights and credits are retained below.
 */

/*
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <xen/config.h>
#include <xen/sched.h>
#include <xen/mm.h>
#include <xen/perfc.h>
#include <xen/domain_page.h>
#include <asm/current.h>
#include <asm/flushtlb.h>
#include <asm/hardirq.h>

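/*
 * Typical caller pattern, shown here only as a minimal sketch: map the
 * frame, access it through the transient virtual address, and unmap it
 * promptly.  peek_domain_frame() is a hypothetical helper used purely for
 * illustration; it is not part of this interface.
 */
static unsigned long peek_domain_frame(unsigned long pfn)
{
    unsigned long *va = map_domain_page(pfn); /* per-vcpu transient mapping */
    unsigned long first_word = va[0];         /* access only while mapped */

    unmap_domain_page(va);                    /* release before returning */
    return first_word;
}
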
void *map_domain_page(unsigned long pfn)
{
    unsigned long va;
    unsigned int idx, i, vcpu = current->vcpu_id;
    struct domain *d;
    struct mapcache *cache;
    struct vcpu_maphash_entry *hashent;

    ASSERT(!in_irq());

    perfc_incrc(map_domain_page_count);

    /* If we are the idle domain, ensure that we run on our own page tables. */
    d = current->domain;
    if ( unlikely(is_idle_domain(d)) )
        __sync_lazy_execstate();

    cache = &d->arch.mapcache;

    hashent = &cache->vcpu_maphash[vcpu].hash[MAPHASH_HASHFN(pfn)];
    /* Per-vcpu maphash fast path: disabled for now (see changeset note above). */
#if 0
    if ( hashent->pfn == pfn )
    {
        idx = hashent->idx;
        hashent->refcnt++;
        ASSERT(hashent->refcnt != 0);
        ASSERT(l1e_get_pfn(cache->l1tab[idx]) == pfn);
        goto out;
    }
#endif

    spin_lock(&cache->lock);

    /* Has some other CPU caused a wrap? We must flush if so. */
    if ( unlikely(cache->epoch != cache->shadow_epoch[vcpu]) )
    {
        cache->shadow_epoch[vcpu] = cache->epoch;
        if ( NEED_FLUSH(tlbflush_time[smp_processor_id()],
                        cache->tlbflush_timestamp) )
        {
            perfc_incrc(domain_page_tlb_flush);
            local_flush_tlb();
        }
    }

    idx = find_next_zero_bit(cache->inuse, MAPCACHE_ENTRIES, cache->cursor);
    if ( unlikely(idx >= MAPCACHE_ENTRIES) )
    {
        /* /First/, clean the garbage map and update the inuse list. */
        for ( i = 0; i < ARRAY_SIZE(cache->garbage); i++ )
        {
            unsigned long x = xchg(&cache->garbage[i], 0);
            cache->inuse[i] &= ~x;
        }

        /* /Second/, flush TLBs. */
        perfc_incrc(domain_page_tlb_flush);
        local_flush_tlb();
        cache->shadow_epoch[vcpu] = ++cache->epoch;
        cache->tlbflush_timestamp = tlbflush_current_time();

        idx = find_first_zero_bit(cache->inuse, MAPCACHE_ENTRIES);
        ASSERT(idx < MAPCACHE_ENTRIES);
    }

    set_bit(idx, cache->inuse);
    cache->cursor = idx + 1;

    spin_unlock(&cache->lock);

    cache->l1tab[idx] = l1e_from_pfn(pfn, __PAGE_HYPERVISOR);

 /*out:*/
    va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT);
    return (void *)va;
}

void unmap_domain_page(void *va)
{
    unsigned int idx;
    struct mapcache *cache = &current->domain->arch.mapcache;
    unsigned long pfn;
    struct vcpu_maphash_entry *hashent;

    ASSERT(!in_irq());

    ASSERT((void *)MAPCACHE_VIRT_START <= va);
    ASSERT(va < (void *)MAPCACHE_VIRT_END);

    idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
    pfn = l1e_get_pfn(cache->l1tab[idx]);
    hashent = &cache->vcpu_maphash[current->vcpu_id].hash[MAPHASH_HASHFN(pfn)];

    if ( hashent->idx == idx )
    {
        ASSERT(hashent->pfn == pfn);
        ASSERT(hashent->refcnt != 0);
        hashent->refcnt--;
    }
    else if ( hashent->refcnt == 0 )
    {
        if ( hashent->idx != MAPHASHENT_NOTINUSE )
        {
            /* /First/, zap the PTE. */
            ASSERT(l1e_get_pfn(cache->l1tab[hashent->idx]) == hashent->pfn);
            cache->l1tab[hashent->idx] = l1e_empty();
            /* /Second/, mark as garbage. */
            set_bit(hashent->idx, cache->garbage);
        }

        /* Add newly-freed mapping to the maphash. */
        hashent->pfn = pfn;
        hashent->idx = idx;
    }
    else
    {
        /* /First/, zap the PTE. */
        cache->l1tab[idx] = l1e_empty();
        /* /Second/, mark as garbage. */
        set_bit(idx, cache->garbage);
    }
}

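/*
 * The inuse/garbage bitmap pair used above implements deferred reclaim: the
 * unmap path only zaps the PTE and marks the slot as garbage, and the map
 * path folds garbage back into the free pool and flushes stale TLB entries
 * in one batch when the search from its cursor finds no free slot.  Below is
 * a minimal, stand-alone sketch of that idea using a single word-sized
 * bitmap, a simple linear scan, and a hypothetical flush hook in place of
 * Xen's bitops, locking and TLB interfaces.
 */
#define DEMO_SLOTS (8 * sizeof(unsigned long))

static unsigned long demo_inuse;    /* slot is currently mapped */
static unsigned long demo_garbage;  /* slot freed; TLBs may still cache it */

static void demo_free_slot(unsigned int idx)
{
    demo_garbage |= 1UL << idx;     /* defer: no TLB flush on the free path */
}

static int demo_alloc_slot(void (*flush_stale_mappings)(void))
{
    unsigned int idx;

    for ( idx = 0; idx < DEMO_SLOTS; idx++ )
        if ( !(demo_inuse & (1UL << idx)) )
            goto found;

    /* Wrap: reclaim all garbage slots at once, then flush stale mappings. */
    demo_inuse &= ~demo_garbage;
    demo_garbage = 0;
    flush_stale_mappings();

    for ( idx = 0; idx < DEMO_SLOTS; idx++ )
        if ( !(demo_inuse & (1UL << idx)) )
            goto found;

    return -1;                      /* every slot is genuinely in use */

 found:
    demo_inuse |= 1UL << idx;
    return (int)idx;
}
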
void mapcache_init(struct domain *d)
{
    unsigned int i, j;

    d->arch.mapcache.l1tab = d->arch.mm_perdomain_pt +
        (GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
    spin_lock_init(&d->arch.mapcache.lock);

    /* Mark all maphash entries as not in use. */
    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
        for ( j = 0; j < MAPHASH_ENTRIES; j++ )
            d->arch.mapcache.vcpu_maphash[i].hash[j].idx =
                MAPHASHENT_NOTINUSE;
}

#define GLOBALMAP_BITS (IOREMAP_MBYTES << (20 - PAGE_SHIFT))
static unsigned long inuse[BITS_TO_LONGS(GLOBALMAP_BITS)];
static unsigned long garbage[BITS_TO_LONGS(GLOBALMAP_BITS)];
static unsigned int inuse_cursor;
static spinlock_t globalmap_lock = SPIN_LOCK_UNLOCKED;

void *map_domain_page_global(unsigned long pfn)
{
    l2_pgentry_t *pl2e;
    l1_pgentry_t *pl1e;
    unsigned int idx, i;
    unsigned long va;

    ASSERT(!in_irq() && local_irq_is_enabled());

    spin_lock(&globalmap_lock);

    idx = find_next_zero_bit(inuse, GLOBALMAP_BITS, inuse_cursor);
    va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
    if ( unlikely(va >= FIXADDR_START) )
    {
        /* /First/, clean the garbage map and update the inuse list. */
        for ( i = 0; i < ARRAY_SIZE(garbage); i++ )
        {
            unsigned long x = xchg(&garbage[i], 0);
            inuse[i] &= ~x;
        }

        /* /Second/, flush all TLBs to get rid of stale garbage mappings. */
        flush_tlb_all();

        idx = find_first_zero_bit(inuse, GLOBALMAP_BITS);
        va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
        ASSERT(va < FIXADDR_START);
    }

    set_bit(idx, inuse);
    inuse_cursor = idx + 1;

    spin_unlock(&globalmap_lock);

    pl2e = virt_to_xen_l2e(va);
    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
    *pl1e = l1e_from_pfn(pfn, __PAGE_HYPERVISOR);

    return (void *)va;
}

void unmap_domain_page_global(void *va)
{
    unsigned long __va = (unsigned long)va;
    l2_pgentry_t *pl2e;
    l1_pgentry_t *pl1e;
    unsigned int idx;

    /* /First/, we zap the PTE. */
    pl2e = virt_to_xen_l2e(__va);
    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(__va);
    *pl1e = l1e_empty();

    /* /Second/, we add to the garbage map. */
    idx = (__va - IOREMAP_VIRT_START) >> PAGE_SHIFT;
    set_bit(idx, garbage);
}

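/*
 * A minimal usage sketch for the global variant.  Unlike the per-vcpu
 * mapcache above, map_domain_page_global() installs its mapping in the
 * ioremap region of Xen's own page tables, so the returned pointer stays
 * usable in any vcpu context until unmap_domain_page_global() is called.
 * long_lived_mapping and the demo_* helpers are hypothetical names used
 * only for illustration.
 */
static void *long_lived_mapping;

static void demo_attach(unsigned long pfn)
{
    long_lived_mapping = map_domain_page_global(pfn);
}

static void demo_detach(void)
{
    unmap_domain_page_global(long_lived_mapping);
    long_lived_mapping = NULL;
}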