ia64/xen-unstable: xen/arch/x86/x86_32/domain_page.c @ 19835:edfdeb150f27

Fix build system to detect udev > version 124

udev removed the udevinfo symlink from versions newer than 123, so Xen's
build system could not detect whether udev was present with the required
version.

Signed-off-by: Marc-A. Dahlhaus <mad@wol.de>
author:  Keir Fraser <keir.fraser@citrix.com>
date:    Thu Jun 25 13:02:37 2009 +0100
parents: 9bbb54fd9181

/******************************************************************************
 * domain_page.c
 *
 * Allow temporary mapping of domain pages.
 *
 * Copyright (c) 2003-2006, Keir Fraser <keir@xensource.com>
 */

#include <xen/config.h>
#include <xen/sched.h>
#include <xen/mm.h>
#include <xen/perfc.h>
#include <xen/domain_page.h>
#include <asm/current.h>
#include <asm/flushtlb.h>
#include <asm/hardirq.h>
#include <asm/hvm/support.h>

static inline struct vcpu *mapcache_current_vcpu(void)
{
    struct vcpu *v;

    /* In the common case we use the mapcache of the running VCPU. */
    v = current;

    /*
     * If guest_table is NULL, and we are running a paravirtualised guest,
     * then it means we are running on the idle domain's page table and must
     * therefore use its mapcache.
     */
    if ( unlikely(!pagetable_get_pfn(v->arch.guest_table)) && !is_hvm_vcpu(v) )
    {
        /* If we really are idling, perform lazy context switch now. */
        if ( (v = idle_vcpu[smp_processor_id()]) == current )
            __sync_lazy_execstate();
        /* We must now be running on the idle page table. */
        ASSERT(read_cr3() == __pa(idle_pg_table));
    }

    return v;
}

void *map_domain_page(unsigned long mfn)
{
    unsigned long va;
    unsigned int idx, i, flags;
    struct vcpu *v;
    struct mapcache_domain *dcache;
    struct mapcache_vcpu *vcache;
    struct vcpu_maphash_entry *hashent;

    ASSERT(!in_irq());

    perfc_incr(map_domain_page_count);

    v = mapcache_current_vcpu();

    dcache = &v->domain->arch.mapcache;
    vcache = &v->arch.mapcache;

    hashent = &vcache->hash[MAPHASH_HASHFN(mfn)];
    if ( hashent->mfn == mfn )
    {
        idx = hashent->idx;
        hashent->refcnt++;
        ASSERT(idx < MAPCACHE_ENTRIES);
        ASSERT(hashent->refcnt != 0);
        ASSERT(l1e_get_pfn(dcache->l1tab[idx]) == mfn);
        goto out;
    }

    spin_lock_irqsave(&dcache->lock, flags);

    /* Has some other CPU caused a wrap? We must flush if so. */
    if ( unlikely(dcache->epoch != vcache->shadow_epoch) )
    {
        vcache->shadow_epoch = dcache->epoch;
        if ( NEED_FLUSH(this_cpu(tlbflush_time), dcache->tlbflush_timestamp) )
        {
            perfc_incr(domain_page_tlb_flush);
            flush_tlb_local();
        }
    }

    idx = find_next_zero_bit(dcache->inuse, MAPCACHE_ENTRIES, dcache->cursor);
    if ( unlikely(idx >= MAPCACHE_ENTRIES) )
    {
        /* /First/, clean the garbage map and update the inuse list. */
        for ( i = 0; i < ARRAY_SIZE(dcache->garbage); i++ )
        {
            unsigned long x = xchg(&dcache->garbage[i], 0);
            dcache->inuse[i] &= ~x;
        }

        /* /Second/, flush TLBs. */
        perfc_incr(domain_page_tlb_flush);
        flush_tlb_local();
        vcache->shadow_epoch = ++dcache->epoch;
        dcache->tlbflush_timestamp = tlbflush_current_time();

        idx = find_first_zero_bit(dcache->inuse, MAPCACHE_ENTRIES);
        BUG_ON(idx >= MAPCACHE_ENTRIES);
    }

    set_bit(idx, dcache->inuse);
    dcache->cursor = idx + 1;

    spin_unlock_irqrestore(&dcache->lock, flags);

    l1e_write(&dcache->l1tab[idx], l1e_from_pfn(mfn, __PAGE_HYPERVISOR));

 out:
    va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT);
    return (void *)va;
}

void unmap_domain_page(const void *va)
{
    unsigned int idx;
    struct vcpu *v;
    struct mapcache_domain *dcache;
    unsigned long mfn;
    struct vcpu_maphash_entry *hashent;

    ASSERT(!in_irq());

    ASSERT((void *)MAPCACHE_VIRT_START <= va);
    ASSERT(va < (void *)MAPCACHE_VIRT_END);

    v = mapcache_current_vcpu();

    dcache = &v->domain->arch.mapcache;

    idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
    mfn = l1e_get_pfn(dcache->l1tab[idx]);
    hashent = &v->arch.mapcache.hash[MAPHASH_HASHFN(mfn)];

    if ( hashent->idx == idx )
    {
        ASSERT(hashent->mfn == mfn);
        ASSERT(hashent->refcnt != 0);
        hashent->refcnt--;
    }
    else if ( hashent->refcnt == 0 )
    {
        if ( hashent->idx != MAPHASHENT_NOTINUSE )
        {
            /* /First/, zap the PTE. */
            ASSERT(l1e_get_pfn(dcache->l1tab[hashent->idx]) == hashent->mfn);
            l1e_write(&dcache->l1tab[hashent->idx], l1e_empty());
            /* /Second/, mark as garbage. */
            set_bit(hashent->idx, dcache->garbage);
        }

        /* Add newly-freed mapping to the maphash. */
        hashent->mfn = mfn;
        hashent->idx = idx;
    }
    else
    {
        /* /First/, zap the PTE. */
        l1e_write(&dcache->l1tab[idx], l1e_empty());
        /* /Second/, mark as garbage. */
        set_bit(idx, dcache->garbage);
    }
}

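/*
 * Illustrative sketch (not from this file): a typical caller pairs
 * map_domain_page() with unmap_domain_page() around a short-lived access
 * to a domain frame.  zero_domain_page() is a hypothetical helper and the
 * use of clear_page() here is an assumption for illustration only.
 */
#if 0
static void zero_domain_page(unsigned long mfn)
{
    void *p = map_domain_page(mfn);   /* grab a transient mapcache slot */

    clear_page(p);                    /* touch the frame through the mapping */
    unmap_domain_page(p);             /* release the slot, or park it in the
                                         per-VCPU maphash for quick reuse */
}
#endif
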
void mapcache_domain_init(struct domain *d)
{
    d->arch.mapcache.l1tab = d->arch.mm_perdomain_pt +
        (GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
    spin_lock_init(&d->arch.mapcache.lock);
}

void mapcache_vcpu_init(struct vcpu *v)
{
    unsigned int i;
    struct vcpu_maphash_entry *hashent;

    /* Mark all maphash entries as not in use. */
    for ( i = 0; i < MAPHASH_ENTRIES; i++ )
    {
        hashent = &v->arch.mapcache.hash[i];
        hashent->mfn = ~0UL; /* never valid to map */
        hashent->idx = MAPHASHENT_NOTINUSE;
    }
}

#define GLOBALMAP_BITS (IOREMAP_MBYTES << (20 - PAGE_SHIFT))
static unsigned long inuse[BITS_TO_LONGS(GLOBALMAP_BITS)];
static unsigned long garbage[BITS_TO_LONGS(GLOBALMAP_BITS)];
static unsigned int inuse_cursor;
static DEFINE_SPINLOCK(globalmap_lock);

void *map_domain_page_global(unsigned long mfn)
{
    l2_pgentry_t *pl2e;
    l1_pgentry_t *pl1e;
    unsigned int idx, i;
    unsigned long va;

    ASSERT(!in_irq() && local_irq_is_enabled());

    /* At least half the ioremap space should be available to us. */
    BUILD_BUG_ON(IOREMAP_VIRT_START + (IOREMAP_MBYTES << 19) >= FIXADDR_START);

    spin_lock(&globalmap_lock);

    idx = find_next_zero_bit(inuse, GLOBALMAP_BITS, inuse_cursor);
    va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
    if ( unlikely(va >= FIXADDR_START) )
    {
        /* /First/, clean the garbage map and update the inuse list. */
        for ( i = 0; i < ARRAY_SIZE(garbage); i++ )
        {
            unsigned long x = xchg(&garbage[i], 0);
            inuse[i] &= ~x;
        }

        /* /Second/, flush all TLBs to get rid of stale garbage mappings. */
        flush_tlb_all();

        idx = find_first_zero_bit(inuse, GLOBALMAP_BITS);
        va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
        if ( unlikely(va >= FIXADDR_START) )
        {
            spin_unlock(&globalmap_lock);
            return NULL;
        }
    }

    set_bit(idx, inuse);
    inuse_cursor = idx + 1;

    spin_unlock(&globalmap_lock);

    pl2e = virt_to_xen_l2e(va);
    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
    l1e_write(pl1e, l1e_from_pfn(mfn, __PAGE_HYPERVISOR));

    return (void *)va;
}

void unmap_domain_page_global(const void *va)
{
    unsigned long __va = (unsigned long)va;
    l2_pgentry_t *pl2e;
    l1_pgentry_t *pl1e;
    unsigned int idx;

    ASSERT(__va >= IOREMAP_VIRT_START);

    /* /First/, we zap the PTE. */
    pl2e = virt_to_xen_l2e(__va);
    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(__va);
    l1e_write(pl1e, l1e_empty());

    /* /Second/, we add to the garbage map. */
    idx = (__va - IOREMAP_VIRT_START) >> PAGE_SHIFT;
    set_bit(idx, garbage);
}
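
/*
 * Illustrative sketch (not from this file): map_domain_page_global() is
 * meant for long-lived mappings that may be touched on any CPU, with the
 * returned pointer typically stashed away until unmap_domain_page_global()
 * is called.  cache_domain_page(), uncache_domain_page() and cached_va are
 * assumed names for illustration only.
 */
#if 0
static void *cached_va;

static int cache_domain_page(unsigned long mfn)
{
    cached_va = map_domain_page_global(mfn);
    return (cached_va == NULL) ? -ENOMEM : 0;  /* NULL => global map is full */
}

static void uncache_domain_page(void)
{
    unmap_domain_page_global(cached_va);       /* zap PTE, mark slot garbage */
    cached_va = NULL;
}
#endif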