ia64/xen-unstable
changeset 5355:cc6c1889cdb0
bitkeeper revision 1.1688 (42a561d8CR-Fck16qBCLCOs9F40q3g)
Give the map_dom_mem_cache a better name, and fix a bug in
unmap_domain_mem_with_cache() [it shouldn't actually unmap the page!].
The bug was spotted by George Dunlap.
Signed-off-by: Keir Fraser <keir@xensource.com>
author | kaf24@firebug.cl.cam.ac.uk |
---|---|
date | Tue Jun 07 08:59:04 2005 +0000 (2005-06-07) |
parents | 86fe63442842 |
children | 2d8e63df504a |
files | xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/include/asm-x86/shadow.h xen/include/asm-x86/x86_32/domain_page.h xen/include/asm-x86/x86_64/domain_page.h |
line diff
1.1 --- a/xen/arch/x86/mm.c Tue Jun 07 06:57:24 2005 +0000 1.2 +++ b/xen/arch/x86/mm.c Tue Jun 07 08:59:04 2005 +0000 1.3 @@ -1940,7 +1940,7 @@ int do_mmu_update( 1.4 struct vcpu *v = current; 1.5 struct domain *d = v->domain; 1.6 u32 type_info; 1.7 - struct map_dom_mem_cache mapcache, sh_mapcache; 1.8 + struct domain_mmap_cache mapcache, sh_mapcache; 1.9 1.10 LOCK_BIGLOCK(d); 1.11 1.12 @@ -1956,8 +1956,8 @@ int do_mmu_update( 1.13 (void)get_user(done, pdone); 1.14 } 1.15 1.16 - init_map_domain_mem_cache(&mapcache); 1.17 - init_map_domain_mem_cache(&sh_mapcache); 1.18 + domain_mmap_cache_init(&mapcache); 1.19 + domain_mmap_cache_init(&sh_mapcache); 1.20 1.21 if ( !set_foreigndom(cpu, foreigndom) ) 1.22 { 1.23 @@ -2169,8 +2169,8 @@ int do_mmu_update( 1.24 } 1.25 1.26 out: 1.27 - destroy_map_domain_mem_cache(&mapcache); 1.28 - destroy_map_domain_mem_cache(&sh_mapcache); 1.29 + domain_mmap_cache_destroy(&mapcache); 1.30 + domain_mmap_cache_destroy(&sh_mapcache); 1.31 1.32 process_deferred_ops(cpu); 1.33
2.1 --- a/xen/arch/x86/shadow.c Tue Jun 07 06:57:24 2005 +0000 2.2 +++ b/xen/arch/x86/shadow.c Tue Jun 07 08:59:04 2005 +0000 2.3 @@ -763,8 +763,8 @@ void free_monitor_pagetable(struct vcpu 2.4 2.5 int 2.6 set_p2m_entry(struct domain *d, unsigned long pfn, unsigned long mfn, 2.7 - struct map_dom_mem_cache *l2cache, 2.8 - struct map_dom_mem_cache *l1cache) 2.9 + struct domain_mmap_cache *l2cache, 2.10 + struct domain_mmap_cache *l1cache) 2.11 { 2.12 unsigned long phystab = pagetable_get_paddr(d->arch.phys_table); 2.13 l2_pgentry_t *l2, l2e; 2.14 @@ -808,14 +808,14 @@ alloc_p2m_table(struct domain *d) 2.15 struct pfn_info *page, *l2page; 2.16 l2_pgentry_t *l2; 2.17 unsigned long mfn, pfn; 2.18 - struct map_dom_mem_cache l1cache, l2cache; 2.19 + struct domain_mmap_cache l1cache, l2cache; 2.20 2.21 l2page = alloc_domheap_page(NULL); 2.22 if ( l2page == NULL ) 2.23 return 0; 2.24 2.25 - init_map_domain_mem_cache(&l1cache); 2.26 - init_map_domain_mem_cache(&l2cache); 2.27 + domain_mmap_cache_init(&l1cache); 2.28 + domain_mmap_cache_init(&l2cache); 2.29 2.30 d->arch.phys_table = mk_pagetable(page_to_phys(l2page)); 2.31 l2 = map_domain_mem_with_cache(page_to_phys(l2page), &l2cache); 2.32 @@ -851,8 +851,8 @@ alloc_p2m_table(struct domain *d) 2.33 list_ent = page->list.next; 2.34 } 2.35 2.36 - destroy_map_domain_mem_cache(&l2cache); 2.37 - destroy_map_domain_mem_cache(&l1cache); 2.38 + domain_mmap_cache_destroy(&l2cache); 2.39 + domain_mmap_cache_destroy(&l1cache); 2.40 2.41 return 1; 2.42 } 2.43 @@ -2682,7 +2682,7 @@ int shadow_fault(unsigned long va, struc 2.44 void shadow_l1_normal_pt_update( 2.45 struct domain *d, 2.46 unsigned long pa, l1_pgentry_t gpte, 2.47 - struct map_dom_mem_cache *cache) 2.48 + struct domain_mmap_cache *cache) 2.49 { 2.50 unsigned long sl1mfn; 2.51 l1_pgentry_t *spl1e, spte; 2.52 @@ -2707,7 +2707,7 @@ void shadow_l1_normal_pt_update( 2.53 void shadow_l2_normal_pt_update( 2.54 struct domain *d, 2.55 unsigned long pa, l2_pgentry_t gpde,
2.56 - struct map_dom_mem_cache *cache) 2.57 + struct domain_mmap_cache *cache) 2.58 { 2.59 unsigned long sl2mfn; 2.60 l2_pgentry_t *spl2e; 2.61 @@ -2732,7 +2732,7 @@ void shadow_l2_normal_pt_update( 2.62 void shadow_l3_normal_pt_update( 2.63 struct domain *d, 2.64 unsigned long pa, l3_pgentry_t gpde, 2.65 - struct map_dom_mem_cache *cache) 2.66 + struct domain_mmap_cache *cache) 2.67 { 2.68 BUG(); // not yet implemented 2.69 } 2.70 @@ -2742,7 +2742,7 @@ void shadow_l3_normal_pt_update( 2.71 void shadow_l4_normal_pt_update( 2.72 struct domain *d, 2.73 unsigned long pa, l4_pgentry_t gpde, 2.74 - struct map_dom_mem_cache *cache) 2.75 + struct domain_mmap_cache *cache) 2.76 { 2.77 BUG(); // not yet implemented 2.78 }
3.1 --- a/xen/include/asm-x86/shadow.h Tue Jun 07 06:57:24 2005 +0000 3.2 +++ b/xen/include/asm-x86/shadow.h Tue Jun 07 08:59:04 2005 +0000 3.3 @@ -121,25 +121,25 @@ extern void __shadow_sync_all(struct dom 3.4 extern int __shadow_out_of_sync(struct vcpu *v, unsigned long va); 3.5 extern int set_p2m_entry( 3.6 struct domain *d, unsigned long pfn, unsigned long mfn, 3.7 - struct map_dom_mem_cache *l2cache, 3.8 - struct map_dom_mem_cache *l1cache); 3.9 + struct domain_mmap_cache *l2cache, 3.10 + struct domain_mmap_cache *l1cache); 3.11 extern void remove_shadow(struct domain *d, unsigned long gpfn, u32 stype); 3.12 3.13 extern void shadow_l1_normal_pt_update(struct domain *d, 3.14 unsigned long pa, l1_pgentry_t l1e, 3.15 - struct map_dom_mem_cache *cache); 3.16 + struct domain_mmap_cache *cache); 3.17 extern void shadow_l2_normal_pt_update(struct domain *d, 3.18 unsigned long pa, l2_pgentry_t l2e, 3.19 - struct map_dom_mem_cache *cache); 3.20 + struct domain_mmap_cache *cache); 3.21 #if CONFIG_PAGING_LEVELS >= 3 3.22 extern void shadow_l3_normal_pt_update(struct domain *d, 3.23 unsigned long pa, l3_pgentry_t l3e, 3.24 - struct map_dom_mem_cache *cache); 3.25 + struct domain_mmap_cache *cache); 3.26 #endif 3.27 #if CONFIG_PAGING_LEVELS >= 4 3.28 extern void shadow_l4_normal_pt_update(struct domain *d, 3.29 unsigned long pa, l4_pgentry_t l4e, 3.30 - struct map_dom_mem_cache *cache); 3.31 + struct domain_mmap_cache *cache); 3.32 #endif 3.33 extern int shadow_do_update_va_mapping(unsigned long va, 3.34 l1_pgentry_t val,
4.1 --- a/xen/include/asm-x86/x86_32/domain_page.h Tue Jun 07 06:57:24 2005 +0000 4.2 +++ b/xen/include/asm-x86/x86_32/domain_page.h Tue Jun 07 08:59:04 2005 +0000 4.3 @@ -27,31 +27,36 @@ extern void *map_domain_mem(unsigned lon 4.4 */ 4.5 extern void unmap_domain_mem(void *va); 4.6 4.7 -struct map_dom_mem_cache { 4.8 +#define DMCACHE_ENTRY_VALID 1UL 4.9 +#define DMCACHE_ENTRY_HELD 2UL 4.10 + 4.11 +struct domain_mmap_cache { 4.12 unsigned long pa; 4.13 void *va; 4.14 }; 4.15 4.16 static inline void 4.17 -init_map_domain_mem_cache(struct map_dom_mem_cache *cache) 4.18 +domain_mmap_cache_init(struct domain_mmap_cache *cache) 4.19 { 4.20 ASSERT(cache != NULL); 4.21 cache->pa = 0; 4.22 } 4.23 4.24 static inline void * 4.25 -map_domain_mem_with_cache(unsigned long pa, struct map_dom_mem_cache *cache) 4.26 +map_domain_mem_with_cache(unsigned long pa, struct domain_mmap_cache *cache) 4.27 { 4.28 ASSERT(cache != NULL); 4.29 + BUG_ON(cache->pa & DMCACHE_ENTRY_HELD); 4.30 4.31 if ( likely(cache->pa) ) 4.32 { 4.33 + cache->pa |= DMCACHE_ENTRY_HELD; 4.34 if ( likely((pa & PAGE_MASK) == (cache->pa & PAGE_MASK)) ) 4.35 goto done; 4.36 unmap_domain_mem(cache->va); 4.37 } 4.38 4.39 - cache->pa = (pa & PAGE_MASK) | 1; 4.40 + cache->pa = (pa & PAGE_MASK) | DMCACHE_ENTRY_HELD | DMCACHE_ENTRY_VALID; 4.41 cache->va = map_domain_mem(cache->pa); 4.42 4.43 done: 4.44 @@ -60,14 +65,14 @@ map_domain_mem_with_cache(unsigned long 4.45 } 4.46 4.47 static inline void 4.48 -unmap_domain_mem_with_cache(void *va, struct map_dom_mem_cache *cache) 4.49 +unmap_domain_mem_with_cache(void *va, struct domain_mmap_cache *cache) 4.50 { 4.51 ASSERT(cache != NULL); 4.52 - unmap_domain_mem(va); 4.53 + cache->pa &= ~DMCACHE_ENTRY_HELD; 4.54 } 4.55 4.56 static inline void 4.57 -destroy_map_domain_mem_cache(struct map_dom_mem_cache *cache) 4.58 +domain_mmap_cache_destroy(struct domain_mmap_cache *cache) 4.59 { 4.60 ASSERT(cache != NULL); 4.61 if ( likely(cache->pa) )
5.1 --- a/xen/include/asm-x86/x86_64/domain_page.h Tue Jun 07 06:57:24 2005 +0000 5.2 +++ b/xen/include/asm-x86/x86_64/domain_page.h Tue Jun 07 08:59:04 2005 +0000 5.3 @@ -10,12 +10,12 @@ 5.4 #define map_domain_mem(_pa) phys_to_virt(_pa) 5.5 #define unmap_domain_mem(_va) ((void)(_va)) 5.6 5.7 -struct map_dom_mem_cache { 5.8 +struct domain_mmap_cache { 5.9 }; 5.10 5.11 -#define init_map_domain_mem_cache(_c) ((void)(_c)) 5.12 +#define domain_mmap_cache_init(_c) ((void)(_c)) 5.13 #define map_domain_mem_with_cache(_p,_c) (map_domain_mem(_p)) 5.14 #define unmap_domain_mem_with_cache(_v,_c) ((void)(_v)) 5.15 -#define destroy_map_domain_mem_cache(_c) ((void)(_c)) 5.16 +#define domain_mmap_cache_destroy(_c) ((void)(_c)) 5.17 5.18 #endif /* __ASM_DOMAIN_PAGE_H__ */