ia64/xen-unstable
changeset 8551:ed7888c838ad
Update map_domain_page() documentation (mappings may only
be used within the mapping vcpu). Implement TLB flush
filtering on the per-domain mapcache.
Signed-off-by: Keir Fraser <keir@xensource.com>
author    kaf24@firebug.cl.cam.ac.uk
date      Tue Jan 10 18:53:44 2006 +0100 (2006-01-10)
parents   542cb7acb21a
children  5ae96e117af2
files     xen/arch/x86/x86_32/domain_page.c xen/include/asm-x86/domain.h xen/include/xen/domain_page.h
line diff
--- a/xen/arch/x86/x86_32/domain_page.c	Tue Jan 10 18:25:45 2006 +0100
+++ b/xen/arch/x86/x86_32/domain_page.c	Tue Jan 10 18:53:44 2006 +0100
@@ -40,7 +40,8 @@ void *map_domain_pages(unsigned long pfn
 {
     unsigned long va;
     unsigned int idx, i, flags, vcpu = current->vcpu_id;
-    struct mapcache *cache = &current->domain->arch.mapcache;
+    struct domain *d;
+    struct mapcache *cache;
 #ifndef NDEBUG
     unsigned int flush_count = 0;
 #endif
@@ -49,17 +50,24 @@ void *map_domain_pages(unsigned long pfn
     perfc_incrc(map_domain_page_count);
 
     /* If we are the idle domain, ensure that we run on our own page tables. */
-    if ( unlikely(is_idle_vcpu(current)) )
+    d = current->domain;
+    if ( unlikely(is_idle_domain(d)) )
         __sync_lazy_execstate();
 
+    cache = &d->arch.mapcache;
+
     spin_lock(&cache->lock);
 
     /* Has some other CPU caused a wrap? We must flush if so. */
-    if ( cache->epoch != cache->shadow_epoch[vcpu] )
+    if ( unlikely(cache->epoch != cache->shadow_epoch[vcpu]) )
     {
-        perfc_incrc(domain_page_tlb_flush);
-        local_flush_tlb();
         cache->shadow_epoch[vcpu] = cache->epoch;
+        if ( NEED_FLUSH(tlbflush_time[smp_processor_id()],
+                        cache->tlbflush_timestamp) )
+        {
+            perfc_incrc(domain_page_tlb_flush);
+            local_flush_tlb();
+        }
     }
 
     do {
@@ -71,6 +79,7 @@ void *map_domain_pages(unsigned long pfn
         perfc_incrc(domain_page_tlb_flush);
         local_flush_tlb();
         cache->shadow_epoch[vcpu] = ++cache->epoch;
+        cache->tlbflush_timestamp = tlbflush_current_time();
     }
 
     flags = 0;
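The point of the change above is that a cross-CPU wrap of the mapcache no longer forces an unconditional local_flush_tlb(): the wrapping CPU records tlbflush_current_time() in cache->tlbflush_timestamp, and a vcpu that later observes a stale shadow_epoch only flushes if NEED_FLUSH() says its CPU has not flushed its TLB since that wrap. Below is a minimal standalone sketch of the filtering idea; the names clock, tlb_flush_time, cache_wrap_time, cache_wrap() and maybe_flush() are illustrative stand-ins rather than Xen's, and unlike Xen's NEED_FLUSH() the sketch ignores wraparound of the 32-bit timestamp.

#include <stdio.h>
#include <stdint.h>

/* Per-CPU time of the most recent local TLB flush (illustrative). */
static uint32_t tlb_flush_time[4];
/* Global clock that advances on every flush event (illustrative). */
static uint32_t clock;
/* Time of the last mapcache wrap: analogue of cache->tlbflush_timestamp. */
static uint32_t cache_wrap_time;

static void local_flush_tlb(int cpu)
{
    tlb_flush_time[cpu] = ++clock;
    printf("cpu%d: TLB flush at time %u\n", cpu, clock);
}

/* A wrap: the wrapping CPU flushes and stamps the cache, as in the
 * third hunk above (cache->tlbflush_timestamp = tlbflush_current_time()). */
static void cache_wrap(int cpu)
{
    local_flush_tlb(cpu);
    cache_wrap_time = clock;
}

/* Analogue of the NEED_FLUSH() test in the second hunk: flush only if
 * this CPU's last flush predates the wrap, i.e. stale mapcache PTEs may
 * still be cached in its TLB. */
static void maybe_flush(int cpu)
{
    if (tlb_flush_time[cpu] <= cache_wrap_time)
        local_flush_tlb(cpu);
    else
        printf("cpu%d: flush filtered (already flushed since wrap)\n", cpu);
}

int main(void)
{
    cache_wrap(0);   /* CPU0 wraps the cache and flushes itself     */
    maybe_flush(1);  /* CPU1 has not flushed since the wrap: flush  */
    maybe_flush(1);  /* CPU1 flushed after the wrap: filtered       */
    return 0;
}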
--- a/xen/include/asm-x86/domain.h	Tue Jan 10 18:25:45 2006 +0100
+++ b/xen/include/asm-x86/domain.h	Tue Jan 10 18:53:44 2006 +0100
@@ -17,6 +17,7 @@ struct mapcache {
     l1_pgentry_t *l1tab;
     unsigned int cursor;
     unsigned int epoch, shadow_epoch[MAX_VIRT_CPUS];
+    u32 tlbflush_timestamp;
     spinlock_t lock;
 };
 
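For context, the resulting structure pairs the new timestamp with the existing wrap-generation counters. The annotations below are one reading of how the fields are used in domain_page.c, not comments from the source tree:

struct mapcache {
    l1_pgentry_t *l1tab;        /* L1 PTEs backing the mapcache VA region */
    unsigned int cursor;        /* next mapcache slot to try when allocating */
    unsigned int epoch,         /* generation count, bumped on every wrap */
        shadow_epoch[MAX_VIRT_CPUS]; /* last epoch each vcpu synchronized with */
    u32 tlbflush_timestamp;     /* tlbflush_current_time() at the latest wrap */
    spinlock_t lock;
};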
--- a/xen/include/xen/domain_page.h	Tue Jan 10 18:25:45 2006 +0100
+++ b/xen/include/xen/domain_page.h	Tue Jan 10 18:53:44 2006 +0100
@@ -17,21 +17,21 @@
 
 /*
  * Maps a given range of page frames, returning the mapped virtual address. The
- * pages are now accessible within the current domain until a corresponding
+ * pages are now accessible within the current VCPU until a corresponding
  * call to unmap_domain_page().
  */
 extern void *map_domain_pages(unsigned long pfn, unsigned int order);
 
 /*
  * Pass a VA within the first page of a range previously mapped in the context
- * of the currently-executing domain via a call to map_domain_pages(). Those
+ * of the currently-executing VCPU via a call to map_domain_pages(). Those
  * pages will then be removed from the mapping lists.
  */
 extern void unmap_domain_pages(void *va, unsigned int order);
 
 /*
  * Similar to the above calls, except the mapping is accessible in all
- * address spaces (not just within the domain that created the mapping). Global
+ * address spaces (not just within the VCPU that created the mapping). Global
  * mappings can also be unmapped from any context.
  */
 extern void *map_domain_page_global(unsigned long pfn);
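A hypothetical caller following the header comments above: copy_frame() and its parameters are invented for illustration, and memcpy/PAGE_SIZE are assumed available as elsewhere in the hypervisor. Per the updated documentation, both mappings are created and destroyed on the same vcpu.

/* Copy one 4kB frame through the mapcache: map both frames (order 0),
 * copy, then unmap. These non-global mappings are only valid on the
 * vcpu that created them. */
static void copy_frame(unsigned long dst_pfn, unsigned long src_pfn)
{
    void *dst = map_domain_pages(dst_pfn, 0); /* order 0 = 2^0 = 1 page */
    void *src = map_domain_pages(src_pfn, 0);

    memcpy(dst, src, PAGE_SIZE);

    unmap_domain_pages(src, 0);
    unmap_domain_pages(dst, 0);
}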