ia64/xen-unstable
changeset 9086:e0f66dbe4b13
map_domain_page() now handles running on idle page tables.
Signed-off-by: Keir Fraser <keir@xensource.com>
author      kaf24@firebug.cl.cam.ac.uk
date        Wed Mar 01 18:34:06 2006 +0100 (2006-03-01)
parents     4d979b6689ea
children    b0dfd76ef5d2
files       xen/arch/x86/x86_32/domain_page.c
line diff
--- a/xen/arch/x86/x86_32/domain_page.c    Wed Mar 01 18:33:36 2006 +0100
+++ b/xen/arch/x86/x86_32/domain_page.c    Wed Mar 01 18:34:06 2006 +0100
@@ -11,15 +11,40 @@
 #include <xen/mm.h>
 #include <xen/perfc.h>
 #include <xen/domain_page.h>
+#include <xen/shadow.h>
 #include <asm/current.h>
 #include <asm/flushtlb.h>
 #include <asm/hardirq.h>

+static inline struct vcpu *mapcache_current_vcpu(void)
+{
+    struct vcpu *v;
+
+    /* In the common case we use the mapcache of the running VCPU. */
+    v = current;
+
+    /*
+     * If guest_table is NULL, and we are running a paravirtualised guest,
+     * then it means we are running on the idle domain's page table and must
+     * therefore use its mapcache.
+     */
+    if ( unlikely(!pagetable_get_pfn(v->arch.guest_table)) && !HVM_DOMAIN(v) )
+    {
+        /* If we really are idling, perform lazy context switch now. */
+        if ( (v = idle_vcpu[smp_processor_id()]) == current )
+            __sync_lazy_execstate();
+        /* We must now be running on the idle page table. */
+        ASSERT(read_cr3() == __pa(idle_pg_table));
+    }
+
+    return v;
+}
+
 void *map_domain_page(unsigned long pfn)
 {
     unsigned long va;
-    unsigned int idx, i, vcpu = current->vcpu_id;
-    struct domain *d;
+    unsigned int idx, i, vcpu;
+    struct vcpu *v;
     struct mapcache *cache;
     struct vcpu_maphash_entry *hashent;

@@ -27,12 +52,10 @@ void *map_domain_page(unsigned long pfn)

     perfc_incrc(map_domain_page_count);

-    /* If we are the idle domain, ensure that we run on our own page tables. */
-    d = current->domain;
-    if ( unlikely(is_idle_domain(d)) )
-        __sync_lazy_execstate();
+    v = mapcache_current_vcpu();

-    cache = &d->arch.mapcache;
+    vcpu = v->vcpu_id;
+    cache = &v->domain->arch.mapcache;

     hashent = &cache->vcpu_maphash[vcpu].hash[MAPHASH_HASHFN(pfn)];
     if ( hashent->pfn == pfn )
@@ -93,7 +116,8 @@ void *map_domain_page(unsigned long pfn)
 void unmap_domain_page(void *va)
 {
     unsigned int idx;
-    struct mapcache *cache = &current->domain->arch.mapcache;
+    struct vcpu *v;
+    struct mapcache *cache;
     unsigned long pfn;
     struct vcpu_maphash_entry *hashent;

@@ -102,9 +126,13 @@ void unmap_domain_page(void *va)
     ASSERT((void *)MAPCACHE_VIRT_START <= va);
     ASSERT(va < (void *)MAPCACHE_VIRT_END);

+    v = mapcache_current_vcpu();
+
+    cache = &v->domain->arch.mapcache;
+
     idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
     pfn = l1e_get_pfn(cache->l1tab[idx]);
-    hashent = &cache->vcpu_maphash[current->vcpu_id].hash[MAPHASH_HASHFN(pfn)];
+    hashent = &cache->vcpu_maphash[v->vcpu_id].hash[MAPHASH_HASHFN(pfn)];

     if ( hashent->idx == idx )
     {
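
For context, map_domain_page()/unmap_domain_page() provide short-lived hypervisor mappings of arbitrary page frames through the per-domain mapcache that this changeset reworks. The caller-side sketch below is illustrative only and is not part of the changeset: copy_to_frame() is a hypothetical helper, while map_domain_page(), unmap_domain_page(), ASSERT() and PAGE_SIZE are the existing Xen interfaces it exercises.

    /* Hypothetical example: copy a buffer into an arbitrary page frame by
     * taking a transient mapping through the mapcache. */
    static void copy_to_frame(unsigned long pfn, const void *src, size_t len)
    {
        void *va;

        ASSERT(len <= PAGE_SIZE);

        /* Map the frame; with this changeset the call also works while
         * running on the idle page tables. */
        va = map_domain_page(pfn);
        memcpy(va, src, len);

        /* Release the transient mapping so the mapcache slot can be reused. */
        unmap_domain_page(va);
    }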