ia64/xen-unstable
changeset 8580:87a97054b469
Re-enable per-cpu cache on map_domain_page(). Fix the
offending caller that broke the vcpu cache (writable
pagetable code kept a mapping outstanding until flush,
which could occur on a different vcpu).
Signed-off-by: Keir Fraser <keir@xensource.com>
| author | kaf24@firebug.cl.cam.ac.uk |
|---|---|
| date | Thu Jan 12 15:43:22 2006 +0100 (2006-01-12) |
| parents | bfcdf4099d23 |
| children | 4520b451a70e |
| files | xen/arch/x86/mm.c xen/arch/x86/x86_32/domain_page.c xen/include/asm-x86/mm.h |
line diff
--- a/xen/arch/x86/mm.c	Thu Jan 12 14:27:55 2006 +0100
+++ b/xen/arch/x86/mm.c	Thu Jan 12 15:43:22 2006 +0100
@@ -2999,7 +2999,7 @@ void ptwr_flush(struct domain *d, const
         BUG();
     }
     PTWR_PRINTK("[%c] disconnected_l1va at %p is %"PRIpte"\n",
-                PTWR_PRINT_WHICH, ptep, pte.l1);
+                PTWR_PRINT_WHICH, ptep, l1e_get_intpte(pte));
     l1e_remove_flags(pte, _PAGE_RW);
 
     /* Write-protect the p.t. page in the guest page table. */
@@ -3017,13 +3017,13 @@ void ptwr_flush(struct domain *d, const
     /* NB. INVLPG is a serialising instruction: flushes pending updates. */
     flush_tlb_one_mask(d->domain_dirty_cpumask, l1va);
     PTWR_PRINTK("[%c] disconnected_l1va at %p now %"PRIpte"\n",
-                PTWR_PRINT_WHICH, ptep, pte.l1);
+                PTWR_PRINT_WHICH, ptep, l1e_get_intpte(pte));
 
     /*
      * STEP 2. Validate any modified PTEs.
      */
 
-    pl1e = d->arch.ptwr[which].pl1e;
+    pl1e = map_domain_page(l1e_get_pfn(pte));
     modified = revalidate_l1(d, pl1e, d->arch.ptwr[which].page);
     unmap_domain_page(pl1e);
     perfc_incr_histo(wpt_updates, modified, PT_UPDATES);
@@ -3206,7 +3206,7 @@ int ptwr_do_page_fault(struct domain *d,
 {
     unsigned long pfn;
     struct pfn_info *page;
-    l1_pgentry_t pte;
+    l1_pgentry_t *pl1e, pte;
     l2_pgentry_t *pl2e, l2e;
     int which, flags;
     unsigned long l2_idx;
@@ -3343,11 +3343,10 @@ int ptwr_do_page_fault(struct domain *d,
     }
 
     /* Temporarily map the L1 page, and make a copy of it. */
-    d->arch.ptwr[which].pl1e = map_domain_page(pfn);
-    memcpy(d->arch.ptwr[which].page,
-           d->arch.ptwr[which].pl1e,
-           L1_PAGETABLE_ENTRIES * sizeof(l1_pgentry_t));
-
+    pl1e = map_domain_page(pfn);
+    memcpy(d->arch.ptwr[which].page, pl1e, PAGE_SIZE);
+    unmap_domain_page(pl1e);
+
     /* Finally, make the p.t. page writable by the guest OS. */
     l1e_add_flags(pte, _PAGE_RW);
     if ( unlikely(__put_user(pte.l1,
@@ -3356,7 +3355,6 @@ int ptwr_do_page_fault(struct domain *d,
         MEM_LOG("ptwr: Could not update pte at %p", (unsigned long *)
                 &linear_pg_table[l1_linear_offset(addr)]);
        /* Toss the writable pagetable state and crash. */
-        unmap_domain_page(d->arch.ptwr[which].pl1e);
         d->arch.ptwr[which].l1va = 0;
         domain_crash(d);
         return 0;
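The mm.c change is a discipline fix rather than new functionality: ptwr_do_page_fault() now maps the L1 page only long enough to snapshot it, and ptwr_flush() re-derives the mapping from the PTE when it actually needs one, so no map_domain_page() mapping is left outstanding across a point where the flush can run on a different vcpu. Below is a minimal sketch of the resulting map/copy/unmap pattern; snapshot_pt_page() and struct pt_snapshot are hypothetical stand-ins, not the real ptwr state.

```c
/*
 * Sketch only: snapshot_pt_page() and struct pt_snapshot are hypothetical
 * stand-ins for the ptwr state; map_domain_page()/unmap_domain_page() are
 * the real Xen interfaces being exercised.
 */
#include <string.h>

#define PAGE_SIZE 4096

void *map_domain_page(unsigned long pfn);   /* backed by the per-vcpu mapcache */
void unmap_domain_page(void *va);

struct pt_snapshot {
    unsigned char copy[PAGE_SIZE];          /* pre-write copy of the p.t. page */
};

static void snapshot_pt_page(struct pt_snapshot *snap, unsigned long pfn)
{
    /*
     * Map, copy, unmap in one go on the current vcpu.  The old code instead
     * stashed the mapping in per-domain state and unmapped it at flush time,
     * which could happen on another vcpu and so corrupted that vcpu's
     * map_domain_page() hash cache.
     */
    void *va = map_domain_page(pfn);
    memcpy(snap->copy, va, PAGE_SIZE);
    unmap_domain_page(va);
}
```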
--- a/xen/arch/x86/x86_32/domain_page.c	Thu Jan 12 14:27:55 2006 +0100
+++ b/xen/arch/x86/x86_32/domain_page.c	Thu Jan 12 15:43:22 2006 +0100
@@ -40,7 +40,6 @@ void *map_domain_page(unsigned long pfn)
     cache = &d->arch.mapcache;
 
     hashent = &cache->vcpu_maphash[vcpu].hash[MAPHASH_HASHFN(pfn)];
-#if 0
     if ( hashent->pfn == pfn )
     {
         idx = hashent->idx;
@@ -49,7 +48,6 @@ void *map_domain_page(unsigned long pfn)
         ASSERT(l1e_get_pfn(cache->l1tab[idx]) == pfn);
         goto out;
     }
-#endif
 
     spin_lock(&cache->lock);
 
@@ -92,7 +90,7 @@ void *map_domain_page(unsigned long pfn)
 
     cache->l1tab[idx] = l1e_from_pfn(pfn, __PAGE_HYPERVISOR);
 
-/*out:*/
+ out:
     va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT);
     return (void *)va;
 }
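The domain_page.c hunks simply drop the #if 0 / #endif guards, re-enabling the per-vcpu hash in front of the shared mapcache and restoring the out: label that the fast path jumps to. A rough model of that fast path is below; the structure layout, sizes, and helper name are illustrative assumptions based only on the identifiers visible in the hunk.

```c
/* Illustrative model of the per-vcpu map_domain_page() fast path; field and
 * constant values are assumptions, only the identifiers echo the real code. */
#define MAPHASH_ENTRIES      8
#define MAPHASH_HASHFN(pfn)  ((pfn) & (MAPHASH_ENTRIES - 1))

struct vcpu_maphash_entry {
    unsigned long pfn;      /* frame this vcpu last mapped into the slot */
    unsigned int  idx;      /* index of that slot in the shared l1 table */
};

struct vcpu_maphash {
    struct vcpu_maphash_entry hash[MAPHASH_ENTRIES];
};

/* Returns 1 and sets *idx on a hit; 0 means fall back to the locked path. */
static int maphash_lookup(const struct vcpu_maphash *mh, unsigned long pfn,
                          unsigned int *idx)
{
    const struct vcpu_maphash_entry *ent = &mh->hash[MAPHASH_HASHFN(pfn)];

    if ( ent->pfn != pfn )
        return 0;

    *idx = ent->idx;        /* reuse the existing mapping, no spinlock taken */
    return 1;
}
```

The lookup can skip the mapcache spinlock only because each vcpu touches its own hash entries exclusively; a mapping held until a flush on a different vcpu (the ptwr bug fixed above) breaks exactly that assumption, which is why the fast path had been disabled.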
--- a/xen/include/asm-x86/mm.h	Thu Jan 12 14:27:55 2006 +0100
+++ b/xen/include/asm-x86/mm.h	Thu Jan 12 15:43:22 2006 +0100
@@ -309,16 +309,13 @@ struct ptwr_info {
     unsigned long l1va;
     /* Copy of the p.t. page, taken before guest is given write access. */
     l1_pgentry_t *page;
-    /* A temporary Xen mapping of the actual p.t. page. */
-    l1_pgentry_t *pl1e;
     /* Index in L2 page table where this L1 p.t. is always hooked. */
     unsigned int l2_idx; /* NB. Only used for PTWR_PT_ACTIVE. */
     /* Info about last ptwr update batch. */
     unsigned int prev_nr_updates;
-    /* Exec domain which created writable mapping. */
+    /* VCPU which created writable mapping. */
     struct vcpu *vcpu;
-    /* EIP of the address which took the original write fault
-       used for stats collection only */
+    /* EIP of the original write fault (stats collection only). */
     unsigned long eip;
 };
 
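For reference, this leaves struct ptwr_info looking as follows (reconstructed directly from the hunk; any leading comment on l1va lies outside the hunk's context and is omitted):

```c
struct ptwr_info {
    unsigned long l1va;
    /* Copy of the p.t. page, taken before guest is given write access. */
    l1_pgentry_t *page;
    /* Index in L2 page table where this L1 p.t. is always hooked. */
    unsigned int l2_idx; /* NB. Only used for PTWR_PT_ACTIVE. */
    /* Info about last ptwr update batch. */
    unsigned int prev_nr_updates;
    /* VCPU which created writable mapping. */
    struct vcpu *vcpu;
    /* EIP of the original write fault (stats collection only). */
    unsigned long eip;
};
```

Dropping pl1e removes the only field that stored a live map_domain_page() mapping for later use; the flush path now re-maps the frame from the PTE (see the mm.c hunk), so nothing needs to persist here.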