ia64/xen-unstable
changeset 8200:42d4059108d4
Add a defensive batched TLB flush to free_page_type(), to
ensure that linear_pg_table remains in sync with the modified
page-table structure. Otherwise we can update stale entries
and corrupt reference counts (but probably only when running
a malicious domain).
Signed-off-by: Keir Fraser <keir@xensource.com>
| author   | kaf24@firebug.cl.cam.ac.uk |
|----------|----------------------------|
| date     | Fri Dec 02 18:56:24 2005 +0100 (2005-12-02) |
| parents  | b21be984945a |
| children | 7bf19284bf3f |
| files    | xen/arch/x86/mm.c |
line diff
```
--- a/xen/arch/x86/mm.c	Fri Dec 02 18:00:49 2005 +0100
+++ b/xen/arch/x86/mm.c	Fri Dec 02 18:56:24 2005 +0100
@@ -128,8 +128,9 @@ static int mod_l1_entry(l1_pgentry_t *,
 
 /* Used to defer flushing of memory structures. */
 static struct {
-#define DOP_FLUSH_TLB    (1<<0) /* Flush the TLB.                 */
-#define DOP_RELOAD_LDT   (1<<1) /* Reload the LDT shadow mapping. */
+#define DOP_FLUSH_TLB      (1<<0) /* Flush the local TLB.                    */
+#define DOP_FLUSH_ALL_TLBS (1<<1) /* Flush TLBs of all VCPUs of current dom. */
+#define DOP_RELOAD_LDT     (1<<2) /* Reload the LDT shadow mapping.          */
     unsigned int deferred_ops;
     /* If non-NULL, specifies a foreign subject domain for some operations. */
     struct domain *foreign;
@@ -1323,14 +1324,28 @@ void free_page_type(struct pfn_info *pag
     struct domain *owner = page_get_owner(page);
     unsigned long gpfn;
 
-    if ( unlikely((owner != NULL) && shadow_mode_enabled(owner)) )
+    if ( likely(owner != NULL) )
     {
-        mark_dirty(owner, page_to_pfn(page));
-        if ( unlikely(shadow_mode_refcounts(owner)) )
-            return;
-        gpfn = __mfn_to_gpfn(owner, page_to_pfn(page));
-        ASSERT(VALID_M2P(gpfn));
-        remove_shadow(owner, gpfn, type & PGT_type_mask);
+        /*
+         * We have to flush before the next use of the linear mapping
+         * (e.g., update_va_mapping()) or we could end up modifying a page
+         * that is no longer a page table (and hence screw up ref counts).
+         */
+        percpu_info[smp_processor_id()].deferred_ops |= DOP_FLUSH_ALL_TLBS;
+
+        if ( unlikely(shadow_mode_enabled(owner)) )
+        {
+            /* Raw page tables are rewritten during save/restore. */
+            if ( !shadow_mode_translate(owner) )
+                mark_dirty(owner, page_to_pfn(page));
+
+            if ( shadow_mode_refcounts(owner) )
+                return;
+
+            gpfn = __mfn_to_gpfn(owner, page_to_pfn(page));
+            ASSERT(VALID_M2P(gpfn));
+            remove_shadow(owner, gpfn, type & PGT_type_mask);
+        }
     }
 
     switch ( type & PGT_type_mask )
@@ -1600,11 +1615,14 @@ static void process_deferred_ops(unsigne
     deferred_ops = percpu_info[cpu].deferred_ops;
     percpu_info[cpu].deferred_ops = 0;
 
-    if ( deferred_ops & DOP_FLUSH_TLB )
+    if ( deferred_ops & (DOP_FLUSH_ALL_TLBS|DOP_FLUSH_TLB) )
     {
         if ( shadow_mode_enabled(d) )
             shadow_sync_all(d);
-        local_flush_tlb();
+        if ( deferred_ops & DOP_FLUSH_ALL_TLBS )
+            flush_tlb_mask(d->cpumask);
+        else
+            local_flush_tlb();
     }
 
     if ( deferred_ops & DOP_RELOAD_LDT )
```
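For readers who want the shape of the fix without the surrounding Xen machinery, here is a minimal sketch of the deferred-ops pattern the changeset extends. The flag names mirror the patch, but everything else (the plain `deferred_ops` array, the logging flush stubs, the integer cpumask) is simplified stand-in code rather than the real Xen implementation; it compiles as an ordinary user-space C program.

```
/* Minimal model of Xen's deferred-ops TLB flush batching.
 * Stand-in code: flush primitives just log instead of touching hardware. */
#include <stdio.h>

#define NR_CPUS 4

/* Flag bits mirroring the changeset's deferred-op definitions. */
#define DOP_FLUSH_TLB      (1 << 0)  /* Flush only the local TLB.            */
#define DOP_FLUSH_ALL_TLBS (1 << 1)  /* Flush TLBs of all VCPUs of the dom.  */
#define DOP_RELOAD_LDT     (1 << 2)  /* Reload the LDT shadow mapping.       */

/* Per-CPU accumulator; real Xen keeps this inside percpu_info[]. */
static unsigned int deferred_ops[NR_CPUS];

/* Logging stand-ins for the real flush primitives. */
static void local_flush_tlb(int cpu)
{
    printf("cpu%d: local TLB flush\n", cpu);
}

static void flush_tlb_mask(unsigned int cpumask)
{
    printf("flush TLBs of all CPUs in mask 0x%x\n", cpumask);
}

/* A page-table page is losing its type: queue the strong flush instead of
 * flushing immediately, so many frees in one hypercall cost one flush. */
static void free_page_type(int cpu)
{
    deferred_ops[cpu] |= DOP_FLUSH_ALL_TLBS;
}

/* Runs once at the end of the batched operation, like the real
 * process_deferred_ops(): read and clear the bits, then do the single
 * strongest flush that was requested. */
static void process_deferred_ops(int cpu, unsigned int domain_cpumask)
{
    unsigned int ops = deferred_ops[cpu];
    deferred_ops[cpu] = 0;

    if ( ops & (DOP_FLUSH_ALL_TLBS | DOP_FLUSH_TLB) )
    {
        if ( ops & DOP_FLUSH_ALL_TLBS )
            flush_tlb_mask(domain_cpumask);
        else
            local_flush_tlb(cpu);
    }
}

int main(void)
{
    int cpu = 0;

    /* Three page-table frees inside one batched operation... */
    free_page_type(cpu);
    free_page_type(cpu);
    free_page_type(cpu);

    /* ...still produce exactly one domain-wide flush at the end. */
    process_deferred_ops(cpu, 0x5 /* domain runs on CPUs 0 and 2 */);
    return 0;
}
```

The design point is that DOP_FLUSH_ALL_TLBS subsumes DOP_FLUSH_TLB: the dispatcher tests both bits together and escalates to the cross-CPU flush only when the stronger bit is set. Repeated free_page_type() calls within one batched hypercall therefore cost a single domain-wide flush, while operations that only dirtied the local TLB keep the cheap local path.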