ia64/xen-unstable
changeset 15311:2c8c6ca1296b
[XEN] Clean up locking/init code around log-dirty interfaces
to avoid deadlocks and make sure locks/functions are in place for
PV domains to be put in log-dirty mode if they're not already shadowed.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author    Tim Deegan <Tim.Deegan@xensource.com>
date      Mon Jun 11 14:38:46 2007 +0100 (2007-06-11)
parents   3d5f39c610ad
children  ed254cf78f7c
files     xen/arch/x86/mm/hap/hap.c xen/arch/x86/mm/paging.c xen/arch/x86/mm/shadow/common.c
line diff
--- a/xen/arch/x86/mm/hap/hap.c	Mon Jun 11 14:35:52 2007 +0100
+++ b/xen/arch/x86/mm/hap/hap.c	Mon Jun 11 14:38:46 2007 +0100
@@ -425,6 +425,10 @@ void hap_domain_init(struct domain *d)
 {
     hap_lock_init(d);
     INIT_LIST_HEAD(&d->arch.paging.hap.freelists);
+
+    /* This domain will use HAP for log-dirty mode */
+    paging_log_dirty_init(d, hap_enable_log_dirty, hap_disable_log_dirty,
+                          hap_clean_dirty_bitmap);
 }
 
 /* return 0 for success, -errno for failure */
@@ -455,10 +459,6 @@ int hap_enable(struct domain *d, u32 mod
         }
     }
 
-    /* initialize log dirty here */
-    paging_log_dirty_init(d, hap_enable_log_dirty, hap_disable_log_dirty,
-                          hap_clean_dirty_bitmap);
-
     /* allocate P2m table */
     if ( mode & PG_translate ) {
         rv = p2m_alloc_table(d, hap_alloc_p2m_page, hap_free_p2m_page);
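Both this hunk and the matching one in shadow/common.c below move the paging_log_dirty_init() call from the mode-specific enable path into the mode-specific domain-init path, so the callbacks exist from domain creation onwards. A minimal sketch of that registration pattern, with simplified, hypothetical stand-ins for the real Xen structures (struct log_dirty_ops, the field names, and log_dirty_register() are all invented for illustration):

/* Hypothetical, simplified model of registering log-dirty callbacks
 * at domain-init time rather than at paging-enable time. */
struct domain;   /* forward declaration for the callback signatures */

struct log_dirty_ops {
    int  (*enable)(struct domain *d);   /* start tracking dirty pages */
    int  (*disable)(struct domain *d);  /* stop tracking              */
    void (*clean)(struct domain *d);    /* re-arm traps, clear bitmap */
};

struct domain {
    struct log_dirty_ops log_dirty;     /* invented field name        */
};

/* Called once from the mode's *_domain_init(); after this, generic
 * paging code can enable log-dirty mode at any time, even for a PV
 * domain that has never been shadowed. */
static void log_dirty_register(struct domain *d, struct log_dirty_ops ops)
{
    d->log_dirty = ops;
}

The real code passes the three callbacks as separate arguments to paging_log_dirty_init(); the ops-table struct above is just to keep the sketch short.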
--- a/xen/arch/x86/mm/paging.c	Mon Jun 11 14:35:52 2007 +0100
+++ b/xen/arch/x86/mm/paging.c	Mon Jun 11 14:38:46 2007 +0100
@@ -53,6 +53,21 @@ boolean_param("hap", opt_hap_enabled);
 #undef page_to_mfn
 #define page_to_mfn(_pg) (_mfn((_pg) - frame_table))
 
+/* The log-dirty lock.  This protects the log-dirty bitmap from
+ * concurrent accesses (and teardowns, etc).
+ *
+ * Locking discipline: always acquire shadow or HAP lock before this one.
+ *
+ * Because mark_dirty is called from a lot of places, the log-dirty lock
+ * may be acquired with the shadow or HAP locks already held.  When the
+ * log-dirty code makes callbacks into HAP or shadow code to reset
+ * various traps that will trigger the mark_dirty calls, it must *not*
+ * have the log-dirty lock held, or it risks deadlock.  Because the only
+ * purpose of those calls is to make sure that *guest* actions will
+ * cause mark_dirty to be called (hypervisor actions explicitly call it
+ * anyway), it is safe to release the log-dirty lock before the callback
+ * as long as the domain is paused for the entire operation. */
+
 #define log_dirty_lock_init(_d)                                    \
     do {                                                           \
         spin_lock_init(&(_d)->arch.paging.log_dirty.lock);         \
@@ -85,7 +100,9 @@ boolean_param("hap", opt_hap_enabled);
 /* allocate bitmap resources for log dirty */
 int paging_alloc_log_dirty_bitmap(struct domain *d)
 {
-    ASSERT(d->arch.paging.log_dirty.bitmap == NULL);
+    if ( d->arch.paging.log_dirty.bitmap != NULL )
+        return 0;
+
     d->arch.paging.log_dirty.bitmap_size =
         (domain_get_maximum_gpfn(d) + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
     d->arch.paging.log_dirty.bitmap =
@@ -133,9 +150,16 @@ int paging_log_dirty_enable(struct domai
         goto out;
     }
 
-    ret = d->arch.paging.log_dirty.enable_log_dirty(d);
-    if ( ret != 0 )
-        paging_free_log_dirty_bitmap(d);
+    log_dirty_unlock(d);
+
+    /* Safe because the domain is paused. */
+    ret = d->arch.paging.log_dirty.enable_log_dirty(d);
+
+    /* Possibility of leaving the bitmap allocated here but it'll be
+     * tidied on domain teardown. */
+
+    domain_unpause(d);
+    return ret;
+
  out:
     log_dirty_unlock(d);
@@ -148,8 +172,9 @@ int paging_log_dirty_disable(struct doma
     int ret;
 
     domain_pause(d);
+    /* Safe because the domain is paused. */
+    ret = d->arch.paging.log_dirty.disable_log_dirty(d);
     log_dirty_lock(d);
-    ret = d->arch.paging.log_dirty.disable_log_dirty(d);
     if ( !paging_mode_log_dirty(d) )
         paging_free_log_dirty_bitmap(d);
     log_dirty_unlock(d);
@@ -182,7 +207,10 @@ void paging_mark_dirty(struct domain *d,
      * Nothing to do here...
      */
     if ( unlikely(!VALID_M2P(pfn)) )
+    {
+        log_dirty_unlock(d);
         return;
+    }
 
     if ( likely(pfn < d->arch.paging.log_dirty.bitmap_size) )
     {
@@ -237,11 +265,6 @@ int paging_log_dirty_op(struct domain *d
     {
         d->arch.paging.log_dirty.fault_count = 0;
         d->arch.paging.log_dirty.dirty_count = 0;
-
-        /* We need to further call clean_dirty_bitmap() functions of specific
-         * paging modes (shadow or hap).
-         */
-        d->arch.paging.log_dirty.clean_dirty_bitmap(d);
     }
 
     if ( guest_handle_is_null(sc->dirty_bitmap) )
@@ -280,6 +303,17 @@ int paging_log_dirty_op(struct domain *d
     }
 #undef CHUNK
 
+    log_dirty_unlock(d);
+
+    if ( clean )
+    {
+        /* We need to further call clean_dirty_bitmap() functions of specific
+         * paging modes (shadow or hap).  Safe because the domain is paused. */
+        d->arch.paging.log_dirty.clean_dirty_bitmap(d);
+    }
+    domain_unpause(d);
+    return rv;
+
  out:
     log_dirty_unlock(d);
     domain_unpause(d);
@@ -291,6 +325,8 @@ int paging_log_dirty_op(struct domain *d
  * these functions for log dirty code to call. This function usually is
  * invoked when paging is enabled. Check shadow_enable() and hap_enable() for
  * reference.
+ *
+ * These function pointers must not be followed with the log-dirty lock held.
  */
 void paging_log_dirty_init(struct domain *d,
                            int (*enable_log_dirty)(struct domain *d),
@@ -319,8 +355,13 @@ void paging_log_dirty_teardown(struct do
 void paging_domain_init(struct domain *d)
 {
     p2m_init(d);
+
+    /* The order of the *_init calls below is important, as the later
+     * ones may rewrite some common fields.  Shadow pagetables are the
+     * default... */
     shadow_domain_init(d);
 
+    /* ... but we will use hardware assistance if it's available. */
     if ( opt_hap_enabled && is_hvm_domain(d) )
         hap_domain_init(d);
 }
@@ -397,13 +438,13 @@ int paging_domctl(struct domain *d, xen_
 /* Call when destroying a domain */
 void paging_teardown(struct domain *d)
 {
-    /* clean up log dirty resources. */
-    paging_log_dirty_teardown(d);
-
     if ( opt_hap_enabled && is_hvm_domain(d) )
         hap_teardown(d);
     else
         shadow_teardown(d);
+
+    /* clean up log dirty resources. */
+    paging_log_dirty_teardown(d);
 }
 
 /* Call once all of the references to the domain have gone away */
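The new comment at the top of this file is the heart of the change, and the enable, disable, and log_dirty_op paths are all rewritten to obey it: do the bitmap bookkeeping under the log-dirty lock, drop the lock, and only then follow the mode callbacks, relying on the domain staying paused. A hedged sketch of that sequence, with pthread mutexes and invented helpers standing in for Xen's spinlocks and domain-pause API (not the real signatures):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified stand-ins for the real Xen primitives. */
struct domain {
    pthread_mutex_t log_dirty_lock;             /* the log-dirty lock */
    unsigned long  *log_dirty_bitmap;           /* dirty-page bitmap  */
    int (*enable_log_dirty)(struct domain *d);  /* mode callback      */
};

static void domain_pause(struct domain *d)   { (void)d; }  /* stub */
static void domain_unpause(struct domain *d) { (void)d; }  /* stub */

static int log_dirty_enable(struct domain *d)
{
    int ret;

    domain_pause(d);                    /* freeze guest actions first */

    pthread_mutex_lock(&d->log_dirty_lock);
    if ( d->log_dirty_bitmap == NULL )  /* bookkeeping under the lock */
        d->log_dirty_bitmap = calloc(1024, sizeof(unsigned long));
    ret = (d->log_dirty_bitmap == NULL) ? -1 : 0;
    pthread_mutex_unlock(&d->log_dirty_lock);

    /* The callback runs *without* the log-dirty lock held: it may
     * re-enter code that takes the shadow/HAP lock and then the
     * log-dirty lock, so holding the lock here would invert the
     * ordering and risk deadlock.  Dropping it early is safe only
     * because the domain stays paused for the whole operation. */
    if ( ret == 0 )
        ret = d->enable_log_dirty(d);

    domain_unpause(d);
    return ret;
}

static int fake_enable(struct domain *d) { (void)d; return 0; }

int main(void)
{
    struct domain d = { PTHREAD_MUTEX_INITIALIZER, NULL, fake_enable };
    printf("enable -> %d\n", log_dirty_enable(&d));
    return 0;
}

The same shape explains the disable and log_dirty_op hunks: disable_log_dirty() is now called before taking the lock, and log_dirty_op drops the lock before invoking clean_dirty_bitmap().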
--- a/xen/arch/x86/mm/shadow/common.c	Mon Jun 11 14:35:52 2007 +0100
+++ b/xen/arch/x86/mm/shadow/common.c	Mon Jun 11 14:38:46 2007 +0100
@@ -49,6 +49,10 @@ void shadow_domain_init(struct domain *d
     INIT_LIST_HEAD(&d->arch.paging.shadow.freelists[i]);
     INIT_LIST_HEAD(&d->arch.paging.shadow.p2m_freelist);
     INIT_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
+
+    /* Use shadow pagetables for log-dirty support */
+    paging_log_dirty_init(d, shadow_enable_log_dirty,
+                          shadow_disable_log_dirty, shadow_clean_dirty_bitmap);
 }
 
 /* Setup the shadow-specific parts of a vcpu struct. Note: The most important
@@ -2453,10 +2457,6 @@ int shadow_enable(struct domain *d, u32
         }
     }
 
-    /* initialize log dirty here */
-    paging_log_dirty_init(d, shadow_enable_log_dirty,
-                          shadow_disable_log_dirty, shadow_clean_dirty_bitmap);
-
     /* Init the P2M table.  Must be done before we take the shadow lock
      * to avoid possible deadlock. */
     if ( mode & PG_translate )
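One smaller fix back in the paging.c hunk is easy to miss: paging_mark_dirty() used to return on the !VALID_M2P(pfn) path with the log-dirty lock still held. A minimal sketch of the corrected shape, where every exit path releases the lock (again with invented, simplified types rather than the real function):

#include <pthread.h>
#include <limits.h>

/* Hypothetical, simplified model of paging_mark_dirty()'s control
 * flow after the fix. */
struct domain {
    pthread_mutex_t log_dirty_lock;
    unsigned long  *bitmap;
    unsigned long   bitmap_size;          /* size in bits */
};

#define BITS_PER_WORD (8 * sizeof(unsigned long))
#define INVALID_PFN   ULONG_MAX           /* stands in for !VALID_M2P() */

static void mark_dirty(struct domain *d, unsigned long pfn)
{
    pthread_mutex_lock(&d->log_dirty_lock);

    if ( pfn == INVALID_PFN )
    {
        /* The bug fixed above: this early return used to leave the
         * lock held, wedging every later log-dirty operation. */
        pthread_mutex_unlock(&d->log_dirty_lock);
        return;
    }

    if ( pfn < d->bitmap_size )
        d->bitmap[pfn / BITS_PER_WORD] |= 1UL << (pfn % BITS_PER_WORD);

    pthread_mutex_unlock(&d->log_dirty_lock);
}

The real function also maintains fault/dirty counters; the sketch keeps only the locking shape that the fix is about.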