direct-io.hg
changeset 4864:ab9da433c598
bitkeeper revision 1.1389.19.3 (42832ff7ACb43Qx1ZO4faAq_Dh0ClA)
Make shadow_lock() acquire the recursive per-domain BIGLOCK. This change
is easily reverted at the top of shadow.h. This also fixes a problem
with nested shadow_locking -- this is okay because BIGLOCK is nestable.
Signed-off-by: Keir Fraser <keir@xensource.com>
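The key property this change relies on is that BIGLOCK is recursive: a CPU that already holds it can take it again without deadlocking, which is what makes nested shadow_lock() calls safe. As a rough illustration of how such a lock behaves (a hypothetical sketch only; Xen's actual recursive lock primitives use their own field names and live in the spinlock code):

    /* Hypothetical recursive-spinlock sketch; not Xen's real implementation. */
    typedef struct {
        spinlock_t lock;       /* underlying non-recursive spinlock */
        int        owner_cpu;  /* CPU currently holding the lock, or -1 */
        unsigned   depth;      /* how many times the owner has taken it */
    } rspinlock_t;

    static void rspin_lock(rspinlock_t *l)
    {
        if ( l->owner_cpu != smp_processor_id() )
        {
            spin_lock(&l->lock);               /* first acquisition: spin */
            l->owner_cpu = smp_processor_id();
        }
        l->depth++;                            /* nested acquisition: count */
    }

    static void rspin_unlock(rspinlock_t *l)
    {
        if ( --l->depth == 0 )
        {
            l->owner_cpu = -1;
            spin_unlock(&l->lock);             /* final release: unlock */
        }
    }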
author:   kaf24@firebug.cl.cam.ac.uk
date:     Thu May 12 10:29:11 2005 +0000 (2005-05-12)
parents:  cabc09d5f3b8
children: b6186f6b202e
files:    xen/arch/x86/shadow.c xen/include/asm-x86/domain.h xen/include/asm-x86/shadow.h
line diff
--- a/xen/arch/x86/shadow.c	Thu May 12 09:28:21 2005 +0000
+++ b/xen/arch/x86/shadow.c	Thu May 12 10:29:11 2005 +0000
@@ -1217,7 +1217,7 @@ static int shadow_mode_table_op(
     int i, rc = 0;
     struct exec_domain *ed;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
 
     SH_VLOG("shadow mode table op %lx %lx count %d",
             pagetable_val(d->exec_domain[0]->arch.guest_table), /* XXX SMP */
@@ -1813,7 +1813,7 @@ shadow_mark_mfn_out_of_sync(struct exec_
     struct pfn_info *page = &frame_table[mfn];
     struct out_of_sync_entry *entry = shadow_alloc_oos_entry(d);
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(pfn_valid(mfn));
 
 #ifndef NDEBUG
@@ -1943,7 +1943,7 @@ int __shadow_out_of_sync(struct exec_dom
     l2_pgentry_t l2e;
     unsigned long l1pfn, l1mfn;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(VALID_M2P(l2pfn));
 
     perfc_incrc(shadow_out_of_sync_calls);
@@ -2127,7 +2127,7 @@ int shadow_remove_all_write_access(
     u32 found = 0, fixups, write_refs;
     unsigned long prediction, predicted_gpfn, predicted_smfn;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(VALID_MFN(readonly_gmfn));
 
     perfc_incrc(remove_write_access);
@@ -2245,7 +2245,7 @@ u32 shadow_remove_all_access(struct doma
     if ( unlikely(!shadow_mode_enabled(d)) )
         return 0;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     perfc_incrc(remove_all_access);
 
     for (i = 0; i < shadow_ht_buckets; i++)
@@ -2287,7 +2287,7 @@ static int resync_all(struct domain *d, 
     int unshadow;
     int changed;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
 
     for ( entry = d->arch.out_of_sync; entry; entry = entry->next)
     {
@@ -2485,7 +2485,7 @@ void __shadow_sync_all(struct domain *d)
 
     perfc_incrc(shadow_sync_all);
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
 
     // First, remove all write permissions to the page tables
     //
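The shadow.c hunks above are mechanical: every open-coded spin_is_locked() check on the old per-domain spinlock becomes the new shadow_lock_is_acquired() predicate, so the assertions stay valid whichever locking scheme shadow.h selects. The pattern being protected looks roughly like this (function names here are illustrative, not from the source):

    /* Illustrative caller/callee pair; not code from this changeset. */
    static void shadow_helper(struct domain *d)
    {
        /* Correct for both the spinlock and the BIGLOCK implementation. */
        ASSERT(shadow_lock_is_acquired(d));
        /* ... manipulate shadow page tables ... */
    }

    void shadow_op(struct domain *d)
    {
        shadow_lock(d);
        shadow_helper(d);   /* assertion holds while the lock is held */
        shadow_unlock(d);
    }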
--- a/xen/include/asm-x86/domain.h	Thu May 12 09:28:21 2005 +0000
+++ b/xen/include/asm-x86/domain.h	Thu May 12 10:29:11 2005 +0000
@@ -30,7 +30,7 @@ struct arch_domain
 
     /* Shadow mode status and controls. */
     unsigned int shadow_mode;  /* flags to control shadow table operation */
-    spinlock_t   shadow_lock;
+    unsigned int shadow_nest;  /* Recursive depth of shadow_lock() nesting */
     /* Shadow mode has tainted page reference counts? */
     unsigned int shadow_tainted_refcnts;
 
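Since BIGLOCK now provides the mutual exclusion, the per-domain spinlock field can be dropped; shadow_nest merely counts how deeply shadow_lock() is nested, letting shadow_lock_is_acquired() distinguish "BIGLOCK held for shadow work" from "BIGLOCK held for some other reason". A hypothetical nested sequence shows the counter in action:

    /* Hypothetical usage; comments show the expected state transitions. */
    void nested_example(struct domain *d)
    {
        shadow_lock(d);      /* LOCK_BIGLOCK taken;  shadow_nest: 0 -> 1 */
        shadow_lock(d);      /* recursion is fine;   shadow_nest: 1 -> 2 */
        ASSERT(shadow_lock_is_acquired(d));
        shadow_unlock(d);    /* shadow_nest: 2 -> 1; BIGLOCK still held  */
        shadow_unlock(d);    /* shadow_nest: 1 -> 0; BIGLOCK released    */
    }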
--- a/xen/include/asm-x86/shadow.h	Thu May 12 09:28:21 2005 +0000
+++ b/xen/include/asm-x86/shadow.h	Thu May 12 10:29:11 2005 +0000
@@ -60,9 +60,45 @@
 #define __linear_hl2_table ((l1_pgentry_t *)(LINEAR_PT_VIRT_START + \
      (PERDOMAIN_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))
 
-#define shadow_lock_init(_d) spin_lock_init(&(_d)->arch.shadow_lock)
-#define shadow_lock(_d) do { ASSERT(!spin_is_locked(&(_d)->arch.shadow_lock)); spin_lock(&(_d)->arch.shadow_lock); } while (0)
-#define shadow_unlock(_d) spin_unlock(&(_d)->arch.shadow_lock)
+/*
+ * For now we use the per-domain BIGLOCK rather than a shadow-specific lock.
+ * We usually have the BIGLOCK already acquired anyway, so this is unlikely
+ * to cause much unnecessary extra serialisation. Also it's a recursive
+ * lock, and there are some code paths containing nested shadow_lock().
+ * The #if0'ed code below is therefore broken until such nesting is removed.
+ */
+#if 0
+#define shadow_lock_init(_d)                    \
+    spin_lock_init(&(_d)->arch.shadow_lock)
+#define shadow_lock_is_acquired(_d)             \
+    spin_is_locked(&(_d)->arch.shadow_lock)
+#define shadow_lock(_d)                         \
+do {                                            \
+    ASSERT(!shadow_lock_is_acquired(_d));       \
+    spin_lock(&(_d)->arch.shadow_lock);         \
+} while (0)
+#define shadow_unlock(_d)                       \
+do {                                            \
+    ASSERT(!shadow_lock_is_acquired(_d));       \
+    spin_unlock(&(_d)->arch.shadow_lock);       \
+} while (0)
+#else
+#define shadow_lock_init(_d)                    \
+    ((_d)->arch.shadow_nest = 0)
+#define shadow_lock_is_acquired(_d)             \
+    (spin_is_locked(&(_d)->big_lock) && ((_d)->arch.shadow_nest != 0))
+#define shadow_lock(_d)                         \
+do {                                            \
+    LOCK_BIGLOCK(_d);                           \
+    (_d)->arch.shadow_nest++;                   \
+} while (0)
+#define shadow_unlock(_d)                       \
+do {                                            \
+    ASSERT(shadow_lock_is_acquired(_d));        \
+    (_d)->arch.shadow_nest--;                   \
+    UNLOCK_BIGLOCK(_d);                         \
+} while (0)
+#endif
 
 #define SHADOW_ENCODE_MIN_MAX(_min, _max) ((((L1_PAGETABLE_ENTRIES - 1) - (_max)) << 16) | (_min))
 #define SHADOW_MIN(_encoded) ((_encoded) & ((1u<<16) - 1))
@@ -403,7 +439,7 @@ static inline int __mark_dirty(struct do
     unsigned long pfn;
     int rc = 0;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(d->arch.shadow_dirty_bitmap != NULL);
 
     if ( !VALID_MFN(mfn) )
@@ -1137,7 +1173,7 @@ static inline unsigned long __shadow_sta
                            ? __gpfn_to_mfn(d, gpfn)
                            : INVALID_MFN);
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(gpfn == (gpfn & PGT_mfn_mask));
     ASSERT(stype && !(stype & ~PGT_type_mask));
 
@@ -1186,7 +1222,7 @@ shadow_max_pgtable_type(struct domain *d
     struct shadow_status *x;
     u32 pttype = PGT_none, type;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(gpfn == (gpfn & PGT_mfn_mask));
 
     perfc_incrc(shadow_max_type);
@@ -1280,7 +1316,7 @@ static inline void delete_shadow_status(
     struct shadow_status *p, *x, *n, *head;
     unsigned long key = gpfn | stype;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(!(gpfn & ~PGT_mfn_mask));
     ASSERT(stype && !(stype & ~PGT_type_mask));
 
@@ -1362,7 +1398,7 @@ static inline void set_shadow_status(
 
     SH_VVLOG("set gpfn=%lx gmfn=%lx smfn=%lx t=%lx", gpfn, gmfn, smfn, stype);
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
 
     ASSERT(shadow_mode_translate(d) || gpfn);
     ASSERT(!(gpfn & ~PGT_mfn_mask));
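Two details of the new shadow.h are worth noting. First, the disabled #if 0 branch's shadow_unlock() asserts !shadow_lock_is_acquired(_d) before unlocking, which is inverted for an unlock path; the comment above it already declares that branch broken, so this only matters if it is ever re-enabled. Second, the live branch leans on LOCK_BIGLOCK()/UNLOCK_BIGLOCK(), which wrap the domain's recursive big_lock; a sketch of what they are assumed to expand to (check sched.h for the authoritative definitions):

    /* Sketch of the assumed BIGLOCK wrappers; see sched.h for the real ones. */
    #define LOCK_BIGLOCK(_d)   spin_lock_recursive(&(_d)->big_lock)
    #define UNLOCK_BIGLOCK(_d) spin_unlock_recursive(&(_d)->big_lock)

Note also that shadow_lock() increments shadow_nest only after taking the BIGLOCK, and shadow_unlock() decrements it before releasing, so the counter is only ever modified while the lock is held.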