ia64/xen-unstable
changeset 18693:ba163d6dc986
[IA64] protect ridblock_owner.
Protect ridblock_owner with a spin lock.

deallocate_rid() is called by arch_domain_destroy(), which is
invoked as an RCU callback. On the other hand, allocate_rid() is
called from the domctl hypercall, so access to ridblock_owner is
racy. Protect it with a spin lock.

So far xend probably serializes domain creation, which is why this
has not caused problems in practice.
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
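
For context, the race described above distills to the hypothetical sketch below (not Xen code: `owner[]`, `alloc_path()`, and `free_path()` are illustrative stand-ins for ridblock_owner[], the domctl-driven allocation path, and the RCU-deferred destruction path):

```c
/* Hypothetical distillation of the race; identifiers are illustrative. */
struct domain;                            /* opaque, as in the hypervisor */

static struct domain *owner[64];          /* shared table, ~ ridblock_owner[] */

/* CPU 0: domctl hypercall creating a domain. */
void alloc_path(struct domain *d)
{
    int i;

    for (i = 1; i < 64; ++i) {
        if (owner[i] == NULL) {           /* reads an entry ...                 */
            owner[i] = d;                 /* ... then claims it, non-atomically */
            return;
        }
    }
}

/* CPU 1: RCU callback destroying another domain. */
void free_path(int start, int n)
{
    int i;

    for (i = start; i < start + n; ++i)
        owner[i] = NULL;                  /* clears entries concurrently */
}
```

With no lock, alloc_path() can scan the table while free_path() is mid-way through clearing it, and two concurrent allocations can both observe owner[i] == NULL and claim the same entry; the patch closes both windows with one spin lock.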
| author | Isaku Yamahata <yamahata@valinux.co.jp> |
|---|---|
| date | Tue Oct 28 14:31:58 2008 +0900 (2008-10-28) |
| parents | ecfb1637cef9 |
| children | a6b1be5a83de |
| files | xen/arch/ia64/xen/regionreg.c |
```diff
--- a/xen/arch/ia64/xen/regionreg.c    Tue Oct 28 12:20:27 2008 +0900
+++ b/xen/arch/ia64/xen/regionreg.c    Tue Oct 28 14:31:58 2008 +0900
@@ -100,6 +100,7 @@ static unsigned long allocate_metaphysic
 
 static int implemented_rid_bits = 0;
 static int mp_rid_shift;
+static DEFINE_SPINLOCK(ridblock_lock);
 static struct domain *ridblock_owner[MAX_RID_BLOCKS] = { 0 };
 
 void __init init_rid_allocator (void)
@@ -169,6 +170,7 @@ int allocate_rid_range(struct domain *d,
 	n_rid_blocks = 1UL << (ridbits - IA64_MIN_IMPL_RID_BITS);
 
 	// skip over block 0, reserved for "meta-physical mappings (and Xen)"
+	spin_lock(&ridblock_lock);
 	for (i = n_rid_blocks; i < MAX_RID_BLOCKS; i += n_rid_blocks) {
 		if (ridblock_owner[i] == NULL) {
 			for (j = i; j < i + n_rid_blocks; ++j) {
@@ -182,16 +184,19 @@ int allocate_rid_range(struct domain *d,
 			break;
 		}
 	}
-
-	if (i >= MAX_RID_BLOCKS)
+
+	if (i >= MAX_RID_BLOCKS) {
+		spin_unlock(&ridblock_lock);
 		return 0;
-
+	}
+
 	// found an unused block:
 	// (i << min_rid_bits) <= rid < ((i + n) << min_rid_bits)
 	// mark this block as owned
 	for (j = i; j < i + n_rid_blocks; ++j)
 		ridblock_owner[j] = d;
-
+	spin_unlock(&ridblock_lock);
+
 	// setup domain struct
 	d->arch.rid_bits = ridbits;
 	d->arch.starting_rid = i << IA64_MIN_IMPL_RID_BITS;
@@ -221,11 +226,12 @@ int deallocate_rid_range(struct domain *
 	if (d->arch.rid_bits == 0)
 		return 1;
 
-
+	spin_lock(&ridblock_lock);
 	for (i = rid_block_start; i < rid_block_end; ++i) {
 		ASSERT(ridblock_owner[i] == d);
 		ridblock_owner[i] = NULL;
 	}
+	spin_unlock(&ridblock_lock);
 
 	d->arch.rid_bits = 0;
 	d->arch.starting_rid = 0;
```
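
As a runnable, user-space analogue of the pattern the patch introduces, the sketch below uses a pthread spinlock in place of Xen's DEFINE_SPINLOCK()/spin_lock()/spin_unlock(), and small stand-ins for MAX_RID_BLOCKS and struct domain. It illustrates the locking discipline, not the hypervisor code: both the scan-and-claim loop of the allocator and the clearing loop of the deallocator take the same lock.

```c
/*
 * User-space analogue of the ridblock_owner locking pattern.
 * Assumptions: pthread spinlock ~ Xen's ridblock_lock; MAX_BLOCKS ~
 * MAX_RID_BLOCKS; block counts are powers of two, so n divides MAX_BLOCKS.
 * Build: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

#define MAX_BLOCKS 64

struct domain { int id; };                    /* stand-in for struct domain */

static pthread_spinlock_t block_lock;         /* ~ ridblock_lock */
static struct domain *owner[MAX_BLOCKS];      /* ~ ridblock_owner[] */

/* Claim n contiguous blocks for d; return the first index, or -1. */
static int allocate_blocks(struct domain *d, int n)
{
    int i, j;

    pthread_spin_lock(&block_lock);
    for (i = n; i < MAX_BLOCKS; i += n) {     /* skip block 0, as the patch does */
        for (j = i; j < i + n; ++j)
            if (owner[j] != NULL)
                break;
        if (j == i + n) {                     /* free run found: claim it */
            for (j = i; j < i + n; ++j)
                owner[j] = d;
            pthread_spin_unlock(&block_lock);
            return i;
        }
    }
    pthread_spin_unlock(&block_lock);         /* no free run */
    return -1;
}

/* Release blocks; may run from a deferred (RCU-like) context. */
static void deallocate_blocks(int start, int n)
{
    int i;

    pthread_spin_lock(&block_lock);
    for (i = start; i < start + n; ++i)
        owner[i] = NULL;                      /* clear under the same lock */
    pthread_spin_unlock(&block_lock);
}

int main(void)
{
    struct domain d = { 1 };
    int first;

    pthread_spin_init(&block_lock, PTHREAD_PROCESS_PRIVATE);
    first = allocate_blocks(&d, 4);
    printf("claimed blocks starting at %d\n", first);
    if (first >= 0)
        deallocate_blocks(first, 4);
    pthread_spin_destroy(&block_lock);
    return 0;
}
```

The design point mirrors the changeset: because deallocation can run asynchronously (an RCU callback in Xen), serializing only the allocation side, as xend effectively did, is not enough; every path that touches the table must hold the lock.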