ia64/xen-unstable
changeset 2429:6b7809060d4a
bitkeeper revision 1.1159.69.11 (413a3592ceCfqW4DvqOdKq04_bXOig)
Fix our freeing of domain memory when a domain dies.
author      kaf24@scramble.cl.cam.ac.uk
date        Sat Sep 04 21:37:22 2004 +0000 (2004-09-04)
parents     e7ad903ca36c
children    d1f4d611f5ae
files       xen/arch/x86/domain.c
line diff
--- a/xen/arch/x86/domain.c	Sat Sep 04 19:58:36 2004 +0000
+++ b/xen/arch/x86/domain.c	Sat Sep 04 21:37:22 2004 +0000
@@ -459,37 +459,24 @@ static void relinquish_list(struct domai
     /* Use a recursive lock, as we may enter 'free_domheap_page'. */
     spin_lock_recursive(&d->page_alloc_lock);
 
-    /*
-     * Careful! Any time we might decrement a page's reference count we
-     * might invalidate our page pointer or our pointer into the page list.
-     * In such cases we have to exit the current iteration of the loop and
-     * start back at the beginning of the list. We are guaranteed to make
-     * forward progress because nothign will get added to the list (the domain
-     * is dying) and no pages will become pinned after we unpin them.
-     */
     ent = list->next;
     while ( ent != list )
     {
         page = list_entry(ent, struct pfn_info, list);
 
-        if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
+        /* Grab a reference to the page so it won't disappear from under us. */
+        if ( unlikely(!get_page(page, d)) )
         {
-            /* NB. Check the allocation pin /before/ put_page_and_type()! */
-            if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
-                put_page(page);
-            put_page_and_type(page);
-            /* May have lost our place in the list - start over. */
-            ent = list->next;
+            /* Couldn't get a reference -- someone is freeing this page. */
+            ent = ent->next;
             continue;
         }
 
+        if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
+            put_page_and_type(page);
+
         if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
-        {
             put_page(page);
-            /* May have lost our place in the list - start over. */
-            ent = list->next;
-            continue;
-        }
 
         /*
          * Forcibly invalidate base page tables at this point to break circular
@@ -498,40 +485,27 @@ static void relinquish_list(struct domai
          * tables are not in use so a non-zero count means circular reference.
          */
         y = page->u.inuse.type_info;
-        do {
+        for ( ; ; )
+        {
             x = y;
             if ( likely((x & (PGT_type_mask|PGT_validated)) !=
                         (PGT_base_page_table|PGT_validated)) )
-            {
-                /*
-                 * We have done no work on this iteration, so it is safe
-                 * to move on to the next page in the list.
-                 */
-                ent = ent->next;
                 break;
-            }
+
             y = cmpxchg(&page->u.inuse.type_info, x, x & ~PGT_validated);
             if ( likely(y == x) )
             {
                 free_page_type(page, PGT_base_page_table);
-                /* May have lost our place in the list - start over. */
-                ent = list->next;
+                break;
             }
         }
-        while ( unlikely(y != x) );
+
+        /* Follow the list chain and /then/ potentially free the page. */
+        ent = ent->next;
+        put_page(page);
     }
 
     spin_unlock_recursive(&d->page_alloc_lock);
-
-    /*
-     * Another CPU may have raced us to free some pages. Wait for those
-     * to trickle out now that we have released the lock.
-     */
-    while ( !list_empty(list) )
-    {
-        smp_mb();
-        cpu_relax();
-    }
 }
 
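The change replaces the old "restart from the head of the list whenever a reference count might drop" strategy with a simpler traversal: each page is pinned with get_page() before it is unpinned or deallocated, the list cursor is advanced first, and only then is the final reference dropped with put_page(). The page can therefore only be freed once its list linkage is no longer needed, which also removes the busy-wait for racing CPUs at the end of the function. The sketch below is not Xen code; it only illustrates that traversal pattern with hypothetical names (node, try_get, put, relinquish_all), and it assumes, as the patch does with page_alloc_lock, that the list linkage itself is protected by a lock held by the caller.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical element type with an embedded refcount and list link. */
struct node {
    struct node *next;
    int refcnt;          /* 0 means the element is already being freed */
};

/* Fails (returns false) if the element is already on its way out. */
static bool try_get(struct node *n)
{
    if ( n->refcnt == 0 )
        return false;
    n->refcnt++;
    return true;
}

/* Drops a reference; a real implementation would free the element at zero. */
static void put(struct node *n)
{
    n->refcnt--;
}

static void do_teardown_work(struct node *n)
{
    printf("tearing down node %p\n", (void *)n);
}

/*
 * Walk a circular list headed at 'head', doing teardown work on each
 * element.  The cursor is advanced *before* the reference is dropped,
 * so an element may only disappear once its ->next pointer is no
 * longer needed.
 */
static void relinquish_all(struct node *head)
{
    struct node *ent = head->next;

    while ( ent != head )
    {
        struct node *cur = ent;

        if ( !try_get(cur) )
        {
            /* Someone else is freeing this element; just skip past it. */
            ent = ent->next;
            continue;
        }

        do_teardown_work(cur);

        /* Follow the chain and /then/ potentially free the element. */
        ent = ent->next;
        put(cur);
    }
}

int main(void)
{
    struct node head, a, b;
    head.next = &a;  a.next = &b;  b.next = &head;
    head.refcnt = 1; a.refcnt = 1; b.refcnt = 1;
    relinquish_all(&head);
    return 0;
}

Because forward progress now comes from always following ent->next rather than restarting at list->next, the function no longer needs the "start over" comments or the trailing wait loop that the patch deletes.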