direct-io.hg
changeset 11543:3e39cc16dd41
[XEN] Clean up mark_dirty() usage in mm.c some more.
Add clarifying comments. Move mark_dirty() in alloc_page_type()
to end of the function (more correct).
Signed-off-by: Keir Fraser <keir@xensource.com>
author:   kfraser@localhost.localdomain
date:     Wed Sep 20 17:58:10 2006 +0100 (2006-09-20)
parents:  596b0e4fbef4
children: c3b4b9dc23ac
files:    xen/arch/x86/mm.c xen/include/asm-x86/shadow.h
line diff
--- a/xen/arch/x86/mm.c	Wed Sep 20 16:52:15 2006 +0100
+++ b/xen/arch/x86/mm.c	Wed Sep 20 17:58:10 2006 +0100
@@ -1490,24 +1490,26 @@ static int mod_l4_entry(l4_pgentry_t *pl
 
 int alloc_page_type(struct page_info *page, unsigned long type)
 {
-    struct domain *owner = page_get_owner(page);
-
-    if ( owner != NULL )
-        mark_dirty(owner, page_to_mfn(page));
+    int rc;
 
     switch ( type & PGT_type_mask )
     {
     case PGT_l1_page_table:
-        return alloc_l1_table(page);
+        rc = alloc_l1_table(page);
+        break;
     case PGT_l2_page_table:
-        return alloc_l2_table(page, type);
+        rc = alloc_l2_table(page, type);
+        break;
     case PGT_l3_page_table:
-        return alloc_l3_table(page);
+        rc = alloc_l3_table(page);
+        break;
     case PGT_l4_page_table:
-        return alloc_l4_table(page);
+        rc = alloc_l4_table(page);
+        break;
     case PGT_gdt_page:
    case PGT_ldt_page:
-        return alloc_segdesc_page(page);
+        rc = alloc_segdesc_page(page);
+        break;
     default:
         printk("Bad type in alloc_page_type %lx t=%" PRtype_info " c=%x\n",
                type, page->u.inuse.type_info,
@@ -1515,7 +1517,15 @@ int alloc_page_type(struct page_info *pa
         BUG();
     }
 
-    return 0;
+    /*
+     * A page is dirtied when its type count becomes non-zero.
+     * It is safe to mark dirty here because any PTE modifications in
+     * alloc_l?_table have now happened. The caller has already set the type
+     * and incremented the reference count.
+     */
+    mark_dirty(page_get_owner(page), page_to_mfn(page));
+
+    return rc;
 }
 
 
@@ -1615,16 +1625,13 @@ void put_page_type(struct page_info *pag
     }
     while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
 
-    if( likely(owner != NULL) )
-    {
-        if (shadow_mode_enabled(owner))
-        {
-            if (shadow_lock_is_acquired(owner)) /* this is a shadow page */
-                return;
-
-            mark_dirty(owner, page_to_mfn(page));
-        }
-    }
+    /*
+     * A page is dirtied when its type count becomes zero.
+     * We cannot set the dirty flag earlier than this because we must wait
+     * until the type count has been zeroed by the CMPXCHG above.
+     */
+    if ( unlikely((nx & PGT_count_mask) == 0) )
+        mark_dirty(owner, page_to_mfn(page));
 }
 
 
@@ -1984,6 +1991,7 @@ int do_mmuext_op(
             break;
         }
 
+        /* A page is dirtied when its pin status is set. */
         mark_dirty(d, mfn);
 
         break;
@@ -2006,8 +2014,9 @@ int do_mmuext_op(
             {
                 shadow_lock(d);
                 shadow_remove_all_shadows(v, _mfn(mfn));
+                /* A page is dirtied when its pin status is cleared. */
+                sh_mark_dirty(d, _mfn(mfn));
                 shadow_unlock(d);
-                mark_dirty(d, mfn);
             }
         }
         else
--- a/xen/include/asm-x86/shadow.h	Wed Sep 20 16:52:15 2006 +0100
+++ b/xen/include/asm-x86/shadow.h	Wed Sep 20 17:58:10 2006 +0100
@@ -325,12 +325,17 @@ void shadow_final_teardown(struct domain
 void sh_do_mark_dirty(struct domain *d, mfn_t gmfn);
 static inline void mark_dirty(struct domain *d, unsigned long gmfn)
 {
-    if ( shadow_mode_log_dirty(d) )
-    {
+    int caller_locked;
+
+    if ( unlikely(d == NULL) || likely(!shadow_mode_log_dirty(d)) )
+        return;
+
+    caller_locked = shadow_lock_is_acquired(d);
+    if ( !caller_locked )
         shadow_lock(d);
-        sh_do_mark_dirty(d, _mfn(gmfn));
+    sh_do_mark_dirty(d, _mfn(gmfn));
+    if ( !caller_locked )
         shadow_unlock(d);
-    }
 }
 
 /* Internal version, for when the shadow lock is already held */