ia64/xen-unstable
changeset 11551:affbab70fca2
[XEN] Move mark_dirty() from alloc_page_type() to get_page_type(),
for symmetry with put_page_type(). Limit calls to mark_dirty() in
put_page_type() so that they happen only for page-table pages.
Signed-off-by: Keir Fraser <keir@xensource.com>
author      kfraser@localhost.localdomain
date        Wed Sep 20 18:41:44 2006 +0100 (2006-09-20)
parents     6d83c86ebfe9
children    1adaf06e8abc
files       xen/arch/x86/mm.c
line diff
--- a/xen/arch/x86/mm.c	Wed Sep 20 18:33:26 2006 +0100
+++ b/xen/arch/x86/mm.c	Wed Sep 20 18:41:44 2006 +0100
@@ -1490,26 +1490,19 @@ static int mod_l4_entry(l4_pgentry_t *pl
 
 int alloc_page_type(struct page_info *page, unsigned long type)
 {
-    int rc;
-
     switch ( type & PGT_type_mask )
     {
     case PGT_l1_page_table:
-        rc = alloc_l1_table(page);
-        break;
+        return alloc_l1_table(page);
     case PGT_l2_page_table:
-        rc = alloc_l2_table(page, type);
-        break;
+        return alloc_l2_table(page, type);
     case PGT_l3_page_table:
-        rc = alloc_l3_table(page);
-        break;
+        return alloc_l3_table(page);
     case PGT_l4_page_table:
-        rc = alloc_l4_table(page);
-        break;
+        return alloc_l4_table(page);
     case PGT_gdt_page:
     case PGT_ldt_page:
-        rc = alloc_segdesc_page(page);
-        break;
+        return alloc_segdesc_page(page);
     default:
         printk("Bad type in alloc_page_type %lx t=%" PRtype_info " c=%x\n",
                type, page->u.inuse.type_info,
@@ -1517,15 +1510,7 @@ int alloc_page_type(struct page_info *pa
         BUG();
     }
 
-    /*
-     * A page is dirtied when its type count becomes non-zero.
-     * It is safe to mark dirty here because any PTE modifications in
-     * alloc_l?_table have now happened. The caller has already set the type
-     * and incremented the reference count.
-     */
-    mark_dirty(page_get_owner(page), page_to_mfn(page));
-
-    return rc;
+    return 0;
 }
 
 
@@ -1625,11 +1610,11 @@ void put_page_type(struct page_info *pag
     while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
 
     /*
-     * A page is dirtied when its type count becomes zero.
+     * A page table is dirtied when its type count becomes zero.
      * We cannot set the dirty flag earlier than this because we must wait
      * until the type count has been zeroed by the CMPXCHG above.
      */
-    if ( unlikely((nx & PGT_count_mask) == 0) )
+    if ( unlikely((nx & (PGT_validated|PGT_count_mask)) == 0) )
         mark_dirty(page_get_owner(page), page_to_mfn(page));
 }
 
@@ -1716,6 +1701,13 @@ int get_page_type(struct page_info *page
 
         /* Noone else is updating simultaneously. */
         __set_bit(_PGT_validated, &page->u.inuse.type_info);
+
+        /*
+         * A page table is dirtied when its type count becomes non-zero. It is
+         * safe to mark dirty here because any PTE modifications in
+         * alloc_page_type() have now happened.
+         */
+        mark_dirty(page_get_owner(page), page_to_mfn(page));
     }
 
     return 1;
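
The reason the new put_page_type() test fires only for page-table pages is that those are the only types whose PGT_validated bit is cleared (around the free_page_type() call) before the count drops to zero; for GDT/LDT/segdesc pages the flag survives the final put. A minimal standalone sketch of that condition, assuming illustrative flag values and a hypothetical helper name (not Xen's real PGT_* encoding):

/*
 * Standalone sketch of the dirty-marking test added to put_page_type().
 * Flag layout and helper names are illustrative only.
 */
#include <stdio.h>
#include <stdbool.h>

#define PGT_COUNT_MASK  0x0000ffffUL   /* hypothetical type-count field */
#define PGT_VALIDATED   0x00010000UL   /* hypothetical "validated" flag */

/* nx models type_info after the final reference has been dropped. */
static bool marks_dirty_on_last_put(unsigned long nx)
{
    /* The changeset's test: count hit zero AND the validated bit is gone. */
    return (nx & (PGT_VALIDATED | PGT_COUNT_MASK)) == 0;
}

int main(void)
{
    /* Page-table page: PGT_validated was cleared on the final put, nx == 0. */
    unsigned long pt_nx = 0;

    /* Segdesc/GDT/LDT page: the validated bit is still set when count hits zero. */
    unsigned long seg_nx = PGT_VALIDATED;

    printf("page table   -> mark_dirty: %d\n", marks_dirty_on_last_put(pt_nx));
    printf("segdesc page -> mark_dirty: %d\n", marks_dirty_on_last_put(seg_nx));
    return 0;
}

On the acquire side, get_page_type() now calls mark_dirty() immediately after setting _PGT_validated, so (under this reading) dirty logging brackets exactly the validated lifetime of a page-table page, mirroring the release-side check above.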