ia64/xen-unstable

changeset 11550:6d83c86ebfe9

merge
author Emmanuel Ackaouy <ack@xensource.com>
date Wed Sep 20 18:33:26 2006 +0100 (2006-09-20)
parents 70fe022d3589 c3b4b9dc23ac
children affbab70fca2
files xen/arch/x86/mm.c xen/include/asm-x86/shadow.h
--- a/xen/arch/x86/mm.c	Wed Sep 20 18:32:54 2006 +0100
+++ b/xen/arch/x86/mm.c	Wed Sep 20 18:33:26 2006 +0100
@@ -1490,24 +1490,26 @@ static int mod_l4_entry(l4_pgentry_t *pl
 
 int alloc_page_type(struct page_info *page, unsigned long type)
 {
-    struct domain *owner = page_get_owner(page);
-
-    if ( owner != NULL )
-        mark_dirty(owner, page_to_mfn(page));
+    int rc;
 
     switch ( type & PGT_type_mask )
     {
     case PGT_l1_page_table:
-        return alloc_l1_table(page);
+        rc = alloc_l1_table(page);
+        break;
     case PGT_l2_page_table:
-        return alloc_l2_table(page, type);
+        rc = alloc_l2_table(page, type);
+        break;
     case PGT_l3_page_table:
-        return alloc_l3_table(page);
+        rc = alloc_l3_table(page);
+        break;
     case PGT_l4_page_table:
-        return alloc_l4_table(page);
+        rc = alloc_l4_table(page);
+        break;
     case PGT_gdt_page:
     case PGT_ldt_page:
-        return alloc_segdesc_page(page);
+        rc = alloc_segdesc_page(page);
+        break;
     default:
         printk("Bad type in alloc_page_type %lx t=%" PRtype_info " c=%x\n",
                type, page->u.inuse.type_info,
@@ -1515,7 +1517,15 @@ int alloc_page_type(struct page_info *pa
         BUG();
     }
 
-    return 0;
+    /*
+     * A page is dirtied when its type count becomes non-zero.
+     * It is safe to mark dirty here because any PTE modifications in
+     * alloc_l?_table have now happened. The caller has already set the type
+     * and incremented the reference count.
+     */
+    mark_dirty(page_get_owner(page), page_to_mfn(page));
+
+    return rc;
 }
 
 
@@ -1580,7 +1590,6 @@ void free_page_type(struct page_info *pa
 void put_page_type(struct page_info *page)
 {
     unsigned long nx, x, y = page->u.inuse.type_info;
-    struct domain *owner = page_get_owner(page);
 
  again:
     do {
@@ -1615,16 +1624,13 @@ void put_page_type(struct page_info *pag
     }
     while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
 
-    if( likely(owner != NULL) )
-    {
-        if (shadow_mode_enabled(owner))
-        {
-            if (shadow_lock_is_acquired(owner))  /* this is a shadow page */
-                return;
-
-            mark_dirty(owner, page_to_mfn(page));
-        }
-    }
+    /*
+     * A page is dirtied when its type count becomes zero.
+     * We cannot set the dirty flag earlier than this because we must wait
+     * until the type count has been zeroed by the CMPXCHG above.
+     */
+    if ( unlikely((nx & PGT_count_mask) == 0) )
+        mark_dirty(page_get_owner(page), page_to_mfn(page));
 }
 
 
@@ -1984,6 +1990,7 @@ int do_mmuext_op(
                 break;
             }
 
+            /* A page is dirtied when its pin status is set. */
            mark_dirty(d, mfn);
 
             break;
@@ -2006,8 +2013,9 @@ int do_mmuext_op(
                 {
                     shadow_lock(d);
                     shadow_remove_all_shadows(v, _mfn(mfn));
+                    /* A page is dirtied when its pin status is cleared. */
+                    sh_mark_dirty(d, _mfn(mfn));
                     shadow_unlock(d);
-                    mark_dirty(d, mfn);
                 }
             }
             else
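
The mm.c hunks above move log-dirty marking to the points where a page's type count transitions: alloc_page_type() marks the page once its new type has been validated (the caller has already committed the 0 -> 1 count transition), and put_page_type() marks it only once the CMPXCHG has dropped the count to zero. The sketch below condenses that rule as an editorial illustration, not the Xen code itself: type_count, log_dirty_page(), type_count_up() and type_count_down() are hypothetical stand-ins for page->u.inuse.type_info, PGT_count_mask, mark_dirty() and the get_page_type()/put_page_type() bookkeeping, with the real atomic counting simplified away.

    #include <stdio.h>

    /* Illustrative stand-in for struct page_info; the real count lives in
     * page->u.inuse.type_info, masked by PGT_count_mask. */
    struct page {
        unsigned long type_count;
    };

    /* Stand-in for mark_dirty(): record the page in the log-dirty bitmap. */
    static void log_dirty_page(struct page *pg)
    {
        printf("page %p marked dirty\n", (void *)pg);
    }

    /* A page is logged exactly when its type count changes to or from zero. */
    static void type_count_up(struct page *pg)
    {
        if ( pg->type_count++ == 0 )
            log_dirty_page(pg);   /* 0 -> 1: contents just (re)validated */
    }

    static void type_count_down(struct page *pg)
    {
        if ( --pg->type_count == 0 )
            log_dirty_page(pg);   /* 1 -> 0: page becomes guest-writable */
    }

    int main(void)
    {
        struct page pg = { 0 };
        type_count_up(&pg);       /* logs: first typed use */
        type_count_up(&pg);       /* no log: count 1 -> 2 */
        type_count_down(&pg);     /* no log: count 2 -> 1 */
        type_count_down(&pg);     /* logs: last typed use dropped */
        return 0;
    }

Intermediate count changes need no logging: while the count is non-zero the page is pinned to its type and cannot be arbitrarily rewritten by the guest, so only the boundary transitions matter.
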
--- a/xen/include/asm-x86/shadow.h	Wed Sep 20 18:32:54 2006 +0100
+++ b/xen/include/asm-x86/shadow.h	Wed Sep 20 18:33:26 2006 +0100
@@ -325,12 +325,17 @@ void shadow_final_teardown(struct domain
 void sh_do_mark_dirty(struct domain *d, mfn_t gmfn);
 static inline void mark_dirty(struct domain *d, unsigned long gmfn)
 {
-    if ( shadow_mode_log_dirty(d) )
-    {
+    int caller_locked;
+
+    if ( unlikely(d == NULL) || likely(!shadow_mode_log_dirty(d)) )
+        return;
+
+    caller_locked = shadow_lock_is_acquired(d);
+    if ( !caller_locked )
         shadow_lock(d);
-        sh_do_mark_dirty(d, _mfn(gmfn));
+    sh_do_mark_dirty(d, _mfn(gmfn));
+    if ( !caller_locked )
         shadow_unlock(d);
-    }
 }
 
 /* Internal version, for when the shadow lock is already held */
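
The shadow.h change makes mark_dirty() safe in two situations its callers no longer guard against themselves: the owning domain may be NULL (ownerless pages are simply ignored), and the shadow lock may or may not already be held, which matters because the lock is not recursive. Callers that know they hold the lock, like the unpin path in do_mmuext_op() above, can instead use the internal sh_mark_dirty() directly, as the closing comment notes. The general shape of this conditional-locking pattern is sketched below; lock_held_by_me(), acquire(), release(), do_work() and do_work_locked() are hypothetical stand-ins for shadow_lock_is_acquired(), shadow_lock(), shadow_unlock(), mark_dirty() and sh_do_mark_dirty(), built here on plain pthreads rather than Xen's spinlocks.

    #include <pthread.h>

    /* Stand-in for the shadow lock: a mutex plus a record of its owner,
     * mirroring how shadow_lock_is_acquired() checks the recorded holder. */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_t owner;
    static int locked;

    static int lock_held_by_me(void)
    {
        /* The unlocked read is benign in this sketch: a thread only acts
         * on a positive answer about itself, which it alone can change. */
        return locked && pthread_equal(owner, pthread_self());
    }

    static void acquire(void)
    {
        pthread_mutex_lock(&lock);
        owner = pthread_self();
        locked = 1;
    }

    static void release(void)
    {
        locked = 0;
        pthread_mutex_unlock(&lock);
    }

    /* Stand-in for sh_do_mark_dirty(): must run under the lock. */
    static void do_work_locked(void)
    {
        /* ... touch state protected by the lock ... */
    }

    /* Stand-in for the new mark_dirty(): callable either way. */
    static void do_work(void)
    {
        int caller_locked = lock_held_by_me();

        if ( !caller_locked )
            acquire();

        do_work_locked();

        if ( !caller_locked )
            release();
    }

    int main(void)
    {
        do_work();      /* lock not held: do_work() takes and drops it */

        acquire();
        do_work();      /* lock already held: do_work() leaves it alone */
        release();

        return 0;
    }

Testing ownership first, rather than making the lock recursive, keeps the common unlocked path cheap and leaves the lock discipline explicit at every call site.
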