ia64/xen-unstable

changeset 16310:46f91ed0f7d1

Live migration with MMIO pages: fix shadow pte propagation.
Signed-off-by: Kieran Mansley <kmansley@solarflare.com>
author Keir Fraser <keir@xensource.com>
date Fri Nov 02 10:37:59 2007 +0000 (2007-11-02)
parents 3e397fa3a6ad
children e11b24680480 db9f62d8f7f4
files xen/arch/x86/mm/shadow/multi.c
line diff
--- a/xen/arch/x86/mm/shadow/multi.c	Fri Nov 02 10:22:22 2007 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c	Fri Nov 02 10:37:59 2007 +0000
@@ -697,7 +697,8 @@ static always_inline void
     /* N.B. For pass-through MMIO, either this test needs to be relaxed,
      * and shadow_set_l1e() trained to handle non-valid MFNs (ugh), or the
      * MMIO areas need to be added to the frame-table to make them "valid". */
-    if ( !mfn_valid(target_mfn) && (p2mt != p2m_mmio_direct) )
+    if ( shadow_mode_refcounts(d) &&
+         !mfn_valid(target_mfn) && (p2mt != p2m_mmio_direct) )
     {
         ASSERT((ft == ft_prefetch));
         *sp = shadow_l1e_empty();
@@ -713,6 +714,8 @@ static always_inline void
                        _PAGE_RW | _PAGE_PRESENT);
     if ( guest_supports_nx(v) )
         pass_thru_flags |= _PAGE_NX_BIT;
+    if ( !shadow_mode_refcounts(d) && !mfn_valid(target_mfn) )
+        pass_thru_flags |= _PAGE_PAT | _PAGE_PCD | _PAGE_PWT;
     sflags = gflags & pass_thru_flags;
 
     /* Only change memory caching type for pass-through domain */
@@ -765,10 +768,12 @@ static always_inline void
     // p2m_ram_logdirty p2m type: only HAP uses that.)
     if ( unlikely((level == 1) && shadow_mode_log_dirty(d)) )
     {
-        if ( ft & FETCH_TYPE_WRITE )
-            paging_mark_dirty(d, mfn_x(target_mfn));
-        else if ( !sh_mfn_is_dirty(d, target_mfn) )
-            sflags &= ~_PAGE_RW;
+        if ( mfn_valid(target_mfn) ) {
+            if ( ft & FETCH_TYPE_WRITE )
+                paging_mark_dirty(d, mfn_x(target_mfn));
+            else if ( !sh_mfn_is_dirty(d, target_mfn) )
+                sflags &= ~_PAGE_RW;
+        }
     }
 
     /* Read-only memory */
@@ -2843,7 +2848,8 @@ static int sh_page_fault(struct vcpu *v,
     gfn = guest_l1e_get_gfn(gw.eff_l1e);
     gmfn = gfn_to_mfn(d, gfn, &p2mt);
 
-    if ( !p2m_is_valid(p2mt) || (!p2m_is_mmio(p2mt) && !mfn_valid(gmfn)) )
+    if ( shadow_mode_refcounts(d) &&
+         (!p2m_is_valid(p2mt) || (!p2m_is_mmio(p2mt) && !mfn_valid(gmfn))) )
    {
        perfc_incr(shadow_fault_bail_bad_gfn);
        SHADOW_PRINTK("BAD gfn=%"SH_PRI_gfn" gmfn=%"PRI_mfn"\n",
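The net effect of the change: domains whose shadows are not refcounted (for example PV guests in log-dirty shadow mode during live migration) may legitimately map frames that have no frame-table entry, such as MMIO pages. For those domains the propagation code now keeps the mapping instead of bailing out, passes the guest's caching bits (_PAGE_PAT, _PAGE_PCD, _PAGE_PWT) through to the shadow entry, and skips dirty-bitmap handling when the MFN is not valid; sh_page_fault() likewise only rejects bad gfn/mfn pairs when refcounting is in use. What follows is a rough, self-contained C sketch of that decision logic, not Xen code: struct vm_state, propagate_flags() and the boolean fields are stand-ins invented here for shadow_mode_refcounts(), mfn_valid(), shadow_mode_log_dirty() and friends, and only the _PAGE_* values match the usual x86 PTE bit positions.

/*
 * Standalone sketch, NOT Xen source: a simplified model of the
 * flag-propagation rules this changeset adjusts.
 */
#include <stdbool.h>
#include <stdio.h>

#define _PAGE_PRESENT  0x001UL
#define _PAGE_RW       0x002UL
#define _PAGE_USER     0x004UL
#define _PAGE_PWT      0x008UL
#define _PAGE_PCD      0x010UL
#define _PAGE_ACCESSED 0x020UL
#define _PAGE_DIRTY    0x040UL
#define _PAGE_PAT      0x080UL

struct vm_state {
    bool refcounts;   /* stand-in for shadow_mode_refcounts(d) */
    bool mfn_valid;   /* stand-in for mfn_valid(target_mfn)    */
    bool log_dirty;   /* stand-in for shadow_mode_log_dirty(d) */
    bool write_fault; /* stand-in for ft & FETCH_TYPE_WRITE    */
    bool mfn_dirty;   /* stand-in for sh_mfn_is_dirty()        */
};

/* Returns the shadow flags to install, or 0 for an empty shadow entry. */
static unsigned long propagate_flags(const struct vm_state *s,
                                     unsigned long gflags)
{
    unsigned long pass_thru = _PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
                              _PAGE_ACCESSED | _PAGE_DIRTY;
    unsigned long sflags;

    /* Only refcounting (HVM-style) modes must refuse frames that are
     * outside the frame table. */
    if ( s->refcounts && !s->mfn_valid )
        return 0;

    /* Non-refcounted domains may map such frames (e.g. MMIO) directly;
     * keep the guest's caching attributes so the mapping stays uncached. */
    if ( !s->refcounts && !s->mfn_valid )
        pass_thru |= _PAGE_PAT | _PAGE_PCD | _PAGE_PWT;

    sflags = gflags & pass_thru;

    /* Dirty-bitmap handling only applies to frames that actually have a
     * bitmap entry, i.e. valid MFNs; invalid (MMIO) frames are skipped. */
    if ( s->log_dirty && s->mfn_valid )
    {
        if ( s->write_fault )
            puts("would call paging_mark_dirty() here");
        else if ( !s->mfn_dirty )
            sflags &= ~_PAGE_RW;  /* shadow entry made read-only */
    }

    return sflags;
}

int main(void)
{
    /* A non-refcounted (PV-style) domain touching an MMIO frame while
     * log-dirty mode is active, as during live migration. */
    struct vm_state s = { .refcounts = false, .mfn_valid = false,
                          .log_dirty = true,  .write_fault = true };
    unsigned long gflags = _PAGE_PRESENT | _PAGE_RW | _PAGE_PCD;

    printf("sflags = %#lx\n", propagate_flags(&s, gflags));
    return 0;
}

Running this for the case in main() prints sflags = 0x13: the mapping is installed with _PAGE_PCD preserved, rather than being dropped as it would have been before the patch.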