ia64/xen-unstable

changeset 18320:d96bf4cd0f37

x86: Fix shadow code's handling of p2m superpage changes

When a p2m superpage entry is shattered, it's important not to
unshadow any parts of the 2MB region that are still mapped afterwards.
Otherwise, shattering a superpage that contains the guest's top-level
pagetable will cause the guest to be killed.

Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Aug 13 12:13:59 2008 +0100 (2008-08-13)
parents 641e10533c89
children 29c242c06ac2
files xen/arch/x86/mm/shadow/common.c
line diff
--- a/xen/arch/x86/mm/shadow/common.c	Wed Aug 13 12:12:08 2008 +0100
+++ b/xen/arch/x86/mm/shadow/common.c	Wed Aug 13 12:13:59 2008 +0100
@@ -3357,23 +3357,45 @@ shadow_write_p2m_entry(struct vcpu *v, u
         }
     }
 
-    /* If we're removing a superpage mapping from the p2m, remove all the
-     * MFNs covered by it from the shadows too. */
+    /* If we're removing a superpage mapping from the p2m, we need to check
+     * all the pages covered by it.  If they're still there in the new
+     * scheme, that's OK, but otherwise they must be unshadowed. */
     if ( level == 2 && (l1e_get_flags(*p) & _PAGE_PRESENT) &&
          (l1e_get_flags(*p) & _PAGE_PSE) )
     {
         unsigned int i;
-        mfn_t mfn = _mfn(l1e_get_pfn(*p));
+        cpumask_t flushmask;
+        mfn_t omfn = _mfn(l1e_get_pfn(*p));
+        mfn_t nmfn = _mfn(l1e_get_pfn(new));
+        l1_pgentry_t *npte = NULL;
         p2m_type_t p2mt = p2m_flags_to_type(l1e_get_flags(*p));
-        if ( p2m_is_valid(p2mt) && mfn_valid(mfn) )
+        if ( p2m_is_valid(p2mt) && mfn_valid(omfn) )
         {
+            cpus_clear(flushmask);
+
+            /* If we're replacing a superpage with a normal L1 page, map it */
+            if ( (l1e_get_flags(new) & _PAGE_PRESENT)
+                 && !(l1e_get_flags(new) & _PAGE_PSE)
+                 && mfn_valid(nmfn) )
+                npte = map_domain_page(mfn_x(nmfn));
+
             for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
             {
-                sh_remove_all_shadows_and_parents(v, mfn);
-                if ( sh_remove_all_mappings(v, mfn) )
-                    flush_tlb_mask(d->domain_dirty_cpumask);
-                mfn = _mfn(mfn_x(mfn) + 1);
+                if ( !npte
+                     || !p2m_is_ram(p2m_flags_to_type(l1e_get_flags(npte[i])))
+                     || l1e_get_pfn(npte[i]) != mfn_x(omfn) )
+                {
+                    /* This GFN->MFN mapping has gone away */
+                    sh_remove_all_shadows_and_parents(v, omfn);
+                    if ( sh_remove_all_mappings(v, omfn) )
+                        cpus_or(flushmask, flushmask, d->domain_dirty_cpumask);
+                }
+                omfn = _mfn(mfn_x(omfn) + 1);
             }
+            flush_tlb_mask(flushmask);
+
+            if ( npte )
+                unmap_domain_page(npte);
         }
     }
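
For readers who want the shape of the new logic without the shadow-code
details, here is a minimal standalone sketch (not Xen code).  The names
shatter_superpage(), unshadow() and flush_tlbs(), and the "0 means not
present / not RAM" convention, are invented for illustration; only the
structure is taken from the patch above: walk the 512 entries covered by
the old 2MB mapping, unshadow just those MFNs whose mapping actually went
away, and flush once after the loop instead of once per entry.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRIES 512   /* L1_PAGETABLE_ENTRIES on x86 */

/* Stand-ins for sh_remove_all_shadows_and_parents()/sh_remove_all_mappings()
 * and for the deferred flush_tlb_mask() in the patch. */
static void unshadow(uint64_t mfn)
{
    printf("unshadow mfn %#" PRIx64 "\n", mfn);
}

static void flush_tlbs(void)
{
    printf("flush TLBs once, after the loop\n");
}

/* old_base_mfn: first MFN of the old 2MB superpage mapping.
 * new_l1[i]:    MFN now mapped by entry i of the replacement L1 table,
 *               or 0 if that entry is not present / not RAM.            */
static void shatter_superpage(uint64_t old_base_mfn, const uint64_t *new_l1)
{
    bool need_flush = false;

    for ( unsigned int i = 0; i < ENTRIES; i++ )
    {
        uint64_t omfn = old_base_mfn + i;

        /* Unshadow only the pages whose GFN->MFN mapping actually went
         * away; entries of the new L1 that still point at the same MFN
         * are left alone, which is the point of the fix. */
        if ( new_l1 == NULL || new_l1[i] != omfn )
        {
            unshadow(omfn);
            need_flush = true;
        }
    }

    /* The patch accumulates a dirty-CPU mask during the loop and issues a
     * single flush_tlb_mask() afterwards; a boolean stands in here. */
    if ( need_flush )
        flush_tlbs();
}

int main(void)
{
    static uint64_t new_l1[ENTRIES];   /* all zero: nothing mapped yet */

    /* Keep the first two 4kB frames of the old 2MB region mapped in place;
     * everything else in the region must be unshadowed. */
    new_l1[0] = 0x1000;
    new_l1[1] = 0x1001;

    shatter_superpage(0x1000, new_l1);
    return 0;
}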