ia64/xen-unstable

changeset 11925:cb0d26d68adf

[XEN] Stricter TLB-flush discipline when unshadowing pagetables
It's OK for the guest to see old entries in the TLB, but not for the
shadow fault handler to see them in its linear mappings.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Fri Oct 20 16:06:53 2006 +0100 (2006-10-20)
parents a10ef8002af6
children c3602d217110
files xen/arch/x86/mm/shadow/common.c xen/arch/x86/mm/shadow/multi.c xen/include/asm-x86/perfc_defn.h
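
The common.c change below replaces a local-only TLB flush in shadow_prealloc() with a cross-CPU flush mask: the dirty-CPU mask of every vcpu that had the unhooked top-level shadow loaded is accumulated, and all of those CPUs are flushed at once before the freed memory is handed out. The following is a minimal standalone sketch of that pattern, not Xen code: cpumask_t as a plain bitmask, the four-entry shadow_table, the printing flush_tlb_mask() stub, and unhook_and_flush() are all simplified, hypothetical stand-ins for the real definitions.

#include <stdio.h>

#define NR_VCPUS 4
#define NR_SLOTS 4                          /* shadow_table[0..3], as in the patch */

typedef unsigned int cpumask_t;             /* one bit per physical CPU (simplified) */

struct vcpu {
    unsigned long shadow_table[NR_SLOTS];   /* MFNs of loaded top-level shadows */
    cpumask_t vcpu_dirty_cpumask;           /* CPUs that may cache this vcpu's TLB entries */
};

static void flush_tlb_mask(cpumask_t mask)  /* stand-in for Xen's IPI-based flush */
{
    printf("flush TLBs on CPU mask %#x\n", mask);
}

/* The pattern the patch adopts: instead of local_flush_tlb(), which only
 * covers the current CPU, collect the dirty CPUs of *every* vcpu that had
 * the unhooked shadow (smfn) loaded, then flush them all in one go. */
static void unhook_and_flush(struct vcpu vcpus[NR_VCPUS], unsigned long smfn)
{
    cpumask_t flushmask = 0;                /* CPU_MASK_NONE */

    for (int i = 0; i < NR_VCPUS; i++)      /* for_each_vcpu(d, v2) */
        for (int s = 0; s < NR_SLOTS; s++)
            if (vcpus[i].shadow_table[s] == smfn) {
                flushmask |= vcpus[i].vcpu_dirty_cpumask;  /* cpus_or() */
                break;
            }

    flush_tlb_mask(flushmask);              /* before the freed pages are reused */
}

int main(void)
{
    struct vcpu vcpus[NR_VCPUS] = {
        { .shadow_table = { 0x1000 }, .vcpu_dirty_cpumask = 0x1 },
        { .shadow_table = { 0x2000 }, .vcpu_dirty_cpumask = 0x2 },
        { .shadow_table = { 0x1000 }, .vcpu_dirty_cpumask = 0x4 },
    };
    unhook_and_flush(vcpus, 0x1000);        /* flushes mask 0x5, not just one CPU */
    return 0;
}
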
--- a/xen/arch/x86/mm/shadow/common.c	Fri Oct 20 16:01:49 2006 +0100
+++ b/xen/arch/x86/mm/shadow/common.c	Fri Oct 20 16:06:53 2006 +0100
@@ -567,13 +567,18 @@ void shadow_prealloc(struct domain *d, u
 {
     /* Need a vpcu for calling unpins; for now, since we don't have
      * per-vcpu shadows, any will do */
-    struct vcpu *v = d->vcpu[0];
+    struct vcpu *v, *v2;
     struct list_head *l, *t;
     struct page_info *pg;
+    cpumask_t flushmask = CPU_MASK_NONE;
     mfn_t smfn;

     if ( chunk_is_available(d, order) ) return;

+    v = current;
+    if ( v->domain != d )
+        v = d->vcpu[0];
+
     /* Stage one: walk the list of top-level pages, unpinning them */
     perfc_incrc(shadow_prealloc_1);
     list_for_each_backwards_safe(l, t, &d->arch.shadow.toplevel_shadows)
@@ -592,28 +597,30 @@ void shadow_prealloc(struct domain *d, u
      * loaded in cr3 on some vcpu.  Walk them, unhooking the non-Xen
      * mappings. */
     perfc_incrc(shadow_prealloc_2);
-    v = current;
-    if ( v->domain != d )
-        v = d->vcpu[0];
-    /* Walk the list from the tail: recently used toplevels have been pulled
-     * to the head */
     list_for_each_backwards_safe(l, t, &d->arch.shadow.toplevel_shadows)
     {
         pg = list_entry(l, struct page_info, list);
         smfn = page_to_mfn(pg);
         shadow_unhook_mappings(v, smfn);

-        /* Need to flush TLB if we've altered our own tables */
-        if ( !shadow_mode_external(d) &&
-             (pagetable_get_pfn(current->arch.shadow_table[0]) == mfn_x(smfn)
-              || pagetable_get_pfn(current->arch.shadow_table[1]) == mfn_x(smfn)
-              || pagetable_get_pfn(current->arch.shadow_table[2]) == mfn_x(smfn)
-              || pagetable_get_pfn(current->arch.shadow_table[3]) == mfn_x(smfn)
-                 ) )
-            local_flush_tlb();
-
+        /* Remember to flush TLBs: we have removed shadow entries that
+         * were in use by some vcpu(s). */
+        for_each_vcpu(d, v2)
+        {
+            if ( pagetable_get_pfn(v2->arch.shadow_table[0]) == mfn_x(smfn)
+                 || pagetable_get_pfn(v2->arch.shadow_table[1]) == mfn_x(smfn)
+                 || pagetable_get_pfn(v2->arch.shadow_table[2]) == mfn_x(smfn)
+                 || pagetable_get_pfn(v2->arch.shadow_table[3]) == mfn_x(smfn)
+                )
+                cpus_or(flushmask, v2->vcpu_dirty_cpumask, flushmask);
+        }
+
         /* See if that freed up a chunk of appropriate size */
-        if ( chunk_is_available(d, order) ) return;
+        if ( chunk_is_available(d, order) )
+        {
+            flush_tlb_mask(flushmask);
+            return;
+        }
     }

     /* Nothing more we can do: all remaining shadows are of pages that
@@ -2216,6 +2223,10 @@ void sh_remove_shadows(struct vcpu *v, m
         if ( all )
             domain_crash(v->domain);
     }
+
+    /* Need to flush TLBs now, so that linear maps are safe next time we
+     * take a fault. */
+    flush_tlb_mask(v->domain->domain_dirty_cpumask);
 }

 void
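
The second hunk above makes sh_remove_shadows() flush every CPU the domain's vcpus may have run on before returning, so that the next shadow fault cannot read stale shadow entries through Xen's linear mappings. A compilable toy model of that invariant follows; struct domain is cut down to a single field and flush_tlb_mask() is a printing stub, so this is illustrative only.

#include <stdio.h>

typedef unsigned int cpumask_t;            /* one bit per physical CPU (simplified) */

struct domain {
    cpumask_t domain_dirty_cpumask;        /* every CPU any of the domain's
                                            * vcpus may have run on */
};

static void flush_tlb_mask(cpumask_t mask) /* printing stand-in for the IPI flush */
{
    printf("flush TLBs on CPU mask %#x\n", mask);
}

/* Tail of the unshadow path: the guest may keep seeing stale translations
 * until its own next flush, but Xen's shadow fault handler walks shadow
 * tables through linear mappings, so those must be made safe eagerly,
 * before the freed shadow pages can be reused. */
static void unshadow_tail(struct domain *d)
{
    flush_tlb_mask(d->domain_dirty_cpumask);
}

int main(void)
{
    struct domain d = { .domain_dirty_cpumask = 0x5 };  /* CPUs 0 and 2 */
    unshadow_tail(&d);
    return 0;
}
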
--- a/xen/arch/x86/mm/shadow/multi.c	Fri Oct 20 16:01:49 2006 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c	Fri Oct 20 16:06:53 2006 +0100
@@ -2562,41 +2562,11 @@ static inline void check_for_early_unsha
          sh_mfn_is_a_page_table(gmfn) )
     {
         u32 flags = mfn_to_page(gmfn)->shadow_flags;
-        mfn_t smfn;
         if ( !(flags & (SHF_L2_32|SHF_L2_PAE|SHF_L2H_PAE|SHF_L4_64)) )
         {
             perfc_incrc(shadow_early_unshadow);
             sh_remove_shadows(v, gmfn, 1, 0 /* Fast, can fail to unshadow */ );
-            return;
-        }
-        /* SHF_unhooked_mappings is set to make sure we only unhook
-         * once in a single batch of updates. It is reset when this
-         * top-level page is loaded into CR3 again */
-        if ( !(flags & SHF_unhooked_mappings) )
-        {
-            perfc_incrc(shadow_early_unshadow_top);
-            mfn_to_page(gmfn)->shadow_flags |= SHF_unhooked_mappings;
-            if ( flags & SHF_L2_32 )
-            {
-                smfn = get_shadow_status(v, gmfn, PGC_SH_l2_32_shadow);
-                shadow_unhook_mappings(v, smfn);
-            }
-            if ( flags & SHF_L2_PAE )
-            {
-                smfn = get_shadow_status(v, gmfn, PGC_SH_l2_pae_shadow);
-                shadow_unhook_mappings(v, smfn);
-            }
-            if ( flags & SHF_L2H_PAE )
-            {
-                smfn = get_shadow_status(v, gmfn, PGC_SH_l2h_pae_shadow);
-                shadow_unhook_mappings(v, smfn);
-            }
-            if ( flags & SHF_L4_64 )
-            {
-                smfn = get_shadow_status(v, gmfn, PGC_SH_l4_64_shadow);
-                shadow_unhook_mappings(v, smfn);
-            }
-        }
+        }
     }
     v->arch.shadow.last_emulated_mfn = mfn_x(gmfn);
 #endif
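
With the top-level unhook heuristic deleted above, check_for_early_unshadow() keeps only the fast path for non-top-level pagetables; its shadow_early_unshadow_top counter goes away in the next hunk. A hypothetical sketch of the remaining control flow, where only the names mirror the diff and the flag values and stubs are invented for illustration:

#include <stdio.h>

#define SHF_L2_32   0x01u   /* top-level shadow-type flags; the values are  */
#define SHF_L2_PAE  0x02u   /* illustrative, only the names come from the   */
#define SHF_L2H_PAE 0x04u   /* diff above                                   */
#define SHF_L4_64   0x08u
#define SHF_TOPLEVEL (SHF_L2_32 | SHF_L2_PAE | SHF_L2H_PAE | SHF_L4_64)

static void sh_remove_shadows_stub(unsigned long gmfn, int fast)
{
    printf("unshadow mfn %#lx (fast=%d)\n", gmfn, fast);
}

/* What is left of the early-unshadow heuristic: only pages carrying no
 * top-level shadow are unshadowed eagerly; top levels are simply skipped,
 * since the deleted per-flag unhook branch no longer exists and the TLB
 * flush in sh_remove_shadows() keeps the linear maps safe. */
static void early_unshadow(unsigned long gmfn, unsigned int flags)
{
    if ( !(flags & SHF_TOPLEVEL) )
        sh_remove_shadows_stub(gmfn, 1 /* fast: may fail to unshadow */);
}

int main(void)
{
    early_unshadow(0x3000ul, 0);           /* not a top level: unshadowed */
    early_unshadow(0x4000ul, SHF_L4_64);   /* top level: left alone now   */
    return 0;
}
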
--- a/xen/include/asm-x86/perfc_defn.h	Fri Oct 20 16:01:49 2006 +0100
+++ b/xen/include/asm-x86/perfc_defn.h	Fri Oct 20 16:06:53 2006 +0100
@@ -76,7 +76,6 @@ PERFCOUNTER_CPU(shadow_writeable_bf,   "
 PERFCOUNTER_CPU(shadow_mappings,       "shadow removes all mappings")
 PERFCOUNTER_CPU(shadow_mappings_bf,    "shadow rm-mappings brute-force")
 PERFCOUNTER_CPU(shadow_early_unshadow, "shadow unshadows for fork/exit")
-PERFCOUNTER_CPU(shadow_early_unshadow_top, "shadow unhooks for fork/exit")
 PERFCOUNTER_CPU(shadow_unshadow,       "shadow unshadows a page")
 PERFCOUNTER_CPU(shadow_up_pointer,     "shadow unshadow by up-pointer")
 PERFCOUNTER_CPU(shadow_unshadow_bf,    "shadow unshadow brute-force")