const char __section(".bss.page_aligned.const") __aligned(PAGE_SIZE)
zero_page[PAGE_SIZE];
-static void invalidate_shadow_ldt(struct vcpu *v, int flush)
+/*
+ * Flush the LDT, dropping any typerefs. Returns a boolean indicating whether
+ * mappings have been removed (i.e. a TLB flush is needed).
+ */
+static bool invalidate_shadow_ldt(struct vcpu *v)
{
l1_pgentry_t *pl1e;
- unsigned int i;
+ unsigned int i, mappings_dropped = 0;
struct page_info *page;
BUG_ON(unlikely(in_irq()));

spin_lock(&v->arch.pv_vcpu.shadow_ldt_lock);

if ( v->arch.pv_vcpu.shadow_ldt_mapcnt == 0 )
goto out;
- v->arch.pv_vcpu.shadow_ldt_mapcnt = 0;
pl1e = pv_ldt_ptes(v);
for ( i = 0; i < 16; i++ )
{
if ( !(l1e_get_flags(pl1e[i]) & _PAGE_PRESENT) )
continue;
+
page = l1e_get_page(pl1e[i]);
l1e_write(&pl1e[i], l1e_empty());
+ mappings_dropped++;
+
ASSERT_PAGE_IS_TYPE(page, PGT_seg_desc_page);
ASSERT_PAGE_IS_DOMAIN(page, v->domain);
put_page_and_type(page);
}
- /* Rid TLBs of stale mappings (guest mappings and shadow mappings). */
- if ( flush )
- flush_tlb_mask(v->vcpu_dirty_cpumask);
+ ASSERT(v->arch.pv_vcpu.shadow_ldt_mapcnt == mappings_dropped);
+ v->arch.pv_vcpu.shadow_ldt_mapcnt = 0;
out:
spin_unlock(&v->arch.pv_vcpu.shadow_ldt_lock);
+
+ return mappings_dropped;
}
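/*
 * Aside (illustrative, not part of the patch): the loop bound of 16 above
 * comes from the architectural LDT limit. An LDT holds at most 8192
 * descriptors of 8 bytes each:
 *
 *     8192 * 8 = 65536 bytes = 16 pages of 4KiB
 *
 * so 16 L1 entries cover the entire shadow LDT range returned by
 * pv_ldt_ptes().
 */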
(l1e_owner == pg_owner) )
{
for_each_vcpu ( pg_owner, v )
- invalidate_shadow_ldt(v, 1);
+ {
+ if ( invalidate_shadow_ldt(v) )
+ flush_tlb_mask(v->vcpu_dirty_cpumask);
+ }
}
put_page(page);
}
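/*
 * Aside (illustrative, not part of the patch): the hunk above shows the
 * calling pattern the new boolean return enables. The shadow LDT is
 * populated lazily, on first access, so most vCPUs have no mappings to
 * drop; previously every vCPU paid for a flush regardless. Now:
 *
 *     if ( invalidate_shadow_ldt(v) )              // only flush when
 *         flush_tlb_mask(v->vcpu_dirty_cpumask);   // work was done
 *
 * and the flush IPIs target only the pCPUs in v->vcpu_dirty_cpumask,
 * i.e. those which may actually cache stale translations for this vCPU.
 */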
if ( (x & PGT_type_mask) != type )
{
/*
- * On type change we check to flush stale TLB entries. This
- * may be unnecessary (e.g., page was GDT/LDT) but those
- * circumstances should be very rare.
+ * On type change we check to flush stale TLB entries. It is
+ * vital that no other CPUs are left with mappings of a frame
+ * which is about to become writeable to the guest.
*/
cpumask_t *mask = this_cpu(scratch_cpumask);
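/*
 * Aside (sketch under assumptions, not part of the patch): the
 * scratch_cpumask obtained above is typically used to narrow the flush.
 * Approximate shape of the code that follows in __get_page_type():
 *
 *     cpumask_copy(mask, d->domain_dirty_cpumask);
 *     tlbflush_filter(mask, page->tlbflush_timestamp);
 *     if ( !cpumask_empty(mask) )
 *         flush_tlb_mask(mask);
 *
 * tlbflush_filter() drops CPUs whose TLBs have already been flushed
 * since the page's timestamp, so only CPUs which might still hold a
 * stale entry receive a flush IPI.
 */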
return rc;
}
- invalidate_shadow_ldt(curr, 0);
+ invalidate_shadow_ldt(curr); /* Unconditional TLB flush later. */
write_ptbase(curr);
return 0;
return rc;
}
- invalidate_shadow_ldt(curr, 0);
+ invalidate_shadow_ldt(curr); /* Unconditional TLB flush later. */
if ( !VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
fill_ro_mpt(mfn);
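/*
 * Aside (not part of the patch): in both hunks above the new boolean
 * result is deliberately ignored, per the "Unconditional TLB flush
 * later" comments. These are CR3-switch paths: write_ptbase() reloads
 * %cr3 shortly afterwards, and Xen's write_cr3() in this era briefly
 * cleared CR4.PGE around the reload, making it a full TLB flush
 * (global entries included), which subsumes any flush the shadow LDT
 * teardown might have required.
 */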
else if ( (curr->arch.pv_vcpu.ldt_ents != ents) ||
(curr->arch.pv_vcpu.ldt_base != ptr) )
{
- invalidate_shadow_ldt(curr, 0);
- flush_tlb_local();
+ if ( invalidate_shadow_ldt(curr) )
+ flush_tlb_local();
+
curr->arch.pv_vcpu.ldt_base = ptr;
curr->arch.pv_vcpu.ldt_ents = ents;
load_LDT(curr);
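/*
 * Guest-side view (illustrative sketch, not part of the patch): the
 * path above is reached via the MMUEXT_SET_LDT subop of the mmuext_op
 * hypercall. "my_ldt" and "nr_descriptors" are placeholders; error
 * handling is elided.
 *
 *     struct mmuext_op op = {
 *         .cmd = MMUEXT_SET_LDT,
 *         .arg1.linear_addr = (unsigned long)my_ldt,  // page-aligned VA
 *         .arg2.nr_ents = nr_descriptors,             // at most 8192
 *     };
 *     HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF);
 *
 * Because the shadow LDT is repopulated lazily on first use, changing
 * the base or entry count only requires invalidation, plus a local
 * flush when mappings were actually dropped, as the hunk above does.
 */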