* Copyright (c) 2003-2006, K A Fraser
*/
+#include <xen/paging.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/softirq.h>
raise_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ);
skip_clocktick:
- hvm_flush_guest_tlbs();
-
return t2;
}
local_irq_save(flags);
t = pre_flush();
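+    /* pre_flush() no longer refreshes HVM ASIDs/VPIDs; do it explicitly here. */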
+ hvm_flush_guest_tlbs();
old_cr4 = read_cr4();
ASSERT(!(old_cr4 & X86_CR4_PCIDE) || !(old_cr4 & X86_CR4_PGE));
do_tlb_flush();
}
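+    /* Refresh HVM ASIDs/VPIDs only when the caller explicitly requested it. */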
+ if ( flags & FLUSH_HVM_ASID_CORE )
+ hvm_flush_guest_tlbs();
+
if ( flags & FLUSH_CACHE )
{
const struct cpuinfo_x86 *c = &current_cpu_data;
return flags;
}
+
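+/*
+ * Return the flush flags needed on behalf of guests of @d:
+ *  - guests using shadow paging need a plain TLB flush, since the shadow
+ *    page tables maintained by Xen are what the hardware actually walks;
+ *  - HVM guests need their linear TLB (ASID/VPID) refreshed when running
+ *    on AMD SVM hardware, or when shadow paging is in use.
+ */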
+unsigned int guest_flush_tlb_flags(const struct domain *d)
+{
+ bool shadow = paging_mode_shadow(d);
+ bool asid = is_hvm_domain(d) && (cpu_has_svm || shadow);
+
+ return (shadow ? FLUSH_TLB : 0) | (asid ? FLUSH_HVM_ASID_CORE : 0);
+}
+
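+/* Flush the TLBs of the CPUs in @mask, restricted to what guests of @d need. */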
+void guest_flush_tlb_mask(const struct domain *d, const cpumask_t *mask)
+{
+ unsigned int flags = guest_flush_tlb_flags(d);
+
+ if ( flags )
+ flush_mask(mask, flags);
+}
p2m_change_type_range(d, begin_pfn, begin_pfn + nr,
p2m_ram_rw, p2m_ram_logdirty);
- flush_tlb_mask(d->dirty_cpumask);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
memset(dirty_bitmap, 0xff, size); /* consider all pages dirty */
}
* to be read-only, or via hardware-assisted log-dirty.
*/
p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
- flush_tlb_mask(d->dirty_cpumask);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
}
return 0;
}
* be read-only, or via hardware-assisted log-dirty.
*/
p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
- flush_tlb_mask(d->dirty_cpumask);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
}
/************************************************/
safe_write_pte(p, new);
if ( old_flags & _PAGE_PRESENT )
- flush_tlb_mask(d->dirty_cpumask);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
paging_unlock(d);
safe_write_pte(p, new);
if (old_flags & _PAGE_PRESENT)
- flush_tlb_mask(p2m->dirty_cpumask);
+ guest_flush_tlb_mask(d, p2m->dirty_cpumask);
paging_unlock(d);
l1_pgentry_t *tab;
unsigned long gfn = 0;
unsigned int i, changed;
+ const struct domain *d = p2m->domain;
if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) == 0 )
return;
- ASSERT(hap_enabled(p2m->domain));
+ ASSERT(hap_enabled(d));
tab = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
for ( changed = i = 0; i < (1 << PAGETABLE_ORDER); ++i )
unmap_domain_page(tab);
if ( changed )
- flush_tlb_mask(p2m->domain->dirty_cpumask);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
}
static int p2m_pt_change_entry_type_range(struct p2m_domain *p2m,
p2m_unlock(p2m);
- flush_tlb_mask(d->dirty_cpumask);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
}
/*
}
if ( ftlb )
- flush_tlb_mask(d->dirty_cpumask);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
return 0;
}
/* See if that freed up enough space */
if ( d->arch.paging.shadow.free_pages >= pages )
{
- flush_tlb_mask(d->dirty_cpumask);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
return;
}
}
pagetable_get_mfn(v->arch.shadow_table[i]), 0);
/* Make sure everyone sees the unshadowings */
- flush_tlb_mask(d->dirty_cpumask);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
}
void shadow_blow_tables_per_domain(struct domain *d)
if ( unlikely(!cpumask_empty(&mask)) )
{
perfc_incr(shadow_alloc_tlbflush);
- flush_tlb_mask(&mask);
+ guest_flush_tlb_mask(d, &mask);
}
/* Now safe to clear the page for reuse */
clear_domain_page(page_to_mfn(sp));
/* Need to flush TLBs now, so that linear maps are safe next time we
* take a fault. */
- flush_tlb_mask(d->dirty_cpumask);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
paging_unlock(d);
}
{
sh_remove_all_shadows_and_parents(d, mfn);
if ( sh_remove_all_mappings(d, mfn, _gfn(gfn)) )
- flush_tlb_mask(d->dirty_cpumask);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
}
}
}
omfn = mfn_add(omfn, 1);
}
- flush_tlb_mask(&flushmask);
+ guest_flush_tlb_mask(d, &flushmask);
if ( npte )
unmap_domain_page(npte);
}
}
if ( flush_tlb )
- flush_tlb_mask(d->dirty_cpumask);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
goto out;
out_sl1ma:
}
/* Flush TLBs on all CPUs with dirty vcpu state. */
- flush_tlb_mask(mask);
+ guest_flush_tlb_mask(d, mask);
/* Done. */
for_each_vcpu ( d, v )
if ( rc & SHADOW_SET_FLUSH )
/* Need to flush TLBs to pick up shadow PT changes */
- flush_tlb_mask(d->dirty_cpumask);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
if ( rc & SHADOW_SET_ERROR )
{
};
#endif
+/* Helper to perform a local TLB flush, using the flush flags guests of @d require. */
+static void sh_flush_local(const struct domain *d)
+{
+ flush_local(guest_flush_tlb_flags(d));
+}
+
/**************************************************************************/
/* Hash table mapping from guest pagetables to shadows
*
perfc_incr(shadow_rm_write_flush_tlb);
smp_wmb();
atomic_inc(&d->arch.paging.shadow.gtable_dirty_version);
- flush_tlb_mask(d->dirty_cpumask);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
}
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
if ( mfn_to_page(sl1mfn)->u.sh.type
== SH_type_fl1_shadow )
{
- flush_tlb_local();
+ sh_flush_local(v->domain);
return false;
}
* linear pagetable to read a top-level shadow page table entry. But,
* without this change, it would fetch the wrong value due to a stale TLB.
*/
- flush_tlb_local();
+ sh_flush_local(d);
}
* (old) shadow linear maps in the writeable mapping heuristics. */
#if GUEST_PAGING_LEVELS == 2
if ( sh_remove_write_access(d, gmfn, 2, 0) != 0 )
- flush_tlb_mask(d->dirty_cpumask);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l2_shadow);
#elif GUEST_PAGING_LEVELS == 3
/* PAE guests have four shadow_table entries, based on the
}
}
if ( flush )
- flush_tlb_mask(d->dirty_cpumask);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
/* Now install the new shadows. */
for ( i = 0; i < 4; i++ )
{
}
#elif GUEST_PAGING_LEVELS == 4
if ( sh_remove_write_access(d, gmfn, 4, 0) != 0 )
- flush_tlb_mask(d->dirty_cpumask);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow);
if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) )
{
}
}
if ( flush )
- flush_tlb_mask(d->dirty_cpumask);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
/* Remember that we've seen the guest use this interface, so we
* can rely on it using it in future, instead of guessing at
mfn_to_page(gmfn)->pagetable_dying = true;
shadow_unhook_mappings(d, smfn, 1/* user pages only */);
/* Now flush the TLB: we removed toplevel mappings. */
- flush_tlb_mask(d->dirty_cpumask);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
}
/* Remember that we've seen the guest use this interface, so we
#define FLUSH_VCPU_STATE 0x1000
/* Flush the per-cpu root page table */
#define FLUSH_ROOT_PGTBL 0x2000
+#ifdef CONFIG_HVM
+ /* Flush all HVM guests' linear TLBs (using ASID/VPID). */
+#define FLUSH_HVM_ASID_CORE 0x4000
+#else
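+/* No HVM support: define to 0 so checks of the flag are compiled out. */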
+#define FLUSH_HVM_ASID_CORE 0
+#endif
/* Flush local TLBs/caches. */
unsigned int flush_area_local(const void *va, unsigned int flags);
return clean_and_invalidate_dcache_va_range(p, size);
}
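+/* Flush the TLB state (TLB entries and/or ASIDs/VPIDs) that guests of @d rely on. */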
+unsigned int guest_flush_tlb_flags(const struct domain *d);
+void guest_flush_tlb_mask(const struct domain *d, const cpumask_t *mask);
+
#endif /* __FLUSHTLB_H__ */