return -ENOSYS;
}
+void arch_domain_creation_finished(struct domain *d)
+{
+ /*
+ * To avoid flushing the whole guest RAM on the first Set/Way, we
+ * invalidate the P2M to track what has been accessed.
+ *
+ * This is only done when the IOMMU is not used or the page-tables are
+ * not shared, because an entry with bit[0] (i.e. the valid bit) clear
+ * will result in an IOMMU fault that cannot be fixed up.
+ */
+ if ( !iommu_use_hap_pt(d) )
+ p2m_invalidate_root(p2m_get_hostp2m(d));
+}
+
static int is_guest_pv32_psr(uint32_t psr)
{
switch (psr & PSR_MODE_MASK)
p2m->need_flush = true;
}
+/*
+ * Invalidate all entries in the root page-tables. This is useful to
+ * get a fault on the next access, so that an action can be taken.
+ */
+void p2m_invalidate_root(struct p2m_domain *p2m)
+{
+ unsigned int i;
+
+ p2m_write_lock(p2m);
+
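+ /* Clear the valid bit of the root entries so the next guest access faults. */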
+ for ( i = 0; i < P2M_ROOT_LEVEL; i++ )
+ p2m_invalidate_table(p2m, page_to_mfn(p2m->root + i));
+
+ p2m_write_unlock(p2m);
+}
+
/*
* Resolve any translation fault due to change in the p2m. This
* includes break-before-make and valid bit cleared.
*/
if ( gfn_eq(start, next_block_gfn) )
{
- mfn = p2m_get_entry(p2m, start, &t, NULL, &order, NULL);
+ bool valid;
+
+ mfn = p2m_get_entry(p2m, start, &t, NULL, &order, &valid);
next_block_gfn = gfn_next_boundary(start, order);
- if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_any_ram(t) )
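+
+ /*
+ * Skip holes, non-RAM regions, and entries with the valid bit
+ * clear: the latter have not been accessed since the last
+ * invalidation, so there is nothing new to clean for them.
+ */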
+ if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_any_ram(t) || !valid )
{
count++;
start = next_block_gfn;
*/
void p2m_flush_vm(struct vcpu *v)
{
+ struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
int rc;
gfn_t start = _gfn(0);
"P2M has not been correctly cleaned (rc = %d)\n",
rc);
+ /*
+ * Invalidate the p2m so we can track which pages were modified by the
+ * guest between two calls to p2m_flush_vm().
+ */
+ p2m_invalidate_root(p2m);
+
v->arch.need_flush_to_ram = false;
}
int arch_domain_soft_reset(struct domain *d);
+void arch_domain_creation_finished(struct domain *d);
+
void arch_p2m_set_access_required(struct domain *d, bool access_required);
int arch_set_info_guest(struct vcpu *, vcpu_guest_context_u);