write_atomic(&entryptr->epte, new.epte);
+ /*
+ * The recalc field on the EPT is used to signal either that a
+ * recalculation of the EMT field is required (which doesn't affect the
+ * IOMMU), or a type change. Type changes can only be between ram_rw,
+ * logdirty and ioreq_server: changes to/from logdirty won't work well with
+ * an IOMMU anyway, as IOMMU #PFs are not synchronous and will lead to
+ * aborts, and changes to/from ioreq_server are already fully flushed
+ * before returning to guest context (see
+ * XEN_DMOP_map_mem_type_to_ioreq_server).
+ */
+ if ( !new.recalc && iommu_use_hap_pt(p2m->domain) )
+ iommu_sync_cache(entryptr, sizeof(*entryptr));
+
return 0;
}
break;
}
+ if ( iommu_use_hap_pt(p2m->domain) )
+ iommu_sync_cache(table, EPT_PAGETABLE_ENTRIES * sizeof(ept_entry_t));
+
unmap_domain_page(table);
/* Even failed we should install the newly allocated ept page. */
if ( !next )
return GUEST_TABLE_MAP_FAILED;
+ if ( iommu_use_hap_pt(p2m->domain) )
+ iommu_sync_cache(next, EPT_PAGETABLE_ENTRIES * sizeof(ept_entry_t));
+
rc = atomic_write_ept_entry(p2m, ept_entry, e, next_level);
ASSERT(rc == 0);
}
need_modify_vtd_table )
{
if ( iommu_use_hap_pt(d) )
- rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
+ rc = iommu_iotlb_flush(d, _dfn(gfn), (1u << order),
+ (iommu_flags ? IOMMU_FLUSHF_added : 0) |
+ (vtd_pte_present ? IOMMU_FLUSHF_modified
+ : 0));
else if ( need_iommu_pt_sync(d) )
rc = iommu_flags ?
iommu_legacy_map(d, _dfn(gfn), mfn, order, iommu_flags) :
return 0;
}
-int iommu_pte_flush(struct domain *d, uint64_t dfn, uint64_t *pte,
- int order, int present)
-{
- struct acpi_drhd_unit *drhd;
- struct vtd_iommu *iommu = NULL;
- struct domain_iommu *hd = dom_iommu(d);
- bool_t flush_dev_iotlb;
- int iommu_domid;
- int rc = 0;
-
- iommu_sync_cache(pte, sizeof(struct dma_pte));
-
- for_each_drhd_unit ( drhd )
- {
- iommu = drhd->iommu;
- if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) )
- continue;
-
- flush_dev_iotlb = !!find_ats_dev_drhd(iommu);
- iommu_domid= domain_iommu_domid(d, iommu);
- if ( iommu_domid == -1 )
- continue;
-
- rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
- __dfn_to_daddr(dfn),
- order, !present, flush_dev_iotlb);
- if ( rc > 0 )
- {
- iommu_flush_write_buffer(iommu);
- rc = 0;
- }
- }
-
- if ( unlikely(rc) )
- {
- if ( !d->is_shutting_down && printk_ratelimit() )
- printk(XENLOG_ERR VTDPREFIX
- " d%d: IOMMU pages flush failed: %d\n",
- d->domain_id, rc);
-
- if ( !is_hardware_domain(d) )
- domain_crash(d);
- }
-
- return rc;
-}
-
static int __init vtd_ept_page_compatible(struct vtd_iommu *iommu)
{
u64 ept_cap, vtd_cap = iommu->cap;
: 0;
}
-/* While VT-d specific, this must get declared in a generic header. */
-int __must_check iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
- int order, int present);
-
static inline bool iommu_supports_x2apic(void)
{
return iommu_init_ops && iommu_init_ops->supports_x2apic