                            unsigned int order);
 void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
                            daddr_t daddr, unsigned int order);
-void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf);
+void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf,
+                            domid_t domid);
 void amd_iommu_flush_intremap(struct amd_iommu *iommu, uint16_t bdf);
 void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
     _amd_iommu_flush_pages(d, __dfn_to_daddr(dfn), order);
 }
-void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf)
+void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf,
+                            domid_t domid)
 {
     invalidate_dev_table_entry(iommu, bdf);
     flush_command_buffer(iommu, 0);
+
+    /* Also invalidate IOMMU TLB entries when flushing the DTE. */
+    if ( domid != DOMID_INVALID )
+    {
+        invalidate_iommu_pages(iommu, INV_IOMMU_ALL_PAGES_ADDRESS, domid, 0);
+        flush_command_buffer(iommu, 0);
+    }
 }
 void amd_iommu_flush_intremap(struct amd_iommu *iommu, uint16_t bdf)
 static int do_invalidate_dte(struct domain *d, cmd_entry_t *cmd)
 {
-    uint16_t gbdf, mbdf, req_id, gdom_id, hdom_id;
+    uint16_t gbdf, mbdf, req_id, gdom_id, hdom_id, prev_domid;
     struct amd_iommu_dte *gdte, *mdte, *dte_base;
     struct amd_iommu *iommu = NULL;
     struct guest_iommu *g_iommu;
     req_id = get_dma_requestor_id(iommu->seg, mbdf);
     dte_base = iommu->dev_table.buffer;
     mdte = &dte_base[req_id];
+    prev_domid = mdte->domain_id;
     spin_lock_irqsave(&iommu->lock, flags);
     dte_set_gcr3_table(mdte, hdom_id, gcr3_mfn << PAGE_SHIFT, gv, glx);
     spin_unlock_irqrestore(&iommu->lock, flags);
-    amd_iommu_flush_device(iommu, req_id);
+    amd_iommu_flush_device(iommu, req_id, prev_domid);
     return 0;
 }
         req_id = ivrs_mappings[bdf].dte_requestor_id;
         if ( iommu )
         {
-            amd_iommu_flush_device(iommu, req_id);
+            /*
+             * IOMMU TLB flush performed separately (see
+             * invalidate_all_domain_pages()).
+             */
+            amd_iommu_flush_device(iommu, req_id, DOMID_INVALID);
             amd_iommu_flush_intremap(iommu, req_id);
         }
     }
         spin_unlock_irqrestore(&iommu->lock, flags);
-        amd_iommu_flush_device(iommu, req_id);
+        /* DTE didn't have DMA translations enabled, do not flush the TLB. */
+        amd_iommu_flush_device(iommu, req_id, DOMID_INVALID);
     }
     else if ( dte->pt_root != mfn_x(page_to_mfn(root_pg)) )
     {
+        domid_t prev_domid = dte->domain_id;
+
         /*
          * Strictly speaking if the device is the only one with this requestor
          * ID, it could be allowed to be re-assigned regardless of unity map
         spin_unlock_irqrestore(&iommu->lock, flags);
-        amd_iommu_flush_device(iommu, req_id);
+        amd_iommu_flush_device(iommu, req_id, prev_domid);
     }
     else
         spin_unlock_irqrestore(&iommu->lock, flags);
     spin_lock_irqsave(&iommu->lock, flags);
     if ( dte->tv || dte->v )
     {
+        domid_t prev_domid = dte->domain_id;
+
         /* See the comment in amd_iommu_setup_device_table(). */
         dte->int_ctl = IOMMU_DEV_TABLE_INT_CONTROL_ABORTED;
         smp_wmb();
         spin_unlock_irqrestore(&iommu->lock, flags);
-        amd_iommu_flush_device(iommu, req_id);
+        amd_iommu_flush_device(iommu, req_id, prev_domid);
         AMD_IOMMU_DEBUG("Disable: device id = %#x, "
                         "domain = %d, paging mode = %d\n",
         spin_unlock_irqrestore(&iommu->lock, flags);
-        amd_iommu_flush_device(iommu, bdf);
+        /* DTE didn't have DMA translations enabled, do not flush the TLB. */
+        amd_iommu_flush_device(iommu, bdf, DOMID_INVALID);
     }
     if ( amd_iommu_reserve_domain_unity_map(