if ( DMA_TLB_IAIG(val) == 0 )
dprintk(XENLOG_ERR VTDPREFIX, "IOMMU: flush IOTLB failed\n");
- if ( DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type) )
- dprintk(XENLOG_INFO VTDPREFIX,
- "IOMMU: tlb flush request %x, actual %x\n",
- (u32)DMA_TLB_IIRG(type), (u32)DMA_TLB_IAIG(val));
/* flush context entry will implicitly flush write buffer */
return 0;
}
unmap_vtd_domain_page(context_entries);
/* Context entry was previously non-present (with domid 0). */
- iommu_flush_context_device(iommu, 0, (((u16)bus) << 8) | devfn,
- DMA_CCMD_MASK_NOBIT, 1);
- if ( iommu_flush_iotlb_dsi(iommu, 0, 1) )
+ if ( iommu_flush_context_device(iommu, 0, (((u16)bus) << 8) | devfn,
+ DMA_CCMD_MASK_NOBIT, 1) )
iommu_flush_write_buffer(iommu);
+ else
+ iommu_flush_iotlb_dsi(iommu, 0, 1);
set_bit(iommu->index, &hd->iommu_bitmap);
spin_unlock_irqrestore(&iommu->lock, flags);
context_clear_present(*context);
context_clear_entry(*context);
iommu_flush_cache_entry(context);
- iommu_flush_context_domain(iommu, domain_iommu_domid(domain), 0);
- iommu_flush_iotlb_dsi(iommu, domain_iommu_domid(domain), 0);
+
+ if ( iommu_flush_context_domain(iommu, domain_iommu_domid(domain), 0) )
+ iommu_flush_write_buffer(iommu);
+ else
+ iommu_flush_iotlb_dsi(iommu, domain_iommu_domid(domain), 0);
+
unmap_vtd_domain_page(context_entries);
spin_unlock_irqrestore(&iommu->lock, flags);