INVALIDATE_IOMMU_ALL is one of the new architectural commands supported by
IOMMU v2. It instructs the IOMMU to clear all address translation and
interrupt remapping caches for all devices and all domains.
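For reference, INVALIDATE_IOMMU_ALL is an ordinary 4-dword command buffer
entry: only the opcode field (0x8, bits 60:63 of the command, i.e. the top
nibble of the second dword) is set, and all other fields are reserved as
zero. A minimal standalone sketch of building such an entry, mirroring
invalidate_iommu_all() below but without Xen's set_field_in_reg_u32() /
send_iommu_command() helpers, could look like this (the helper name is
hypothetical, and the shift value assumes Xen's IOMMU_CMD_OPCODE_SHIFT of
28):

    #include <stdint.h>
    #include <string.h>

    #define IOMMU_CMD_INVALIDATE_IOMMU_ALL  0x8
    #define IOMMU_CMD_OPCODE_SHIFT          28  /* opcode = top nibble of dword 1 */

    /* Fill a 4-dword command buffer entry for INVALIDATE_IOMMU_ALL;
     * every field other than the opcode is reserved and left zero. */
    static void build_invalidate_iommu_all(uint32_t cmd[4])
    {
        memset(cmd, 0, 4 * sizeof(uint32_t));
        cmd[1] = (uint32_t)IOMMU_CMD_INVALIDATE_IOMMU_ALL << IOMMU_CMD_OPCODE_SHIFT;
    }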
Signed-off-by: Wei Wang <wei.wang2@amd.com>
Committed-by: Keir Fraser <keir@xen.org>
    send_iommu_command(iommu, cmd);
}
+void invalidate_iommu_all(struct amd_iommu *iommu)
+{
+    u32 cmd[4], entry;
+
+    cmd[3] = cmd[2] = cmd[0] = 0;
+
+    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_ALL, 0,
+                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
+                         &entry);
+    cmd[1] = entry;
+
+    send_iommu_command(iommu, cmd);
+}
+
void amd_iommu_flush_iotlb(struct pci_dev *pdev,
                           uint64_t gaddr, unsigned int order)
{
    invalidate_interrupt_table(iommu, bdf);
    flush_command_buffer(iommu);
}
+
+void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
+{
+    ASSERT( spin_is_locked(&iommu->lock) );
+
+    invalidate_iommu_all(iommu);
+    flush_command_buffer(iommu);
+}
    set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
    set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);
+    if ( iommu_has_feature(iommu, IOMMU_EXT_FEATURE_IASUP_SHIFT) )
+        amd_iommu_flush_all_caches(iommu);
+
    iommu->enabled = 1;
    spin_unlock_irqrestore(&iommu->lock, flags);
}
    /* flush all cache entries after iommu re-enabled */
-    invalidate_all_devices();
-    invalidate_all_domain_pages();
+    if ( !iommu_has_feature(iommu, IOMMU_EXT_FEATURE_IASUP_SHIFT) )
+    {
+        invalidate_all_devices();
+        invalidate_all_domain_pages();
+    }
}
#define IOMMU_CMD_INVALIDATE_IOMMU_PAGES 0x3
#define IOMMU_CMD_INVALIDATE_IOTLB_PAGES 0x4
#define IOMMU_CMD_INVALIDATE_INT_TABLE 0x5
+#define IOMMU_CMD_INVALIDATE_IOMMU_ALL 0x8
/* COMPLETION_WAIT command */
#define IOMMU_COMP_WAIT_DATA_BUFFER_SIZE 8
                           unsigned int order);
void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf);
void amd_iommu_flush_intremap(struct amd_iommu *iommu, uint16_t bdf);
+void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
/* find iommu for bdf */
struct amd_iommu *find_iommu_for_device(int seg, int bdf);