         disable_IO_APIC();
         hpet_disable();
+        iommu_quiesce();
     }
 }
     pci_disable_msi_all();
     disable_IO_APIC();
     hpet_disable();
+    iommu_quiesce();
 
     if ( num_online_cpus() > 1 )
     {
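In both hunks above, iommu_quiesce() slots in as the last step of the interrupt teardown: MSI, the IO-APIC and the HPET are silenced first, and the IOMMU last, so no device-side source can still raise an interrupt once the shutdown or crash path moves on. A stand-alone sketch of that ordering (step names are made up; none of this is Xen code):

#include <stdio.h>

/* Each step masks one interrupt source; order mirrors the hunks above. */
static void msi_off(void)    { puts("MSI sources masked"); }
static void ioapic_off(void) { puts("IO-APIC disabled"); }
static void hpet_off(void)   { puts("HPET disabled"); }
static void iommu_off(void)  { puts("IOMMU interrupt generation masked"); }

static void (*const shutdown_steps[])(void) = {
    msi_off, ioapic_off, hpet_off, iommu_off,
};

int main(void)
{
    for ( unsigned int i = 0;
          i < sizeof(shutdown_steps) / sizeof(shutdown_steps[0]); i++ )
        shutdown_steps[i]();
    return 0;
}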
 void cf_check amd_iommu_resume(void);
 int __must_check cf_check amd_iommu_suspend(void);
 void cf_check amd_iommu_crash_shutdown(void);
+void cf_check amd_iommu_quiesce(void);
 
 static inline u32 get_field_from_reg_u32(u32 reg_value, u32 mask, u32 shift)
 {
         invalidate_all_domain_pages();
     }
 }
+
+void cf_check amd_iommu_quiesce(void)
+{
+    struct amd_iommu *iommu;
+
+    for_each_amd_iommu ( iommu )
+    {
+        if ( iommu->ctrl.int_cap_xt_en )
+        {
+            iommu->ctrl.int_cap_xt_en = false;
+            writeq(iommu->ctrl.raw,
+                   iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
+        }
+        else
+            amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
+    }
+}
     .crash_shutdown = amd_iommu_crash_shutdown,
     .get_reserved_device_memory = amd_iommu_get_reserved_device_memory,
     .dump_page_tables = amd_dump_page_tables,
+    .quiesce = amd_iommu_quiesce,
 };
 
 static const struct iommu_init_ops __initconstrel _iommu_init_ops = {
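amd_iommu_quiesce() covers both interrupt delivery modes the AMD driver can be in: when the IOMMU was programmed for x2APIC-capable delivery (int_cap_xt_en), the enable bit is cleared in the driver's cached copy of the control register and the whole 64-bit register is written back in one store; otherwise the IOMMU's MSI is masked via amd_iommu_msi_enable(). A minimal stand-alone model of the cached write-back half (struct, bit position and accessor are invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define CTRL_INT_CAP_XT_EN (UINT64_C(1) << 50)  /* hypothetical bit position */

struct fake_iommu {
    uint64_t ctrl_cache;  /* software copy, like iommu->ctrl.raw */
    uint64_t mmio_ctrl;   /* stands in for the MMIO control register */
};

/* Stand-in for an MMIO write accessor such as writeq(). */
static void fake_writeq(uint64_t val, uint64_t *reg)
{
    *reg = val;
}

int main(void)
{
    struct fake_iommu iommu = {
        .ctrl_cache = CTRL_INT_CAP_XT_EN | 0x3,
        .mmio_ctrl  = CTRL_INT_CAP_XT_EN | 0x3,
    };

    /* Clear the enable bit in the cache, then write the register whole. */
    iommu.ctrl_cache &= ~CTRL_INT_CAP_XT_EN;
    fake_writeq(iommu.ctrl_cache, &iommu.mmio_ctrl);

    printf("ctrl after quiesce: %#jx\n", (uintmax_t)iommu.mmio_ctrl);
    return 0;
}

Keeping a software copy means the shutdown path only ever stores to the device register; it never has to read it back.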
 #endif
 }
 
+void iommu_quiesce(void)
+{
+    const struct iommu_ops *ops;
+
+    if ( !iommu_enabled )
+        return;
+
+    ops = iommu_get_ops();
+    if ( ops->quiesce )
+        iommu_vcall(ops, quiesce);
+}
+
 int iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
 {
     const struct iommu_ops *ops;
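The common wrapper treats the new hook as strictly optional: a driver that never enables IOMMU interrupt generation can leave .quiesce unset, and iommu_quiesce() degrades to a no-op. (iommu_vcall() is Xen's devirtualised-call wrapper; the sketch below substitutes a plain guarded indirect call.) A stand-alone model of this optional-hook dispatch, with invented names:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ops {
    void (*quiesce)(void);  /* optional: may be NULL */
};

static void vendor_quiesce(void)
{
    puts("masking IOMMU interrupt generation");
}

static const struct ops with_hook    = { .quiesce = vendor_quiesce };
static const struct ops without_hook = { .quiesce = NULL };

static bool enabled = true;

static void quiesce_all(const struct ops *ops)
{
    if ( !enabled )
        return;

    if ( ops->quiesce )  /* the hook is optional, so guard the call */
        ops->quiesce();
}

int main(void)
{
    quiesce_all(&with_hook);     /* runs the vendor hook */
    quiesce_all(&without_hook);  /* silently does nothing */
    return 0;
}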
     return rc;
 }
 
+static void cf_check vtd_quiesce(void)
+{
+    const struct acpi_drhd_unit *drhd;
+
+    for_each_drhd_unit ( drhd )
+    {
+        const struct vtd_iommu *iommu = drhd->iommu;
+        uint32_t sts = dmar_readl(iommu->reg, DMAR_FECTL_REG);
+
+        /*
+         * Open code dma_msi_mask() to avoid taking the spinlock which could
+         * deadlock if called from crash context.
+         */
+        sts |= DMA_FECTL_IM;
+        dmar_writel(iommu->reg, DMAR_FECTL_REG, sts);
+    }
+}
+
 static const struct iommu_ops __initconst_cf_clobber vtd_ops = {
     .page_sizes = PAGE_SIZE_4K,
     .init = intel_iommu_domain_init,
     .iotlb_flush = iommu_flush_iotlb,
     .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
     .dump_page_tables = vtd_dump_page_tables,
+    .quiesce = vtd_quiesce,
 };
 
 const struct iommu_init_ops __initconstrel intel_iommu_init_ops = {
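vtd_quiesce() masks further fault-event interrupts by setting the interrupt-mask (IM) bit in each unit's fault event control register, open-coding the read/modify/write so that no spinlock is taken: in crash context the lock holder may itself have been interrupted, and acquiring the lock could deadlock. A stand-alone model of the same sequence against a fake register file (the accessors are invented; FECTL at offset 0x38 with IM in bit 31 follows the VT-d spec):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define FECTL_REG 0x38                  /* fault event control register */
#define FECTL_IM  (UINT32_C(1) << 31)   /* interrupt mask bit */

static uint32_t regs[0x100 / 4];        /* fake MMIO register file */

static uint32_t fake_readl(unsigned int off)
{
    return regs[off / 4];
}

static void fake_writel(unsigned int off, uint32_t val)
{
    regs[off / 4] = val;
}

int main(void)
{
    /* Lock-free read/modify/write: safe even from a crash/NMI path. */
    uint32_t sts = fake_readl(FECTL_REG);

    sts |= FECTL_IM;
    fake_writel(FECTL_REG, sts);

    printf("FECTL: %#" PRIx32 "\n", fake_readl(FECTL_REG));
    return 0;
}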
      */
     int (*dt_xlate)(device_t *dev, const struct dt_phandle_args *args);
 #endif
+    /* Inhibit all interrupt generation, to be used at shutdown. */
+    void (*quiesce)(void);
 };
 
 /*
 int __must_check iommu_suspend(void);
 void iommu_resume(void);
 void iommu_crash_shutdown(void);
+void iommu_quiesce(void);
 int iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt);
 int iommu_quarantine_dev_init(device_t *dev);