int enable_intremap(struct vtd_iommu *iommu, int eim);
void disable_intremap(struct vtd_iommu *iommu);
-void iommu_sync_cache(const void *addr, unsigned int size);
int iommu_alloc(struct acpi_drhd_unit *drhd);
void iommu_free(struct acpi_drhd_unit *drhd);
/*
 * NOTE(review): the name suggests a count (or flag) of IOMMUs whose
 * hardware page-walks are not cache coherent — the code that sets or
 * reads it is not visible in this hunk; confirm against iommu_alloc()
 * and the cache-sync paths before relying on this description.
 */
static int iommus_incoherent;
-void iommu_sync_cache(const void *addr, unsigned int size)
+static void sync_cache(const void *addr, unsigned int size)
{
int i;
static unsigned int clflush_size = 0;
vaddr = __map_domain_page(cur_pg);
memset(vaddr, 0, PAGE_SIZE);
- iommu_sync_cache(vaddr, PAGE_SIZE);
+ sync_cache(vaddr, PAGE_SIZE);
unmap_domain_page(vaddr);
cur_pg++;
}
.iotlb_flush_all = iommu_flush_iotlb_all,
.get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
.dump_p2m_table = vtd_dump_p2m_table,
+ .sync_cache = sync_cache,
};
const struct iommu_init_ops __initconstrel intel_iommu_init_ops = {
int pi_update_irte(const struct pi_desc *pi_desc, const struct pirq *pirq,
const uint8_t gvec);
+/*
+ * iommu_sync_cache() - make the buffer [addr, addr + size) visible to
+ * the IOMMU.
+ *
+ * Dispatches to the implementation's optional sync_cache hook through
+ * iommu_vcall(); deliberately a no-op when the active iommu_ops
+ * installs no hook (presumably because the hardware walks page tables
+ * coherently — confirm against the drivers that set ->sync_cache).
+ * Implemented as a GNU statement expression; addr and size are each
+ * evaluated exactly once.
+ */
+#define iommu_sync_cache(addr, size) ({ \
+ const struct iommu_ops *ops = iommu_get_ops(); \
+ \
+ if ( ops->sync_cache ) \
+ iommu_vcall(ops, sync_cache, addr, size); \
+})
+
#endif /* !__ARCH_X86_IOMMU_H__ */
/*
* Local variables:
int (*setup_hpet_msi)(struct msi_desc *);
int (*adjust_irq_affinities)(void);
+ void (*sync_cache)(const void *addr, unsigned int size);
#endif /* CONFIG_X86 */
int __must_check (*suspend)(void);