return NULL;
}
-struct iommu *ioapic_to_iommu(unsigned int apic_id)
+struct vtd_iommu *ioapic_to_iommu(unsigned int apic_id)
{
struct acpi_drhd_unit *drhd;
return NULL;
}
-struct iommu *hpet_to_iommu(unsigned int hpet_id)
+struct vtd_iommu *hpet_to_iommu(unsigned int hpet_id)
{
struct acpi_drhd_unit *drhd = hpet_to_drhd(hpet_id);
for_each_drhd_unit ( drhd )
{
const struct acpi_rhsa_unit *rhsa = drhd_to_rhsa(drhd);
- struct iommu *iommu = drhd->iommu;
+ struct vtd_iommu *iommu = drhd->iommu;
if ( ret )
break;
u64 address; /* register base address of the unit */
u16 segment;
u8 include_all:1;
- struct iommu *iommu;
+ struct vtd_iommu *iommu;
struct list_head ioapic_list;
struct list_head hpet_list;
};
} while (0)
int vtd_hw_check(void);
-void disable_pmr(struct iommu *iommu);
+void disable_pmr(struct vtd_iommu *iommu);
int is_igd_drhd(struct acpi_drhd_unit *drhd);
#endif /* _DMAR_H_ */
extern const struct iommu_ops intel_iommu_ops;
void print_iommu_regs(struct acpi_drhd_unit *drhd);
-void print_vtd_entries(struct iommu *iommu, int bus, int devfn, u64 gmfn);
+void print_vtd_entries(struct vtd_iommu *iommu, int bus, int devfn, u64 gmfn);
keyhandler_fn_t vtd_dump_iommu_info;
bool intel_iommu_supports_eim(void);
int intel_iommu_enable_eim(void);
void intel_iommu_disable_eim(void);
-int enable_qinval(struct iommu *iommu);
-void disable_qinval(struct iommu *iommu);
-int enable_intremap(struct iommu *iommu, int eim);
-void disable_intremap(struct iommu *iommu);
+int enable_qinval(struct vtd_iommu *iommu);
+void disable_qinval(struct vtd_iommu *iommu);
+int enable_intremap(struct vtd_iommu *iommu, int eim);
+void disable_intremap(struct vtd_iommu *iommu);
void iommu_flush_cache_entry(void *addr, unsigned int size);
void iommu_flush_cache_page(void *addr, unsigned long npages);
int iommu_alloc(struct acpi_drhd_unit *drhd);
void iommu_free(struct acpi_drhd_unit *drhd);
-int iommu_flush_iec_global(struct iommu *iommu);
-int iommu_flush_iec_index(struct iommu *iommu, u8 im, u16 iidx);
-void clear_fault_bits(struct iommu *iommu);
+int iommu_flush_iec_global(struct vtd_iommu *iommu);
+int iommu_flush_iec_index(struct vtd_iommu *iommu, u8 im, u16 iidx);
+void clear_fault_bits(struct vtd_iommu *iommu);
-struct iommu *ioapic_to_iommu(unsigned int apic_id);
-struct iommu *hpet_to_iommu(unsigned int hpet_id);
+struct vtd_iommu *ioapic_to_iommu(unsigned int apic_id);
+struct vtd_iommu *hpet_to_iommu(unsigned int hpet_id);
struct acpi_drhd_unit *ioapic_to_drhd(unsigned int apic_id);
struct acpi_drhd_unit *hpet_to_drhd(unsigned int hpet_id);
struct acpi_rhsa_unit *drhd_to_rhsa(const struct acpi_drhd_unit *drhd);
-struct acpi_drhd_unit * find_ats_dev_drhd(struct iommu *iommu);
+struct acpi_drhd_unit *find_ats_dev_drhd(struct vtd_iommu *iommu);
int ats_device(const struct pci_dev *, const struct acpi_drhd_unit *);
-int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
+int dev_invalidate_iotlb(struct vtd_iommu *iommu, u16 did,
u64 addr, unsigned int size_order, u64 type);
-int __must_check qinval_device_iotlb_sync(struct iommu *iommu,
+int __must_check qinval_device_iotlb_sync(struct vtd_iommu *iommu,
struct pci_dev *pdev,
u16 did, u16 size, u64 addr);
void free_pgtable_maddr(u64 maddr);
void *map_vtd_domain_page(u64 maddr);
void unmap_vtd_domain_page(void *va);
-int domain_context_mapping_one(struct domain *domain, struct iommu *iommu,
+int domain_context_mapping_one(struct domain *domain, struct vtd_iommu *iommu,
u8 bus, u8 devfn, const struct pci_dev *);
-int domain_context_unmap_one(struct domain *domain, struct iommu *iommu,
+int domain_context_unmap_one(struct domain *domain, struct vtd_iommu *iommu,
u8 bus, u8 devfn);
int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt);
int is_igd_vt_enabled_quirk(void);
void platform_quirks_init(void);
-void vtd_ops_preamble_quirk(struct iommu* iommu);
-void vtd_ops_postamble_quirk(struct iommu* iommu);
+void vtd_ops_preamble_quirk(struct vtd_iommu *iommu);
+void vtd_ops_postamble_quirk(struct vtd_iommu *iommu);
int __must_check me_wifi_quirk(struct domain *domain,
u8 bus, u8 devfn, int map);
void pci_vtd_quirk(const struct pci_dev *);
* present an atomic update to VT-d hardware even when the cmpxchg16b
* instruction is not supported.
*/
-static void update_irte(struct iommu *iommu, struct iremap_entry *entry,
+static void update_irte(struct vtd_iommu *iommu, struct iremap_entry *entry,
const struct iremap_entry *new_ire, bool atomic)
{
ASSERT(spin_is_locked(&iommu_ir_ctrl(iommu)->iremap_lock));
}
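/*
 * Illustrative sketch, not the function above: without cmpxchg16b, a
 * 128-bit IRTE update can still appear atomic to the IOMMU if the
 * qword holding the Present bit (bit 0 of the low qword in VT-d) is
 * written last. The irte128 type and the omission of barriers and IEC
 * flushes are simplifications for this sketch.
 */
#include <stdint.h>

struct irte128 {
    uint64_t lo;                /* Present bit assumed at bit 0 */
    uint64_t hi;
};

static void irte_update_no_cx16(volatile struct irte128 *e,
                                const struct irte128 *src)
{
    e->lo &= ~UINT64_C(1);      /* 1. clear Present: entry ignored */
    e->hi = src->hi;            /* 2. rewrite high qword while invalid */
    e->lo = src->lo;            /* 3. publish low qword, Present last */
}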
/* Mark specified intr remap entry as free */
-static void free_remap_entry(struct iommu *iommu, int index)
+static void free_remap_entry(struct vtd_iommu *iommu, int index)
{
struct iremap_entry *iremap_entry = NULL, *iremap_entries, new_ire = { };
struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
* Look for a free intr remap entry (or a contiguous set thereof).
* Need to hold iremap_lock, and set up the returned entry before releasing it.
*/
-static unsigned int alloc_remap_entry(struct iommu *iommu, unsigned int nr)
+static unsigned int alloc_remap_entry(struct vtd_iommu *iommu, unsigned int nr)
{
struct iremap_entry *iremap_entries = NULL;
struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
}
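/*
 * Illustrative sketch of the "contiguous set" search described above;
 * the boolean bookkeeping array is hypothetical, not Xen's iremap
 * structures. The caller is assumed to hold the lock.
 */
#include <stdbool.h>

static unsigned int alloc_run_sketch(const bool *used, unsigned int total,
                                     unsigned int nr)
{
    unsigned int i, run = 0;

    for ( i = 0; i < total; i++ )
    {
        run = used[i] ? 0 : run + 1;
        if ( run == nr )
            return i - nr + 1;  /* first index of the free run */
    }

    return total;               /* 'total' signals "no free run" */
}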
static int remap_entry_to_ioapic_rte(
- struct iommu *iommu, int index, struct IO_xAPIC_route_entry *old_rte)
+ struct vtd_iommu *iommu, int index, struct IO_xAPIC_route_entry *old_rte)
{
struct iremap_entry *iremap_entry = NULL, *iremap_entries;
unsigned long flags;
return 0;
}
-static int ioapic_rte_to_remap_entry(struct iommu *iommu,
+static int ioapic_rte_to_remap_entry(struct vtd_iommu *iommu,
int apic, unsigned int ioapic_pin, struct IO_xAPIC_route_entry *old_rte,
unsigned int rte_upper, unsigned int value)
{
int index;
struct IO_xAPIC_route_entry old_rte = { 0 };
int rte_upper = (reg & 1) ? 1 : 0;
- struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
+ struct vtd_iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
if ( !ir_ctrl->iremap_num ||
struct IO_xAPIC_route_entry old_rte = { 0 };
struct IO_APIC_route_remap_entry *remap_rte;
unsigned int rte_upper = (reg & 1) ? 1 : 0;
- struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
+ struct vtd_iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
int saved_mask;
old_rte = __ioapic_read_entry(apic, ioapic_pin, 1);
}
static int remap_entry_to_msi_msg(
- struct iommu *iommu, struct msi_msg *msg, unsigned int index)
+ struct vtd_iommu *iommu, struct msi_msg *msg, unsigned int index)
{
struct iremap_entry *iremap_entry = NULL, *iremap_entries;
struct msi_msg_remap_entry *remap_rte;
}
static int msi_msg_to_remap_entry(
- struct iommu *iommu, struct pci_dev *pdev,
+ struct vtd_iommu *iommu, struct pci_dev *pdev,
struct msi_desc *msi_desc, struct msi_msg *msg)
{
struct iremap_entry *iremap_entry = NULL, *iremap_entries, new_ire = { };
int __init intel_setup_hpet_msi(struct msi_desc *msi_desc)
{
- struct iommu *iommu = hpet_to_iommu(msi_desc->hpet_id);
+ struct vtd_iommu *iommu = hpet_to_iommu(msi_desc->hpet_id);
struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
unsigned long flags;
int rc = 0;
return rc;
}
-int enable_intremap(struct iommu *iommu, int eim)
+int enable_intremap(struct vtd_iommu *iommu, int eim)
{
struct ir_ctrl *ir_ctrl;
u32 sts, gcmd;
return init_apic_pin_2_ir_idx();
}
-void disable_intremap(struct iommu *iommu)
+void disable_intremap(struct vtd_iommu *iommu)
{
u32 sts;
u64 irta;
int intel_iommu_enable_eim(void)
{
struct acpi_drhd_unit *drhd;
- struct iommu *iommu;
+ struct vtd_iommu *iommu;
if ( system_state < SYS_STATE_active && !platform_supports_x2apic() )
return -ENXIO;
static void setup_hwdom_rmrr(struct domain *d);
static int domain_iommu_domid(struct domain *d,
- struct iommu *iommu)
+ struct vtd_iommu *iommu)
{
unsigned long nr_dom, i;
#define DID_HIGH_OFFSET 8
static int context_set_domain_id(struct context_entry *context,
struct domain *d,
- struct iommu *iommu)
+ struct vtd_iommu *iommu)
{
unsigned long nr_dom, i;
int found = 0;
}
static int context_get_domain_id(struct context_entry *context,
- struct iommu *iommu)
+ struct vtd_iommu *iommu)
{
unsigned long dom_index, nr_dom;
int domid = -1;
}
/* context entry handling */
-static u64 bus_to_context_maddr(struct iommu *iommu, u8 bus)
+static u64 bus_to_context_maddr(struct vtd_iommu *iommu, u8 bus)
{
struct root_entry *root, *root_entries;
u64 maddr;
return pte_maddr;
}
-static void iommu_flush_write_buffer(struct iommu *iommu)
+static void iommu_flush_write_buffer(struct vtd_iommu *iommu)
{
u32 val;
unsigned long flags;
u8 function_mask, u64 type,
bool_t flush_non_present_entry)
{
- struct iommu *iommu = (struct iommu *) _iommu;
+ struct vtd_iommu *iommu = _iommu;
u64 val = 0;
unsigned long flags;
return 0;
}
-static int __must_check iommu_flush_context_global(struct iommu *iommu,
+static int __must_check iommu_flush_context_global(struct vtd_iommu *iommu,
bool_t flush_non_present_entry)
{
struct iommu_flush *flush = iommu_get_flush(iommu);
flush_non_present_entry);
}
-static int __must_check iommu_flush_context_device(struct iommu *iommu,
+static int __must_check iommu_flush_context_device(struct vtd_iommu *iommu,
u16 did, u16 source_id,
u8 function_mask,
bool_t flush_non_present_entry)
bool_t flush_non_present_entry,
bool_t flush_dev_iotlb)
{
- struct iommu *iommu = (struct iommu *) _iommu;
+ struct vtd_iommu *iommu = _iommu;
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
u64 val = 0;
unsigned long flags;
return 0;
}
-static int __must_check iommu_flush_iotlb_global(struct iommu *iommu,
+static int __must_check iommu_flush_iotlb_global(struct vtd_iommu *iommu,
bool_t flush_non_present_entry,
bool_t flush_dev_iotlb)
{
return status;
}
-static int __must_check iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
+static int __must_check iommu_flush_iotlb_dsi(struct vtd_iommu *iommu, u16 did,
bool_t flush_non_present_entry,
bool_t flush_dev_iotlb)
{
return status;
}
-static int __must_check iommu_flush_iotlb_psi(struct iommu *iommu, u16 did,
+static int __must_check iommu_flush_iotlb_psi(struct vtd_iommu *iommu, u16 did,
u64 addr, unsigned int order,
bool_t flush_non_present_entry,
bool_t flush_dev_iotlb)
static int __must_check iommu_flush_all(void)
{
struct acpi_drhd_unit *drhd;
- struct iommu *iommu;
+ struct vtd_iommu *iommu;
bool_t flush_dev_iotlb;
int rc = 0;
{
struct domain_iommu *hd = dom_iommu(d);
struct acpi_drhd_unit *drhd;
- struct iommu *iommu;
+ struct vtd_iommu *iommu;
bool_t flush_dev_iotlb;
int iommu_domid;
int rc = 0;
free_pgtable_maddr(pt_maddr);
}
-static int iommu_set_root_entry(struct iommu *iommu)
+static int iommu_set_root_entry(struct vtd_iommu *iommu)
{
u32 sts;
unsigned long flags;
{
u32 sts;
unsigned long flags;
- struct iommu *iommu = drhd->iommu;
+ struct vtd_iommu *iommu = drhd->iommu;
if ( is_igd_drhd(drhd) )
{
disable_pmr(iommu);
}
-static void iommu_disable_translation(struct iommu *iommu)
+static void iommu_disable_translation(struct vtd_iommu *iommu)
{
u32 sts;
unsigned long flags;
}
}
-static int iommu_page_fault_do_one(struct iommu *iommu, int type,
+static int iommu_page_fault_do_one(struct vtd_iommu *iommu, int type,
u8 fault_reason, u16 source_id, u64 addr)
{
const char *reason, *kind;
}
#define PRIMARY_FAULT_REG_LEN (16)
-static void __do_iommu_page_fault(struct iommu *iommu)
+static void __do_iommu_page_fault(struct vtd_iommu *iommu)
{
int reg, fault_index;
u32 fault_status;
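/*
 * Sketch of the fault-record walk this handler performs: records are
 * PRIMARY_FAULT_REG_LEN (16) bytes each. The cap_* helpers and
 * dmar_readq are assumed from the surrounding code base; their exact
 * use here is this sketch's assumption, not the patched function.
 */
static void fault_walk_sketch(struct vtd_iommu *iommu)
{
    unsigned int base = cap_fault_reg_offset(iommu->cap);
    unsigned int i;

    for ( i = 0; i < cap_num_fault_regs(iommu->cap); i++ )
    {
        u64 hi = dmar_readq(iommu->reg,
                            base + i * PRIMARY_FAULT_REG_LEN + 8);

        if ( !(hi >> 63) )      /* Fault bit clear: nothing recorded */
            continue;
        /* decode source-id/address from the record, then clear the
         * Fault bit (write-1-to-clear) before moving on */
    }
}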
static void dma_msi_unmask(struct irq_desc *desc)
{
- struct iommu *iommu = desc->action->dev_id;
+ struct vtd_iommu *iommu = desc->action->dev_id;
unsigned long flags;
u32 sts;
static void dma_msi_mask(struct irq_desc *desc)
{
unsigned long flags;
- struct iommu *iommu = desc->action->dev_id;
+ struct vtd_iommu *iommu = desc->action->dev_id;
u32 sts;
/* mask it */
struct msi_msg msg;
unsigned int dest;
unsigned long flags;
- struct iommu *iommu = desc->action->dev_id;
+ struct vtd_iommu *iommu = desc->action->dev_id;
dest = set_desc_affinity(desc, mask);
if ( dest == BAD_APICID )
{
{
int irq, ret;
struct acpi_rhsa_unit *rhsa = drhd_to_rhsa(drhd);
- struct iommu *iommu = drhd->iommu;
+ struct vtd_iommu *iommu = drhd->iommu;
struct irq_desc *desc;
irq = create_irq(rhsa ? pxm_to_node(rhsa->proximity_domain)
int __init iommu_alloc(struct acpi_drhd_unit *drhd)
{
- struct iommu *iommu;
+ struct vtd_iommu *iommu;
unsigned long sagaw, nr_dom;
int agaw;
return -ENOMEM;
}
- iommu = xzalloc(struct iommu);
+ iommu = xzalloc(struct vtd_iommu);
if ( iommu == NULL )
return -ENOMEM;
void __init iommu_free(struct acpi_drhd_unit *drhd)
{
- struct iommu *iommu = drhd->iommu;
+ struct vtd_iommu *iommu = drhd->iommu;
if ( iommu == NULL )
return;
int domain_context_mapping_one(
struct domain *domain,
- struct iommu *iommu,
+ struct vtd_iommu *iommu,
u8 bus, u8 devfn, const struct pci_dev *pdev)
{
struct domain_iommu *hd = dom_iommu(domain);
int domain_context_unmap_one(
struct domain *domain,
- struct iommu *iommu,
+ struct vtd_iommu *iommu,
u8 bus, u8 devfn)
{
struct context_entry *context, *context_entries;
struct pci_dev *pdev)
{
struct acpi_drhd_unit *drhd;
- struct iommu *iommu;
+ struct vtd_iommu *iommu;
int ret = 0;
u8 seg = pdev->seg, bus = pdev->bus, tmp_bus, tmp_devfn, secbus;
int found = 0;
int order, int present)
{
struct acpi_drhd_unit *drhd;
- struct iommu *iommu = NULL;
+ struct vtd_iommu *iommu = NULL;
struct domain_iommu *hd = dom_iommu(d);
bool_t flush_dev_iotlb;
int iommu_domid;
return rc;
}
-static int __init vtd_ept_page_compatible(struct iommu *iommu)
+static int __init vtd_ept_page_compatible(struct vtd_iommu *iommu)
{
u64 ept_cap, vtd_cap = iommu->cap;
return domain_context_mapping(pdev->domain, devfn, pdev);
}
-void clear_fault_bits(struct iommu *iommu)
+void clear_fault_bits(struct vtd_iommu *iommu)
{
u64 val;
unsigned long flags;
static int __must_check init_vtd_hw(void)
{
struct acpi_drhd_unit *drhd;
- struct iommu *iommu;
+ struct vtd_iommu *iommu;
struct iommu_flush *flush = NULL;
int ret;
unsigned long flags;
static int __init vtd_setup(void)
{
struct acpi_drhd_unit *drhd;
- struct iommu *iommu;
+ struct vtd_iommu *iommu;
int ret;
if ( list_empty(&acpi_drhd_units) )
static int __must_check vtd_suspend(void)
{
struct acpi_drhd_unit *drhd;
- struct iommu *iommu;
+ struct vtd_iommu *iommu;
u32 i;
int rc;
static void vtd_crash_shutdown(void)
{
struct acpi_drhd_unit *drhd;
- struct iommu *iommu;
+ struct vtd_iommu *iommu;
if ( !iommu_enabled )
return;
static void vtd_resume(void)
{
struct acpi_drhd_unit *drhd;
- struct iommu *iommu;
+ struct vtd_iommu *iommu;
u32 i;
unsigned long flags;
struct acpi_drhd_unit *drhd;
};
-struct iommu {
+struct vtd_iommu {
struct list_head list;
void __iomem *reg; /* Pointer to hardware regs, virtual addr */
u32 index; /* Sequence number of iommu */
u16 *domid_map; /* domain id mapping array */
};
-static inline struct qi_ctrl *iommu_qi_ctrl(struct iommu *iommu)
+static inline struct qi_ctrl *iommu_qi_ctrl(struct vtd_iommu *iommu)
{
return iommu ? &iommu->intel->qi_ctrl : NULL;
}
-static inline struct ir_ctrl *iommu_ir_ctrl(struct iommu *iommu)
+static inline struct ir_ctrl *iommu_ir_ctrl(struct vtd_iommu *iommu)
{
return iommu ? &iommu->intel->ir_ctrl : NULL;
}
-static inline struct iommu_flush *iommu_get_flush(struct iommu *iommu)
+static inline struct iommu_flush *iommu_get_flush(struct vtd_iommu *iommu)
{
return iommu ? &iommu->intel->flush : NULL;
}
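/*
 * Typical call-site pattern for these NULL-propagating accessors
 * (sketch; the -ENODEV choice is illustrative, not from this patch):
 */
static int ir_ready_sketch(struct vtd_iommu *iommu)
{
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    return ir_ctrl ? 0 : -ENODEV;
}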
#define VTD_QI_TIMEOUT 1
-static int __must_check invalidate_sync(struct iommu *iommu);
+static int __must_check invalidate_sync(struct vtd_iommu *iommu);
-static void print_qi_regs(struct iommu *iommu)
+static void print_qi_regs(struct vtd_iommu *iommu)
{
u64 val;
printk("DMAR_IQT_REG = %"PRIx64"\n", val);
}
-static unsigned int qinval_next_index(struct iommu *iommu)
+static unsigned int qinval_next_index(struct vtd_iommu *iommu)
{
u64 tail;
return tail;
}
-static void qinval_update_qtail(struct iommu *iommu, unsigned int index)
+static void qinval_update_qtail(struct vtd_iommu *iommu, unsigned int index)
{
u64 val;
dmar_writeq(iommu->reg, DMAR_IQT_REG, (val << QINVAL_INDEX_SHIFT));
}
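/*
 * Note (inferred from the shift above): the tail register stores a
 * descriptor index shifted by QINVAL_INDEX_SHIFT because descriptors
 * are 16 bytes; hardware consumes descriptors from head up to tail.
 */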
-static int __must_check queue_invalidate_context_sync(struct iommu *iommu,
+static int __must_check queue_invalidate_context_sync(struct vtd_iommu *iommu,
u16 did, u16 source_id,
u8 function_mask,
u8 granu)
return invalidate_sync(iommu);
}
-static int __must_check queue_invalidate_iotlb_sync(struct iommu *iommu,
+static int __must_check queue_invalidate_iotlb_sync(struct vtd_iommu *iommu,
u8 granu, u8 dr, u8 dw,
u16 did, u8 am, u8 ih,
u64 addr)
return invalidate_sync(iommu);
}
-static int __must_check queue_invalidate_wait(struct iommu *iommu,
+static int __must_check queue_invalidate_wait(struct vtd_iommu *iommu,
u8 iflag, u8 sw, u8 fn,
bool_t flush_dev_iotlb)
{
return -EOPNOTSUPP;
}
-static int __must_check invalidate_sync(struct iommu *iommu)
+static int __must_check invalidate_sync(struct vtd_iommu *iommu)
{
struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
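/*
 * Argument mapping, per the signature above: iflag = 0, sw = 1
 * (request a status write), fn = 1 (fence), flush_dev_iotlb = 0.
 */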
return queue_invalidate_wait(iommu, 0, 1, 1, 0);
}
-static int __must_check dev_invalidate_sync(struct iommu *iommu,
+static int __must_check dev_invalidate_sync(struct vtd_iommu *iommu,
struct pci_dev *pdev, u16 did)
{
struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
return rc;
}
-int qinval_device_iotlb_sync(struct iommu *iommu, struct pci_dev *pdev,
+int qinval_device_iotlb_sync(struct vtd_iommu *iommu, struct pci_dev *pdev,
u16 did, u16 size, u64 addr)
{
unsigned long flags;
return dev_invalidate_sync(iommu, pdev, did);
}
-static int __must_check queue_invalidate_iec_sync(struct iommu *iommu,
+static int __must_check queue_invalidate_iec_sync(struct vtd_iommu *iommu,
u8 granu, u8 im, u16 iidx)
{
unsigned long flags;
return ret;
}
-int iommu_flush_iec_global(struct iommu *iommu)
+int iommu_flush_iec_global(struct vtd_iommu *iommu)
{
return queue_invalidate_iec_sync(iommu, IEC_GLOBAL_INVL, 0, 0);
}
-int iommu_flush_iec_index(struct iommu *iommu, u8 im, u16 iidx)
+int iommu_flush_iec_index(struct vtd_iommu *iommu, u8 im, u16 iidx)
{
return queue_invalidate_iec_sync(iommu, IEC_INDEX_INVL, im, iidx);
}
u16 sid, u8 fm, u64 type,
bool_t flush_non_present_entry)
{
- struct iommu *iommu = (struct iommu *)_iommu;
+ struct vtd_iommu *iommu = _iommu;
struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
ASSERT(qi_ctrl->qinval_maddr);
{
u8 dr = 0, dw = 0;
int ret = 0, rc;
- struct iommu *iommu = (struct iommu *)_iommu;
+ struct vtd_iommu *iommu = _iommu;
struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
ASSERT(qi_ctrl->qinval_maddr);
return ret;
}
-int enable_qinval(struct iommu *iommu)
+int enable_qinval(struct vtd_iommu *iommu)
{
struct qi_ctrl *qi_ctrl;
struct iommu_flush *flush;
return 0;
}
-void disable_qinval(struct iommu *iommu)
+void disable_qinval(struct vtd_iommu *iommu)
{
u32 sts;
unsigned long flags;
/*
* force IGD to exit low power mode by accessing an IGD 3D register.
*/
-static int cantiga_vtd_ops_preamble(struct iommu* iommu)
+static int cantiga_vtd_ops_preamble(struct vtd_iommu *iommu)
{
struct intel_iommu *intel = iommu->intel;
struct acpi_drhd_unit *drhd = intel ? intel->drhd : NULL;
* parameter to a numerical value enables the quirk and
* sets the timeout to that number of milliseconds.
*/
-static void snb_vtd_ops_preamble(struct iommu* iommu)
+static void snb_vtd_ops_preamble(struct vtd_iommu *iommu)
{
struct intel_iommu *intel = iommu->intel;
struct acpi_drhd_unit *drhd = intel ? intel->drhd : NULL;
*(volatile u32 *)(igd_reg_va + 0x2050) = 0x10001;
}
-static void snb_vtd_ops_postamble(struct iommu* iommu)
+static void snb_vtd_ops_postamble(struct vtd_iommu *iommu)
{
struct intel_iommu *intel = iommu->intel;
struct acpi_drhd_unit *drhd = intel ? intel->drhd : NULL;
* called before VT-d translation enable and IOTLB flush operations.
*/
-void vtd_ops_preamble_quirk(struct iommu* iommu)
+void vtd_ops_preamble_quirk(struct vtd_iommu *iommu)
{
cantiga_vtd_ops_preamble(iommu);
if ( snb_igd_timeout != 0 )
/*
* called after VT-d translation enable and IOTLB flush operations.
*/
-void vtd_ops_postamble_quirk(struct iommu* iommu)
+void vtd_ops_postamble_quirk(struct vtd_iommu *iommu)
{
if ( snb_igd_timeout != 0 )
{
#include <asm/io_apic.h>
/* Disable VT-d protected memory registers. */
-void disable_pmr(struct iommu *iommu)
+void disable_pmr(struct vtd_iommu *iommu)
{
u32 val;
unsigned long flags;
void print_iommu_regs(struct acpi_drhd_unit *drhd)
{
- struct iommu *iommu = drhd->iommu;
+ struct vtd_iommu *iommu = drhd->iommu;
u64 cap;
printk("---- print_iommu_regs ----\n");
return gmfn & LEVEL_MASK;
}
-void print_vtd_entries(struct iommu *iommu, int bus, int devfn, u64 gmfn)
+void print_vtd_entries(struct vtd_iommu *iommu, int bus, int devfn, u64 gmfn)
{
struct context_entry *ctxt_entry;
struct root_entry *root_entry;
void vtd_dump_iommu_info(unsigned char key)
{
struct acpi_drhd_unit *drhd;
- struct iommu *iommu;
+ struct vtd_iommu *iommu;
int i;
for_each_drhd_unit ( drhd )
static LIST_HEAD(ats_dev_drhd_units);
-struct acpi_drhd_unit * find_ats_dev_drhd(struct iommu *iommu)
+struct acpi_drhd_unit *find_ats_dev_drhd(struct vtd_iommu *iommu)
{
struct acpi_drhd_unit *drhd;
list_for_each_entry ( drhd, &ats_dev_drhd_units, list )
return pos;
}
-static bool device_in_domain(const struct iommu *iommu,
+static bool device_in_domain(const struct vtd_iommu *iommu,
const struct pci_dev *pdev, uint16_t did)
{
struct root_entry *root_entry;
return found;
}
-int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
+int dev_invalidate_iotlb(struct vtd_iommu *iommu, u16 did,
u64 addr, unsigned int size_order, u64 type)
{
struct pci_dev *pdev, *temp;