/* allocate per-device interrupt remapping table */
if ( amd_iommu_perdev_intremap )
ivrs_mappings[alias_id].intremap_table =
- amd_iommu_alloc_intremap_table(
- &ivrs_mappings[alias_id].intremap_inuse);
+ amd_iommu_alloc_intremap_table(
+ iommu,
+ &ivrs_mappings[alias_id].intremap_inuse);
else
{
if ( shared_intremap_table == NULL )
shared_intremap_table = amd_iommu_alloc_intremap_table(
+ iommu,
&shared_intremap_inuse);
ivrs_mappings[alias_id].intremap_table = shared_intremap_table;
ivrs_mappings[alias_id].intremap_inuse = shared_intremap_inuse;
return 0;
}
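
Both call paths now thread the IOMMU through to the allocator. For reference, a minimal usage sketch under the new signature; the wrapper name, the -ENOMEM return, and the way the struct amd_iommu * would be obtained (e.g. via a lookup like find_iommu_for_device()) are illustrative assumptions, not part of this patch:

    /*
     * Hedged sketch, not part of the patch: allocate a per-device interrupt
     * remapping table for one IVRS mapping, given the IOMMU serving the
     * device.  The field names match the hunk above; the wrapper itself and
     * its error handling are assumptions for illustration only.
     */
    static int __init alloc_perdev_intremap(const struct amd_iommu *iommu,
                                            struct ivrs_mappings *mapping)
    {
        mapping->intremap_table =
            amd_iommu_alloc_intremap_table(iommu, &mapping->intremap_inuse);

        return mapping->intremap_table ? 0 : -ENOMEM;
    }

The shared path above differs only in that the first successful allocation is cached in shared_intremap_table and every subsequent device reuses it.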
-void* __init amd_iommu_alloc_intremap_table(unsigned long **inuse_map)
+void *__init amd_iommu_alloc_intremap_table(
+ const struct amd_iommu *iommu, unsigned long **inuse_map)
{
void *tb;
tb = __alloc_amd_iommu_tables(INTREMAP_TABLE_ORDER);
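
The definition hunk is truncated here, so how the new iommu argument is consumed in the body is not visible; the shown context still passes the fixed INTREMAP_TABLE_ORDER. One plausible shape, with intremap_table_order() as a purely hypothetical helper standing in for whatever the full patch does, would be to derive the allocation order from the IOMMU rather than from a constant:

    /*
     * Hedged sketch only: intremap_table_order() is hypothetical, and the
     * visible context above still uses INTREMAP_TABLE_ORDER.
     */
    void *__init amd_iommu_alloc_intremap_table(
        const struct amd_iommu *iommu, unsigned long **inuse_map)
    {
        void *tb = __alloc_amd_iommu_tables(intremap_table_order(iommu));

        /* ... remainder as before: zero the table, set up *inuse_map. */
        return tb;
    }

Passing const struct amd_iommu * rather than, say, a precomputed size keeps the sizing/placement policy inside the allocator, so callers only need to know which IOMMU serves the device.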
/* interrupt remapping */
int amd_iommu_setup_ioapic_remapping(void);
-void *amd_iommu_alloc_intremap_table(unsigned long **);
+void *amd_iommu_alloc_intremap_table(
+ const struct amd_iommu *, unsigned long **);
int amd_iommu_free_intremap_table(
const struct amd_iommu *, struct ivrs_mappings *);
void amd_iommu_ioapic_update_ire(
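
The header hunk also shows the free routine already taking the IOMMU, so allocation and teardown are symmetric. A hedged teardown sketch pairing the two; the wrapper is illustrative, and how amd_iommu_free_intremap_table() treats the shared table internally is not shown in this excerpt:

    /* Hedged sketch, not part of the patch: undo a per-device allocation. */
    static void teardown_intremap(const struct amd_iommu *iommu,
                                  struct ivrs_mappings *mapping)
    {
        if ( mapping->intremap_table )
            amd_iommu_free_intremap_table(iommu, mapping);
    }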