Both users will want to know IOMMU properties (specifically the IRTE
size) subsequently. Leverage this to avoid pointless calls to the
callback when IVRS mapping table entries are unpopulated. To avoid
leaking interrupt remapping tables (bogusly) allocated for IOMMUs
themselves, this requires suppressing their allocation in the first
place, going a step beyond what commit 757122c0cf ('AMD/IOMMU:
don't "add" IOMMUs') had done.
Additionally suppress the call for alias entries, as, again, neither
user cares about these anyway. In fact this eliminates a fair bit of
redundancy from dump output.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Brian Woods <brian.woods@amd.com>
/* override flags for range of devices */
ivrs_mappings[bdf].device_flags = flags;
- if (ivrs_mappings[alias_id].intremap_table == NULL )
+ /* Don't map an IOMMU by itself. */
+ if ( iommu->bdf == bdf )
+ return;
+
+ if ( !ivrs_mappings[alias_id].intremap_table )
{
/* allocate per-device interrupt remapping table */
if ( amd_iommu_perdev_intremap )
ivrs_mappings[alias_id].intremap_inuse = shared_intremap_inuse;
}
}
- /* Assign IOMMU hardware, but don't map an IOMMU by itself. */
- ivrs_mappings[bdf].iommu = iommu->bdf != bdf ? iommu : NULL;
+
+ /* Assign IOMMU hardware. */
+ ivrs_mappings[bdf].iommu = iommu;
}
static struct amd_iommu * __init find_iommu_from_bdf_cap(
return rc;
}
-int iterate_ivrs_entries(int (*handler)(u16 seg, struct ivrs_mappings *))
+int iterate_ivrs_entries(int (*handler)(const struct amd_iommu *,
+ struct ivrs_mappings *))
{
u16 seg = 0;
int rc = 0;
break;
seg = IVRS_MAPPINGS_SEG(map);
for ( bdf = 0; !rc && bdf < ivrs_bdf_entries; ++bdf )
- rc = handler(seg, map + bdf);
+ {
+ const struct amd_iommu *iommu = map[bdf].iommu;
+
+ if ( iommu && map[bdf].dte_requestor_id == bdf )
+ rc = handler(iommu, &map[bdf]);
+ }
} while ( !rc && ++seg );
return rc;
}
int __init amd_iommu_free_intremap_table(
- u16 seg, struct ivrs_mappings *ivrs_mapping)
+ const struct amd_iommu *iommu, struct ivrs_mappings *ivrs_mapping)
{
void *tb = ivrs_mapping->intremap_table;
}
}
-static int dump_intremap_mapping(u16 seg, struct ivrs_mappings *ivrs_mapping)
+static int dump_intremap_mapping(const struct amd_iommu *iommu,
+ struct ivrs_mappings *ivrs_mapping)
{
unsigned long flags;
if ( !ivrs_mapping )
return 0;
- printk(" %04x:%02x:%02x:%u:\n", seg,
+ printk(" %04x:%02x:%02x:%u:\n", iommu->seg,
PCI_BUS(ivrs_mapping->dte_requestor_id),
PCI_SLOT(ivrs_mapping->dte_requestor_id),
PCI_FUNC(ivrs_mapping->dte_requestor_id));
struct ivrs_mappings *get_ivrs_mappings(u16 seg);
int iterate_ivrs_mappings(int (*)(u16 seg, struct ivrs_mappings *));
-int iterate_ivrs_entries(int (*)(u16 seg, struct ivrs_mappings *));
+int iterate_ivrs_entries(int (*)(const struct amd_iommu *,
+ struct ivrs_mappings *));
/* iommu tables in guest space */
struct mmio_reg {
/* interrupt remapping */
int amd_iommu_setup_ioapic_remapping(void);
void *amd_iommu_alloc_intremap_table(unsigned long **);
-int amd_iommu_free_intremap_table(u16 seg, struct ivrs_mappings *);
+int amd_iommu_free_intremap_table(
+ const struct amd_iommu *, struct ivrs_mappings *);
void amd_iommu_ioapic_update_ire(
unsigned int apic, unsigned int reg, unsigned int value);
unsigned int amd_iommu_read_ioapic_from_ire(