 {
     struct amd_iommu *iommu, *next;
 
+    /* free interrupt remapping table */
+    if ( amd_iommu_perdev_intremap )
+        iterate_ivrs_entries(amd_iommu_free_intremap_table);
+    else if ( shared_intremap_table )
+        amd_iommu_free_intremap_table(list_first_entry(&amd_iommu_head,
+                                                       struct amd_iommu,
+                                                       list),
+                                      NULL);
+
     /* free amd iommu list */
     list_for_each_entry_safe ( iommu, next, &amd_iommu_head, list )
     {
         xfree(iommu);
     }
 
-    /* free interrupt remapping table */
-    iterate_ivrs_entries(amd_iommu_free_intremap_table);
-
     /* free device table */
     deallocate_device_table(&device_table);
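
The ordering in this hunk matters: in the shared-table case, intremap_table_order(iommu) needs a still-valid struct amd_iommu to size the allocation, so the shared table has to be freed before list_for_each_entry_safe() tears down the very list that first entry is borrowed from. Below is a standalone sketch of that constraint; the names (struct dev, free_table(), shared_table) are hypothetical stand-ins for the Xen ones, not the actual implementation.

/*
 * Sketch: the shared table's size is derived from a device that the
 * cleanup loop is itself about to free, so the table must go first.
 */
#include <stdio.h>
#include <stdlib.h>

struct dev {
    unsigned int order;          /* determines the table allocation size */
};

static void *shared_table;       /* one table shared by all devices */

/* Needs a live device to recompute the size of the region being freed. */
static void free_table(const struct dev *d, void **tblp)
{
    if ( *tblp )
    {
        printf("freeing order-%u table\n", d->order);
        free(*tblp);
        *tblp = NULL;            /* a second call becomes a no-op */
    }
}

int main(void)
{
    struct dev *devs[2];
    unsigned int i;

    for ( i = 0; i < 2; i++ )
    {
        devs[i] = calloc(1, sizeof(*devs[i]));
        devs[i]->order = 1;
    }
    shared_table = malloc(4096);

    /* Free the shared table first, borrowing devs[0] for sizing... */
    free_table(devs[0], &shared_table);

    /* ...and only then tear the devices themselves down. */
    for ( i = 0; i < 2; i++ )
        free(devs[i]);

    return 0;
}
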
 int __init amd_iommu_free_intremap_table(
     const struct amd_iommu *iommu, struct ivrs_mappings *ivrs_mapping)
 {
-    void *tb = ivrs_mapping->intremap_table;
+    void **tblp;
 
-    XFREE(ivrs_mapping->intremap_inuse);
+    if ( ivrs_mapping )
+    {
+        XFREE(ivrs_mapping->intremap_inuse);
+        tblp = &ivrs_mapping->intremap_table;
+    }
+    else
+    {
+        XFREE(shared_intremap_inuse);
+        tblp = &shared_intremap_table;
+    }
 
-    if ( tb )
+    if ( *tblp )
     {
-        __free_amd_iommu_tables(tb, intremap_table_order(iommu));
-        ivrs_mapping->intremap_table = NULL;
+        __free_amd_iommu_tables(*tblp, intremap_table_order(iommu));
+        *tblp = NULL;
     }
 
     return 0;
 }
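
With this change, a NULL ivrs_mapping tells the function to release the shared table and its in-use bitmap instead of a per-device pair, and the pointer-to-pointer lets a single code path free and clear whichever was selected, so a repeated call is harmless. A minimal sketch of that contract follows; the names (struct mapping, free_mapping_table()) are hypothetical, and plain free() plus a NULL store stands in for Xen's XFREE(), which frees and nulls in one step.

/*
 * Sketch: NULL selects the shared state; writing through a
 * pointer-to-pointer makes the free idempotent.
 */
#include <assert.h>
#include <stdlib.h>

struct mapping {
    void *table;
    void *inuse;
};

static void *shared_table;
static void *shared_inuse;

static int free_mapping_table(struct mapping *m)
{
    void **tblp;

    if ( m )
    {
        free(m->inuse);
        m->inuse = NULL;
        tblp = &m->table;
    }
    else
    {
        free(shared_inuse);
        shared_inuse = NULL;
        tblp = &shared_table;
    }

    if ( *tblp )
    {
        free(*tblp);
        *tblp = NULL;            /* calling this again is a no-op */
    }

    return 0;
}

int main(void)
{
    shared_table = malloc(64);
    shared_inuse = malloc(8);

    free_mapping_table(NULL);    /* frees the shared pair */
    free_mapping_table(NULL);    /* both pointers already NULL: no-op */
    assert(!shared_table && !shared_inuse);

    return 0;
}
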