bool __read_mostly iommu_snoop = true;
#endif
-static unsigned int __initdata nr_iommus;
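+/* Now read at domain init time to size iommu_bitmap, hence not __initdata. */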
+static unsigned int __read_mostly nr_iommus;
static struct iommu_ops vtd_ops;
static struct tasklet vtd_fault_tasklet;
iommu = drhd->iommu;
- if ( !test_bit(iommu->index, &hd->arch.vtd.iommu_bitmap) )
+ if ( !test_bit(iommu->index, hd->arch.vtd.iommu_bitmap) )
continue;
flush_dev_iotlb = !!find_ats_dev_drhd(iommu);
{
struct domain_iommu *hd = dom_iommu(d);
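+
+    /*
+     * Allocate one bit per IOMMU present at boot; this replaces the
+     * fixed-width uint64_t map, which capped the system at 64 IOMMUs.
+     */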
+ hd->arch.vtd.iommu_bitmap = xzalloc_array(unsigned long,
+ BITS_TO_LONGS(nr_iommus));
+ if ( !hd->arch.vtd.iommu_bitmap )
+ return -ENOMEM;
+
hd->arch.vtd.agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
return 0;
if ( rc > 0 )
rc = 0;
- set_bit(iommu->index, &hd->arch.vtd.iommu_bitmap);
+ set_bit(iommu->index, hd->arch.vtd.iommu_bitmap);
unmap_vtd_domain_page(context_entries);
if ( !found )
{
- clear_bit(iommu->index, &dom_iommu(domain)->arch.vtd.iommu_bitmap);
+ clear_bit(iommu->index, dom_iommu(domain)->arch.vtd.iommu_bitmap);
cleanup_domid_map(domain, iommu);
}
for_each_drhd_unit ( drhd )
cleanup_domid_map(d, drhd->iommu);
+
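+    /* Free the bitmap allocated at domain initialisation. */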
+ XFREE(hd->arch.vtd.iommu_bitmap);
}
static int __must_check intel_iommu_map_page(struct domain *d, dfn_t dfn,
struct {
uint64_t pgd_maddr; /* io page directory machine address */
unsigned int agaw; /* adjusted guest address width, 0 is level 2 30-bit */
- uint64_t iommu_bitmap; /* bitmap of iommu(s) that the domain uses */
+    /* Bitmap of the iommu(s) that the domain uses, sized by nr_iommus. */
+    unsigned long *iommu_bitmap;
} vtd;
/* AMD IOMMU */
struct {