};
static void __init add_ivrs_mapping_entry(
- uint16_t bdf, uint16_t alias_id, uint8_t flags, bool alloc_irt,
- struct amd_iommu *iommu)
+ uint16_t bdf, uint16_t alias_id, uint8_t flags, unsigned int ext_flags,
+ bool alloc_irt, struct amd_iommu *iommu)
{
struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(iommu->seg);
ivrs_mappings[bdf].dte_requestor_id = alias_id;
/* override flags for range of devices */
+ ivrs_mappings[bdf].block_ats = ext_flags & ACPI_IVHD_ATS_DISABLED;
ivrs_mappings[bdf].device_flags = flags;
/* Don't map an IOMMU by itself. */
return 0;
}
- add_ivrs_mapping_entry(bdf, bdf, select->header.data_setting, false,
+ add_ivrs_mapping_entry(bdf, bdf, select->header.data_setting, 0, false,
iommu);
return sizeof(*select);
AMD_IOMMU_DEBUG(" Dev_Id Range: %#x -> %#x\n", first_bdf, last_bdf);
for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
- add_ivrs_mapping_entry(bdf, bdf, range->start.header.data_setting,
+ add_ivrs_mapping_entry(bdf, bdf, range->start.header.data_setting, 0,
false, iommu);
return dev_length;
AMD_IOMMU_DEBUG(" Dev_Id Alias: %#x\n", alias_id);
- add_ivrs_mapping_entry(bdf, alias_id, alias->header.data_setting, true,
+ add_ivrs_mapping_entry(bdf, alias_id, alias->header.data_setting, 0, true,
iommu);
return dev_length;
for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
add_ivrs_mapping_entry(bdf, alias_id, range->alias.header.data_setting,
- true, iommu);
+ 0, true, iommu);
return dev_length;
}
return 0;
}
- add_ivrs_mapping_entry(bdf, bdf, ext->header.data_setting, false, iommu);
+ add_ivrs_mapping_entry(bdf, bdf, ext->header.data_setting,
+ ext->extended_data, false, iommu);
return dev_length;
}
for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
add_ivrs_mapping_entry(bdf, bdf, range->extended.header.data_setting,
- false, iommu);
+ range->extended.extended_data, false, iommu);
return dev_length;
}
AMD_IOMMU_DEBUG("IVHD Special: %pp variety %#x handle %#x\n",
&PCI_SBDF2(seg, bdf), special->variety, special->handle);
- add_ivrs_mapping_entry(bdf, bdf, special->header.data_setting, true,
+ add_ivrs_mapping_entry(bdf, bdf, special->header.data_setting, 0, true,
iommu);
switch ( special->variety )
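/*
 * Illustrative sketch, not part of the patch: the shape of the plumbing the
 * hunks above add and the hunks below consume.  An IVHD device entry's
 * extended data word carries an "ATS disabled" bit (ACPI_IVHD_ATS_DISABLED);
 * at parse time it is latched into a per-device block_ats flag, and at
 * device-setup time that flag vetoes enabling ATS.  All demo_* names and the
 * exact bit position below are assumptions made for this standalone example.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_IVHD_ATS_DISABLED (1u << 31)      /* assumed AtsDisabled bit */

struct demo_ivrs_mapping {
    bool block_ats;                            /* firmware forbade ATS */
};

/* Parse time: record the firmware's ATS-disable request for one device. */
void demo_record_ext_flags(struct demo_ivrs_mapping *m, uint32_t ext_flags)
{
    m->block_ats = ext_flags & DEMO_IVHD_ATS_DISABLED;
}

/* Setup time: enable ATS only if device, IOMMU, admin and firmware agree. */
bool demo_may_enable_ats(const struct demo_ivrs_mapping *m,
                         bool dev_has_ats, bool iommu_has_iotlb,
                         bool ats_enabled)
{
    return dev_has_ats && !m->block_ats && iommu_has_iotlb && ats_enabled;
}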
int req_id, valid = 1, rc;
u8 bus = pdev->bus;
struct domain_iommu *hd = dom_iommu(domain);
+ const struct ivrs_mappings *ivrs_dev;
if ( QUARANTINE_SKIP(domain) )
return 0;
req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(bus, devfn));
table = iommu->dev_table.buffer;
dte = &table[req_id];
+ ivrs_dev = &get_ivrs_mappings(iommu->seg)[req_id];
spin_lock_irqsave(&iommu->lock, flags);
if ( !dte->v || !dte->tv )
{
- const struct ivrs_mappings *ivrs_dev;
-
/* bind DTE to domain page-tables */
amd_iommu_set_root_page_table(
dte, page_to_maddr(hd->arch.amd.root_table),
domain->domain_id, hd->arch.amd.paging_mode, valid);
/* Undo what amd_iommu_disable_domain_device() may have done. */
- ivrs_dev = &get_ivrs_mappings(iommu->seg)[req_id];
if ( dte->it_root )
{
dte->int_ctl = IOMMU_DEV_TABLE_INT_CONTROL_TRANSLATED;
dte->sys_mgt = MASK_EXTR(ivrs_dev->device_flags, ACPI_IVHD_SYSTEM_MGMT);
if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
+ !ivrs_dev->block_ats &&
iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
dte->i = ats_enabled;
ASSERT(pcidevs_locked());
if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
+ !ivrs_dev->block_ats &&
+ iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) &&
!pci_ats_enabled(iommu->seg, bus, pdev->devfn) )
{
if ( devfn == pdev->devfn )