struct amd_iommu *iommu;
u16 bdf;
struct ivrs_mappings *ivrs_mappings;
+ bool fresh_domid = false;
+ int ret;
if ( !pdev->domain )
return -EINVAL;
AMD_IOMMU_WARN("%pd: unity mapping failed for %pp\n",
pdev->domain, &pdev->sbdf);
- return amd_iommu_setup_domain_device(pdev->domain, iommu, devfn, pdev);
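+ /*
+ * With quarantining enabled, make sure the device has a pseudo domain
+ * ID of its own before it gets set up for its owning domain.
+ */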
+ if ( iommu_quarantine && pdev->arch.pseudo_domid == DOMID_INVALID )
+ {
+ pdev->arch.pseudo_domid = iommu_alloc_domid(iommu->domid_map);
+ if ( pdev->arch.pseudo_domid == DOMID_INVALID )
+ return -ENOSPC;
+ fresh_domid = true;
+ }
+
+ ret = amd_iommu_setup_domain_device(pdev->domain, iommu, devfn, pdev);
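+ /* Don't leak a just-allocated ID if setting up the device failed. */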
+ if ( ret && fresh_domid )
+ {
+ iommu_free_domid(pdev->arch.pseudo_domid, iommu->domid_map);
+ pdev->arch.pseudo_domid = DOMID_INVALID;
+ }
+
+ return ret;
}
static int cf_check amd_iommu_remove_device(u8 devfn, struct pci_dev *pdev)
AMD_IOMMU_WARN("%pd: unity unmapping failed for %pp\n",
pdev->domain, &pdev->sbdf);
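+ /* Give back the device's pseudo domain ID, if one was ever allocated. */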
+ iommu_free_domid(pdev->arch.pseudo_domid, iommu->domid_map);
+ pdev->arch.pseudo_domid = DOMID_INVALID;
+
if ( amd_iommu_perdev_intremap &&
ivrs_mappings[bdf].dte_requestor_id == bdf &&
ivrs_mappings[bdf].intremap_table )
#include <xen/sched.h>
#include <xen/xmalloc.h>
#include <xen/domain_page.h>
+#include <xen/err.h>
#include <xen/iocap.h>
#include <xen/iommu.h>
#include <xen/numa.h>
{
struct vtd_iommu *iommu;
unsigned int sagaw, agaw = 0, nr_dom;
+ domid_t reserved_domid = DOMID_INVALID;
+ int rc;
iommu = xzalloc(struct vtd_iommu);
if ( iommu == NULL )
nr_dom = cap_ndoms(iommu->cap);
- if ( nr_dom <= DOMID_MASK + cap_caching_mode(iommu->cap) )
+ if ( nr_dom <= DOMID_MASK * 2 + cap_caching_mode(iommu->cap) )
{
/* Allocate domain id (bit) maps. */
iommu->domid_bitmap = xzalloc_array(unsigned long,
/* Don't leave dangling NULL pointers. */
iommu->domid_bitmap = ZERO_BLOCK_PTR;
iommu->domid_map = ZERO_BLOCK_PTR;
+
+ /*
+ * If Caching mode is set, then invalid translations are tagged
+ * with domain id 0. Hence reserve the ID taking up bit/slot 0.
+ */
+ reserved_domid = convert_domid(iommu, 0) ?: DOMID_INVALID;
}
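+ /*
+ * Also set up the map backing per-device (quarantine) IDs, reserving
+ * the caching-mode ID determined above, if any.
+ */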
+ iommu->pseudo_domid_map = iommu_init_domid(reserved_domid);
+ rc = -ENOMEM;
+ if ( !iommu->pseudo_domid_map )
+ goto free;
+
return 0;
+
+ free:
+ iommu_free(drhd);
+ return rc;
}
void __init iommu_free(struct acpi_drhd_unit *drhd)
xfree(iommu->domid_bitmap);
xfree(iommu->domid_map);
+ xfree(iommu->pseudo_domid_map);
if ( iommu->msi.irq >= 0 )
destroy_irq(iommu->msi.irq);
return rc ?: pdev && prev_dom;
}
-static int domain_context_unmap(struct domain *d, uint8_t devfn,
- struct pci_dev *pdev);
+static const struct acpi_drhd_unit *domain_context_unmap(
+ struct domain *d, uint8_t devfn, struct pci_dev *pdev);
static int domain_context_mapping(struct domain *domain, u8 devfn,
struct pci_dev *pdev)
const struct acpi_drhd_unit *drhd = acpi_find_matched_drhd_unit(pdev);
const struct acpi_rmrr_unit *rmrr;
paddr_t pgd_maddr = dom_iommu(domain)->arch.vtd.pgd_maddr;
+ domid_t orig_domid = pdev->arch.pseudo_domid;
int ret = 0;
unsigned int i, mode = 0;
uint16_t seg = pdev->seg, bdf;
if ( !drhd )
return -ENODEV;
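+ /*
+ * First use of the device with quarantining enabled: reserve a pseudo
+ * domain ID for it.
+ */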
+ if ( iommu_quarantine && orig_domid == DOMID_INVALID )
+ {
+ pdev->arch.pseudo_domid =
+ iommu_alloc_domid(drhd->iommu->pseudo_domid_map);
+ if ( pdev->arch.pseudo_domid == DOMID_INVALID )
+ return -ENOSPC;
+ }
+
if ( iommu_debug )
printk(VTDPREFIX "%pd:PCIe: map %pp\n",
domain, &PCI_SBDF3(seg, bus, devfn));
if ( !drhd )
return -ENODEV;
+ if ( iommu_quarantine && orig_domid == DOMID_INVALID )
+ {
+ pdev->arch.pseudo_domid =
+ iommu_alloc_domid(drhd->iommu->pseudo_domid_map);
+ if ( pdev->arch.pseudo_domid == DOMID_INVALID )
+ return -ENOSPC;
+ }
+
if ( iommu_debug )
printk(VTDPREFIX "%pd:PCI: map %pp\n",
domain, &PCI_SBDF3(seg, bus, devfn));
if ( !ret && devfn == pdev->devfn )
pci_vtd_quirk(pdev);
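+ /* Unwind the ID reservation on failure, but only if it was made above. */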
+ if ( ret && drhd && orig_domid == DOMID_INVALID )
+ {
+ iommu_free_domid(pdev->arch.pseudo_domid,
+ drhd->iommu->pseudo_domid_map);
+ pdev->arch.pseudo_domid = DOMID_INVALID;
+ }
+
return ret;
}
return rc;
}
-static int domain_context_unmap(struct domain *domain, u8 devfn,
- struct pci_dev *pdev)
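+/*
+ * Return the device's DRHD on success (NULL when there was nothing to
+ * unmap), or an ERR_PTR() on failure.
+ */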
+static const struct acpi_drhd_unit *domain_context_unmap(
+ struct domain *domain,
+ uint8_t devfn,
+ struct pci_dev *pdev)
{
const struct acpi_drhd_unit *drhd = acpi_find_matched_drhd_unit(pdev);
struct vtd_iommu *iommu = drhd ? drhd->iommu : NULL;
if ( iommu_debug )
printk(VTDPREFIX "%pd:Hostbridge: skip %pp unmap\n",
domain, &PCI_SBDF3(seg, bus, devfn));
- return is_hardware_domain(domain) ? 0 : -EPERM;
+ return ERR_PTR(is_hardware_domain(domain) ? 0 : -EPERM);
case DEV_TYPE_PCIe_BRIDGE:
case DEV_TYPE_PCIe2PCI_BRIDGE:
case DEV_TYPE_LEGACY_PCI_BRIDGE:
- return 0;
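+ /* ERR_PTR(0) is NULL: success, but no DRHD for the caller to act on. */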
+ return ERR_PTR(0);
case DEV_TYPE_PCIe_ENDPOINT:
if ( !iommu )
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
if ( iommu_debug )
printk(VTDPREFIX "%pd:PCIe: unmap %pp\n",
case DEV_TYPE_PCI:
if ( !iommu )
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
if ( iommu_debug )
printk(VTDPREFIX "%pd:PCI: unmap %pp\n",
default:
dprintk(XENLOG_ERR VTDPREFIX, "%pd:unknown(%u): %pp\n",
domain, pdev->type, &PCI_SBDF3(seg, bus, devfn));
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
if ( !ret && pdev->devfn == devfn &&
!QUARANTINE_SKIP(domain, dom_iommu(domain)->arch.vtd.pgd_maddr) )
check_cleanup_domid_map(domain, pdev, iommu);
- return ret;
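+ /* Propagate errors; otherwise hand the DRHD back to the caller. */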
+ return ret ? ERR_PTR(ret) : drhd;
}
static void cf_check iommu_clear_root_pgtable(struct domain *d)
static int cf_check intel_iommu_remove_device(u8 devfn, struct pci_dev *pdev)
{
+ const struct acpi_drhd_unit *drhd;
struct acpi_rmrr_unit *rmrr;
u16 bdf;
- int ret, i;
+ unsigned int i;
if ( !pdev->domain )
return -EINVAL;
- ret = domain_context_unmap(pdev->domain, devfn, pdev);
- if ( ret )
- return ret;
+ drhd = domain_context_unmap(pdev->domain, devfn, pdev);
+ if ( IS_ERR(drhd) )
+ return PTR_ERR(drhd);
for_each_rmrr_device ( rmrr, bdf, i )
{
rmrr->end_address, 0);
}
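+ /* The device is unhooked now; recycle its pseudo domain ID. */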
+ if ( drhd )
+ {
+ iommu_free_domid(pdev->arch.pseudo_domid,
+ drhd->iommu->pseudo_domid_map);
+ pdev->arch.pseudo_domid = DOMID_INVALID;
+ }
+
return 0;
}
}
}
else
- ret = domain_context_unmap(source, devfn, pdev);
+ {
+ const struct acpi_drhd_unit *drhd;
+
+ drhd = domain_context_unmap(source, devfn, pdev);
+ ret = IS_ERR(drhd) ? PTR_ERR(drhd) : 0;
+ }
if ( ret )
{
if ( !has_arch_pdevs(target) )
return;
}
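+/* Newly added PCI devices start out without a pseudo domain ID. */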
+void arch_pci_init_pdev(struct pci_dev *pdev)
+{
+ pdev->arch.pseudo_domid = DOMID_INVALID;
+}
+
+unsigned long *__init iommu_init_domid(domid_t reserve)
+{
+ unsigned long *map;
+
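+ /*
+ * Without quarantining no IDs will be handed out; use a non-NULL
+ * sentinel so callers can still distinguish this from -ENOMEM.
+ */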
+ if ( !iommu_quarantine )
+ return ZERO_BLOCK_PTR;
+
+ BUILD_BUG_ON(DOMID_MASK * 2U >= UINT16_MAX);
+
+ map = xzalloc_array(unsigned long, BITS_TO_LONGS(UINT16_MAX - DOMID_MASK));
+ if ( map && reserve != DOMID_INVALID )
+ {
+ ASSERT(reserve > DOMID_MASK);
+ __set_bit(reserve & DOMID_MASK, map);
+ }
+
+ return map;
+}
+
+domid_t iommu_alloc_domid(unsigned long *map)
+{
+ /*
+ * The allocation cursor below is shared across all IOMMUs, such that
+ * on typical systems a freed ID won't be re-used any time soon, if
+ * ever.
+ */
+ static unsigned int start;
+ unsigned int idx = find_next_zero_bit(map, UINT16_MAX - DOMID_MASK, start);
+
+ ASSERT(pcidevs_locked());
+
+ if ( idx >= UINT16_MAX - DOMID_MASK )
+ idx = find_first_zero_bit(map, UINT16_MAX - DOMID_MASK);
+ if ( idx >= UINT16_MAX - DOMID_MASK )
+ return DOMID_INVALID;
+
+ __set_bit(idx, map);
+
+ start = idx + 1;
+
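+ /* Pseudo IDs occupy the range above DOMID_MASK, clear of all real domids. */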
+ return idx | (DOMID_MASK + 1);
+}
+
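+/* Release a pseudo ID previously handed out by iommu_alloc_domid(). */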
+void iommu_free_domid(domid_t domid, unsigned long *map)
+{
+ ASSERT(pcidevs_locked());
+
+ if ( domid == DOMID_INVALID )
+ return;
+
+ ASSERT(domid > DOMID_MASK);
+
+ if ( !__test_and_clear_bit(domid & DOMID_MASK, map) )
+ BUG();
+}
+
int iommu_free_pgtables(struct domain *d)
{
struct domain_iommu *hd = dom_iommu(d);