libxl_device_pci *pcidev,
int rebind)
{
+ libxl_ctx *ctx = libxl__gc_owner(gc);
unsigned dom, bus, dev, func;
char *spath, *driver_path = NULL;
int rc;
}
if ( rc ) {
LOG(WARN, PCI_BDF" already assigned to pciback", dom, bus, dev, func);
- return 0;
+ goto quarantine;
}
/* Check to see if there's already a driver that we need to unbind from */
return ERROR_FAIL;
}
+quarantine:
+ /*
+ * DOMID_IO is just a sentinel domain, without any actual mappings,
+ * so always pass XEN_DOMCTL_DEV_RDM_RELAXED to avoid assignment being
+ * unnecessarily denied.
+ */
+ rc = xc_assign_device(ctx->xch, DOMID_IO, pcidev_encode_bdf(pcidev),
+ XEN_DOMCTL_DEV_RDM_RELAXED);
+ if ( rc < 0 ) {
+ LOG(ERROR, "failed to quarantine "PCI_BDF, dom, bus, dev, func);
+ return ERROR_FAIL;
+ }
+
return 0;
}
libxl_device_pci *pcidev,
int rebind)
{
+ libxl_ctx *ctx = libxl__gc_owner(gc);
int rc;
char *driver_path;
+ /* De-quarantine */
+ rc = xc_deassign_device(ctx->xch, DOMID_IO, pcidev_encode_bdf(pcidev));
+ if ( rc < 0 ) {
+ LOG(ERROR, "failed to de-quarantine "PCI_BDF, pcidev->domain, pcidev->bus,
+ pcidev->dev, pcidev->func);
+ return ERROR_FAIL;
+ }
+
/* Unbind from pciback */
if ( (rc=pciback_dev_is_assigned(gc, pcidev)) < 0 ) {
return ERROR_FAIL;
* Initialise our DOMID_IO domain.
* This domain owns I/O pages that are within the range of the page_info
* array. Mappings occur at the priv of the caller.
+ * Quarantined PCI devices will be associated with this domain.
*/
dom_io = domain_create(DOMID_IO, NULL, false);
if ( IS_ERR(dom_io) )
switch ( op->cmd )
{
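+ /*
+ * Device assignment to dom_io (DOMID_IO) is how the toolstack
+ * quarantines a device. dom_io is a static system domain, so it is
+ * used directly here and excluded from the rcu_unlock_domain() calls
+ * further down.
+ */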
+ case XEN_DOMCTL_assign_device:
+ case XEN_DOMCTL_deassign_device:
+ if ( op->domain == DOMID_IO )
+ {
+ d = dom_io;
+ break;
+ }
+ else if ( op->domain == DOMID_INVALID )
+ return -ESRCH;
+ /* fall through */
case XEN_DOMCTL_test_assign_device:
case XEN_DOMCTL_vm_event_op:
if ( op->domain == DOMID_INVALID )
if ( !domctl_lock_acquire() )
{
- if ( d )
+ if ( d && d != dom_io )
rcu_unlock_domain(d);
return hypercall_create_continuation(
__HYPERVISOR_domctl, "h", u_domctl);
domctl_lock_release();
domctl_out_unlock_domonly:
- if ( d )
+ if ( d && d != dom_io )
rcu_unlock_domain(d);
if ( copyback && __copy_to_guest(u_domctl, op, 1) )
u8 bus = pdev->bus;
const struct domain_iommu *hd = dom_iommu(domain);
+ /* dom_io is used as a sentinel for quarantined devices */
+ if ( domain == dom_io )
+ return;
+
BUG_ON( !hd->arch.root_table || !hd->arch.paging_mode ||
!iommu->dev_table.buffer );
int req_id;
u8 bus = pdev->bus;
+ /* dom_io is used as a sentinel for quarantined devices */
+ if ( domain == dom_io )
+ return;
+
BUG_ON ( iommu->dev_table.buffer == NULL );
req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(bus, devfn));
table = iommu->dev_table.buffer;
ivrs_mappings[req_id].read_permission);
}
- return reassign_device(hardware_domain, d, devfn, pdev);
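+ /* The current owner may be dom_io (quarantined), not dom0 */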
+ return reassign_device(pdev->domain, d, devfn, pdev);
}
static void deallocate_next_page_table(struct page_info *pg, int level)
break;
}
+ if ( d == dom_io )
+ return -EINVAL;
+
ret = iommu_add_dt_device(dev);
/*
* Ignore "-EEXIST" error code as it would mean that the device is
ret = xsm_deassign_dtdevice(XSM_HOOK, d, dt_node_full_name(dev));
+ if ( d == dom_io )
+ return -EINVAL;
+
ret = iommu_deassign_dt_device(d, dev);
if ( ret )
hd->platform_ops = iommu_get_ops();
ret = hd->platform_ops->init(d);
- if ( ret )
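+ /*
+ * System domains, such as the dom_io quarantine domain, only need the
+ * platform hooks initialised; the remaining setup below does not apply
+ * to them.
+ */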
+ if ( ret || is_system_domain(d) )
return ret;
if ( is_hardware_domain(d) )
}
else
{
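+ /*
+ * Initialise dom_io's IOMMU state so that devices can later be
+ * assigned to it for quarantining.
+ */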
+ dom_io->options |= XEN_DOMCTL_CDF_iommu;
+ if ( iommu_domain_init(dom_io, 0) )
+ panic("Could not set up quarantine\n");
+
printk(" - Dom0 mode: %s\n",
iommu_hwdom_passthrough ? "Passthrough" :
iommu_hwdom_strict ? "Strict" : "Relaxed");
{
const struct domain_iommu *hd = dom_iommu(d);
struct pci_dev *pdev;
+ struct domain *target;
int ret = 0;
if ( !is_iommu_enabled(d) )
if ( !pdev )
return -ENODEV;
+ /* De-assignment from dom_io should de-quarantine the device */
+ target = (pdev->quarantine && pdev->domain != dom_io) ?
+ dom_io : hardware_domain;
+
while ( pdev->phantom_stride )
{
devfn += pdev->phantom_stride;
if ( PCI_SLOT(devfn) != PCI_SLOT(pdev->devfn) )
break;
- ret = hd->platform_ops->reassign_device(d, hardware_domain, devfn,
+ ret = hd->platform_ops->reassign_device(d, target, devfn,
pci_to_dev(pdev));
if ( !ret )
continue;
}
devfn = pdev->devfn;
- ret = hd->platform_ops->reassign_device(d, hardware_domain, devfn,
+ ret = hd->platform_ops->reassign_device(d, target, devfn,
pci_to_dev(pdev));
if ( ret )
{
return ret;
}
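+ /* Once back with the hardware domain the device is no longer quarantined */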
+ if ( pdev->domain == hardware_domain )
+ pdev->quarantine = false;
+
pdev->fault.count = 0;
return ret;
return hd->platform_ops->remove_device(pdev->devfn, pci_to_dev(pdev));
}
-/*
- * If the device isn't owned by the hardware domain, it means it already
- * has been assigned to other domain, or it doesn't exist.
- */
static int device_assigned(u16 seg, u8 bus, u8 devfn)
{
struct pci_dev *pdev;
+ int rc = 0;
pcidevs_lock();
- pdev = pci_get_pdev_by_domain(hardware_domain, seg, bus, devfn);
+
+ pdev = pci_get_pdev(seg, bus, devfn);
+
+ if ( !pdev )
+ rc = -ENODEV;
+ /*
+ * If the device exists and it is not owned by either the hardware
+ * domain or dom_io then it must be assigned to a guest, or be
+ * hidden (owned by dom_xen).
+ */
+ else if ( pdev->domain != hardware_domain &&
+ pdev->domain != dom_io )
+ rc = -EBUSY;
+
pcidevs_unlock();
- return pdev ? 0 : -EBUSY;
+ return rc;
}
static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
/* Prevent device assign if mem paging or mem sharing have been
* enabled for this domain */
- if ( unlikely((is_hvm_domain(d) &&
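+ /* d may be dom_io (quarantine), which has none of the state checked below */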
+ if ( d != dom_io &&
+ unlikely((is_hvm_domain(d) &&
d->arch.hvm.mem_sharing_enabled) ||
vm_event_check_ring(d->vm_event_paging) ||
p2m_get_hostp2m(d)->global_logdirty) )
if ( !pcidevs_trylock() )
return -ERESTART;
- pdev = pci_get_pdev_by_domain(hardware_domain, seg, bus, devfn);
+ pdev = pci_get_pdev(seg, bus, devfn);
+
+ rc = -ENODEV;
if ( !pdev )
- {
- rc = pci_get_pdev(seg, bus, devfn) ? -EBUSY : -ENODEV;
goto done;
- }
+
+ rc = 0;
+ if ( d == pdev->domain )
+ goto done;
+
+ rc = -EBUSY;
+ if ( pdev->domain != hardware_domain &&
+ pdev->domain != dom_io )
+ goto done;
if ( pdev->msix )
{
}
done:
+ /* The device is assigned to dom_io so mark it as quarantined */
+ if ( !rc && d == dom_io )
+ pdev->quarantine = true;
+
pcidevs_unlock();
return rc;
ret = hypercall_create_continuation(__HYPERVISOR_domctl,
"h", u_domctl);
else if ( ret )
- printk(XENLOG_G_ERR "XEN_DOMCTL_assign_device: "
+ printk(XENLOG_G_ERR
"assign %04x:%02x:%02x.%u to dom%d failed (%d)\n",
seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
d->domain_id, ret);
int agaw, rc, ret;
bool_t flush_dev_iotlb;
+ /* dom_io is used as a sentinel for quarantined devices */
+ if ( domain == dom_io )
+ return 0;
+
ASSERT(pcidevs_locked());
spin_lock(&iommu->lock);
maddr = bus_to_context_maddr(iommu, bus);
int iommu_domid, rc, ret;
bool_t flush_dev_iotlb;
+ /* dom_io is used as a sentinel for quarantined devices */
+ if ( domain == dom_io )
+ return 0;
+
ASSERT(pcidevs_locked());
spin_lock(&iommu->lock);
goto out;
}
+ /* dom_io is used as a sentinel for quarantined devices */
+ if ( domain == dom_io )
+ goto out;
+
/*
* if no other devices under the same iommu owned by this domain,
* clear iommu in iommu_bitmap and clear domain_id in domid_bitmp
if ( ret )
return ret;
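+ /*
+ * Park the device on dom_io's list first; it is moved onto the
+ * target's list further down only once the new context mappings are
+ * in place.
+ */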
+ if ( devfn == pdev->devfn && pdev->domain != dom_io )
+ {
+ list_move(&pdev->domain_list, &dom_io->pdev_list);
+ pdev->domain = dom_io;
+ }
+
+ if ( !has_arch_pdevs(source) )
+ vmx_pi_hooks_deassign(source);
+
if ( !has_arch_pdevs(target) )
vmx_pi_hooks_assign(target);
return ret;
}
- if ( devfn == pdev->devfn )
+ if ( devfn == pdev->devfn && pdev->domain != target )
{
list_move(&pdev->domain_list, &target->pdev_list);
pdev->domain = target;
}
- if ( !has_arch_pdevs(source) )
- vmx_pi_hooks_deassign(source);
-
return ret;
}
static int intel_iommu_assign_device(
struct domain *d, u8 devfn, struct pci_dev *pdev, u32 flag)
{
+ struct domain *s = pdev->domain;
struct acpi_rmrr_unit *rmrr;
int ret = 0, i;
u16 bdf, seg;
}
}
- ret = reassign_device_ownership(hardware_domain, d, devfn, pdev);
- if ( ret )
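+ /*
+ * The device may be coming from dom_io rather than the hardware
+ * domain; when the target is dom_io there is no need to set up RMRR
+ * identity mappings, so return early.
+ */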
+ ret = reassign_device_ownership(s, d, devfn, pdev);
+ if ( ret || d == dom_io )
return ret;
/* Setup rmrr identity mapping */
ret = rmrr_identity_mapping(d, 1, rmrr, flag);
if ( ret )
{
- reassign_device_ownership(d, hardware_domain, devfn, pdev);
+ int rc;
+
+ rc = reassign_device_ownership(d, s, devfn, pdev);
printk(XENLOG_G_ERR VTDPREFIX
" cannot map reserved region (%"PRIx64",%"PRIx64"] for Dom%d (%d)\n",
rmrr->base_address, rmrr->end_address,
d->domain_id, ret);
+ if ( rc )
+ {
+ printk(XENLOG_ERR VTDPREFIX
+ " failed to reclaim %04x:%02x:%02x.%u from %pd (%d)\n",
+ seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), d, rc);
+ domain_crash(d);
+ }
break;
}
}
nodeid_t node; /* NUMA node */
+ /* Device to be quarantined, don't automatically re-assign to dom0 */
+ bool quarantine;
+
/* Device with errata, ignore the BARs. */
bool ignore_bars;