ret = -ESRCH;
if ( iommu_enabled )
{
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
ret = pt_irq_create_bind(d, bind);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
}
if ( ret < 0 )
printk(XENLOG_G_ERR "pt_irq_create_bind failed (%ld) for dom%d\n",
if ( iommu_enabled )
{
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
ret = pt_irq_destroy_bind(d, bind);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
}
if ( ret < 0 )
printk(XENLOG_G_ERR "pt_irq_destroy_bind failed (%ld) for dom%d\n",
struct msixtbl_entry *entry, *new_entry;
int r = -EINVAL;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
ASSERT(spin_is_locked(&d->event_lock));
if ( !has_vlapic(d) )
struct pci_dev *pdev;
struct msixtbl_entry *entry;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
ASSERT(spin_is_locked(&d->event_lock));
if ( !has_vlapic(d) )
struct pci_dev *pdev;
unsigned int nr = 0;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
ret = -ENODEV;
if ( !cpu_has_apic )
if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
return -EINVAL;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
ASSERT(spin_is_locked(&d->event_lock));
info = pirq_info(d, pirq);
{
int i;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
spin_lock(&d->event_lock);
for ( i = 0; i < d->nr_pirqs; i++ )
unmap_domain_pirq(d, i);
spin_unlock(&d->event_lock);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
}
static void dump_irqs(unsigned char key)
u8 slot = PCI_SLOT(dev->devfn);
u8 func = PCI_FUNC(dev->devfn);
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
pos = pci_find_cap_offset(seg, bus, slot, func, PCI_CAP_ID_MSI);
if ( !pos )
return -ENODEV;
u8 func = PCI_FUNC(dev->devfn);
bool_t maskall = msix->host_maskall;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
control = pci_conf_read16(seg, bus, slot, func, msix_control_reg(pos));
/*
struct pci_dev *pdev;
struct msi_desc *old_desc;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
pdev = pci_get_pdev(msi->seg, msi->bus, msi->devfn);
if ( !pdev )
return -ENODEV;
u8 func = PCI_FUNC(msi->devfn);
struct msi_desc *old_desc;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
pdev = pci_get_pdev(msi->seg, msi->bus, msi->devfn);
pos = pci_find_cap_offset(msi->seg, msi->bus, slot, func, PCI_CAP_ID_MSIX);
if ( !pdev || !pos )
if ( !pos )
return -ENODEV;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_pdev(seg, bus, devfn);
if ( !pdev )
rc = -ENODEV;
rc = msix_capability_init(pdev, pos, NULL, NULL,
multi_msix_capable(control));
}
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return rc;
}
*/
int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc)
{
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
if ( !use_msi )
return -EPERM;
unsigned int type = 0, pos = 0;
u16 control = 0;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
if ( !use_msi )
return -EOPNOTSUPP;
if ( reg < 64 || reg >= 256 )
return 0;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_pdev(seg, PCI_BUS(bdf), PCI_DEVFN2(bdf));
if ( pdev )
rc = pci_msi_conf_write_intercept(pdev, reg, size, data);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return rc;
}
goto free_domain;
}
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
/* Verify or get pirq. */
spin_lock(&d->event_lock);
pirq = domain_irq_to_pirq(d, irq);
done:
spin_unlock(&d->event_lock);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
if ( ret != 0 )
switch ( type )
{
goto free_domain;
}
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
spin_lock(&d->event_lock);
ret = unmap_domain_pirq(d, pirq);
spin_unlock(&d->event_lock);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
free_domain:
rcu_unlock_domain(d);
if ( copy_from_guest(&restore_msi, arg, 1) != 0 )
break;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_pdev(0, restore_msi.bus, restore_msi.devfn);
ret = pdev ? pci_restore_msi_state(pdev) : -ENODEV;
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
break;
}
if ( copy_from_guest(&dev, arg, 1) != 0 )
break;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_pdev(dev.seg, dev.bus, dev.devfn);
ret = pdev ? pci_restore_msi_state(pdev) : -ENODEV;
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
break;
}
break;
}
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_pdev(dev.seg, dev.bus, dev.devfn);
if ( !pdev )
node = XEN_INVALID_DEV;
else if ( pdev->node == NUMA_NO_NODE )
node = XEN_INVALID_NODE_ID;
else
node = pdev->node;
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
if ( copy_to_guest_offset(ti->nodes, i, &node, 1) )
{
bus = PCI_BUS(device_id);
devfn = PCI_DEVFN2(device_id);
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_real_pdev(iommu->seg, bus, devfn);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
if ( pdev )
guest_iommu_add_ppr_log(pdev->domain, entry);
return 0;
}
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
iommu->msi.dev = pci_get_pdev(iommu->seg, PCI_BUS(iommu->bdf),
PCI_DEVFN2(iommu->bdf));
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
if ( !iommu->msi.dev )
{
AMD_IOMMU_DEBUG("IOMMU: no pdev for %04x:%02x:%02x.%u\n",
hd->arch.paging_mode = level;
hd->arch.root_table = new_root;
- if ( !spin_is_locked(&pcidevs_lock) )
+ if ( !pcidevs_locked() )
AMD_IOMMU_DEBUG("%s Try to access pdev_list "
"without aquiring pcidevs_lock.\n", __func__);
spin_unlock_irqrestore(&iommu->lock, flags);
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
!pci_ats_enabled(iommu->seg, bus, pdev->devfn) )
}
spin_unlock_irqrestore(&iommu->lock, flags);
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
if ( devfn == pdev->devfn &&
pci_ats_device(iommu->seg, bus, devfn) &&
} bus2bridge[MAX_BUSES];
};
-spinlock_t pcidevs_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t _pcidevs_lock = SPIN_LOCK_UNLOCKED;
+
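+/*
+ * Wrappers for the now file-local PCI device list lock.  The lock is
+ * taken and released with the recursive spinlock primitives;
+ * pcidevs_locked() is intended for ASSERT()-style checks by code that
+ * expects its caller to hold the lock.
+ */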
+void pcidevs_lock(void)
+{
+ spin_lock_recursive(&_pcidevs_lock);
+}
+
+void pcidevs_unlock(void)
+{
+ spin_unlock_recursive(&_pcidevs_lock);
+}
+
+bool_t pcidevs_locked(void)
+{
+ return !!spin_is_locked(&_pcidevs_lock);
+}
+
+bool_t pcidevs_trylock(void)
+{
+ return !!spin_trylock_recursive(&_pcidevs_lock);
+}
+
static struct radix_tree_root pci_segments;
static inline struct pci_seg *get_pseg(u16 seg)
struct pci_dev *pdev;
int rc = -ENOMEM;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = alloc_pdev(get_pseg(0), bus, devfn);
if ( pdev )
{
_pci_hide_device(pdev);
rc = 0;
}
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return rc;
}
struct pci_seg *pseg = get_pseg(seg);
struct pci_dev *pdev = NULL;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
ASSERT(seg != -1 || bus == -1);
ASSERT(bus != -1 || devfn == -1);
pdev_type = "extended function";
else if (info->is_virtfn)
{
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_pdev(seg, info->physfn.bus, info->physfn.devfn);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
if ( !pdev )
pci_add_device(seg, info->physfn.bus, info->physfn.devfn,
NULL, node);
ret = -ENOMEM;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pseg = alloc_pseg(seg);
if ( !pseg )
goto out;
pci_enable_acs(pdev);
out:
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
if ( !ret )
{
printk(XENLOG_DEBUG "PCI add %s %04x:%02x:%02x.%u\n", pdev_type,
if ( !pseg )
return -ENODEV;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
if ( pdev->bus == bus && pdev->devfn == devfn )
{
break;
}
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return ret;
}
u8 bus, devfn;
int ret;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
ret = pci_clean_dpci_irqs(d);
if ( ret )
{
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return ret;
}
while ( (pdev = pci_get_pdev_by_domain(d, -1, -1, -1)) )
d->domain_id, pdev->seg, bus,
PCI_SLOT(devfn), PCI_FUNC(devfn));
}
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return 0;
}
s_time_t now = NOW();
u16 cword;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_real_pdev(seg, bus, devfn);
if ( pdev )
{
if ( ++pdev->fault.count < PT_FAULT_THRESHOLD )
pdev = NULL;
}
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
if ( !pdev )
return;
{
int ret;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
ret = pci_segments_iterate(_scan_pci_devices, NULL);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return ret;
}
if ( iommu_verbose )
{
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
process_pending_softirqs();
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
}
}
if ( !iommu_verbose )
{
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
process_pending_softirqs();
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
}
}
{
struct setup_hwdom ctxt = { .d = d, .handler = handler };
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pci_segments_iterate(_setup_hwdom_pci_devices, &ctxt);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
}
#ifdef CONFIG_ACPI
static void dump_pci_devices(unsigned char ch)
{
printk("==== PCI devices ====\n");
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pci_segments_iterate(_dump_pci_devices, NULL);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
}
static int __init setup_dump_pcidevs(void)
if ( !pdev->domain )
return -EINVAL;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
hd = domain_hvm_iommu(pdev->domain);
if ( !iommu_enabled || !hd->platform_ops )
if ( !pdev->domain )
return -EINVAL;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
hd = domain_hvm_iommu(pdev->domain);
if ( !iommu_enabled || !hd->platform_ops ||
{
struct pci_dev *pdev;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_pdev_by_domain(hardware_domain, seg, bus, devfn);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return pdev ? 0 : -EBUSY;
}
p2m_get_hostp2m(d)->global_logdirty)) )
return -EXDEV;
- if ( !spin_trylock(&pcidevs_lock) )
+ if ( !pcidevs_trylock() )
return -ERESTART;
rc = iommu_construct(d);
if ( rc )
{
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return rc;
}
done:
if ( !has_arch_pdevs(d) && need_iommu(d) )
iommu_teardown(d);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return rc;
}
if ( !iommu_enabled || !hd->platform_ops )
return -EINVAL;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
pdev = pci_get_pdev_by_domain(d, seg, bus, devfn);
if ( !pdev )
return -ENODEV;
group_id = ops->get_device_group_id(seg, bus, devfn);
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
for_each_pdev( d, pdev )
{
if ( (pdev->seg != seg) ||
if ( unlikely(copy_to_guest_offset(buf, i, &bdf, 1)) )
{
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return -1;
}
i++;
}
}
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return i;
}
bus = PCI_BUS(machine_sbdf);
devfn = PCI_DEVFN2(machine_sbdf);
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
ret = deassign_device(d, seg, bus, devfn);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
if ( ret )
printk(XENLOG_G_ERR
"deassign %04x:%02x:%02x.%u from dom%d failed (%d)\n",
spin_unlock_irq(&desc->lock);
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
/*
* FIXME: For performance reasons we should store the 'iommu' pointer in
u16 seg = iommu->intel->drhd->segment;
int agaw;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
spin_lock(&iommu->lock);
maddr = bus_to_context_maddr(iommu, bus);
context_entries = (struct context_entry *)map_vtd_domain_page(maddr);
if ( !drhd )
return -ENODEV;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
switch ( pdev->type )
{
u64 maddr;
int iommu_domid;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
spin_lock(&iommu->lock);
maddr = bus_to_context_maddr(iommu, bus);
struct mapped_rmrr *mrmrr;
struct hvm_iommu *hd = domain_hvm_iommu(d);
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
ASSERT(rmrr->base_address < rmrr->end_address);
/*
u16 bdf;
int ret, i;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_locked());
if ( !pdev->domain )
return -EINVAL;
u16 bdf;
int ret, i;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
for_each_rmrr_device ( rmrr, bdf, i )
{
/*
dprintk(XENLOG_ERR VTDPREFIX,
"IOMMU: mapping reserved region failed\n");
}
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
}
int __init intel_vtd_setup(void)
const struct pci_dev *pdev;
u8 b = bus, df = devfn, sb;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_pdev(0, bus, devfn);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
if ( !pdev ||
pci_conf_read16(0, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
* interrupt handling related (the mask bit register).
*/
-extern spinlock_t pcidevs_lock;
+void pcidevs_lock(void);
+void pcidevs_unlock(void);
+bool_t __must_check pcidevs_locked(void);
+bool_t __must_check pcidevs_trylock(void);
bool_t pci_known_segment(u16 seg);
bool_t pci_device_detect(u16 seg, u8 bus, u8 dev, u8 func);
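
For reference, a minimal sketch of how callers use the new interface (do_locked_work() is a hypothetical helper; the -ERESTART handling mirrors the pcidevs_trylock() hunk above and is illustrative only):

    /* Blocking acquisition around a critical section. */
    pcidevs_lock();
    do_locked_work();                 /* hypothetical helper */
    pcidevs_unlock();

    /* Non-blocking acquisition on a restartable (e.g. hypercall) path. */
    if ( !pcidevs_trylock() )
        return -ERESTART;
    do_locked_work();
    pcidevs_unlock();

    /* Callees that rely on the caller holding the lock simply assert it. */
    ASSERT(pcidevs_locked());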