pirq = get_param('irq')
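+        # Map the pirq in the hypervisor first: access is now tracked per
+        # underlying IRQ, so the pirq->IRQ translation must already exist
+        # when domain_irq_permission is called (an unmapped pirq is rejected).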
+ rc = xc.physdev_map_pirq(domid = self.getDomid(),
+ index = pirq,
+ pirq = pirq)
+ if rc < 0:
+ raise VmError('irq: Failed to map irq %x' % (pirq))
+
rc = xc.domain_irq_permission(domid = self.getDomid(),
pirq = pirq,
allow_access = True)
        if rc < 0:
            #todo non-fatal
            raise VmError(
                'irq: Failed to configure irq: %d' % (pirq))
- rc = xc.physdev_map_pirq(domid = self.getDomid(),
- index = pirq,
- pirq = pirq)
- if rc < 0:
- raise VmError(
- 'irq: Failed to map irq %x' % (pirq))
back = dict([(k, config[k]) for k in self.valid_cfg if k in config])
return (self.allocateDeviceID(), back, {})
/* DOM0 is permitted full I/O capabilities. */
rc |= ioports_permit_access(dom0, 0, 0xFFFF);
rc |= iomem_permit_access(dom0, 0UL, ~0UL);
- rc |= irqs_permit_access(dom0, 0, d->nr_pirqs - 1);
+ rc |= irqs_permit_access(dom0, 1, nr_irqs_gsi - 1);
/*
* Modify I/O port access permissions.
goto bind_out;
ret = -EPERM;
- if ( !IS_PRIV(current->domain) &&
- !irq_access_permitted(current->domain, bind->machine_irq) )
- goto bind_out;
+ if ( !IS_PRIV(current->domain) )
+ {
+ int irq = domain_pirq_to_irq(d, bind->machine_irq);
+
+ if ( irq <= 0 || !irq_access_permitted(current->domain, irq) )
+ goto bind_out;
+ }
ret = -ESRCH;
if ( iommu_enabled )
bind = &(domctl->u.bind_pt_irq);
ret = -EPERM;
- if ( !IS_PRIV(current->domain) &&
- !irq_access_permitted(current->domain, bind->machine_irq) )
- goto unbind_out;
+ if ( !IS_PRIV(current->domain) )
+ {
+ int irq = domain_pirq_to_irq(d, bind->machine_irq);
+
+ if ( irq <= 0 || !irq_access_permitted(current->domain, irq) )
+ goto unbind_out;
+ }
if ( iommu_enabled )
{
out:
spin_unlock_irqrestore(&vector_lock, flags);
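+    /* Grant Dom0 access to any dynamically created IRQ; the grant is
+     * revoked again in destroy_irq(). */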
+ if ( irq > 0 && dom0 )
+ {
+ ret = irq_permit_access(dom0, irq);
+ if ( ret )
+ printk(XENLOG_G_ERR
+ "Could not grant Dom0 access to IRQ%d (error %d)\n",
+ irq, ret);
+ }
+
return irq;
}
void destroy_irq(unsigned int irq)
{
BUG_ON(!MSI_IRQ(irq));
+
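+    /* Revoke the access that create_irq() granted to Dom0 for this IRQ. */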
+ if ( dom0 )
+ {
+ int err = irq_deny_access(dom0, irq);
+
+ if ( err )
+ printk(XENLOG_G_ERR
+ "Could not revoke Dom0 access to IRQ%u (error %d)\n",
+ irq, err);
+ }
+
dynamic_irq_cleanup(irq);
clear_irq_vector(irq);
}
if ( !IS_PRIV(current->domain) &&
!(IS_PRIV_FOR(current->domain, d) &&
- irq_access_permitted(current->domain, pirq)))
+ irq_access_permitted(current->domain, irq)))
return -EPERM;
if ( pirq < 0 || pirq >= d->nr_pirqs || irq < 0 || irq >= nr_irqs )
return 0;
}
- ret = irq_permit_access(d, pirq);
+ ret = irq_permit_access(d, irq);
if ( ret )
{
- dprintk(XENLOG_G_ERR, "dom%d: could not permit access to irq %d\n",
- d->domain_id, pirq);
+ printk(XENLOG_G_ERR
+ "dom%d: could not permit access to IRQ%d (pirq %d)\n",
+ d->domain_id, irq, pirq);
return ret;
}
spin_lock_irqsave(&desc->lock, flags);
if ( desc->handler != &no_irq_type )
+ {
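+            /* The IRQ already has a handler: drop the lock, undo the MSI
+             * enable and fail the mapping with -EBUSY. */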
+ spin_unlock_irqrestore(&desc->lock, flags);
dprintk(XENLOG_G_ERR, "dom%d: irq %d in use\n",
d->domain_id, irq);
+ pci_disable_msi(msi_desc);
+ ret = -EBUSY;
+ goto done;
+ }
desc->handler = &pci_msi_type;
if ( opt_irq_vector_map == OPT_IRQ_VECTOR_MAP_PERDEV
&& !desc->chip_data->used_vectors )
}
done:
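+    /* On failure, revoke the IRQ access granted earlier in this function. */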
+ if ( ret && irq_deny_access(d, irq) )
+ printk(XENLOG_G_ERR
+ "dom%d: could not revoke access to IRQ%d (pirq %d)\n",
+ d->domain_id, irq, pirq);
return ret;
}
if (msi_desc)
msi_free_irq(msi_desc);
- ret = irq_deny_access(d, pirq);
+ ret = irq_deny_access(d, irq);
if ( ret )
- dprintk(XENLOG_G_ERR, "dom%d: could not deny access to irq %d\n",
- d->domain_id, pirq);
+ printk(XENLOG_G_ERR
+ "dom%d: could not deny access to IRQ%d (pirq %d)\n",
+ d->domain_id, irq, pirq);
if ( desc->handler == &pci_msi_type )
desc->handler = &no_irq_type;
if ( irq == -1 )
irq = create_irq();
- if ( irq < 0 || irq >= nr_irqs )
+ if ( irq < nr_irqs_gsi || irq >= nr_irqs )
{
dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
d->domain_id);
if ( pirq >= d->nr_pirqs )
ret = -EINVAL;
else if ( op->u.irq_permission.allow_access )
- ret = irq_permit_access(d, pirq);
+ ret = pirq_permit_access(d, pirq);
else
- ret = irq_deny_access(d, pirq);
+ ret = pirq_deny_access(d, pirq);
rcu_unlock_domain(d);
}
if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
return -EINVAL;
- if ( !is_hvm_domain(d) && !irq_access_permitted(d, pirq) )
+ if ( !is_hvm_domain(d) && !pirq_access_permitted(d, pirq) )
return -EPERM;
spin_lock(&d->event_lock);
#define irq_access_permitted(d, i) \
rangeset_contains_singleton((d)->irq_caps, i)
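+/*
+ * The pirq_* helpers translate a domain's pirq to the underlying IRQ
+ * (via domain_pirq_to_irq) before consulting ->irq_caps; permitting or
+ * denying access for an unmapped pirq fails with -EINVAL.
+ */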
+#define pirq_permit_access(d, i) ({ \
+ struct domain *d__ = (d); \
+ int i__ = domain_pirq_to_irq(d__, i); \
+ i__ > 0 ? rangeset_add_singleton(d__->irq_caps, i__)\
+ : -EINVAL; \
+})
+#define pirq_deny_access(d, i) ({ \
+ struct domain *d__ = (d); \
+ int i__ = domain_pirq_to_irq(d__, i); \
+ i__ > 0 ? rangeset_remove_singleton(d__->irq_caps, i__)\
+ : -EINVAL; \
+})
+#define pirq_access_permitted(d, i) ({ \
+ struct domain *d__ = (d); \
+ rangeset_contains_singleton(d__->irq_caps, \
+ domain_pirq_to_irq(d__, i));\
+})
+
#endif /* __XEN_IOCAP_H__ */