spin_lock(&irq_map->dom->event_lock);
+ if ( irq_map->flags & HVM_IRQ_DPCI_IDENTITY_GSI )
+ {
+ struct pirq *pirq = dpci_pirq(irq_map);
+
+ ASSERT(is_hardware_domain(irq_map->dom));
+ /*
+ * The GSI is identity mapped: no need to iterate over the guest GSI
+ * list to find other pirqs sharing the same guest GSI.
+ */
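+ /* Latch the EOI so the pt_pirq_iterate() below performs the actual EOI. */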
+ irq_map->flags |= HVM_IRQ_DPCI_EOI_LATCH;
+ hvm_gsi_deassert(irq_map->dom, pirq->pirq);
+ goto out;
+ }
+
dpci = domain_get_irq_dpci(irq_map->dom);
if ( unlikely(!dpci) )
{
ASSERT_UNREACHABLE();
spin_unlock(&irq_map->dom->event_lock);
return;
}
list_for_each_entry ( digl, &irq_map->digl_list, list )
hvm_pci_intx_deassert(irq_map->dom, digl->device, digl->intx);
+ out:
pt_pirq_iterate(irq_map->dom, pt_irq_guest_eoi, NULL);
spin_unlock(&irq_map->dom->event_lock);
spin_lock(&d->event_lock);
hvm_irq_dpci = domain_get_irq_dpci(d);
- if ( hvm_irq_dpci == NULL )
+ if ( hvm_irq_dpci == NULL && !is_hardware_domain(d) )
{
unsigned int i;
+ /*
+ * NB: the hardware domain doesn't use a hvm_irq_dpci struct because
+ * it's only allowed to identity map GSIs, and so the data contained in
+ * that struct (used to map guest GSIs into machine GSIs and perform
+ * interrupt routing) is completely useless to it.
+ */
hvm_irq_dpci = xzalloc(struct hvm_irq_dpci);
if ( hvm_irq_dpci == NULL )
{
spin_unlock(&d->event_lock);
return -ENOMEM;
}
case PT_IRQ_TYPE_PCI:
case PT_IRQ_TYPE_MSI_TRANSLATE:
{
- unsigned int bus = pt_irq_bind->u.pci.bus;
- unsigned int device = pt_irq_bind->u.pci.device;
- unsigned int intx = pt_irq_bind->u.pci.intx;
- unsigned int guest_gsi = hvm_pci_intx_gsi(device, intx);
- unsigned int link = hvm_pci_intx_link(device, intx);
- struct dev_intx_gsi_link *digl = xmalloc(struct dev_intx_gsi_link);
- struct hvm_girq_dpci_mapping *girq =
- xmalloc(struct hvm_girq_dpci_mapping);
+ struct dev_intx_gsi_link *digl = NULL;
+ struct hvm_girq_dpci_mapping *girq = NULL;
+ unsigned int guest_gsi;
- if ( !digl || !girq )
+ /*
+ * Mapping GSIs for the hardware domain is different from doing it for
+ * an unprivileged guest: the hardware domain is only allowed to
+ * identity map GSIs, and as such all the data in the u.pci union is
+ * discarded.
+ */
+ if ( !is_hardware_domain(d) )
{
- spin_unlock(&d->event_lock);
- xfree(girq);
- xfree(digl);
- return -ENOMEM;
- }
+ unsigned int link;
+
+ digl = xmalloc(struct dev_intx_gsi_link);
+ girq = xmalloc(struct hvm_girq_dpci_mapping);
+
+ if ( !digl || !girq )
+ {
+ spin_unlock(&d->event_lock);
+ xfree(girq);
+ xfree(digl);
+ return -ENOMEM;
+ }
+
+ girq->bus = digl->bus = pt_irq_bind->u.pci.bus;
+ girq->device = digl->device = pt_irq_bind->u.pci.device;
+ girq->intx = digl->intx = pt_irq_bind->u.pci.intx;
+ list_add_tail(&digl->list, &pirq_dpci->digl_list);
- hvm_irq_dpci->link_cnt[link]++;
+ guest_gsi = hvm_pci_intx_gsi(digl->device, digl->intx);
+ link = hvm_pci_intx_link(digl->device, digl->intx);
- digl->bus = bus;
- digl->device = device;
- digl->intx = intx;
- list_add_tail(&digl->list, &pirq_dpci->digl_list);
+ hvm_irq_dpci->link_cnt[link]++;
- girq->bus = bus;
- girq->device = device;
- girq->intx = intx;
- girq->machine_gsi = pirq;
- list_add_tail(&girq->list, &hvm_irq_dpci->girq[guest_gsi]);
+ girq->machine_gsi = pirq;
+ list_add_tail(&girq->list, &hvm_irq_dpci->girq[guest_gsi]);
+ }
+ else
+ {
+ /* MSI_TRANSLATE is not supported by the hardware domain. */
+ ASSERT(pt_irq_bind->irq_type == PT_IRQ_TYPE_PCI);
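+ /* Identity mapping: the guest GSI is the machine GSI (pirq). */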
+ guest_gsi = pirq;
+ ASSERT(guest_gsi < hvm_domain_irq(d)->nr_gsis);
+ }
/* Bind the same mirq once in the same domain */
if ( !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED |
HVM_IRQ_DPCI_MACH_PCI |
HVM_IRQ_DPCI_GUEST_PCI;
- share = BIND_PIRQ__WILL_SHARE;
+ if ( !is_hardware_domain(d) )
+ share = BIND_PIRQ__WILL_SHARE;
+ else
+ {
+ unsigned int pin;
+ struct hvm_vioapic *vioapic = gsi_vioapic(d, guest_gsi,
+ &pin);
+
+ if ( !vioapic )
+ {
+ ASSERT_UNREACHABLE();
+ spin_unlock(&d->event_lock);
+ return -EINVAL;
+ }
+ pirq_dpci->flags |= HVM_IRQ_DPCI_IDENTITY_GSI;
+ /*
+ * Check whether the corresponding vIO-APIC pin is configured for
+ * level or edge trigger mode; level triggered interrupts will be
+ * marked as shareable.
+ */
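+ /* VIOAPIC_LEVEL_TRIG and BIND_PIRQ__WILL_SHARE are both 1. */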
+ share = vioapic->redirtbl[pin].fields.trig_mode;
+ }
}
/* Init timer before binding */
* IRQ_GUEST is not set. As such we can reset 'dom' directly.
*/
pirq_dpci->dom = NULL;
- list_del(&girq->list);
- list_del(&digl->list);
- hvm_irq_dpci->link_cnt[link]--;
+ if ( !is_hardware_domain(d) )
+ {
+ unsigned int link = hvm_pci_intx_link(digl->device,
+ digl->intx);
+
+ list_del(&girq->list);
+ list_del(&digl->list);
+ hvm_irq_dpci->link_cnt[link]--;
+ }
pirq_dpci->flags = 0;
pirq_cleanup_check(info, d);
spin_unlock(&d->event_lock);
return rc;
}
spin_unlock(&d->event_lock);
if ( iommu_verbose )
- printk(XENLOG_G_INFO
- "d%d: bind: m_gsi=%u g_gsi=%u dev=%02x.%02x.%u intx=%u\n",
- d->domain_id, pirq, guest_gsi, bus,
- PCI_SLOT(device), PCI_FUNC(device), intx);
+ {
+ char buf[50];
+
+ if ( !is_hardware_domain(d) )
+ snprintf(buf, ARRAY_SIZE(buf), " dev=%02x.%02x.%u intx=%u",
+ digl->bus, PCI_SLOT(digl->device),
+ PCI_FUNC(digl->device), digl->intx);
+
+ printk(XENLOG_G_INFO "d%d: bind: m_gsi=%u g_gsi=%u%s\n",
+ d->domain_id, pirq, guest_gsi,
+ !is_hardware_domain(d) ? buf : "");
+ }
break;
}
int pt_irq_destroy_bind(
struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
- struct hvm_irq_dpci *hvm_irq_dpci;
struct hvm_pirq_dpci *pirq_dpci;
unsigned int machine_gsi = pt_irq_bind->machine_irq;
struct pirq *pirq;
spin_lock(&d->event_lock);
- hvm_irq_dpci = domain_get_irq_dpci(d);
-
- if ( hvm_irq_dpci == NULL )
+ pirq = pirq_info(d, machine_gsi);
+ pirq_dpci = pirq_dpci(pirq);
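+ /* Unbinding an identity mapped GSI from the hardware domain is not supported. */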
+ if ( pirq_dpci && (pirq_dpci->flags & HVM_IRQ_DPCI_IDENTITY_GSI) )
{
+ ASSERT(is_hardware_domain(d));
spin_unlock(&d->event_lock);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
- pirq = pirq_info(d, machine_gsi);
- pirq_dpci = pirq_dpci(pirq);
-
if ( pt_irq_bind->irq_type != PT_IRQ_TYPE_MSI )
{
unsigned int bus = pt_irq_bind->u.pci.bus;
unsigned int device = pt_irq_bind->u.pci.device;
unsigned int intx = pt_irq_bind->u.pci.intx;
unsigned int guest_gsi = hvm_pci_intx_gsi(device, intx);
unsigned int link = hvm_pci_intx_link(device, intx);
+ struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
struct hvm_girq_dpci_mapping *girq;
struct dev_intx_gsi_link *digl, *tmp;
+ if ( hvm_irq_dpci == NULL )
+ {
+ spin_unlock(&d->event_lock);
+ return -EINVAL;
+ }
+
list_for_each_entry ( girq, &hvm_irq_dpci->girq[guest_gsi], list )
{
if ( girq->bus == bus &&
struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
struct hvm_pirq_dpci *pirq_dpci = pirq_dpci(pirq);
- if ( !iommu_enabled || !dpci || !pirq_dpci ||
+ if ( !iommu_enabled || (!is_hardware_domain(d) && !dpci) || !pirq_dpci ||
!(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
return 0;
static void hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci)
{
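+ /* The hardware domain legitimately lacks a hvm_irq_dpci struct. */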
- if ( unlikely(!hvm_domain_irq(d)->dpci) )
+ if ( unlikely(!hvm_domain_irq(d)->dpci) && !is_hardware_domain(d) )
{
ASSERT_UNREACHABLE();
return;
list_for_each_entry ( digl, &pirq_dpci->digl_list, list )
{
+ ASSERT(!(pirq_dpci->flags & HVM_IRQ_DPCI_IDENTITY_GSI));
hvm_pci_intx_assert(d, digl->device, digl->intx);
pirq_dpci->pending++;
}
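+ /* For an identity mapped GSI assert the guest GSI directly. */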
+ if ( pirq_dpci->flags & HVM_IRQ_DPCI_IDENTITY_GSI )
+ {
+ hvm_gsi_assert(d, pirq->pirq);
+ pirq_dpci->pending++;
+ }
+
if ( pirq_dpci->flags & HVM_IRQ_DPCI_TRANSLATE )
{
/* for translated MSI to INTx interrupt, eoi as early as possible */
spin_unlock(&d->event_lock);
}
-static void __hvm_dpci_eoi(struct domain *d,
- const struct hvm_girq_dpci_mapping *girq,
+static void __hvm_pirq_eoi(struct pirq *pirq,
const union vioapic_redir_entry *ent)
{
- struct pirq *pirq = pirq_info(d, girq->machine_gsi);
- struct hvm_pirq_dpci *pirq_dpci;
-
- if ( !hvm_domain_use_pirq(d, pirq) )
- hvm_pci_intx_deassert(d, girq->device, girq->intx);
+ struct hvm_pirq_dpci *pirq_dpci = pirq_dpci(pirq);
- pirq_dpci = pirq_dpci(pirq);
+ ASSERT(pirq_dpci);
/*
* No need to get vector lock for timer
pirq_guest_eoi(pirq);
}
+static void __hvm_dpci_eoi(struct domain *d,
+ const struct hvm_girq_dpci_mapping *girq,
+ const union vioapic_redir_entry *ent)
+{
+ struct pirq *pirq = pirq_info(d, girq->machine_gsi);
+
+ if ( !hvm_domain_use_pirq(d, pirq) )
+ hvm_pci_intx_deassert(d, girq->device, girq->intx);
+
+ __hvm_pirq_eoi(pirq, ent);
+}
+
+static void __hvm_gsi_eoi(struct domain *d, unsigned int gsi,
+ const union vioapic_redir_entry *ent)
+{
+ struct pirq *pirq = pirq_info(d, gsi);
+ struct hvm_pirq_dpci *pirq_dpci = pirq_dpci(pirq);
+
+ /* Check if GSI is actually mapped. */
+ if ( !pirq_dpci )
+ return;
+
+ hvm_gsi_deassert(d, gsi);
+ __hvm_pirq_eoi(pirq, ent);
+}
+
void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
const union vioapic_redir_entry *ent)
{
if ( !iommu_enabled )
return;
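+ /* The hardware domain uses identity mapped GSIs: no girq lookup is needed. */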
+ if ( is_hardware_domain(d) )
+ {
+ spin_lock(&d->event_lock);
+ __hvm_gsi_eoi(d, guest_gsi, ent);
+ goto unlock;
+ }
+
if ( guest_gsi < NR_ISAIRQS )
{
hvm_dpci_isairq_eoi(d, guest_gsi);