bool pt_irq_need_timer(uint32_t flags)
{
- return !(flags & (HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_TRANSLATE));
+ return !(flags & (HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_TRANSLATE |
+ HVM_IRQ_DPCI_NO_EOI));
}
static int pt_irq_guest_eoi(struct domain *d, struct hvm_pirq_dpci *pirq_dpci,
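
This first hunk widens the mask in pt_irq_need_timer(), so a pirq carrying the new HVM_IRQ_DPCI_NO_EOI flag is treated like guest MSIs and translated interrupts: no EOI timeout timer is armed for it. As a standalone illustration (not part of the patch), here is a minimal sketch with the flag values copied from the header hunk at the bottom and pt_irq_need_timer() reproduced as it reads after the change:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Flag values copied from the header hunk at the end of this patch. */
    #define HVM_IRQ_DPCI_GUEST_MSI  (1u << 5)
    #define HVM_IRQ_DPCI_NO_EOI     (1u << 7)
    #define HVM_IRQ_DPCI_TRANSLATE  (1u << 15)

    /* pt_irq_need_timer() as it reads after this patch. */
    static bool pt_irq_need_timer(uint32_t flags)
    {
        return !(flags & (HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_TRANSLATE |
                          HVM_IRQ_DPCI_NO_EOI));
    }

    int main(void)
    {
        /* A plain (level triggered) GSI still wants the timer ... */
        printf("level GSI: %d\n", pt_irq_need_timer(0));                      /* 1 */
        /* ... while an edge GSI tagged NO_EOI, like an MSI, does not. */
        printf("edge GSI:  %d\n", pt_irq_need_timer(HVM_IRQ_DPCI_NO_EOI));    /* 0 */
        printf("guest MSI: %d\n", pt_irq_need_timer(HVM_IRQ_DPCI_GUEST_MSI)); /* 0 */
        return 0;
    }

The flag is set in the binding path, pt_irq_create_bind():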
ASSERT(!mask);
share = trigger_mode;
+ if ( trigger_mode == VIOAPIC_EDGE_TRIG )
+ /*
+ * Edge IO-APIC interrupt, no EOI or unmask to perform
+ * and hence no timer needed.
+ */
+ pirq_dpci->flags |= HVM_IRQ_DPCI_NO_EOI;
}
}
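
The pin's trigger mode is already fetched from the vIO-APIC here to decide shareability; the patch reuses it to tag edge-triggered pins, which need neither an EOI nor an unmask once delivered. A toy model of just that decision (the struct is a stand-in for Xen's hvm_pirq_dpci; the VIOAPIC_* values mirror the IO-APIC RTE trigger-mode bit, 0 = edge, 1 = level):

    #include <stdint.h>
    #include <stdio.h>

    #define VIOAPIC_EDGE_TRIG   0   /* IO-APIC RTE trigger-mode bit values */
    #define VIOAPIC_LEVEL_TRIG  1

    #define HVM_IRQ_DPCI_NO_EOI (1u << 7)

    /* Stand-in for struct hvm_pirq_dpci, keeping only the flags word. */
    struct hvm_pirq_dpci { uint32_t flags; };

    /* Only edge pins are tagged: a level pin must stay asserted until the
     * guest EOIs it (or the timeout timer fires), so it keeps the timer. */
    static void tag_trigger_mode(struct hvm_pirq_dpci *pirq_dpci,
                                 uint8_t trigger_mode)
    {
        if ( trigger_mode == VIOAPIC_EDGE_TRIG )
            pirq_dpci->flags |= HVM_IRQ_DPCI_NO_EOI;
    }

    int main(void)
    {
        struct hvm_pirq_dpci edge = { 0 }, level = { 0 };

        tag_trigger_mode(&edge, VIOAPIC_EDGE_TRIG);
        tag_trigger_mode(&level, VIOAPIC_LEVEL_TRIG);
        printf("edge: %#x, level: %#x\n", edge.flags, level.flags); /* 0x80, 0 */
        return 0;
    }

The delivery path, hvm_dirq_assist(), then consumes the flag: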
send_guest_pirq(d, pirq);
if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI )
- {
- spin_unlock(&d->event_lock);
- return;
- }
+ goto out;
}
if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI )
{
vmsi_deliver_pirq(d, pirq_dpci);
- spin_unlock(&d->event_lock);
- return;
+ goto out;
}
list_for_each_entry ( digl, &pirq_dpci->digl_list, list )
if ( pirq_dpci->flags & HVM_IRQ_DPCI_IDENTITY_GSI )
{
hvm_gsi_assert(d, pirq->pirq);
+ if ( pirq_dpci->flags & HVM_IRQ_DPCI_NO_EOI )
+ goto out;
pirq_dpci->pending++;
}

        if ( pirq_dpci->flags & HVM_IRQ_DPCI_TRANSLATE )
        {
/* for translated MSI to INTx interrupt, eoi as early as possible */
__msi_pirq_eoi(pirq_dpci);
- spin_unlock(&d->event_lock);
- return;
+ goto out;
}

        /*
         * Set a timer for the remaining (level triggered) cases, so the
         * interrupt gets EOId and the line unmasked even if the guest
         * never finishes handling it.
         */
ASSERT(pt_irq_need_timer(pirq_dpci->flags));
set_timer(&pirq_dpci->timer, NOW() + PT_IRQ_TIME_OUT);
}
+
+ out:
spin_unlock(&d->event_lock);
}
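
Two changes land in hvm_dirq_assist(). An identity-mapped GSI tagged NO_EOI is still asserted via hvm_gsi_assert(), but the new goto skips both the pirq_dpci->pending++ accounting and the set_timer() call, consistent with pt_irq_need_timer() now returning false for such pirqs. And since this adds yet another early exit that must drop d->event_lock, all of the duplicated "spin_unlock(); return;" pairs are folded into one out: label, leaving a single place where the lock is released. The shape of that refactor, sketched with pthreads standing in for Xen's spin locks (illustrative only):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Single-exit locking shape: every early return becomes a goto, so
     * the unlock exists in exactly one place and a newly added early-out
     * (like the NO_EOI check) cannot leak the lock. */
    static void dirq_assist_shape(int deliver_early)
    {
        pthread_mutex_lock(&event_lock);

        if ( deliver_early )    /* e.g. MSI delivered, or NO_EOI edge GSI */
            goto out;

        printf("slow path: pending++, arm the timeout timer\n");

     out:
        pthread_mutex_unlock(&event_lock);
    }

    int main(void)
    {
        dirq_assist_shape(1);
        dirq_assist_shape(0);
        return 0;
    }

Finally, the flag definitions in the HVM irq header: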
#define _HVM_IRQ_DPCI_GUEST_PCI_SHIFT 4
#define _HVM_IRQ_DPCI_GUEST_MSI_SHIFT 5
#define _HVM_IRQ_DPCI_IDENTITY_GSI_SHIFT 6
+#define _HVM_IRQ_DPCI_NO_EOI_SHIFT 7
#define _HVM_IRQ_DPCI_TRANSLATE_SHIFT 15
#define HVM_IRQ_DPCI_MACH_PCI (1u << _HVM_IRQ_DPCI_MACH_PCI_SHIFT)
#define HVM_IRQ_DPCI_MACH_MSI (1u << _HVM_IRQ_DPCI_MACH_MSI_SHIFT)
#define HVM_IRQ_DPCI_GUEST_PCI (1u << _HVM_IRQ_DPCI_GUEST_PCI_SHIFT)
#define HVM_IRQ_DPCI_GUEST_MSI (1u << _HVM_IRQ_DPCI_GUEST_MSI_SHIFT)
#define HVM_IRQ_DPCI_IDENTITY_GSI (1u << _HVM_IRQ_DPCI_IDENTITY_GSI_SHIFT)
+#define HVM_IRQ_DPCI_NO_EOI (1u << _HVM_IRQ_DPCI_NO_EOI_SHIFT)
#define HVM_IRQ_DPCI_TRANSLATE (1u << _HVM_IRQ_DPCI_TRANSLATE_SHIFT)
struct hvm_gmsi_info {
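
Bit 7 is the first unused slot between _HVM_IRQ_DPCI_IDENTITY_GSI_SHIFT (6) and _HVM_IRQ_DPCI_TRANSLATE_SHIFT (15), so the new flag fits without renumbering its neighbours. A standalone compile-time check of that invariant (again illustrative, not part of the patch):

    #include <stdio.h>

    /* Shift/mask pairs copied from the header hunk above. */
    #define _HVM_IRQ_DPCI_IDENTITY_GSI_SHIFT  6
    #define _HVM_IRQ_DPCI_NO_EOI_SHIFT        7
    #define _HVM_IRQ_DPCI_TRANSLATE_SHIFT    15

    #define HVM_IRQ_DPCI_IDENTITY_GSI (1u << _HVM_IRQ_DPCI_IDENTITY_GSI_SHIFT)
    #define HVM_IRQ_DPCI_NO_EOI       (1u << _HVM_IRQ_DPCI_NO_EOI_SHIFT)
    #define HVM_IRQ_DPCI_TRANSLATE    (1u << _HVM_IRQ_DPCI_TRANSLATE_SHIFT)

    /* The new mask must not collide with either neighbour. */
    _Static_assert(!(HVM_IRQ_DPCI_NO_EOI &
                     (HVM_IRQ_DPCI_IDENTITY_GSI | HVM_IRQ_DPCI_TRANSLATE)),
                   "HVM_IRQ_DPCI_NO_EOI overlaps an existing flag");

    int main(void)
    {
        printf("NO_EOI mask: %#x\n", HVM_IRQ_DPCI_NO_EOI); /* 0x80 */
        return 0;
    }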