    spin_unlock(&d->event_lock);
}

-static void hvm_pirq_eoi(struct pirq *pirq,
-                         const union vioapic_redir_entry *ent)
+static void hvm_pirq_eoi(struct pirq *pirq)
{
    struct hvm_pirq_dpci *pirq_dpci;

    pirq_dpci = pirq_dpci(pirq);

    /*
     * No need to get vector lock for timer
     * since interrupt is still not EOIed
     */
    if ( --pirq_dpci->pending ||
-         (ent && ent->fields.mask) ||
         !pt_irq_need_timer(pirq_dpci->flags) )
        return;

    stop_timer(&pirq_dpci->timer);
    pirq_guest_eoi(pirq);
}
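The removed check is the behavioural change here: before this patch, a masked vIO-APIC redirection entry made the helper return right after decrementing the pending count, i.e.

    if ( --pirq_dpci->pending ||
         (ent && ent->fields.mask) ||   /* pin masked: skip the EOI */
         !pt_irq_need_timer(pirq_dpci->flags) )
        return;

so the physical interrupt was not EOIed on the guest's EOI and was instead left for the dpci timeout timer to clean up. With the parameter gone, the EOI bookkeeping runs regardless of the pin's mask status, and the redirection entry no longer needs to be threaded through any of the callers below.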
static void __hvm_dpci_eoi(struct domain *d,
-                           const struct hvm_girq_dpci_mapping *girq,
-                           const union vioapic_redir_entry *ent)
+                           const struct hvm_girq_dpci_mapping *girq)
{
    struct pirq *pirq = pirq_info(d, girq->machine_gsi);

    if ( !hvm_domain_use_pirq(d, pirq) )
        hvm_pci_intx_deassert(d, girq->device, girq->intx);

-    hvm_pirq_eoi(pirq, ent);
+    hvm_pirq_eoi(pirq);
}
-static void hvm_gsi_eoi(struct domain *d, unsigned int gsi,
-                        const union vioapic_redir_entry *ent)
+static void hvm_gsi_eoi(struct domain *d, unsigned int gsi)
{
    struct pirq *pirq = pirq_info(d, gsi);

    /* Check if GSI is actually mapped. */
    if ( !pirq_dpci(pirq) )
        return;

    hvm_gsi_deassert(d, gsi);
-    hvm_pirq_eoi(pirq, ent);
+    hvm_pirq_eoi(pirq);
}
-void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
-                  const union vioapic_redir_entry *ent)
+void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi)
{
    const struct hvm_irq_dpci *hvm_irq_dpci;
    const struct hvm_girq_dpci_mapping *girq;

    if ( !is_iommu_enabled(d) )
        return;

    if ( is_hardware_domain(d) )
    {
        spin_lock(&d->event_lock);
-        hvm_gsi_eoi(d, guest_gsi, ent);
+        hvm_gsi_eoi(d, guest_gsi);
        goto unlock;
    }

    if ( guest_gsi < NR_ISAIRQS )
    {
        hvm_dpci_isairq_eoi(d, guest_gsi);
        return;
    }

    spin_lock(&d->event_lock);
    hvm_irq_dpci = domain_get_irq_dpci(d);

    if ( !hvm_irq_dpci )
        goto unlock;

    list_for_each_entry ( girq, &hvm_irq_dpci->girq[guest_gsi], list )
-        __hvm_dpci_eoi(d, girq, ent);
+        __hvm_dpci_eoi(d, girq);

 unlock:
    spin_unlock(&d->event_lock);
}
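Call sites shed the trailing argument in the same mechanical way; for example, the vIO-APIC EOI path would now read (a sketch: the exact call site, vioapic_update_EOI() in xen/arch/x86/hvm/vioapic.c, and the gsi expression are assumed from the upstream tree of this era):

-        hvm_dpci_eoi(d, vioapic->base_gsi + pin, ent);
+        hvm_dpci_eoi(d, vioapic->base_gsi + pin);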
The declaration in the corresponding header is updated to match:

bool handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
                                  struct npfec);
bool handle_pio(uint16_t port, unsigned int size, int dir);
void hvm_interrupt_post(struct vcpu *v, int vector, int type);
-void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq,
-                  const union vioapic_redir_entry *ent);
+void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq);
void msix_write_completion(struct vcpu *);

#ifdef CONFIG_HVM