return ret;
}
+/*
+ * Arch hook invoked when an event channel gets bound to @pirq of domain @d.
+ * Does nothing for a pIRQ with no IRQ mapping (domain_pirq_to_irq() <= 0).
+ * For HVM domains the pIRQ is recorded as a passed-through IRQ (IRQ_PT).
+ * If the IRQ is MSI-based, the guest mask bit is cleared under the
+ * descriptor lock (guest_mask_msi_irq(desc, 0) -- 0 presumably means
+ * "unmask"; NOTE(review): confirm against guest_mask_msi_irq()).
+ */
+void arch_evtchn_bind_pirq(struct domain *d, int pirq)
+{
+ int irq = domain_pirq_to_irq(d, pirq);
+ struct irq_desc *desc;
+ unsigned long flags;
+
+ /* Unmapped pIRQ: nothing to bind or unmask. */
+ if ( irq <= 0 )
+ return;
+
+ if ( is_hvm_domain(d) )
+ map_domain_emuirq_pirq(d, pirq, IRQ_PT);
+
+ desc = irq_to_desc(irq);
+ spin_lock_irqsave(&desc->lock, flags);
+ if ( desc->msi_desc )
+ guest_mask_msi_irq(desc, 0);
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
bool_t hvm_domain_use_pirq(const struct domain *d, const struct pirq *pirq)
{
return is_hvm_domain(d) && pirq &&
static unsigned int startup_msi_irq(struct irq_desc *desc)
{
- bool_t guest_masked = (desc->status & IRQ_GUEST) &&
- is_hvm_domain(desc->msi_desc->dev->domain);
-
- msi_set_mask_bit(desc, 0, guest_masked);
+ /*
+ * Start the MSI masked whenever a guest owns the IRQ, regardless of
+ * guest type (previously only HVM guests were masked here).  The IRQ
+ * presumably gets unmasked once the event channel is bound -- see
+ * arch_evtchn_bind_pirq(); NOTE(review): confirm the unmask path.
+ */
+ msi_set_mask_bit(desc, 0, !!(desc->status & IRQ_GUEST));
 return 0;
 }
bind->port = port;
-#ifdef CONFIG_X86
- if ( is_hvm_domain(d) && domain_pirq_to_irq(d, pirq) > 0 )
- map_domain_emuirq_pirq(d, pirq, IRQ_PT);
-#endif
+ arch_evtchn_bind_pirq(d, pirq);
out:
spin_unlock(&d->event_lock);
void arch_move_irqs(struct vcpu *v);
+/*
+ * ARM stub: no arch work is needed when an event channel gets bound to a
+ * pIRQ.  The expression merely evaluates both arguments (cast to void) so
+ * callers trigger no unused-variable warnings.
+ */
+#define arch_evtchn_bind_pirq(d, pirq) ((void)((d) + (pirq)))
+
/* Set IRQ type for an SPI */
int irq_set_spi_type(unsigned int spi, unsigned int type);
unsigned int arch_hwdom_irqs(domid_t);
#endif
+/*
+ * Notify the arch that an event channel was bound to @pirq of the domain.
+ * Architectures with nothing to do provide a macro stub instead, which
+ * suppresses this prototype.
+ */
+#ifndef arch_evtchn_bind_pirq
+void arch_evtchn_bind_pirq(struct domain *, int pirq);
+#endif
+
#endif /* __XEN_IRQ_H__ */