#include <asm/hvm/support.h>
#include <asm/msi.h>
+/*
+ * Return true iff @d is an HVM domain and @pirq is non-NULL and currently
+ * bound to an emulated IRQ (arch.hvm.emuirq != IRQ_UNBOUND).
+ * Safe to call with a NULL @pirq.
+ */
+bool hvm_domain_use_pirq(const struct domain *d, const struct pirq *pirq)
+{
+ return is_hvm_domain(d) && pirq && pirq->arch.hvm.emuirq != IRQ_UNBOUND;
+}
+
/* Must be called with hvm_domain->irq_lock held */
static void assert_gsi(struct domain *d, unsigned ioapic_gsi)
{
spin_unlock_irqrestore(&desc->lock, flags);
}
-bool hvm_domain_use_pirq(const struct domain *d, const struct pirq *pirq)
-{
- return is_hvm_domain(d) && pirq && pirq->arch.hvm.emuirq != IRQ_UNBOUND;
-}
-
static int allocate_pirq(struct domain *d, int index, int pirq, int irq,
int type, int *nr)
{
void hvm_assert_evtchn_irq(struct vcpu *v);
void hvm_set_callback_via(struct domain *d, uint64_t via);
+struct pirq;
+bool hvm_domain_use_pirq(const struct domain *, const struct pirq *);
+
#endif /* __ASM_X86_HVM_IRQ_H__ */
void free_domain_pirqs(struct domain *d);
int map_domain_emuirq_pirq(struct domain *d, int pirq, int irq);
int unmap_domain_pirq_emuirq(struct domain *d, int pirq);
-bool hvm_domain_use_pirq(const struct domain *, const struct pirq *);
/* Reset irq affinities to match the given CPU mask. */
void fixup_irqs(const cpumask_t *mask, bool verbose);