    return rc;
}
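+/* Return non-zero if the domain has at least one ioreq server registered. */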
+int hvm_has_ioreq_server(struct domain *d)
+{
+    int empty;
+
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    empty = list_empty(&d->arch.hvm_domain.ioreq_server.list);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+
+    return !empty;
+}
+
int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
                              unsigned long *ioreq_pfn,
                              unsigned long *bufioreq_pfn,
#include <asm/hvm/irq.h>
#include <asm/hvm/support.h>
#include <xen/hvm/irq.h>
+#include <asm/hvm/ioreq.h>
#include <asm/io_apic.h>
static DEFINE_PER_CPU(struct list_head, dpci_list);
            pirq_dpci->dom = d;
            /* bind after hvm_irq_dpci is setup to avoid race with irq handler*/
            rc = pirq_guest_bind(d->vcpu[0], info, 0);
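+            /*
+             * Only register the MSI-X table intercept for domains that have
+             * an ioreq server (i.e. a device model) behind them.
+             */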
-            if ( rc == 0 && pt_irq_bind->u.msi.gtable )
+            if ( rc == 0 && pt_irq_bind->u.msi.gtable &&
+                 hvm_has_ioreq_server(d) )
            {
                rc = msixtbl_pt_register(d, info, pt_irq_bind->u.msi.gtable);
                if ( unlikely(rc) )
                              unsigned long *ioreq_pfn,
                              unsigned long *bufioreq_pfn,
                              evtchn_port_t *bufioreq_port);
+int hvm_has_ioreq_server(struct domain *d);
int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
                                     uint32_t type, uint64_t start,
                                     uint64_t end);