struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
int rc;
+ if ( iorp->page )
+ {
+ /*
+ * If a page has already been allocated (which will happen on
+ * demand if hvm_get_ioreq_server_frame() is called), then
+ * mapping a guest frame is not permitted.
+ */
+ if ( gfn_eq(iorp->gfn, INVALID_GFN) )
+ return -EPERM;
+
+ return 0;
+ }
+
if ( d->is_dying )
return -EINVAL;
return rc;
}
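+/*
+ * Allocate a page, owned by the emulating domain, to back the
+ * (buffered) ioreq structures, and map it into Xen's address space.
+ */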
+static int hvm_alloc_ioreq_mfn(struct hvm_ioreq_server *s, bool buf)
+{
+ struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+
+ if ( iorp->page )
+ {
+ /*
+ * If a guest frame has already been mapped (which may happen
+ * on demand if hvm_get_ioreq_server_info() is called), then
+ * allocating a page is not permitted.
+ */
+ if ( !gfn_eq(iorp->gfn, INVALID_GFN) )
+ return -EPERM;
+
+ return 0;
+ }
+
+ /*
+ * Allocated IOREQ server pages are assigned to the emulating
+ * domain, not the target domain. This is safe because the emulating
+ * domain cannot be destroyed until the ioreq server is destroyed.
+ * Also, we must use MEMF_no_refcount, otherwise page allocation
+ * could fail if the emulating domain has already reached its
+ * maximum allocation.
+ */
+ iorp->page = alloc_domheap_page(s->emulator, MEMF_no_refcount);
+
+ if ( !iorp->page )
+ return -ENOMEM;
+
+ if ( !get_page_type(iorp->page, PGT_writable_page) )
+ goto fail1;
+
+ iorp->va = __map_domain_page_global(iorp->page);
+ if ( !iorp->va )
+ goto fail2;
+
+ clear_page(iorp->va);
+ return 0;
+
+ fail2:
+ put_page_type(iorp->page);
+
+ fail1:
+ put_page(iorp->page);
+ iorp->page = NULL;
+
+ return -ENOMEM;
+}
+
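+/* Undo hvm_alloc_ioreq_mfn(): drop the global mapping and free the page. */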
+static void hvm_free_ioreq_mfn(struct hvm_ioreq_server *s, bool buf)
+{
+ struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+
+ if ( !iorp->page )
+ return;
+
+ unmap_domain_page_global(iorp->va);
+ iorp->va = NULL;
+
+ put_page_and_type(iorp->page);
+ iorp->page = NULL;
+}
+
bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
{
const struct hvm_ioreq_server *s;
hvm_unmap_ioreq_gfn(s, false);
}
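+/*
+ * Allocate the page backing the synchronous ioreq structures and, if
+ * buffered ioreqs are handled, the buffered ioreq page as well; on
+ * failure any page already allocated is freed again.
+ */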
+static int hvm_ioreq_server_alloc_pages(struct hvm_ioreq_server *s)
+{
+ int rc;
+
+ rc = hvm_alloc_ioreq_mfn(s, false);
+
+ if ( !rc && (s->bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF) )
+ rc = hvm_alloc_ioreq_mfn(s, true);
+
+ if ( rc )
+ hvm_free_ioreq_mfn(s, false);
+
+ return rc;
+}
+
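+/*
+ * Free both ioreq server pages; each call is a no-op if the page was
+ * never allocated.
+ */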
+static void hvm_ioreq_server_free_pages(struct hvm_ioreq_server *s)
+{
+ hvm_free_ioreq_mfn(s, true);
+ hvm_free_ioreq_mfn(s, false);
+}
+
static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s)
{
unsigned int i;
{
ASSERT(!s->enabled);
hvm_ioreq_server_remove_all_vcpus(s);
+
+ /*
+ * NOTE: It is safe to call both hvm_ioreq_server_unmap_pages() and
+ * hvm_ioreq_server_free_pages() in that order.
+ * This is because the former will do nothing if the pages
+ * are not mapped, leaving them to be freed by the latter.
+ * However, if the pages are mapped, then the former will set
+ * the page_info pointer to NULL, meaning the latter will do
+ * nothing.
+ */
hvm_ioreq_server_unmap_pages(s);
+ hvm_ioreq_server_free_pages(s);
+
hvm_ioreq_server_free_rangesets(s);
put_domain(s->emulator);
return rc;
}
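+/*
+ * Return, in *mfn, the MFN of the page backing the buffered ioreq
+ * structures (idx == XENMEM_resource_ioreq_server_frame_bufioreq) or
+ * the synchronous ioreq structures
+ * (idx == XENMEM_resource_ioreq_server_frame_ioreq(0)) of ioreq
+ * server <id>. The pages are allocated on demand and only the
+ * emulating domain may make this call.
+ */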
+int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
+ unsigned long idx, mfn_t *mfn)
+{
+ struct hvm_ioreq_server *s;
+ int rc;
+
+ if ( id == DEFAULT_IOSERVID )
+ return -EOPNOTSUPP;
+
+ if ( !is_hvm_domain(d) )
+ return -EINVAL;
+
+ spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+
+ s = get_ioreq_server(d, id);
+
+ rc = -ENOENT;
+ if ( !s )
+ goto out;
+
+ ASSERT(!IS_DEFAULT(s));
+
+ rc = -EPERM;
+ if ( s->emulator != current->domain )
+ goto out;
+
+ rc = hvm_ioreq_server_alloc_pages(s);
+ if ( rc )
+ goto out;
+
+ switch ( idx )
+ {
+ case XENMEM_resource_ioreq_server_frame_bufioreq:
+ rc = -ENOENT;
+ if ( !HANDLE_BUFIOREQ(s) )
+ goto out;
+
+ *mfn = _mfn(page_to_mfn(s->bufioreq.page));
+ rc = 0;
+ break;
+
+ case XENMEM_resource_ioreq_server_frame_ioreq(0):
+ *mfn = _mfn(page_to_mfn(s->ioreq.page));
+ rc = 0;
+ break;
+
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ out:
+ spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+
+ return rc;
+}
+
int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
uint32_t type, uint64_t start,
uint64_t end)
#include <asm/io_apic.h>
#include <asm/pci.h>
#include <asm/guest.h>
+#include <asm/hvm/ioreq.h>
#include <asm/hvm/grant_table.h>
#include <asm/pv/grant_table.h>
return rc;
}
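+/*
+ * Back-end of XENMEM_acquire_resource: for an ioreq server resource,
+ * translate each requested frame index into the MFN of the
+ * corresponding ioreq server page. The pages are owned by the
+ * emulating (calling) domain, which is reported back to the caller
+ * via XENMEM_rsrc_acq_caller_owned.
+ */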
+int arch_acquire_resource(struct domain *d, unsigned int type,
+ unsigned int id, unsigned long frame,
+ unsigned int nr_frames, xen_pfn_t mfn_list[],
+ unsigned int *flags)
+{
+ int rc;
+
+ switch ( type )
+ {
+ case XENMEM_resource_ioreq_server:
+ {
+ ioservid_t ioservid = id;
+ unsigned int i;
+
+ rc = -EINVAL;
+ if ( id != (unsigned int)ioservid )
+ break;
+
+ rc = 0;
+ for ( i = 0; i < nr_frames; i++ )
+ {
+ mfn_t mfn;
+
+ rc = hvm_get_ioreq_server_frame(d, id, frame + i, &mfn);
+ if ( rc )
+ break;
+
+ mfn_list[i] = mfn_x(mfn);
+ }
+
+ /*
+ * The frames will have been assigned to the domain that created
+ * the ioreq server.
+ */
+ *flags |= XENMEM_rsrc_acq_caller_owned;
+ break;
+ }
+
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ return rc;
+}
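+/*
+ * Illustrative sketch: a caller in the emulating domain would reach
+ * the handler above via XENMEM_acquire_resource with something along
+ * the lines of
+ *
+ *     struct xen_mem_acquire_resource xmar = {
+ *         .domid = target_domid,
+ *         .type = XENMEM_resource_ioreq_server,
+ *         .id = ioservid,
+ *         .frame = XENMEM_resource_ioreq_server_frame_ioreq(0),
+ *         .nr_frames = 1,
+ *     };
+ *
+ * with xmar.frame_list pointing at a buffer to receive the frame
+ * number. The field names follow the public xen_mem_acquire_resource
+ * layout and are given here as an assumption, not as part of this
+ * change.
+ */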
+
long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
int rc;