#include "xc_private.h"
#include <xen/memory.h>
-int xc_mem_access_enable(xc_interface *xch, domid_t domain_id,
- uint32_t *port)
+void *xc_mem_access_enable(xc_interface *xch, domid_t domain_id, uint32_t *port)
{
- if ( !port )
- {
- errno = EINVAL;
- return -1;
- }
-
- return xc_mem_event_control(xch, domain_id,
- XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE,
- XEN_DOMCTL_MEM_EVENT_OP_ACCESS,
- port);
+ return xc_mem_event_enable(xch, domain_id, HVM_PARAM_ACCESS_RING_PFN, port);
}
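The wrapper now returns the mapped ring page directly rather than an int rc. A minimal caller-side sketch of the new contract (xch, domid and the error handling here are illustrative, not part of the patch):

    uint32_t port;
    void *ring_page = xc_mem_access_enable(xch, domid, &port);

    if ( ring_page == NULL )
        PERROR("Failed to enable mem_access");  /* errno is set by libxc */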
int xc_mem_access_disable(xc_interface *xch, domid_t domain_id)
return do_memory_op(xch, mode, &meo, sizeof(meo));
}
+void *xc_mem_event_enable(xc_interface *xch, domid_t domain_id, int param,
+ uint32_t *port)
+{
+ void *ring_page = NULL;
+ unsigned long ring_pfn, mmap_pfn;
+ unsigned int op, mode;
+ int rc1, rc2, saved_errno;
+
+ if ( !port )
+ {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ /* Pause the domain for ring page setup */
+ rc1 = xc_domain_pause(xch, domain_id);
+ if ( rc1 != 0 )
+ {
+ PERROR("Unable to pause domain\n");
+ return NULL;
+ }
+
+ /* Get the pfn of the ring page */
+ rc1 = xc_get_hvm_param(xch, domain_id, param, &ring_pfn);
+ if ( rc1 != 0 )
+ {
+ PERROR("Failed to get pfn of ring page\n");
+ goto out;
+ }
+
+ mmap_pfn = ring_pfn;
+ ring_page = xc_map_foreign_batch(xch, domain_id, PROT_READ | PROT_WRITE,
+ &mmap_pfn, 1);
+ if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
+ {
+ /* Map failed, populate ring page */
+ rc1 = xc_domain_populate_physmap_exact(xch, domain_id, 1, 0, 0,
+ &ring_pfn);
+ if ( rc1 != 0 )
+ {
+ PERROR("Failed to populate ring pfn\n");
+ goto out;
+ }
+
+ mmap_pfn = ring_pfn;
+ ring_page = xc_map_foreign_batch(xch, domain_id, PROT_READ | PROT_WRITE,
+ &mmap_pfn, 1);
+ if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
+ {
+ PERROR("Could not map the ring page\n");
+ goto out;
+ }
+ }
+
+ switch ( param )
+ {
+ case HVM_PARAM_PAGING_RING_PFN:
+ op = XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE;
+ mode = XEN_DOMCTL_MEM_EVENT_OP_PAGING;
+ break;
+
+ case HVM_PARAM_ACCESS_RING_PFN:
+ op = XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE;
+ mode = XEN_DOMCTL_MEM_EVENT_OP_ACCESS;
+ break;
+
+ case HVM_PARAM_SHARING_RING_PFN:
+ op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE;
+ mode = XEN_DOMCTL_MEM_EVENT_OP_SHARING;
+ break;
+
+ /*
+ * Guard against an HVM_PARAM that is valid in general but is not a
+ * mem_event ring parameter.
+ */
+ default:
+ errno = EINVAL;
+ rc1 = -1;
+ goto out;
+ }
+
+ rc1 = xc_mem_event_control(xch, domain_id, op, mode, port);
+ if ( rc1 != 0 )
+ {
+ PERROR("Failed to enable mem_event\n");
+ goto out;
+ }
+
+ /* Remove the ring_pfn from the guest's physmap */
+ rc1 = xc_domain_decrease_reservation_exact(xch, domain_id, 1, 0, &ring_pfn);
+ if ( rc1 != 0 )
+ PERROR("Failed to remove ring page from guest physmap");
+
+ out:
+ saved_errno = errno;
+
+ rc2 = xc_domain_unpause(xch, domain_id);
+ if ( rc1 != 0 || rc2 != 0 )
+ {
+ if ( rc2 != 0 )
+ {
+ if ( rc1 == 0 )
+ saved_errno = errno;
+ PERROR("Unable to unpause domain");
+ }
+
+ if ( ring_page )
+ munmap(ring_page, XC_PAGE_SIZE);
+ ring_page = NULL;
+
+ errno = saved_errno;
+ }
+
+ return ring_page;
+}
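The page returned by xc_mem_event_enable() is the shared ring itself; a consumer is expected to initialise it with the standard ring macros before use, as the xen-access changes below do. A sketch, assuming the mem_event ring types from xen/mem_event.h and the macros from xen/io/ring.h:

    mem_event_back_ring_t back_ring;
    mem_event_sring_t *sring;

    sring = xc_mem_event_enable(xch, domid, HVM_PARAM_ACCESS_RING_PFN, &port);
    if ( sring != NULL )
    {
        SHARED_RING_INIT(sring);                         /* reset producer/consumer */
        BACK_RING_INIT(&back_ring, sring, XC_PAGE_SIZE); /* consumer-side view */
    }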
int xc_mem_event_memop(xc_interface *xch, domid_t domain_id,
unsigned int op, unsigned int mode,
uint64_t gfn, void *buffer);
+/*
+ * Enables mem_event and returns the mapped ring page indicated by param.
+ * param can be HVM_PARAM_PAGING_RING_PFN, HVM_PARAM_ACCESS_RING_PFN or
+ * HVM_PARAM_SHARING_RING_PFN. Returns NULL on error.
+ */
+void *xc_mem_event_enable(xc_interface *xch, domid_t domain_id, int param,
+ uint32_t *port);
/**
* Access tracking operations.
* Supported only on Intel EPT 64 bit processors.
*/
-int xc_mem_access_enable(xc_interface *xch, domid_t domain_id, uint32_t *port);
+
+/*
+ * Enables mem_access and returns the mapped ring page.
+ * Returns NULL on error.
+ * The caller must unmap this page when done.
+ */
+void *xc_mem_access_enable(xc_interface *xch, domid_t domain_id, uint32_t *port);
int xc_mem_access_disable(xc_interface *xch, domid_t domain_id);
int xc_mem_access_resume(xc_interface *xch, domid_t domain_id);
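Because the enable call leaves the ring page mapped in the caller, teardown now has to unmap it explicitly. Pairing the unmap with the disable call might look like this (a sketch per the comment above; munmap(2) comes from sys/mman.h):

    munmap(ring_page, XC_PAGE_SIZE);
    xc_mem_access_disable(xch, domid);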
xenaccess_t *xenaccess = 0;
xc_interface *xch;
int rc;
- unsigned long ring_pfn, mmap_pfn;
xch = xc_interface_open(NULL, NULL, 0);
if ( !xch )
/* Initialise lock */
mem_event_ring_lock_init(&xenaccess->mem_event);
- /* Map the ring page */
- xc_get_hvm_param(xch, xenaccess->mem_event.domain_id,
- HVM_PARAM_ACCESS_RING_PFN, &ring_pfn);
- mmap_pfn = ring_pfn;
- xenaccess->mem_event.ring_page =
- xc_map_foreign_batch(xch, xenaccess->mem_event.domain_id,
- PROT_READ | PROT_WRITE, &mmap_pfn, 1);
- if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
- {
- /* Map failed, populate ring page */
- rc = xc_domain_populate_physmap_exact(xenaccess->xc_handle,
- xenaccess->mem_event.domain_id,
- 1, 0, 0, &ring_pfn);
- if ( rc != 0 )
- {
- PERROR("Failed to populate ring gfn\n");
- goto err;
- }
-
- mmap_pfn = ring_pfn;
- xenaccess->mem_event.ring_page =
- xc_map_foreign_batch(xch, xenaccess->mem_event.domain_id,
- PROT_READ | PROT_WRITE, &mmap_pfn, 1);
- if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
- {
- PERROR("Could not map the ring page\n");
- goto err;
- }
- }
-
- /* Initialise Xen */
- rc = xc_mem_access_enable(xenaccess->xc_handle, xenaccess->mem_event.domain_id,
- &xenaccess->mem_event.evtchn_port);
- if ( rc != 0 )
+ /* Enable mem_access */
+ xenaccess->mem_event.ring_page =
+ xc_mem_access_enable(xenaccess->xc_handle,
+ xenaccess->mem_event.domain_id,
+ &xenaccess->mem_event.evtchn_port);
+ if ( xenaccess->mem_event.ring_page == NULL )
{
switch ( errno ) {
case EBUSY:
ERROR("EPT not supported for this guest");
break;
default:
- perror("Error initialising shared page");
+ perror("Error enabling mem_access");
break;
}
goto err;
(mem_event_sring_t *)xenaccess->mem_event.ring_page,
XC_PAGE_SIZE);
- /* Now that the ring is set, remove it from the guest's physmap */
- if ( xc_domain_decrease_reservation_exact(xch,
- xenaccess->mem_event.domain_id, 1, 0, &ring_pfn) )
- PERROR("Failed to remove ring from guest physmap");
-
/* Get domaininfo */
xenaccess->domain_info = malloc(sizeof(xc_domaininfo_t));
if ( xenaccess->domain_info == NULL )