*/
int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id, uint32_t *port);
int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id);
+int xc_mem_paging_resume(xc_interface *xch, domid_t domain_id);
int xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id,
uint64_t gfn);
int xc_mem_paging_evict(xc_interface *xch, domid_t domain_id, uint64_t gfn);
* Supported only on Intel EPT 64 bit processors.
*/
-/*
- * Enables mem_access and returns the mapped ring page.
- * Will return NULL on error.
- * Caller has to unmap this page when done.
- */
-void *xc_mem_access_enable(xc_interface *xch, domid_t domain_id, uint32_t *port);
-void *xc_mem_access_enable_introspection(xc_interface *xch, domid_t domain_id,
- uint32_t *port);
-int xc_mem_access_disable(xc_interface *xch, domid_t domain_id);
-int xc_mem_access_resume(xc_interface *xch, domid_t domain_id);
-
/*
* Set a range of memory to a specific access.
* Allowed types are XENMEM_access_default, XENMEM_access_n, any combination of
/***
* Monitor control operations.
+ *
+ * Enables the VM event monitor ring and returns the mapped ring page.
+ * This ring is used to deliver mem_access events, as well as a set of additional
+ * events that can be enabled with the xc_monitor_* functions.
+ *
+ * Will return NULL on error.
+ * Caller has to unmap this page when done.
*/
+void *xc_monitor_enable(xc_interface *xch, domid_t domain_id, uint32_t *port);
+int xc_monitor_disable(xc_interface *xch, domid_t domain_id);
+int xc_monitor_resume(xc_interface *xch, domid_t domain_id);
int xc_monitor_mov_to_cr0(xc_interface *xch, domid_t domain_id, bool enable,
bool sync, bool onchangeonly);
int xc_monitor_mov_to_cr3(xc_interface *xch, domid_t domain_id, bool enable,
#include "xc_private.h"
#include <xen/memory.h>
-void *xc_mem_access_enable(xc_interface *xch, domid_t domain_id, uint32_t *port)
-{
- return xc_vm_event_enable(xch, domain_id, HVM_PARAM_MONITOR_RING_PFN,
- port);
-}
-
-int xc_mem_access_disable(xc_interface *xch, domid_t domain_id)
-{
- return xc_vm_event_control(xch, domain_id,
- XEN_VM_EVENT_MONITOR_DISABLE,
- XEN_DOMCTL_VM_EVENT_OP_MONITOR,
- NULL);
-}
-
-int xc_mem_access_resume(xc_interface *xch, domid_t domain_id)
-{
- xen_mem_access_op_t mao =
- {
- .op = XENMEM_access_op_resume,
- .domid = domain_id
- };
-
- return do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
-}
-
int xc_set_mem_access(xc_interface *xch,
domid_t domain_id,
xenmem_access_t access,
}
return xc_vm_event_control(xch, domain_id,
- XEN_VM_EVENT_PAGING_ENABLE,
+ XEN_VM_EVENT_ENABLE,
XEN_DOMCTL_VM_EVENT_OP_PAGING,
port);
}
int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id)
{
return xc_vm_event_control(xch, domain_id,
- XEN_VM_EVENT_PAGING_DISABLE,
+ XEN_VM_EVENT_DISABLE,
+ XEN_DOMCTL_VM_EVENT_OP_PAGING,
+ NULL);
+}
+
+int xc_mem_paging_resume(xc_interface *xch, domid_t domain_id)
+{
+ return xc_vm_event_control(xch, domain_id,
+ XEN_VM_EVENT_RESUME,
XEN_DOMCTL_VM_EVENT_OP_PAGING,
NULL);
}
}
return xc_vm_event_control(xch, domid,
- XEN_VM_EVENT_SHARING_ENABLE,
+ XEN_VM_EVENT_ENABLE,
XEN_DOMCTL_VM_EVENT_OP_SHARING,
port);
}
domid_t domid)
{
return xc_vm_event_control(xch, domid,
- XEN_VM_EVENT_SHARING_DISABLE,
+ XEN_VM_EVENT_DISABLE,
XEN_DOMCTL_VM_EVENT_OP_SHARING,
NULL);
}
int xc_memshr_domain_resume(xc_interface *xch,
domid_t domid)
{
- xen_mem_sharing_op_t mso;
-
- memset(&mso, 0, sizeof(mso));
-
- mso.op = XENMEM_sharing_op_resume;
-
- return xc_memshr_memop(xch, domid, &mso);
+ return xc_vm_event_control(xch, domid,
+ XEN_VM_EVENT_RESUME,
+ XEN_DOMCTL_VM_EVENT_OP_SHARING,
+ NULL);
}
int xc_memshr_debug_gfn(xc_interface *xch,
#include "xc_private.h"
+void *xc_monitor_enable(xc_interface *xch, domid_t domain_id, uint32_t *port)
+{
+ return xc_vm_event_enable(xch, domain_id, HVM_PARAM_MONITOR_RING_PFN,
+ port);
+}
+
+int xc_monitor_disable(xc_interface *xch, domid_t domain_id)
+{
+ return xc_vm_event_control(xch, domain_id,
+ XEN_VM_EVENT_DISABLE,
+ XEN_DOMCTL_VM_EVENT_OP_MONITOR,
+ NULL);
+}
+
+int xc_monitor_resume(xc_interface *xch, domid_t domain_id)
+{
+ return xc_vm_event_control(xch, domain_id,
+ XEN_VM_EVENT_RESUME,
+ XEN_DOMCTL_VM_EVENT_OP_MONITOR,
+ NULL);
+}
+
int xc_monitor_mov_to_cr0(xc_interface *xch, domid_t domain_id, bool enable,
bool sync, bool onchangeonly)
{
switch ( param )
{
case HVM_PARAM_PAGING_RING_PFN:
- op = XEN_VM_EVENT_PAGING_ENABLE;
+ op = XEN_VM_EVENT_ENABLE;
mode = XEN_DOMCTL_VM_EVENT_OP_PAGING;
break;
case HVM_PARAM_MONITOR_RING_PFN:
- op = XEN_VM_EVENT_MONITOR_ENABLE;
+ op = XEN_VM_EVENT_ENABLE;
mode = XEN_DOMCTL_VM_EVENT_OP_MONITOR;
break;
case HVM_PARAM_SHARING_RING_PFN:
- op = XEN_VM_EVENT_SHARING_ENABLE;
+ op = XEN_VM_EVENT_ENABLE;
mode = XEN_DOMCTL_VM_EVENT_OP_SHARING;
break;
if ( mem_access_enable )
{
- rc = xc_mem_access_disable(xenaccess->xc_handle,
- xenaccess->vm_event.domain_id);
+ rc = xc_monitor_disable(xenaccess->xc_handle,
+ xenaccess->vm_event.domain_id);
if ( rc != 0 )
{
ERROR("Error tearing down domain xenaccess in xen");
/* Enable mem_access */
xenaccess->vm_event.ring_page =
- xc_mem_access_enable(xenaccess->xc_handle,
- xenaccess->vm_event.domain_id,
- &xenaccess->vm_event.evtchn_port);
+ xc_monitor_enable(xenaccess->xc_handle,
+ xenaccess->vm_event.domain_id,
+ &xenaccess->vm_event.evtchn_port);
if ( xenaccess->vm_event.ring_page == NULL )
{
switch ( errno ) {
}
break;
- case XENMEM_sharing_op_resume:
- {
- if ( !mem_sharing_enabled(d) )
- {
- rc = -EINVAL;
- goto out;
- }
-
- vm_event_resume(d, &d->vm_event->share);
- }
- break;
-
case XENMEM_sharing_op_debug_gfn:
{
unsigned long gfn = mso.u.debug.u.gfn;
switch ( mao.op )
{
- case XENMEM_access_op_resume:
- if ( unlikely(start_iter) )
- rc = -ENOSYS;
- else
- {
- vm_event_resume(d, &d->vm_event->monitor);
- rc = 0;
- }
- break;
case XENMEM_access_op_set_access:
rc = -EINVAL;
switch( vec->op )
{
- case XEN_VM_EVENT_PAGING_ENABLE:
+ case XEN_VM_EVENT_ENABLE:
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
}
break;
- case XEN_VM_EVENT_PAGING_DISABLE:
+ case XEN_VM_EVENT_DISABLE:
if ( ved->ring_page )
rc = vm_event_disable(d, ved);
break;
+ case XEN_VM_EVENT_RESUME:
+ if ( ved->ring_page )
+ vm_event_resume(d, ved);
+ else
+ rc = -ENODEV;
+ break;
+
default:
rc = -ENOSYS;
break;
switch( vec->op )
{
- case XEN_VM_EVENT_MONITOR_ENABLE:
+ case XEN_VM_EVENT_ENABLE:
rc = vm_event_enable(d, vec, ved, _VPF_mem_access,
HVM_PARAM_MONITOR_RING_PFN,
monitor_notification);
break;
- case XEN_VM_EVENT_MONITOR_DISABLE:
+ case XEN_VM_EVENT_DISABLE:
if ( ved->ring_page )
rc = vm_event_disable(d, ved);
break;
+ case XEN_VM_EVENT_RESUME:
+ if ( ved->ring_page )
+ vm_event_resume(d, ved);
+ else
+ rc = -ENODEV;
+ break;
+
default:
rc = -ENOSYS;
break;
switch( vec->op )
{
- case XEN_VM_EVENT_SHARING_ENABLE:
+ case XEN_VM_EVENT_ENABLE:
rc = -EOPNOTSUPP;
/* pvh fixme: p2m_is_foreign types need addressing */
if ( is_pvh_vcpu(current) || is_pvh_domain(hardware_domain) )
mem_sharing_notification);
break;
- case XEN_VM_EVENT_SHARING_DISABLE:
+ case XEN_VM_EVENT_DISABLE:
if ( ved->ring_page )
rc = vm_event_disable(d, ved);
break;
+ case XEN_VM_EVENT_RESUME:
+ if ( ved->ring_page )
+ vm_event_resume(d, ved);
+ else
+ rc = -ENODEV;
+ break;
+
default:
rc = -ENOSYS;
break;
/* XEN_DOMCTL_vm_event_op */
+/*
+ * There are currently three rings available for VM events:
+ * sharing, monitor and paging. This hypercall allows one to
+ * control these rings (enable/disable), as well as to signal
+ * to the hypervisor to pull responses (resume) from the given
+ * ring.
+ */
+#define XEN_VM_EVENT_ENABLE 0
+#define XEN_VM_EVENT_DISABLE 1
+#define XEN_VM_EVENT_RESUME 2
+
/*
* Domain memory paging
* Page memory in and out.
*/
#define XEN_DOMCTL_VM_EVENT_OP_PAGING 1
-#define XEN_VM_EVENT_PAGING_ENABLE 0
-#define XEN_VM_EVENT_PAGING_DISABLE 1
-
/*
* Monitor helper.
*
* of every page in a domain. When one of these permissions--independent,
* read, write, and execute--is violated, the VCPU is paused and a memory event
* is sent with what happened. The memory event handler can then resume the
- * VCPU and redo the access with a XENMEM_access_op_resume hypercall.
+ * VCPU and redo the access with a XEN_VM_EVENT_RESUME option.
*
* See public/vm_event.h for the list of available events that can be
* subscribed to via the monitor interface.
*
- * To enable MOV-TO-MSR interception on x86, it is necessary to enable this
- * interface with the XEN_VM_EVENT_MONITOR_ENABLE_INTROSPECTION
- * operator.
- *
- * The XEN_VM_EVENT_MONITOR_ENABLE* domctls return several
 * The XEN_VM_EVENT_MONITOR_* domctls return
* non-standard error codes to indicate why access could not be enabled:
* ENODEV - host lacks HAP support (EPT/NPT) or HAP is disabled in guest
* EBUSY - guest has or had access enabled, ring buffer still active
*
*/
-#define XEN_DOMCTL_VM_EVENT_OP_MONITOR 2
-
-#define XEN_VM_EVENT_MONITOR_ENABLE 0
-#define XEN_VM_EVENT_MONITOR_DISABLE 1
+#define XEN_DOMCTL_VM_EVENT_OP_MONITOR 2
/*
* Sharing ENOMEM helper.
*/
#define XEN_DOMCTL_VM_EVENT_OP_SHARING 3
-#define XEN_VM_EVENT_SHARING_ENABLE 0
-#define XEN_VM_EVENT_SHARING_DISABLE 1
-
/* Use for teardown/setup of helper<->hypervisor interface for paging,
* access and sharing.*/
struct xen_domctl_vm_event_op {
- uint32_t op; /* XEN_VM_EVENT_*_* */
+ uint32_t op; /* XEN_VM_EVENT_* */
uint32_t mode; /* XEN_DOMCTL_VM_EVENT_OP_* */
uint32_t port; /* OUT: event channel for ring */
DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);
#define XENMEM_access_op 21
-#define XENMEM_access_op_resume 0
-#define XENMEM_access_op_set_access 1
-#define XENMEM_access_op_get_access 2
-#define XENMEM_access_op_enable_emulate 3
-#define XENMEM_access_op_disable_emulate 4
+#define XENMEM_access_op_set_access 0
+#define XENMEM_access_op_get_access 1
+#define XENMEM_access_op_enable_emulate 2
+#define XENMEM_access_op_disable_emulate 3
typedef enum {
XENMEM_access_n,
#define XENMEM_sharing_op_nominate_gfn 0
#define XENMEM_sharing_op_nominate_gref 1
#define XENMEM_sharing_op_share 2
-#define XENMEM_sharing_op_resume 3
-#define XENMEM_sharing_op_debug_gfn 4
-#define XENMEM_sharing_op_debug_mfn 5
-#define XENMEM_sharing_op_debug_gref 6
-#define XENMEM_sharing_op_add_physmap 7
-#define XENMEM_sharing_op_audit 8
+#define XENMEM_sharing_op_debug_gfn 3
+#define XENMEM_sharing_op_debug_mfn 4
+#define XENMEM_sharing_op_debug_gref 5
+#define XENMEM_sharing_op_add_physmap 6
+#define XENMEM_sharing_op_audit 7
#define XENMEM_SHARING_OP_S_HANDLE_INVALID (-10)
#define XENMEM_SHARING_OP_C_HANDLE_INVALID (-9)