    }
    break;
-    case XEN_DOMCTL_mem_event_op:
-    {
-        ret = mem_event_domctl(d, &domctl->u.mem_event_op,
-                               guest_handle_cast(u_domctl, void));
-        copyback = 1;
-    }
-    break;
-
    case XEN_DOMCTL_mem_sharing_op:
    {
        ret = mem_sharing_domctl(d, &domctl->u.mem_sharing_op);
    }
}
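+/*
+ * Arch-specific introspection setup: mark the domain as introspected and
+ * have the HVM backend intercept guest MSR writes, where the backend
+ * provides that hook.
+ */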
+void p2m_setup_introspection(struct domain *d)
+{
+    if ( hvm_funcs.enable_msr_exit_interception )
+    {
+        d->arch.hvm_domain.introspection_enabled = 1;
+        hvm_funcs.enable_msr_exit_interception(d);
+    }
+}
+
bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
                            struct npfec npfec,
                            mem_event_request_t **req_ptr)
        break;
    }
-    case XENMEM_access_op:
-        rc = mem_access_memop(cmd, guest_handle_cast(arg, xen_mem_access_op_t));
-        break;
-
    case XENMEM_sharing_op:
    {
        xen_mem_sharing_op_t mso;
        break;
    }
-    case XENMEM_access_op:
-        rc = mem_access_memop(cmd, guest_handle_cast(arg, xen_mem_access_op_t));
-        break;
-
    case XENMEM_sharing_op:
    {
        xen_mem_sharing_op_t mso;
#include <xen/guest_access.h>
#include <xen/sched.h>
#include <xen/event.h>
+#include <xen/mem_access.h>
#include <asm/current.h>
#include <compat/memory.h>
            break;
        }
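+        /*
+         * xen_mem_access_op_t has the same layout for native and compat
+         * guests, so the compat handle is passed through untranslated.
+         */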
+        case XENMEM_access_op:
+            rc = mem_access_memop(cmd,
+                                  guest_handle_cast(compat,
+                                                    xen_mem_access_op_t));
+            break;
+
        case XENMEM_add_to_physmap_batch:
            start_extent = end_extent;
            break;
#include <xen/bitmap.h>
#include <xen/paging.h>
#include <xen/hypercall.h>
+#include <xen/mem_event.h>
#include <asm/current.h>
#include <asm/irq.h>
#include <asm/page.h>
    }
    break;
+    case XEN_DOMCTL_mem_event_op:
+        ret = mem_event_domctl(d, &op->u.mem_event_op,
+                               guest_handle_cast(u_domctl, void));
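+        /*
+         * mem_event_domctl() returns output in the op (e.g. the ring's
+         * event channel port), so copy the domctl back to the caller.
+         */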
+        copyback = 1;
+        break;
+
    case XEN_DOMCTL_disable_migrate:
    {
        d->disable_migrate = op->u.disable_migrate.disable;
                                  HVM_PARAM_ACCESS_RING_PFN,
                                  mem_access_notification);
-            if ( mec->op != XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE &&
-                 rc == 0 && hvm_funcs.enable_msr_exit_interception )
-            {
-                d->arch.hvm_domain.introspection_enabled = 1;
-                hvm_funcs.enable_msr_exit_interception(d);
-            }
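+            /* Arch-specific setup: MSR interception on x86, no-op on ARM. */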
+            if ( mec->op == XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE_INTROSPECTION
+                 && !rc )
+                p2m_setup_introspection(d);
+
        }
        break;
#include <xen/errno.h>
#include <xen/tmem.h>
#include <xen/tmem_xen.h>
+#include <xen/numa.h>
+#include <xen/mem_access.h>
+#include <xen/trace.h>
#include <asm/current.h>
#include <asm/hardirq.h>
#include <asm/p2m.h>
-#include <xen/numa.h>
#include <public/memory.h>
#include <xsm/xsm.h>
-#include <xen/trace.h>
struct memop_args {
    /* INPUT */
        break;
    }
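+    /* Moved here from the per-arch handlers so all architectures share it. */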
+    case XENMEM_access_op:
+        rc = mem_access_memop(cmd, guest_handle_cast(arg, xen_mem_access_op_t));
+        break;
+
    case XENMEM_claim_pages:
        if ( copy_from_guest(&reservation, arg, 1) )
            return -EFAULT;
/* Not supported on ARM. */
};
+static inline
+void p2m_setup_introspection(struct domain *d)
+{
+    /* No special setup on ARM. */
+}
+
#define p2m_is_foreign(_t) ((_t) == p2m_map_foreign)
#define p2m_is_ram(_t) ((_t) == p2m_ram_rw || (_t) == p2m_ram_ro)
void p2m_mem_event_emulate_check(struct vcpu *v,
                                 const mem_event_response_t *rsp);
+/* Enable arch specific introspection options (such as MSR interception). */
+void p2m_setup_introspection(struct domain *d);
+
/*
 * Internal functions, only called by other p2m code
 */