case XEN_DOMCTL_mem_event_op:
{
- ret = xsm_mem_event(d);
- if ( !ret )
- ret = mem_event_domctl(d, &domctl->u.mem_event_op,
- guest_handle_cast(u_domctl, void));
+ ret = mem_event_domctl(d, &domctl->u.mem_event_op,
+ guest_handle_cast(u_domctl, void));
copyback = 1;
}
break;
if ( current->domain == d )
break;
- ret = xsm_mem_event(d);
+ ret = xsm_mem_event_setup(d);
if ( !ret ) {
p2m = p2m_get_hostp2m(d);
p2m->access_required = domctl->u.access_required.access_required;
#include <asm/mem_paging.h>
#include <asm/mem_access.h>
#include <asm/mem_sharing.h>
+#include <xsm/xsm.h>
/* for public/io/ring.h macros */
#define xen_mb() mb()
mem_sharing_sharing_resume(v->domain);
}
-struct domain *get_mem_event_op_target(uint32_t domain, int *rc)
-{
- struct domain *d;
-
- /* Get the target domain */
- *rc = rcu_lock_remote_target_domain_by_id(domain, &d);
- if ( *rc != 0 )
- return NULL;
-
- /* Not dying? */
- if ( d->is_dying )
- {
- rcu_unlock_domain(d);
- *rc = -EINVAL;
- return NULL;
- }
-
- return d;
-}
-
int do_mem_event_op(int op, uint32_t domain, void *arg)
{
int ret;
struct domain *d;
- d = get_mem_event_op_target(domain, &ret);
- if ( !d )
+ ret = rcu_lock_live_remote_domain_by_id(domain, &d);
+ if ( ret )
return ret;
+ ret = xsm_mem_event_op(d, op);
+ if ( ret )
+ goto out;
+
switch (op)
{
case XENMEM_paging_op:
ret = -ENOSYS;
}
+ out:
rcu_unlock_domain(d);
return ret;
}
{
int rc;
+ rc = xsm_mem_event_control(d, mec->mode, mec->op);
+ if ( rc )
+ return rc;
+
if ( unlikely(d == current->domain) )
{
gdprintk(XENLOG_INFO, "Tried to do a memory event op on itself.\n");
return -EINVAL;
}
- /* TODO: XSM hook */
-#if 0
- rc = xsm_mem_event_control(d, mec->op);
- if ( rc )
- return rc;
-#endif
-
rc = -ENOSYS;
switch ( mec->mode )
#include <asm/atomic.h>
#include <xen/rcupdate.h>
#include <asm/event.h>
+#include <xsm/xsm.h>
#include "mm-locks.h"
if ( !mem_sharing_enabled(d) )
return -EINVAL;
- cd = get_mem_event_op_target(mec->u.share.client_domain, &rc);
- if ( !cd )
+ rc = rcu_lock_live_remote_domain_by_id(mec->u.share.client_domain,
+ &cd);
+ if ( rc )
return rc;
+ rc = xsm_mem_sharing_op(d, cd, mec->op);
+ if ( rc )
+ {
+ rcu_unlock_domain(cd);
+ return rc;
+ }
+
if ( !mem_sharing_enabled(cd) )
{
rcu_unlock_domain(cd);
if ( !mem_sharing_enabled(d) )
return -EINVAL;
- cd = get_mem_event_op_target(mec->u.share.client_domain, &rc);
- if ( !cd )
+ rc = rcu_lock_live_remote_domain_by_id(mec->u.share.client_domain,
+ &cd);
+ if ( rc )
return rc;
+ rc = xsm_mem_sharing_op(d, cd, mec->op);
+ if ( rc )
+ {
+ rcu_unlock_domain(cd);
+ return rc;
+ }
+
if ( !mem_sharing_enabled(cd) )
{
rcu_unlock_domain(cd);
return 0;
}
+int rcu_lock_live_remote_domain_by_id(domid_t dom, struct domain **d)
+{
+ int rv;
+ rv = rcu_lock_remote_domain_by_id(dom, d);
+ if ( rv )
+ return rv;
+ if ( (*d)->is_dying )
+ {
+ rcu_unlock_domain(*d);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int domain_kill(struct domain *d)
{
int rc = 0;
int mem_event_get_response(struct domain *d, struct mem_event_domain *med,
mem_event_response_t *rsp);
-struct domain *get_mem_event_op_target(uint32_t domain, int *rc);
int do_mem_event_op(int op, uint32_t domain, void *arg);
int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
XEN_GUEST_HANDLE_PARAM(void) u_domctl);
*/
int rcu_lock_remote_domain_by_id(domid_t dom, struct domain **d);
+/*
+ * As rcu_lock_remote_domain_by_id(), but returns -EINVAL if the domain
+ * is dying.
+ */
+int rcu_lock_live_remote_domain_by_id(domid_t dom, struct domain **d);
+
/* Finish a RCU critical region started by rcu_lock_domain_by_id(). */
static inline void rcu_unlock_domain(struct domain *d)
{
return 0;
}
-static XSM_INLINE int xsm_mem_event(struct domain *d)
+static XSM_INLINE int xsm_mem_event_setup(struct domain *d)
{
return 0;
}
+static XSM_INLINE int xsm_mem_event_control(struct domain *d, int mode, int op)
+{
+ if ( !IS_PRIV(current->domain) )
+ return -EPERM;
+ return 0;
+}
+
+static XSM_INLINE int xsm_mem_event_op(struct domain *d, int op)
+{
+ if ( !IS_PRIV_FOR(current->domain, d) )
+ return -EPERM;
+ return 0;
+}
+
static XSM_INLINE int xsm_mem_sharing(struct domain *d)
{
return 0;
}
+static XSM_INLINE int xsm_mem_sharing_op(struct domain *d, struct domain *cd, int op)
+{
+ if ( !IS_PRIV_FOR(current->domain, cd) )
+ return -EPERM;
+ return 0;
+}
+
static XSM_INLINE int xsm_apic(struct domain *d, int cmd)
{
if ( !IS_PRIV(d) )
int (*hvm_set_isa_irq_level) (struct domain *d);
int (*hvm_set_pci_link_route) (struct domain *d);
int (*hvm_inject_msi) (struct domain *d);
- int (*mem_event) (struct domain *d);
+ int (*mem_event_setup) (struct domain *d);
+ int (*mem_event_control) (struct domain *d, int mode, int op);
+ int (*mem_event_op) (struct domain *d, int op);
int (*mem_sharing) (struct domain *d);
+ int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
int (*apic) (struct domain *d, int cmd);
int (*xen_settime) (void);
int (*memtype) (uint32_t access);
return xsm_ops->hvm_inject_msi(d);
}
-static inline int xsm_mem_event (struct domain *d)
+static inline int xsm_mem_event_setup (struct domain *d)
{
- return xsm_ops->mem_event(d);
+ return xsm_ops->mem_event_setup(d);
+}
+
+static inline int xsm_mem_event_control (struct domain *d, int mode, int op)
+{
+ return xsm_ops->mem_event_control(d, mode, op);
+}
+
+static inline int xsm_mem_event_op (struct domain *d, int op)
+{
+ return xsm_ops->mem_event_op(d, op);
}
static inline int xsm_mem_sharing (struct domain *d)
return xsm_ops->mem_sharing(d);
}
+static inline int xsm_mem_sharing_op (struct domain *d, struct domain *cd, int op)
+{
+ return xsm_ops->mem_sharing_op(d, cd, op);
+}
+
static inline int xsm_apic (struct domain *d, int cmd)
{
return xsm_ops->apic(d, cmd);
set_to_dummy_if_null(ops, hvm_set_isa_irq_level);
set_to_dummy_if_null(ops, hvm_set_pci_link_route);
set_to_dummy_if_null(ops, hvm_inject_msi);
- set_to_dummy_if_null(ops, mem_event);
+ set_to_dummy_if_null(ops, mem_event_setup);
+ set_to_dummy_if_null(ops, mem_event_control);
+ set_to_dummy_if_null(ops, mem_event_op);
set_to_dummy_if_null(ops, mem_sharing);
+ set_to_dummy_if_null(ops, mem_sharing_op);
set_to_dummy_if_null(ops, apic);
set_to_dummy_if_null(ops, xen_settime);
set_to_dummy_if_null(ops, memtype);
return current_has_perm(d, SECCLASS_HVM, HVM__SEND_IRQ);
}
-static int flask_mem_event(struct domain *d)
+static int flask_mem_event_setup(struct domain *d)
+{
+ return current_has_perm(d, SECCLASS_HVM, HVM__MEM_EVENT);
+}
+
+static int flask_mem_event_control(struct domain *d, int mode, int op)
+{
+ return current_has_perm(d, SECCLASS_HVM, HVM__MEM_EVENT);
+}
+
+static int flask_mem_event_op(struct domain *d, int op)
{
return current_has_perm(d, SECCLASS_HVM, HVM__MEM_EVENT);
}
return current_has_perm(d, SECCLASS_HVM, HVM__MEM_SHARING);
}
+static int flask_mem_sharing_op(struct domain *d, struct domain *cd, int op)
+{
+ int rc = current_has_perm(cd, SECCLASS_HVM, HVM__MEM_SHARING);
+ if ( rc )
+ return rc;
+ return domain_has_perm(d, cd, SECCLASS_HVM, HVM__SHARE_MEM);
+}
+
static int flask_apic(struct domain *d, int cmd)
{
u32 perm;
.hvm_set_isa_irq_level = flask_hvm_set_isa_irq_level,
.hvm_set_pci_link_route = flask_hvm_set_pci_link_route,
.hvm_inject_msi = flask_hvm_inject_msi,
- .mem_event = flask_mem_event,
+ .mem_event_setup = flask_mem_event_setup,
+ .mem_event_control = flask_mem_event_control,
+ .mem_event_op = flask_mem_event_op,
.mem_sharing = flask_mem_sharing,
+ .mem_sharing_op = flask_mem_sharing_op,
.apic = flask_apic,
.xen_settime = flask_xen_settime,
.memtype = flask_memtype,
mem_sharing
audit_p2m
send_irq
+ share_mem
}
class event