domain_event_comms($1, $2)
allow $1 $2:grant { map_read map_write copy unmap };
allow $2 $1:grant { map_read map_write copy unmap };
+ allow $1 $2:mmu share_mem;
+ allow $2 $1:mmu share_mem;
')
# domain_self_comms(domain)
break;
case XENMAPSPACE_gmfn_foreign:
+ case XENMAPSPACE_gmfn_share:
{
struct domain *od;
p2m_type_t p2mt;
return -EINVAL;
}
- rc = xsm_map_gmfn_foreign(XSM_TARGET, d, od);
+ if ( space == XENMAPSPACE_gmfn_foreign )
+     rc = xsm_map_gmfn_foreign(XSM_TARGET, d, od);
+ else
+     rc = xsm_map_gmfn_share(XSM_TARGET, d, od);
+
if ( rc )
{
put_pg_owner(od);
Stage-2 using the Normal Memory
Inner/Outer Write-Back Cacheable
memory attribute. */
+#define XENMAPSPACE_gmfn_share 6 /* GMFN from another dom,
+ XENMEM_add_to_physmap_batch (and
+ currently ARM) only. Unlike
+ XENMAPSPACE_gmfn_foreign, it
+ requires current, rather than the
+ destination domain, to hold the
+ mapping privileges. */
+
/* ` } */
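The comment above describes the privilege model; below is a minimal sketch of how a caller might use the new space. Only struct xen_add_to_physmap_batch, its fields and XENMAPSPACE_gmfn_share come from this header; share_gfns() and issue_memory_op() are hypothetical placeholders for however the caller actually issues XENMEM_add_to_physmap_batch (e.g. privcmd/libxencall from userspace, or a direct hypercall from a kernel).

#include <xen/xen.h>      /* public interface headers; the exact include */
#include <xen/memory.h>   /* path depends on the build environment       */

/* Hypothetical wrapper around the environment's memory_op hypercall path. */
extern int issue_memory_op(unsigned int cmd, void *arg);

/*
 * Illustrative sketch: map 'count' frames owned by 'owner' (their GFNs in
 * idxs[]) into 'client's physmap at gpfns[].  The caller, not 'client',
 * must hold mapping privileges over 'owner'; with FLASK, 'client' and
 * 'owner' must additionally be allowed mmu share_mem.
 */
static int share_gfns(domid_t client, domid_t owner, uint16_t count,
                      xen_ulong_t *idxs, xen_pfn_t *gpfns, int *errs)
{
    struct xen_add_to_physmap_batch xatpb = {
        .domid = client,                  /* domain receiving the mappings */
        .space = XENMAPSPACE_gmfn_share,  /* the new map space */
        .size  = count,
        .u.foreign_domid = owner,         /* domain whose memory is shared */
    };

    set_xen_guest_handle(xatpb.idxs, idxs);
    set_xen_guest_handle(xatpb.gpfns, gpfns);
    set_xen_guest_handle(xatpb.errs, errs);

    return issue_memory_op(XENMEM_add_to_physmap_batch, &xatpb);
}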
/*
return xsm_default_action(action, d, t);
}
+/*
+ * Be aware that this is not an exact default equivalent of its Flask
+ * variant, which also checks whether @d and @t are allowed to share
+ * memory pages; for now we do not have a proper default equivalent of
+ * such a check.
+ */
+static XSM_INLINE int xsm_map_gmfn_share(XSM_DEFAULT_ARG struct domain *d,
+ struct domain *t)
+{
+ XSM_ASSERT_ACTION(XSM_TARGET);
+ return xsm_default_action(action, current->domain, d) ?:
+ xsm_default_action(action, current->domain, t);
+}
+
static XSM_INLINE int xsm_hvm_param(XSM_DEFAULT_ARG struct domain *d, unsigned long op)
{
XSM_ASSERT_ACTION(XSM_TARGET);
int (*add_to_physmap) (struct domain *d1, struct domain *d2);
int (*remove_from_physmap) (struct domain *d1, struct domain *d2);
int (*map_gmfn_foreign) (struct domain *d, struct domain *t);
+ int (*map_gmfn_share) (struct domain *d, struct domain *t);
int (*claim_pages) (struct domain *d);
int (*console_io) (struct domain *d, int cmd);
return xsm_ops->map_gmfn_foreign(d, t);
}
+static inline int xsm_map_gmfn_share (xsm_default_t def, struct domain *d, struct domain *t)
+{
+ return xsm_ops->map_gmfn_share(d, t);
+}
+
static inline int xsm_claim_pages(xsm_default_t def, struct domain *d)
{
return xsm_ops->claim_pages(d);
set_to_dummy_if_null(ops, add_to_physmap);
set_to_dummy_if_null(ops, remove_from_physmap);
set_to_dummy_if_null(ops, map_gmfn_foreign);
+ set_to_dummy_if_null(ops, map_gmfn_share);
set_to_dummy_if_null(ops, vm_event_control);
return domain_has_perm(d, t, SECCLASS_MMU, MMU__MAP_READ | MMU__MAP_WRITE);
}
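+/*
+ * @d is the client domain (the one gaining the mapping); @t is the domain
+ * whose memory is being shared.  current must be able to map @t's pages,
+ * and @d must be allowed to share memory with @t.
+ */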
+static int flask_map_gmfn_share(struct domain *d, struct domain *t)
+{
+ return current_has_perm(t, SECCLASS_MMU, MMU__MAP_READ | MMU__MAP_WRITE) ?:
+ domain_has_perm(d, t, SECCLASS_MMU, MMU__SHARE_MEM);
+}
+
static int flask_hvm_param(struct domain *d, unsigned long op)
{
u32 perm;
.add_to_physmap = flask_add_to_physmap,
.remove_from_physmap = flask_remove_from_physmap,
.map_gmfn_foreign = flask_map_gmfn_foreign,
+ .map_gmfn_share = flask_map_gmfn_share,
#if defined(CONFIG_HAS_PASSTHROUGH) && defined(CONFIG_HAS_PCI)
.get_device_group = flask_get_device_group,
# Allow a privileged domain to install a map of a page it does not own. Used
# for stub domain device models with the PV framebuffer.
target_hack
+# Checked when using XENMEM_add_to_physmap with XENMAPSPACE_gmfn_share
+# to share memory between two domains:
+# source = client domain (the domain gaining the mapping)
+# target = domain whose memory is being shared
+ share_mem
}
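As a usage sketch (the domU type names below are illustrative, not defined by this patch), a policy that lets a toolstack running as dom0_t map pages owned by domU_1_t into domU_2_t via XENMAPSPACE_gmfn_share would need roughly:

# client domain is the source context, memory owner is the target context
allow domU_2_t domU_1_t:mmu share_mem;
# the calling domain still needs ordinary mapping rights over the owner
allow dom0_t domU_1_t:mmu { map_read map_write };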
# control of the paging_domctl split by subop