# containing at most one domain. This is not enforced by policy.
define(`declare_singleton_domain', `
type $1, domain_type`'ifelse(`$#', `1', `', `,shift($@)');
+ define(`$1_self', `$1')
type $1_channel, event_type;
type_transition $1 domain_type:event $1_channel;
declare_domain_common($1, $1)
# use_device(domain, device)
# Allow a device to be used by a domain
define(`use_device', `
+ allow $1 $1_self:mmu exchange;
allow $1 $2:resource use;
allow $1 domio_t:mmu { map_read map_write };
')
out_chunk_order = exch.in.extent_order - exch.out.extent_order;
}
- rc = rcu_lock_target_domain_by_id(exch.in.domid, &d);
+ d = rcu_lock_domain_by_any_id(exch.in.domid);
+ if ( d == NULL )
+ {
+ rc = -ESRCH;
+ goto fail_early;
+ }
+
+ rc = xsm_memory_exchange(d);
if ( rc )
+ {
+ rcu_unlock_domain(d);
goto fail_early;
+ }
memflags |= MEMF_bits(domain_clamp_alloc_bitsize(
d,
return 0;
}
+/*
+ * Dummy (XSM-disabled) hook for XENMEM_exchange: a domain may always
+ * exchange its own memory; acting on another domain requires the caller
+ * to be privileged for the target (IS_PRIV_FOR), otherwise -EPERM.
+ */
+static XSM_INLINE int xsm_memory_exchange(struct domain *d)
+{
+ if ( d != current->domain && !IS_PRIV_FOR(current->domain, d) )
+ return -EPERM;
+ return 0;
+}
+
static XSM_INLINE int xsm_memory_adjust_reservation(struct domain *d1,
struct domain *d2)
{
int (*get_pod_target) (struct domain *d);
int (*set_pod_target) (struct domain *d);
+ int (*memory_exchange) (struct domain *d);
int (*memory_adjust_reservation) (struct domain *d1, struct domain *d2);
int (*memory_stat_reservation) (struct domain *d1, struct domain *d2);
int (*memory_pin_page) (struct domain *d1, struct domain *d2, struct page_info *page);
return xsm_ops->set_pod_target(d);
}
+/* Dispatch the XENMEM_exchange permission check to the active XSM module. */
+static inline int xsm_memory_exchange (struct domain *d)
+{
+ return xsm_ops->memory_exchange(d);
+}
+
static inline int xsm_memory_adjust_reservation (struct domain *d1, struct
domain *d2)
{
set_to_dummy_if_null(ops, get_pod_target);
set_to_dummy_if_null(ops, set_pod_target);
+ set_to_dummy_if_null(ops, memory_exchange);
set_to_dummy_if_null(ops, memory_adjust_reservation);
set_to_dummy_if_null(ops, memory_stat_reservation);
set_to_dummy_if_null(ops, memory_pin_page);
return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__SETPODTARGET);
}
+/*
+ * FLASK hook for XENMEM_exchange: the current domain needs the
+ * mmu:exchange permission on the target domain's security label.
+ */
+static int flask_memory_exchange(struct domain *d)
+{
+ return current_has_perm(d, SECCLASS_MMU, MMU__EXCHANGE);
+}
+
static int flask_memory_adjust_reservation(struct domain *d1, struct domain *d2)
{
return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__ADJUST);
.get_pod_target = flask_get_pod_target,
.set_pod_target = flask_set_pod_target,
+ .memory_exchange = flask_memory_exchange,
.memory_adjust_reservation = flask_memory_adjust_reservation,
.memory_stat_reservation = flask_memory_stat_reservation,
.memory_pin_page = flask_memory_pin_page,
memorymap
remote_remap
mmuext_op
+ exchange
}
class shadow