################################################################################
define(`declare_domain_common', `
allow $1 $2:grant { query setup };
- allow $1 $2:mmu { adjust physmap map_read map_write stat pinpage updatemp };
+ allow $1 $2:mmu { adjust physmap map_read map_write stat pinpage updatemp mmuext_op };
allow $1 $2:hvm { getparam setparam };
')
define(`create_domain_common', `
allow $1 $2:domain2 { set_cpuid settsc };
allow $1 $2:security check_context;
allow $1 $2:shadow enable;
- allow $1 $2:mmu {map_read map_write adjust memorymap physmap pinpage};
+ allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage mmuext_op };
allow $1 $2:grant setup;
allow $1 $2:hvm { cacheattr getparam hvmctl irqlevel pciroute sethvmc setparam pcilevel trackdirtyvram };
')
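# Note: mmuext_op is added to the mmu permissions above so that a domain
# builder can still pin the new guest's page tables (MMUEXT_PIN_L*_TABLE)
# once those operations are gated by the new mmuext_op hook.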
# use_device(domain, device)
#   Allow a device to be used by a domain
define(`use_device', `
allow $1 $2:resource use;
- allow $1 $2:mmu { map_read map_write };
+ allow $1 domio_t:mmu { map_read map_write };
')
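# Illustrative use only (the domU_t/nic_dev_t pair is assumed, not defined
# in this hunk):
#   use_device(domU_t, nic_dev_t)
# Mapping permissions are now checked against domio_t, the label of the
# DOMID_IO pseudo-domain that owns I/O memory, rather than the device type.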
# admin_device(domain, device)
# Domain 0
declare_singleton_domain(dom0_t, mls_priv);
-# Untracked I/O memory (pseudo-domain)
+# I/O memory (DOMID_IO pseudo-domain)
type domio_t, xen_type;
-# Xen heap (pseudo-domain)
+# Xen heap (DOMID_XEN pseudo-domain)
type domxen_t, xen_type;
# Unlabeled objects
admin_device(dom0_t, irq_t)
admin_device(dom0_t, ioport_t)
admin_device(dom0_t, iomem_t)
-allow dom0_t domio_t:mmu { map_read map_write };
domain_comms(dom0_t, dom0_t)
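# A sketch of the new permission in a delegation scenario (stubdom_t and
# domU_t are assumed names): a stub domain that constructs page tables on
# behalf of its guest would need
#   allow stubdom_t domU_t:mmu { map_read map_write adjust mmuext_op };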
case DOMID_IO:
pg_owner = rcu_lock_domain(dom_io);
break;
case DOMID_XEN:
- if ( !IS_PRIV(curr) )
- {
- MEM_LOG("Cannot set foreign dom");
- break;
- }
pg_owner = rcu_lock_domain(dom_xen);
break;
default:
if ( (pg_owner = rcu_lock_domain_by_id(domid)) == NULL )
{
MEM_LOG("Unknown domain '%u'", domid);
break;
}
- if ( !IS_PRIV_FOR(curr, pg_owner) )
- {
- MEM_LOG("Cannot set foreign dom");
- rcu_unlock_domain(pg_owner);
- pg_owner = NULL;
- }
break;
}
goto out;
}
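+ /* Check mmuext_op access to the foreign page owner once per hypercall,
+ * before entering the per-operation loop below. */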
+ rc = xsm_mmuext_op(d, pg_owner);
+ if ( rc )
+ {
+ rcu_unlock_domain(pg_owner);
+ goto out;
+ }
+
for ( i = 0; i < count; i++ )
{
if ( hypercall_preempt_check() )
struct vcpu *v = current;
struct domain *d = v->domain, *pt_owner = d, *pg_owner;
struct domain_mmap_cache mapcache;
+ uint32_t xsm_needed = 0;
+ uint32_t xsm_checked = 0;
if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
{
count &= ~MMU_UPDATE_PREEMPTED;
if ( unlikely(!guest_handle_is_null(pdone)) )
(void)copy_from_guest(&done, pdone, 1);
}
if ( (pt_dom = foreigndom >> 16) != 0 )
{
/* Pagetables belong to a foreign domain (PFD). */
if ( (pt_owner = rcu_lock_domain_by_id(pt_dom - 1)) == NULL )
{
rc = -EINVAL;
goto out;
}
- if ( !IS_PRIV_FOR(d, pt_owner) )
- {
- rc = -ESRCH;
- goto out;
- }
}
if ( (pg_owner = get_pg_owner((uint16_t)foreigndom)) == NULL )
{
rc = -ESRCH;
goto out;
}
case MMU_NORMAL_PT_UPDATE:
case MMU_PT_UPDATE_PRESERVE_AD:
{
p2m_type_t p2mt;
- rc = xsm_mmu_normal_update(d, pt_owner, pg_owner, req.val);
- if ( rc )
- break;
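+ /* Accumulate the access flags this batch of updates requires; re-invoke
+ * the XSM hook only when the required set grows. */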
+ xsm_needed |= XSM_MMU_NORMAL_UPDATE;
+ if ( get_pte_flags(req.val) & _PAGE_PRESENT )
+ {
+ xsm_needed |= XSM_MMU_UPDATE_READ;
+ if ( get_pte_flags(req.val) & _PAGE_RW )
+ xsm_needed |= XSM_MMU_UPDATE_WRITE;
+ }
+ if ( xsm_needed != xsm_checked )
+ {
+ rc = xsm_mmu_update(d, pt_owner, pg_owner, xsm_needed);
+ if ( rc )
+ break;
+ xsm_checked = xsm_needed;
+ }
rc = -EINVAL;
req.ptr -= cmd;

case MMU_MACHPHYS_UPDATE:
mfn = req.ptr >> PAGE_SHIFT;
gpfn = req.val;
- rc = xsm_mmu_machphys_update(d, pg_owner, mfn);
- if ( rc )
- break;
+ xsm_needed |= XSM_MMU_MACHPHYS_UPDATE;
+ if ( xsm_needed != xsm_checked )
+ {
+ rc = xsm_mmu_update(d, NULL, pg_owner, xsm_needed);
+ if ( rc )
+ break;
+ xsm_checked = xsm_needed;
+ }
if ( unlikely(!get_page_from_pagenr(mfn, pg_owner)) )
{
MEM_LOG("Could not get page for mach->phys update");
break;
}

return 0;
}
-static XSM_INLINE int xsm_mmu_normal_update(struct domain *d, struct domain *t,
- struct domain *f, intpte_t fpte)
+static XSM_INLINE int xsm_mmu_update(struct domain *d, struct domain *t,
+ struct domain *f, uint32_t flags)
{
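+ /* With no security module loaded, preserve the privilege checks that
+ * previously lived in mm.c: a domain may act on another domain's page
+ * tables or pages only if it is privileged over that domain. */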
+ if ( t && d != t && !IS_PRIV_FOR(d, t) )
+ return -EPERM;
+ if ( d != f && !IS_PRIV_FOR(d, f) )
+ return -EPERM;
return 0;
}
-static XSM_INLINE int xsm_mmu_machphys_update(struct domain *d, struct domain *f,
- unsigned long mfn)
+static XSM_INLINE int xsm_mmuext_op(struct domain *d, struct domain *f)
{
+ if ( d != f && !IS_PRIV_FOR(d, f) )
+ return -EPERM;
return 0;
}
static XSM_INLINE int xsm_update_va_mapping(struct domain *d, struct domain *f,
l1_pgentry_t pte)
{
+ if ( d != f && !IS_PRIV_FOR(d, f) )
+ return -EPERM;
return 0;
}
#define XSM_MAGIC 0x00000000
#endif
-#ifdef XSM_ENABLE
-
extern char *policy_buffer;
extern u32 policy_size;
int (*getidletime) (void);
int (*machine_memory_map) (void);
int (*domain_memory_map) (struct domain *d);
- int (*mmu_normal_update) (struct domain *d, struct domain *t,
- struct domain *f, intpte_t fpte);
- int (*mmu_machphys_update) (struct domain *d1, struct domain *d2, unsigned long mfn);
+#define XSM_MMU_UPDATE_READ 1
+#define XSM_MMU_UPDATE_WRITE 2
+#define XSM_MMU_NORMAL_UPDATE 4
+#define XSM_MMU_MACHPHYS_UPDATE 8
+ int (*mmu_update) (struct domain *d, struct domain *t,
+ struct domain *f, uint32_t flags);
+ int (*mmuext_op) (struct domain *d, struct domain *f);
int (*update_va_mapping) (struct domain *d, struct domain *f, l1_pgentry_t pte);
int (*add_to_physmap) (struct domain *d1, struct domain *d2);
int (*sendtrigger) (struct domain *d);
#endif
};
+#ifdef XSM_ENABLE
+
extern struct xsm_operations *xsm_ops;
#ifndef XSM_NO_WRAPPERS
return xsm_ops->domain_memory_map(d);
}
-static inline int xsm_mmu_normal_update (struct domain *d, struct domain *t,
- struct domain *f, intpte_t fpte)
+static inline int xsm_mmu_update (struct domain *d, struct domain *t,
+ struct domain *f, uint32_t flags)
{
- return xsm_ops->mmu_normal_update(d, t, f, fpte);
+ return xsm_ops->mmu_update(d, t, f, flags);
}
-static inline int xsm_mmu_machphys_update (struct domain *d1, struct domain *d2,
- unsigned long mfn)
+static inline int xsm_mmuext_op (struct domain *d, struct domain *f)
{
- return xsm_ops->mmu_machphys_update(d1, d2, mfn);
+ return xsm_ops->mmuext_op(d, f);
}
static inline int xsm_update_va_mapping(struct domain *d, struct domain *f,
set_to_dummy_if_null(ops, getidletime);
set_to_dummy_if_null(ops, machine_memory_map);
set_to_dummy_if_null(ops, domain_memory_map);
- set_to_dummy_if_null(ops, mmu_normal_update);
- set_to_dummy_if_null(ops, mmu_machphys_update);
+ set_to_dummy_if_null(ops, mmu_update);
+ set_to_dummy_if_null(ops, mmuext_op);
set_to_dummy_if_null(ops, update_va_mapping);
set_to_dummy_if_null(ops, add_to_physmap);
set_to_dummy_if_null(ops, remove_from_physmap);
return current_has_perm(d, SECCLASS_MMU, MMU__MEMORYMAP);
}
-static int domain_memory_perm(struct domain *d, struct domain *f, l1_pgentry_t pte)
+static int flask_mmu_update(struct domain *d, struct domain *t,
+ struct domain *f, uint32_t flags)
{
int rc = 0;
- u32 map_perms = MMU__MAP_READ;
- unsigned long fgfn, fmfn;
- p2m_type_t p2mt;
-
- if ( !(l1e_get_flags(pte) & _PAGE_PRESENT) )
- return 0;
-
- if ( l1e_get_flags(pte) & _PAGE_RW )
- map_perms |= MMU__MAP_WRITE;
-
- fgfn = l1e_get_pfn(pte);
- fmfn = mfn_x(get_gfn_query(f, fgfn, &p2mt));
- put_gfn(f, fgfn);
+ u32 map_perms = 0;
- if ( f->domain_id == DOMID_IO || !mfn_valid(fmfn) )
- {
- struct avc_audit_data ad;
- u32 dsid, fsid;
- rc = security_iomem_sid(fmfn, &fsid);
- if ( rc )
- return rc;
- AVC_AUDIT_DATA_INIT(&ad, MEMORY);
- ad.sdom = d;
- ad.tdom = f;
- ad.memory.pte = pte.l1;
- ad.memory.mfn = fmfn;
- dsid = domain_sid(d);
- return avc_has_perm(dsid, fsid, SECCLASS_MMU, map_perms, &ad);
- }
-
- return domain_has_perm(d, f, SECCLASS_MMU, map_perms);
-}
-
-static int flask_mmu_normal_update(struct domain *d, struct domain *t,
- struct domain *f, intpte_t fpte)
-{
- int rc = 0;
-
- if (d != t)
+ if ( t && d != t )
rc = domain_has_perm(d, t, SECCLASS_MMU, MMU__REMOTE_REMAP);
if ( rc )
return rc;
- return domain_memory_perm(d, f, l1e_from_intpte(fpte));
+ if ( flags & XSM_MMU_UPDATE_READ )
+ map_perms |= MMU__MAP_READ;
+ if ( flags & XSM_MMU_UPDATE_WRITE )
+ map_perms |= MMU__MAP_WRITE;
+ if ( flags & XSM_MMU_MACHPHYS_UPDATE )
+ map_perms |= MMU__UPDATEMP;
+
+ if ( map_perms )
+ rc = domain_has_perm(d, f, SECCLASS_MMU, map_perms);
+ return rc;
}
-static int flask_mmu_machphys_update(struct domain *d1, struct domain *d2,
- unsigned long mfn)
+static int flask_mmuext_op(struct domain *d, struct domain *f)
{
- return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__UPDATEMP);
+ return domain_has_perm(d, f, SECCLASS_MMU, MMU__MMUEXT_OP);
}
static int flask_update_va_mapping(struct domain *d, struct domain *f,
l1_pgentry_t pte)
{
- return domain_memory_perm(d, f, pte);
+ u32 map_perms = MMU__MAP_READ;
+ if ( !(l1e_get_flags(pte) & _PAGE_PRESENT) )
+ return 0;
+ if ( l1e_get_flags(pte) & _PAGE_RW )
+ map_perms |= MMU__MAP_WRITE;
+
+ return domain_has_perm(d, f, SECCLASS_MMU, map_perms);
}
static int flask_add_to_physmap(struct domain *d1, struct domain *d2)
.getidletime = flask_getidletime,
.machine_memory_map = flask_machine_memory_map,
.domain_memory_map = flask_domain_memory_map,
- .mmu_normal_update = flask_mmu_normal_update,
- .mmu_machphys_update = flask_mmu_machphys_update,
+ .mmu_update = flask_mmu_update,
+ .mmuext_op = flask_mmuext_op,
.update_va_mapping = flask_update_va_mapping,
.add_to_physmap = flask_add_to_physmap,
.remove_from_physmap = flask_remove_from_physmap,
mfnlist
memorymap
remote_remap
+ mmuext_op
}
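# mmuext_op (added above) is checked once per do_mmuext_op() hypercall,
# between the calling domain and the domain owning the affected pages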
class shadow