return r;
}
-static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type)
+static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type, p2m_access_t access)
{
+ /* First apply type permissions */
switch(type)
{
case p2m_invalid:
case p2m_ram_paging_in_start:
default:
entry->r = entry->w = entry->x = 0;
- return;
+ break;
case p2m_ram_rw:
entry->r = entry->w = entry->x = 1;
- return;
+ break;
case p2m_mmio_direct:
entry->r = entry->x = 1;
entry->w = !rangeset_contains_singleton(mmio_ro_ranges,
entry->mfn);
- return;
+ break;
case p2m_ram_logdirty:
case p2m_ram_ro:
case p2m_ram_shared:
entry->r = entry->x = 1;
entry->w = 0;
- return;
+ break;
case p2m_grant_map_rw:
entry->r = entry->w = 1;
entry->x = 0;
- return;
+ break;
case p2m_grant_map_ro:
entry->r = 1;
entry->w = entry->x = 0;
- return;
+ break;
+ }
+
+ /* Then restrict with access permissions */
+ switch (access)
+ {
+ case p2m_access_n:
+ entry->r = entry->w = entry->x = 0;
+ break;
+ case p2m_access_r:
+ entry->w = entry->x = 0;
+ break;
+ case p2m_access_w:
+ entry->r = entry->x = 0;
+ break;
+ case p2m_access_x:
+ entry->r = entry->w = 0;
+ break;
+ case p2m_access_rx:
+ case p2m_access_rx2rw:
+ entry->w = 0;
+ break;
+ case p2m_access_wx:
+ entry->r = 0;
+ break;
+ case p2m_access_rw:
+ entry->x = 0;
+ break;
+ case p2m_access_rwx:
+ break;
}
}
#define GUEST_TABLE_MAP_FAILED 0
ept_entry->epte = 0;
ept_entry->mfn = page_to_mfn(pg);
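+ /* Newly constructed table entries inherit the domain's default access type */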
+ ept_entry->access = p2m->default_access;
+
ept_entry->r = ept_entry->w = ept_entry->x = 1;
return 1;
epte->emt = ept_entry->emt;
epte->ipat = ept_entry->ipat;
epte->sp = (level > 1) ? 1 : 0;
+ epte->access = ept_entry->access;
epte->sa_p2mt = ept_entry->sa_p2mt;
epte->mfn = ept_entry->mfn + i * trunk;
epte->rsvd2_snp = ( iommu_enabled && iommu_snoop ) ? 1 : 0;
- ept_p2m_type_to_flags(epte, epte->sa_p2mt);
+ ept_p2m_type_to_flags(epte, epte->sa_p2mt, epte->access);
if ( (level - 1) == target )
continue;
*/
static int
ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
- unsigned int order, p2m_type_t p2mt)
+ unsigned int order, p2m_type_t p2mt, p2m_access_t p2ma)
{
ept_entry_t *table, *ept_entry = NULL;
unsigned long gfn_remainder = gfn;
/* Construct the new entry, and then write it once */
new_entry.emt = epte_get_entry_emt(p2m->domain, gfn, mfn, &ipat,
direct_mmio);
+
new_entry.ipat = ipat;
new_entry.sp = order ? 1 : 0;
new_entry.sa_p2mt = p2mt;
+ new_entry.access = p2ma;
new_entry.rsvd2_snp = (iommu_enabled && iommu_snoop);
if ( new_entry.mfn == mfn_x(mfn) )
else
new_entry.mfn = mfn_x(mfn);
- ept_p2m_type_to_flags(&new_entry, p2mt);
+ ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
}
atomic_write_ept_entry(ept_entry, new_entry);
new_entry.ipat = ipat;
new_entry.sp = i ? 1 : 0;
new_entry.sa_p2mt = p2mt;
+ new_entry.access = p2ma;
new_entry.rsvd2_snp = (iommu_enabled && iommu_snoop);
if ( new_entry.mfn == mfn_x(mfn) )
else /* the caller should take care of the previous page */
new_entry.mfn = mfn_x(mfn);
- ept_p2m_type_to_flags(&new_entry, p2mt);
+ ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
atomic_write_ept_entry(ept_entry, new_entry);
}
/* Read ept p2m entries */
static mfn_t ept_get_entry(struct p2m_domain *p2m,
- unsigned long gfn, p2m_type_t *t,
+ unsigned long gfn, p2m_type_t *t, p2m_access_t* a,
p2m_query_t q)
{
struct domain *d = p2m->domain;
mfn_t mfn = _mfn(INVALID_MFN);
*t = p2m_mmio_dm;
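+ /* Default to no access; a valid entry found below overrides this */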
+ *a = p2m_access_n;
/* This pfn is higher than the highest the p2m map currently holds */
if ( gfn > p2m->max_mapped_pfn )
if ( ept_entry->sa_p2mt != p2m_invalid )
{
*t = ept_entry->sa_p2mt;
+ *a = ept_entry->access;
+
mfn = _mfn(ept_entry->mfn);
if ( i )
{
}
static mfn_t ept_get_entry_current(struct p2m_domain *p2m,
- unsigned long gfn, p2m_type_t *t,
+ unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
p2m_query_t q)
{
- return ept_get_entry(p2m, gfn, t, q);
+ return ept_get_entry(p2m, gfn, t, a, q);
}
/*
order = level * EPT_TABLE_ORDER;
if ( need_modify_ept_entry(p2m, gfn, mfn,
e.ipat, e.emt, e.sa_p2mt) )
- ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt);
+ ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt, e.access);
gfn += trunk;
break;
}
else /* gfn assigned with 4k */
{
if ( need_modify_ept_entry(p2m, gfn, mfn, e.ipat, e.emt, e.sa_p2mt) )
- ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt);
+ ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt, e.access);
}
}
p2m_unlock(p2m);
continue;
e.sa_p2mt = nt;
- ept_p2m_type_to_flags(&e, nt);
+ ept_p2m_type_to_flags(&e, nt, e.access);
atomic_write_ept_entry(&epte[i], e);
}
}
*/
static
int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
- unsigned int page_order, p2m_type_t p2mt);
+ unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma);
static int
p2m_pod_cache_add(struct p2m_domain *p2m,
{
/* All PoD: Mark the whole region invalid and tell caller
* we're done. */
- set_p2m_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid);
+ set_p2m_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid, p2m->default_access);
p2m->pod.entry_count-=(1<<order); /* Lock: p2m */
BUG_ON(p2m->pod.entry_count < 0);
ret = 1;
mfn = gfn_to_mfn_query(p2m, gpfn + i, &t);
if ( t == p2m_populate_on_demand )
{
- set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid);
+ set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
p2m->pod.entry_count--; /* Lock: p2m */
BUG_ON(p2m->pod.entry_count < 0);
pod--;
page = mfn_to_page(mfn);
- set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid);
+ set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
p2m_pod_cache_add(p2m, page, 0);
/* Try to remove the page, restoring old mapping if it fails. */
set_p2m_entry(p2m, gfn,
_mfn(POPULATE_ON_DEMAND_MFN), 9,
- p2m_populate_on_demand);
+ p2m_populate_on_demand, p2m->default_access);
/* Make none of the MFNs are used elsewhere... for example, mapped
* via the grant table interface, or by qemu. Allow one refcount for
out_reset:
if ( reset )
- set_p2m_entry(p2m, gfn, mfn0, 9, type0);
+ set_p2m_entry(p2m, gfn, mfn0, 9, type0, p2m->default_access);
out:
return ret;
/* Try to remove the page, restoring old mapping if it fails. */
set_p2m_entry(p2m, gfns[i],
_mfn(POPULATE_ON_DEMAND_MFN), 0,
- p2m_populate_on_demand);
+ p2m_populate_on_demand, p2m->default_access);
/* See if the page was successfully unmapped. (Allow one refcount
* for being allocated to a domain.) */
unmap_domain_page(map[i]);
map[i] = NULL;
- set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i]);
+ set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i], p2m->default_access);
continue;
}
* check timing. */
if ( j < PAGE_SIZE/sizeof(*map[i]) )
{
- set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i]);
+ set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i], p2m->default_access);
}
else
{
* 512 2MB pages. The rest of 511 calls are unnecessary.
*/
set_p2m_entry(p2m, gfn_aligned, _mfn(POPULATE_ON_DEMAND_MFN), 9,
- p2m_populate_on_demand);
+ p2m_populate_on_demand, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);
return 0;
gfn_aligned = (gfn >> order) << order;
- set_p2m_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw);
+ set_p2m_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw, p2m->default_access);
for( i = 0; i < (1UL << order); i++ )
set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_aligned + i);
gfn_aligned = (gfn>>order)<<order;
for(i=0; i<(1<<order); i++)
set_p2m_entry(p2m, gfn_aligned+i, _mfn(POPULATE_ON_DEMAND_MFN), 0,
- p2m_populate_on_demand);
+ p2m_populate_on_demand, p2m->default_access);
if ( tb_init_done )
{
struct {
// Returns 0 on error (out of memory)
static int
p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
- unsigned int page_order, p2m_type_t p2mt)
+ unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
{
// XXX -- this might be able to be faster iff current->domain == d
mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
}
static mfn_t
-p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t,
+p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
p2m_query_t q)
{
mfn_t mfn;
* XXX Once we start explicitly registering MMIO regions in the p2m
* XXX we will return p2m_invalid for unmapped gfns */
*t = p2m_mmio_dm;
+ /* Not implemented except with EPT */
+ *a = p2m_access_rwx;
mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
/* Read the current domain's p2m table (through the linear mapping). */
static mfn_t p2m_gfn_to_mfn_current(struct p2m_domain *p2m,
- unsigned long gfn, p2m_type_t *t,
+ unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
p2m_query_t q)
{
mfn_t mfn = _mfn(INVALID_MFN);
* XXX Once we start explicitly registering MMIO regions in the p2m
* XXX we will return p2m_invalid for unmapped gfns */
+ /* Not currently implemented except for EPT */
+ *a = p2m_access_rwx;
+
if ( gfn <= p2m->max_mapped_pfn )
{
l1_pgentry_t l1e = l1e_empty(), *p2m_entry;
INIT_PAGE_LIST_HEAD(&p2m->pod.single);
p2m->domain = d;
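+ /* Default to full access: p2m_access_rwx adds no restriction beyond the
+ * page type until default_access is narrowed or per-page access is set. */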
+ p2m->default_access = p2m_access_rwx;
+
p2m->set_entry = p2m_set_entry;
p2m->get_entry = p2m_gfn_to_mfn;
p2m->get_entry_current = p2m_gfn_to_mfn_current;
if ( p2m == NULL )
return -ENOMEM;
p2m_initialise(d, p2m);
-
+
return 0;
}
static
int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
- unsigned int page_order, p2m_type_t p2mt)
+ unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
{
struct domain *d = p2m->domain;
unsigned long todo = 1ul << page_order;
else
order = 0;
- if ( !p2m->set_entry(p2m, gfn, mfn, order, p2mt) )
+ if ( !p2m->set_entry(p2m, gfn, mfn, order, p2mt, p2ma) )
rc = 0;
gfn += 1ul << order;
if ( mfn_x(mfn) != INVALID_MFN )
/* Initialise physmap tables for slot zero. Other code assumes this. */
if ( !set_p2m_entry(p2m, 0, _mfn(INVALID_MFN), 0,
- p2m_invalid) )
+ p2m_invalid, p2m->default_access) )
goto error;
/* Copy all existing mappings from the page list and m2p */
(gfn != 0x55555555L)
#endif
&& gfn != INVALID_M2P_ENTRY
- && !set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_rw) )
+ && !set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_rw, p2m->default_access) )
goto error_unlock;
}
spin_unlock(&p2m->domain->page_alloc_lock);
#ifdef __x86_64__
unsigned long gfn;
p2m_type_t t;
+ p2m_access_t a;
mfn_t mfn;
#endif
#ifdef __x86_64__
for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
{
- mfn = p2m->get_entry(p2m, gfn, &t, p2m_query);
+ mfn = p2m->get_entry(p2m, gfn, &t, &a, p2m_query);
if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
BUG_ON(mem_sharing_unshare_page(p2m, gfn, MEM_SHARING_DESTROY_GFN));
}
unsigned long i;
mfn_t mfn_return;
p2m_type_t t;
+ p2m_access_t a;
if ( !paging_mode_translate(p2m->domain) )
{
for ( i = 0; i < (1UL << page_order); i++ )
{
- mfn_return = p2m->get_entry(p2m, gfn + i, &t, p2m_query);
+ mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, p2m_query);
if ( !p2m_is_grant(t) )
set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
}
- set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid);
+ set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid, p2m->default_access);
}
void
/* Now, actually do the two-way mapping */
if ( !set_p2m_entry(p2m, gfn, _mfn(POPULATE_ON_DEMAND_MFN), order,
- p2m_populate_on_demand) )
+ p2m_populate_on_demand, p2m->default_access) )
rc = -EINVAL;
else
{
/* Now, actually do the two-way mapping */
if ( mfn_valid(_mfn(mfn)) )
{
- if ( !set_p2m_entry(p2m, gfn, _mfn(mfn), page_order, t) )
+ if ( !set_p2m_entry(p2m, gfn, _mfn(mfn), page_order, t, p2m->default_access) )
rc = -EINVAL;
if ( !p2m_is_grant(t) )
{
gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
gfn, mfn);
if ( !set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order,
- p2m_invalid) )
+ p2m_invalid, p2m->default_access) )
rc = -EINVAL;
else
{
}
/* Modify the p2m type of a single gfn from ot to nt, returning the
- * entry's previous type */
+ * entry's previous type. Resets the access permissions. */
p2m_type_t p2m_change_type(struct p2m_domain *p2m, unsigned long gfn,
p2m_type_t ot, p2m_type_t nt)
{
mfn = gfn_to_mfn_query(p2m, gfn, &pt);
if ( pt == ot )
- set_p2m_entry(p2m, gfn, mfn, 0, nt);
+ set_p2m_entry(p2m, gfn, mfn, 0, nt, p2m->default_access);
p2m_unlock(p2m);
P2M_DEBUG("set mmio %lx %lx\n", gfn, mfn_x(mfn));
p2m_lock(p2m);
- rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_mmio_direct);
+ rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_mmio_direct, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);
if ( 0 == rc )
return 0;
}
p2m_lock(p2m);
- rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 0);
+ rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 0, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);
P2M_DEBUG("set shared %lx %lx\n", gfn, mfn_x(mfn));
if ( need_lock )
p2m_lock(p2m);
- rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_shared);
+ rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_shared, p2m->default_access);
if ( need_lock )
p2m_unlock(p2m);
if ( 0 == rc )
/* Fix p2m entry */
p2m_lock(p2m);
- set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out);
+ set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);
/* Remove mapping from p2m table */
p2m_lock(p2m);
- set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged);
+ set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);
if ( p2mt == p2m_ram_paged )
{
p2m_lock(p2m);
- set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start);
+ set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);
}
/* Fix p2m mapping */
p2m_lock(p2m);
- set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in);
+ set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);
/* Fix p2m entry */
mfn = gfn_to_mfn(p2m, rsp.gfn, &p2mt);
p2m_lock(p2m);
- set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw);
+ set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);
p2m_ram_broken =14, /* Broken page, access cause domain crash */
} p2m_type_t;
+/*
+ * Additional access types, which are used to further restrict
+ * the permissions given by the p2m_type_t memory type. Violations
+ * caused by p2m_access_t restrictions are sent to the mem_event
+ * interface.
+ *
+ * The access permissions are soft state: when any ambiguous change of page
+ * type or use occurs, or when pages are flushed, swapped, or at any other
+ * convenient time, the access permissions can get reset to the p2m_domain
+ * default.
+ */
+typedef enum {
+ p2m_access_n = 0, /* No access permissions allowed */
+ p2m_access_r = 1,
+ p2m_access_w = 2,
+ p2m_access_rw = 3,
+ p2m_access_x = 4,
+ p2m_access_rx = 5,
+ p2m_access_wx = 6,
+ p2m_access_rwx = 7,
+ p2m_access_rx2rw = 8, /* Special: page goes from RX to RW on write */
+
+ /* NOTE: Assumed to be only 4 bits right now */
+} p2m_access_t;
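+/* As defined above, r, w and x occupy bits 0-2, so the combined values
+ * (rw = 3, rx = 5, wx = 6, rwx = 7) are plain bit unions; rx2rw (8) is a
+ * special value outside that encoding. */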
+
typedef enum {
p2m_query = 0, /* Do not populate a PoD entries */
p2m_alloc = 1, /* Automatically populate PoD entries */
int (*set_entry )(struct p2m_domain *p2m,
unsigned long gfn,
mfn_t mfn, unsigned int page_order,
- p2m_type_t p2mt);
+ p2m_type_t p2mt,
+ p2m_access_t p2ma);
mfn_t (*get_entry )(struct p2m_domain *p2m,
unsigned long gfn,
p2m_type_t *p2mt,
+ p2m_access_t *p2ma,
p2m_query_t q);
mfn_t (*get_entry_current)(struct p2m_domain *p2m,
unsigned long gfn,
p2m_type_t *p2mt,
+ p2m_access_t *p2ma,
p2m_query_t q);
void (*change_entry_type_global)(struct p2m_domain *p2m,
p2m_type_t ot,
p2m_type_t nt);
+
+ /* Default P2M access type for each page in the domain: new pages,
+ * swapped-in pages, cleared pages, and pages that are ambiguously
+ * retyped get this access type. See definition of p2m_access_t. */
+ p2m_access_t default_access;
+
+ /* If true, and an access fault comes in when there is no mem_event
+ * listener, pause the domain. Otherwise, remove the access restrictions. */
+ bool_t access_required;
/* Highest guest frame that's ever been mapped in the p2m */
unsigned long max_mapped_pfn;
/* Read the current domain's p2m table. Do not populate PoD pages. */
static inline mfn_t gfn_to_mfn_type_current(struct p2m_domain *p2m,
unsigned long gfn, p2m_type_t *t,
+ p2m_access_t *a,
p2m_query_t q)
{
- return p2m->get_entry_current(p2m, gfn, t, q);
+ return p2m->get_entry_current(p2m, gfn, t, a, q);
}
/* Read P2M table, mapping pages as we go.
gfn_to_mfn_type_p2m(struct p2m_domain *p2m, unsigned long gfn,
p2m_type_t *t, p2m_query_t q)
{
- return p2m->get_entry(p2m, gfn, t, q);
+ p2m_access_t a = p2m_access_n; /* dummy; access not returned to caller */
+ return p2m->get_entry(p2m, gfn, t, &a, q);
}
p2m_query_t q)
{
mfn_t mfn;
+ p2m_access_t a;
if ( !p2m || !paging_mode_translate(p2m->domain) )
{
mfn = _mfn(gfn);
}
else if ( likely(current->domain == p2m->domain) )
- mfn = gfn_to_mfn_type_current(p2m, gfn, t, q);
+ mfn = gfn_to_mfn_type_current(p2m, gfn, t, &a, q);
else
mfn = gfn_to_mfn_type_p2m(p2m, gfn, t, q);