* All PoD: Mark the whole region invalid and tell caller
* we're done.
*/
- p2m_set_entry(p2m, gfn_x(gfn), INVALID_MFN, order, p2m_invalid,
+ p2m_set_entry(p2m, gfn, INVALID_MFN, order, p2m_invalid,
p2m->default_access);
p2m->pod.entry_count -= 1UL << order;
BUG_ON(p2m->pod.entry_count < 0);
n = 1UL << cur_order;
if ( t == p2m_populate_on_demand )
{
- p2m_set_entry(p2m, gfn_x(gfn) + i, INVALID_MFN, cur_order,
+ p2m_set_entry(p2m, gfn_add(gfn, i), INVALID_MFN, cur_order,
p2m_invalid, p2m->default_access);
p2m->pod.entry_count -= n;
BUG_ON(p2m->pod.entry_count < 0);
page = mfn_to_page(mfn);
- p2m_set_entry(p2m, gfn_x(gfn) + i, INVALID_MFN, cur_order,
+ p2m_set_entry(p2m, gfn_add(gfn, i), INVALID_MFN, cur_order,
p2m_invalid, p2m->default_access);
p2m_tlb_flush_sync(p2m);
for ( j = 0; j < n; ++j )
* in the p2m.
*/
static int
-p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
+p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn_l)
{
mfn_t mfn, mfn0 = INVALID_MFN;
+ gfn_t gfn = _gfn(gfn_l);
p2m_type_t type, type0 = 0;
unsigned long * map = NULL;
int ret=0, reset = 0;
ASSERT(pod_locked_by_me(p2m));
- if ( !superpage_aligned(gfn) )
+ if ( !superpage_aligned(gfn_l) )
goto out;
/* Allow an extra refcount for one shadow pt mapping in shadowed domains */
unsigned long k;
const struct page_info *page;
- mfn = p2m->get_entry(p2m, _gfn(gfn + i), &type, &a, 0,
+ mfn = p2m->get_entry(p2m, gfn_add(gfn, i), &type, &a, 0,
&cur_order, NULL);
/*
int d:16,order:16;
} t;
- t.gfn = gfn;
+ t.gfn = gfn_l;
t.mfn = mfn_x(mfn);
t.d = d->domain_id;
t.order = 9;
}
/* Try to remove the page, restoring old mapping if it fails. */
- p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,
+ p2m_set_entry(p2m, _gfn(gfns[i]), INVALID_MFN, PAGE_ORDER_4K,
p2m_populate_on_demand, p2m->default_access);
/*
unmap_domain_page(map[i]);
map[i] = NULL;
- p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+ p2m_set_entry(p2m, _gfn(gfns[i]), mfns[i], PAGE_ORDER_4K,
types[i], p2m->default_access);
continue;
*/
if ( j < (PAGE_SIZE / sizeof(*map[i])) )
{
- p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+ p2m_set_entry(p2m, _gfn(gfns[i]), mfns[i], PAGE_ORDER_4K,
types[i], p2m->default_access);
}
else
{
struct domain *d = p2m->domain;
struct page_info *p = NULL; /* Compiler warnings */
- unsigned long gfn_aligned = (gfn >> order) << order;
+ gfn_t gfn_aligned = _gfn((gfn >> order) << order);
mfn_t mfn;
unsigned long i;
for( i = 0; i < (1UL << order); i++ )
{
- set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_aligned + i);
+ set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_x(gfn_aligned) + i);
paging_mark_dirty(d, mfn_add(mfn, i));
}
p2m->pod.entry_count -= (1UL << order);
BUG_ON(p2m->pod.entry_count < 0);
- pod_eager_record(p2m, gfn_aligned, order);
+ pod_eager_record(p2m, gfn_x(gfn_aligned), order);
if ( tb_init_done )
{
* need promoting the gfn lock from gfn->2M superpage.
*/
for ( i = 0; i < (1UL << order); i++ )
- p2m_set_entry(p2m, gfn_aligned + i, INVALID_MFN, PAGE_ORDER_4K,
+ p2m_set_entry(p2m, gfn_add(gfn_aligned, i), INVALID_MFN, PAGE_ORDER_4K,
p2m_populate_on_demand, p2m->default_access);
if ( tb_init_done )
{
int
-guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
+guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn_l,
unsigned int order)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ gfn_t gfn = _gfn(gfn_l);
unsigned long i, n, pod_count = 0;
int rc = 0;
gfn_lock(p2m, gfn, order);
- P2M_DEBUG("mark pod gfn=%#lx\n", gfn);
+ P2M_DEBUG("mark pod gfn=%#lx\n", gfn_l);
/* Make sure all gpfns are unused */
for ( i = 0; i < (1UL << order); i += n )
p2m_access_t a;
unsigned int cur_order;
- p2m->get_entry(p2m, _gfn(gfn + i), &ot, &a, 0, &cur_order, NULL);
+ p2m->get_entry(p2m, gfn_add(gfn, i), &ot, &a, 0, &cur_order, NULL);
n = 1UL << min(order, cur_order);
if ( p2m_is_ram(ot) )
{
}
/* Returns: 0 for success, -errno for failure */
-int p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
+int p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
{
struct domain *d = p2m->domain;
{
if ( hap_enabled(d) )
{
- unsigned long fn_mask = !mfn_eq(mfn, INVALID_MFN) ?
- (gfn | mfn_x(mfn) | todo) : (gfn | todo);
+ unsigned long fn_mask = !mfn_eq(mfn, INVALID_MFN) ? mfn_x(mfn) : 0;
+
+ fn_mask |= gfn_x(gfn) | todo;
order = (!(fn_mask & ((1ul << PAGE_ORDER_1G) - 1)) &&
hap_has_1gb) ? PAGE_ORDER_1G :
else
order = 0;
- set_rc = p2m->set_entry(p2m, _gfn(gfn), mfn, order, p2mt, p2ma, -1);
+ set_rc = p2m->set_entry(p2m, gfn, mfn, order, p2mt, p2ma, -1);
if ( set_rc )
rc = set_rc;
- gfn += 1ul << order;
+ gfn = gfn_add(gfn, 1ul << order);
if ( !mfn_eq(mfn, INVALID_MFN) )
mfn = mfn_add(mfn, 1ul << order);
todo -= 1ul << order;
/* Initialise physmap tables for slot zero. Other code assumes this. */
p2m->defer_nested_flush = 1;
- rc = p2m_set_entry(p2m, 0, INVALID_MFN, PAGE_ORDER_4K,
+ rc = p2m_set_entry(p2m, _gfn(0), INVALID_MFN, PAGE_ORDER_4K,
p2m_invalid, p2m->default_access);
p2m->defer_nested_flush = 0;
p2m_unlock(p2m);
static int
-p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
+p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn,
unsigned int page_order)
{
unsigned long i;
+ gfn_t gfn = _gfn(gfn_l);
mfn_t mfn_return;
p2m_type_t t;
p2m_access_t a;
}
ASSERT(gfn_locked_by_me(p2m, gfn));
- P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn, mfn);
+ P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn_l, mfn);
if ( mfn_valid(_mfn(mfn)) )
{
for ( i = 0; i < (1UL << page_order); i++ )
{
- mfn_return = p2m->get_entry(p2m, _gfn(gfn + i), &t, &a, 0,
+ mfn_return = p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0,
NULL, NULL);
if ( !p2m_is_grant(t) && !p2m_is_shared(t) && !p2m_is_foreign(t) )
set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
/* Now, actually do the two-way mapping */
if ( mfn_valid(mfn) )
{
- rc = p2m_set_entry(p2m, gfn_x(gfn), mfn, page_order, t,
+ rc = p2m_set_entry(p2m, gfn, mfn, page_order, t,
p2m->default_access);
if ( rc )
goto out; /* Failed to update p2m, bail without updating m2p. */
{
gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
gfn_x(gfn), mfn_x(mfn));
- rc = p2m_set_entry(p2m, gfn_x(gfn), INVALID_MFN, page_order,
+ rc = p2m_set_entry(p2m, gfn, INVALID_MFN, page_order,
p2m_invalid, p2m->default_access);
if ( rc == 0 )
{
* Returns: 0 for success, -errno for failure.
* Resets the access permissions.
*/
-int p2m_change_type_one(struct domain *d, unsigned long gfn,
+int p2m_change_type_one(struct domain *d, unsigned long gfn_l,
p2m_type_t ot, p2m_type_t nt)
{
p2m_access_t a;
p2m_type_t pt;
+ gfn_t gfn = _gfn(gfn_l);
mfn_t mfn;
struct p2m_domain *p2m = p2m_get_hostp2m(d);
int rc;
gfn_lock(p2m, gfn, 0);
- mfn = p2m->get_entry(p2m, _gfn(gfn), &pt, &a, 0, NULL, NULL);
+ mfn = p2m->get_entry(p2m, gfn, &pt, &a, 0, NULL, NULL);
rc = likely(pt == ot)
? p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt,
p2m->default_access)
}
P2M_DEBUG("set %d %lx %lx\n", gfn_p2mt, gfn_l, mfn_x(mfn));
- rc = p2m_set_entry(p2m, gfn_l, mfn, order, gfn_p2mt, access);
+ rc = p2m_set_entry(p2m, gfn, mfn, order, gfn_p2mt, access);
if ( rc )
gdprintk(XENLOG_ERR, "p2m_set_entry: %#lx:%u -> %d (0x%"PRI_mfn")\n",
gfn_l, order, rc, mfn_x(mfn));
return set_typed_p2m_entry(d, gfn, mfn, order, p2m_mmio_direct, access);
}
-int set_identity_p2m_entry(struct domain *d, unsigned long gfn,
+int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
p2m_access_t p2ma, unsigned int flag)
{
p2m_type_t p2mt;
p2m_access_t a;
+ gfn_t gfn = _gfn(gfn_l);
mfn_t mfn;
struct p2m_domain *p2m = p2m_get_hostp2m(d);
int ret;
{
if ( !need_iommu(d) )
return 0;
- return iommu_map_page(d, gfn, gfn, IOMMUF_readable|IOMMUF_writable);
+ return iommu_map_page(d, gfn_l, gfn_l, IOMMUF_readable|IOMMUF_writable);
}
gfn_lock(p2m, gfn, 0);
- mfn = p2m->get_entry(p2m, _gfn(gfn), &p2mt, &a, 0, NULL, NULL);
+ mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
if ( p2mt == p2m_invalid || p2mt == p2m_mmio_dm )
- ret = p2m_set_entry(p2m, gfn, _mfn(gfn), PAGE_ORDER_4K,
+ ret = p2m_set_entry(p2m, gfn, _mfn(gfn_l), PAGE_ORDER_4K,
p2m_mmio_direct, p2ma);
- else if ( mfn_x(mfn) == gfn && p2mt == p2m_mmio_direct && a == p2ma )
+ else if ( mfn_x(mfn) == gfn_l && p2mt == p2m_mmio_direct && a == p2ma )
ret = 0;
else
{
printk(XENLOG_G_WARNING
"Cannot setup identity map d%d:%lx,"
" gfn already mapped to %lx.\n",
- d->domain_id, gfn, mfn_x(mfn));
+ d->domain_id, gfn_l, mfn_x(mfn));
}
gfn_unlock(p2m, gfn, 0);
* order+1 for caller to retry with order (guaranteed smaller than
* the order value passed in)
*/
-int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
+int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn_l, mfn_t mfn,
unsigned int order)
{
int rc = -EINVAL;
+ gfn_t gfn = _gfn(gfn_l);
mfn_t actual_mfn;
p2m_access_t a;
p2m_type_t t;
return -EIO;
gfn_lock(p2m, gfn, order);
- actual_mfn = p2m->get_entry(p2m, _gfn(gfn), &t, &a, 0, &cur_order, NULL);
+ actual_mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, &cur_order, NULL);
if ( cur_order < order )
{
rc = cur_order + 1;
if ( mfn_eq(actual_mfn, INVALID_MFN) || (t != p2m_mmio_direct) )
{
gdprintk(XENLOG_ERR,
- "gfn_to_mfn failed! gfn=%08lx type:%d\n", gfn, t);
+ "gfn_to_mfn failed! gfn=%08lx type:%d\n", gfn_l, t);
goto out;
}
if ( mfn_x(mfn) != mfn_x(actual_mfn) )
gdprintk(XENLOG_WARNING,
"no mapping between mfn %08lx and gfn %08lx\n",
- mfn_x(mfn), gfn);
+ mfn_x(mfn), gfn_l);
rc = p2m_set_entry(p2m, gfn, INVALID_MFN, order, p2m_invalid,
p2m->default_access);
return rc;
}
-int clear_identity_p2m_entry(struct domain *d, unsigned long gfn)
+int clear_identity_p2m_entry(struct domain *d, unsigned long gfn_l)
{
p2m_type_t p2mt;
p2m_access_t a;
+ gfn_t gfn = _gfn(gfn_l);
mfn_t mfn;
struct p2m_domain *p2m = p2m_get_hostp2m(d);
int ret;
{
if ( !need_iommu(d) )
return 0;
- return iommu_unmap_page(d, gfn);
+ return iommu_unmap_page(d, gfn_l);
}
gfn_lock(p2m, gfn, 0);
- mfn = p2m->get_entry(p2m, _gfn(gfn), &p2mt, &a, 0, NULL, NULL);
- if ( p2mt == p2m_mmio_direct && mfn_x(mfn) == gfn )
+ mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
+ if ( p2mt == p2m_mmio_direct && mfn_x(mfn) == gfn_l )
{
ret = p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_4K,
p2m_invalid, p2m->default_access);
gfn_unlock(p2m, gfn, 0);
printk(XENLOG_G_WARNING
"non-identity map d%d:%lx not cleared (mapped to %lx)\n",
- d->domain_id, gfn, mfn_x(mfn));
+ d->domain_id, gfn_l, mfn_x(mfn));
ret = 0;
}
}
/* Returns: 0 for success, -errno for failure */
-int set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
+int set_shared_p2m_entry(struct domain *d, unsigned long gfn_l, mfn_t mfn)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
int rc = 0;
+ gfn_t gfn = _gfn(gfn_l);
p2m_access_t a;
p2m_type_t ot;
mfn_t omfn;
return -EIO;
gfn_lock(p2m, gfn, 0);
- omfn = p2m->get_entry(p2m, _gfn(gfn), &ot, &a, 0, NULL, NULL);
+ omfn = p2m->get_entry(p2m, gfn, &ot, &a, 0, NULL, NULL);
/* At the moment we only allow p2m change if gfn has already been made
* sharable first */
ASSERT(p2m_is_shared(ot));
|| (pg_type & PGT_type_mask) != PGT_shared_page )
set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
- P2M_DEBUG("set shared %lx %lx\n", gfn, mfn_x(mfn));
+ P2M_DEBUG("set shared %lx %lx\n", gfn_l, mfn_x(mfn));
rc = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_shared,
p2m->default_access);
gfn_unlock(p2m, gfn, 0);
if ( rc )
gdprintk(XENLOG_ERR,
"p2m_set_entry failed! mfn=%08lx rc:%d\n",
- mfn_x(get_gfn_query_unlocked(p2m->domain, gfn, &ot)), rc);
+ mfn_x(get_gfn_query_unlocked(p2m->domain, gfn_l, &ot)), rc);
return rc;
}
* Once the p2mt is changed the page is readonly for the guest. On success the
* pager can write the page contents to disk and later evict the page.
*/
-int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn)
+int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn_l)
{
struct page_info *page;
struct p2m_domain *p2m = p2m_get_hostp2m(d);
p2m_type_t p2mt;
p2m_access_t a;
+ gfn_t gfn = _gfn(gfn_l);
mfn_t mfn;
int ret = -EBUSY;
gfn_lock(p2m, gfn, 0);
- mfn = p2m->get_entry(p2m, _gfn(gfn), &p2mt, &a, 0, NULL, NULL);
+ mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
/* Check if mfn is valid */
if ( !mfn_valid(mfn) )
* could evict it, eviction cannot be done either. In this case the gfn is
* still backed by a mfn.
*/
-int p2m_mem_paging_evict(struct domain *d, unsigned long gfn)
+int p2m_mem_paging_evict(struct domain *d, unsigned long gfn_l)
{
struct page_info *page;
p2m_type_t p2mt;
p2m_access_t a;
+ gfn_t gfn = _gfn(gfn_l);
mfn_t mfn;
struct p2m_domain *p2m = p2m_get_hostp2m(d);
int ret = -EBUSY;
gfn_lock(p2m, gfn, 0);
/* Get mfn */
- mfn = p2m->get_entry(p2m, _gfn(gfn), &p2mt, &a, 0, NULL, NULL);
+ mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
if ( unlikely(!mfn_valid(mfn)) )
goto out;
* already sent to the pager. In this case the caller has to try again until the
* gfn is fully paged in again.
*/
-void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
+void p2m_mem_paging_populate(struct domain *d, unsigned long gfn_l)
{
struct vcpu *v = current;
vm_event_request_t req = {
.reason = VM_EVENT_REASON_MEM_PAGING,
- .u.mem_paging.gfn = gfn
+ .u.mem_paging.gfn = gfn_l
};
p2m_type_t p2mt;
p2m_access_t a;
+ gfn_t gfn = _gfn(gfn_l);
mfn_t mfn;
struct p2m_domain *p2m = p2m_get_hostp2m(d);
if ( rc == -ENOSYS )
{
gdprintk(XENLOG_ERR, "Domain %hu paging gfn %lx yet no ring "
- "in place\n", d->domain_id, gfn);
+ "in place\n", d->domain_id, gfn_l);
/* Prevent the vcpu from faulting repeatedly on the same gfn */
if ( v->domain == d )
vcpu_pause_nosync(v);
/* Fix p2m mapping */
gfn_lock(p2m, gfn, 0);
- mfn = p2m->get_entry(p2m, _gfn(gfn), &p2mt, &a, 0, NULL, NULL);
+ mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
/* Allow only nominated or evicted pages to enter page-in path */
if ( p2mt == p2m_ram_paging_out || p2mt == p2m_ram_paged )
{
* mfn if populate was called for gfn which was nominated but not evicted. In
* this case only the p2mt needs to be forwarded.
*/
-int p2m_mem_paging_prep(struct domain *d, unsigned long gfn, uint64_t buffer)
+int p2m_mem_paging_prep(struct domain *d, unsigned long gfn_l, uint64_t buffer)
{
struct page_info *page;
p2m_type_t p2mt;
p2m_access_t a;
+ gfn_t gfn = _gfn(gfn_l);
mfn_t mfn;
struct p2m_domain *p2m = p2m_get_hostp2m(d);
int ret, page_extant = 1;
gfn_lock(p2m, gfn, 0);
- mfn = p2m->get_entry(p2m, _gfn(gfn), &p2mt, &a, 0, NULL, NULL);
+ mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
ret = -ENOENT;
/* Allow missing pages */
if ( rc )
{
gdprintk(XENLOG_ERR, "Failed to load paging-in gfn %lx domain %u "
- "bytes left %d\n", gfn, d->domain_id, rc);
+ "bytes left %d\n", gfn_l, d->domain_id, rc);
ret = -EFAULT;
put_page(page); /* Don't leak pages */
goto out;
ret = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
paging_mode_log_dirty(d) ? p2m_ram_logdirty
: p2m_ram_rw, a);
- set_gpfn_from_mfn(mfn_x(mfn), gfn);
+ set_gpfn_from_mfn(mfn_x(mfn), gfn_l);
if ( !page_extant )
atomic_dec(&d->paged_pages);
/* Fix p2m entry if the page was not dropped */
if ( !(rsp->u.mem_paging.flags & MEM_PAGING_DROP_PAGE) )
{
- unsigned long gfn = rsp->u.mem_access.gfn;
+ gfn_t gfn = _gfn(rsp->u.mem_access.gfn);
gfn_lock(p2m, gfn, 0);
- mfn = p2m->get_entry(p2m, _gfn(gfn), &p2mt, &a, 0, NULL, NULL);
+ mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
/*
* Allow only pages which were prepared properly, or pages which
* were nominated but not evicted.
p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
paging_mode_log_dirty(d) ? p2m_ram_logdirty :
p2m_ram_rw, a);
- set_gpfn_from_mfn(mfn_x(mfn), gfn);
+ set_gpfn_from_mfn(mfn_x(mfn), gfn_x(gfn));
}
gfn_unlock(p2m, gfn, 0);
}
*/
mask = ~((1UL << page_order) - 1);
mfn = _mfn(mfn_x(mfn) & mask);
+ gfn = _gfn(gfn_x(gfn) & mask);
- rv = p2m_set_entry(*ap2m, gfn_x(gfn) & mask, mfn, page_order, p2mt, p2ma);
+ rv = p2m_set_entry(*ap2m, gfn, mfn, page_order, p2mt, p2ma);
p2m_unlock(*ap2m);
if ( rv )
}
}
else if ( !mfn_eq(m, INVALID_MFN) )
- p2m_set_entry(p2m, gfn_x(gfn), mfn, page_order, p2mt, p2ma);
+ p2m_set_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);
__put_gfn(p2m, gfn_x(gfn));
}
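
For readers less familiar with the typesafe-GFN conversion performed above, the sketch below illustrates the wrapper pattern the hunks rely on: a gfn_t that is a single-member struct, so the compiler rejects accidental mixing of raw longs and frame numbers, together with the _gfn()/gfn_x()/gfn_add() helpers used throughout. This is a minimal standalone illustration of the pattern only, not Xen's actual headers (there the type and accessors are generated by a macro and the struct can be reduced to a plain integer typedef depending on build configuration).

/*
 * Standalone sketch of the typesafe frame-number pattern.
 * Names mirror Xen's gfn_t/_gfn()/gfn_x()/gfn_add(), but this is an
 * illustration only, not the hypervisor's real definitions.
 */
#include <stdio.h>

typedef struct {
    unsigned long gfn;          /* raw guest frame number */
} gfn_t;

/* Wrap a raw frame number into the typed handle. */
static inline gfn_t _gfn(unsigned long g)
{
    gfn_t t = { .gfn = g };
    return t;
}

/* Unwrap the typed handle back to a raw frame number. */
static inline unsigned long gfn_x(gfn_t g)
{
    return g.gfn;
}

/* Arithmetic stays explicit: no silent mixing with plain integers. */
static inline gfn_t gfn_add(gfn_t g, unsigned long i)
{
    return _gfn(gfn_x(g) + i);
}

int main(void)
{
    gfn_t gfn = _gfn(0x1000);

    /* "gfn + 1" would not compile; the helper must be used instead. */
    gfn = gfn_add(gfn, 1);
    printf("gfn = %#lx\n", gfn_x(gfn));
    return 0;
}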