/* Figure out if we need to steal some freed memory for our cache */
steal_for_cache = ( p2m->pod.entry_count > p2m->pod.count );
- p2m_lock(p2m);
+ gfn_lock(p2m, gpfn, order);
if ( unlikely(d->is_dying) )
goto out_unlock;
}
out_unlock:
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gpfn, order);
out:
return ret;
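
For reference, the gfn_lock() family used throughout this patch can start life as a pure alias of the global p2m lock, so the conversion changes no behaviour while every call site is rewritten to name the gfn and order it protects. A minimal sketch of what the macros may expand to (assumed to sit alongside the p2m_lock() primitives, e.g. in mm-locks.h; they are not part of this hunk):

    /* Sketch only: per-gfn locking expressed as the global p2m lock.
     * The gfn and order arguments are accepted (and currently ignored)
     * so that call sites are already correct if the lock later becomes
     * fine-grained. */
    #define gfn_lock(p2m, gfn, order)    p2m_lock(p2m)
    #define gfn_unlock(p2m, gfn, order)  p2m_unlock(p2m)
    #define gfn_locked_by_me(p2m, gfn)   p2m_locked_by_me(p2m)
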
mfn_t mfn;
int i;
- ASSERT(p2m_locked_by_me(p2m));
+ ASSERT(gfn_locked_by_me(p2m, gfn));
/* This check is done with the p2m lock held. This will make sure that
* even if d->is_dying changes under our feet, p2m_pod_empty_cache()
if ( unlikely(d->is_dying) )
goto out_fail;
+
/* Because PoD does not keep a cache list for 1GB pages, it has to remap
 * the 1GB region as 2MB chunks for a retry. */
if ( order == PAGE_ORDER_1G )
 * split 1GB into 512 2MB pages here. But we only do it once here because
 * set_p2m_entry() should automatically shatter the 1GB page into
 * 512 2MB pages; the remaining 511 calls are unnecessary.
+ *
+ * NOTE: In a fine-grained p2m locking scenario this operation
+ * may need to promote its locking from gfn->1g superpage
*/
set_p2m_entry(p2m, gfn_aligned, _mfn(0), PAGE_ORDER_2M,
p2m_populate_on_demand, p2m->default_access);
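
To make the comment above concrete: only the first 2MB-order write into the 1GB region is needed, because shattering the PoD superpage fills in all 512 L2 entries at once. A sketch of the alignment feeding that call (the derivation of gfn_aligned is an assumption; it is not shown in this hunk):

    /* Sketch: align down to the 1GB boundary, then write one 2MB-order
     * PoD entry there.  set_p2m_entry() shatters the 1GB superpage and
     * marks all 512 resulting 2MB entries populate-on-demand, so the
     * other 511 calls would be no-ops. */
    unsigned long gfn_aligned = (gfn >> PAGE_ORDER_1G) << PAGE_ORDER_1G;

    set_p2m_entry(p2m, gfn_aligned, _mfn(0), PAGE_ORDER_2M,
                  p2m_populate_on_demand, p2m->default_access);
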
if ( rc != 0 )
return rc;
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, order);
P2M_DEBUG("mark pod gfn=%#lx\n", gfn);
BUG_ON(p2m->pod.entry_count < 0);
}
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, order);
out:
return rc;
}
-/* Non-ept "lock-and-check" wrapper */
-static int p2m_pod_check_and_populate(struct p2m_domain *p2m, unsigned long gfn,
- l1_pgentry_t *p2m_entry, int order,
- p2m_query_t q)
-{
- int r;
-
- /* This is called from the p2m lookups, which can happen with or
- * without the lock held. */
- p2m_lock_recursive(p2m);
-
- /* Check to make sure this is still PoD */
- if ( p2m_flags_to_type(l1e_get_flags(*p2m_entry)) != p2m_populate_on_demand )
- {
- p2m_unlock(p2m);
- return 0;
- }
-
- r = p2m_pod_demand_populate(p2m, gfn, order, q);
-
- p2m_unlock(p2m);
-
- return r;
-}
-
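
With the wrapper removed, the lookup paths below call p2m_pod_demand_populate() directly, so the "is this still PoD?" re-check must now happen inside it, under the gfn lock. A sketch of the shape that check might take there (the exact re-read is an assumption; it is not shown in this hunk):

    /* Inside p2m_pod_demand_populate(), after gfn_lock(p2m, gfn, order):
     * re-read the entry and bail out if another vCPU populated it while
     * we were unlocked; the caller simply retries its lookup. */
    p2m_type_t t;
    p2m_access_t a;

    p2m->get_entry(p2m, gfn, &t, &a, p2m_query, NULL);
    if ( t != p2m_populate_on_demand )
    {
        gfn_unlock(p2m, gfn, order);
        return 0;
    }
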
/* Read the current domain's p2m table (through the linear mapping). */
static mfn_t p2m_gfn_to_mfn_current(struct p2m_domain *p2m,
unsigned long gfn, p2m_type_t *t,
/* The read has succeeded, so we know that mapping exists */
if ( q != p2m_query )
{
- if ( !p2m_pod_check_and_populate(p2m, gfn,
- (l1_pgentry_t *) &l3e, PAGE_ORDER_1G, q) )
+ if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
goto pod_retry_l3;
p2mt = p2m_invalid;
gdprintk(XENLOG_ERR, "%s: Allocate 1GB failed!\n", __func__);
* exits at this point. */
if ( q != p2m_query )
{
- if ( !p2m_pod_check_and_populate(p2m, gfn,
- p2m_entry, 9, q) )
+ if ( !p2m_pod_demand_populate(p2m, gfn,
+ PAGE_ORDER_2M, q) )
goto pod_retry_l2;
/* Allocate failed. */
* exits at this point. */
if ( q != p2m_query )
{
- if ( !p2m_pod_check_and_populate(p2m, gfn,
- (l1_pgentry_t *)p2m_entry, 0, q) )
+ if ( !p2m_pod_demand_populate(p2m, gfn,
+ PAGE_ORDER_4K, q) )
goto pod_retry_l1;
/* Allocate failed. */
{
if ( q != p2m_query )
{
- if ( !p2m_pod_check_and_populate(p2m, gfn,
- (l1_pgentry_t *) l3e, PAGE_ORDER_1G, q) )
+ if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
goto pod_retry_l3;
gdprintk(XENLOG_ERR, "%s: Allocate 1GB failed!\n", __func__);
}
if ( p2m_flags_to_type(l2e_get_flags(*l2e)) == p2m_populate_on_demand )
{
if ( q != p2m_query ) {
- if ( !p2m_pod_check_and_populate(p2m, gfn,
- (l1_pgentry_t *)l2e, PAGE_ORDER_2M, q) )
+ if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_2M, q) )
goto pod_retry_l2;
} else
*t = p2m_populate_on_demand;
if ( p2m_flags_to_type(l1e_get_flags(*l1e)) == p2m_populate_on_demand )
{
if ( q != p2m_query ) {
- if ( !p2m_pod_check_and_populate(p2m, gfn,
- (l1_pgentry_t *)l1e, PAGE_ORDER_4K, q) )
+ if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_4K, q) )
goto pod_retry_l1;
} else
*t = p2m_populate_on_demand;
/* For now only perform locking on hap domains */
if ( locked && (hap_enabled(p2m->domain)) )
/* Grab the lock here, don't release until put_gfn */
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
/* Nothing to do in this case */
return;
- ASSERT(p2m_locked_by_me(p2m));
+ ASSERT(gfn_locked_by_me(p2m, gfn));
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
}
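
Since the lock is now taken by the lookup itself and only dropped by put_gfn(), every caller must treat the translation as a critical section. A sketch of the intended caller-side pairing (the get_gfn() wrapper is assumed; only put_gfn is named in the hunk above):

    /* Sketch: the gfn lock is held from the lookup until put_gfn(). */
    p2m_type_t t;
    mfn_t mfn = get_gfn(d, gfn, &t);    /* takes gfn_lock(p2m, gfn, 0) */

    if ( mfn_valid(mfn) )
    {
        /* The mapping cannot change under our feet here. */
    }

    put_gfn(d, gfn);                    /* drops gfn_lock(p2m, gfn, 0) */
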
int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
unsigned int order;
int rc = 1;
- ASSERT(p2m_locked_by_me(p2m));
+ ASSERT(gfn_locked_by_me(p2m, gfn));
while ( todo )
{
return;
}
+ ASSERT(gfn_locked_by_me(p2m, gfn));
P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn, mfn);
if ( mfn_valid(_mfn(mfn)) )
unsigned long mfn, unsigned int page_order)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, page_order);
p2m_remove_page(p2m, gfn, mfn, page_order);
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, page_order);
}
int
BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, &pt, &a, p2m_query, NULL);
if ( pt == ot )
set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt, p2m->default_access);
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
return pt;
}
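
The conversions that follow (mmio, shared, paging and access entries) all reduce to this same single-gfn read-modify-write transaction. As a hypothetical usage of the function above (assuming the p2m_change_type() signature of this era, and types not shown in the hunk):

    /* Hypothetical caller: atomically flip a log-dirty page back to RW.
     * The gfn lock makes the read-check-write sequence atomic with
     * respect to any other updater of this gfn. */
    p2m_type_t old = p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw);

    if ( old != p2m_ram_logdirty )
    {
        /* Someone else changed the type first; nothing more to do. */
    }
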
if ( !paging_mode_translate(d) )
return 0;
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
omfn = p2m->get_entry(p2m, gfn, &ot, &a, p2m_query, NULL);
if ( p2m_is_grant(ot) )
{
P2M_DEBUG("set mmio %lx %lx\n", gfn, mfn_x(mfn));
rc = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_mmio_direct, p2m->default_access);
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
if ( 0 == rc )
gdprintk(XENLOG_ERR,
"set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
if ( !paging_mode_translate(d) )
return 0;
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, &t, &a, p2m_query, NULL);
/* Do not use mfn_valid() here as it will usually fail for MMIO pages. */
rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K, p2m_invalid, p2m->default_access);
out:
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
return rc;
}
if ( !paging_mode_translate(p2m->domain) )
return 0;
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
omfn = p2m->get_entry(p2m, gfn, &ot, &a, p2m_query, NULL);
/* At the moment we only allow p2m change if gfn has already been made
* sharable first */
P2M_DEBUG("set shared %lx %lx\n", gfn, mfn_x(mfn));
rc = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_shared, p2m->default_access);
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
if ( 0 == rc )
gdprintk(XENLOG_ERR,
"set_shared_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
mfn_t mfn;
int ret = -EBUSY;
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
ret = 0;
out:
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
return ret;
}
struct p2m_domain *p2m = p2m_get_hostp2m(d);
int ret = -EBUSY;
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
/* Get mfn */
mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
put_page(page);
out:
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
return ret;
}
req.type = MEM_EVENT_TYPE_PAGING;
/* Fix p2m mapping */
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
/* Allow only nominated or evicted pages to enter page-in path */
if ( p2mt == p2m_ram_paging_out || p2mt == p2m_ram_paged )
set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in, a);
}
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
/* Pause domain if request came from guest and gfn has paging type */
if ( p2m_is_paging(p2mt) && v->domain == d )
(!access_ok(user_ptr, PAGE_SIZE)) )
return -EINVAL;
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
ret = 0;
out:
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
return ret;
}
/* Fix p2m entry if the page was not dropped */
if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
{
- p2m_lock(p2m);
+ gfn_lock(p2m, rsp.gfn, 0);
mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, p2m_query, NULL);
/* Allow only pages which were prepared properly, or pages which
* were nominated but not evicted */
p2m_ram_rw, a);
set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
}
- p2m_unlock(p2m);
+ gfn_unlock(p2m, rsp.gfn, 0);
}
/* Unpause domain */
if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
p2m_access_t p2ma;
/* First, handle rx2rw conversion automatically */
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, p2m_query, NULL);
if ( access_w && p2ma == p2m_access_rx2rw )
{
p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw);
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
return 1;
}
else if ( p2ma == p2m_access_n2rwx )
ASSERT(access_w || access_r || access_x);
p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
}
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
/* Otherwise, check if there is a memory event listener, and send the message along */
if ( mem_event_claim_slot(d, &d->mem_event->access) == -ENOSYS )
if ( p2ma != p2m_access_n2rwx )
{
/* A listener is not required, so clear the access restrictions */
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
}
return 1;
}
return 0;
}
+ gfn_lock(p2m, pfn, 0);
mfn = p2m->get_entry(p2m, pfn, &t, &a, p2m_query, NULL);
+ gfn_unlock(p2m, pfn, 0);
+
if ( mfn_x(mfn) == INVALID_MFN )
return -ESRCH;