/* Unpin this top-level shadow */
trace_shadow_prealloc_unpin(d, smfn);
- sh_unpin(v, smfn);
+ sh_unpin(d, smfn);
/* See if that freed up enough space */
if ( d->arch.paging.shadow.free_pages >= pages ) return;
foreach_pinned_shadow(d, sp, t)
{
smfn = page_to_mfn(sp);
- sh_unpin(v, smfn);
+ sh_unpin(d, smfn);
}
/* Second pass: unhook entries of in-use shadows */
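With this change sh_unpin() takes the domain directly, so call sites like the pinned-shadow walks above just pass the d they already hold. A call site that only has a struct vcpu in scope would presumably recover the owning domain itself, roughly as in this sketch (the surrounding variables are illustrative, not part of the patch):

    struct domain *d = v->domain;   /* the helper no longer does this lookup */

    sh_unpin(d, smfn);              /* was: sh_unpin(v, smfn) */
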
break; \
} \
if ( sh_type_is_pinnable(d, t) ) \
- sh_unpin(v, smfn); \
+ sh_unpin(d, smfn); \
else if ( sh_type_has_up_pointer(d, t) ) \
sh_remove_shadow_via_pointer(v, smfn); \
if( !fast \
{
/* About to install a new reference */
mfn_t sl3mfn = shadow_l4e_get_mfn(new_sl4e);
- ok = sh_get_ref(v, sl3mfn, paddr);
+ ok = sh_get_ref(d, sl3mfn, paddr);
/* Are we pinning l3 shadows to handle weird Linux behaviour? */
if ( sh_type_is_pinnable(d, SH_type_l3_64_shadow) )
- ok |= sh_pin(v, sl3mfn);
+ ok |= sh_pin(d, sl3mfn);
if ( !ok )
{
domain_crash(d);
if ( shadow_l3e_get_flags(new_sl3e) & _PAGE_PRESENT )
{
/* About to install a new reference */
- if ( !sh_get_ref(v, shadow_l3e_get_mfn(new_sl3e), paddr) )
+ if ( !sh_get_ref(d, shadow_l3e_get_mfn(new_sl3e), paddr) )
{
domain_crash(d);
return SHADOW_SET_ERROR;
ASSERT(mfn_to_page(sl1mfn)->u.sh.head);
/* About to install a new reference */
- if ( !sh_get_ref(v, sl1mfn, paddr) )
+ if ( !sh_get_ref(d, sl1mfn, paddr) )
{
domain_crash(d);
return SHADOW_SET_ERROR;
page_list_for_each_safe(sp, t, &d->arch.paging.shadow.pinned_shadows)
{
if ( sp->u.sh.type == SH_type_l3_64_shadow )
- sh_unpin(v, page_to_mfn(sp));
+ sh_unpin(d, page_to_mfn(sp));
}
d->arch.paging.shadow.opt_flags &= ~SHOPT_LINUX_L3_TOPLEVEL;
sh_reset_l3_up_pointers(v);
ASSERT(mfn_valid(smfn));
/* Pin the shadow and put it (back) on the list of pinned shadows */
- if ( sh_pin(v, smfn) == 0 )
+ if ( sh_pin(d, smfn) == 0 )
{
SHADOW_ERROR("can't pin %#lx as toplevel shadow\n", mfn_x(smfn));
domain_crash(d);
/* Take a ref to this page: it will be released in sh_detach_old_tables()
* or the next call to set_toplevel_shadow() */
- if ( !sh_get_ref(v, smfn, 0) )
+ if ( !sh_get_ref(d, smfn, 0) )
{
SHADOW_ERROR("can't install %#lx as toplevel shadow\n", mfn_x(smfn));
domain_crash(d);
/* Need to repin the old toplevel shadow if it's been unpinned
* by shadow_prealloc(): in PV mode we're still running on this
* shadow and it's not safe to free it yet. */
- if ( !mfn_to_page(old_smfn)->u.sh.pinned && !sh_pin(v, old_smfn) )
+ if ( !mfn_to_page(old_smfn)->u.sh.pinned && !sh_pin(d, old_smfn) )
{
SHADOW_ERROR("can't re-pin %#lx\n", mfn_x(old_smfn));
domain_crash(d);
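In the set_toplevel_shadow() fragments above, two separate references are in play: sh_pin() keeps the new top-level shadow on the pinned-shadows list, while the extra sh_get_ref() reference backs the pointer installed for the vcpu and is only dropped in sh_detach_old_tables() or on the next set_toplevel_shadow() call; the final hunk then re-pins the old top level if shadow_prealloc() unpinned it while a PV guest is still running on it. Condensed into a sketch under the new domain-based interface (SHADOW_ERROR messages omitted; purely illustrative, not the literal function body):

    if ( sh_pin(d, smfn) == 0 )                /* keep the new toplevel pinned */
        domain_crash(d);

    if ( !sh_get_ref(d, smfn, 0) )             /* ref backing the vcpu's pointer */
        domain_crash(d);

    /* ... later, when retiring old_smfn ... */
    if ( !mfn_to_page(old_smfn)->u.sh.pinned && !sh_pin(d, old_smfn) )
        domain_crash(d);                       /* PV guest still runs on old_smfn */
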
* and the physical address of the shadow entry that holds the ref (or zero
* if the ref is held by something else).
* Returns 0 for failure, 1 for success. */
-static inline int sh_get_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
+static inline int sh_get_ref(struct domain *d, mfn_t smfn, paddr_t entry_pa)
{
- struct domain *d = v->domain;
u32 x, nx;
struct page_info *sp = mfn_to_page(smfn);
/* Pin a shadow page: take an extra refcount, set the pin bit,
* and put the shadow at the head of the list of pinned shadows.
* Returns 0 for failure, 1 for success. */
-static inline int sh_pin(struct vcpu *v, mfn_t smfn)
+static inline int sh_pin(struct domain *d, mfn_t smfn)
{
- struct domain *d = v->domain;
struct page_info *sp[4];
struct page_list_head *pin_list;
unsigned int i, pages;
else
{
/* Not pinned: pin it! */
- if ( !sh_get_ref(v, smfn, 0) )
+ if ( !sh_get_ref(d, smfn, 0) )
return 0;
sp[0]->u.sh.pinned = 1;
}
/* Unpin a shadow page: unset the pin bit, take the shadow off the list
* of pinned shadows, and release the extra ref. */
-static inline void sh_unpin(struct vcpu *v, mfn_t smfn)
+static inline void sh_unpin(struct domain *d, mfn_t smfn)
{
- struct domain *d = v->domain;
struct page_list_head tmp_list, *pin_list;
struct page_info *sp, *next;
unsigned int i, head_type;
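
Taken together, the helpers end up with domain-based prototypes: sh_get_ref(d, smfn, entry_pa) and sh_pin(d, smfn) return 0 on failure and 1 on success, and sh_unpin(d, smfn) releases the pin again. A minimal usage sketch under the new interface (smfn stands for whichever shadow page the caller manages; illustrative only):

    if ( !sh_pin(d, smfn) )        /* pinning takes its own reference */
        domain_crash(d);

    /* ... use the shadow ... */

    sh_unpin(d, smfn);             /* clears the pin bit and drops that reference */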