* which will decrement refcounts appropriately and return memory to the
* free pool. */
-void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
+void sh_destroy_shadow(struct domain *d, mfn_t smfn)
{
- struct domain *d = v->domain;
struct page_info *sp = mfn_to_page(smfn);
unsigned int t = sp->u.sh.type;
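/* Each SHADOW_INTERNAL_NAME(..., N) call below resolves to the destructor
 * built for N guest paging levels (2 = 32-bit, 3 = PAE, 4 = 64-bit). */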
switch ( t )
{
case SH_type_l1_32_shadow:
case SH_type_fl1_32_shadow:
- SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 2)(v, smfn);
+ SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 2)(d, smfn);
break;
case SH_type_l2_32_shadow:
- SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 2)(v, smfn);
+ SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 2)(d, smfn);
break;
case SH_type_l1_pae_shadow:
case SH_type_fl1_pae_shadow:
- SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 3)(v, smfn);
+ SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 3)(d, smfn);
break;
case SH_type_l2_pae_shadow:
case SH_type_l2h_pae_shadow:
- SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 3)(v, smfn);
+ SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 3)(d, smfn);
break;
case SH_type_l1_64_shadow:
case SH_type_fl1_64_shadow:
- SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4)(v, smfn);
+ SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4)(d, smfn);
break;
case SH_type_l2h_64_shadow:
ASSERT(is_pv_32on64_domain(d));
/* Fall through... */
case SH_type_l2_64_shadow:
- SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4)(v, smfn);
+ SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4)(d, smfn);
break;
case SH_type_l3_64_shadow:
- SHADOW_INTERNAL_NAME(sh_destroy_l3_shadow, 4)(v, smfn);
+ SHADOW_INTERNAL_NAME(sh_destroy_l3_shadow, 4)(d, smfn);
break;
case SH_type_l4_64_shadow:
- SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, 4)(v, smfn);
+ SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, 4)(d, smfn);
break;
default:
{
flags |= SHADOW_SET_FLUSH;
}
- sh_put_ref(v, osl3mfn, paddr);
+ sh_put_ref(d, osl3mfn, paddr);
}
return flags;
}
{
flags |= SHADOW_SET_FLUSH;
}
- sh_put_ref(v, osl2mfn, paddr);
+ sh_put_ref(d, osl2mfn, paddr);
}
return flags;
}
{
flags |= SHADOW_SET_FLUSH;
}
- sh_put_ref(v, osl1mfn, paddr);
+ sh_put_ref(d, osl1mfn, paddr);
}
return flags;
}
*/
#if GUEST_PAGING_LEVELS >= 4
-void sh_destroy_l4_shadow(struct vcpu *v, mfn_t smfn)
+void sh_destroy_l4_shadow(struct domain *d, mfn_t smfn)
{
- struct domain *d = v->domain;
shadow_l4e_t *sl4e;
struct page_info *sp = mfn_to_page(smfn);
u32 t = sp->u.sh.type;
SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, d, {
if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT )
{
- sh_put_ref(v, shadow_l4e_get_mfn(*sl4e),
+ sh_put_ref(d, shadow_l4e_get_mfn(*sl4e),
(((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT)
| ((unsigned long)sl4e & ~PAGE_MASK));
}
shadow_free(d, smfn);
}
-void sh_destroy_l3_shadow(struct vcpu *v, mfn_t smfn)
+void sh_destroy_l3_shadow(struct domain *d, mfn_t smfn)
{
- struct domain *d = v->domain;
shadow_l3e_t *sl3e;
struct page_info *sp = mfn_to_page(smfn);
u32 t = sp->u.sh.type;
sl3mfn = smfn;
SHADOW_FOREACH_L3E(sl3mfn, sl3e, 0, 0, {
if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT )
- sh_put_ref(v, shadow_l3e_get_mfn(*sl3e),
+ sh_put_ref(d, shadow_l3e_get_mfn(*sl3e),
(((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT)
| ((unsigned long)sl3e & ~PAGE_MASK));
});
#endif /* GUEST_PAGING_LEVELS >= 4 */
-void sh_destroy_l2_shadow(struct vcpu *v, mfn_t smfn)
+void sh_destroy_l2_shadow(struct domain *d, mfn_t smfn)
{
- struct domain *d = v->domain;
shadow_l2e_t *sl2e;
struct page_info *sp = mfn_to_page(smfn);
u32 t = sp->u.sh.type;
sl2mfn = smfn;
SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, d, {
if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT )
- sh_put_ref(v, shadow_l2e_get_mfn(*sl2e),
+ sh_put_ref(d, shadow_l2e_get_mfn(*sl2e),
(((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT)
| ((unsigned long)sl2e & ~PAGE_MASK));
});
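/* Put the memory back in the pool. */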
shadow_free(d, smfn);
}
-void sh_destroy_l1_shadow(struct vcpu *v, mfn_t smfn)
+void sh_destroy_l1_shadow(struct domain *d, mfn_t smfn)
{
- struct domain *d = v->domain;
shadow_l1e_t *sl1e;
struct page_info *sp = mfn_to_page(smfn);
u32 t = sp->u.sh.type;
static void
sh_detach_old_tables(struct vcpu *v)
{
+ struct domain *d = v->domain;
mfn_t smfn;
int i = 0;
#else
if ( v->arch.paging.shadow.guest_vtable )
{
- struct domain *d = v->domain;
if ( shadow_mode_external(d) || shadow_mode_translate(d) )
sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
v->arch.paging.shadow.guest_vtable = NULL;
{
smfn = pagetable_get_mfn(v->arch.shadow_table[i]);
if ( mfn_x(smfn) )
- sh_put_ref(v, smfn, 0);
+ sh_put_ref(d, smfn, 0);
v->arch.shadow_table[i] = pagetable_null();
}
}
SHADOW_ERROR("can't re-pin %#lx\n", mfn_x(old_smfn));
domain_crash(d);
}
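/* No shadow pagetable entry holds this reference, so entry_pa is zero. */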
- sh_put_ref(v, old_smfn, 0);
+ sh_put_ref(d, old_smfn, 0);
}
}
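/* Per-level destructor declarations: SHADOW_INTERNAL_NAME() mangles
 * GUEST_LEVELS into each symbol name. */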
extern void
SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, GUEST_LEVELS)(
- struct vcpu *v, mfn_t smfn);
+ struct domain *d, mfn_t smfn);
extern void
SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, GUEST_LEVELS)(
- struct vcpu *v, mfn_t smfn);
+ struct domain *d, mfn_t smfn);
extern void
SHADOW_INTERNAL_NAME(sh_destroy_l3_shadow, GUEST_LEVELS)(
- struct vcpu *v, mfn_t smfn);
+ struct domain *d, mfn_t smfn);
extern void
SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, GUEST_LEVELS)(
- struct vcpu *v, mfn_t smfn);
+ struct domain *d, mfn_t smfn);
extern void
SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings, GUEST_LEVELS)
/**************************************************************************/
/* Shadow-page refcounting. */
-void sh_destroy_shadow(struct vcpu *v, mfn_t smfn);
+void sh_destroy_shadow(struct domain *d, mfn_t smfn);
/* Increase the refcount of a shadow page. Arguments are the mfn to refcount,
* and the physical address of the shadow entry that holds the ref (or zero
/* Decrease the refcount of a shadow page. As for get_ref, takes the
* physical address of the shadow entry that held this reference. */
-static inline void sh_put_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
+static inline void sh_put_ref(struct domain *d, mfn_t smfn, paddr_t entry_pa)
{
- struct domain *d = v->domain;
u32 x, nx;
struct page_info *sp = mfn_to_page(smfn);
sp->u.sh.count = nx;
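/* Dropping the last reference destroys the shadow and frees its page. */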
if ( unlikely(nx == 0) )
- sh_destroy_shadow(v, smfn);
+ sh_destroy_shadow(d, smfn);
}
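/*
 * Usage (illustrative): a reference held through a shadow pagetable entry is
 * dropped with that entry's physical address, e.g. sh_put_ref(d, osl2mfn, paddr);
 * references not tied to an entry (pinned or top-level shadows) are dropped
 * with entry_pa == 0, e.g. sh_put_ref(d, smfn, 0).
 */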
}
sh_terminate_list(&tmp_list);
- sh_put_ref(v, smfn, 0);
+ sh_put_ref(d, smfn, 0);
}