*/
static inline mfn_t
-get_fl1_shadow_status(struct vcpu *v, gfn_t gfn)
+get_fl1_shadow_status(struct domain *d, gfn_t gfn)
/* Look for FL1 shadows in the hash table */
{
- struct domain *d = v->domain;
mfn_t smfn = shadow_hash_lookup(d, gfn_x(gfn), SH_type_fl1_shadow);
ASSERT(!mfn_valid(smfn) || mfn_to_page(smfn)->u.sh.head);
return smfn;
}
static inline mfn_t
-get_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
+get_shadow_status(struct domain *d, mfn_t gmfn, u32 shadow_type)
/* Look for shadows in the hash table */
{
- struct domain *d = v->domain;
mfn_t smfn = shadow_hash_lookup(d, mfn_x(gmfn), shadow_type);
ASSERT(!mfn_valid(smfn) || mfn_to_page(smfn)->u.sh.head);
perfc_incr(shadow_get_shadow_status);
return smfn;
}
static inline void
-set_fl1_shadow_status(struct vcpu *v, gfn_t gfn, mfn_t smfn)
+set_fl1_shadow_status(struct domain *d, gfn_t gfn, mfn_t smfn)
/* Put an FL1 shadow into the hash table */
{
- struct domain *d = v->domain;
SHADOW_PRINTK("gfn=%"SH_PRI_gfn", type=%08x, smfn=%05lx\n",
gfn_x(gfn), SH_type_fl1_shadow, mfn_x(smfn));
ASSERT(mfn_to_page(smfn)->u.sh.head);
shadow_hash_insert(d, gfn_x(gfn), SH_type_fl1_shadow, smfn);
}
static inline void
-set_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type, mfn_t smfn)
+set_shadow_status(struct domain *d, mfn_t gmfn, u32 shadow_type, mfn_t smfn)
/* Put a shadow into the hash table */
{
- struct domain *d = v->domain;
int res;
- SHADOW_PRINTK("d=%d, v=%d, gmfn=%05lx, type=%08x, smfn=%05lx\n",
- d->domain_id, v->vcpu_id, mfn_x(gmfn),
- shadow_type, mfn_x(smfn));
+ SHADOW_PRINTK("d=%d: gmfn=%lx, type=%08x, smfn=%lx\n",
+ d->domain_id, mfn_x(gmfn), shadow_type, mfn_x(smfn));
ASSERT(mfn_to_page(smfn)->u.sh.head);
}
static inline void
-delete_fl1_shadow_status(struct vcpu *v, gfn_t gfn, mfn_t smfn)
+delete_fl1_shadow_status(struct domain *d, gfn_t gfn, mfn_t smfn)
/* Remove a shadow from the hash table */
{
- struct domain *d = v->domain;
SHADOW_PRINTK("gfn=%"SH_PRI_gfn", type=%08x, smfn=%05lx\n",
gfn_x(gfn), SH_type_fl1_shadow, mfn_x(smfn));
ASSERT(mfn_to_page(smfn)->u.sh.head);
shadow_hash_delete(d, gfn_x(gfn), SH_type_fl1_shadow, smfn);
}
static inline void
-delete_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type, mfn_t smfn)
+delete_shadow_status(struct domain *d, mfn_t gmfn, u32 shadow_type, mfn_t smfn)
/* Remove a shadow from the hash table */
{
- struct domain *d = v->domain;
- SHADOW_PRINTK("d=%d, v=%d, gmfn=%05lx, type=%08x, smfn=%05lx\n",
- d->domain_id, v->vcpu_id,
- mfn_x(gmfn), shadow_type, mfn_x(smfn));
+ SHADOW_PRINTK("d=%d: gmfn=%lx, type=%08x, smfn=%lx\n",
+ d->domain_id, mfn_x(gmfn), shadow_type, mfn_x(smfn));
ASSERT(mfn_to_page(smfn)->u.sh.head);
shadow_hash_delete(d, mfn_x(gmfn), shadow_type, smfn);
/* 32-on-64 PV guests don't own their l4 pages; see set_shadow_status */
* through the audit mechanisms */
static void sh_audit_gw(struct vcpu *v, walk_t *gw)
{
+ struct domain *d = v->domain;
mfn_t smfn;
if ( !(SHADOW_AUDIT_ENABLE) )
#if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
if ( mfn_valid(gw->l4mfn)
- && mfn_valid((smfn = get_shadow_status(v, gw->l4mfn,
+ && mfn_valid((smfn = get_shadow_status(d, gw->l4mfn,
SH_type_l4_shadow))) )
(void) sh_audit_l4_table(v, smfn, _mfn(INVALID_MFN));
if ( mfn_valid(gw->l3mfn)
- && mfn_valid((smfn = get_shadow_status(v, gw->l3mfn,
+ && mfn_valid((smfn = get_shadow_status(d, gw->l3mfn,
SH_type_l3_shadow))) )
(void) sh_audit_l3_table(v, smfn, _mfn(INVALID_MFN));
#endif /* PAE or 64... */
if ( mfn_valid(gw->l2mfn) )
{
- if ( mfn_valid((smfn = get_shadow_status(v, gw->l2mfn,
+ if ( mfn_valid((smfn = get_shadow_status(d, gw->l2mfn,
SH_type_l2_shadow))) )
(void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
#if GUEST_PAGING_LEVELS == 3
- if ( mfn_valid((smfn = get_shadow_status(v, gw->l2mfn,
+ if ( mfn_valid((smfn = get_shadow_status(d, gw->l2mfn,
SH_type_l2h_shadow))) )
(void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
#endif
}
if ( mfn_valid(gw->l1mfn)
- && mfn_valid((smfn = get_shadow_status(v, gw->l1mfn,
+ && mfn_valid((smfn = get_shadow_status(d, gw->l1mfn,
SH_type_l1_shadow))) )
(void) sh_audit_l1_table(v, smfn, _mfn(INVALID_MFN));
else if ( (guest_l2e_get_flags(gw->l2e) & _PAGE_PRESENT)
&& (guest_l2e_get_flags(gw->l2e) & _PAGE_PSE)
&& mfn_valid(
- (smfn = get_fl1_shadow_status(v, guest_l2e_get_gfn(gw->l2e)))) )
+ (smfn = get_fl1_shadow_status(d, guest_l2e_get_gfn(gw->l2e)))) )
(void) sh_audit_fl1_table(v, smfn, _mfn(INVALID_MFN));
}
}
shadow_promote(v, gmfn, shadow_type);
- set_shadow_status(v, gmfn, shadow_type, smfn);
+ set_shadow_status(d, gmfn, shadow_type, smfn);
return smfn;
}
/* Make a splintered superpage shadow */
static mfn_t
-make_fl1_shadow(struct vcpu *v, gfn_t gfn)
+make_fl1_shadow(struct domain *d, gfn_t gfn)
{
- struct domain *d = v->domain;
mfn_t smfn = shadow_alloc(d, SH_type_fl1_shadow, gfn_x(gfn));
SHADOW_DEBUG(MAKE_SHADOW, "(%" SH_PRI_gfn ")=>%" PRI_mfn "\n",
gfn_x(gfn), mfn_x(smfn));
- set_fl1_shadow_status(v, gfn, smfn);
+ set_fl1_shadow_status(d, gfn, smfn);
return smfn;
}
fetch_type_t ft,
int *resync)
{
+ struct domain *d = v->domain;
mfn_t sl4mfn;
shadow_l4e_t *sl4e;
if ( !mfn_valid(gw->l3mfn) ) return NULL; /* No guest page. */
int r;
shadow_l4e_t new_sl4e;
/* No l3 shadow installed: find and install it. */
- *sl3mfn = get_shadow_status(v, gw->l3mfn, SH_type_l3_shadow);
+ *sl3mfn = get_shadow_status(d, gw->l3mfn, SH_type_l3_shadow);
if ( !mfn_valid(*sl3mfn) )
{
/* No l3 shadow of this page exists at all: make one. */
int *resync)
{
#if GUEST_PAGING_LEVELS >= 4 /* 64bit... */
+ struct domain *d = v->domain;
mfn_t sl3mfn = _mfn(INVALID_MFN);
shadow_l3e_t *sl3e;
if ( !mfn_valid(gw->l2mfn) ) return NULL; /* No guest page. */
t = SH_type_l2h_shadow;
/* No l2 shadow installed: find and install it. */
- *sl2mfn = get_shadow_status(v, gw->l2mfn, t);
+ *sl2mfn = get_shadow_status(d, gw->l2mfn, t);
if ( !mfn_valid(*sl2mfn) )
{
/* No l2 shadow of this page exists at all: make one. */
mfn_t *sl1mfn,
fetch_type_t ft)
{
+ struct domain *d = v->domain;
mfn_t sl2mfn;
int resync = 0;
shadow_l2e_t *sl2e;
{
/* Splintering a superpage */
gfn_t l2gfn = guest_l2e_get_gfn(gw->l2e);
- *sl1mfn = get_fl1_shadow_status(v, l2gfn);
+ *sl1mfn = get_fl1_shadow_status(d, l2gfn);
if ( !mfn_valid(*sl1mfn) )
{
/* No fl1 shadow of this superpage exists at all: make one. */
- *sl1mfn = make_fl1_shadow(v, l2gfn);
+ *sl1mfn = make_fl1_shadow(d, l2gfn);
}
}
else
{
/* Shadowing an actual guest l1 table */
if ( !mfn_valid(gw->l1mfn) ) return NULL; /* No guest page. */
- *sl1mfn = get_shadow_status(v, gw->l1mfn, SH_type_l1_shadow);
+ *sl1mfn = get_shadow_status(d, gw->l1mfn, SH_type_l1_shadow);
if ( !mfn_valid(*sl1mfn) )
{
/* No l1 shadow of this page exists at all: make one. */
/* Record that the guest page isn't shadowed any more (in this type) */
gmfn = backpointer(sp);
- delete_shadow_status(v, gmfn, t, smfn);
+ delete_shadow_status(d, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
/* Decrement refcounts of all the old entries */
sl4mfn = smfn;
/* Record that the guest page isn't shadowed any more (in this type) */
gmfn = backpointer(sp);
- delete_shadow_status(v, gmfn, t, smfn);
+ delete_shadow_status(d, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
/* Decrement refcounts of all the old entries */
/* Record that the guest page isn't shadowed any more (in this type) */
gmfn = backpointer(sp);
- delete_shadow_status(v, gmfn, t, smfn);
+ delete_shadow_status(d, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
/* Decrement refcounts of all the old entries */
if ( t == SH_type_fl1_shadow )
{
gfn_t gfn = _gfn(sp->v.sh.back);
- delete_fl1_shadow_status(v, gfn, smfn);
+ delete_fl1_shadow_status(d, gfn, smfn);
}
else
{
mfn_t gmfn = backpointer(sp);
- delete_shadow_status(v, gmfn, t, smfn);
+ delete_shadow_status(d, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
}
gfn_t gl3gfn = guest_l4e_get_gfn(new_gl4e);
mfn_t gl3mfn = get_gfn_query_unlocked(d, gfn_x(gl3gfn), &p2mt);
if ( p2m_is_ram(p2mt) )
- sl3mfn = get_shadow_status(v, gl3mfn, SH_type_l3_shadow);
+ sl3mfn = get_shadow_status(d, gl3mfn, SH_type_l3_shadow);
else if ( p2mt != p2m_populate_on_demand )
result |= SHADOW_SET_ERROR;
gfn_t gl2gfn = guest_l3e_get_gfn(new_gl3e);
mfn_t gl2mfn = get_gfn_query_unlocked(d, gfn_x(gl2gfn), &p2mt);
if ( p2m_is_ram(p2mt) )
- sl2mfn = get_shadow_status(v, gl2mfn, SH_type_l2_shadow);
+ sl2mfn = get_shadow_status(d, gl2mfn, SH_type_l2_shadow);
else if ( p2mt != p2m_populate_on_demand )
result |= SHADOW_SET_ERROR;
{
// superpage -- need to look up the shadow L1 which holds the
// splitters...
- sl1mfn = get_fl1_shadow_status(v, gl1gfn);
+ sl1mfn = get_fl1_shadow_status(d, gl1gfn);
#if 0
// XXX - it's possible that we want to do some kind of prefetch
// for superpage fl1's here, but this is *not* on the demand path,
// so we'll hold off trying that for now...
//
if ( !mfn_valid(sl1mfn) )
- sl1mfn = make_fl1_shadow(v, gl1gfn);
+ sl1mfn = make_fl1_shadow(d, gl1gfn);
#endif
}
else
{
mfn_t gl1mfn = get_gfn_query_unlocked(d, gfn_x(gl1gfn), &p2mt);
if ( p2m_is_ram(p2mt) )
- sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow);
+ sl1mfn = get_shadow_status(d, gl1mfn, SH_type_l1_shadow);
else if ( p2mt != p2m_populate_on_demand )
result |= SHADOW_SET_ERROR;
}
ASSERT(mfn_valid(snpmfn));
- sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow);
+ sl1mfn = get_shadow_status(d, gl1mfn, SH_type_l1_shadow);
ASSERT(mfn_valid(sl1mfn)); /* Otherwise we would not have been called */
snp = sh_map_domain_page(snpmfn);
* called in the *mode* of the vcpu that unsynced it. Clear? Good. */
int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
{
+ struct domain *d = v->domain;
struct page_info *sp;
mfn_t smfn;
if ( !sh_type_has_up_pointer(v, SH_type_l1_shadow) )
return 0;
- smfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow);
+ smfn = get_shadow_status(d, gl1mfn, SH_type_l1_shadow);
ASSERT(mfn_valid(smfn)); /* Otherwise we would not have been called */
/* Up to l2 */
mfn_t smfn, void *se))
/* Generic function for mapping and validating. */
{
+ struct domain *d = v->domain;
mfn_t smfn, smfn2, map_mfn;
shadow_l1e_t *sl1p;
u32 shadow_idx, guest_idx;
ASSERT(size + (((unsigned long)new_gp) & ~PAGE_MASK) <= PAGE_SIZE);
/* Map the shadow page */
- smfn = get_shadow_status(v, gmfn, sh_type);
+ smfn = get_shadow_status(d, gmfn, sh_type);
ASSERT(mfn_valid(smfn)); /* Otherwise we would not have been called */
guest_idx = guest_index(new_gp);
map_mfn = smfn;
}
/* Guest mfn is valid: shadow it and install the shadow */
- smfn = get_shadow_status(v, gmfn, root_type);
+ smfn = get_shadow_status(d, gmfn, root_type);
if ( !mfn_valid(smfn) )
{
/* Make sure there's enough free shadow memory. */
gfn = guest_l2e_get_gfn(*gl2e);
mfn = shadow_l2e_get_mfn(*sl2e);
gmfn = (guest_l2e_get_flags(*gl2e) & _PAGE_PSE)
- ? get_fl1_shadow_status(v, gfn)
- : get_shadow_status(v,
+ ? get_fl1_shadow_status(d, gfn)
+ : get_shadow_status(d,
get_gfn_query_unlocked(d, gfn_x(gfn),
&p2mt), SH_type_l1_shadow);
if ( mfn_x(gmfn) != mfn_x(mfn) )
{
gfn = guest_l3e_get_gfn(*gl3e);
mfn = shadow_l3e_get_mfn(*sl3e);
- gmfn = get_shadow_status(v, get_gfn_query_unlocked(
+ gmfn = get_shadow_status(d, get_gfn_query_unlocked(
d, gfn_x(gfn), &p2mt),
((GUEST_PAGING_LEVELS == 3 ||
is_pv_32on64_vcpu(v))
{
gfn = guest_l4e_get_gfn(*gl4e);
mfn = shadow_l4e_get_mfn(*sl4e);
- gmfn = get_shadow_status(v, get_gfn_query_unlocked(
+ gmfn = get_shadow_status(d, get_gfn_query_unlocked(
d, gfn_x(gfn), &p2mt),
SH_type_l3_shadow);
if ( mfn_x(gmfn) != mfn_x(mfn) )