static int oos_remove_write_access(struct vcpu *v, mfn_t gmfn,
struct oos_fixup *fixup)
{
+ struct domain *d = v->domain;
int ftlb = 0;
ftlb |= oos_fixup_flush_gmfn(v, gmfn, fixup);
}
if ( ftlb )
- flush_tlb_mask(v->domain->domain_dirty_cpumask);
+ flush_tlb_mask(d->domain_dirty_cpumask);
return 0;
}
*/
void shadow_promote(struct vcpu *v, mfn_t gmfn, unsigned int type)
{
+ struct domain *d = v->domain;
struct page_info *page = mfn_to_page(gmfn);
ASSERT(mfn_valid(gmfn));
/* We should never try to promote a gmfn that has writeable mappings */
ASSERT((page->u.inuse.type_info & PGT_type_mask) != PGT_writable_page
|| (page->u.inuse.type_info & PGT_count_mask) == 0
- || v->domain->is_shutting_down);
+ || d->is_shutting_down);
/* Is the page already shadowed? */
if ( !test_and_set_bit(_PGC_page_table, &page->count_info) )
void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
{
+ struct domain *d = v->domain;
struct page_info *sp = mfn_to_page(smfn);
unsigned int t = sp->u.sh.type;
t == SH_type_fl1_pae_shadow ||
t == SH_type_fl1_64_shadow ||
t == SH_type_monitor_table ||
- (is_pv_32on64_vcpu(v) && t == SH_type_l4_64_shadow) ||
- (page_get_owner(mfn_to_page(backpointer(sp)))
- == v->domain));
+ (is_pv_32on64_domain(d) && t == SH_type_l4_64_shadow) ||
+ (page_get_owner(mfn_to_page(backpointer(sp))) == d));
/* The down-shifts here are so that the switch statement is on nice
* small numbers that the compiler will enjoy */
SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4)(v, smfn);
break;
case SH_type_l2h_64_shadow:
- ASSERT(is_pv_32on64_vcpu(v));
+ ASSERT(is_pv_32on64_domain(d));
/* Fall through... */
case SH_type_l2_64_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4)(v, smfn);
| SHF_L1_64
| SHF_FL1_64
;
+ struct domain *d = v->domain;
struct page_info *pg = mfn_to_page(gmfn);
- ASSERT(paging_locked_by_me(v->domain));
+ ASSERT(paging_locked_by_me(d));
/* Only remove writable mappings if we are doing shadow refcounts.
* In guest refcounting, we trust Xen to already be restricting
* all the writes to the guest page tables, so we do not need to
* do more. */
- if ( !shadow_mode_refcounts(v->domain) )
+ if ( !shadow_mode_refcounts(d) )
return 0;
/* Early exit if it's already a pagetable, or otherwise not writeable */
SHADOW_ERROR("can't remove write access to mfn %lx, type_info is %"
PRtype_info "\n",
mfn_x(gmfn), mfn_to_page(gmfn)->u.inuse.type_info);
- domain_crash(v->domain);
+ domain_crash(d);
}
#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
GUESS(0xC0000000UL + (fault_addr >> 10), 1);
/* Linux lowmem: first 896MB is mapped 1-to-1 above 0xC0000000 */
- if ((gfn = mfn_to_gfn(v->domain, gmfn)) < 0x38000 )
+ if ((gfn = mfn_to_gfn(d, gmfn)) < 0x38000 )
GUESS(0xC0000000UL + (gfn << PAGE_SHIFT), 4);
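/* (0x38000 pages x 4kB per page is exactly the 896MB lowmem boundary
 * tested for above.) */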
/* FreeBSD: Linear map at 0xBFC00000 */
}
/* Linux lowmem: first 896MB is mapped 1-to-1 above 0xC0000000 */
- if ((gfn = mfn_to_gfn(v->domain, gmfn)) < 0x38000 )
+ if ((gfn = mfn_to_gfn(d, gmfn)) < 0x38000 )
GUESS(0xC0000000UL + (gfn << PAGE_SHIFT), 4);
/* FreeBSD PAE: Linear map at 0xBF800000 */
/* 64bit Linux direct map at 0xffff880000000000; older kernels
* had it at 0xffff810000000000, and older kernels yet had it
* at 0x0000010000000000UL */
- gfn = mfn_to_gfn(v->domain, gmfn);
+ gfn = mfn_to_gfn(d, gmfn);
GUESS(0xffff880000000000UL + (gfn << PAGE_SHIFT), 4);
GUESS(0xffff810000000000UL + (gfn << PAGE_SHIFT), 4);
GUESS(0x0000010000000000UL + (gfn << PAGE_SHIFT), 4);
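/* (These addresses are only guesses: a wrong guess is harmless, as the
 * function still falls back to a brute-force search of the shadows if
 * writeable mappings remain.) */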
SHADOW_ERROR("can't remove write access to mfn %lx: guest has "
"%lu special-use mappings of it\n", mfn_x(gmfn),
(mfn_to_page(gmfn)->u.inuse.type_info&PGT_count_mask));
- domain_crash(v->domain);
+ domain_crash(d);
}
/* We killed at least one writeable mapping, so must flush TLBs. */
static int sh_remove_all_mappings(struct vcpu *v, mfn_t gmfn)
{
+ struct domain *d = v->domain;
struct page_info *page = mfn_to_page(gmfn);
/* Dispatch table for getting per-type functions */
/* Although this is an externally visible function, we do not know
* whether the paging lock will be held when it is called (since it
* can be called via put_page_type when we clear a shadow l1e).*/
- paging_lock_recursive(v->domain);
+ paging_lock_recursive(d);
/* XXX TODO:
* Heuristics for finding the (probably) single mapping of this gmfn */
* and the HVM restore program takes another.
* Also allow one typed refcount for xenheap pages, to match
* share_xen_page_with_guest(). */
- if ( !(shadow_mode_external(v->domain)
+ if ( !(shadow_mode_external(d)
&& (page->count_info & PGC_count_mask) <= 3
&& ((page->u.inuse.type_info & PGT_count_mask)
== !!is_xen_heap_page(page))) )
}
}
- paging_unlock(v->domain);
+ paging_unlock(d);
/* We killed at least one mapping, so must flush TLBs. */
return 1;
* (all != 0 implies fast == 0)
*/
{
+ struct domain *d = v->domain;
struct page_info *pg = mfn_to_page(gmfn);
mfn_t smfn;
unsigned char t;
/* Although this is an externally visible function, we do not know
* whether the paging lock will be held when it is called (since it
* can be called via put_page_type when we clear a shadow l1e).*/
- paging_lock_recursive(v->domain);
+ paging_lock_recursive(d);
SHADOW_PRINTK("d=%d, v=%d, gmfn=%05lx\n",
- v->domain->domain_id, v->vcpu_id, mfn_x(gmfn));
+ d->domain_id, v->vcpu_id, mfn_x(gmfn));
/* Bail out now if the page is not shadowed */
if ( (pg->count_info & PGC_page_table) == 0 )
{
- paging_unlock(v->domain);
+ paging_unlock(d);
return;
}
SHADOW_ERROR("can't find all shadows of mfn %05lx "
"(shadow_flags=%08x)\n",
mfn_x(gmfn), pg->shadow_flags);
- domain_crash(v->domain);
+ domain_crash(d);
}
/* Need to flush TLBs now, so that linear maps are safe next time we
* take a fault. */
- flush_tlb_mask(v->domain->domain_dirty_cpumask);
+ flush_tlb_mask(d->domain_dirty_cpumask);
- paging_unlock(v->domain);
+ paging_unlock(d);
}
static void
ASSERT(mfn_to_page(smfn)->u.sh.head);
/* 32-on-64 PV guests don't own their l4 pages so can't get_page them */
- if ( !is_pv_32on64_vcpu(v) || shadow_type != SH_type_l4_64_shadow )
+ if ( !is_pv_32on64_domain(d) || shadow_type != SH_type_l4_64_shadow )
{
res = get_page(mfn_to_page(gmfn), d);
ASSERT(res == 1);
delete_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type, mfn_t smfn)
/* Remove a shadow from the hash table */
{
+ struct domain *d = v->domain;
SHADOW_PRINTK("d=%d, v=%d, gmfn=%05lx, type=%08x, smfn=%05lx\n",
- v->domain->domain_id, v->vcpu_id,
+ d->domain_id, v->vcpu_id,
mfn_x(gmfn), shadow_type, mfn_x(smfn));
ASSERT(mfn_to_page(smfn)->u.sh.head);
shadow_hash_delete(v, mfn_x(gmfn), shadow_type, smfn);
/* 32-on-64 PV guests don't own their l4 pages; see set_shadow_status */
- if ( !is_pv_32on64_vcpu(v) || shadow_type != SH_type_l4_64_shadow )
+ if ( !is_pv_32on64_domain(d) || shadow_type != SH_type_l4_64_shadow )
put_page(mfn_to_page(gmfn));
}
shadow_l4e_t new_sl4e,
mfn_t sl4mfn)
{
+ struct domain *d = v->domain;
int flags = 0, ok;
shadow_l4e_t old_sl4e;
paddr_t paddr;
ok |= sh_pin(v, sl3mfn);
if ( !ok )
{
- domain_crash(v->domain);
+ domain_crash(d);
return SHADOW_SET_ERROR;
}
}
shadow_l3e_t new_sl3e,
mfn_t sl3mfn)
{
+ struct domain *d = v->domain;
int flags = 0;
shadow_l3e_t old_sl3e;
paddr_t paddr;
/* About to install a new reference */
if ( !sh_get_ref(v, shadow_l3e_get_mfn(new_sl3e), paddr) )
{
- domain_crash(v->domain);
+ domain_crash(d);
return SHADOW_SET_ERROR;
}
}
shadow_l2e_t new_sl2e,
mfn_t sl2mfn)
{
+ struct domain *d = v->domain;
int flags = 0;
shadow_l2e_t old_sl2e;
paddr_t paddr;
/* About to install a new reference */
if ( !sh_get_ref(v, sl1mfn, paddr) )
{
- domain_crash(v->domain);
+ domain_crash(d);
return SHADOW_SET_ERROR;
}
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
static mfn_t
sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
{
- mfn_t smfn = shadow_alloc(v->domain, shadow_type, mfn_x(gmfn));
+ struct domain *d = v->domain;
+ mfn_t smfn = shadow_alloc(d, shadow_type, mfn_x(gmfn));
SHADOW_DEBUG(MAKE_SHADOW, "(%05lx, %u)=>%05lx\n",
mfn_x(gmfn), shadow_type, mfn_x(smfn));
#if GUEST_PAGING_LEVELS == 4
#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL)
if ( shadow_type == SH_type_l4_64_shadow &&
- unlikely(v->domain->arch.paging.shadow.opt_flags & SHOPT_LINUX_L3_TOPLEVEL) )
+ unlikely(d->arch.paging.shadow.opt_flags & SHOPT_LINUX_L3_TOPLEVEL) )
{
/* We're shadowing a new l4, but we've been assuming the guest uses
* only one l4 per vcpu and context switches using an l4 entry.
struct page_info *sp, *t;
struct vcpu *v2;
int l4count = 0, vcpus = 0;
- page_list_for_each(sp, &v->domain->arch.paging.shadow.pinned_shadows)
+ page_list_for_each(sp, &d->arch.paging.shadow.pinned_shadows)
{
if ( sp->u.sh.type == SH_type_l4_64_shadow )
l4count++;
}
- for_each_vcpu ( v->domain, v2 )
+ for_each_vcpu ( d, v2 )
vcpus++;
if ( l4count > 2 * vcpus )
{
/* Unpin all the pinned l3 tables, and don't pin any more. */
- page_list_for_each_safe(sp, t, &v->domain->arch.paging.shadow.pinned_shadows)
+ page_list_for_each_safe(sp, t, &d->arch.paging.shadow.pinned_shadows)
{
if ( sp->u.sh.type == SH_type_l3_64_shadow )
sh_unpin(v, page_to_mfn(sp));
}
- v->domain->arch.paging.shadow.opt_flags &= ~SHOPT_LINUX_L3_TOPLEVEL;
+ d->arch.paging.shadow.opt_flags &= ~SHOPT_LINUX_L3_TOPLEVEL;
sh_reset_l3_up_pointers(v);
}
}
#endif
// Create the Xen mappings...
- if ( !shadow_mode_external(v->domain) )
+ if ( !shadow_mode_external(d) )
{
switch (shadow_type)
{
static mfn_t
make_fl1_shadow(struct vcpu *v, gfn_t gfn)
{
- mfn_t smfn = shadow_alloc(v->domain, SH_type_fl1_shadow,
- (unsigned long) gfn_x(gfn));
+ struct domain *d = v->domain;
+ mfn_t smfn = shadow_alloc(d, SH_type_fl1_shadow, gfn_x(gfn));
SHADOW_DEBUG(MAKE_SHADOW, "(%" SH_PRI_gfn ")=>%" PRI_mfn "\n",
gfn_x(gfn), mfn_x(smfn));
#if GUEST_PAGING_LEVELS >= 4
void sh_destroy_l4_shadow(struct vcpu *v, mfn_t smfn)
{
+ struct domain *d = v->domain;
shadow_l4e_t *sl4e;
struct page_info *sp = mfn_to_page(smfn);
u32 t = sp->u.sh.type;
shadow_demote(v, gmfn, t);
/* Decrement refcounts of all the old entries */
sl4mfn = smfn;
- SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, v->domain, {
+ SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, d, {
if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT )
{
sh_put_ref(v, shadow_l4e_get_mfn(*sl4e),
});
/* Put the memory back in the pool */
- shadow_free(v->domain, smfn);
+ shadow_free(d, smfn);
}
void sh_destroy_l3_shadow(struct vcpu *v, mfn_t smfn)
{
+ struct domain *d = v->domain;
shadow_l3e_t *sl3e;
struct page_info *sp = mfn_to_page(smfn);
u32 t = sp->u.sh.type;
});
/* Put the memory back in the pool */
- shadow_free(v->domain, smfn);
+ shadow_free(d, smfn);
}
#endif /* GUEST_PAGING_LEVELS >= 4 */
void sh_destroy_l2_shadow(struct vcpu *v, mfn_t smfn)
{
+ struct domain *d = v->domain;
shadow_l2e_t *sl2e;
struct page_info *sp = mfn_to_page(smfn);
u32 t = sp->u.sh.type;
/* Decrement refcounts of all the old entries */
sl2mfn = smfn;
- SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, v->domain, {
+ SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, d, {
if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT )
sh_put_ref(v, shadow_l2e_get_mfn(*sl2e),
(((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT)
});
/* Put the memory back in the pool */
- shadow_free(v->domain, smfn);
+ shadow_free(d, smfn);
}
void sh_destroy_l1_shadow(struct vcpu *v, mfn_t smfn)
}
/* Put the memory back in the pool */
- shadow_free(v->domain, smfn);
+ shadow_free(d, smfn);
}
#if SHADOW_PAGING_LEVELS == GUEST_PAGING_LEVELS
void sh_unhook_32b_mappings(struct vcpu *v, mfn_t sl2mfn, int user_only)
{
+ struct domain *d = v->domain;
shadow_l2e_t *sl2e;
- SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, v->domain, {
+ SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, d, {
if ( !user_only || (sl2e->l2 & _PAGE_USER) )
(void) shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
});
void sh_unhook_pae_mappings(struct vcpu *v, mfn_t sl2mfn, int user_only)
/* Walk a PAE l2 shadow, unhooking entries from all the subshadows */
{
+ struct domain *d = v->domain;
shadow_l2e_t *sl2e;
- SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, v->domain, {
+ SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, d, {
if ( !user_only || (sl2e->l2 & _PAGE_USER) )
(void) shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
});
void sh_unhook_64b_mappings(struct vcpu *v, mfn_t sl4mfn, int user_only)
{
+ struct domain *d = v->domain;
shadow_l4e_t *sl4e;
- SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, v->domain, {
+ SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, d, {
if ( !user_only || (sl4e->l4 & _PAGE_USER) )
(void) shadow_set_l4e(v, sl4e, shadow_l4e_empty(), sl4mfn);
});
static int validate_gl3e(struct vcpu *v, void *new_ge, mfn_t sl3mfn, void *se)
{
+ struct domain *d = v->domain;
shadow_l3e_t new_sl3e;
guest_l3e_t new_gl3e = *(guest_l3e_t *)new_ge;
shadow_l3e_t *sl3p = se;
if ( guest_l3e_get_flags(new_gl3e) & _PAGE_PRESENT )
{
gfn_t gl2gfn = guest_l3e_get_gfn(new_gl3e);
- mfn_t gl2mfn = get_gfn_query_unlocked(v->domain, gfn_x(gl2gfn), &p2mt);
+ mfn_t gl2mfn = get_gfn_query_unlocked(d, gfn_x(gl2gfn), &p2mt);
if ( p2m_is_ram(p2mt) )
sl2mfn = get_shadow_status(v, gl2mfn, SH_type_l2_shadow);
else if ( p2mt != p2m_populate_on_demand )
static int validate_gl2e(struct vcpu *v, void *new_ge, mfn_t sl2mfn, void *se)
{
+ struct domain *d = v->domain;
shadow_l2e_t new_sl2e;
guest_l2e_t new_gl2e = *(guest_l2e_t *)new_ge;
shadow_l2e_t *sl2p = se;
}
else
{
- mfn_t gl1mfn = get_gfn_query_unlocked(v->domain, gfn_x(gl1gfn),
- &p2mt);
+ mfn_t gl1mfn = get_gfn_query_unlocked(d, gfn_x(gl1gfn), &p2mt);
if ( p2m_is_ram(p2mt) )
sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow);
else if ( p2mt != p2m_populate_on_demand )
static int validate_gl1e(struct vcpu *v, void *new_ge, mfn_t sl1mfn, void *se)
{
+ struct domain *d = v->domain;
shadow_l1e_t new_sl1e;
guest_l1e_t new_gl1e = *(guest_l1e_t *)new_ge;
shadow_l1e_t *sl1p = se;
perfc_incr(shadow_validate_gl1e_calls);
gfn = guest_l1e_get_gfn(new_gl1e);
- gmfn = get_gfn_query_unlocked(v->domain, gfn_x(gfn), &p2mt);
+ gmfn = get_gfn_query_unlocked(d, gfn_x(gfn), &p2mt);
l1e_propagate_from_guest(v, new_gl1e, gmfn, &new_sl1e, ft_prefetch, p2mt);
result |= shadow_set_l1e(v, sl1p, new_sl1e, p2mt, sl1mfn);
* *not* the one that is causing it to be resynced. */
void sh_resync_l1(struct vcpu *v, mfn_t gl1mfn, mfn_t snpmfn)
{
+ struct domain *d = v->domain;
mfn_t sl1mfn;
shadow_l1e_t *sl1p;
guest_l1e_t *gl1p, *gp, *snp;
shadow_l1e_t nsl1e;
gfn = guest_l1e_get_gfn(gl1e);
- gmfn = get_gfn_query_unlocked(v->domain, gfn_x(gfn), &p2mt);
+ gmfn = get_gfn_query_unlocked(d, gfn_x(gfn), &p2mt);
l1e_propagate_from_guest(v, gl1e, gmfn, &nsl1e, ft_prefetch, p2mt);
rc |= shadow_set_l1e(v, sl1p, nsl1e, p2mt, sl1mfn);
*snpl1p = gl1e;
static inline void check_for_early_unshadow(struct vcpu *v, mfn_t gmfn)
{
#if SHADOW_OPTIMIZATIONS & SHOPT_EARLY_UNSHADOW
+ struct domain *d = v->domain;
/* If the domain has never made a "dying" op, use the two-writes
* heuristic; otherwise, unshadow as soon as we write a zero for a dying
* process.
* Don't bother trying to unshadow if it's not a PT, or if it's > l1.
*/
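/* ("Two writes" means a second emulated write hitting the same gmfn as
 * the previous one, tracked via last_emulated_mfn_for_unshadow below.) */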
if ( ( v->arch.paging.shadow.pagetable_dying
- || ( !v->domain->arch.paging.shadow.pagetable_dying_op
+ || ( !d->arch.paging.shadow.pagetable_dying_op
&& v->arch.paging.shadow.last_emulated_mfn_for_unshadow == mfn_x(gmfn) ) )
&& sh_mfn_is_a_page_table(gmfn)
- && (!v->domain->arch.paging.shadow.pagetable_dying_op ||
+ && (!d->arch.paging.shadow.pagetable_dying_op ||
!(mfn_to_page(gmfn)->shadow_flags
& (SHF_L2_32|SHF_L2_PAE|SHF_L2H_PAE|SHF_L4_64))) )
{
static void sh_prefetch(struct vcpu *v, walk_t *gw,
shadow_l1e_t *ptr_sl1e, mfn_t sl1mfn)
{
+ struct domain *d = v->domain;
int i, dist;
gfn_t gfn;
mfn_t gmfn;
/* Look at the gfn that the l1e is pointing at */
gfn = guest_l1e_get_gfn(gl1e);
- gmfn = get_gfn_query_unlocked(v->domain, gfn_x(gfn), &p2mt);
+ gmfn = get_gfn_query_unlocked(d, gfn_x(gfn), &p2mt);
/* Propagate the entry. */
l1e_propagate_from_guest(v, gl1e, gmfn, &sl1e, ft_prefetch, p2mt);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Check to see if the SL1 is out of sync. */
{
+ struct domain *d = v->domain;
mfn_t gl1mfn = backpointer(mfn_to_page(sl1mfn));
struct page_info *pg = mfn_to_page(gl1mfn);
if ( mfn_valid(gl1mfn)
{
/* The test above may give false positives, since we don't
* hold the paging lock yet. Check again with the lock held. */
- paging_lock(v->domain);
+ paging_lock(d);
/* This must still be a copy-from-user because we didn't
* have the paging lock last time we checked, and the
sizeof (sl2e)) != 0 )
{
perfc_incr(shadow_invlpg_fault);
- paging_unlock(v->domain);
+ paging_unlock(d);
return 0;
}
if ( !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT) )
{
- paging_unlock(v->domain);
+ paging_unlock(d);
return 0;
}
(void) shadow_set_l1e(v, sl1, shadow_l1e_empty(),
p2m_invalid, sl1mfn);
}
- paging_unlock(v->domain);
+ paging_unlock(d);
/* Need the invlpg, to pick up the disappearance of the sl1e */
return 1;
}
if ( sh_pin(v, smfn) == 0 )
{
SHADOW_ERROR("can't pin %#lx as toplevel shadow\n", mfn_x(smfn));
- domain_crash(v->domain);
+ domain_crash(d);
}
/* Take a ref to this page: it will be released in sh_detach_old_tables()
if ( !sh_get_ref(v, smfn, 0) )
{
SHADOW_ERROR("can't install %#lx as toplevel shadow\n", mfn_x(smfn));
- domain_crash(v->domain);
+ domain_crash(d);
}
new_entry = pagetable_from_mfn(smfn);
if ( !mfn_to_page(old_smfn)->u.sh.pinned && !sh_pin(v, old_smfn) )
{
SHADOW_ERROR("can't re-pin %#lx\n", mfn_x(old_smfn));
- domain_crash(v->domain);
+ domain_crash(d);
}
sh_put_ref(v, old_smfn, 0);
}
int sh_remove_l1_shadow(struct vcpu *v, mfn_t sl2mfn, mfn_t sl1mfn)
/* Remove all mappings of this l1 shadow from this l2 shadow */
{
+ struct domain *d = v->domain;
shadow_l2e_t *sl2e;
int done = 0;
int flags;
- SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, done, v->domain,
+ SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, done, d,
{
flags = shadow_l2e_get_flags(*sl2e);
if ( (flags & _PAGE_PRESENT)
int sh_remove_l3_shadow(struct vcpu *v, mfn_t sl4mfn, mfn_t sl3mfn)
/* Remove all mappings of this l3 shadow from this l4 shadow */
{
+ struct domain *d = v->domain;
shadow_l4e_t *sl4e;
int done = 0;
int flags;
- SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, done, v->domain,
+ SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, done, d,
{
flags = shadow_l4e_get_flags(*sl4e);
if ( (flags & _PAGE_PRESENT)
#if GUEST_PAGING_LEVELS == 3
static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
{
+ struct domain *d = v->domain;
int i = 0;
int flush = 0;
int fast_path = 0;
fast_path = 1;
l3gfn = gpa >> PAGE_SHIFT;
- l3mfn = get_gfn_query(v->domain, _gfn(l3gfn), &p2mt);
+ l3mfn = get_gfn_query(d, _gfn(l3gfn), &p2mt);
if ( !mfn_valid(l3mfn) || !p2m_is_ram(p2mt) )
{
printk(XENLOG_DEBUG "sh_pagetable_dying: gpa not valid %"PRIpaddr"\n",
goto out_put_gfn;
}
- paging_lock(v->domain);
+ paging_lock(d);
if ( !fast_path )
{
/* retrieving the l2s */
gl2a = guest_l3e_get_paddr(gl3e[i]);
gfn = gl2a >> PAGE_SHIFT;
- gmfn = get_gfn_query_unlocked(v->domain, gfn, &p2mt);
+ gmfn = get_gfn_query_unlocked(d, gfn, &p2mt);
smfn = shadow_hash_lookup(v, mfn_x(gmfn), SH_type_l2_pae_shadow);
}
}
}
if ( flush )
- flush_tlb_mask(v->domain->domain_dirty_cpumask);
+ flush_tlb_mask(d->domain_dirty_cpumask);
/* Remember that we've seen the guest use this interface, so we
* can rely on it using it in future, instead of guessing at
* when processes are being torn down. */
- v->domain->arch.paging.shadow.pagetable_dying_op = 1;
+ d->arch.paging.shadow.pagetable_dying_op = 1;
v->arch.paging.shadow.pagetable_dying = 1;
if ( !fast_path )
unmap_domain_page(gl3pa);
- paging_unlock(v->domain);
+ paging_unlock(d);
out_put_gfn:
- put_gfn(v->domain, l3gfn);
+ put_gfn(d, l3gfn);
}
#else
static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
{
+ struct domain *d = v->domain;
mfn_t smfn, gmfn;
p2m_type_t p2mt;
- gmfn = get_gfn_query(v->domain, _gfn(gpa >> PAGE_SHIFT), &p2mt);
- paging_lock(v->domain);
+ gmfn = get_gfn_query(d, _gfn(gpa >> PAGE_SHIFT), &p2mt);
+ paging_lock(d);
#if GUEST_PAGING_LEVELS == 2
smfn = shadow_hash_lookup(v, mfn_x(gmfn), SH_type_l2_32_shadow);
mfn_to_page(gmfn)->shadow_flags |= SHF_pagetable_dying;
shadow_unhook_mappings(v, smfn, 1/* user pages only */);
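/* (Only the user-space slots are unhooked: the kernel half of a dying
 * process's address space is presumably still shared and in use.) */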
/* Now flush the TLB: we removed toplevel mappings. */
- flush_tlb_mask(v->domain->domain_dirty_cpumask);
+ flush_tlb_mask(d->domain_dirty_cpumask);
}
/* Remember that we've seen the guest use this interface, so we
* can rely on it using it in future, instead of guessing at
* when processes are being torn down. */
- v->domain->arch.paging.shadow.pagetable_dying_op = 1;
+ d->arch.paging.shadow.pagetable_dying_op = 1;
v->arch.paging.shadow.pagetable_dying = 1;
- paging_unlock(v->domain);
- put_gfn(v->domain, gpa >> PAGE_SHIFT);
+ paging_unlock(d);
+ put_gfn(d, gpa >> PAGE_SHIFT);
}
#endif
int sh_audit_l2_table(struct vcpu *v, mfn_t sl2mfn, mfn_t x)
{
+ struct domain *d = v->domain;
guest_l2e_t *gl2e, *gp;
shadow_l2e_t *sl2e;
mfn_t mfn, gmfn, gl2mfn;
#endif
gl2e = gp = sh_map_domain_page(gl2mfn);
- SHADOW_FOREACH_L2E(sl2mfn, sl2e, &gl2e, done, v->domain, {
+ SHADOW_FOREACH_L2E(sl2mfn, sl2e, &gl2e, done, d, {
s = sh_audit_flags(v, 2, guest_l2e_get_flags(*gl2e),
shadow_l2e_get_flags(*sl2e));
gmfn = (guest_l2e_get_flags(*gl2e) & _PAGE_PSE)
? get_fl1_shadow_status(v, gfn)
: get_shadow_status(v,
- get_gfn_query_unlocked(v->domain, gfn_x(gfn),
+ get_gfn_query_unlocked(d, gfn_x(gfn),
&p2mt), SH_type_l1_shadow);
if ( mfn_x(gmfn) != mfn_x(mfn) )
AUDIT_FAIL(2, "bad translation: gfn %" SH_PRI_gfn
" --> %" PRI_mfn " != mfn %" PRI_mfn,
gfn_x(gfn),
(guest_l2e_get_flags(*gl2e) & _PAGE_PSE) ? 0
- : mfn_x(get_gfn_query_unlocked(v->domain,
+ : mfn_x(get_gfn_query_unlocked(d,
gfn_x(gfn), &p2mt)), mfn_x(gmfn), mfn_x(mfn));
}
});
#if GUEST_PAGING_LEVELS >= 4
int sh_audit_l3_table(struct vcpu *v, mfn_t sl3mfn, mfn_t x)
{
+ struct domain *d = v->domain;
guest_l3e_t *gl3e, *gp;
shadow_l3e_t *sl3e;
mfn_t mfn, gmfn, gl3mfn;
gfn = guest_l3e_get_gfn(*gl3e);
mfn = shadow_l3e_get_mfn(*sl3e);
gmfn = get_shadow_status(v, get_gfn_query_unlocked(
- v->domain, gfn_x(gfn), &p2mt),
+ d, gfn_x(gfn), &p2mt),
((GUEST_PAGING_LEVELS == 3 ||
is_pv_32on64_vcpu(v))
- && !shadow_mode_external(v->domain)
+ && !shadow_mode_external(d)
&& (guest_index(gl3e) % 4) == 3)
? SH_type_l2h_shadow
: SH_type_l2_shadow);
int sh_audit_l4_table(struct vcpu *v, mfn_t sl4mfn, mfn_t x)
{
+ struct domain *d = v->domain;
guest_l4e_t *gl4e, *gp;
shadow_l4e_t *sl4e;
mfn_t mfn, gmfn, gl4mfn;
#endif
gl4e = gp = sh_map_domain_page(gl4mfn);
- SHADOW_FOREACH_L4E(sl4mfn, sl4e, &gl4e, done, v->domain,
+ SHADOW_FOREACH_L4E(sl4mfn, sl4e, &gl4e, done, d,
{
s = sh_audit_flags(v, 4, guest_l4e_get_flags(*gl4e),
shadow_l4e_get_flags(*sl4e));
gfn = guest_l4e_get_gfn(*gl4e);
mfn = shadow_l4e_get_mfn(*sl4e);
gmfn = get_shadow_status(v, get_gfn_query_unlocked(
- v->domain, gfn_x(gfn), &p2mt),
+ d, gfn_x(gfn), &p2mt),
SH_type_l3_shadow);
if ( mfn_x(gmfn) != mfn_x(mfn) )
AUDIT_FAIL(4, "bad translation: gfn %" SH_PRI_gfn