ftlb |= oos_fixup_flush_gmfn(v, gmfn, fixup);
- switch ( sh_remove_write_access(v, gmfn, 0, 0) )
+ switch ( sh_remove_write_access(d, gmfn, 0, 0) )
{
default:
case 0:
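
The level==0 call above is the "no pagetable hint" case described in the comment below, and per that comment it is the one variant that is allowed to fail. A minimal sketch of the return-value contract the switch is dispatching on (the -1 arm here is illustrative; in the full function the caller falls back to unshadowing the page, which this excerpt does not show):

    switch ( sh_remove_write_access(d, gmfn, 0, 0) )
    {
    default:
    case 0:            /* no writeable mappings found: nothing to do */
        break;
    case 1:            /* mappings were removed: TLB flush required */
        ftlb |= 1;
        break;
    case -1:           /* an unfindable writeable mapping remains; fall
                        * back to a heavier hammer (e.g. unshadowing) */
        ftlb |= 1;
        break;
    }
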
* level==0 means we have some other reason for revoking write access.
* If level==0 we are allowed to fail, returning -1. */
-int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
+int sh_remove_write_access(struct domain *d, mfn_t gmfn,
unsigned int level,
unsigned long fault_addr)
{
| SHF_L1_64
| SHF_FL1_64
;
- struct domain *d = v->domain;
struct page_info *pg = mfn_to_page(gmfn);
+#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
+ struct vcpu *curr = current;
+#endif
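
With the vcpu parameter gone, the writeable-mapping heuristic can no longer assume it was handed the running vcpu, so the function derives one from current. A sketch of the guard this presumably implies (the curr->domain == d condition is an assumption based on this excerpt, which only shows the declaration):

    #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
        /* The heuristic guesses the writeable PTE via the running guest's
         * linear pagetable, which only exists for the current context, so
         * it can only apply when 'current' belongs to this domain. */
        if ( curr->domain == d )
        {
            /* ... try the cheap guess before the brute-force hash walk ... */
        }
    #endif
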
for ( i = begin_pfn; i < end_pfn; i++ ) {
mfn_t mfn = get_gfn_query_unlocked(d, i, &t);
if (mfn_x(mfn) != INVALID_MFN)
- flush_tlb |= sh_remove_write_access(d->vcpu[0], mfn, 1, 0);
+ flush_tlb |= sh_remove_write_access(d, mfn, 1, 0);
}
dirty_vram->last_dirty = -1;
}
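
The dirty-VRAM caller above illustrates why the interface changes: it operates on the whole domain and previously had to invent a vcpu just to satisfy the old signature. Side by side:

    /* Before: an arbitrary vcpu is conscripted for a domain-wide op. */
    flush_tlb |= sh_remove_write_access(d->vcpu[0], mfn, 1, 0);
    /* After: the domain is passed directly. */
    flush_tlb |= sh_remove_write_access(d, mfn, 1, 0);
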
static inline uint32_t
gw_remove_write_accesses(struct vcpu *v, unsigned long va, walk_t *gw)
{
-#if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
struct domain *d = v->domain;
-#endif
-#endif
uint32_t rc = 0;
#if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
}
else
#endif /* OOS */
- if ( sh_remove_write_access(v, gw->l3mfn, 3, va) )
+ if ( sh_remove_write_access(d, gw->l3mfn, 3, va) )
rc = GW_RMWR_FLUSHTLB;
#endif /* GUEST_PAGING_LEVELS >= 4 */
}
else
#endif /* OOS */
- if ( sh_remove_write_access(v, gw->l2mfn, 2, va) )
+ if ( sh_remove_write_access(d, gw->l2mfn, 2, va) )
rc |= GW_RMWR_FLUSHTLB;
#endif /* GUEST_PAGING_LEVELS >= 3 */
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
&& !mfn_is_out_of_sync(gw->l1mfn)
#endif /* OOS */
- && sh_remove_write_access(v, gw->l1mfn, 1, va) )
+ && sh_remove_write_access(d, gw->l1mfn, 1, va) )
rc |= GW_RMWR_FLUSHTLB;
return rc;
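
gw_remove_write_accesses() reports its side effects through the rc flags seen above. A hedged sketch of how a caller consumes them (in Xen the caller is the shadow page-fault path; the exact sequencing around locks and the rewalk target is simplified here):

    rc = gw_remove_write_accesses(v, va, &gw);
    if ( rc & GW_RMWR_FLUSHTLB )
        flush_tlb_mask(d->domain_dirty_cpumask);
    if ( rc & GW_RMWR_REWALK )
        goto rewalk;   /* guest tables changed under us: walk them again */
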
* replace the old shadow pagetable(s), so that we can safely use the
* (old) shadow linear maps in the writeable mapping heuristics. */
#if GUEST_PAGING_LEVELS == 2
- if ( sh_remove_write_access(v, gmfn, 2, 0) != 0 )
+ if ( sh_remove_write_access(d, gmfn, 2, 0) != 0 )
flush_tlb_mask(d->domain_dirty_cpumask);
sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l2_shadow);
#elif GUEST_PAGING_LEVELS == 3
gl2gfn = guest_l3e_get_gfn(gl3e[i]);
gl2mfn = get_gfn_query_unlocked(d, gfn_x(gl2gfn), &p2mt);
if ( p2m_is_ram(p2mt) )
- flush |= sh_remove_write_access(v, gl2mfn, 2, 0);
+ flush |= sh_remove_write_access(d, gl2mfn, 2, 0);
}
}
if ( flush )
}
}
#elif GUEST_PAGING_LEVELS == 4
- if ( sh_remove_write_access(v, gmfn, 4, 0) != 0 )
+ if ( sh_remove_write_access(d, gmfn, 4, 0) != 0 )
flush_tlb_mask(d->domain_dirty_cpumask);
sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow);
#else
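
All three GUEST_PAGING_LEVELS branches above follow the same revoke-then-install shape; condensed into one hedged sketch (the level argument and shadow type vary per branch, and the 3-level PAE case additionally loops over the four guest l3 entries as shown):

    /* Revoke write access to the new guest toplevel *before* installing
     * its shadow, so the old shadow linear maps remain usable by the
     * writeable-mapping heuristic during the transition. */
    if ( sh_remove_write_access(d, gmfn, GUEST_PAGING_LEVELS, 0) != 0 )
        flush_tlb_mask(d->domain_dirty_cpumask);
    sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow /* or l2 */);
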