if ( toaddr )
{
copy_from_user(va, buf, pagecnt); /* va = buf */
- paging_mark_dirty(dp, mfn_x(mfn));
+ paging_mark_dirty(dp, mfn);
}
else
{
if ( npfec.write_access )
{
- paging_mark_dirty(currd, mfn_x(mfn));
+ paging_mark_dirty(currd, mfn);
/*
 * If p2m is really an altp2m, unlock here to avoid lock ordering
 * violation when the change below is propagated from host p2m.
 */
if ( unlikely(p2m_is_discard_write(p2mt)) )
*writable = 0;
else if ( !permanent )
- paging_mark_dirty(d, page_to_mfn(page));
+ paging_mark_dirty(d, _mfn(page_to_mfn(page)));
}
if ( !permanent )
list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
if ( track->page == page )
{
- paging_mark_dirty(d, mfn);
+ paging_mark_dirty(d, _mfn(mfn));
list_del(&track->list);
xfree(track);
break;
}
spin_lock(&d->arch.hvm_domain.write_map.lock);
list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
- paging_mark_dirty(d, page_to_mfn(track->page));
+ paging_mark_dirty(d, _mfn(page_to_mfn(track->page)));
spin_unlock(&d->arch.hvm_domain.write_map.lock);
}
if ( buf )
    memcpy(p, buf, count);
else
memset(p, 0, count);
- paging_mark_dirty(curr->domain, page_to_mfn(page));
+ paging_mark_dirty(curr->domain, _mfn(page_to_mfn(page)));
}
}
else
page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
if ( page )
{
- paging_mark_dirty(d, page_to_mfn(page));
+ paging_mark_dirty(d, _mfn(page_to_mfn(page)));
/* These are most probably not page tables any more */
/* don't take a long time and don't die either */
sh_remove_shadows(d, _mfn(page_to_mfn(page)), 1, 0);
rc = guest_physmap_add_page(d, _gfn(iorp->gmfn),
_mfn(page_to_mfn(iorp->page)), 0);
if ( rc == 0 )
- paging_mark_dirty(d, page_to_mfn(iorp->page));
+ paging_mark_dirty(d, _mfn(page_to_mfn(iorp->page)));
return rc;
}
/* A page table is dirtied when its type count becomes non-zero. */
if ( likely(owner != NULL) )
- paging_mark_dirty(owner, page_to_mfn(page));
+ paging_mark_dirty(owner, _mfn(page_to_mfn(page)));
switch ( type & PGT_type_mask )
{
if ( likely(owner != NULL) && unlikely(paging_mode_enabled(owner)) )
{
/* A page table is dirtied when its type count becomes zero. */
- paging_mark_dirty(owner, page_to_mfn(page));
+ paging_mark_dirty(owner, _mfn(page_to_mfn(page)));
if ( shadow_mode_refcounts(owner) )
return 0;
goto pin_drop;
/* A page is dirtied when its pin status is set. */
- paging_mark_dirty(pg_owner, page_to_mfn(page));
+ paging_mark_dirty(pg_owner, _mfn(page_to_mfn(page)));
/* We can race domain destruction (domain_relinquish_resources). */
if ( unlikely(pg_owner != d) )
put_page(page);
/* A page is dirtied when its pin status is cleared. */
- paging_mark_dirty(pg_owner, page_to_mfn(page));
+ paging_mark_dirty(pg_owner, _mfn(page_to_mfn(page)));
break;
}
}
/* A page is dirtied when it's being cleared. */
- paging_mark_dirty(pg_owner, page_to_mfn(page));
+ paging_mark_dirty(pg_owner, _mfn(page_to_mfn(page)));
clear_domain_page(_mfn(page_to_mfn(page)));
}
/* A page is dirtied when it's being copied to. */
- paging_mark_dirty(pg_owner, page_to_mfn(dst_page));
+ paging_mark_dirty(pg_owner, _mfn(page_to_mfn(dst_page)));
copy_domain_page(_mfn(page_to_mfn(dst_page)),
_mfn(page_to_mfn(src_page)));
set_gpfn_from_mfn(mfn, gpfn);
- paging_mark_dirty(pg_owner, mfn);
+ paging_mark_dirty(pg_owner, _mfn(mfn));
put_page(mfn_to_page(mfn));
break;
break;
}
- paging_mark_dirty(dom, mfn);
+ paging_mark_dirty(dom, _mfn(mfn));
/* All is good so make the update. */
gdt_pent = map_domain_page(_mfn(mfn));
{
#if GUEST_PAGING_LEVELS == 4 /* 64-bit only... */
if ( set_ad_bits(l4p + guest_l4_table_offset(va), &gw->l4e, 0) )
- paging_mark_dirty(d, mfn_x(gw->l4mfn));
+ paging_mark_dirty(d, gw->l4mfn);
if ( set_ad_bits(l3p + guest_l3_table_offset(va), &gw->l3e,
(pse1G && (pfec & PFEC_write_access))) )
- paging_mark_dirty(d, mfn_x(gw->l3mfn));
+ paging_mark_dirty(d, gw->l3mfn);
#endif
if ( !pse1G )
{
if ( set_ad_bits(l2p + guest_l2_table_offset(va), &gw->l2e,
(pse2M && (pfec & PFEC_write_access))) )
- paging_mark_dirty(d, mfn_x(gw->l2mfn));
+ paging_mark_dirty(d, gw->l2mfn);
if ( !pse2M )
{
if ( set_ad_bits(l1p + guest_l1_table_offset(va), &gw->l1e,
(pfec & PFEC_write_access)) )
- paging_mark_dirty(d, mfn_x(gw->l1mfn));
+ paging_mark_dirty(d, gw->l1mfn);
}
}
}
/* Now that the gfn<->mfn map is properly established,
* marking dirty is feasible */
- paging_mark_dirty(d, mfn_x(page_to_mfn(page)));
+ paging_mark_dirty(d, page_to_mfn(page));
/* We do not need to unlock a private page */
put_gfn(d, gfn);
return 0;
for( i = 0; i < (1UL << order); i++ )
{
set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_aligned + i);
- paging_mark_dirty(d, mfn_x(mfn) + i);
+ paging_mark_dirty(d, mfn_add(mfn, i));
}
p2m->pod.entry_count -= (1 << order);
}
/* Mark a page as dirty */
-void paging_mark_dirty(struct domain *d, unsigned long guest_mfn)
+void paging_mark_dirty(struct domain *d, mfn_t gmfn)
{
unsigned long pfn;
- mfn_t gmfn;
-
- gmfn = _mfn(guest_mfn);
if ( !paging_mode_log_dirty(d) || !mfn_valid(gmfn) ||
page_get_owner(mfn_to_page(gmfn)) != d )
    return;
int result = 0;
struct page_info *page = mfn_to_page(gmfn);
- paging_mark_dirty(v->domain, mfn_x(gmfn));
+ paging_mark_dirty(v->domain, gmfn);
// Determine which types of shadows are affected, and update each.
//
sh_validate_guest_pt_write(v, sh_ctxt->mfn[1], addr + b1, b2);
}
- paging_mark_dirty(v->domain, mfn_x(sh_ctxt->mfn[0]));
+ paging_mark_dirty(v->domain, sh_ctxt->mfn[0]);
if ( unlikely(mfn_valid(sh_ctxt->mfn[1])) )
{
- paging_mark_dirty(v->domain, mfn_x(sh_ctxt->mfn[1]));
+ paging_mark_dirty(v->domain, sh_ctxt->mfn[1]);
vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
else
{
if ( mfn_valid(target_mfn) ) {
if ( ft & FETCH_TYPE_WRITE )
- paging_mark_dirty(d, mfn_x(target_mfn));
+ paging_mark_dirty(d, target_mfn);
else if ( !paging_mfn_is_dirty(d, target_mfn) )
sflags &= ~_PAGE_RW;
}
if ( mark_dirty )
{
put_page_and_type(cli_pfp);
- paging_mark_dirty(current->domain,cli_mfn);
+ paging_mark_dirty(current->domain, _mfn(cli_mfn));
}
else
put_page(cli_pfp);
#define gnttab_status_gmfn(d, t, i) \
(mfn_to_gmfn(d, gnttab_status_mfn(t, i)))
-#define gnttab_mark_dirty(d, f) paging_mark_dirty((d), (f))
+#define gnttab_mark_dirty(d, f) paging_mark_dirty((d), _mfn(f))
static inline void gnttab_clear_flag(unsigned int nr, uint16_t *st)
{
void (*clean_dirty_bitmap)(struct domain *d));
/* mark a page as dirty */
-void paging_mark_dirty(struct domain *d, unsigned long guest_mfn);
+void paging_mark_dirty(struct domain *d, mfn_t gmfn);
/* mark a page as dirty, taking a guest pfn as parameter */
void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn);
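
The churn above is mechanical: every caller either stops unwrapping an existing mfn_t (dropping mfn_x()) or wraps a raw unsigned long with _mfn(). The payoff is type safety; once paging_mark_dirty() takes mfn_t, the compiler rejects any caller that passes a bare frame number or, worse, a gfn. Below is a minimal standalone sketch of the pattern, hand-rolled rather than using Xen's TYPE_SAFE macro machinery, with paging_mark_dirty() reduced to a printf stub that drops the struct domain argument:

#include <stdio.h>

/*
 * Hand-rolled equivalent of Xen's typesafe MFN wrapper: a one-member
 * struct is a distinct type from a bare unsigned long, so mixing up
 * MFNs, GFNs and raw integers becomes a compile-time error.
 */
typedef struct { unsigned long mfn; } mfn_t;

static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ m }; }  /* wrap */
static inline unsigned long mfn_x(mfn_t m) { return m.mfn; }        /* unwrap */
static inline mfn_t mfn_add(mfn_t m, unsigned long i) { return _mfn(m.mfn + i); }

/* Stub standing in for the new-style interface; the real function
 * also takes a struct domain *. */
static void paging_mark_dirty(mfn_t gmfn)
{
    printf("mark dirty: mfn %#lx\n", mfn_x(gmfn));
}

int main(void)
{
    unsigned long raw = 0x1234;               /* e.g. fresh from page_to_mfn() */

    paging_mark_dirty(_mfn(raw));             /* callers wrap raw values once */
    paging_mark_dirty(mfn_add(_mfn(raw), 1)); /* arithmetic stays typed, as in the PoD loop */
    /* paging_mark_dirty(raw); */             /* rejected by the compiler */
    return 0;
}

In Xen itself the wrappers come from the TYPE_SAFE() macro, which can degrade mfn_t to a plain integer typedef in non-debug builds, so the type checking carries no run-time cost.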