}
/* returns: 0 for success, -errno otherwise */
-static int atomic_write_ept_entry(ept_entry_t *entryptr, ept_entry_t new,
+static int atomic_write_ept_entry(struct p2m_domain *p2m,
+ ept_entry_t *entryptr, ept_entry_t new,
int level)
{
int rc;
if ( unlikely(p2m_is_foreign(entryptr->sa_p2mt)) && check_foreign )
oldmfn = entryptr->mfn;
+ p2m_entry_modify(p2m, new.sa_p2mt, entryptr->sa_p2mt, level);
+
write_atomic(&entryptr->epte, new.epte);
if ( unlikely(oldmfn != mfn_x(INVALID_MFN)) )
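For context, a minimal sketch of p2m_entry_modify(), which this patch relies on but which is introduced outside this hunk (its exact location and body are assumptions here). It centralizes the p2m_ioreq_server entry accounting that the later hunks delete from their individual call sites; level 0 is taken to be the 4K leaf, matching the EPT caller above:

/*
 * Illustrative sketch only, not part of this hunk. Reconstructed from the
 * open-coded accounting removed below: bump the count when a leaf entry
 * becomes p2m_ioreq_server, drop it when such an entry goes away. The
 * leaf-level convention is an assumption; non-EPT callers (PT levels are
 * 1-based) would need to normalize accordingly.
 */
static inline void p2m_entry_modify(struct p2m_domain *p2m, p2m_type_t nt,
                                    p2m_type_t ot, unsigned int level)
{
    /* p2m_ioreq_server is only ever used for 4K (leaf) entries. */
    if ( level != 0 || nt == ot )
        return;

    if ( nt == p2m_ioreq_server )
        p2m->ioreq.entry_count++;

    if ( ot == p2m_ioreq_server )
    {
        ASSERT(p2m->ioreq.entry_count > 0);
        p2m->ioreq.entry_count--;
    }
}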
 * present entries in the given page table, optionally also marking the
 * entries (and their subtrees) as needing P2M type re-calculation.
*/
-static bool_t ept_invalidate_emt(mfn_t mfn, bool_t recalc, int level)
+static bool_t ept_invalidate_emt(struct p2m_domain *p2m, mfn_t mfn,
+ bool_t recalc, int level)
{
int rc;
ept_entry_t *epte = map_domain_page(mfn);
e.emt = MTRR_NUM_TYPES;
if ( recalc )
e.recalc = 1;
- rc = atomic_write_ept_entry(&epte[i], e, level);
+ rc = atomic_write_ept_entry(p2m, &epte[i], e, level);
ASSERT(rc == 0);
changed = 1;
}
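For reference, a sketch of ept_invalidate_emt() as a whole, reconstructing the loop this excerpt elides (an assumption based on the pre-patch upstream shape; only the signature and the atomic_write_ept_entry() call are changed by this patch):

static bool_t ept_invalidate_emt(struct p2m_domain *p2m, mfn_t mfn,
                                 bool_t recalc, int level)
{
    int rc;
    ept_entry_t *epte = map_domain_page(mfn);
    unsigned int i;
    bool_t changed = 0;

    for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
    {
        ept_entry_t e = atomic_read_ept_entry(&epte[i]);

        /* Skip non-present entries and entries already marked as needed. */
        if ( !is_epte_valid(&e) || !is_epte_present(&e) ||
             (e.emt == MTRR_NUM_TYPES && (e.recalc || !recalc)) )
            continue;

        e.emt = MTRR_NUM_TYPES;
        if ( recalc )
            e.recalc = 1;
        rc = atomic_write_ept_entry(p2m, &epte[i], e, level);
        ASSERT(rc == 0);
        changed = 1;
    }

    unmap_domain_page(epte);

    return changed;
}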
rc = -ENOMEM;
goto out;
}
- wrc = atomic_write_ept_entry(&table[index], split_ept_entry, i);
+ wrc = atomic_write_ept_entry(p2m, &table[index], split_ept_entry, i);
ASSERT(wrc == 0);
for ( ; i > target; --i )
{
e.emt = MTRR_NUM_TYPES;
e.recalc = 1;
- wrc = atomic_write_ept_entry(&table[index], e, target);
+ wrc = atomic_write_ept_entry(p2m, &table[index], e, target);
ASSERT(wrc == 0);
rc = 1;
}
nt = p2m_recalc_type(e.recalc, e.sa_p2mt, p2m, gfn + i);
if ( nt != e.sa_p2mt )
{
- if ( e.sa_p2mt == p2m_ioreq_server )
- {
- ASSERT(p2m->ioreq.entry_count > 0);
- p2m->ioreq.entry_count--;
- }
-
e.sa_p2mt = nt;
ept_p2m_type_to_flags(p2m, &e, e.sa_p2mt, e.access);
}
e.recalc = 0;
- wrc = atomic_write_ept_entry(&epte[i], e, level);
+ wrc = atomic_write_ept_entry(p2m, &epte[i], e, level);
ASSERT(wrc == 0);
}
}
{
if ( ept_split_super_page(p2m, &e, level, level - 1) )
{
- wrc = atomic_write_ept_entry(&epte[i], e, level);
+ wrc = atomic_write_ept_entry(p2m, &epte[i], e, level);
ASSERT(wrc == 0);
unmap_domain_page(epte);
mfn = e.mfn;
e.recalc = 0;
if ( recalc && p2m_is_changeable(e.sa_p2mt) )
ept_p2m_type_to_flags(p2m, &e, e.sa_p2mt, e.access);
- wrc = atomic_write_ept_entry(&epte[i], e, level);
+ wrc = atomic_write_ept_entry(p2m, &epte[i], e, level);
ASSERT(wrc == 0);
}
if ( e.emt == MTRR_NUM_TYPES )
{
ASSERT(is_epte_present(&e));
- ept_invalidate_emt(_mfn(e.mfn), e.recalc, level);
+ ept_invalidate_emt(p2m, _mfn(e.mfn), e.recalc, level);
smp_wmb();
e.emt = 0;
e.recalc = 0;
- wrc = atomic_write_ept_entry(&epte[i], e, level);
+ wrc = atomic_write_ept_entry(p2m, &epte[i], e, level);
ASSERT(wrc == 0);
unmap_domain_page(epte);
rc = 1;
/* now install the newly split ept sub-tree */
/* NB: please make sure the domain is paused and there is no in-flight VT-d DMA. */
- rc = atomic_write_ept_entry(ept_entry, split_ept_entry, i);
+ rc = atomic_write_ept_entry(p2m, ept_entry, split_ept_entry, i);
ASSERT(rc == 0);
/* then move to the level we want to make real changes */
new_entry.suppress_ve = is_epte_valid(&old_entry) ?
old_entry.suppress_ve : 1;
- /*
- * p2m_ioreq_server is only used for 4K pages, so the
- * count is only done on ept page table entries.
- */
- if ( p2mt == p2m_ioreq_server )
- {
- ASSERT(i == 0);
- p2m->ioreq.entry_count++;
- }
-
- if ( ept_entry->sa_p2mt == p2m_ioreq_server )
- {
- ASSERT(i == 0);
- ASSERT(p2m->ioreq.entry_count > 0);
- p2m->ioreq.entry_count--;
- }
-
- rc = atomic_write_ept_entry(ept_entry, new_entry, target);
+ rc = atomic_write_ept_entry(p2m, ept_entry, new_entry, target);
if ( unlikely(rc) )
old_entry.epte = 0;
else
if ( !mfn )
return;
- if ( ept_invalidate_emt(_mfn(mfn), 1, p2m->ept.wl) )
+ if ( ept_invalidate_emt(p2m, _mfn(mfn), 1, p2m->ept.wl) )
ept_sync_domain(p2m);
}
if ( !mfn )
return;
- if ( ept_invalidate_emt(_mfn(mfn), 0, p2m->ept.wl) )
+ if ( ept_invalidate_emt(p2m, _mfn(mfn), 0, p2m->ept.wl) )
ept_sync_domain(p2m);
}
flags |= _PAGE_PSE;
}
- if ( ot == p2m_ioreq_server )
- {
- ASSERT(p2m->ioreq.entry_count > 0);
- ASSERT(level == 0);
- p2m->ioreq.entry_count--;
- }
-
e = l1e_from_pfn(mfn, flags);
p2m_add_iommu_flags(&e, level,
(nt == p2m_ram_rw)
if ( page_order == PAGE_ORDER_4K )
{
- p2m_type_t p2mt_old;
-
rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
L2_PAGETABLE_SHIFT - PAGE_SHIFT,
L2_PAGETABLE_ENTRIES, 1, 1);
if ( entry_content.l1 != 0 )
p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
- p2mt_old = p2m_flags_to_type(l1e_get_flags(*p2m_entry));
-
- /*
- * p2m_ioreq_server is only used for 4K pages, so
- * the count is only done for level 1 entries.
- */
- if ( p2mt == p2m_ioreq_server )
- p2m->ioreq.entry_count++;
-
- if ( p2mt_old == p2m_ioreq_server )
- {
- ASSERT(p2m->ioreq.entry_count > 0);
- p2m->ioreq.entry_count--;
- }
-
/* level 1 entry */
p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 1);
/* NB: paging_write_p2m_entry() handles tlb flushes properly */
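
The PT-side hunks above drop the open-coded accounting without showing its replacement, so the equivalent bookkeeping is assumed to live in the entry-write path (p2m->write_p2m_entry / paging_write_p2m_entry). A hypothetical sketch of such a hook, deriving the old and new types the same way the deleted code did; pt_update_ioreq_count() is an illustrative name, not part of this patch:

/* Hypothetical helper for illustration only; not introduced by this patch. */
static void pt_update_ioreq_count(struct p2m_domain *p2m, l1_pgentry_t *p,
                                  l1_pgentry_t new, unsigned int level)
{
    /* PT levels are 1-based; the sketch above treats 0 as the 4K leaf. */
    p2m_entry_modify(p2m, p2m_flags_to_type(l1e_get_flags(new)),
                     p2m_flags_to_type(l1e_get_flags(*p)), level - 1);
}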