#include "iommu.h"
+#define CONTIG_MASK IOMMU_PTE_CONTIG_MASK
+#include <asm/pt-contig-markers.h>
/* Given pfn and page table level, return pde index */
@@ ... @@ clear_iommu_pte_present()
static union amd_iommu_pte clear_iommu_pte_present(unsigned long l1_mfn,
                                                    unsigned long dfn,
-                                                   unsigned int level)
+                                                   unsigned int level,
+                                                   bool *free)
{
    union amd_iommu_pte *table, *pte, old;
+    unsigned int idx = pfn_to_pde_idx(dfn, level);

    table = map_domain_page(_mfn(l1_mfn));
-    pte = &table[pfn_to_pde_idx(dfn, level)];
+    pte = &table[idx];
    old = *pte;

    write_atomic(&pte->raw, 0);
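+
+    /*
+     * A true return from pt_update_contig_markers() means the table no
+     * longer holds any present entries, i.e. the caller may free it.
+     */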
+    *free = pt_update_contig_markers(&table->raw, idx, level, PTE_kind_null);
+
    unmap_domain_page(table);

    return old;
@@ ... @@ set_iommu_pte_present()
    if ( !old.pr || old.next_level ||
         old.mfn != next_mfn ||
         old.iw != iw || old.ir != ir )
+    {
        set_iommu_pde_present(pde, next_mfn, 0, iw, ir);
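+        /* Record that this slot now holds a leaf mapping. */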
+        pt_update_contig_markers(&table->raw, pfn_to_pde_idx(dfn, level),
+                                 level, PTE_kind_leaf);
+    }
    else
        old.pr = false; /* signal "no change" to the caller */
@@ ... @@ iommu_pde_from_dfn()
            smp_wmb();
            set_iommu_pde_present(pde, next_table_mfn, next_level, true,
                                  true);
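+            /* The updated slot now references a lower-level page table. */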
+            pt_update_contig_markers(&next_table_vaddr->raw,
+                                     pfn_to_pde_idx(dfn, level),
+                                     level, PTE_kind_table);

            *flush_flags |= IOMMU_FLUSHF_modified;
        }
@@ ... @@ iommu_pde_from_dfn()
                next_table_mfn = mfn_x(page_to_mfn(table));
                set_iommu_pde_present(pde, next_table_mfn, next_level, true,
                                      true);
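+                /* As above: the slot now references a page table. */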
+                pt_update_contig_markers(&next_table_vaddr->raw,
+                                         pfn_to_pde_idx(dfn, level),
+                                         level, PTE_kind_table);
            }
            else /* should never reach here */
            {
@@ ... @@ amd_iommu_unmap_page()
    if ( pt_mfn )
    {
+        bool free;
+
        /* Mark PTE as 'page not present'. */
-        old = clear_iommu_pte_present(pt_mfn, dfn_x(dfn), level);
+        old = clear_iommu_pte_present(pt_mfn, dfn_x(dfn), level, &free);
+
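+        /*
+         * If the table just updated no longer holds any present entries,
+         * walk up the hierarchy: clear the referencing entry one level up
+         * and free the table which has become empty, repeating for as long
+         * as tables keep becoming empty.  The walk stops before reaching
+         * the top level.
+         */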
+        while ( unlikely(free) && ++level < hd->arch.amd.paging_mode )
+        {
+            struct page_info *pg = mfn_to_page(_mfn(pt_mfn));
+
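+            /*
+             * Re-walk the tree to find the page table one level up, i.e.
+             * the one whose entry references the table that has just
+             * become empty.  That table must already exist, hence the
+             * BUG checks.
+             */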
+            if ( iommu_pde_from_dfn(d, dfn_x(dfn), level, &pt_mfn,
+                                    flush_flags, false) )
+                BUG();
+            BUG_ON(!pt_mfn);
+
+            clear_iommu_pte_present(pt_mfn, dfn_x(dfn), level, &free);
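+            /*
+             * A whole page table has gone away, so flushing just the
+             * single DFN is not enough; request a full flush and queue
+             * the empty table for freeing.
+             */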
+            *flush_flags |= IOMMU_FLUSHF_all;
+            iommu_queue_free_pgtable(hd, pg);
+        }
    }

    spin_unlock(&hd->arch.mapping_lock);