The new per-domain IOMMU page-table allocator will now free the
page-tables when the domain's resources are relinquished. However, the
per-domain IOMMU structure will still contain a dangling pointer to
the root page-table.

Xen may access the IOMMU page-tables afterwards, at least in the case
of a PV domain:

(XEN) Xen call trace:
(XEN)    [<ffff82d04025b4b2>] R iommu.c#addr_to_dma_page_maddr+0x12e/0x1d8
(XEN)    [<ffff82d04025b695>] F iommu.c#intel_iommu_unmap_page+0x5d/0xf8
(XEN)    [<ffff82d0402695f3>] F iommu_unmap+0x9c/0x129
(XEN)    [<ffff82d0402696a6>] F iommu_legacy_unmap+0x26/0x63
(XEN)    [<ffff82d04033c5c7>] F mm.c#cleanup_page_mappings+0x139/0x144
(XEN)    [<ffff82d04033c61d>] F put_page+0x4b/0xb3
(XEN)    [<ffff82d04033c87f>] F put_page_from_l1e+0x136/0x13b
(XEN)    [<ffff82d04033cada>] F devalidate_page+0x256/0x8dc
(XEN)    [<ffff82d04033d396>] F mm.c#_put_page_type+0x236/0x47e
(XEN)    [<ffff82d04033d64d>] F mm.c#put_pt_page+0x6f/0x80
(XEN)    [<ffff82d04033d8d6>] F mm.c#put_page_from_l2e+0x8a/0xcf
(XEN)    [<ffff82d04033cc27>] F devalidate_page+0x3a3/0x8dc
(XEN)    [<ffff82d04033d396>] F mm.c#_put_page_type+0x236/0x47e
(XEN)    [<ffff82d04033d64d>] F mm.c#put_pt_page+0x6f/0x80
(XEN)    [<ffff82d04033d807>] F mm.c#put_page_from_l3e+0x8a/0xcf
(XEN)    [<ffff82d04033cdf0>] F devalidate_page+0x56c/0x8dc
(XEN)    [<ffff82d04033d396>] F mm.c#_put_page_type+0x236/0x47e
(XEN)    [<ffff82d04033d64d>] F mm.c#put_pt_page+0x6f/0x80
(XEN)    [<ffff82d04033d6c7>] F mm.c#put_page_from_l4e+0x69/0x6d
(XEN)    [<ffff82d04033cf24>] F devalidate_page+0x6a0/0x8dc
(XEN)    [<ffff82d04033d396>] F mm.c#_put_page_type+0x236/0x47e
(XEN)    [<ffff82d04033d92e>] F put_page_type_preemptible+0x13/0x15
(XEN)    [<ffff82d04032598a>] F domain.c#relinquish_memory+0x1ff/0x4e9
(XEN)    [<ffff82d0403295f2>] F domain_relinquish_resources+0x2b6/0x36a
(XEN)    [<ffff82d040205cdf>] F domain_kill+0xb8/0x141
(XEN)    [<ffff82d040236cac>] F do_domctl+0xb6f/0x18e5
(XEN)    [<ffff82d04031d098>] F pv_hypercall+0x2f0/0x55f
(XEN)    [<ffff82d04039b432>] F lstar_enter+0x112/0x120

This will result in a use-after-free and possibly a host crash or
memory corruption.
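
As a minimal userspace sketch of the pattern (the structure and helper
below are hypothetical stand-ins, not the actual Xen code): the
allocator releases the backing memory, but the per-domain structure
keeps the stale root pointer, which a later unmap-style walk then
dereferences.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for the per-domain IOMMU state. */
    struct hd_state {
        uint64_t *root_table;   /* root page-table (heap memory here) */
    };

    /* Mirrors the bug: the pages are freed, but the root pointer in
     * the per-domain structure is left dangling. */
    static void relinquish_pgtables(struct hd_state *hd)
    {
        free(hd->root_table);
        /* Missing: hd->root_table = NULL; (what the fix below adds) */
    }

    int main(void)
    {
        struct hd_state hd = {
            .root_table = calloc(512, sizeof(uint64_t)),
        };

        relinquish_pgtables(&hd);

        /* A later unmap-style walk dereferences freed memory: */
        printf("%" PRIu64 "\n", hd.root_table[0]); /* use-after-free */
        return 0;
    }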

It would not be possible to free the page-tables further down in
domain_relinquish_resources() because cleanup_page_mappings() will only
be called when the last reference on the page has been dropped. This
may happen much later if another domain still holds a reference.
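
A toy illustration of that ordering constraint (hypothetical names,
not the Xen implementation): the cleanup only runs on the final put,
and another reference holder can delay that arbitrarily.

    #include <stdio.h>

    struct page_sketch {
        unsigned int count;
    };

    static void cleanup_mappings(struct page_sketch *pg)
    {
        /* In Xen, this is roughly where cleanup_page_mappings() would
         * touch the IOMMU page-tables. */
        (void)pg;
        printf("cleanup runs now\n");
    }

    static void put_ref(struct page_sketch *pg)
    {
        if ( --pg->count == 0 )   /* only the last put cleans up */
            cleanup_mappings(pg);
    }

    int main(void)
    {
        /* One reference from the dying domain, one from another domain. */
        struct page_sketch pg = { .count = 2 };

        put_ref(&pg); /* relinquish_memory(): 2 -> 1, no cleanup yet    */
        put_ref(&pg); /* other domain, much later: 1 -> 0, cleanup runs */
        return 0;
    }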

After all the PCI devices have been de-assigned, nobody should use the
IOMMU page-tables and it is therefore pointless to try to modify them.
So we can simply clear any reference to the root page-table in the
per-domain IOMMU structure. This requires introducing a new callback,
as the implementation will depend on the IOMMU driver used.
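
In sketch form, the shape of that hook looks as follows (the names
here are simplified stand-ins; the real definitions are in the patch
below):

    struct domain;                    /* opaque for this sketch */

    struct iommu_ops_sketch {
        /* Driver-specific: drop the reference to the root page-table. */
        void (*clear_root_pgtable)(struct domain *d);
    };

    /* Common code, once all PCI devices have been de-assigned: */
    void relinquish_iommu_pgtables(struct domain *d,
                                   const struct iommu_ops_sketch *ops)
    {
        ops->clear_root_pgtable(d);   /* AMD and VT-d each implement this */
        /* ...then the page-table pages can be freed safely. */
    }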

Take the opportunity to add an ASSERT() in arch_iommu_domain_destroy()
to check that all the IOMMU page-tables have been freed.

Fixes: 3eef6d07d722 ("x86/iommu: convert VT-d code to use new page table allocator")
Signed-off-by: Julien Grall <jgrall@amazon.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Release-Acked-by: Ian Jackson <iwj@xenproject.org>
return reassign_device(pdev->domain, d, devfn, pdev);
}
+static void amd_iommu_clear_root_pgtable(struct domain *d)
+{
+ struct domain_iommu *hd = dom_iommu(d);
+
+ spin_lock(&hd->arch.mapping_lock);
+ hd->arch.amd.root_table = NULL;
+ spin_unlock(&hd->arch.mapping_lock);
+}
+
static void amd_iommu_domain_destroy(struct domain *d)
{
- dom_iommu(d)->arch.amd.root_table = NULL;
+ ASSERT(!dom_iommu(d)->arch.amd.root_table);
}
static int amd_iommu_add_device(u8 devfn, struct pci_dev *pdev)
.remove_device = amd_iommu_remove_device,
.assign_device = amd_iommu_assign_device,
.teardown = amd_iommu_domain_destroy,
+ .clear_root_pgtable = amd_iommu_clear_root_pgtable,
.map_page = amd_iommu_map_page,
.unmap_page = amd_iommu_unmap_page,
.iotlb_flush = amd_iommu_flush_iotlb_pages,
return ret;
}
+static void iommu_clear_root_pgtable(struct domain *d)
+{
+ struct domain_iommu *hd = dom_iommu(d);
+
+ spin_lock(&hd->arch.mapping_lock);
+ hd->arch.vtd.pgd_maddr = 0;
+ spin_unlock(&hd->arch.mapping_lock);
+}
+
static void iommu_domain_teardown(struct domain *d)
{
struct domain_iommu *hd = dom_iommu(d);
xfree(mrmrr);
}
- hd->arch.vtd.pgd_maddr = 0;
+ ASSERT(!hd->arch.vtd.pgd_maddr);
}
static int __must_check intel_iommu_map_page(struct domain *d, dfn_t dfn,
.remove_device = intel_iommu_remove_device,
.assign_device = intel_iommu_assign_device,
.teardown = iommu_domain_teardown,
+ .clear_root_pgtable = iommu_clear_root_pgtable,
.map_page = intel_iommu_map_page,
.unmap_page = intel_iommu_unmap_page,
.lookup_page = intel_iommu_lookup_page,
void arch_iommu_domain_destroy(struct domain *d)
{
+ /*
+ * There should be no page-tables left allocated by the time the
+ * domain is destroyed. Note that arch_iommu_domain_destroy() is
+ * called unconditionally, so pgtables may be uninitialized.
+ */
+ ASSERT(!dom_iommu(d)->platform_ops ||
+ page_list_empty(&dom_iommu(d)->arch.pgtables.list));
}
static bool __hwdom_init hwdom_iommu_map(const struct domain *d,
/* After this barrier, no new IOMMU mappings can be inserted. */
spin_barrier(&hd->arch.mapping_lock);
+ /*
+ * Pages will be moved to the free list below. So we want to
+ * clear the root page-table to avoid any potential use-after-free.
+ */
+ hd->platform_ops->clear_root_pgtable(d);
+
while ( (pg = page_list_remove_head(&hd->arch.pgtables.list)) )
{
free_domheap_page(pg);
int (*adjust_irq_affinities)(void);
void (*sync_cache)(const void *addr, unsigned int size);
+ void (*clear_root_pgtable)(struct domain *d);
#endif /* CONFIG_X86 */
int __must_check (*suspend)(void);