     return 0;
 }
-static int update_paging_mode(struct domain *d, unsigned long gfn)
-{
-    u16 bdf;
-    void *device_entry;
-    unsigned int req_id, level, offset;
-    unsigned long flags;
-    struct pci_dev *pdev;
-    struct amd_iommu *iommu = NULL;
-    struct page_info *new_root = NULL;
-    struct page_info *old_root = NULL;
-    void *new_root_vaddr;
-    unsigned long old_root_mfn;
-    struct domain_iommu *hd = dom_iommu(d);
-
-    if ( gfn == gfn_x(INVALID_GFN) )
-        return -EADDRNOTAVAIL;
-    ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
-
-    level = hd->arch.paging_mode;
-    old_root = hd->arch.root_table;
-    offset = gfn >> (PTE_PER_TABLE_SHIFT * (level - 1));
-
-    ASSERT(spin_is_locked(&hd->arch.mapping_lock) && is_hvm_domain(d));
-
-    while ( offset >= PTE_PER_TABLE_SIZE )
-    {
-        /* Allocate and install a new root table.
-         * Only upper I/O page table grows, no need to fix next level bits */
-        new_root = alloc_amd_iommu_pgtable();
-        if ( new_root == NULL )
-        {
-            AMD_IOMMU_DEBUG("%s Cannot allocate I/O page table\n",
-                            __func__);
-            return -ENOMEM;
-        }
-
-        new_root_vaddr = __map_domain_page(new_root);
-        old_root_mfn = page_to_mfn(old_root);
-        set_iommu_pde_present(new_root_vaddr, old_root_mfn, level,
-                              !!IOMMUF_writable, !!IOMMUF_readable);
-        level++;
-        old_root = new_root;
-        offset >>= PTE_PER_TABLE_SHIFT;
-        unmap_domain_page(new_root_vaddr);
-    }
-
-    if ( new_root != NULL )
-    {
-        hd->arch.paging_mode = level;
-        hd->arch.root_table = new_root;
-
-        if ( !pcidevs_locked() )
-            AMD_IOMMU_DEBUG("%s Try to access pdev_list "
-                            "without aquiring pcidevs_lock.\n", __func__);
-
-        /* Update device table entries using new root table and paging mode */
-        for_each_pdev( d, pdev )
-        {
-            bdf = PCI_BDF2(pdev->bus, pdev->devfn);
-            iommu = find_iommu_for_device(pdev->seg, bdf);
-            if ( !iommu )
-            {
-                AMD_IOMMU_DEBUG("%s Fail to find iommu.\n", __func__);
-                return -ENODEV;
-            }
-
-            spin_lock_irqsave(&iommu->lock, flags);
-            do {
-                req_id = get_dma_requestor_id(pdev->seg, bdf);
-                device_entry = iommu->dev_table.buffer +
-                               (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
-
-                /* valid = 0 only works for dom0 passthrough mode */
-                amd_iommu_set_root_page_table((u32 *)device_entry,
-                                              page_to_maddr(hd->arch.root_table),
-                                              d->domain_id,
-                                              hd->arch.paging_mode, 1);
-
-                amd_iommu_flush_device(iommu, req_id);
-                bdf += pdev->phantom_stride;
-            } while ( PCI_DEVFN2(bdf) != pdev->devfn &&
-                      PCI_SLOT(bdf) == PCI_SLOT(pdev->devfn) );
-            spin_unlock_irqrestore(&iommu->lock, flags);
-        }
-
-        /* For safety, invalidate all entries */
-        amd_iommu_flush_all_pages(d);
-    }
-    return 0;
-}
-
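The growth test in the loop being removed above boils down to asking how many page-table levels a particular gfn needs: with 512 entries per table (PTE_PER_TABLE_SHIFT == 9) and 4 KiB pages, an n-level hierarchy can index gfns below 1 << (9 * n). A minimal standalone sketch of that arithmetic, assuming those driver constants; levels_needed_for_gfn() is a made-up name for illustration, not a Xen function:

#include <stdio.h>

#define PTE_PER_TABLE_SHIFT 9                      /* 512 entries per table */
#define PTE_PER_TABLE_SIZE  (1UL << PTE_PER_TABLE_SHIFT)

/*
 * Mirror of the grow-the-root loop above: starting from 'level', keep
 * adding levels while the root-level index for 'gfn' would overflow a
 * single 512-entry table.
 */
static unsigned int levels_needed_for_gfn(unsigned long gfn, unsigned int level)
{
    unsigned long offset = gfn >> (PTE_PER_TABLE_SHIFT * (level - 1));

    while ( offset >= PTE_PER_TABLE_SIZE )
    {
        level++;
        offset >>= PTE_PER_TABLE_SHIFT;
    }

    return level;
}

int main(void)
{
    /* A 2-level table reaches gfns below 1 << 18, i.e. 1 GiB of space. */
    printf("%u\n", levels_needed_for_gfn(0x12345UL, 2));  /* prints 2 */
    /* gfn 1 << 30 sits at 4 TiB, well above 512 GiB, so 4 levels are needed. */
    printf("%u\n", levels_needed_for_gfn(1UL << 30, 2));  /* prints 4 */
    return 0;
}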
 int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
                        unsigned int flags)
 {
         return rc;
     }
-    /* Since HVM domain is initialized with 2 level IO page table,
-     * we might need a deeper page table for lager gfn now */
-    if ( is_hvm_domain(d) )
-    {
-        if ( update_paging_mode(d, gfn) )
-        {
-            spin_unlock(&hd->arch.mapping_lock);
-            AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
-            domain_crash(d);
-            return -EFAULT;
-        }
-    }
-
     if ( iommu_pde_from_gfn(d, gfn, pt_mfn, true) || (pt_mfn[1] == 0) )
     {
         spin_unlock(&hd->arch.mapping_lock);
 {
     struct domain_iommu *hd = dom_iommu(d);
-    /* For pv and dom0, stick with get_paging_mode(max_page)
-     * For HVM dom0, use 2 level page table at first */
-    hd->arch.paging_mode = is_hvm_domain(d) ?
-                           IOMMU_PAGING_MODE_LEVEL_2 :
-                           get_paging_mode(max_page);
+    /*
+     * Choose the number of levels for the IOMMU page tables.
+     * - PV needs 3 or 4, depending on whether there is RAM (including hotplug
+     *   RAM) above the 512G boundary.
+     * - HVM could in principle use 3 or 4 depending on how much guest
+     *   physical address space we give it, but this isn't known yet so use 4
+     *   unilaterally.
+     */
+    hd->arch.paging_mode = is_hvm_domain(d)
+        ? IOMMU_PAGING_MODE_LEVEL_4 : get_paging_mode(get_upper_mfn_bound());
+
     return 0;
 }
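For reference on the numbers in the new comment: with 4 KiB pages and 9 index bits per level, n levels reach 2^(12 + 9n) bytes, so 3 levels cover exactly 512 GiB (the boundary cited for PV) and 4 levels cover 2^48 bytes, the driver's 48-bit DEFAULT_DOMAIN_ADDRESS_WIDTH. A standalone sketch of a get_paging_mode()-style calculation under those assumptions; paging_mode_for() is an illustrative stand-in, not the in-tree helper:

#include <stdio.h>

#define PAGE_SHIFT          12
#define PTE_PER_TABLE_SHIFT 9
#define PTE_PER_TABLE_SIZE  (1UL << PTE_PER_TABLE_SHIFT)

/*
 * Smallest number of levels whose reach, in page frames, is at least
 * 'max_frames'.  Each iteration peels off one level's worth of index
 * bits, rounding up so a partially filled top-level table still counts.
 */
static unsigned int paging_mode_for(unsigned long max_frames)
{
    unsigned int level = 1;

    while ( max_frames > PTE_PER_TABLE_SIZE )
    {
        max_frames = (max_frames + PTE_PER_TABLE_SIZE - 1) >> PTE_PER_TABLE_SHIFT;
        level++;
    }

    return level;
}

int main(void)
{
    unsigned long frames_512G = 1UL << (39 - PAGE_SHIFT);  /* 512 GiB of space */
    unsigned long frames_48bit = 1UL << (48 - PAGE_SHIFT); /* 2^48 bytes */

    printf("%u\n", paging_mode_for(frames_512G));      /* prints 3 */
    printf("%u\n", paging_mode_for(frames_512G + 1));  /* prints 4 */
    printf("%u\n", paging_mode_for(frames_48bit));     /* prints 4 */
    return 0;
}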