# set memory limit
xc.domain_setmaxmem(self.domid, maxmem)
+ # Reserve 1 page per MiB of RAM for the separate VT-d page tables.
+ vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
+ # Round vtd_mem up to a multiple of a MiB.
+ vtd_mem = ((vtd_mem + 1023) / 1024) * 1024
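+ # e.g. for a 2048 MiB guest, vtd_mem = 4 * 2048 = 8192 KiB (8 MiB).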
+
# Make sure there's enough RAM available for the domain
- balloon.free(memory + shadow)
+ balloon.free(memory + shadow + vtd_mem)
# Set up the shadow memory
shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
if ( iommu_enabled && is_hvm_domain(d) )
{
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
- {
- if ( (p2mt == p2m_mmio_direct) )
- iommu_flush(d, gfn, (u64*)p2m_entry);
- }
- else if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
- {
- if ( p2mt == p2m_ram_rw )
- iommu_map_page(d, gfn, mfn_x(mfn));
- else
- iommu_unmap_page(d, gfn);
- }
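+ /* Keep the separate IOMMU page tables in sync with the p2m update, regardless of IOMMU vendor. */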
+ if ( p2mt == p2m_ram_rw )
+ iommu_map_page(d, gfn, mfn_x(mfn));
+ else
+ iommu_unmap_page(d, gfn);
}
/* Success */
rv = 1;
}
-#if CONFIG_PAGING_LEVELS >= 3
- if (vtd_enabled && is_hvm_domain(d))
- iommu_set_pgd(d);
-#endif
-
P2M_PRINTK("p2m table initialised (%u pages)\n", page_count);
p2m_unlock(p2m);
return 0;
}
/* free pgd */
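+ /* Only free the pgd itself once the whole address space has been torn down. */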
- if ( start == 0 && end == ((((u64)1) << addr_width) - 1) )
+ if ( start == 0 && end >= ((((u64)1) << addr_width) - 1) )
{
free_pgtable_maddr(hd->pgd_maddr);
hd->pgd_maddr = 0;
}
}
+ /* Free all VT-d page tables when a domain is shut down or destroyed. */
+static void iommu_free_pagetable(struct domain *domain)
+{
+ struct hvm_iommu *hd = domain_hvm_iommu(domain);
+ int addr_width = agaw_to_width(hd->agaw);
+ u64 start, end;
+
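+ /* Tear down the domain's entire DMA address space, as determined by its AGAW. */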
+ start = 0;
+ end = (((u64)1) << addr_width) - 1;
+
+ dma_pte_free_pagetable(domain, start, end);
+}
+
static int iommu_set_root_entry(struct iommu *iommu)
{
u32 cmd, sts;
if ( list_empty(&acpi_drhd_units) )
return;
- iommu_domid_release(d);
- iommu_free_pgd(d);
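+ /* Free the VT-d page tables before handing devices back to dom0. */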
+ iommu_free_pagetable(d);
return_devices_to_dom0(d);
+ iommu_domid_release(d);
}
static int domain_context_mapped(struct pci_dev *pdev)