return 0;
}
-int amd_iommu_reserve_domain_unity_map(struct domain *domain,
- paddr_t phys_addr,
- unsigned long size, int iw, int ir)
+int amd_iommu_reserve_domain_unity_map(struct domain *d,
+ const struct ivrs_unity_map *map,
+ unsigned int flag)
{
- unsigned long npages, i;
- unsigned long gfn;
- unsigned int flags = !!ir;
- unsigned int flush_flags = 0;
- int rt = 0;
-
- if ( iw )
- flags |= IOMMUF_writable;
-
- npages = region_to_pages(phys_addr, size);
- gfn = phys_addr >> PAGE_SHIFT;
- for ( i = 0; i < npages; i++ )
+ int rc;
+
+ if ( d == dom_io )
+ return 0;
+
+ for ( rc = 0; !rc && map; map = map->next )
{
- unsigned long frame = gfn + i;
+ p2m_access_t p2ma = p2m_access_n;
- rt = amd_iommu_map_page(domain, _dfn(frame), _mfn(frame), flags,
- &flush_flags);
- if ( rt != 0 )
- break;
+ if ( map->read )
+ p2ma |= p2m_access_r;
+ if ( map->write )
+ p2ma |= p2m_access_w;
+
+ rc = iommu_identity_mapping(d, p2ma, map->addr,
+ map->addr + map->length - 1, flag);
}
- /* Use while-break to avoid compiler warning */
- while ( flush_flags &&
- amd_iommu_flush_iotlb_pages(domain, _dfn(gfn),
- npages, flush_flags) )
- break;
+ return rc;
+}
+
+int amd_iommu_reserve_domain_unity_unmap(struct domain *d,
+ const struct ivrs_unity_map *map)
+{
+ int rc;
+
+ if ( d == dom_io )
+ return 0;
+
+ for ( rc = 0; map; map = map->next )
+ {
+ int ret = iommu_identity_mapping(d, p2m_access_x, map->addr,
+ map->addr + map->length - 1, 0);
+
+ if ( ret && ret != -ENOENT && !rc )
+ rc = ret;
+ }
- return rt;
+ return rc;
}
/* Share p2m table with iommu. */
struct amd_iommu *iommu;
int bdf, rc;
struct domain_iommu *t = dom_iommu(target);
+ const struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(pdev->seg);
bdf = PCI_BDF2(pdev->bus, pdev->devfn);
iommu = find_iommu_for_device(pdev->seg, bdf);
amd_iommu_disable_domain_device(source, iommu, devfn, pdev);
- if ( devfn == pdev->devfn )
+    /*
+     * If the device belongs to the hardware domain and it has a unity mapping,
+     * don't remove that mapping from the hardware domain, because the BIOS may
+     * still reference it.
+     */
+ if ( !is_hardware_domain(source) )
{
- list_move(&pdev->domain_list, &target->arch.pdev_list);
- pdev->domain = target;
+ rc = amd_iommu_reserve_domain_unity_unmap(
+ source,
+ ivrs_mappings[get_dma_requestor_id(pdev->seg, bdf)].unity_map);
+ if ( rc )
+ return rc;
+ }
+
+ if ( devfn == pdev->devfn && pdev->domain != dom_io )
+ {
+ list_move(&pdev->domain_list, &dom_io->arch.pdev_list);
+ pdev->domain = dom_io;
}
rc = allocate_domain_resources(t);
pdev->seg, pdev->bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
source->domain_id, target->domain_id);
+ if ( devfn == pdev->devfn && pdev->domain != target )
+ {
+ list_move(&pdev->domain_list, &target->arch.pdev_list);
+ pdev->domain = target;
+ }
+
return 0;
}
struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(pdev->seg);
int bdf = PCI_BDF2(pdev->bus, devfn);
int req_id = get_dma_requestor_id(pdev->seg, bdf);
- const struct ivrs_unity_map *unity_map;
+ int rc = amd_iommu_reserve_domain_unity_map(
+ d, ivrs_mappings[req_id].unity_map, flag);
+
+ if ( !rc )
+ rc = reassign_device(pdev->domain, d, devfn, pdev);
- for ( unity_map = ivrs_mappings[req_id].unity_map; unity_map;
- unity_map = unity_map->next )
+ if ( rc && !is_hardware_domain(d) )
{
- int rc = amd_iommu_reserve_domain_unity_map(
- d, unity_map->addr, unity_map->length,
- unity_map->write, unity_map->read);
+ int ret = amd_iommu_reserve_domain_unity_unmap(
+ d, ivrs_mappings[req_id].unity_map);
- if ( rc )
- return rc;
+ if ( ret )
+ {
+ printk(XENLOG_ERR "AMD-Vi: "
+ "unity-unmap for %pd/%04x:%02x:%02x.%u failed (%d)\n",
+ d, pdev->seg, pdev->bus,
+ PCI_SLOT(devfn), PCI_FUNC(devfn), ret);
+ domain_crash(d);
+ }
}
- return reassign_device(pdev->domain, d, devfn, pdev);
+ return rc;
}
static void deallocate_next_page_table(struct page_info *pg, int level)
static void amd_iommu_domain_destroy(struct domain *d)
{
+ iommu_identity_map_teardown(d);
deallocate_iommu_page_tables(d);
amd_iommu_flush_all_pages(d);
}