ia64/xen-unstable
changeset 17730:9a7a6f729d2c
VT-d: flush iotlb of selective iommu when a domain's VT-d table is changed
When a domain's VT-d table is changed, only the iommus under which the
domain has assigned devices need to be flushed.
Signed-off-by: Yang, Xiaowei <xiaowei.yang@intel.com>
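As a rough sketch of the pattern this changeset introduces (simplified, hypothetical types and helpers for illustration only; the real code is in the diff below), each domain keeps a bitmap with one bit per IOMMU, set when a device behind that IOMMU is assigned to the domain, and the flush loops skip IOMMUs whose bit is clear:

```c
/* Minimal sketch of the per-domain IOMMU bitmap idea.
 * Types and helper names here are illustrative, not the real Xen definitions. */
#include <stdint.h>

struct iommu {
    uint32_t index;            /* sequence number assigned when the IOMMU is probed */
    /* ... hardware registers, capabilities ... */
};

struct hvm_iommu {
    uint64_t iommu_bitmap;     /* bit n set => domain uses the IOMMU with index n */
};

/* Called when a device behind 'iommu' is assigned to the domain. */
void mark_iommu_used(struct hvm_iommu *hd, struct iommu *iommu)
{
    hd->iommu_bitmap |= 1ULL << iommu->index;
}

/* Flush only the IOMMUs the domain actually uses. */
void flush_domain_iotlbs(struct hvm_iommu *hd,
                         struct iommu *iommus[], unsigned int nr_iommus)
{
    for ( unsigned int i = 0; i < nr_iommus; i++ )
    {
        struct iommu *iommu = iommus[i];

        if ( !(hd->iommu_bitmap & (1ULL << iommu->index)) )
            continue;          /* no devices of this domain behind this IOMMU */

        /* flush this IOMMU's IOTLB here; the real code calls
         * iommu_flush_iotlb_psi()/iommu_flush_iotlb_dsi(). */
    }
}
```

The u64 bitmap caps the design at 64 IOMMUs per system, which matches the `u64 iommu_bitmap` field added to `struct hvm_iommu` below; the actual patch tests and sets bits with `test_bit()`/`set_bit()`/`clear_bit()` rather than open-coded shifts.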
| author | Keir Fraser <keir.fraser@citrix.com> |
|---|---|
| date | Mon May 26 08:25:36 2008 +0100 (2008-05-26) |
| parents | c0c0f4fa8850 |
| children | 51274688c000 |
| files | xen/drivers/passthrough/vtd/iommu.c xen/include/xen/hvm/iommu.h xen/include/xen/iommu.h |
line diff
```diff
--- a/xen/drivers/passthrough/vtd/iommu.c	Mon May 26 08:24:55 2008 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c	Mon May 26 08:25:36 2008 +0100
@@ -572,6 +572,7 @@ void iommu_flush_all(void)
 /* clear one page's page table */
 static void dma_pte_clear_one(struct domain *domain, u64 addr)
 {
+    struct hvm_iommu *hd = domain_hvm_iommu(domain);
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     struct dma_pte *page = NULL, *pte = NULL;
@@ -593,6 +594,10 @@ static void dma_pte_clear_one(struct dom
     for_each_drhd_unit ( drhd )
     {
         iommu = drhd->iommu;
+
+        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+            continue;
+
         if ( cap_caching_mode(iommu->cap) )
             iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
                                   addr, 1, 0);
@@ -1022,7 +1027,7 @@ static int iommu_alloc(struct acpi_drhd_
 
     set_fixmap_nocache(FIX_IOMMU_REGS_BASE_0 + nr_iommus, drhd->address);
     iommu->reg = (void *)fix_to_virt(FIX_IOMMU_REGS_BASE_0 + nr_iommus);
-    nr_iommus++;
+    iommu->index = nr_iommus++;
 
     iommu->cap = dmar_readq(iommu->reg, DMAR_CAP_REG);
     iommu->ecap = dmar_readq(iommu->reg, DMAR_ECAP_REG);
@@ -1191,6 +1196,8 @@ static int domain_context_mapping_one(
         iommu_flush_write_buffer(iommu);
     else
         iommu_flush_iotlb_dsi(iommu, domain_iommu_domid(domain), 0);
+
+    set_bit(iommu->index, &hd->iommu_bitmap);
     spin_unlock_irqrestore(&iommu->lock, flags);
 
     return 0;
@@ -1418,11 +1425,12 @@ void reassign_device_ownership(
 {
     struct hvm_iommu *source_hd = domain_hvm_iommu(source);
     struct hvm_iommu *target_hd = domain_hvm_iommu(target);
-    struct pci_dev *pdev;
+    struct pci_dev *pdev, *pdev2;
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     int status;
     unsigned long flags;
+    int found = 0;
 
     pdev_flr(bus, devfn);
 
@@ -1444,6 +1452,18 @@ void reassign_device_ownership(
     spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
     spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
 
+    for_each_pdev ( source, pdev2 )
+    {
+        drhd = acpi_find_matched_drhd_unit(pdev2);
+        if ( drhd->iommu == iommu )
+        {
+            found = 1;
+            break;
+        }
+    }
+    if ( !found )
+        clear_bit(iommu->index, &source_hd->iommu_bitmap);
+
     status = domain_context_mapping(target, iommu, pdev);
     if ( status != 0 )
         gdprintk(XENLOG_ERR VTDPREFIX, "domain_context_mapping failed\n");
@@ -1500,6 +1520,7 @@ static int domain_context_mapped(struct 
 int intel_iommu_map_page(
     struct domain *d, unsigned long gfn, unsigned long mfn)
 {
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     struct dma_pte *page = NULL, *pte = NULL;
@@ -1527,6 +1548,10 @@ int intel_iommu_map_page(
     for_each_drhd_unit ( drhd )
     {
         iommu = drhd->iommu;
+
+        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+            continue;
+
         if ( cap_caching_mode(iommu->cap) )
             iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
                                   (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
@@ -1578,6 +1603,7 @@ int intel_iommu_unmap_page(struct domain
 int iommu_page_mapping(struct domain *domain, paddr_t iova,
                        paddr_t hpa, size_t size, int prot)
 {
+    struct hvm_iommu *hd = domain_hvm_iommu(domain);
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     u64 start_pfn, end_pfn;
@@ -1611,6 +1637,10 @@ int iommu_page_mapping(struct domain *do
     for_each_drhd_unit ( drhd )
     {
         iommu = drhd->iommu;
+
+        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+            continue;
+
         if ( cap_caching_mode(iommu->cap) )
             iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
                                   iova, index, 0);
@@ -1630,6 +1660,7 @@ int iommu_page_unmapping(struct domain *
 
 void iommu_flush(struct domain *d, unsigned long gfn, u64 *p2m_entry)
 {
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu = NULL;
     struct dma_pte *pte = (struct dma_pte *) p2m_entry;
@@ -1637,6 +1668,10 @@ void iommu_flush(struct domain *d, unsig
     for_each_drhd_unit ( drhd )
     {
         iommu = drhd->iommu;
+
+        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+            continue;
+
         if ( cap_caching_mode(iommu->cap) )
             iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
                                   (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
```
```diff
--- a/xen/include/xen/hvm/iommu.h	Mon May 26 08:24:55 2008 +0100
+++ b/xen/include/xen/hvm/iommu.h	Mon May 26 08:25:36 2008 +0100
@@ -43,6 +43,7 @@ struct hvm_iommu {
     int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
     struct list_head g2m_ioport_list;  /* guest to machine ioport mapping */
     domid_t iommu_domid;           /* domain id stored in iommu */
+    u64 iommu_bitmap;              /* bitmap of iommu(s) that the domain uses */
 
     /* amd iommu support */
     int domain_id;
```
```diff
--- a/xen/include/xen/iommu.h	Mon May 26 08:24:55 2008 +0100
+++ b/xen/include/xen/iommu.h	Mon May 26 08:25:36 2008 +0100
@@ -44,6 +44,7 @@ extern int iommu_pv_enabled;
 struct iommu {
     struct list_head list;
     void __iomem *reg; /* Pointer to hardware regs, virtual addr */
+    u32 index;         /* Sequence number of iommu */
     u32 gcmd;          /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
     u64 cap;
     u64 ecap;
```