ia64/xen-unstable
changeset 18650:609d0d34450f
vtd: code cleanup
Remove iommu_page_mapping/unmapping, which are redundant because
intel_iommu_map_page/unmap_page can handle their functions.
Correct IRTA_REG_EIMI_SHIFT to IRTA_REG_EIME_SHIFT, and also remove useless
declarations in iommu.c.
Signed-off-by: Weidong Han <weidong.han@intel.com>
author | Keir Fraser <keir.fraser@citrix.com>
date | Fri Oct 17 12:04:11 2008 +0100 (2008-10-17)
parents | 50aaffd8f87c
children | 3411819435b9
files | xen/drivers/passthrough/iommu.c xen/drivers/passthrough/vtd/intremap.c xen/drivers/passthrough/vtd/iommu.c
line diff
--- a/xen/drivers/passthrough/iommu.c	Fri Oct 17 12:00:25 2008 +0100
+++ b/xen/drivers/passthrough/iommu.c	Fri Oct 17 12:04:11 2008 +0100
@@ -19,8 +19,6 @@
 #include <xen/paging.h>
 #include <xen/guest_access.h>
 
-extern struct iommu_ops intel_iommu_ops;
-extern struct iommu_ops amd_iommu_ops;
 static void parse_iommu_param(char *s);
 static int iommu_populate_page_table(struct domain *d);
 int intel_vtd_setup(void);
--- a/xen/drivers/passthrough/vtd/intremap.c	Fri Oct 17 12:00:25 2008 +0100
+++ b/xen/drivers/passthrough/vtd/intremap.c	Fri Oct 17 12:04:11 2008 +0100
@@ -479,10 +479,10 @@ int intremap_setup(struct iommu *iommu)
 #if defined(ENABLED_EXTENDED_INTERRUPT_SUPPORT)
     /* set extended interrupt mode bit */
     ir_ctrl->iremap_maddr |=
-        ecap_ext_intr(iommu->ecap) ? (1 << IRTA_REG_EIMI_SHIFT) : 0;
+        ecap_ext_intr(iommu->ecap) ? (1 << IRTA_REG_EIME_SHIFT) : 0;
 #endif
-    /* size field = 256 entries per 4K page = 8 - 1 */
-    ir_ctrl->iremap_maddr |= 7;
+    /* set size of the interrupt remapping table */
+    ir_ctrl->iremap_maddr |= IRTA_REG_TABLE_SIZE;
     dmar_writeq(iommu->reg, DMAR_IRTA_REG, ir_ctrl->iremap_maddr);
 
     /* set SIRTP */
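For context on the hunk above: the IRTA register packs the remapping-table base address (bits 63:12) together with the extended-interrupt-mode enable bit (EIME, bit 11 per the VT-d spec) and a size field S in bits 3:0 that encodes 2^(S+1) entries, so the old magic 7 (now named IRTA_REG_TABLE_SIZE) means 256 IRTEs, i.e. one 4K page of 16-byte entries. A minimal standalone sketch of that encoding follows; the two constants are assumptions mirroring what the Xen header is expected to define, not the authoritative definitions.

/* Sketch of the DMAR_IRTA_REG value built in intremap_setup(); the
 * constants are illustrative stand-ins for the Xen definitions. */
#include <stdint.h>
#include <stdio.h>

#define IRTA_REG_EIME_SHIFT  11  /* Extended Interrupt Mode Enable (VT-d spec, bit 11) */
#define IRTA_REG_TABLE_SIZE  7   /* size field S: 2^(S+1) = 256 entries = one 4K page */

static uint64_t make_irta(uint64_t table_maddr, int ext_intr_supported)
{
    uint64_t irta = table_maddr & ~0xfffULL;     /* bits 63:12 hold the table address */
    if ( ext_intr_supported )
        irta |= 1ULL << IRTA_REG_EIME_SHIFT;     /* enable extended (x2APIC) mode */
    irta |= IRTA_REG_TABLE_SIZE;                 /* bits 3:0: table size encoding */
    return irta;
}

int main(void)
{
    printf("IRTA = %#llx\n", (unsigned long long)make_irta(0x12345000ULL, 1));
    return 0;
}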
--- a/xen/drivers/passthrough/vtd/iommu.c	Fri Oct 17 12:00:25 2008 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c	Fri Oct 17 12:04:11 2008 +0100
@@ -569,26 +569,6 @@ static void dma_pte_clear_one(struct dom
     unmap_vtd_domain_page(page);
 }
 
-/* clear last level pte, a tlb flush should be followed */
-static void dma_pte_clear_range(struct domain *domain, u64 start, u64 end)
-{
-    struct hvm_iommu *hd = domain_hvm_iommu(domain);
-    int addr_width = agaw_to_width(hd->agaw);
-
-    start &= (((u64)1) << addr_width) - 1;
-    end &= (((u64)1) << addr_width) - 1;
-    /* in case it's partial page */
-    start = PAGE_ALIGN_4K(start);
-    end &= PAGE_MASK_4K;
-
-    /* we don't need lock here, nobody else touches the iova range */
-    while ( start < end )
-    {
-        dma_pte_clear_one(domain, start);
-        start += PAGE_SIZE_4K;
-    }
-}
-
 static void iommu_free_pagetable(u64 pt_maddr, int level)
 {
     int i;
@@ -1511,75 +1491,26 @@ int intel_iommu_unmap_page(struct domain
     return 0;
 }
 
-int iommu_page_mapping(struct domain *domain, paddr_t iova,
-                       paddr_t hpa, size_t size, int prot)
-{
-    struct hvm_iommu *hd = domain_hvm_iommu(domain);
-    struct acpi_drhd_unit *drhd;
-    struct iommu *iommu;
-    u64 start_pfn, end_pfn;
-    struct dma_pte *page = NULL, *pte = NULL;
-    int index;
-    u64 pg_maddr;
-
-    if ( (prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0 )
-        return -EINVAL;
-
-    iova = (iova >> PAGE_SHIFT_4K) << PAGE_SHIFT_4K;
-    start_pfn = hpa >> PAGE_SHIFT_4K;
-    end_pfn = (PAGE_ALIGN_4K(hpa + size)) >> PAGE_SHIFT_4K;
-    index = 0;
-    while ( start_pfn < end_pfn )
-    {
-        pg_maddr = addr_to_dma_page_maddr(domain, iova + PAGE_SIZE_4K*index, 1);
-        if ( pg_maddr == 0 )
-            return -ENOMEM;
-        page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
-        pte = page + (start_pfn & LEVEL_MASK);
-        dma_set_pte_addr(*pte, (paddr_t)start_pfn << PAGE_SHIFT_4K);
-        dma_set_pte_prot(*pte, prot);
-        iommu_flush_cache_entry(pte);
-        unmap_vtd_domain_page(page);
-        start_pfn++;
-        index++;
-    }
-
-    if ( index > 0 )
-    {
-        for_each_drhd_unit ( drhd )
-        {
-            iommu = drhd->iommu;
-            if ( test_bit(iommu->index, &hd->iommu_bitmap) )
-                if ( iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
-                                           iova, index, 1))
-                    iommu_flush_write_buffer(iommu);
-        }
-    }
-
-    return 0;
-}
-
-int iommu_page_unmapping(struct domain *domain, paddr_t addr, size_t size)
-{
-    dma_pte_clear_range(domain, addr, addr + size);
-
-    return 0;
-}
-
 static int iommu_prepare_rmrr_dev(struct domain *d,
                                   struct acpi_rmrr_unit *rmrr,
                                   u8 bus, u8 devfn)
 {
-    u64 size;
-    int ret;
+    int ret = 0;
+    u64 base, end;
+    unsigned long base_pfn, end_pfn;
 
-    /* page table init */
-    size = rmrr->end_address - rmrr->base_address + 1;
-    ret = iommu_page_mapping(d, rmrr->base_address,
-                             rmrr->base_address, size,
-                             DMA_PTE_READ|DMA_PTE_WRITE);
-    if ( ret )
-        return ret;
+    ASSERT(rmrr->base_address < rmrr->end_address);
+
+    base = rmrr->base_address & PAGE_MASK_4K;
+    base_pfn = base >> PAGE_SHIFT_4K;
+    end = PAGE_ALIGN_4K(rmrr->end_address);
+    end_pfn = end >> PAGE_SHIFT_4K;
+
+    while ( base_pfn < end_pfn )
+    {
+        intel_iommu_map_page(d, base_pfn, base_pfn);
+        base_pfn++;
+    }
 
     if ( domain_context_mapped(bus, devfn) == 0 )
         ret = domain_context_mapping(d, bus, devfn);
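The rewritten iommu_prepare_rmrr_dev() above drops the one-shot iommu_page_mapping() call in favour of a per-pfn loop over intel_iommu_map_page(), identity-mapping each 4K frame of the RMRR and leaving the per-entry cache and IOTLB maintenance to that helper. The only arithmetic involved is turning the inclusive byte range [base_address, end_address] into a half-open pfn range. Below is a standalone sketch of that conversion; the PAGE_* macros and map_one_pfn() are local stand-ins for Xen's 4K page helpers and for intel_iommu_map_page(d, pfn, pfn), not the hypervisor definitions.

/* Standalone illustration of the RMRR byte-range to pfn-range conversion
 * performed in iommu_prepare_rmrr_dev(). */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_4K    12
#define PAGE_SIZE_4K     (1ULL << PAGE_SHIFT_4K)
#define PAGE_MASK_4K     (~(PAGE_SIZE_4K - 1))
#define PAGE_ALIGN_4K(a) (((a) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)

static void map_one_pfn(unsigned long pfn)
{
    /* In the hypervisor this would be intel_iommu_map_page(d, pfn, pfn). */
    printf("identity-map pfn %#lx\n", pfn);
}

static void map_rmrr_range(uint64_t base_address, uint64_t end_address)
{
    uint64_t base = base_address & PAGE_MASK_4K;      /* round start down */
    uint64_t end  = PAGE_ALIGN_4K(end_address);       /* round end up */
    unsigned long base_pfn = base >> PAGE_SHIFT_4K;
    unsigned long end_pfn  = end >> PAGE_SHIFT_4K;

    while ( base_pfn < end_pfn )
    {
        map_one_pfn(base_pfn);
        base_pfn++;
    }
}

int main(void)
{
    /* e.g. an RMRR covering 0xcd000..0xcefff identity-maps pfns 0xcd and 0xce */
    map_rmrr_range(0xcd000, 0xcefff);
    return 0;
}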