ia64/xen-unstable
changeset 17758:121d196b4cc8
vtd: Remove dead code.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Thu May 29 14:30:48 2008 +0100 (2008-05-29) |
parents | 6f48c4ee8ae2 |
children | ecd266cebcab |
files | xen/drivers/passthrough/vtd/dmar.c xen/drivers/passthrough/vtd/dmar.h xen/drivers/passthrough/vtd/x86/vtd.c xen/include/xen/iommu.h |
line diff
1.1 --- a/xen/drivers/passthrough/vtd/dmar.c Thu May 29 13:38:31 2008 +0100 1.2 +++ b/xen/drivers/passthrough/vtd/dmar.c Thu May 29 14:30:48 2008 +0100 1.3 @@ -147,39 +147,6 @@ struct acpi_drhd_unit * acpi_find_matche 1.4 return NULL; 1.5 } 1.6 1.7 -struct acpi_rmrr_unit * acpi_find_matched_rmrr_unit(struct pci_dev *dev) 1.8 -{ 1.9 - struct acpi_rmrr_unit *rmrr; 1.10 - 1.11 - list_for_each_entry ( rmrr, &acpi_rmrr_units, list ) 1.12 - if ( acpi_pci_device_match(rmrr->devices, 1.13 - rmrr->devices_cnt, dev) ) 1.14 - return rmrr; 1.15 - 1.16 - return NULL; 1.17 -} 1.18 - 1.19 -struct acpi_atsr_unit * acpi_find_matched_atsr_unit(struct pci_dev *dev) 1.20 -{ 1.21 - struct acpi_atsr_unit *atsru; 1.22 - struct acpi_atsr_unit *all_ports_atsru; 1.23 - 1.24 - all_ports_atsru = NULL; 1.25 - list_for_each_entry ( atsru, &acpi_atsr_units, list ) 1.26 - { 1.27 - if ( atsru->all_ports ) 1.28 - all_ports_atsru = atsru; 1.29 - if ( acpi_pci_device_match(atsru->devices, 1.30 - atsru->devices_cnt, dev) ) 1.31 - return atsru; 1.32 - } 1.33 - 1.34 - if ( all_ports_atsru ) 1.35 - return all_ports_atsru;; 1.36 - 1.37 - return NULL; 1.38 -} 1.39 - 1.40 static int scope_device_count(void *start, void *end) 1.41 { 1.42 struct acpi_dev_scope *scope;
2.1 --- a/xen/drivers/passthrough/vtd/dmar.h Thu May 29 13:38:31 2008 +0100 2.2 +++ b/xen/drivers/passthrough/vtd/dmar.h Thu May 29 14:30:48 2008 +0100 2.3 @@ -86,7 +86,6 @@ struct acpi_atsr_unit { 2.4 } 2.5 2.6 struct acpi_drhd_unit * acpi_find_matched_drhd_unit(struct pci_dev *dev); 2.7 -struct acpi_rmrr_unit * acpi_find_matched_rmrr_unit(struct pci_dev *dev); 2.8 2.9 #define DMAR_TYPE 1 2.10 #define RMRR_TYPE 2
3.1 --- a/xen/drivers/passthrough/vtd/x86/vtd.c Thu May 29 13:38:31 2008 +0100 3.2 +++ b/xen/drivers/passthrough/vtd/x86/vtd.c Thu May 29 14:30:48 2008 +0100 3.3 @@ -123,181 +123,3 @@ void hvm_dpci_isairq_eoi(struct domain * 3.4 } 3.5 } 3.6 } 3.7 - 3.8 -void iommu_set_pgd(struct domain *d) 3.9 -{ 3.10 - struct hvm_iommu *hd = domain_hvm_iommu(d); 3.11 - unsigned long p2m_table; 3.12 - 3.13 - p2m_table = mfn_x(pagetable_get_mfn(d->arch.phys_table)); 3.14 - 3.15 - if ( paging_mode_hap(d) ) 3.16 - { 3.17 - int level = agaw_to_level(hd->agaw); 3.18 - struct dma_pte *dpte = NULL; 3.19 - mfn_t pgd_mfn; 3.20 - 3.21 - switch ( level ) 3.22 - { 3.23 - case VTD_PAGE_TABLE_LEVEL_3: 3.24 - dpte = map_domain_page(p2m_table); 3.25 - if ( !dma_pte_present(*dpte) ) 3.26 - { 3.27 - gdprintk(XENLOG_ERR VTDPREFIX, 3.28 - "iommu_set_pgd: second level wasn't there\n"); 3.29 - unmap_domain_page(dpte); 3.30 - return; 3.31 - } 3.32 - pgd_mfn = _mfn(dma_pte_addr(*dpte) >> PAGE_SHIFT_4K); 3.33 - hd->pgd_maddr = (paddr_t)(mfn_x(pgd_mfn)) << PAGE_SHIFT_4K; 3.34 - unmap_domain_page(dpte); 3.35 - break; 3.36 - case VTD_PAGE_TABLE_LEVEL_4: 3.37 - pgd_mfn = _mfn(p2m_table); 3.38 - hd->pgd_maddr = (paddr_t)(mfn_x(pgd_mfn)) << PAGE_SHIFT_4K; 3.39 - break; 3.40 - default: 3.41 - gdprintk(XENLOG_ERR VTDPREFIX, 3.42 - "iommu_set_pgd:Unsupported p2m table sharing level!\n"); 3.43 - break; 3.44 - } 3.45 - } 3.46 - else 3.47 - { 3.48 -#if CONFIG_PAGING_LEVELS == 3 3.49 - struct dma_pte *pte = NULL, *pgd_vaddr = NULL, *pmd_vaddr = NULL; 3.50 - int i; 3.51 - u64 pmd_maddr; 3.52 - unsigned long flags; 3.53 - l3_pgentry_t *l3e; 3.54 - int level = agaw_to_level(hd->agaw); 3.55 - 3.56 - spin_lock_irqsave(&hd->mapping_lock, flags); 3.57 - hd->pgd_maddr = alloc_pgtable_maddr(); 3.58 - if ( hd->pgd_maddr == 0 ) 3.59 - { 3.60 - spin_unlock_irqrestore(&hd->mapping_lock, flags); 3.61 - gdprintk(XENLOG_ERR VTDPREFIX, 3.62 - "Allocate pgd memory failed!\n"); 3.63 - return; 3.64 - } 3.65 - 3.66 - pgd_vaddr = 
map_vtd_domain_page(hd->pgd_maddr); 3.67 - l3e = map_domain_page(p2m_table); 3.68 - switch ( level ) 3.69 - { 3.70 - case VTD_PAGE_TABLE_LEVEL_3: /* Weybridge */ 3.71 - /* We only support 8 entries for the PAE L3 p2m table */ 3.72 - for ( i = 0; i < 8 ; i++ ) 3.73 - { 3.74 - /* Don't create new L2 entry, use ones from p2m table */ 3.75 - pgd_vaddr[i].val = l3e[i].l3 | _PAGE_PRESENT | _PAGE_RW; 3.76 - } 3.77 - break; 3.78 - 3.79 - case VTD_PAGE_TABLE_LEVEL_4: /* Stoakley */ 3.80 - /* We allocate one more page for the top vtd page table. */ 3.81 - pmd_maddr = alloc_pgtable_maddr(); 3.82 - if ( pmd_maddr == 0 ) 3.83 - { 3.84 - unmap_vtd_domain_page(pgd_vaddr); 3.85 - unmap_domain_page(l3e); 3.86 - spin_unlock_irqrestore(&hd->mapping_lock, flags); 3.87 - gdprintk(XENLOG_ERR VTDPREFIX, 3.88 - "Allocate pmd memory failed!\n"); 3.89 - return; 3.90 - } 3.91 - 3.92 - pte = &pgd_vaddr[0]; 3.93 - dma_set_pte_addr(*pte, pmd_maddr); 3.94 - dma_set_pte_readable(*pte); 3.95 - dma_set_pte_writable(*pte); 3.96 - 3.97 - pmd_vaddr = map_vtd_domain_page(pmd_maddr); 3.98 - for ( i = 0; i < 8; i++ ) 3.99 - { 3.100 - /* Don't create new L2 entry, use ones from p2m table */ 3.101 - pmd_vaddr[i].val = l3e[i].l3 | _PAGE_PRESENT | _PAGE_RW; 3.102 - } 3.103 - 3.104 - unmap_vtd_domain_page(pmd_vaddr); 3.105 - break; 3.106 - default: 3.107 - gdprintk(XENLOG_ERR VTDPREFIX, 3.108 - "iommu_set_pgd:Unsupported p2m table sharing level!\n"); 3.109 - break; 3.110 - } 3.111 - 3.112 - unmap_vtd_domain_page(pgd_vaddr); 3.113 - unmap_domain_page(l3e); 3.114 - spin_unlock_irqrestore(&hd->mapping_lock, flags); 3.115 - 3.116 -#elif CONFIG_PAGING_LEVELS == 4 3.117 - mfn_t pgd_mfn; 3.118 - l3_pgentry_t *l3e; 3.119 - int level = agaw_to_level(hd->agaw); 3.120 - 3.121 - switch ( level ) 3.122 - { 3.123 - case VTD_PAGE_TABLE_LEVEL_3: 3.124 - l3e = map_domain_page(p2m_table); 3.125 - if ( (l3e_get_flags(*l3e) & _PAGE_PRESENT) == 0 ) 3.126 - { 3.127 - gdprintk(XENLOG_ERR VTDPREFIX, 3.128 - "iommu_set_pgd: second 
level wasn't there\n"); 3.129 - unmap_domain_page(l3e); 3.130 - return; 3.131 - } 3.132 - 3.133 - pgd_mfn = _mfn(l3e_get_pfn(*l3e)); 3.134 - hd->pgd_maddr = (paddr_t)(mfn_x(pgd_mfn)) << PAGE_SHIFT_4K; 3.135 - unmap_domain_page(l3e); 3.136 - break; 3.137 - case VTD_PAGE_TABLE_LEVEL_4: 3.138 - pgd_mfn = _mfn(p2m_table); 3.139 - hd->pgd_maddr = (paddr_t)(mfn_x(pgd_mfn)) << PAGE_SHIFT_4K; 3.140 - break; 3.141 - default: 3.142 - gdprintk(XENLOG_ERR VTDPREFIX, 3.143 - "iommu_set_pgd:Unsupported p2m table sharing level!\n"); 3.144 - break; 3.145 - } 3.146 -#endif 3.147 - } 3.148 -} 3.149 - 3.150 -void iommu_free_pgd(struct domain *d) 3.151 -{ 3.152 -#if CONFIG_PAGING_LEVELS == 3 3.153 - struct hvm_iommu *hd = domain_hvm_iommu(d); 3.154 - int level = agaw_to_level(hd->agaw); 3.155 - struct dma_pte *pgd_vaddr = NULL; 3.156 - 3.157 - switch ( level ) 3.158 - { 3.159 - case VTD_PAGE_TABLE_LEVEL_3: 3.160 - if ( hd->pgd_maddr != 0 ) 3.161 - { 3.162 - free_pgtable_maddr(hd->pgd_maddr); 3.163 - hd->pgd_maddr = 0; 3.164 - } 3.165 - break; 3.166 - case VTD_PAGE_TABLE_LEVEL_4: 3.167 - if ( hd->pgd_maddr != 0 ) 3.168 - { 3.169 - pgd_vaddr = (struct dma_pte*)map_vtd_domain_page(hd->pgd_maddr); 3.170 - if ( pgd_vaddr[0].val != 0 ) 3.171 - free_pgtable_maddr(pgd_vaddr[0].val); 3.172 - unmap_vtd_domain_page(pgd_vaddr); 3.173 - free_pgtable_maddr(hd->pgd_maddr); 3.174 - hd->pgd_maddr = 0; 3.175 - } 3.176 - break; 3.177 - default: 3.178 - gdprintk(XENLOG_ERR VTDPREFIX, 3.179 - "Unsupported p2m table sharing level!\n"); 3.180 - break; 3.181 - } 3.182 -#endif 3.183 -} 3.184 -
4.1 --- a/xen/include/xen/iommu.h Thu May 29 13:38:31 2008 +0100 4.2 +++ b/xen/include/xen/iommu.h Thu May 29 14:30:48 2008 +0100 4.3 @@ -68,8 +68,6 @@ void reassign_device_ownership(struct do 4.4 u8 bus, u8 devfn); 4.5 int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn); 4.6 int iommu_unmap_page(struct domain *d, unsigned long gfn); 4.7 -void iommu_set_pgd(struct domain *d); 4.8 -void iommu_free_pgd(struct domain *d); 4.9 void iommu_domain_teardown(struct domain *d); 4.10 int hvm_do_IRQ_dpci(struct domain *d, unsigned int irq); 4.11 int dpci_ioport_intercept(ioreq_t *p);