ia64/xen-unstable
changeset 17492:8d20c24238ad
iommu: initialisation cleanup and bugfix.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Mon Apr 21 17:41:29 2008 +0100 (2008-04-21) |
parents | 84b5dee690f5 |
children | b2e28707ecbb |
files | xen/arch/x86/domain.c xen/arch/x86/setup.c xen/drivers/passthrough/iommu.c xen/drivers/passthrough/vtd/iommu.c xen/include/xen/iommu.h |
line diff
1.1 --- a/xen/arch/x86/domain.c Mon Apr 21 14:59:25 2008 +0100 1.2 +++ b/xen/arch/x86/domain.c Mon Apr 21 17:41:29 2008 +0100 1.3 @@ -521,11 +521,11 @@ int arch_domain_create(struct domain *d, 1.4 clear_page(d->shared_info); 1.5 share_xen_page_with_guest( 1.6 virt_to_page(d->shared_info), d, XENSHARE_writable); 1.7 + 1.8 + if ( (rc = iommu_domain_init(d)) != 0 ) 1.9 + goto fail; 1.10 } 1.11 1.12 - if ( (rc = iommu_domain_init(d)) != 0 ) 1.13 - goto fail; 1.14 - 1.15 if ( is_hvm_domain(d) ) 1.16 { 1.17 if ( (rc = hvm_domain_initialise(d)) != 0 ) 1.18 @@ -562,7 +562,8 @@ void arch_domain_destroy(struct domain * 1.19 if ( is_hvm_domain(d) ) 1.20 hvm_domain_destroy(d); 1.21 1.22 - iommu_domain_destroy(d); 1.23 + if ( !is_idle_domain(d) ) 1.24 + iommu_domain_destroy(d); 1.25 1.26 paging_final_teardown(d); 1.27
2.1 --- a/xen/arch/x86/setup.c Mon Apr 21 14:59:25 2008 +0100 2.2 +++ b/xen/arch/x86/setup.c Mon Apr 21 17:41:29 2008 +0100 2.3 @@ -1019,8 +1019,6 @@ void __init __start_xen(unsigned long mb 2.4 _initrd_len = mod[initrdidx].mod_end - mod[initrdidx].mod_start; 2.5 } 2.6 2.7 - iommu_setup(); 2.8 - 2.9 /* 2.10 * We're going to setup domain0 using the module(s) that we stashed safely 2.11 * above our heap. The second module, if present, is an initrd ramdisk.
3.1 --- a/xen/drivers/passthrough/iommu.c Mon Apr 21 14:59:25 2008 +0100 3.2 +++ b/xen/drivers/passthrough/iommu.c Mon Apr 21 17:41:29 2008 +0100 3.3 @@ -140,7 +140,7 @@ void deassign_device(struct domain *d, u 3.4 return hd->platform_ops->reassign_device(d, dom0, bus, devfn); 3.5 } 3.6 3.7 -int iommu_setup(void) 3.8 +static int iommu_setup(void) 3.9 { 3.10 int rc = -ENODEV; 3.11 3.12 @@ -163,3 +163,4 @@ int iommu_setup(void) 3.13 printk("I/O virtualisation %sabled\n", iommu_enabled ? "en" : "dis"); 3.14 return rc; 3.15 } 3.16 +__initcall(iommu_setup);
4.1 --- a/xen/drivers/passthrough/vtd/iommu.c Mon Apr 21 14:59:25 2008 +0100 4.2 +++ b/xen/drivers/passthrough/vtd/iommu.c Mon Apr 21 17:41:29 2008 +0100 4.3 @@ -78,7 +78,7 @@ static struct intel_iommu *alloc_intel_i 4.4 struct intel_iommu *intel; 4.5 4.6 intel = xmalloc(struct intel_iommu); 4.7 - if ( !intel ) 4.8 + if ( intel == NULL ) 4.9 { 4.10 gdprintk(XENLOG_ERR VTDPREFIX, 4.11 "Allocate intel_iommu failed.\n"); 4.12 @@ -88,7 +88,6 @@ static struct intel_iommu *alloc_intel_i 4.13 4.14 spin_lock_init(&intel->qi_ctrl.qinval_lock); 4.15 spin_lock_init(&intel->qi_ctrl.qinval_poll_lock); 4.16 - 4.17 spin_lock_init(&intel->ir_ctrl.iremap_lock); 4.18 4.19 return intel; 4.20 @@ -96,68 +95,22 @@ static struct intel_iommu *alloc_intel_i 4.21 4.22 static void free_intel_iommu(struct intel_iommu *intel) 4.23 { 4.24 - if ( intel ) 4.25 - { 4.26 - xfree(intel); 4.27 - intel = NULL; 4.28 - } 4.29 + xfree(intel); 4.30 } 4.31 4.32 struct qi_ctrl *iommu_qi_ctrl(struct iommu *iommu) 4.33 { 4.34 - if ( !iommu ) 4.35 - return NULL; 4.36 - 4.37 - if ( !iommu->intel ) 4.38 - { 4.39 - iommu->intel = alloc_intel_iommu(); 4.40 - if ( !iommu->intel ) 4.41 - { 4.42 - dprintk(XENLOG_ERR VTDPREFIX, 4.43 - "iommu_qi_ctrl: Allocate iommu->intel failed.\n"); 4.44 - return NULL; 4.45 - } 4.46 - } 4.47 - 4.48 - return &(iommu->intel->qi_ctrl); 4.49 + return iommu ? &iommu->intel->qi_ctrl : NULL; 4.50 } 4.51 4.52 struct ir_ctrl *iommu_ir_ctrl(struct iommu *iommu) 4.53 { 4.54 - if ( !iommu ) 4.55 - return NULL; 4.56 - 4.57 - if ( !iommu->intel ) 4.58 - { 4.59 - iommu->intel = alloc_intel_iommu(); 4.60 - if ( !iommu->intel ) 4.61 - { 4.62 - dprintk(XENLOG_ERR VTDPREFIX, 4.63 - "iommu_ir_ctrl: Allocate iommu->intel failed.\n"); 4.64 - return NULL; 4.65 - } 4.66 - } 4.67 - 4.68 - return &(iommu->intel->ir_ctrl); 4.69 + return iommu ? &iommu->intel->ir_ctrl : NULL;
4.70 } 4.71 4.72 struct iommu_flush *iommu_get_flush(struct iommu *iommu) 4.73 { 4.74 - if ( !iommu ) 4.75 - return NULL; 4.76 - 4.77 - if ( !iommu->intel ) 4.78 - { 4.79 - iommu->intel = alloc_intel_iommu(); 4.80 - if ( !iommu->intel ) 4.81 - { 4.82 - dprintk(XENLOG_ERR VTDPREFIX, 4.83 - "iommu_get_flush: Allocate iommu->intel failed.\n"); 4.84 - return NULL; 4.85 - } 4.86 - } 4.87 - 4.88 - return &(iommu->intel->flush); 4.89 + return iommu ? &iommu->intel->flush : NULL; 4.90 } 4.91 4.92 unsigned int clflush_size; 4.93 @@ -1039,69 +992,65 @@ int iommu_set_interrupt(struct iommu *io 4.94 return vector; 4.95 } 4.96 4.97 -struct iommu *iommu_alloc(void *hw_data) 4.98 +static int iommu_alloc(struct acpi_drhd_unit *drhd) 4.99 { 4.100 - struct acpi_drhd_unit *drhd = (struct acpi_drhd_unit *) hw_data; 4.101 struct iommu *iommu; 4.102 4.103 if ( nr_iommus > MAX_IOMMUS ) 4.104 { 4.105 gdprintk(XENLOG_ERR VTDPREFIX, 4.106 "IOMMU: nr_iommus %d > MAX_IOMMUS\n", nr_iommus); 4.107 - return NULL; 4.108 + return -ENOMEM; 4.109 } 4.110 4.111 iommu = xmalloc(struct iommu); 4.112 - if ( !iommu ) 4.113 - return NULL; 4.114 + if ( iommu == NULL ) 4.115 + return -ENOMEM; 4.116 memset(iommu, 0, sizeof(struct iommu)); 4.117 4.118 + iommu->intel = alloc_intel_iommu(); 4.119 + if ( iommu->intel == NULL ) 4.120 + { 4.121 + xfree(iommu); 4.122 + return -ENOMEM; 4.123 + } 4.124 + 4.125 set_fixmap_nocache(FIX_IOMMU_REGS_BASE_0 + nr_iommus, drhd->address); 4.126 - iommu->reg = (void *) fix_to_virt(FIX_IOMMU_REGS_BASE_0 + nr_iommus); 4.127 - 4.128 - printk("iommu_alloc: iommu->reg = %p drhd->address = %lx\n", 4.129 - iommu->reg, drhd->address); 4.130 - 4.131 + iommu->reg = (void *)fix_to_virt(FIX_IOMMU_REGS_BASE_0 + nr_iommus); 4.132 nr_iommus++; 4.133 4.134 - if ( !iommu->reg ) 4.135 - { 4.136 - printk(KERN_ERR VTDPREFIX "IOMMU: can't mapping the region\n"); 4.137 - goto error; 4.138 - } 4.139 - 4.140 iommu->cap = dmar_readq(iommu->reg, DMAR_CAP_REG); 4.141
iommu->ecap = dmar_readq(iommu->reg, DMAR_ECAP_REG); 4.142 4.143 - printk("iommu_alloc: cap = %"PRIx64"\n",iommu->cap); 4.144 - printk("iommu_alloc: ecap = %"PRIx64"\n", iommu->ecap); 4.145 - 4.146 spin_lock_init(&iommu->lock); 4.147 spin_lock_init(&iommu->register_lock); 4.148 4.149 - iommu->intel = alloc_intel_iommu(); 4.150 4.151 drhd->iommu = iommu; 4.152 - return iommu; 4.153 - error: 4.154 - xfree(iommu); 4.155 - return NULL; 4.156 + return 0; 4.157 } 4.158 4.159 -static void free_iommu(struct iommu *iommu) 4.160 +static void iommu_free(struct acpi_drhd_unit *drhd) 4.161 { 4.162 - if ( !iommu ) 4.163 + struct iommu *iommu = drhd->iommu; 4.164 + 4.165 + if ( iommu == NULL ) 4.166 return; 4.167 + 4.168 if ( iommu->root_maddr != 0 ) 4.169 { 4.170 free_pgtable_maddr(iommu->root_maddr); 4.171 iommu->root_maddr = 0; 4.172 } 4.173 + 4.174 if ( iommu->reg ) 4.175 iounmap(iommu->reg); 4.176 + 4.177 free_intel_iommu(iommu->intel); 4.178 free_irq(iommu->vector); 4.179 xfree(iommu); 4.180 + 4.181 + drhd->iommu = NULL; 4.182 } 4.183 4.184 #define guestwidth_to_adjustwidth(gaw) ({ \ 4.185 @@ -1120,10 +1069,10 @@ static int intel_iommu_domain_init(struc 4.186 unsigned long sagaw; 4.187 struct acpi_drhd_unit *drhd; 4.188 4.189 - for_each_drhd_unit ( drhd ) 4.190 - iommu = drhd->iommu ? : iommu_alloc(drhd); 4.191 + drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list); 4.192 + iommu = drhd->iommu; 4.193 4.194 - /* calculate AGAW */ 4.195 + /* Calculate AGAW. */
4.196 if ( guest_width > cap_mgaw(iommu->cap) ) 4.197 guest_width = cap_mgaw(iommu->cap); 4.198 adjust_width = guestwidth_to_adjustwidth(guest_width); 4.199 @@ -1913,9 +1862,12 @@ int intel_vtd_setup(void) 4.200 spin_lock_init(&domid_bitmap_lock); 4.201 INIT_LIST_HEAD(&hd->pdev_list); 4.202 4.203 - /* setup clflush size */ 4.204 clflush_size = get_clflush_size(); 4.205 4.206 + for_each_drhd_unit ( drhd ) 4.207 + if ( iommu_alloc(drhd) != 0 ) 4.208 + goto error; 4.209 + 4.210 /* Allocate IO page directory page for the domain. */ 4.211 drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list); 4.212 iommu = drhd->iommu; 4.213 @@ -1929,7 +1881,7 @@ int intel_vtd_setup(void) 4.214 memset(domid_bitmap, 0, domid_bitmap_size / 8); 4.215 set_bit(0, domid_bitmap); 4.216 4.217 - /* setup 1:1 page table for dom0 */ 4.218 + /* Set up 1:1 page table for dom0. */ 4.219 for ( i = 0; i < max_page; i++ ) 4.220 iommu_map_page(dom0, i, i); 4.221 4.222 @@ -1944,10 +1896,7 @@ int intel_vtd_setup(void) 4.223 4.224 error: 4.225 for_each_drhd_unit ( drhd ) 4.226 - { 4.227 - iommu = drhd->iommu; 4.228 - free_iommu(iommu); 4.229 - } 4.230 + iommu_free(drhd); 4.231 vtd_enabled = 0; 4.232 return -ENOMEM; 4.233 }
5.1 --- a/xen/include/xen/iommu.h Mon Apr 21 14:59:25 2008 +0100 5.2 +++ b/xen/include/xen/iommu.h Mon Apr 21 17:41:29 2008 +0100 5.3 @@ -71,7 +71,6 @@ struct iommu { 5.4 struct intel_iommu *intel; 5.5 }; 5.6 5.7 -int iommu_setup(void); 5.8 int iommu_domain_init(struct domain *d); 5.9 void iommu_domain_destroy(struct domain *d); 5.10 int device_assigned(u8 bus, u8 devfn);