ia64/xen-unstable
changeset 17726:62f1c837057f
Enable IOMMU for PV guests
Introduce 'iommu_pv' boot parameter (default off). Added a need_iommu
flag which is set if guest has PCI devices assigned. IOMMU page
tables are populated with current guest memory when IOMMU is first
enabled for the guest.
Signed-off-by: Espen Skoglund <espen.skoglund@netronome.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Sat May 24 09:37:35 2008 +0100 (2008-05-24) |
parents | 36bbcc6baadf |
children | c684cf331f94 |
files | xen/drivers/passthrough/iommu.c xen/drivers/passthrough/vtd/iommu.c xen/include/xen/hvm/iommu.h xen/include/xen/iommu.h xen/include/xen/sched.h |
line diff
1.1 --- a/xen/drivers/passthrough/iommu.c Sat May 24 09:35:05 2008 +0100 1.2 +++ b/xen/drivers/passthrough/iommu.c Sat May 24 09:37:35 2008 +0100 1.3 @@ -15,15 +15,20 @@ 1.4 1.5 #include <xen/sched.h> 1.6 #include <xen/iommu.h> 1.7 +#include <xen/paging.h> 1.8 1.9 extern struct iommu_ops intel_iommu_ops; 1.10 extern struct iommu_ops amd_iommu_ops; 1.11 +static int iommu_populate_page_table(struct domain *d); 1.12 int intel_vtd_setup(void); 1.13 int amd_iov_detect(void); 1.14 1.15 int iommu_enabled = 1; 1.16 boolean_param("iommu", iommu_enabled); 1.17 1.18 +int iommu_pv_enabled = 0; 1.19 +boolean_param("iommu_pv", iommu_pv_enabled); 1.20 + 1.21 int iommu_domain_init(struct domain *domain) 1.22 { 1.23 struct hvm_iommu *hd = domain_hvm_iommu(domain); 1.24 @@ -54,11 +59,46 @@ int iommu_domain_init(struct domain *dom 1.25 int assign_device(struct domain *d, u8 bus, u8 devfn) 1.26 { 1.27 struct hvm_iommu *hd = domain_hvm_iommu(d); 1.28 + int rc; 1.29 1.30 if ( !iommu_enabled || !hd->platform_ops ) 1.31 return 0; 1.32 1.33 - return hd->platform_ops->assign_device(d, bus, devfn); 1.34 + if ( (rc = hd->platform_ops->assign_device(d, bus, devfn)) ) 1.35 + return rc; 1.36 + 1.37 + if ( has_iommu_pdevs(d) && !need_iommu(d) ) 1.38 + { 1.39 + d->need_iommu = 1; 1.40 + return iommu_populate_page_table(d); 1.41 + } 1.42 + return 0; 1.43 +} 1.44 + 1.45 +static int iommu_populate_page_table(struct domain *d) 1.46 +{ 1.47 + struct hvm_iommu *hd = domain_hvm_iommu(d); 1.48 + struct page_info *page; 1.49 + int rc; 1.50 + 1.51 + spin_lock(&d->page_alloc_lock); 1.52 + 1.53 + list_for_each_entry ( page, &d->page_list, list ) 1.54 + { 1.55 + if ( (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page ) 1.56 + { 1.57 + rc = hd->platform_ops->map_page( 1.58 + d, mfn_to_gmfn(d, page_to_mfn(page)), page_to_mfn(page)); 1.59 + if (rc) 1.60 + { 1.61 + spin_unlock(&d->page_alloc_lock); 1.62 + hd->platform_ops->teardown(d); 1.63 + return rc; 1.64 + } 1.65 + } 1.66 + } 1.67 + 
spin_unlock(&d->page_alloc_lock); 1.68 + return 0; 1.69 } 1.70 1.71 void iommu_domain_destroy(struct domain *d) 1.72 @@ -137,7 +177,13 @@ void deassign_device(struct domain *d, u 1.73 if ( !iommu_enabled || !hd->platform_ops ) 1.74 return; 1.75 1.76 - return hd->platform_ops->reassign_device(d, dom0, bus, devfn); 1.77 + hd->platform_ops->reassign_device(d, dom0, bus, devfn); 1.78 + 1.79 + if ( !has_iommu_pdevs(d) && need_iommu(d) ) 1.80 + { 1.81 + d->need_iommu = 0; 1.82 + hd->platform_ops->teardown(d); 1.83 + } 1.84 } 1.85 1.86 static int iommu_setup(void) 1.87 @@ -160,7 +206,22 @@ static int iommu_setup(void) 1.88 iommu_enabled = (rc == 0); 1.89 1.90 out: 1.91 + if ( !iommu_enabled || !vtd_enabled ) 1.92 + iommu_pv_enabled = 0; 1.93 printk("I/O virtualisation %sabled\n", iommu_enabled ? "en" : "dis"); 1.94 + if (iommu_enabled) 1.95 + printk("I/O virtualisation for PV guests %sabled\n", 1.96 + iommu_pv_enabled ? "en" : "dis"); 1.97 return rc; 1.98 } 1.99 __initcall(iommu_setup); 1.100 + 1.101 + 1.102 +/* 1.103 + * Local variables: 1.104 + * mode: C 1.105 + * c-set-style: "BSD" 1.106 + * c-basic-offset: 4 1.107 + * indent-tabs-mode: nil 1.108 + * End: 1.109 + */
2.1 --- a/xen/drivers/passthrough/vtd/iommu.c Sat May 24 09:35:05 2008 +0100 2.2 +++ b/xen/drivers/passthrough/vtd/iommu.c Sat May 24 09:37:35 2008 +0100 2.3 @@ -1138,26 +1138,35 @@ static int domain_context_mapping_one( 2.4 } 2.5 2.6 spin_lock_irqsave(&iommu->lock, flags); 2.7 + 2.8 +#ifdef CONTEXT_PASSTHRU 2.9 + if ( ecap_pass_thru(iommu->ecap) && (domain->domain_id == 0) ) 2.10 + context_set_translation_type(*context, CONTEXT_TT_PASS_THRU); 2.11 + else 2.12 + { 2.13 +#endif 2.14 + if ( hd->pgd_maddr == 0 ) 2.15 + { 2.16 + hd->pgd_maddr = alloc_pgtable_maddr(); 2.17 + if ( hd->pgd_maddr == 0 ) 2.18 + { 2.19 + unmap_vtd_domain_page(context_entries); 2.20 + spin_unlock_irqrestore(&iommu->lock, flags); 2.21 + return -ENOMEM; 2.22 + } 2.23 + } 2.24 + context_set_address_root(*context, hd->pgd_maddr); 2.25 + context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL); 2.26 +#ifdef CONTEXT_PASSTHRU 2.27 + } 2.28 +#endif 2.29 + 2.30 /* 2.31 * domain_id 0 is not valid on Intel's IOMMU, force domain_id to 2.32 * be 1 based as required by intel's iommu hw. 2.33 */ 2.34 context_set_domain_id(context, domain); 2.35 context_set_address_width(*context, hd->agaw); 2.36 - 2.37 - if ( ecap_pass_thru(iommu->ecap) ) 2.38 - context_set_translation_type(*context, CONTEXT_TT_PASS_THRU); 2.39 -#ifdef CONTEXT_PASSTHRU 2.40 - else 2.41 - { 2.42 -#endif 2.43 - ASSERT(hd->pgd_maddr != 0); 2.44 - context_set_address_root(*context, hd->pgd_maddr); 2.45 - context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL); 2.46 -#ifdef CONTEXT_PASSTHRU 2.47 - } 2.48 -#endif 2.49 - 2.50 context_set_fault_enable(*context); 2.51 context_set_present(*context); 2.52 iommu_flush_cache_entry(iommu, context);
3.1 --- a/xen/include/xen/hvm/iommu.h Sat May 24 09:35:05 2008 +0100 3.2 +++ b/xen/include/xen/hvm/iommu.h Sat May 24 09:37:35 2008 +0100 3.3 @@ -54,4 +54,7 @@ struct hvm_iommu { 3.4 struct iommu_ops *platform_ops; 3.5 }; 3.6 3.7 +#define has_iommu_pdevs(domain) \ 3.8 + (!list_empty(&(domain->arch.hvm_domain.hvm_iommu.pdev_list))) 3.9 + 3.10 #endif /* __ASM_X86_HVM_IOMMU_H__ */
4.1 --- a/xen/include/xen/iommu.h Sat May 24 09:35:05 2008 +0100 4.2 +++ b/xen/include/xen/iommu.h Sat May 24 09:37:35 2008 +0100 4.3 @@ -29,6 +29,7 @@ 4.4 4.5 extern int vtd_enabled; 4.6 extern int iommu_enabled; 4.7 +extern int iommu_pv_enabled; 4.8 4.9 #define domain_hvm_iommu(d) (&d->arch.hvm_domain.hvm_iommu) 4.10 #define domain_vmx_iommu(d) (&d->arch.hvm_domain.hvm_iommu.vmx_iommu)
5.1 --- a/xen/include/xen/sched.h Sat May 24 09:35:05 2008 +0100 5.2 +++ b/xen/include/xen/sched.h Sat May 24 09:37:35 2008 +0100 5.3 @@ -186,6 +186,8 @@ struct domain 5.4 5.5 /* Is this an HVM guest? */ 5.6 bool_t is_hvm; 5.7 + /* Does this guest need iommu mappings? */ 5.8 + bool_t need_iommu; 5.9 /* Is this guest fully privileged (aka dom0)? */ 5.10 bool_t is_privileged; 5.11 /* Which guest this guest has privileges on */ 5.12 @@ -515,6 +517,7 @@ static inline void vcpu_unblock(struct v 5.13 5.14 #define is_hvm_domain(d) ((d)->is_hvm) 5.15 #define is_hvm_vcpu(v) (is_hvm_domain(v->domain)) 5.16 +#define need_iommu(d) ((d)->need_iommu && !(d)->is_hvm) 5.17 5.18 extern enum cpufreq_controller { 5.19 FREQCTL_none, FREQCTL_dom0_kernel