ia64/xen-unstable
changeset 15940:b33ee2276b6a
domctl and p2m changes for PCI passthru.
Signed-off-by: Allen Kay <allen.m.kay@intel.com>
Signed-off-by: Guy Zana <guy@neocleus.com>
author    Keir Fraser <keir@xensource.com>
date      Thu Sep 20 09:57:10 2007 +0100 (2007-09-20)
parents   39c85fa942aa
children  1902a21dd1ae
files     xen/arch/x86/domctl.c xen/arch/x86/mm/p2m.c xen/arch/x86/mm/shadow/multi.c xen/include/asm-x86/p2m.h
line diff
1.1 --- a/xen/arch/x86/domctl.c Thu Sep 20 09:42:01 2007 +0100 1.2 +++ b/xen/arch/x86/domctl.c Thu Sep 20 09:57:10 2007 +0100 1.3 @@ -25,6 +25,8 @@ 1.4 #include <asm/hvm/support.h> 1.5 #include <asm/processor.h> 1.6 #include <xsm/xsm.h> 1.7 +#include <xen/list.h> 1.8 +#include <asm/iommu.h> 1.9 1.10 long arch_do_domctl( 1.11 struct xen_domctl *domctl, 1.12 @@ -523,6 +525,155 @@ long arch_do_domctl( 1.13 } 1.14 break; 1.15 1.16 + case XEN_DOMCTL_assign_device: 1.17 + { 1.18 + struct domain *d; 1.19 + struct hvm_iommu *hd; 1.20 + u8 bus, devfn; 1.21 + 1.22 + if (!vtd_enabled) 1.23 + break; 1.24 + 1.25 + ret = -EINVAL; 1.26 + if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) ) { 1.27 + gdprintk(XENLOG_ERR, 1.28 + "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n"); 1.29 + break; 1.30 + } 1.31 + hd = domain_hvm_iommu(d); 1.32 + bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff; 1.33 + devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff; 1.34 + ret = assign_device(d, bus, devfn); 1.35 + gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: bdf = %x:%x:%x\n", 1.36 + bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 1.37 + put_domain(d); 1.38 + } 1.39 + break; 1.40 + 1.41 + case XEN_DOMCTL_bind_pt_irq: 1.42 + { 1.43 + struct domain * d; 1.44 + xen_domctl_bind_pt_irq_t * bind; 1.45 + 1.46 + ret = -ESRCH; 1.47 + if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL ) 1.48 + break; 1.49 + bind = &(domctl->u.bind_pt_irq); 1.50 + if (vtd_enabled) 1.51 + ret = pt_irq_create_bind_vtd(d, bind); 1.52 + if (ret < 0) 1.53 + gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n"); 1.54 + rcu_unlock_domain(d); 1.55 + } 1.56 + break; 1.57 + 1.58 + case XEN_DOMCTL_memory_mapping: 1.59 + { 1.60 + struct domain *d; 1.61 + unsigned long gfn = domctl->u.memory_mapping.first_gfn; 1.62 + unsigned long mfn = domctl->u.memory_mapping.first_mfn; 1.63 + unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns; 1.64 + int i; 1.65 + 1.66 + ret = -EINVAL; 1.67 + if ( (mfn + nr_mfns - 
1) < mfn ) /* wrap? */ 1.68 + break; 1.69 + 1.70 + ret = -ESRCH; 1.71 + if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) ) 1.72 + break; 1.73 + 1.74 + ret=0; 1.75 + if ( domctl->u.memory_mapping.add_mapping ) 1.76 + { 1.77 + gdprintk(XENLOG_INFO, 1.78 + "memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n", 1.79 + gfn, mfn, nr_mfns); 1.80 + 1.81 + ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1); 1.82 + for ( i = 0; i < nr_mfns; i++ ) 1.83 + set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i)); 1.84 + } 1.85 + else 1.86 + { 1.87 + gdprintk(XENLOG_INFO, 1.88 + "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n", 1.89 + gfn, mfn, nr_mfns); 1.90 + 1.91 + for ( i = 0; i < nr_mfns; i++ ) 1.92 + clear_mmio_p2m_entry(d, gfn+i); 1.93 + ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1); 1.94 + } 1.95 + 1.96 + rcu_unlock_domain(d); 1.97 + } 1.98 + break; 1.99 + 1.100 + case XEN_DOMCTL_ioport_mapping: 1.101 + { 1.102 +#define MAX_IOPORTS 0x10000 1.103 + struct domain *d; 1.104 + struct hvm_iommu *hd; 1.105 + unsigned int fgp = domctl->u.ioport_mapping.first_gport; 1.106 + unsigned int fmp = domctl->u.ioport_mapping.first_mport; 1.107 + unsigned int np = domctl->u.ioport_mapping.nr_ports; 1.108 + struct g2m_ioport *g2m_ioport; 1.109 + int found = 0; 1.110 + 1.111 + ret = -EINVAL; 1.112 + if ( (np == 0) || (fgp > MAX_IOPORTS) || (fmp > MAX_IOPORTS) || 1.113 + ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) ) 1.114 + { 1.115 + gdprintk(XENLOG_ERR, 1.116 + "ioport_map:invalid:gport=%x mport=%x nr_ports=%x\n", 1.117 + fgp, fmp, np); 1.118 + break; 1.119 + } 1.120 + 1.121 + ret = -ESRCH; 1.122 + if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) ) 1.123 + break; 1.124 + 1.125 + hd = domain_hvm_iommu(d); 1.126 + if ( domctl->u.ioport_mapping.add_mapping ) 1.127 + { 1.128 + gdprintk(XENLOG_INFO, 1.129 + "ioport_map:add f_gport=%x f_mport=%x np=%x\n", 1.130 + fgp, fmp, np); 1.131 + 1.132 + list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list) 1.133 
+ if (g2m_ioport->mport == fmp ) { 1.134 + g2m_ioport->gport = fgp; 1.135 + g2m_ioport->np = np; 1.136 + found = 1; 1.137 + break; 1.138 + } 1.139 + if ( !found ) 1.140 + { 1.141 + g2m_ioport = xmalloc(struct g2m_ioport); 1.142 + g2m_ioport->gport = fgp; 1.143 + g2m_ioport->mport = fmp; 1.144 + g2m_ioport->np = np; 1.145 + list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list); 1.146 + } 1.147 + ret = ioports_permit_access(d, fmp, fmp + np - 1); 1.148 + 1.149 + } 1.150 + else { 1.151 + gdprintk(XENLOG_INFO, 1.152 + "ioport_map:remove f_gport=%x f_mport=%x np=%x\n", 1.153 + fgp, fmp, np); 1.154 + list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list) 1.155 + if ( g2m_ioport->mport == fmp ) { 1.156 + list_del(&g2m_ioport->list); 1.157 + break; 1.158 + } 1.159 + ret = ioports_deny_access(d, fmp, fmp + np - 1); 1.160 + } 1.161 + rcu_unlock_domain(d); 1.162 + } 1.163 + break; 1.164 + 1.165 default: 1.166 ret = -ENOSYS; 1.167 break;
2.1 --- a/xen/arch/x86/mm/p2m.c Thu Sep 20 09:42:01 2007 +0100 2.2 +++ b/xen/arch/x86/mm/p2m.c Thu Sep 20 09:57:10 2007 +0100 2.3 @@ -27,6 +27,7 @@ 2.4 #include <asm/page.h> 2.5 #include <asm/paging.h> 2.6 #include <asm/p2m.h> 2.7 +#include <asm/iommu.h> 2.8 2.9 /* Debugging and auditing of the P2M code? */ 2.10 #define P2M_AUDIT 0 2.11 @@ -244,7 +245,7 @@ set_p2m_entry(struct domain *d, unsigned 2.12 if ( mfn_valid(mfn) && (gfn > d->arch.p2m.max_mapped_pfn) ) 2.13 d->arch.p2m.max_mapped_pfn = gfn; 2.14 2.15 - if ( mfn_valid(mfn) ) 2.16 + if ( mfn_valid(mfn) || (p2mt == p2m_mmio_direct) ) 2.17 entry_content = l1e_from_pfn(mfn_x(mfn), p2m_type_to_flags(p2mt)); 2.18 else 2.19 entry_content = l1e_empty(); 2.20 @@ -252,6 +253,9 @@ set_p2m_entry(struct domain *d, unsigned 2.21 /* level 1 entry */ 2.22 paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 1); 2.23 2.24 + if ( vtd_enabled && (p2mt == p2m_mmio_direct) && is_hvm_domain(d) ) 2.25 + iommu_flush(d, gfn, (u64*)p2m_entry); 2.26 + 2.27 /* Success */ 2.28 rv = 1; 2.29 2.30 @@ -351,6 +355,11 @@ int p2m_alloc_table(struct domain *d, 2.31 goto error; 2.32 } 2.33 2.34 +#if CONFIG_PAGING_LEVELS >= 3 2.35 + if (vtd_enabled && is_hvm_domain(d)) 2.36 + iommu_set_pgd(d); 2.37 +#endif 2.38 + 2.39 P2M_PRINTK("p2m table initialised (%u pages)\n", page_count); 2.40 p2m_unlock(d); 2.41 return 0; 2.42 @@ -860,6 +869,42 @@ p2m_type_t p2m_change_type(struct domain 2.43 return pt; 2.44 } 2.45 2.46 +int 2.47 +set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn) 2.48 +{ 2.49 + int rc = 0; 2.50 + 2.51 + rc = set_p2m_entry(d, gfn, mfn, p2m_mmio_direct); 2.52 + if ( 0 == rc ) 2.53 + gdprintk(XENLOG_ERR, 2.54 + "set_mmio_p2m_entry: set_p2m_entry failed! 
mfn=%08lx\n", 2.55 + gmfn_to_mfn(d, gfn)); 2.56 + return rc; 2.57 +} 2.58 + 2.59 +int 2.60 +clear_mmio_p2m_entry(struct domain *d, unsigned long gfn) 2.61 +{ 2.62 + int rc = 0; 2.63 + 2.64 + unsigned long mfn; 2.65 + mfn = gmfn_to_mfn(d, gfn); 2.66 + if ( INVALID_MFN == mfn ) 2.67 + { 2.68 + gdprintk(XENLOG_ERR, 2.69 + "clear_mmio_p2m_entry: gfn_to_mfn failed! gfn=%08lx\n", gfn); 2.70 + return 0; 2.71 + } 2.72 + rc = set_p2m_entry(d, gfn, _mfn(INVALID_MFN), 0); 2.73 + 2.74 +#if !defined(__x86_64__) 2.75 + /* x86_64 xen does not map mmio entries in machine_to_phys_mapp[] */ 2.76 + set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY); 2.77 +#endif 2.78 + 2.79 + return rc; 2.80 +} 2.81 + 2.82 /* 2.83 * Local variables: 2.84 * mode: C
3.1 --- a/xen/arch/x86/mm/shadow/multi.c Thu Sep 20 09:42:01 2007 +0100 3.2 +++ b/xen/arch/x86/mm/shadow/multi.c Thu Sep 20 09:57:10 2007 +0100 3.3 @@ -685,7 +685,7 @@ static always_inline void 3.4 /* N.B. For pass-through MMIO, either this test needs to be relaxed, 3.5 * and shadow_set_l1e() trained to handle non-valid MFNs (ugh), or the 3.6 * MMIO areas need to be added to the frame-table to make them "valid". */ 3.7 - if ( !mfn_valid(target_mfn) ) 3.8 + if ( !mfn_valid(target_mfn) && (p2mt != p2m_mmio_direct) ) 3.9 { 3.10 ASSERT((ft == ft_prefetch)); 3.11 *sp = shadow_l1e_empty();
4.1 --- a/xen/include/asm-x86/p2m.h Thu Sep 20 09:42:01 2007 +0100 4.2 +++ b/xen/include/asm-x86/p2m.h Thu Sep 20 09:57:10 2007 +0100 4.3 @@ -222,6 +222,10 @@ void p2m_change_type_global(struct domai 4.4 p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn, 4.5 p2m_type_t ot, p2m_type_t nt); 4.6 4.7 +/* Set mmio addresses in the p2m table (for pass-through) */ 4.8 +int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn); 4.9 +int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn); 4.10 + 4.11 #endif /* _XEN_P2M_H */ 4.12 4.13 /*