ia64/xen-unstable

view xen/drivers/passthrough/pci.c @ 19402:f02a528d2e56

Xen: use proper device ID to search VT-d unit for ARI and SR-IOV device

PCIe Alternative Routing-ID Interpretation (ARI) ECN defines the Extended
Function -- a function whose function number is greater than 7 within an
ARI Device. Intel VT-d spec 1.2 section 8.3.2 specifies that the Extended
Function is under the scope of the same remapping unit as the traditional
function. The hypervisor needs to know whether a function is an Extended
Function so it can find the proper DMAR unit for it.

And section 8.3.3 specifies that the SR-IOV Virtual Function is under the
scope of the same remapping unit as the Physical Function. The hypervisor
also needs to know whether a function is a Virtual Function and which
Physical Function it is associated with, for the same reason.

Signed-off-by: Yu Zhao <yu.zhao@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Mar 19 10:20:11 2009 +0000 (2009-03-19)
parents 2e6de0f50f3f
children 6705898f768d
line source
1 /*
2 * Copyright (C) 2008, Netronome Systems, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 */
18 #include <xen/sched.h>
19 #include <xen/pci.h>
20 #include <xen/pci_regs.h>
21 #include <xen/list.h>
22 #include <xen/prefetch.h>
23 #include <xen/iommu.h>
24 #include <asm/hvm/iommu.h>
25 #include <asm/hvm/irq.h>
26 #include <xen/delay.h>
27 #include <xen/keyhandler.h>
/* Global list of every PCI device known to the hypervisor. */
LIST_HEAD(alldevs_list);
/* Protects alldevs_list and the pci_dev entries linked on it. */
spinlock_t pcidevs_lock = SPIN_LOCK_UNLOCKED;
33 struct pci_dev *alloc_pdev(u8 bus, u8 devfn)
34 {
35 struct pci_dev *pdev;
37 list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
38 if ( pdev->bus == bus && pdev->devfn == devfn )
39 return pdev;
41 pdev = xmalloc(struct pci_dev);
42 if ( !pdev )
43 return NULL;
44 memset(pdev, 0, sizeof(struct pci_dev));
46 *((u8*) &pdev->bus) = bus;
47 *((u8*) &pdev->devfn) = devfn;
48 pdev->domain = NULL;
49 INIT_LIST_HEAD(&pdev->msi_list);
50 list_add(&pdev->alldevs_list, &alldevs_list);
51 spin_lock_init(&pdev->msix_table_lock);
53 return pdev;
54 }
56 void free_pdev(struct pci_dev *pdev)
57 {
58 list_del(&pdev->alldevs_list);
59 xfree(pdev);
60 }
62 struct pci_dev *pci_get_pdev(int bus, int devfn)
63 {
64 struct pci_dev *pdev = NULL;
66 ASSERT(spin_is_locked(&pcidevs_lock));
68 list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
69 if ( (pdev->bus == bus || bus == -1) &&
70 (pdev->devfn == devfn || devfn == -1) )
71 {
72 return pdev;
73 }
75 return NULL;
76 }
78 struct pci_dev *pci_get_pdev_by_domain(struct domain *d, int bus, int devfn)
79 {
80 struct pci_dev *pdev = NULL;
82 ASSERT(spin_is_locked(&pcidevs_lock));
84 list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
85 if ( (pdev->bus == bus || bus == -1) &&
86 (pdev->devfn == devfn || devfn == -1) &&
87 (pdev->domain == d) )
88 {
89 return pdev;
90 }
92 return NULL;
93 }
95 int pci_add_device(u8 bus, u8 devfn)
96 {
97 struct pci_dev *pdev;
98 int ret = -ENOMEM;
100 spin_lock(&pcidevs_lock);
101 pdev = alloc_pdev(bus, devfn);
102 if ( !pdev )
103 goto out;
105 ret = 0;
106 if ( !pdev->domain )
107 {
108 pdev->domain = dom0;
109 ret = iommu_add_device(pdev);
110 if ( ret )
111 goto out;
113 list_add(&pdev->domain_list, &dom0->arch.pdev_list);
114 }
116 out:
117 spin_unlock(&pcidevs_lock);
118 printk(XENLOG_DEBUG "PCI add device %02x:%02x.%x\n", bus,
119 PCI_SLOT(devfn), PCI_FUNC(devfn));
120 return ret;
121 }
123 int pci_remove_device(u8 bus, u8 devfn)
124 {
125 struct pci_dev *pdev;
126 int ret = -ENODEV;;
128 spin_lock(&pcidevs_lock);
129 list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
130 if ( pdev->bus == bus && pdev->devfn == devfn )
131 {
132 ret = iommu_remove_device(pdev);
133 if ( pdev->domain )
134 list_del(&pdev->domain_list);
135 pci_cleanup_msi(pdev);
136 free_pdev(pdev);
137 printk(XENLOG_DEBUG "PCI remove device %02x:%02x.%x\n", bus,
138 PCI_SLOT(devfn), PCI_FUNC(devfn));
139 break;
140 }
142 spin_unlock(&pcidevs_lock);
143 return ret;
144 }
146 int pci_add_device_ext(u8 bus, u8 devfn, struct pci_dev_info *info)
147 {
148 int ret;
149 char *pdev_type;
150 struct pci_dev *pdev;
152 if (info->is_extfn)
153 pdev_type = "Extended Function";
154 else if (info->is_virtfn)
155 pdev_type = "Virtual Function";
156 else
157 return -EINVAL;;
160 ret = -ENOMEM;
161 spin_lock(&pcidevs_lock);
162 pdev = alloc_pdev(bus, devfn);
163 if ( !pdev )
164 goto out;
166 pdev->info = *info;
168 ret = 0;
169 if ( !pdev->domain )
170 {
171 pdev->domain = dom0;
172 ret = iommu_add_device(pdev);
173 if ( ret )
174 goto out;
176 list_add(&pdev->domain_list, &dom0->arch.pdev_list);
177 }
179 out:
180 spin_unlock(&pcidevs_lock);
181 printk(XENLOG_DEBUG "PCI add %s %02x:%02x.%x\n", pdev_type,
182 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
184 return ret;
185 }
/*
 * Release all passthrough IRQ state held by domain @d: for every machine
 * IRQ with a mapping recorded in the domain's hvm_irq_dpci bitmap, unbind
 * the guest pirq, kill the associated injection timer, free the
 * device/INTx/GSI link entries, and finally drop the hvm_irq_dpci
 * structure itself.  No-op unless the IOMMU is enabled and the domain is
 * HVM or needs an IOMMU.
 */
static void pci_clean_dpci_irqs(struct domain *d)
{
    struct hvm_irq_dpci *hvm_irq_dpci = NULL;
    uint32_t i;
    struct list_head *digl_list, *tmp;
    struct dev_intx_gsi_link *digl;

    if ( !iommu_enabled )
        return;

    if ( !is_hvm_domain(d) && !need_iommu(d) )
        return;

    spin_lock(&d->event_lock);
    hvm_irq_dpci = domain_get_irq_dpci(d);
    if ( hvm_irq_dpci != NULL )
    {
        /* Walk only the IRQs flagged as mapped in the bitmap. */
        for ( i = find_first_bit(hvm_irq_dpci->mapping, NR_IRQS);
              i < NR_IRQS;
              i = find_next_bit(hvm_irq_dpci->mapping, NR_IRQS, i + 1) )
        {
            pirq_guest_unbind(d, i);
            kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);

            /* Safe iteration: entries are freed while walking the list. */
            list_for_each_safe ( digl_list, tmp,
                                 &hvm_irq_dpci->mirq[i].digl_list )
            {
                digl = list_entry(digl_list,
                                  struct dev_intx_gsi_link, list);
                list_del(&digl->list);
                xfree(digl);
            }
        }

        /* Detach the structure from the domain before freeing it. */
        d->arch.hvm_domain.irq.dpci = NULL;
        xfree(hvm_irq_dpci);
    }
    spin_unlock(&d->event_lock);
}
227 void pci_release_devices(struct domain *d)
228 {
229 struct pci_dev *pdev;
230 u8 bus, devfn;
232 spin_lock(&pcidevs_lock);
233 pci_clean_dpci_irqs(d);
234 while ( (pdev = pci_get_pdev_by_domain(d, -1, -1)) )
235 {
236 pci_cleanup_msi(pdev);
237 bus = pdev->bus; devfn = pdev->devfn;
238 deassign_device(d, bus, devfn);
239 }
240 spin_unlock(&pcidevs_lock);
241 }
243 #ifdef SUPPORT_MSI_REMAPPING
244 static void dump_pci_devices(unsigned char ch)
245 {
246 struct pci_dev *pdev;
247 struct msi_desc *msi;
249 printk("==== PCI devices ====\n");
250 spin_lock(&pcidevs_lock);
252 list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
253 {
254 printk("%02x:%02x.%x - dom %-3d - MSIs < ",
255 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
256 pdev->domain ? pdev->domain->domain_id : -1);
257 list_for_each_entry ( msi, &pdev->msi_list, list )
258 printk("%d ", msi->vector);
259 printk(">\n");
260 }
262 spin_unlock(&pcidevs_lock);
263 }
/* Register the 'Q' debug key that dumps the PCI device list. */
static int __init setup_dump_pcidevs(void)
{
    register_keyhandler('Q', dump_pci_devices, "dump PCI devices");
    return 0;
}
__initcall(setup_dump_pcidevs);
271 #endif
274 /*
275 * Local variables:
276 * mode: C
277 * c-set-style: "BSD"
278 * c-basic-offset: 4
279 * indent-tabs-mode: nil
280 * End:
281 */