ia64/xen-unstable

view xen/drivers/passthrough/pci.c @ 19805:2f1fa2215e60

VT-d: pci code cleanup

This patch moves the PCI code from iommu.c to pci.c. Instead of setting up
the PCI hierarchy in the bus2bridge array from iommu_context_mapping,
scan_pci_devices is called once to add all existing PCI devices in the
system to alldevs_list and to set up the PCI hierarchy in bus2bridge. In
addition, find_upstream_bridge is implemented to find the upstream
PCIe-to-PCI/PCI-X bridge or legacy PCI bridge for a PCI device, so that
context map/unmap for a PCI device, and even source-id setting, can be
handled cleanly.

Signed-off-by: Weidong Han <weidong.han@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 19 08:45:20 2009 +0100 (2009-06-19)
parents 6705898f768d
children b6612dd06218
line source
/*
 * Copyright (C) 2008, Netronome Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <xen/sched.h>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include <xen/list.h>
#include <xen/prefetch.h>
#include <xen/iommu.h>
#include <asm/hvm/iommu.h>
#include <asm/hvm/irq.h>
#include <xen/delay.h>
#include <xen/keyhandler.h>
LIST_HEAD(alldevs_list);
spinlock_t pcidevs_lock = SPIN_LOCK_UNLOCKED;

#define MAX_BUSES 256
static struct {
    u8 map;
    u8 bus;
    u8 devfn;
} bus2bridge[MAX_BUSES];

/* bus2bridge_lock protects bus2bridge array */
static DEFINE_SPINLOCK(bus2bridge_lock);
struct pci_dev *alloc_pdev(u8 bus, u8 devfn)
{
    struct pci_dev *pdev;

    list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
        if ( pdev->bus == bus && pdev->devfn == devfn )
            return pdev;

    pdev = xmalloc(struct pci_dev);
    if ( !pdev )
        return NULL;
    memset(pdev, 0, sizeof(struct pci_dev));

    *((u8*) &pdev->bus) = bus;
    *((u8*) &pdev->devfn) = devfn;
    pdev->domain = NULL;
    INIT_LIST_HEAD(&pdev->msi_list);
    list_add(&pdev->alldevs_list, &alldevs_list);
    spin_lock_init(&pdev->msix_table_lock);

    return pdev;
}
void free_pdev(struct pci_dev *pdev)
{
    list_del(&pdev->alldevs_list);
    xfree(pdev);
}
struct pci_dev *pci_get_pdev(int bus, int devfn)
{
    struct pci_dev *pdev = NULL;

    ASSERT(spin_is_locked(&pcidevs_lock));

    list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
        if ( (pdev->bus == bus || bus == -1) &&
             (pdev->devfn == devfn || devfn == -1) )
        {
            return pdev;
        }

    return NULL;
}
struct pci_dev *pci_get_pdev_by_domain(struct domain *d, int bus, int devfn)
{
    struct pci_dev *pdev = NULL;

    ASSERT(spin_is_locked(&pcidevs_lock));

    list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
        if ( (pdev->bus == bus || bus == -1) &&
             (pdev->devfn == devfn || devfn == -1) &&
             (pdev->domain == d) )
        {
            return pdev;
        }

    return NULL;
}
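
/*
 * Illustrative usage sketch (not part of this file): both lookup helpers
 * above assert that pcidevs_lock is held, so a hypothetical caller would
 * bracket the lookup and any use of the returned pdev with the lock.  The
 * -1 wildcard behaviour mirrors the checks in pci_get_pdev().
 */
#if 0   /* example only */
static struct domain *example_owner_of(u8 bus, u8 devfn)
{
    struct pci_dev *pdev;
    struct domain *owner = NULL;

    spin_lock(&pcidevs_lock);
    pdev = pci_get_pdev(bus, devfn);    /* exact match; -1 would wildcard */
    if ( pdev )
        owner = pdev->domain;
    spin_unlock(&pcidevs_lock);

    return owner;
}
#endif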
int pci_add_device(u8 bus, u8 devfn)
{
    struct pci_dev *pdev;
    int ret = -ENOMEM;

    spin_lock(&pcidevs_lock);
    pdev = alloc_pdev(bus, devfn);
    if ( !pdev )
        goto out;

    ret = 0;
    if ( !pdev->domain )
    {
        pdev->domain = dom0;
        ret = iommu_add_device(pdev);
        if ( ret )
            goto out;

        list_add(&pdev->domain_list, &dom0->arch.pdev_list);
    }

out:
    spin_unlock(&pcidevs_lock);
    printk(XENLOG_DEBUG "PCI add device %02x:%02x.%x\n", bus,
           PCI_SLOT(devfn), PCI_FUNC(devfn));
    return ret;
}
int pci_remove_device(u8 bus, u8 devfn)
{
    struct pci_dev *pdev;
    int ret = -ENODEV;

    spin_lock(&pcidevs_lock);
    list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
        if ( pdev->bus == bus && pdev->devfn == devfn )
        {
            ret = iommu_remove_device(pdev);
            if ( pdev->domain )
                list_del(&pdev->domain_list);
            pci_cleanup_msi(pdev);
            free_pdev(pdev);
            printk(XENLOG_DEBUG "PCI remove device %02x:%02x.%x\n", bus,
                   PCI_SLOT(devfn), PCI_FUNC(devfn));
            break;
        }

    spin_unlock(&pcidevs_lock);
    return ret;
}
int pci_add_device_ext(u8 bus, u8 devfn, struct pci_dev_info *info)
{
    int ret;
    char *pdev_type;
    struct pci_dev *pdev;

    if (info->is_extfn)
        pdev_type = "Extended Function";
    else if (info->is_virtfn)
        pdev_type = "Virtual Function";
    else
        return -EINVAL;

    ret = -ENOMEM;
    spin_lock(&pcidevs_lock);
    pdev = alloc_pdev(bus, devfn);
    if ( !pdev )
        goto out;

    pdev->info = *info;

    ret = 0;
    if ( !pdev->domain )
    {
        pdev->domain = dom0;
        ret = iommu_add_device(pdev);
        if ( ret )
            goto out;

        list_add(&pdev->domain_list, &dom0->arch.pdev_list);
    }

out:
    spin_unlock(&pcidevs_lock);
    printk(XENLOG_DEBUG "PCI add %s %02x:%02x.%x\n", pdev_type,
           bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

    return ret;
}
static void pci_clean_dpci_irqs(struct domain *d)
{
    struct hvm_irq_dpci *hvm_irq_dpci = NULL;
    uint32_t i;
    struct list_head *digl_list, *tmp;
    struct dev_intx_gsi_link *digl;

    if ( !iommu_enabled )
        return;

    if ( !is_hvm_domain(d) && !need_iommu(d) )
        return;

    spin_lock(&d->event_lock);
    hvm_irq_dpci = domain_get_irq_dpci(d);
    if ( hvm_irq_dpci != NULL )
    {
        for ( i = find_first_bit(hvm_irq_dpci->mapping, d->nr_pirqs);
              i < d->nr_pirqs;
              i = find_next_bit(hvm_irq_dpci->mapping, d->nr_pirqs, i + 1) )
        {
            pirq_guest_unbind(d, i);
            kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);

            list_for_each_safe ( digl_list, tmp,
                                 &hvm_irq_dpci->mirq[i].digl_list )
            {
                digl = list_entry(digl_list,
                                  struct dev_intx_gsi_link, list);
                list_del(&digl->list);
                xfree(digl);
            }
        }

        d->arch.hvm_domain.irq.dpci = NULL;
        free_hvm_irq_dpci(hvm_irq_dpci);
    }
    spin_unlock(&d->event_lock);
}
void pci_release_devices(struct domain *d)
{
    struct pci_dev *pdev;
    u8 bus, devfn;

    spin_lock(&pcidevs_lock);
    pci_clean_dpci_irqs(d);
    while ( (pdev = pci_get_pdev_by_domain(d, -1, -1)) )
    {
        pci_cleanup_msi(pdev);
        bus = pdev->bus; devfn = pdev->devfn;
        deassign_device(d, bus, devfn);
    }
    spin_unlock(&pcidevs_lock);
}
#define PCI_CLASS_BRIDGE_PCI    0x0604

int pdev_type(u8 bus, u8 devfn)
{
    u16 class_device;
    u16 status, creg;
    int pos;
    u8 d = PCI_SLOT(devfn), f = PCI_FUNC(devfn);

    class_device = pci_conf_read16(bus, d, f, PCI_CLASS_DEVICE);
    if ( class_device == PCI_CLASS_BRIDGE_PCI )
    {
        pos = pci_find_next_cap(bus, devfn,
                                PCI_CAPABILITY_LIST, PCI_CAP_ID_EXP);
        if ( !pos )
            return DEV_TYPE_LEGACY_PCI_BRIDGE;
        creg = pci_conf_read16(bus, d, f, pos + PCI_EXP_FLAGS);
        return ((creg & PCI_EXP_FLAGS_TYPE) >> 4) == PCI_EXP_TYPE_PCI_BRIDGE ?
            DEV_TYPE_PCIe2PCI_BRIDGE : DEV_TYPE_PCIe_BRIDGE;
    }

    status = pci_conf_read16(bus, d, f, PCI_STATUS);
    if ( !(status & PCI_STATUS_CAP_LIST) )
        return DEV_TYPE_PCI;

    if ( pci_find_next_cap(bus, devfn, PCI_CAPABILITY_LIST, PCI_CAP_ID_EXP) )
        return DEV_TYPE_PCIe_ENDPOINT;

    return DEV_TYPE_PCI;
}
/*
 * Find the upstream PCIe-to-PCI/PCI-X bridge or legacy PCI bridge.
 * Return 0: the device is an integrated PCI device or a PCIe device.
 * Return 1: an upstream PCIe-to-PCI/PCI-X or legacy PCI bridge was found.
 * Return -1: failure (the bridge walk did not terminate).
 */
int find_upstream_bridge(u8 *bus, u8 *devfn, u8 *secbus)
{
    int ret = 0;
    int cnt = 0;

    if ( *bus == 0 )
        return 0;

    if ( !bus2bridge[*bus].map )
        return 0;

    ret = 1;
    spin_lock(&bus2bridge_lock);
    while ( bus2bridge[*bus].map )
    {
        *secbus = *bus;
        *devfn = bus2bridge[*bus].devfn;
        *bus = bus2bridge[*bus].bus;
        if ( cnt++ >= MAX_BUSES )
        {
            ret = -1;
            goto out;
        }
    }

out:
    spin_unlock(&bus2bridge_lock);
    return ret;
}
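
/*
 * Illustrative usage sketch (not part of this file): as the changeset
 * description explains, a VT-d caller that needs to map a device sitting
 * behind a PCIe-to-PCI/PCI-X or legacy PCI bridge can use
 * find_upstream_bridge() to discover which bridge, and which secondary
 * bus below it, actually issues requests on the device's behalf, e.g.
 * for source-id handling.  The return-value handling below follows the
 * comment above; what a caller does in each case is only sketched.
 */
#if 0   /* example only */
static int example_resolve_requester(u8 bus, u8 devfn)
{
    u8 secbus = bus;

    switch ( find_upstream_bridge(&bus, &devfn, &secbus) )
    {
    case 0:
        /* Integrated PCI device or PCIe device: use its own BDF. */
        break;
    case 1:
        /* bus/devfn now name the upstream bridge; secbus is the bus
         * immediately below it.  Requests appear to come from here. */
        break;
    default:
        /* Bridge walk did not terminate; treat as an error. */
        return -1;
    }

    /* ... program the IOMMU context / source-id using bus and devfn ... */
    return 0;
}
#endif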
/*
 * Scan PCI devices to add all existing PCI devices to alldevs_list,
 * and set up the PCI hierarchy in the bus2bridge array.  This function
 * is only called during VT-d hardware setup.
 */
int __init scan_pci_devices(void)
{
    struct pci_dev *pdev;
    int bus, dev, func;
    u8 sec_bus, sub_bus;
    int type;
    u32 l;

    spin_lock(&pcidevs_lock);
    for ( bus = 0; bus < 256; bus++ )
    {
        for ( dev = 0; dev < 32; dev++ )
        {
            for ( func = 0; func < 8; func++ )
            {
                l = pci_conf_read32(bus, dev, func, PCI_VENDOR_ID);
                /* some broken boards return 0 or ~0 if a slot is empty: */
                if ( (l == 0xffffffff) || (l == 0x00000000) ||
                     (l == 0x0000ffff) || (l == 0xffff0000) )
                    continue;

                pdev = alloc_pdev(bus, PCI_DEVFN(dev, func));
                if ( !pdev )
                {
                    printk("%s: alloc_pdev failed.\n", __func__);
                    spin_unlock(&pcidevs_lock);
                    return -ENOMEM;
                }

                /* build bus2bridge */
                type = pdev_type(bus, PCI_DEVFN(dev, func));
                switch ( type )
                {
                case DEV_TYPE_PCIe_BRIDGE:
                    break;

                case DEV_TYPE_PCIe2PCI_BRIDGE:
                case DEV_TYPE_LEGACY_PCI_BRIDGE:
                    sec_bus = pci_conf_read8(bus, dev, func,
                                             PCI_SECONDARY_BUS);
                    sub_bus = pci_conf_read8(bus, dev, func,
                                             PCI_SUBORDINATE_BUS);

                    spin_lock(&bus2bridge_lock);
                    for ( sub_bus &= 0xff; sec_bus <= sub_bus; sec_bus++ )
                    {
                        bus2bridge[sec_bus].map = 1;
                        bus2bridge[sec_bus].bus = bus;
                        bus2bridge[sec_bus].devfn = PCI_DEVFN(dev, func);
                    }
                    spin_unlock(&bus2bridge_lock);
                    break;

                case DEV_TYPE_PCIe_ENDPOINT:
                case DEV_TYPE_PCI:
                    break;

                default:
                    printk("%s: unknown type: bdf = %x:%x.%x\n",
                           __func__, bus, dev, func);
                    spin_unlock(&pcidevs_lock);
                    return -EINVAL;
                }
            }
        }
    }

    spin_unlock(&pcidevs_lock);
    return 0;
}
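
/*
 * Worked example (illustrative, assuming a hypothetical topology): if
 * scan_pci_devices() finds a PCIe-to-PCI bridge at 00:1e.0 whose
 * PCI_SECONDARY_BUS is 5 and whose PCI_SUBORDINATE_BUS is 6, the loop
 * above fills in
 *
 *     bus2bridge[5] = { .map = 1, .bus = 0, .devfn = PCI_DEVFN(0x1e, 0) };
 *     bus2bridge[6] = { .map = 1, .bus = 0, .devfn = PCI_DEVFN(0x1e, 0) };
 *
 * so that find_upstream_bridge() can later walk any device on bus 5 or 6
 * back to the bridge at 00:1e.0.
 */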
#ifdef SUPPORT_MSI_REMAPPING
static void dump_pci_devices(unsigned char ch)
{
    struct pci_dev *pdev;
    struct msi_desc *msi;

    printk("==== PCI devices ====\n");
    spin_lock(&pcidevs_lock);

    list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
    {
        printk("%02x:%02x.%x - dom %-3d - MSIs < ",
               pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
               pdev->domain ? pdev->domain->domain_id : -1);
        list_for_each_entry ( msi, &pdev->msi_list, list )
            printk("%d ", msi->vector);
        printk(">\n");
    }

    spin_unlock(&pcidevs_lock);
}

static int __init setup_dump_pcidevs(void)
{
    register_keyhandler('Q', dump_pci_devices, "dump PCI devices");
    return 0;
}
__initcall(setup_dump_pcidevs);
#endif
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */