ia64/linux-2.6.18-xen.hg

view drivers/pci/msi-xen.c @ 659:ad374a7a9f3e

Revert 654:8925ce7552528 (linux/pci-msi: translate Xen-provided PIRQs)

Breaks the -xenU configuration ("MAX_IO_APICS undefined")

Also implicated in kernel crash during save/restore in our automated
tests.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Sep 05 12:39:29 2008 +0100 (2008-09-05)
parents c47b7e47ab19
children 7886619f623e
line source
/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/smp_lock.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>

#include <asm/errno.h>
#include <asm/io.h>
#include <asm/smp.h>

#include "pci.h"
#include "msi.h"

static int pci_msi_enable = 1;

static struct msi_ops *msi_ops;

int msi_register(struct msi_ops *ops)
{
        msi_ops = ops;
        return 0;
}

static LIST_HEAD(msi_dev_head);
DEFINE_SPINLOCK(msi_dev_lock);

struct msi_dev_list {
        struct pci_dev *dev;
        struct list_head list;
        spinlock_t pirq_list_lock;
        struct list_head pirq_list_head;
};

struct msi_pirq_entry {
        struct list_head list;
        int pirq;
        int entry_nr;
};
static struct msi_dev_list *get_msi_dev_pirq_list(struct pci_dev *dev)
{
        struct msi_dev_list *msi_dev_list, *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&msi_dev_lock, flags);

        list_for_each_entry(msi_dev_list, &msi_dev_head, list)
                if ( msi_dev_list->dev == dev )
                        ret = msi_dev_list;

        if ( ret ) {
                spin_unlock_irqrestore(&msi_dev_lock, flags);
                return ret;
        }

        /* No msi_dev entry allocated for this device yet. */
        ret = kmalloc(sizeof(struct msi_dev_list), GFP_ATOMIC);

        /* Failed to allocate the msi_dev structure. */
        if ( !ret ) {
                spin_unlock_irqrestore(&msi_dev_lock, flags);
                return NULL;
        }

        spin_lock_init(&ret->pirq_list_lock);
        INIT_LIST_HEAD(&ret->pirq_list_head);
        list_add_tail(&ret->list, &msi_dev_head);
        spin_unlock_irqrestore(&msi_dev_lock, flags);
        return ret;
}

static int attach_pirq_entry(int pirq, int entry_nr,
                             struct msi_dev_list *msi_dev_entry)
{
        struct msi_pirq_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        unsigned long flags;

        if (!entry)
                return -ENOMEM;
        entry->pirq = pirq;
        entry->entry_nr = entry_nr;
        spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
        list_add_tail(&entry->list, &msi_dev_entry->pirq_list_head);
        spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
        return 0;
}

static void detach_pirq_entry(int entry_nr,
                              struct msi_dev_list *msi_dev_entry)
{
        unsigned long flags;
        struct msi_pirq_entry *pirq_entry;

        list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
                if (pirq_entry->entry_nr == entry_nr) {
                        spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
                        list_del(&pirq_entry->list);
                        spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
                        kfree(pirq_entry);
                        return;
                }
        }
}

/*
 * pciback will provide the device's owner.
 */
static int (*get_owner)(struct pci_dev *dev);

int register_msi_get_owner(int (*func)(struct pci_dev *dev))
{
        if (get_owner) {
                printk(KERN_WARNING "register msi_get_owner again\n");
                return -EEXIST;
        }
        get_owner = func;
        return 0;
}

int unregister_msi_get_owner(int (*func)(struct pci_dev *dev))
{
        if (get_owner != func)
                return -EINVAL;
        get_owner = NULL;
        return 0;
}
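
/*
 * A minimal sketch of how a backend such as pciback might hook in an
 * owner callback; the callback and the domid lookup helper below are
 * hypothetical illustrations, not part of this file:
 */
#if 0
static int pciback_get_owner(struct pci_dev *dev)
{
        /* Return the domid of the guest this device is assigned to,
         * or a negative value to fall back to DOMID_SELF. */
        return find_domid_for_device(dev);	/* hypothetical helper */
}

static int __init pciback_msi_init(void)
{
        return register_msi_get_owner(pciback_get_owner);
}
#endif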
static int msi_get_dev_owner(struct pci_dev *dev)
{
        int owner;

        BUG_ON(!is_initial_xendomain());
        if (get_owner && (owner = get_owner(dev)) >= 0) {
                printk(KERN_INFO "get owner for dev %x get %x\n",
                       dev->devfn, owner);
                return owner;
        }

        return DOMID_SELF;
}

static int msi_unmap_pirq(struct pci_dev *dev, int pirq)
{
        struct physdev_unmap_pirq unmap;
        int rc;

        unmap.domid = msi_get_dev_owner(dev);
        unmap.pirq = pirq;

        if ((rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap)))
                printk(KERN_WARNING "unmap irq %x failed\n", pirq);

        if (rc < 0)
                return rc;
        return 0;
}

static u64 find_table_base(struct pci_dev *dev, int pos)
{
        u8 bar;
        u32 reg;
        unsigned long flags;

        pci_read_config_dword(dev, msix_table_offset_reg(pos), &reg);
        bar = reg & PCI_MSIX_FLAGS_BIRMASK;

        flags = pci_resource_flags(dev, bar);
        if (flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET | IORESOURCE_BUSY))
                return 0;

        return pci_resource_start(dev, bar);
}

/*
 * Protected by msi_lock
 */
static int msi_map_pirq_to_vector(struct pci_dev *dev, int pirq,
                                  int entry_nr, u64 table_base)
{
        struct physdev_map_pirq map_irq;
        int rc;
        domid_t domid = DOMID_SELF;

        domid = msi_get_dev_owner(dev);

        map_irq.domid = domid;
        map_irq.type = MAP_PIRQ_TYPE_MSI;
        map_irq.index = -1;
        map_irq.pirq = pirq;
        map_irq.bus = dev->bus->number;
        map_irq.devfn = dev->devfn;
        map_irq.entry_nr = entry_nr;
        map_irq.table_base = table_base;

        if ((rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq)))
                printk(KERN_WARNING "map irq failed\n");

        if (rc < 0)
                return rc;

        return map_irq.pirq;
}

static int msi_map_vector(struct pci_dev *dev, int entry_nr, u64 table_base)
{
        return msi_map_pirq_to_vector(dev, -1, entry_nr, table_base);
}
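
/*
 * msi_map_vector() passes pirq == -1, asking Xen to pick a free pirq
 * for the vector; the save/restore paths below instead pass the
 * previously allocated pirq so the device keeps the same number
 * across a suspend/resume cycle.
 */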
static int msi_init(void)
{
        static int status = 0;

        if (pci_msi_quirk) {
                pci_msi_enable = 0;
                printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
                status = -EINVAL;
        }

        return status;
}

void pci_scan_msi_device(struct pci_dev *dev) { }

void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
        u16 control;

        pci_read_config_word(dev, msi_control_reg(pos), &control);
        if (type == PCI_CAP_ID_MSI) {
                /* Clear the MSI_enable bit */
                msi_disable(control);
                pci_write_config_word(dev, msi_control_reg(pos), control);
                dev->msi_enabled = 0;
        } else {
                msix_disable(control);
                pci_write_config_word(dev, msi_control_reg(pos), control);
                dev->msix_enabled = 0;
        }
        if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
                /* PCI Express Endpoint device detected */
                pci_intx(dev, 1);  /* enable intx */
        }
}

static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
{
        u16 control;

        pci_read_config_word(dev, msi_control_reg(pos), &control);
        if (type == PCI_CAP_ID_MSI) {
                /* Set enabled bits to single MSI & enable MSI_enable bit */
                msi_enable(control, 1);
                pci_write_config_word(dev, msi_control_reg(pos), control);
                dev->msi_enabled = 1;
        } else {
                msix_enable(control);
                pci_write_config_word(dev, msi_control_reg(pos), control);
                dev->msix_enabled = 1;
        }
        if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
                /* PCI Express Endpoint device detected */
                pci_intx(dev, 0);  /* disable intx */
        }
}

#ifdef CONFIG_PM
int pci_save_msi_state(struct pci_dev *dev)
{
        int pos;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
        if (pos <= 0 || dev->no_msi)
                return 0;

        if (!dev->msi_enabled)
                return 0;

        /* Restore dev->irq to its default pin-assertion vector */
        msi_unmap_pirq(dev, dev->irq);
        /* Disable MSI mode */
        disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
        /* Set the flags for use of restore */
        dev->msi_enabled = 1;
        return 0;
}

void pci_restore_msi_state(struct pci_dev *dev)
{
        int pos, pirq;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
        if (pos <= 0)
                return;

        if (!dev->msi_enabled)
                return;

        pirq = msi_map_pirq_to_vector(dev, dev->irq, 0, 0);
        if (pirq < 0)
                return;
        enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
}

int pci_save_msix_state(struct pci_dev *dev)
{
        int pos;
        unsigned long flags;
        struct msi_dev_list *msi_dev_entry;
        struct msi_pirq_entry *pirq_entry, *tmp;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
        if (pos <= 0 || dev->no_msi)
                return 0;

        /* save the capability */
        if (!dev->msix_enabled)
                return 0;

        msi_dev_entry = get_msi_dev_pirq_list(dev);

        spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
        list_for_each_entry_safe(pirq_entry, tmp,
                                 &msi_dev_entry->pirq_list_head, list)
                msi_unmap_pirq(dev, pirq_entry->pirq);
        spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);

        disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
        /* Set the flags for use of restore */
        dev->msix_enabled = 1;

        return 0;
}

void pci_restore_msix_state(struct pci_dev *dev)
{
        int pos;
        unsigned long flags;
        u64 table_base;
        struct msi_dev_list *msi_dev_entry;
        struct msi_pirq_entry *pirq_entry, *tmp;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
        if (pos <= 0)
                return;

        if (!dev->msix_enabled)
                return;

        msi_dev_entry = get_msi_dev_pirq_list(dev);
        table_base = find_table_base(dev, pos);
        if (!table_base)
                return;

        spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
        list_for_each_entry_safe(pirq_entry, tmp,
                                 &msi_dev_entry->pirq_list_head, list) {
                int rc = msi_map_pirq_to_vector(dev, pirq_entry->pirq,
                                                pirq_entry->entry_nr, table_base);
                if (rc < 0)
                        printk(KERN_WARNING
                               "%s: re-mapping irq #%d (pirq%d) failed: %d\n",
                               pci_name(dev), pirq_entry->entry_nr,
                               pirq_entry->pirq, rc);
        }
        spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);

        enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
}
#endif
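
/*
 * Save/restore summary: pci_save_msi(x)_state() unmaps every pirq and
 * clears the capability's enable bit, but deliberately leaves
 * dev->msi(x)_enabled set so the restore path knows the device was
 * using MSI/MSI-X. pci_restore_msi(x)_state() then re-maps the saved
 * pirqs via PHYSDEVOP_map_pirq and re-enables the capability.
 */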
/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with a
 * single MSI vector, regardless of whether the device function is
 * capable of handling multiple messages. A return of zero indicates
 * the successful setup of an entry zero with the new MSI vector;
 * non-zero indicates failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
        int pos, pirq;
        u16 control;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
        pci_read_config_word(dev, msi_control_reg(pos), &control);

        pirq = msi_map_vector(dev, 0, 0);
        if (pirq < 0)
                return -EBUSY;

        dev->irq = pirq;
        /* Set MSI enabled bits */
        enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
        dev->msi_enabled = 1;

        return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Set up the MSI-X capability structure of the device function with
 * the requested number of MSI-X vectors. A return of zero indicates
 * the successful setup of the requested MSI-X entries with allocated
 * vectors; non-zero indicates failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
                                struct msix_entry *entries, int nvec)
{
        u64 table_base;
        int pirq, i, j, mapped, pos;
        struct msi_dev_list *msi_dev_entry = get_msi_dev_pirq_list(dev);
        struct msi_pirq_entry *pirq_entry;

        if (!msi_dev_entry)
                return -ENOMEM;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
        table_base = find_table_base(dev, pos);
        if (!table_base)
                return -ENODEV;

        /* MSI-X Table Initialization */
        for (i = 0; i < nvec; i++) {
                mapped = 0;
                list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
                        if (pirq_entry->entry_nr == entries[i].entry) {
                                printk(KERN_WARNING "msix entry %d for dev "
                                       "%02x:%02x:%01x was not freed before "
                                       "being acquired again.\n",
                                       entries[i].entry, dev->bus->number,
                                       PCI_SLOT(dev->devfn),
                                       PCI_FUNC(dev->devfn));
                                entries[i].vector = pirq_entry->pirq;
                                mapped = 1;
                                break;
                        }
                }
                if (mapped)
                        continue;
                pirq = msi_map_vector(dev, entries[i].entry, table_base);
                if (pirq < 0)
                        break;
                attach_pirq_entry(pirq, entries[i].entry, msi_dev_entry);
                entries[i].vector = pirq;
        }

        if (i != nvec) {
                for (j = --i; j >= 0; j--) {
                        msi_unmap_pirq(dev, entries[j].vector);
                        detach_pirq_entry(entries[j].entry, msi_dev_entry);
                        entries[j].vector = 0;
                }
                return -EBUSY;
        }

        enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
        dev->msix_enabled = 1;

        return 0;
}
/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with a
 * single MSI vector when its software driver requests that MSI mode be
 * enabled on the hardware device function. A return of zero indicates
 * the successful setup of an entry zero with the new MSI vector;
 * non-zero indicates failure.
 **/
extern int pci_frontend_enable_msi(struct pci_dev *dev);
int pci_enable_msi(struct pci_dev* dev)
{
        struct pci_bus *bus;
        int pos, temp, status = -EINVAL;

        if (!pci_msi_enable || !dev)
                return status;

        if (dev->no_msi)
                return status;

        for (bus = dev->bus; bus; bus = bus->parent)
                if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
                        return -EINVAL;

        status = msi_init();
        if (status < 0)
                return status;

#ifdef CONFIG_XEN_PCIDEV_FRONTEND
        if (!is_initial_xendomain())
        {
                int ret;

                temp = dev->irq;
                ret = pci_frontend_enable_msi(dev);
                if (ret)
                        return ret;

                dev->irq_old = temp;

                return ret;
        }
#endif

        temp = dev->irq;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
        if (!pos)
                return -EINVAL;

        /* Check whether the driver already requested MSI-X vectors */
        if (dev->msix_enabled) {
                printk(KERN_INFO "PCI: %s: Can't enable MSI. "
                       "Device already has MSI-X vectors assigned\n",
                       pci_name(dev));
                dev->irq = temp;
                return -EINVAL;
        }

        status = msi_capability_init(dev);
        if ( !status )
                dev->irq_old = temp;
        else
                dev->irq = temp;

        return status;
}
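
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file): after pci_enable_msi() succeeds, dev->irq holds the Xen pirq
 * and is what the driver hands to request_irq(). The function and
 * device names below are illustrative only.
 */
#if 0
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int rc;

        rc = pci_enable_device(pdev);
        if (rc)
                return rc;

        if (pci_enable_msi(pdev))
                /* Fall back to the legacy pin-assertion interrupt. */
                printk(KERN_INFO "foo: MSI unavailable, using INTx\n");

        /* foo_interrupt is a hypothetical handler. */
        rc = request_irq(pdev->irq, foo_interrupt, IRQF_SHARED, "foo", pdev);
        if (rc)
                pci_disable_msi(pdev);
        return rc;
}
#endif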
extern void pci_frontend_disable_msi(struct pci_dev* dev);
void pci_disable_msi(struct pci_dev* dev)
{
        int pos;
        int pirq;

        if (!pci_msi_enable)
                return;
        if (!dev)
                return;

#ifdef CONFIG_XEN_PCIDEV_FRONTEND
        if (!is_initial_xendomain()) {
                pci_frontend_disable_msi(dev);
                dev->irq = dev->irq_old;
                return;
        }
#endif

        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
        if (!pos)
                return;

        pirq = dev->irq;
        /* Restore dev->irq to its default pin-assertion vector */
        dev->irq = dev->irq_old;
        msi_unmap_pirq(dev, pirq);

        /* Disable MSI mode */
        disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
}
/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X vectors requested for allocation by device driver
 *
 * Set up the MSI-X capability structure of the device function with the
 * number of requested vectors when its software driver requests that
 * MSI-X mode be enabled on the hardware device function. A return of
 * zero indicates the successful configuration of the MSI-X capability
 * structure with the newly allocated MSI-X vectors. A return of < 0
 * indicates a failure, and a return of > 0 indicates that the driver
 * requested more vectors than are available; the driver should use the
 * returned value to re-send its request.
 **/
extern int pci_frontend_enable_msix(struct pci_dev *dev,
                                    struct msix_entry *entries, int nvec);
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
        struct pci_bus *bus;
        int status, pos, nr_entries;
        int i, j, temp;
        u16 control;

        if (!pci_msi_enable || !dev || !entries)
                return -EINVAL;

        if (dev->no_msi)
                return -EINVAL;

        for (bus = dev->bus; bus; bus = bus->parent)
                if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
                        return -EINVAL;

#ifdef CONFIG_XEN_PCIDEV_FRONTEND
        if (!is_initial_xendomain()) {
                int ret;

                ret = pci_frontend_enable_msix(dev, entries, nvec);
                if (ret) {
                        printk("get %x from pci_frontend_enable_msix\n", ret);
                        return ret;
                }

                return 0;
        }
#endif

        status = msi_init();
        if (status < 0)
                return status;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
        if (!pos)
                return -EINVAL;

        pci_read_config_word(dev, msi_control_reg(pos), &control);
        nr_entries = multi_msix_capable(control);
        if (nvec > nr_entries)
                return -EINVAL;

        /* Check for any invalid entries */
        for (i = 0; i < nvec; i++) {
                if (entries[i].entry >= nr_entries)
                        return -EINVAL;	/* invalid entry */
                for (j = i + 1; j < nvec; j++) {
                        if (entries[i].entry == entries[j].entry)
                                return -EINVAL;	/* duplicate entry */
                }
        }

        temp = dev->irq;
        /* Check whether the driver already requested an MSI vector */
        if (dev->msi_enabled) {
                printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
                       "Device already has an MSI vector assigned\n",
                       pci_name(dev));
                dev->irq = temp;
                return -EINVAL;
        }

        status = msix_capability_init(dev, entries, nvec);

        if ( !status )
                dev->irq_old = temp;
        else
                dev->irq = temp;

        return status;
}
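
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file): the driver fills entries[i].entry with the MSI-X table slots
 * it wants and reads the assigned pirqs back from entries[i].vector.
 * FOO_NVEC and foo_setup_msix are illustrative names only.
 */
#if 0
#define FOO_NVEC 2	/* hypothetical vector count */

static int foo_setup_msix(struct pci_dev *pdev)
{
        struct msix_entry entries[FOO_NVEC];
        int i, rc;

        for (i = 0; i < FOO_NVEC; i++)
                entries[i].entry = i;	/* MSI-X table slot to use */

        rc = pci_enable_msix(pdev, entries, FOO_NVEC);
        if (rc)
                return rc;	/* < 0: error; > 0: fewer vectors available */

        /* entries[i].vector now holds the pirq to pass to request_irq(). */
        return 0;
}
#endif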
extern void pci_frontend_disable_msix(struct pci_dev* dev);
void pci_disable_msix(struct pci_dev* dev)
{
        int pos;
        u16 control;

        if (!pci_msi_enable)
                return;
        if (!dev)
                return;

#ifdef CONFIG_XEN_PCIDEV_FRONTEND
        if (!is_initial_xendomain()) {
                pci_frontend_disable_msix(dev);
                dev->irq = dev->irq_old;
                return;
        }
#endif

        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
        if (!pos)
                return;

        pci_read_config_word(dev, msi_control_reg(pos), &control);
        if (!(control & PCI_MSIX_FLAGS_ENABLE))
                return;

        msi_remove_pci_irq_vectors(dev);

        /* Disable MSI mode */
        disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
}

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug remove, when the device function is
 * hot-removed. All previously assigned MSI/MSI-X vectors allocated
 * for this device function are reclaimed to the unused state, so they
 * may be reused later.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
        unsigned long flags;
        struct msi_dev_list *msi_dev_entry;
        struct msi_pirq_entry *pirq_entry, *tmp;

        if (!pci_msi_enable || !dev)
                return;

        msi_dev_entry = get_msi_dev_pirq_list(dev);

        spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
        if (!list_empty(&msi_dev_entry->pirq_list_head))
        {
                printk(KERN_WARNING "msix pirqs for dev %02x:%02x:%01x were "
                       "not freed before being acquired again.\n",
                       dev->bus->number, PCI_SLOT(dev->devfn),
                       PCI_FUNC(dev->devfn));
                list_for_each_entry_safe(pirq_entry, tmp,
                                         &msi_dev_entry->pirq_list_head, list) {
                        msi_unmap_pirq(dev, pirq_entry->pirq);
                        list_del(&pirq_entry->list);
                        kfree(pirq_entry);
                }
        }
        spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
        dev->irq = dev->irq_old;
}

void pci_no_msi(void)
{
        pci_msi_enable = 0;
}

EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);
#ifdef CONFIG_XEN
EXPORT_SYMBOL(register_msi_get_owner);
EXPORT_SYMBOL(unregister_msi_get_owner);
#endif