ia64/linux-2.6.18-xen.hg

view drivers/pci/msi-xen.c @ 751:63a878f8851b

Fix buggy mask_base in saving/restoring MSI-X table during S3

Fix mask_base (actually MSI-X table base, copy name from native) to be
a virtual address rather than a physical address. And remove wrong
printk in pci_disable_msix.

Signed-off-by: Shan Haitao <haitao.shan@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Dec 02 11:54:47 2008 +0000 (2008-12-02)
parents 1b68d09b868f
children 77e3b255381e
line source
1 /*
2 * File: msi.c
3 * Purpose: PCI Message Signaled Interrupt (MSI)
4 *
5 * Copyright (C) 2003-2004 Intel
6 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
7 */
9 #include <linux/mm.h>
10 #include <linux/irq.h>
11 #include <linux/interrupt.h>
12 #include <linux/init.h>
13 #include <linux/ioport.h>
14 #include <linux/smp_lock.h>
15 #include <linux/pci.h>
16 #include <linux/proc_fs.h>
18 #include <xen/evtchn.h>
20 #include <asm/errno.h>
21 #include <asm/io.h>
22 #include <asm/smp.h>
24 #include "pci.h"
25 #include "msi.h"
/* Global MSI on/off switch; cleared by pci_no_msi() or the quirk check in msi_init(). */
static int pci_msi_enable = 1;

/* Arch-specific MSI operations table, installed via msi_register(). */
static struct msi_ops *msi_ops;
31 int msi_register(struct msi_ops *ops)
32 {
33 msi_ops = ops;
34 return 0;
35 }
/* Global list of per-device MSI state (struct msi_dev_list), guarded by msi_dev_lock. */
static LIST_HEAD(msi_dev_head);
DEFINE_SPINLOCK(msi_dev_lock);
/*
 * Per-device MSI/MSI-X bookkeeping; one entry per pci_dev, linked into
 * msi_dev_head (which is guarded by msi_dev_lock).
 */
struct msi_dev_list {
	struct pci_dev *dev;
	struct list_head list;
	/* Protects pirq_list_head below. */
	spinlock_t pirq_list_lock;
	/* List of struct msi_pirq_entry allocated for this device. */
	struct list_head pirq_list_head;
	/* Used for saving/restoring MSI-X tables */
	void __iomem *mask_base;
};
/* One allocated pirq for one MSI-X table entry (entry_nr) of a device. */
struct msi_pirq_entry {
	struct list_head list;
	int pirq;
	int entry_nr;
#ifdef CONFIG_PM
	/* PM save area for MSIX address/data */
	u32 address_hi_save;
	u32 address_lo_save;
	u32 data_save;
#endif
};
/*
 * Look up the msi_dev_list entry for @dev, allocating and registering a
 * new zeroed entry on first use.  Returns NULL only when the GFP_ATOMIC
 * allocation fails.  Takes msi_dev_lock, so it is safe from contexts
 * with interrupts disabled.
 */
static struct msi_dev_list *get_msi_dev_pirq_list(struct pci_dev *dev)
{
	struct msi_dev_list *msi_dev_list, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&msi_dev_lock, flags);

	list_for_each_entry(msi_dev_list, &msi_dev_head, list)
		if ( msi_dev_list->dev == dev )
			ret = msi_dev_list;

	if ( ret ) {
		spin_unlock_irqrestore(&msi_dev_lock, flags);
		return ret;
	}

	/* No msi_dev_list allocated for this device yet; create one now. */
	ret = kzalloc(sizeof(struct msi_dev_list), GFP_ATOMIC);

	/* Failed to allocate msi_dev structure */
	if ( !ret ) {
		spin_unlock_irqrestore(&msi_dev_lock, flags);
		return NULL;
	}

	ret->dev = dev;
	spin_lock_init(&ret->pirq_list_lock);
	INIT_LIST_HEAD(&ret->pirq_list_head);
	list_add_tail(&ret->list, &msi_dev_head);
	spin_unlock_irqrestore(&msi_dev_lock, flags);
	return ret;
}
94 static int attach_pirq_entry(int pirq, int entry_nr,
95 struct msi_dev_list *msi_dev_entry)
96 {
97 struct msi_pirq_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
98 unsigned long flags;
100 if (!entry)
101 return -ENOMEM;
102 entry->pirq = pirq;
103 entry->entry_nr = entry_nr;
104 spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
105 list_add_tail(&entry->list, &msi_dev_entry->pirq_list_head);
106 spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
107 return 0;
108 }
110 static void detach_pirq_entry(int entry_nr,
111 struct msi_dev_list *msi_dev_entry)
112 {
113 unsigned long flags;
114 struct msi_pirq_entry *pirq_entry;
116 list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
117 if (pirq_entry->entry_nr == entry_nr) {
118 spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
119 list_del(&pirq_entry->list);
120 spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
121 kfree(pirq_entry);
122 return;
123 }
124 }
125 }
/*
 * pciback will provide device's owner.
 * Callback returning the owning domid for @dev, or a negative value
 * when the device has no recorded foreign owner.
 */
static int (*get_owner)(struct pci_dev *dev);
132 int register_msi_get_owner(int (*func)(struct pci_dev *dev))
133 {
134 if (get_owner) {
135 printk(KERN_WARNING "register msi_get_owner again\n");
136 return -EEXIST;
137 }
138 get_owner = func;
139 return 0;
140 }
142 int unregister_msi_get_owner(int (*func)(struct pci_dev *dev))
143 {
144 if (get_owner != func)
145 return -EINVAL;
146 get_owner = NULL;
147 return 0;
148 }
/*
 * Return the domid owning @dev as reported by the pciback callback, or
 * DOMID_SELF when no callback is registered or it reports no owner.
 * Must only be called in the initial xen domain (dom0).
 */
static int msi_get_dev_owner(struct pci_dev *dev)
{
	int owner;

	BUG_ON(!is_initial_xendomain());
	if (get_owner && (owner = get_owner(dev)) >= 0) {
		printk(KERN_INFO "get owner for dev %x get %x \n",
		       dev->devfn, owner);
		return owner;
	}

	return DOMID_SELF;
}
/*
 * Tear down a pirq mapping via PHYSDEVOP_unmap_pirq and, for devices
 * owned by dom0 itself, also release the local irq<->pirq binding.
 * Returns 0 on success or the negative hypercall error.
 */
static int msi_unmap_pirq(struct pci_dev *dev, int pirq)
{
	struct physdev_unmap_pirq unmap;
	int rc;

	unmap.domid = msi_get_dev_owner(dev);
	/* See comments in msi_map_pirq_to_vector: the input parameter pirq
	 * is a Linux irq number only if the device belongs to dom0 itself.
	 */
	unmap.pirq = (unmap.domid != DOMID_SELF)
		? pirq : evtchn_get_xen_pirq(pirq);

	if ((rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap)))
		printk(KERN_WARNING "unmap irq %x failed\n", pirq);

	if (rc < 0)
		return rc;

	if (unmap.domid == DOMID_SELF)
		evtchn_map_pirq(pirq, 0);

	return 0;
}
/*
 * Return the physical address of the BAR that holds the device's MSI-X
 * table (selected by the BIR field of the table-offset register), or 0
 * when that BAR resource is disabled/unset/busy.
 *
 * NOTE(review): the table-offset portion of the register is not added
 * here, so this is the BAR base, not necessarily the table itself --
 * confirm the consumers (Xen and the ioremap in msix_capability_init)
 * account for a non-zero table offset.
 */
static u64 find_table_base(struct pci_dev *dev, int pos)
{
	u8 bar;
	u32 reg;
	unsigned long flags;

	pci_read_config_dword(dev, msix_table_offset_reg(pos), &reg);
	bar = reg & PCI_MSIX_FLAGS_BIRMASK;	/* BAR indicator (BIR) */

	flags = pci_resource_flags(dev, bar);
	if (flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET | IORESOURCE_BUSY))
		return 0;

	return pci_resource_start(dev, bar);
}
/*
 * Protected by msi_lock
 */
/*
 * Map (or re-map) an MSI/MSI-X vector for @dev through Xen's
 * PHYSDEVOP_map_pirq.  @pirq < 0 asks Xen to pick one; @entry_nr is the
 * MSI-X table entry (0 for plain MSI); @table_base is the MSI-X table's
 * physical base (0 for plain MSI).
 * Returns a negative errno on failure; on success, the Xen pirq when
 * the device is owned by another domain, or the local Linux irq when
 * the device belongs to dom0.
 */
static int msi_map_pirq_to_vector(struct pci_dev *dev, int pirq,
				  int entry_nr, u64 table_base)
{
	struct physdev_map_pirq map_irq;
	int rc;
	domid_t domid = DOMID_SELF;

	domid = msi_get_dev_owner(dev);

	map_irq.domid = domid;
	map_irq.type = MAP_PIRQ_TYPE_MSI;
	map_irq.index = -1;
	map_irq.pirq = pirq < 0 ? -1 : evtchn_get_xen_pirq(pirq);
	map_irq.bus = dev->bus->number;
	map_irq.devfn = dev->devfn;
	map_irq.entry_nr = entry_nr;
	map_irq.table_base = table_base;

	if ((rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq)))
		printk(KERN_WARNING "map irq failed\n");

	if (rc < 0)
		return rc;
	/* This happens when MSI support is not enabled in Xen. */
	if (rc == 0 && map_irq.pirq < 0)
		return -ENOSYS;

	BUG_ON(map_irq.pirq <= 0);

	/* If mapping of this particular MSI is on behalf of another domain,
	 * we do not need to get an irq in dom0. This also implies:
	 * dev->irq in dom0 will be 'Xen pirq' if this device belongs
	 * to another domain, and will be 'Linux irq' if it belongs to dom0.
	 */
	return ((domid != DOMID_SELF) ?
		map_irq.pirq : evtchn_map_pirq(pirq, map_irq.pirq));
}
/* Convenience wrapper: let Xen choose the pirq for @entry_nr. */
static int msi_map_vector(struct pci_dev *dev, int entry_nr, u64 table_base)
{
	return msi_map_pirq_to_vector(dev, -1, entry_nr, table_base);
}
250 static int msi_init(void)
251 {
252 static int status = 0;
254 if (pci_msi_quirk) {
255 pci_msi_enable = 0;
256 printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
257 status = -EINVAL;
258 }
260 return status;
261 }
/* Intentionally a no-op in the Xen MSI implementation. */
void pci_scan_msi_device(struct pci_dev *dev) { }
/*
 * Clear the MSI or MSI-X enable bit (@type selects which capability) in
 * config space and update the matching pci_dev flag; re-enables INTx on
 * PCI Express devices.
 */
void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Set enabled bits to single MSI & enable MSI_enable bit */
		msi_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msi_enabled = 0;
	} else {
		msix_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msix_enabled = 0;
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 1);	/* enable intx */
	}
}
/*
 * Mirror of disable_msi_mode(): set the MSI or MSI-X enable bit in
 * config space, update the matching pci_dev flag, and disable INTx on
 * PCI Express devices.
 */
static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Set enabled bits to single MSI & enable MSI_enable bit */
		msi_enable(control, 1);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msi_enabled = 1;
	} else {
		msix_enable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msix_enabled = 1;
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 0);	/* disable intx */
	}
}
307 #ifdef CONFIG_PM
/*
 * Save the MSI capability (control, address, data and optional mask
 * registers -- up to 5 dwords) into a pci_cap_saved_state attached to
 * the device, for replay on resume.  Returns 0 (also when MSI is
 * absent or disabled) or -ENOMEM.
 */
int pci_save_msi_state(struct pci_dev *dev)
{
	int pos, i = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos <= 0 || dev->no_msi)
		return 0;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return 0;

	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
		return -ENOMEM;
	}
	cap = &save_state->data[0];

	pci_read_config_dword(dev, pos, &cap[i++]);
	control = cap[0] >> 16;	/* message control is the upper half of dword 0 */
	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
	} else
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
	save_state->cap_nr = PCI_CAP_ID_MSI;
	pci_add_saved_cap(dev, save_state);
	return 0;
}
/*
 * Replay the MSI registers saved by pci_save_msi_state() on resume,
 * re-enable MSI mode, and release the saved state.  Silently returns
 * when nothing was saved or the capability is gone.
 */
void pci_restore_msi_state(struct pci_dev *dev)
{
	int i = 0, pos;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!save_state || pos <= 0)
		return;
	cap = &save_state->data[0];

	control = cap[i++] >> 16;	/* control was saved in the upper half of dword 0 */
	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
	} else
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}
374 int pci_save_msix_state(struct pci_dev *dev)
375 {
376 int pos;
377 u16 control;
378 struct pci_cap_saved_state *save_state;
379 unsigned long flags;
380 struct msi_dev_list *msi_dev_entry;
381 struct msi_pirq_entry *pirq_entry;
382 void __iomem *base;
384 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
385 if (pos <= 0 || dev->no_msi)
386 return 0;
388 /* save the capability */
389 pci_read_config_word(dev, msi_control_reg(pos), &control);
390 if (!(control & PCI_MSIX_FLAGS_ENABLE))
391 return 0;
393 msi_dev_entry = get_msi_dev_pirq_list(dev);
394 /* If we failed to map the MSI-X table at pci_enable_msix,
395 * We could not support saving them here.
396 */
397 if (!(base = msi_dev_entry->mask_base))
398 return -ENOMEM;
400 save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
401 GFP_KERNEL);
402 if (!save_state) {
403 printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
404 return -ENOMEM;
405 }
406 *((u16 *)&save_state->data[0]) = control;
408 spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
409 list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
410 int j;
412 /* save the table */
413 j = pirq_entry->entry_nr;
414 pirq_entry->address_lo_save =
415 readl(base + j * PCI_MSIX_ENTRY_SIZE +
416 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
417 pirq_entry->address_hi_save =
418 readl(base + j * PCI_MSIX_ENTRY_SIZE +
419 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
420 pirq_entry->data_save =
421 readl(base + j * PCI_MSIX_ENTRY_SIZE +
422 PCI_MSIX_ENTRY_DATA_OFFSET);
423 }
424 spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
426 save_state->cap_nr = PCI_CAP_ID_MSIX;
427 pci_add_saved_cap(dev, save_state);
428 return 0;
429 }
431 void pci_restore_msix_state(struct pci_dev *dev)
432 {
433 u16 save;
434 int pos, j;
435 void __iomem *base;
436 struct pci_cap_saved_state *save_state;
437 unsigned long flags;
438 struct msi_dev_list *msi_dev_entry;
439 struct msi_pirq_entry *pirq_entry;
441 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
442 if (!save_state)
443 return;
445 save = *((u16 *)&save_state->data[0]);
446 pci_remove_saved_cap(save_state);
447 kfree(save_state);
449 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
450 if (pos <= 0)
451 return;
453 msi_dev_entry = get_msi_dev_pirq_list(dev);
454 base = msi_dev_entry->mask_base;
456 spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
457 list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
458 /* route the table */
459 j = pirq_entry->entry_nr;
460 writel(pirq_entry->address_lo_save,
461 base + j * PCI_MSIX_ENTRY_SIZE +
462 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
463 writel(pirq_entry->address_hi_save,
464 base + j * PCI_MSIX_ENTRY_SIZE +
465 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
466 writel(pirq_entry->data_save,
467 base + j * PCI_MSIX_ENTRY_SIZE +
468 PCI_MSIX_ENTRY_DATA_OFFSET);
469 }
470 spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
472 pci_write_config_word(dev, msi_control_reg(pos), save);
473 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
474 }
475 #endif
/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of device function with a single
 * MSI vector, regardless of device function is capable of handling
 * multiple messages. A return of zero indicates the successful setup
 * of an entry zero with the new MSI vector or non-zero for otherwise.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	int pos, pirq;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	/* NOTE(review): control is read but never used in this function. */
	pci_read_config_word(dev, msi_control_reg(pos), &control);

	/* Entry 0, no MSI-X table: Xen picks the pirq for us. */
	pirq = msi_map_vector(dev, 0, 0);
	if (pirq < 0)
		return -EBUSY;

	dev->irq = pirq;
	/* Set MSI enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
	dev->msi_enabled = 1;	/* NOTE(review): already set by enable_msi_mode() */

	return 0;
}
/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of device function with a
 * single MSI-X vector. A return of zero indicates the successful setup of
 * requested MSI-X entries with allocated vectors or non-zero for otherwise.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	u64 table_base;
	u16 control;
	int pirq, i, j, mapped, pos, nr_entries;
	struct msi_dev_list *msi_dev_entry = get_msi_dev_pirq_list(dev);
	struct msi_pirq_entry *pirq_entry;

	if (!msi_dev_entry)
		return -ENOMEM;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	table_base = find_table_base(dev, pos);
	if (!table_base)
		return -ENODEV;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	/* Map the MSI-X table once so PM save/restore can read it.
	 * NOTE(review): an ioremap_nocache() failure is not checked here;
	 * pci_save_msix_state() tolerates a NULL mask_base by refusing to
	 * save -- confirm that is the intended degradation. */
	if (!msi_dev_entry->mask_base)
		msi_dev_entry->mask_base =
			ioremap_nocache(table_base, nr_entries * PCI_MSIX_ENTRY_SIZE);

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		mapped = 0;
		/* Reuse the pirq if this table entry is already mapped. */
		list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
			if (pirq_entry->entry_nr == entries[i].entry) {
				printk(KERN_WARNING "msix entry %d for dev %02x:%02x:%01x are \
not freed before acquire again.\n", entries[i].entry,
				       dev->bus->number, PCI_SLOT(dev->devfn),
				       PCI_FUNC(dev->devfn));
				(entries + i)->vector = pirq_entry->pirq;
				mapped = 1;
				break;
			}
		}
		if (mapped)
			continue;
		pirq = msi_map_vector(dev, entries[i].entry, table_base);
		if (pirq < 0)
			break;
		attach_pirq_entry(pirq, entries[i].entry, msi_dev_entry);
		(entries + i)->vector = pirq;
	}

	/* Partial failure: unwind every vector obtained in this call. */
	if (i != nvec) {
		for (j = --i; j >= 0; j--) {
			msi_unmap_pirq(dev, entries[j].vector);
			detach_pirq_entry(entries[j].entry, msi_dev_entry);
			entries[j].vector = 0;
		}
		return -EBUSY;
	}

	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
	dev->msix_enabled = 1;

	return 0;
}
/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of device function with
 * a single MSI vector upon its software driver call to request for
 * MSI mode enabled on its hardware device function. A return of zero
 * indicates the successful setup of an entry zero with the new MSI
 * vector or non-zero for otherwise.
 **/
extern int pci_frontend_enable_msi(struct pci_dev *dev);
int pci_enable_msi(struct pci_dev* dev)
{
	struct pci_bus *bus;
	int pos, temp, status = -EINVAL;

	if (!pci_msi_enable || !dev)
		return status;

	if (dev->no_msi)
		return status;

	/* MSI must be permitted on every bus up to the root. */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

#ifdef CONFIG_XEN_PCIDEV_FRONTEND
	/* DomU: delegate to pcifront, then bind the returned pirq locally. */
	if (!is_initial_xendomain())
	{
		int ret;

		temp = dev->irq;
		ret = pci_frontend_enable_msi(dev);
		if (ret)
			return ret;

		dev->irq = evtchn_map_pirq(-1, dev->irq);
		dev->irq_old = temp;	/* remember the pin-assertion irq */

		return ret;
	}
#endif

	temp = dev->irq;	/* saved so it can be restored on failure */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return -EINVAL;

	/* Check whether driver already requested for MSI-X vectors */
	if (dev->msix_enabled) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI. "
		       "Device already has MSI-X vectors assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}

	status = msi_capability_init(dev);
	if ( !status )
		dev->irq_old = temp;
	else
		dev->irq = temp;

	return status;
}
/*
 * Undo pci_enable_msi(): unmap the pirq, restore the saved
 * pin-assertion irq, and clear the MSI enable bit.
 */
extern void pci_frontend_disable_msi(struct pci_dev* dev);
void pci_disable_msi(struct pci_dev* dev)
{
	int pos;
	int pirq;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

#ifdef CONFIG_XEN_PCIDEV_FRONTEND
	/* DomU: release the local irq binding, then tell pcifront. */
	if (!is_initial_xendomain()) {
		evtchn_map_pirq(dev->irq, 0);
		pci_frontend_disable_msi(dev);
		dev->irq = dev->irq_old;
		return;
	}
#endif

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return;

	pirq = dev->irq;
	/* Restore dev->irq to its default pin-assertion vector */
	dev->irq = dev->irq_old;
	msi_unmap_pirq(dev, pirq);

	/* Disable MSI mode */
	disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
}
681 /**
682 * pci_enable_msix - configure device's MSI-X capability structure
683 * @dev: pointer to the pci_dev data structure of MSI-X device function
684 * @entries: pointer to an array of MSI-X entries
685 * @nvec: number of MSI-X vectors requested for allocation by device driver
686 *
687 * Setup the MSI-X capability structure of device function with the number
688 * of requested vectors upon its software driver call to request for
689 * MSI-X mode enabled on its hardware device function. A return of zero
690 * indicates the successful configuration of MSI-X capability structure
691 * with new allocated MSI-X vectors. A return of < 0 indicates a failure.
692 * Or a return of > 0 indicates that driver request is exceeding the number
693 * of vectors available. Driver should use the returned value to re-send
694 * its request.
695 **/
696 extern int pci_frontend_enable_msix(struct pci_dev *dev,
697 struct msix_entry *entries, int nvec);
698 int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
699 {
700 struct pci_bus *bus;
701 int status, pos, nr_entries;
702 int i, j, temp;
703 u16 control;
705 if (!pci_msi_enable || !dev || !entries)
706 return -EINVAL;
708 if (dev->no_msi)
709 return -EINVAL;
711 for (bus = dev->bus; bus; bus = bus->parent)
712 if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
713 return -EINVAL;
715 #ifdef CONFIG_XEN_PCIDEV_FRONTEND
716 if (!is_initial_xendomain()) {
717 struct msi_dev_list *msi_dev_entry;
718 struct msi_pirq_entry *pirq_entry;
719 int ret, irq;
721 ret = pci_frontend_enable_msix(dev, entries, nvec);
722 if (ret) {
723 printk("get %x from pci_frontend_enable_msix\n", ret);
724 return ret;
725 }
727 msi_dev_entry = get_msi_dev_pirq_list(dev);
728 for (i = 0; i < nvec; i++) {
729 int mapped = 0;
731 list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
732 if (pirq_entry->entry_nr == entries[i].entry) {
733 irq = pirq_entry->pirq;
734 BUG_ON(entries[i].vector != evtchn_get_xen_pirq(irq));
735 entries[i].vector = irq;
736 mapped = 1;
737 break;
738 }
739 }
740 if (mapped)
741 continue;
742 irq = evtchn_map_pirq(-1, entries[i].vector);
743 attach_pirq_entry(irq, entries[i].entry, msi_dev_entry);
744 entries[i].vector = irq;
745 }
746 return 0;
747 }
748 #endif
750 status = msi_init();
751 if (status < 0)
752 return status;
754 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
755 if (!pos)
756 return -EINVAL;
758 pci_read_config_word(dev, msi_control_reg(pos), &control);
759 nr_entries = multi_msix_capable(control);
760 if (nvec > nr_entries)
761 return -EINVAL;
763 /* Check for any invalid entries */
764 for (i = 0; i < nvec; i++) {
765 if (entries[i].entry >= nr_entries)
766 return -EINVAL; /* invalid entry */
767 for (j = i + 1; j < nvec; j++) {
768 if (entries[i].entry == entries[j].entry)
769 return -EINVAL; /* duplicate entry */
770 }
771 }
773 temp = dev->irq;
774 /* Check whether driver already requested for MSI vector */
775 if (dev->msi_enabled) {
776 printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
777 "Device already has an MSI vector assigned\n",
778 pci_name(dev));
779 dev->irq = temp;
780 return -EINVAL;
781 }
783 status = msix_capability_init(dev, entries, nvec);
785 if ( !status )
786 dev->irq_old = temp;
787 else
788 dev->irq = temp;
790 return status;
791 }
793 extern void pci_frontend_disable_msix(struct pci_dev* dev);
794 void pci_disable_msix(struct pci_dev* dev)
795 {
796 int pos;
797 u16 control;
800 if (!pci_msi_enable)
801 return;
802 if (!dev)
803 return;
805 #ifdef CONFIG_XEN_PCIDEV_FRONTEND
806 if (!is_initial_xendomain()) {
807 struct msi_dev_list *msi_dev_entry;
808 struct msi_pirq_entry *pirq_entry, *tmp;
810 pci_frontend_disable_msix(dev);
812 msi_dev_entry = get_msi_dev_pirq_list(dev);
813 list_for_each_entry_safe(pirq_entry, tmp,
814 &msi_dev_entry->pirq_list_head, list) {
815 evtchn_map_pirq(pirq_entry->pirq, 0);
816 list_del(&pirq_entry->list);
817 kfree(pirq_entry);
818 }
820 dev->irq = dev->irq_old;
821 return;
822 }
823 #endif
825 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
826 if (!pos)
827 return;
829 pci_read_config_word(dev, msi_control_reg(pos), &control);
830 if (!(control & PCI_MSIX_FLAGS_ENABLE))
831 return;
833 msi_remove_pci_irq_vectors(dev);
835 /* Disable MSI mode */
836 disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
837 }
839 /**
840 * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
841 * @dev: pointer to the pci_dev data structure of MSI(X) device function
842 *
843 * Being called during hotplug remove, from which the device function
844 * is hot-removed. All previous assigned MSI/MSI-X vectors, if
845 * allocated for this device function, are reclaimed to unused state,
846 * which may be used later on.
847 **/
848 void msi_remove_pci_irq_vectors(struct pci_dev* dev)
849 {
850 unsigned long flags;
851 struct msi_dev_list *msi_dev_entry;
852 struct msi_pirq_entry *pirq_entry, *tmp;
854 if (!pci_msi_enable || !dev)
855 return;
857 msi_dev_entry = get_msi_dev_pirq_list(dev);
859 spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
860 if (!list_empty(&msi_dev_entry->pirq_list_head))
861 list_for_each_entry_safe(pirq_entry, tmp,
862 &msi_dev_entry->pirq_list_head, list) {
863 msi_unmap_pirq(dev, pirq_entry->pirq);
864 list_del(&pirq_entry->list);
865 kfree(pirq_entry);
866 }
867 spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
868 iounmap(msi_dev_entry->mask_base);
869 msi_dev_entry->mask_base = NULL;
870 dev->irq = dev->irq_old;
871 }
873 void pci_no_msi(void)
874 {
875 pci_msi_enable = 0;
876 }
/* Public kernel API. */
EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);
#ifdef CONFIG_XEN
/* Xen-only hooks used by pciback to report device ownership. */
EXPORT_SYMBOL(register_msi_get_owner);
EXPORT_SYMBOL(unregister_msi_get_owner);
#endif