ia64/linux-2.6.18-xen.hg

view drivers/pci/msi-xen.c @ 738:1b68d09b868f

Save/restore PCI MSI state across S3.

Signed-off-by: Haitao Shan <haitao.shan@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Nov 24 14:15:58 2008 +0000 (2008-11-24)
parents c3a007862994
children 63a878f8851b
line source
1 /*
2 * File: msi.c
3 * Purpose: PCI Message Signaled Interrupt (MSI)
4 *
5 * Copyright (C) 2003-2004 Intel
6 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
7 */
9 #include <linux/mm.h>
10 #include <linux/irq.h>
11 #include <linux/interrupt.h>
12 #include <linux/init.h>
13 #include <linux/ioport.h>
14 #include <linux/smp_lock.h>
15 #include <linux/pci.h>
16 #include <linux/proc_fs.h>
18 #include <xen/evtchn.h>
20 #include <asm/errno.h>
21 #include <asm/io.h>
22 #include <asm/smp.h>
24 #include "pci.h"
25 #include "msi.h"
/* Non-zero while MSI support is globally enabled; cleared by pci_no_msi()
 * or by msi_init() when a chipset quirk is detected. */
static int pci_msi_enable = 1;

/* Architecture hook table installed via msi_register(). */
static struct msi_ops *msi_ops;
/* Install the architecture's MSI callback table.  Always returns 0. */
int msi_register(struct msi_ops *ops)
{
	msi_ops = ops;
	return 0;
}
/* Global list of per-device MSI bookkeeping records (struct msi_dev_list). */
static LIST_HEAD(msi_dev_head);
/* Protects msi_dev_head. */
DEFINE_SPINLOCK(msi_dev_lock);
/*
 * Per-device MSI bookkeeping: one node on the global msi_dev_head list,
 * carrying the device's list of mapped pirqs.
 */
struct msi_dev_list {
	struct pci_dev *dev;		/* device this record belongs to */
	struct list_head list;		/* link on msi_dev_head */
	spinlock_t pirq_list_lock;	/* protects pirq_list_head */
	struct list_head pirq_list_head;/* list of struct msi_pirq_entry */
};
/*
 * One mapped MSI/MSI-X vector of a device.
 */
struct msi_pirq_entry {
	struct list_head list;	/* link on msi_dev_list.pirq_list_head */
	int pirq;		/* pirq (or dom0 irq) backing this vector */
	int entry_nr;		/* MSI-X table entry number (0 for plain MSI) */
#ifdef CONFIG_PM
	/* PM save area for MSIX address/data */
	void __iomem *mask_base;
	u32 address_hi_save;
	u32 address_lo_save;
	u32 data_save;
#endif
};
60 static struct msi_dev_list *get_msi_dev_pirq_list(struct pci_dev *dev)
61 {
62 struct msi_dev_list *msi_dev_list, *ret = NULL;
63 unsigned long flags;
65 spin_lock_irqsave(&msi_dev_lock, flags);
67 list_for_each_entry(msi_dev_list, &msi_dev_head, list)
68 if ( msi_dev_list->dev == dev )
69 ret = msi_dev_list;
71 if ( ret ) {
72 spin_unlock_irqrestore(&msi_dev_lock, flags);
73 return ret;
74 }
76 /* Has not allocate msi_dev until now. */
77 ret = kzalloc(sizeof(struct msi_dev_list), GFP_ATOMIC);
79 /* Failed to allocate msi_dev structure */
80 if ( !ret ) {
81 spin_unlock_irqrestore(&msi_dev_lock, flags);
82 return NULL;
83 }
85 ret->dev = dev;
86 spin_lock_init(&ret->pirq_list_lock);
87 INIT_LIST_HEAD(&ret->pirq_list_head);
88 list_add_tail(&ret->list, &msi_dev_head);
89 spin_unlock_irqrestore(&msi_dev_lock, flags);
90 return ret;
91 }
93 static int attach_pirq_entry(int pirq, int entry_nr, u64 table_base,
94 struct msi_dev_list *msi_dev_entry)
95 {
96 struct msi_pirq_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
97 unsigned long flags;
99 if (!entry)
100 return -ENOMEM;
101 entry->pirq = pirq;
102 entry->entry_nr = entry_nr;
103 #ifdef COMFIG_PM
104 entry->mask_base = table_base;
105 #endif
106 spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
107 list_add_tail(&entry->list, &msi_dev_entry->pirq_list_head);
108 spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
109 return 0;
110 }
112 static void detach_pirq_entry(int entry_nr,
113 struct msi_dev_list *msi_dev_entry)
114 {
115 unsigned long flags;
116 struct msi_pirq_entry *pirq_entry;
118 list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
119 if (pirq_entry->entry_nr == entry_nr) {
120 spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
121 list_del(&pirq_entry->list);
122 spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
123 kfree(pirq_entry);
124 return;
125 }
126 }
127 }
/*
 * pciback will provide device's owner.
 * Callback returning the domid owning a passed-through device, or a
 * negative value when unknown; NULL until pciback registers it.
 */
static int (*get_owner)(struct pci_dev *dev);
134 int register_msi_get_owner(int (*func)(struct pci_dev *dev))
135 {
136 if (get_owner) {
137 printk(KERN_WARNING "register msi_get_owner again\n");
138 return -EEXIST;
139 }
140 get_owner = func;
141 return 0;
142 }
144 int unregister_msi_get_owner(int (*func)(struct pci_dev *dev))
145 {
146 if (get_owner != func)
147 return -EINVAL;
148 get_owner = NULL;
149 return 0;
150 }
152 static int msi_get_dev_owner(struct pci_dev *dev)
153 {
154 int owner;
156 BUG_ON(!is_initial_xendomain());
157 if (get_owner && (owner = get_owner(dev)) >= 0) {
158 printk(KERN_INFO "get owner for dev %x get %x \n",
159 dev->devfn, owner);
160 return owner;
161 }
163 return DOMID_SELF;
164 }
/*
 * Tear down a pirq mapping previously established via
 * PHYSDEVOP_map_pirq.  For a dom0-owned device @pirq is a Linux irq
 * and must be translated back to the Xen pirq first; for devices owned
 * by another domain it already is the Xen pirq.
 * Returns 0 on success or the negative hypercall error.
 */
static int msi_unmap_pirq(struct pci_dev *dev, int pirq)
{
	struct physdev_unmap_pirq unmap;
	int rc;

	unmap.domid = msi_get_dev_owner(dev);
	/* See comments in msi_map_pirq_to_vector, input parameter pirq
	 * mean irq number only if the device belongs to dom0 itself.
	 */
	unmap.pirq = (unmap.domid != DOMID_SELF)
		? pirq : evtchn_get_xen_pirq(pirq);

	if ((rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap)))
		printk(KERN_WARNING "unmap irq %x failed\n", pirq);

	if (rc < 0)
		return rc;

	/* Also drop the dom0-local irq <-> pirq association. */
	if (unmap.domid == DOMID_SELF)
		evtchn_map_pirq(pirq, 0);

	return 0;
}
/*
 * Return the bus address of the BAR holding the device's MSI-X table,
 * or 0 if that resource is unusable.
 * NOTE(review): only the BAR start is returned; the table-offset bits
 * of the register (reg & ~PCI_MSIX_FLAGS_BIRMASK) are not added here
 * -- presumably the hypervisor applies the offset itself; confirm.
 */
static u64 find_table_base(struct pci_dev *dev, int pos)
{
	u8 bar;
	u32 reg;
	unsigned long flags;

	/* Low bits of the table offset register select the BAR (BIR). */
	pci_read_config_dword(dev, msix_table_offset_reg(pos), &reg);
	bar = reg & PCI_MSIX_FLAGS_BIRMASK;

	flags = pci_resource_flags(dev, bar);
	if (flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET | IORESOURCE_BUSY))
		return 0;

	return pci_resource_start(dev, bar);
}
/*
 * Protected by msi_lock.
 *
 * Ask Xen (PHYSDEVOP_map_pirq) to map an MSI vector for @dev and
 * return the value the caller should use as an irq: the raw Xen pirq
 * when the device is owned by another domain, otherwise a dom0 Linux
 * irq obtained from evtchn_map_pirq().  Negative return means failure.
 */
static int msi_map_pirq_to_vector(struct pci_dev *dev, int pirq,
				  int entry_nr, u64 table_base)
{
	struct physdev_map_pirq map_irq;
	int rc;
	domid_t domid = DOMID_SELF;

	domid = msi_get_dev_owner(dev);

	map_irq.domid = domid;
	map_irq.type = MAP_PIRQ_TYPE_MSI;
	map_irq.index = -1;
	/* pirq < 0 lets Xen pick; otherwise translate Linux irq -> Xen pirq. */
	map_irq.pirq = pirq < 0 ? -1 : evtchn_get_xen_pirq(pirq);
	map_irq.bus = dev->bus->number;
	map_irq.devfn = dev->devfn;
	map_irq.entry_nr = entry_nr;
	map_irq.table_base = table_base;

	if ((rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq)))
		printk(KERN_WARNING "map irq failed\n");

	if (rc < 0)
		return rc;
	/* This happens when MSI support is not enabled in Xen. */
	if (rc == 0 && map_irq.pirq < 0)
		return -ENOSYS;

	BUG_ON(map_irq.pirq <= 0);

	/* If mapping of this particular MSI is on behalf of another domain,
	 * we do not need to get an irq in dom0. This also implies:
	 * dev->irq in dom0 will be 'Xen pirq' if this device belongs to
	 * to another domain, and will be 'Linux irq' if it belongs to dom0.
	 */
	return ((domid != DOMID_SELF) ?
		map_irq.pirq : evtchn_map_pirq(pirq, map_irq.pirq));
}
/* Map a brand-new vector (no existing pirq) for @entry_nr of @dev. */
static int msi_map_vector(struct pci_dev *dev, int entry_nr, u64 table_base)
{
	return msi_map_pirq_to_vector(dev, -1, entry_nr, table_base);
}
/*
 * One-time MSI sanity check.  The result is cached in a static local,
 * so a quirk-forced failure (-EINVAL) is sticky across later calls.
 */
static int msi_init(void)
{
	static int status = 0;

	if (pci_msi_quirk) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
		status = -EINVAL;
	}

	return status;
}
/* No per-device scan work is needed in the Xen MSI implementation. */
void pci_scan_msi_device(struct pci_dev *dev) { }
267 void disable_msi_mode(struct pci_dev *dev, int pos, int type)
268 {
269 u16 control;
271 pci_read_config_word(dev, msi_control_reg(pos), &control);
272 if (type == PCI_CAP_ID_MSI) {
273 /* Set enabled bits to single MSI & enable MSI_enable bit */
274 msi_disable(control);
275 pci_write_config_word(dev, msi_control_reg(pos), control);
276 dev->msi_enabled = 0;
277 } else {
278 msix_disable(control);
279 pci_write_config_word(dev, msi_control_reg(pos), control);
280 dev->msix_enabled = 0;
281 }
282 if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
283 /* PCI Express Endpoint device detected */
284 pci_intx(dev, 1); /* enable intx */
285 }
286 }
288 static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
289 {
290 u16 control;
292 pci_read_config_word(dev, msi_control_reg(pos), &control);
293 if (type == PCI_CAP_ID_MSI) {
294 /* Set enabled bits to single MSI & enable MSI_enable bit */
295 msi_enable(control, 1);
296 pci_write_config_word(dev, msi_control_reg(pos), control);
297 dev->msi_enabled = 1;
298 } else {
299 msix_enable(control);
300 pci_write_config_word(dev, msi_control_reg(pos), control);
301 dev->msix_enabled = 1;
302 }
303 if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
304 /* PCI Express Endpoint device detected */
305 pci_intx(dev, 0); /* disable intx */
306 }
307 }
#ifdef CONFIG_PM
/*
 * Snapshot the device's MSI capability (control word, address, data
 * and optional mask register) so it can be re-programmed after S3
 * resume.  Returns 0 (also when MSI is absent or disabled) or -ENOMEM.
 */
int pci_save_msi_state(struct pci_dev *dev)
{
	int pos, i = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos <= 0 || dev->no_msi)
		return 0;

	/* Nothing to save unless MSI is currently enabled. */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return 0;

	/* Up to 5 dwords: cap header, addr lo/hi, data, mask bits. */
	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
		return -ENOMEM;
	}
	cap = &save_state->data[0];

	pci_read_config_dword(dev, pos, &cap[i++]);
	control = cap[0] >> 16;	/* message control is the upper half */
	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
	} else
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
	save_state->cap_nr = PCI_CAP_ID_MSI;
	pci_add_saved_cap(dev, save_state);
	return 0;
}
/*
 * Re-program the MSI capability from the state captured by
 * pci_save_msi_state(), re-enable MSI, then free the saved state.
 */
void pci_restore_msi_state(struct pci_dev *dev)
{
	int i = 0, pos;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!save_state || pos <= 0)
		return;
	cap = &save_state->data[0];

	control = cap[i++] >> 16;	/* control was saved in the cap header */
	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
	} else
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}
/*
 * Save the MSI-X control word and, for every mapped vector on the
 * device's pirq list, the address/data words of its MSI-X table entry,
 * so the table can be restored after S3 resume.
 * NOTE(review): mask_base originates from find_table_base(), i.e. a
 * bus/physical address, yet readl() is applied to it below -- looks
 * like it needs an ioremap'd virtual address; confirm.
 */
int pci_save_msix_state(struct pci_dev *dev)
{
	int pos;
	u16 control;
	struct pci_cap_saved_state *save_state;
	unsigned long flags;
	struct msi_dev_list *msi_dev_entry;
	struct msi_pirq_entry *pirq_entry;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0 || dev->no_msi)
		return 0;

	/* NOTE(review): KERN_CRIT looks like leftover debug logging. */
	printk(KERN_CRIT "Saving MSIX cap\n");

	/* save the capability */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return 0;
	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
		return -ENOMEM;
	}
	*((u16 *)&save_state->data[0]) = control;

	msi_dev_entry = get_msi_dev_pirq_list(dev);

	spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
	list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
		int j;
		void __iomem *base;

		/* save the table */
		base = pirq_entry->mask_base;
		j = pirq_entry->entry_nr;
		printk(KERN_CRIT "Save msix table entry %d pirq %x base %p\n",
		       j, pirq_entry->pirq, base);

		pirq_entry->address_lo_save =
			readl(base + j * PCI_MSIX_ENTRY_SIZE +
			      PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		pirq_entry->address_hi_save =
			readl(base + j * PCI_MSIX_ENTRY_SIZE +
			      PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		pirq_entry->data_save =
			readl(base + j * PCI_MSIX_ENTRY_SIZE +
			      PCI_MSIX_ENTRY_DATA_OFFSET);
	}
	spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);

	save_state->cap_nr = PCI_CAP_ID_MSIX;
	pci_add_saved_cap(dev, save_state);
	return 0;
}
/*
 * Counterpart of pci_save_msix_state(): write the saved address/data
 * words back into each vector's MSI-X table entry, restore the control
 * register and re-enable MSI-X.
 */
void pci_restore_msix_state(struct pci_dev *dev)
{
	u16 save;
	int pos, j;
	void __iomem *base;
	struct pci_cap_saved_state *save_state;
	unsigned long flags;
	struct msi_dev_list *msi_dev_entry;
	struct msi_pirq_entry *pirq_entry;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
	if (!save_state)
		return;
	/* NOTE(review): KERN_CRIT looks like leftover debug logging. */
	printk(KERN_CRIT "Restoring MSIX cap\n");

	save = *((u16 *)&save_state->data[0]);
	pci_remove_saved_cap(save_state);
	kfree(save_state);

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0)
		return;

	msi_dev_entry = get_msi_dev_pirq_list(dev);

	spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
	list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
		/* route the table */
		base = pirq_entry->mask_base;
		j = pirq_entry->entry_nr;

		printk(KERN_CRIT "Restore msix table entry %d pirq %x base %p\n",
		       j, pirq_entry->pirq, base);
		writel(pirq_entry->address_lo_save,
		       base + j * PCI_MSIX_ENTRY_SIZE +
		       PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(pirq_entry->address_hi_save,
		       base + j * PCI_MSIX_ENTRY_SIZE +
		       PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(pirq_entry->data_save,
		       base + j * PCI_MSIX_ENTRY_SIZE +
		       PCI_MSIX_ENTRY_DATA_OFFSET);
	}
	spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);

	pci_write_config_word(dev, msi_control_reg(pos), save);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
}
#endif
483 /**
484 * msi_capability_init - configure device's MSI capability structure
485 * @dev: pointer to the pci_dev data structure of MSI device function
486 *
487 * Setup the MSI capability structure of device function with a single
488 * MSI vector, regardless of device function is capable of handling
489 * multiple messages. A return of zero indicates the successful setup
490 * of an entry zero with the new MSI vector or non-zero for otherwise.
491 **/
492 static int msi_capability_init(struct pci_dev *dev)
493 {
494 int pos, pirq;
495 u16 control;
497 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
498 pci_read_config_word(dev, msi_control_reg(pos), &control);
500 pirq = msi_map_vector(dev, 0, 0);
501 if (pirq < 0)
502 return -EBUSY;
504 dev->irq = pirq;
505 /* Set MSI enabled bits */
506 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
507 dev->msi_enabled = 1;
509 return 0;
510 }
/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of device function with a
 * single MSI-X vector. A return of zero indicates the successful setup of
 * requested MSI-X entries with allocated vectors or non-zero for otherwise.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	u64 table_base;
	int pirq, i, j, mapped, pos;
	struct msi_dev_list *msi_dev_entry = get_msi_dev_pirq_list(dev);
	struct msi_pirq_entry *pirq_entry;

	if (!msi_dev_entry)
		return -ENOMEM;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	table_base = find_table_base(dev, pos);
	if (!table_base)
		return -ENODEV;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		/* Reuse a still-attached mapping for this entry, if any. */
		mapped = 0;
		list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
			if (pirq_entry->entry_nr == entries[i].entry) {
				printk(KERN_WARNING "msix entry %d for dev %02x:%02x:%01x are \
not freed before acquire again.\n", entries[i].entry,
				       dev->bus->number, PCI_SLOT(dev->devfn),
				       PCI_FUNC(dev->devfn));
				(entries + i)->vector = pirq_entry->pirq;
				mapped = 1;
				break;
			}
		}
		if (mapped)
			continue;
		/* Ask Xen for a new pirq backing this table entry.
		 * NOTE(review): attach_pirq_entry() can fail (-ENOMEM);
		 * its return value is ignored here. */
		pirq = msi_map_vector(dev, entries[i].entry, table_base);
		if (pirq < 0)
			break;
		attach_pirq_entry(pirq, entries[i].entry, table_base, msi_dev_entry);
		(entries + i)->vector = pirq;
	}

	/* Partial success: roll back everything mapped so far. */
	if (i != nvec) {
		for (j = --i; j >= 0; j--) {
			msi_unmap_pirq(dev, entries[j].vector);
			detach_pirq_entry(entries[j].entry, msi_dev_entry);
			entries[j].vector = 0;
		}
		return -EBUSY;
	}

	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
	dev->msix_enabled = 1;

	return 0;
}
/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of device function with
 * a single MSI vector upon its software driver call to request for
 * MSI mode enabled on its hardware device function. A return of zero
 * indicates the successful setup of an entry zero with the new MSI
 * vector or non-zero for otherwise.
 **/
extern int pci_frontend_enable_msi(struct pci_dev *dev);
int pci_enable_msi(struct pci_dev* dev)
{
	struct pci_bus *bus;
	int pos, temp, status = -EINVAL;

	if (!pci_msi_enable || !dev)
		return status;

	if (dev->no_msi)
		return status;

	/* Any bridge on the path to the root may veto MSI. */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

#ifdef CONFIG_XEN_PCIDEV_FRONTEND
	if (!is_initial_xendomain())
	{
		int ret;

		temp = dev->irq;
		/* Ask the backend (dom0) to enable MSI on our behalf. */
		ret = pci_frontend_enable_msi(dev);
		if (ret)
			return ret;

		/* Bind the Xen pirq returned by the backend to a local irq. */
		dev->irq = evtchn_map_pirq(-1, dev->irq);
		dev->irq_old = temp;

		return ret;
	}
#endif

	temp = dev->irq;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return -EINVAL;

	/* Check whether driver already requested for MSI-X vectors */
	if (dev->msix_enabled) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI. "
			"Device already has MSI-X vectors assigned\n",
			pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}

	status = msi_capability_init(dev);
	if ( !status )
		dev->irq_old = temp;	/* keep pin irq for later restore */
	else
		dev->irq = temp;

	return status;
}
/*
 * Disable MSI for @dev: in a frontend domain the backend is asked to
 * disable it; in dom0 the pirq is unmapped and the capability's enable
 * bit cleared.  dev->irq is restored to the pre-MSI value in all paths.
 */
extern void pci_frontend_disable_msi(struct pci_dev* dev);
void pci_disable_msi(struct pci_dev* dev)
{
	int pos;
	int pirq;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

#ifdef CONFIG_XEN_PCIDEV_FRONTEND
	if (!is_initial_xendomain()) {
		evtchn_map_pirq(dev->irq, 0);	/* release local irq binding */
		pci_frontend_disable_msi(dev);
		dev->irq = dev->irq_old;
		return;
	}
#endif

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return;

	pirq = dev->irq;
	/* Restore dev->irq to its default pin-assertion vector */
	dev->irq = dev->irq_old;
	msi_unmap_pirq(dev, pirq);

	/* Disable MSI mode */
	disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
}
/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X vectors requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of device function with the number
 * of requested vectors upon its software driver call to request for
 * MSI-X mode enabled on its hardware device function. A return of zero
 * indicates the successful configuration of MSI-X capability structure
 * with new allocated MSI-X vectors. A return of < 0 indicates a failure.
 * Or a return of > 0 indicates that driver request is exceeding the number
 * of vectors available. Driver should use the returned value to re-send
 * its request.
 **/
extern int pci_frontend_enable_msix(struct pci_dev *dev,
		struct msix_entry *entries, int nvec);
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	struct pci_bus *bus;
	int status, pos, nr_entries;
	int i, j, temp;
	u16 control;

	if (!pci_msi_enable || !dev || !entries)
		return -EINVAL;

	if (dev->no_msi)
		return -EINVAL;

	/* Any bridge on the path to the root may veto MSI. */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

#ifdef CONFIG_XEN_PCIDEV_FRONTEND
	if (!is_initial_xendomain()) {
		struct msi_dev_list *msi_dev_entry;
		struct msi_pirq_entry *pirq_entry;
		int ret, irq;

		/* Backend (dom0) performs the real MSI-X setup for us. */
		ret = pci_frontend_enable_msix(dev, entries, nvec);
		if (ret) {
			printk("get %x from pci_frontend_enable_msix\n", ret);
			return ret;
		}

		/* Bind each returned Xen pirq to a local irq, reusing any
		 * binding recorded on the per-device list. */
		msi_dev_entry = get_msi_dev_pirq_list(dev);
		for (i = 0; i < nvec; i++) {
			int mapped = 0;

			list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
				if (pirq_entry->entry_nr == entries[i].entry) {
					irq = pirq_entry->pirq;
					BUG_ON(entries[i].vector != evtchn_get_xen_pirq(irq));
					entries[i].vector = irq;
					mapped = 1;
					break;
				}
			}
			if (mapped)
				continue;
			irq = evtchn_map_pirq(-1, entries[i].vector);
			/* NOTE(review): attach_pirq_entry() may fail with
			 * -ENOMEM; the result is ignored here. */
			attach_pirq_entry(irq, entries[i].entry, 0, msi_dev_entry);
			entries[i].vector = irq;
		}
		return 0;
	}
#endif

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return -EINVAL;

	/* The table size field bounds how many entries may be requested. */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;	/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}

	temp = dev->irq;
	/* Check whether driver already requested for MSI vector */
	if (dev->msi_enabled) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
		       "Device already has an MSI vector assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}

	status = msix_capability_init(dev, entries, nvec);

	if ( !status )
		dev->irq_old = temp;	/* keep pin irq for later restore */
	else
		dev->irq = temp;

	return status;
}
/*
 * Disable MSI-X for @dev.  Frontend domains delegate to the backend
 * and release their local irq bindings; dom0 unmaps every vector via
 * msi_remove_pci_irq_vectors() and clears the enable bit.
 */
extern void pci_frontend_disable_msix(struct pci_dev* dev);
void pci_disable_msix(struct pci_dev* dev)
{
	int pos;
	u16 control;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

#ifdef CONFIG_XEN_PCIDEV_FRONTEND
	if (!is_initial_xendomain()) {
		struct msi_dev_list *msi_dev_entry;
		struct msi_pirq_entry *pirq_entry, *tmp;

		pci_frontend_disable_msix(dev);

		/* Unbind and discard every vector we had mapped.
		 * NOTE(review): the list is walked without taking
		 * pirq_list_lock -- verify no concurrent attach can race. */
		msi_dev_entry = get_msi_dev_pirq_list(dev);
		list_for_each_entry_safe(pirq_entry, tmp,
					 &msi_dev_entry->pirq_list_head, list) {
			evtchn_map_pirq(pirq_entry->pirq, 0);
			list_del(&pirq_entry->list);
			kfree(pirq_entry);
		}

		dev->irq = dev->irq_old;
		return;
	}
#endif

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return;

	/* Nothing to do unless MSI-X is currently enabled. */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return;

	msi_remove_pci_irq_vectors(dev);

	/* Disable MSI mode */
	disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
}
/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Being called during hotplug remove, from which the device function
 * is hot-removed. All previous assigned MSI/MSI-X vectors, if
 * allocated for this device function, are reclaimed to unused state,
 * which may be used later on.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	unsigned long flags;
	struct msi_dev_list *msi_dev_entry;
	struct msi_pirq_entry *pirq_entry, *tmp;

	if (!pci_msi_enable || !dev)
		return;

	msi_dev_entry = get_msi_dev_pirq_list(dev);

	spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
	if (!list_empty(&msi_dev_entry->pirq_list_head))
	{
		/* Driver did not clean up: unmap every leftover vector.
		 * NOTE(review): msi_unmap_pirq() issues a hypercall while
		 * this spinlock is held with irqs off -- confirm that is
		 * acceptable here. */
		printk(KERN_WARNING "msix pirqs for dev %02x:%02x:%01x are not freed \
before acquire again.\n", dev->bus->number, PCI_SLOT(dev->devfn),
		       PCI_FUNC(dev->devfn));
		list_for_each_entry_safe(pirq_entry, tmp,
					 &msi_dev_entry->pirq_list_head, list) {
			msi_unmap_pirq(dev, pirq_entry->pirq);
			list_del(&pirq_entry->list);
			kfree(pirq_entry);
		}
	}
	spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
	dev->irq = dev->irq_old;	/* fall back to pin-assertion irq */
}
/* Globally disable MSI support (called from quirk/boot-time code). */
void pci_no_msi(void)
{
	pci_msi_enable = 0;
}
/* Public MSI API exported to drivers. */
EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);
#ifdef CONFIG_XEN
/* Hooks used by pciback to report pass-through device ownership. */
EXPORT_SYMBOL(register_msi_get_owner);
EXPORT_SYMBOL(unregister_msi_get_owner);
#endif