ia64/xen-unstable

view xen/arch/x86/msi.c @ 18806:ed8524f4a044

x86: Re-initialise HPET on resume from S3

Signed-off-by: Guanqun Lu <guanqun.lu@intel.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Nov 18 15:55:14 2008 +0000 (2008-11-18)
parents a0910b1b5ec0
children 6468257e9e62
line source
1 /*
2 * File: msi.c
3 * Purpose: PCI Message Signaled Interrupt (MSI)
4 *
5 * Copyright (C) 2003-2004 Intel
6 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
7 */
9 #include <xen/config.h>
10 #include <xen/lib.h>
11 #include <xen/init.h>
12 #include <xen/irq.h>
13 #include <xen/delay.h>
14 #include <xen/sched.h>
15 #include <xen/acpi.h>
16 #include <xen/errno.h>
17 #include <xen/pci.h>
18 #include <xen/pci_regs.h>
19 #include <xen/keyhandler.h>
20 #include <asm/io.h>
21 #include <asm/smp.h>
22 #include <asm/desc.h>
23 #include <asm/msi.h>
24 #include <asm/fixmap.h>
25 #include <mach_apic.h>
26 #include <io_ports.h>
27 #include <public/physdev.h>
28 #include <xen/iommu.h>
/* bitmap indicate which fixed map is free */
/* Protects msix_fixmap_pages; a set bit means the slot is allocated. */
DEFINE_SPINLOCK(msix_fixmap_lock);
DECLARE_BITMAP(msix_fixmap_pages, MAX_MSIX_PAGES);
34 static int msix_fixmap_alloc(void)
35 {
36 int i, rc = -1;
38 spin_lock(&msix_fixmap_lock);
39 for ( i = 0; i < MAX_MSIX_PAGES; i++ )
40 if ( !test_bit(i, &msix_fixmap_pages) )
41 break;
42 if ( i == MAX_MSIX_PAGES )
43 goto out;
44 rc = FIX_MSIX_IO_RESERV_BASE + i;
45 set_bit(i, &msix_fixmap_pages);
47 out:
48 spin_unlock(&msix_fixmap_lock);
49 return rc;
50 }
52 static void msix_fixmap_free(int idx)
53 {
54 if ( idx >= FIX_MSIX_IO_RESERV_BASE )
55 clear_bit(idx - FIX_MSIX_IO_RESERV_BASE, &msix_fixmap_pages);
56 }
/*
 * MSI message composition
 */
/*
 * Build the MSI address/data pair delivering @vector to the default
 * CPU set (TARGET_CPUS).  A zero vector leaves *msg untouched.
 * NOTE(review): @pdev is unused in this function.
 */
static void msi_compose_msg(struct pci_dev *pdev, int vector,
                            struct msi_msg *msg)
{
    unsigned dest;
    cpumask_t tmp;

    tmp = TARGET_CPUS;
    if ( vector )
    {
        dest = cpu_mask_to_apicid(tmp);

        msg->address_hi = MSI_ADDR_BASE_HI;
        msg->address_lo =
            MSI_ADDR_BASE_LO |
            ((INT_DEST_MODE == 0) ?
             MSI_ADDR_DESTMODE_PHYS:
             MSI_ADDR_DESTMODE_LOGIC) |
            /* Redirection hint only for lowest-priority delivery. */
            ((INT_DELIVERY_MODE != dest_LowestPrio) ?
             MSI_ADDR_REDIRECTION_CPU:
             MSI_ADDR_REDIRECTION_LOWPRI) |
            MSI_ADDR_DEST_ID(dest);

        msg->data =
            MSI_DATA_TRIGGER_EDGE |
            MSI_DATA_LEVEL_ASSERT |
            ((INT_DELIVERY_MODE != dest_LowestPrio) ?
             MSI_DATA_DELIVERY_FIXED:
             MSI_DATA_DELIVERY_LOWPRI) |
            MSI_DATA_VECTOR(vector);
    }
}
/*
 * Read the currently programmed message for @entry into *msg: from
 * config space for plain MSI, or from the mapped table entry for
 * MSI-X.  When VT-d is enabled the remapped message is translated
 * back via msi_msg_read_remap_rte().
 */
static void read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
    switch ( entry->msi_attrib.type )
    {
    case PCI_CAP_ID_MSI:
    {
        struct pci_dev *dev = entry->dev;
        int pos = entry->msi_attrib.pos;
        u16 data;
        u8 bus = dev->bus;
        u8 slot = PCI_SLOT(dev->devfn);
        u8 func = PCI_FUNC(dev->devfn);

        msg->address_lo = pci_conf_read32(bus, slot, func,
                                          msi_lower_address_reg(pos));
        /* The data register offset depends on 64-bit address support. */
        if ( entry->msi_attrib.is_64 )
        {
            msg->address_hi = pci_conf_read32(bus, slot, func,
                                              msi_upper_address_reg(pos));
            data = pci_conf_read16(bus, slot, func, msi_data_reg(pos, 1));
        }
        else
        {
            msg->address_hi = 0;
            data = pci_conf_read16(bus, slot, func, msi_data_reg(pos, 0));
        }
        msg->data = data;
        break;
    }
    case PCI_CAP_ID_MSIX:
    {
        void __iomem *base;
        base = entry->mask_base +
            entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

        msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
        msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
        msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
        break;
    }
    default:
        BUG();
    }

    if ( vtd_enabled )
        msi_msg_read_remap_rte(entry, msg);
}
/*
 * Record @entry as the MSI descriptor for its vector in irq_desc[].
 * Returns 0 on success, -EINVAL for an out-of-range vector.
 */
static int set_vector_msi(struct msi_desc *entry)
{
    if ( entry->vector >= NR_VECTORS )
    {
        dprintk(XENLOG_ERR, "Trying to install msi data for Vector %d\n",
                entry->vector);
        return -EINVAL;
    }

    irq_desc[entry->vector].msi_desc = entry;
    return 0;
}
/*
 * Clear the MSI descriptor binding for @vector in irq_desc[].
 * Returns 0 on success, -EINVAL for an out-of-range vector.
 */
static int unset_vector_msi(int vector)
{
    if ( vector >= NR_VECTORS )
    {
        dprintk(XENLOG_ERR, "Trying to uninstall msi data for Vector %d\n",
                vector);
        return -EINVAL;
    }

    irq_desc[vector].msi_desc = NULL;
    return 0;
}
/*
 * Program *msg into @entry's device — config space for plain MSI, the
 * mapped table entry for MSI-X — translating the message through the
 * IOMMU interrupt remapping layer first when enabled.  The (possibly
 * remapped) message is cached in entry->msg.
 */
static void write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
    if ( iommu_enabled )
        iommu_update_ire_from_msi(entry, msg);

    switch ( entry->msi_attrib.type )
    {
    case PCI_CAP_ID_MSI:
    {
        struct pci_dev *dev = entry->dev;
        int pos = entry->msi_attrib.pos;
        u8 bus = dev->bus;
        u8 slot = PCI_SLOT(dev->devfn);
        u8 func = PCI_FUNC(dev->devfn);

        pci_conf_write32(bus, slot, func, msi_lower_address_reg(pos),
                         msg->address_lo);
        /* Data register offset depends on 64-bit address support. */
        if ( entry->msi_attrib.is_64 )
        {
            pci_conf_write32(bus, slot, func, msi_upper_address_reg(pos),
                             msg->address_hi);
            pci_conf_write16(bus, slot, func, msi_data_reg(pos, 1),
                             msg->data);
        }
        else
            pci_conf_write16(bus, slot, func, msi_data_reg(pos, 0),
                             msg->data);
        break;
    }
    case PCI_CAP_ID_MSIX:
    {
        void __iomem *base;
        base = entry->mask_base +
            entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

        writel(msg->address_lo,
               base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
        writel(msg->address_hi,
               base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
        writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
        break;
    }
    default:
        BUG();
    }
    entry->msg = *msg;
}
215 void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
216 {
217 struct msi_desc *desc = irq_desc[irq].msi_desc;
218 struct msi_msg msg;
219 unsigned int dest;
221 memset(&msg, 0, sizeof(msg));
222 cpus_and(mask, mask, cpu_online_map);
223 if ( cpus_empty(mask) )
224 mask = TARGET_CPUS;
225 dest = cpu_mask_to_apicid(mask);
227 if ( !desc )
228 return;
230 ASSERT(spin_is_locked(&irq_desc[irq].lock));
231 spin_lock(&desc->dev->lock);
232 read_msi_msg(desc, &msg);
234 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
235 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
237 write_msi_msg(desc, &msg);
238 spin_unlock(&desc->dev->lock);
239 }
241 static void msi_set_enable(struct pci_dev *dev, int enable)
242 {
243 int pos;
244 u16 control;
245 u8 bus = dev->bus;
246 u8 slot = PCI_SLOT(dev->devfn);
247 u8 func = PCI_FUNC(dev->devfn);
249 pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSI);
250 if ( pos )
251 {
252 control = pci_conf_read16(bus, slot, func, pos + PCI_MSI_FLAGS);
253 control &= ~PCI_MSI_FLAGS_ENABLE;
254 if ( enable )
255 control |= PCI_MSI_FLAGS_ENABLE;
256 pci_conf_write16(bus, slot, func, pos + PCI_MSI_FLAGS, control);
257 }
258 }
260 static void msix_set_enable(struct pci_dev *dev, int enable)
261 {
262 int pos;
263 u16 control;
264 u8 bus = dev->bus;
265 u8 slot = PCI_SLOT(dev->devfn);
266 u8 func = PCI_FUNC(dev->devfn);
268 pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSIX);
269 if ( pos )
270 {
271 control = pci_conf_read16(bus, slot, func, pos + PCI_MSIX_FLAGS);
272 control &= ~PCI_MSIX_FLAGS_ENABLE;
273 if ( enable )
274 control |= PCI_MSIX_FLAGS_ENABLE;
275 pci_conf_write16(bus, slot, func, pos + PCI_MSIX_FLAGS, control);
276 }
277 }
/*
 * Force posted writes to @irq's MSI-X table entry to complete by
 * reading back the vector control word.  Plain MSI needs nothing:
 * config-space writes are not posted.
 */
static void msix_flush_writes(unsigned int irq)
{
    struct msi_desc *entry = irq_desc[irq].msi_desc;

    BUG_ON(!entry || !entry->dev);
    switch (entry->msi_attrib.type) {
    case PCI_CAP_ID_MSI:
        /* nothing to do */
        break;
    case PCI_CAP_ID_MSIX:
    {
        int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
            PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
        readl(entry->mask_base + offset);
        break;
    }
    default:
        BUG();
        break;
    }
}
301 int msi_maskable_irq(const struct msi_desc *entry)
302 {
303 BUG_ON(!entry);
304 return entry->msi_attrib.type != PCI_CAP_ID_MSI
305 || entry->msi_attrib.maskbit;
306 }
/*
 * Set (@flag != 0) or clear the hardware mask for @irq.  For maskable
 * MSI, mask_base holds the config-space offset of the mask-bits
 * register (not a pointer) and bit 0 is flipped; for MSI-X the table
 * entry's vector control word is written and read back to flush.
 * Caller must hold irq_desc[irq].lock.
 */
static void msi_set_mask_bit(unsigned int irq, int flag)
{
    struct msi_desc *entry = irq_desc[irq].msi_desc;

    ASSERT(spin_is_locked(&irq_desc[irq].lock));
    BUG_ON(!entry || !entry->dev);
    switch (entry->msi_attrib.type) {
    case PCI_CAP_ID_MSI:
        if (entry->msi_attrib.maskbit) {
            int pos;
            u32 mask_bits;
            u8 bus = entry->dev->bus;
            u8 slot = PCI_SLOT(entry->dev->devfn);
            u8 func = PCI_FUNC(entry->dev->devfn);

            /* mask_base doubles as the mask-bits register offset here. */
            pos = (long)entry->mask_base;
            mask_bits = pci_conf_read32(bus, slot, func, pos);
            mask_bits &= ~(1);
            mask_bits |= flag;
            pci_conf_write32(bus, slot, func, pos, mask_bits);
        }
        break;
    case PCI_CAP_ID_MSIX:
    {
        int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
            PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
        writel(flag, entry->mask_base + offset);
        readl(entry->mask_base + offset);
        break;
    }
    default:
        BUG();
        break;
    }
    entry->msi_attrib.masked = !!flag;
}
/* Mask @irq at the device and flush the write (MSI-X only needs it). */
void mask_msi_irq(unsigned int irq)
{
    msi_set_mask_bit(irq, 1);
    msix_flush_writes(irq);
}
/* Unmask @irq at the device and flush the write (MSI-X only needs it). */
void unmask_msi_irq(unsigned int irq)
{
    msi_set_mask_bit(irq, 0);
    msix_flush_writes(irq);
}
357 static struct msi_desc* alloc_msi_entry(void)
358 {
359 struct msi_desc *entry;
361 entry = xmalloc(struct msi_desc);
362 if ( !entry )
363 return NULL;
365 INIT_LIST_HEAD(&entry->list);
366 entry->dev = NULL;
367 entry->remap_index = -1;
369 return entry;
370 }
372 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
373 {
374 struct msi_msg msg;
376 msi_compose_msg(dev, desc->vector, &msg);
377 set_vector_msi(desc);
378 write_msi_msg(irq_desc[desc->vector].msi_desc, &msg);
380 return 0;
381 }
/* Detach the MSI descriptor from @vector's irq_desc slot. */
static void teardown_msi_vector(int vector)
{
    unset_vector_msi(vector);
}
/*
 * Release @vector's MSI descriptor: unbind it from irq_desc[]; for
 * MSI-X additionally mask the table entry, free the fixmap slot and
 * destroy the mapping of the table page; finally unlink and free the
 * descriptor.  Caller must hold irq_desc[vector].lock.
 */
static void msi_free_vector(int vector)
{
    struct msi_desc *entry;

    ASSERT(spin_is_locked(&irq_desc[vector].lock));
    entry = irq_desc[vector].msi_desc;
    teardown_msi_vector(vector);

    if ( entry->msi_attrib.type == PCI_CAP_ID_MSIX )
    {
        unsigned long start;

        /* Mask the entry before tearing down its table page mapping. */
        writel(1, entry->mask_base + entry->msi_attrib.entry_nr
               * PCI_MSIX_ENTRY_SIZE
               + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

        /* mask_base points into a single fixmapped page; recover it. */
        start = (unsigned long)entry->mask_base & ~(PAGE_SIZE - 1);
        msix_fixmap_free(virt_to_fix(start));
        destroy_xen_mappings(start, start + PAGE_SIZE);
    }
    list_del(&entry->list);
    xfree(entry);
}
412 static struct msi_desc *find_msi_entry(struct pci_dev *dev,
413 int vector, int cap_id)
414 {
415 struct msi_desc *entry;
417 list_for_each_entry( entry, &dev->msi_list, list )
418 {
419 if ( entry->msi_attrib.type == cap_id &&
420 (vector == -1 || entry->vector == vector) )
421 return entry;
422 }
424 return NULL;
425 }
/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @vector: the vector to bind the single MSI entry to
 *
 * Setup the MSI capability structure of device function with a single
 * MSI irq, regardless of device function is capable of handling
 * multiple messages. A return of zero indicates the successful setup
 * of an entry zero with the new MSI irq or non-zero for otherwise.
 **/
static int msi_capability_init(struct pci_dev *dev, int vector)
{
    struct msi_desc *entry;
    int pos, ret;
    u16 control;
    u8 bus = dev->bus;
    u8 slot = PCI_SLOT(dev->devfn);
    u8 func = PCI_FUNC(dev->devfn);

    pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSI);
    control = pci_conf_read16(bus, slot, func, msi_control_reg(pos));
    /* MSI Entry Initialization */
    msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */

    entry = alloc_msi_entry();
    if ( !entry )
        return -ENOMEM;

    entry->msi_attrib.type = PCI_CAP_ID_MSI;
    entry->msi_attrib.is_64 = is_64bit_address(control);
    entry->msi_attrib.entry_nr = 0;
    entry->msi_attrib.maskbit = is_mask_bit_support(control);
    entry->msi_attrib.masked = 1;
    entry->msi_attrib.pos = pos;
    entry->vector = vector;
    /* For maskable MSI, mask_base stores the mask-bits register offset. */
    if ( is_mask_bit_support(control) )
        entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
                is_64bit_address(control));
    entry->dev = dev;
    if ( entry->msi_attrib.maskbit )
    {
        unsigned int maskbits, temp;
        /* All MSIs are unmasked by default, Mask them all */
        maskbits = pci_conf_read32(bus, slot, func,
                                   msi_mask_bits_reg(pos, is_64bit_address(control)));
        /* Build a mask covering every vector the device may generate. */
        temp = (1 << multi_msi_capable(control));
        temp = ((temp - 1) & ~temp);
        maskbits |= temp;
        pci_conf_write32(bus, slot, func,
                         msi_mask_bits_reg(pos, is_64bit_address(control)),
                         maskbits);
    }
    list_add_tail(&entry->list, &dev->msi_list);

    /* Configure MSI capability structure */
    ret = setup_msi_irq(dev, entry);
    if ( ret )
    {
        msi_free_vector(vector);
        return ret;
    }

    /* Restore the original MSI enabled bits */
    pci_conf_write16(bus, slot, func, msi_control_reg(pos), control);

    return 0;
}
/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @msi: MSI-X parameters (table base address, entry number, vector)
 *
 * Setup the MSI-X capability structure of device function with a
 * single MSI-X irq. A return of zero indicates the successful setup of
 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
 **/
static int msix_capability_init(struct pci_dev *dev, struct msi_info *msi)
{
    struct msi_desc *entry;
    int pos;
    u16 control;
    unsigned long phys_addr;
    u32 table_offset;
    u8 bir;
    void __iomem *base;
    int idx;
    u8 bus = dev->bus;
    u8 slot = PCI_SLOT(dev->devfn);
    u8 func = PCI_FUNC(dev->devfn);

    pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSIX);
    control = pci_conf_read16(bus, slot, func, msix_control_reg(pos));
    msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */

    /* MSI-X Table Initialization */
    entry = alloc_msi_entry();
    if ( !entry )
        return -ENOMEM;

    /* Request & Map MSI-X table region */
    table_offset = pci_conf_read32(bus, slot, func, msix_table_offset_reg(pos));
    bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
    table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
    /*
     * NOTE(review): the BIR is extracted but the caller-supplied
     * table_base is trusted instead of looking up BAR[bir] — presumably
     * callers pass the matching BAR's address; confirm at call sites.
     */
    phys_addr = msi->table_base + table_offset;
    idx = msix_fixmap_alloc();
    if ( idx < 0 )
    {
        xfree(entry);
        return -ENOMEM;
    }
    /* Map the table page uncached and point at the exact entry offset. */
    set_fixmap_nocache(idx, phys_addr);
    base = (void *)(fix_to_virt(idx) + (phys_addr & ((1UL << PAGE_SHIFT) - 1)));

    entry->msi_attrib.type = PCI_CAP_ID_MSIX;
    entry->msi_attrib.is_64 = 1;
    entry->msi_attrib.entry_nr = msi->entry_nr;
    entry->msi_attrib.maskbit = 1;
    entry->msi_attrib.masked = 1;
    entry->msi_attrib.pos = pos;
    entry->vector = msi->vector;
    entry->dev = dev;
    entry->mask_base = base;

    list_add_tail(&entry->list, &dev->msi_list);

    /* NOTE(review): setup_msi_irq()'s return value is ignored here. */
    setup_msi_irq(dev, entry);

    /* Set MSI-X enabled bits (restores the saved control word). */
    pci_conf_write16(bus, slot, func, msix_control_reg(pos), control);

    return 0;
}
561 /**
562 * pci_enable_msi - configure device's MSI capability structure
563 * @dev: pointer to the pci_dev data structure of MSI device function
564 *
565 * Setup the MSI capability structure of device function with
566 * a single MSI irq upon its software driver call to request for
567 * MSI mode enabled on its hardware device function. A return of zero
568 * indicates the successful setup of an entry zero with the new MSI
569 * irq or non-zero for otherwise.
570 **/
571 static int __pci_enable_msi(struct msi_info *msi)
572 {
573 int status;
574 struct pci_dev *pdev;
576 pdev = pci_lock_pdev(msi->bus, msi->devfn);
577 if ( !pdev )
578 return -ENODEV;
580 if ( find_msi_entry(pdev, msi->vector, PCI_CAP_ID_MSI) )
581 {
582 spin_unlock(&pdev->lock);
583 dprintk(XENLOG_WARNING, "vector %d has already mapped to MSI on "
584 "device %02x:%02x.%01x.\n", msi->vector, msi->bus,
585 PCI_SLOT(msi->devfn), PCI_FUNC(msi->devfn));
586 return 0;
587 }
589 status = msi_capability_init(pdev, msi->vector);
590 spin_unlock(&pdev->lock);
591 return status;
592 }
/*
 * Tear down MSI vector @vector: disable MSI on the owning device, free
 * the vector's descriptor, then write back the saved message control
 * word (restoring the pre-existing enable state).
 * Caller must hold irq_desc[vector].lock.
 */
static void __pci_disable_msi(int vector)
{
    struct msi_desc *entry;
    struct pci_dev *dev;
    int pos;
    u16 control;
    u8 bus, slot, func;

    entry = irq_desc[vector].msi_desc;
    if ( !entry )
        return;
    /*
     * Lock here is safe. msi_desc can not be removed without holding
     * both irq_desc[].lock (which we do) and pdev->lock.
     */
    spin_lock(&entry->dev->lock);
    dev = entry->dev;
    bus = dev->bus;
    slot = PCI_SLOT(dev->devfn);
    func = PCI_FUNC(dev->devfn);

    pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSI);
    control = pci_conf_read16(bus, slot, func, msi_control_reg(pos));
    msi_set_enable(dev, 0);

    BUG_ON(list_empty(&dev->msi_list));

    msi_free_vector(vector);

    pci_conf_write16(bus, slot, func, msi_control_reg(pos), control);
    spin_unlock(&dev->lock);
}
627 /**
628 * pci_enable_msix - configure device's MSI-X capability structure
629 * @dev: pointer to the pci_dev data structure of MSI-X device function
630 * @entries: pointer to an array of MSI-X entries
631 * @nvec: number of MSI-X irqs requested for allocation by device driver
632 *
633 * Setup the MSI-X capability structure of device function with the number
634 * of requested irqs upon its software driver call to request for
635 * MSI-X mode enabled on its hardware device function. A return of zero
636 * indicates the successful configuration of MSI-X capability structure
637 * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
638 * Or a return of > 0 indicates that driver request is exceeding the number
639 * of irqs available. Driver should use the returned value to re-send
640 * its request.
641 **/
642 static int __pci_enable_msix(struct msi_info *msi)
643 {
644 int status, pos, nr_entries;
645 struct pci_dev *pdev;
646 u16 control;
647 u8 slot = PCI_SLOT(msi->devfn);
648 u8 func = PCI_FUNC(msi->devfn);
650 pdev = pci_lock_pdev(msi->bus, msi->devfn);
651 if ( !pdev )
652 return -ENODEV;
654 pos = pci_find_cap_offset(msi->bus, slot, func, PCI_CAP_ID_MSIX);
655 control = pci_conf_read16(msi->bus, slot, func, msi_control_reg(pos));
656 nr_entries = multi_msix_capable(control);
657 if (msi->entry_nr >= nr_entries)
658 {
659 spin_unlock(&pdev->lock);
660 return -EINVAL;
661 }
663 if ( find_msi_entry(pdev, msi->vector, PCI_CAP_ID_MSIX) )
664 {
665 spin_unlock(&pdev->lock);
666 dprintk(XENLOG_WARNING, "vector %d has already mapped to MSIX on "
667 "device %02x:%02x.%01x.\n", msi->vector, msi->bus,
668 PCI_SLOT(msi->devfn), PCI_FUNC(msi->devfn));
669 return 0;
670 }
672 status = msix_capability_init(pdev, msi);
673 spin_unlock(&pdev->lock);
674 return status;
675 }
677 static void __pci_disable_msix(int vector)
678 {
679 struct msi_desc *entry;
680 struct pci_dev *dev;
681 int pos;
682 u16 control;
683 u8 bus, slot, func;
685 entry = irq_desc[vector].msi_desc;
686 if ( !entry )
687 return;
688 /*
689 * Lock here is safe. msi_desc can not be removed without holding
690 * both irq_desc[].lock (which we do) and pdev->lock.
691 */
692 spin_lock(&entry->dev->lock);
693 dev = entry->dev;
694 bus = dev->bus;
695 slot = PCI_SLOT(dev->devfn);
696 func = PCI_FUNC(dev->devfn);
698 pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSIX);
699 control = pci_conf_read16(bus, slot, func, msix_control_reg(pos));
700 msi_set_enable(dev, 0);
702 BUG_ON(list_empty(&dev->msi_list));
704 msi_free_vector(vector);
706 pci_conf_write16(bus, slot, func, msix_control_reg(pos), control);
707 spin_unlock(&dev->lock);
708 }
710 int pci_enable_msi(struct msi_info *msi)
711 {
712 ASSERT(spin_is_locked(&irq_desc[msi->vector].lock));
714 return msi->table_base ? __pci_enable_msix(msi) :
715 __pci_enable_msi(msi);
716 }
718 void pci_disable_msi(int vector)
719 {
720 irq_desc_t *desc = &irq_desc[vector];
721 ASSERT(spin_is_locked(&desc->lock));
722 if ( !desc->msi_desc )
723 return;
725 if ( desc->msi_desc->msi_attrib.type == PCI_CAP_ID_MSI )
726 __pci_disable_msi(vector);
727 else if ( desc->msi_desc->msi_attrib.type == PCI_CAP_ID_MSIX )
728 __pci_disable_msix(vector);
729 }
/*
 * Free every MSI descriptor on @dev.  Each vector's irq_desc lock is
 * taken with trylock, and on contention the whole list walk restarts —
 * this avoids deadlocking against paths that take irq_desc[].lock
 * before the device lock.
 */
static void msi_free_vectors(struct pci_dev* dev)
{
    struct msi_desc *entry, *tmp;
    irq_desc_t *desc;
    unsigned long flags;

 retry:
    list_for_each_entry_safe( entry, tmp, &dev->msi_list, list )
    {
        desc = &irq_desc[entry->vector];

        local_irq_save(flags);
        if ( !spin_trylock(&desc->lock) )
        {
            /* Contended: drop everything and rescan from the start. */
            local_irq_restore(flags);
            goto retry;
        }

        if ( desc->handler == &pci_msi_type )
        {
            /* MSI is not shared, so should be released already */
            BUG_ON(desc->status & IRQ_GUEST);
            desc->handler = &no_irq_type;
        }

        msi_free_vector(entry->vector);
        spin_unlock_irqrestore(&desc->lock, flags);
    }
}
/* Disable MSI and MSI-X on @pdev and free all of its MSI descriptors. */
void pci_cleanup_msi(struct pci_dev *pdev)
{
    /* Disable MSI and/or MSI-X */
    msi_set_enable(pdev, 0);
    msix_set_enable(pdev, 0);
    msi_free_vectors(pdev);
}