*/
x2apic_enabled = (current_local_apic_mode() == APIC_MODE_X2APIC);
+ if ( pcidevs_trylock() )
+ {
+ /*
+ * Assume the PCI device list to be in a consistent state if the
+ * lock was not held at the time of the crash; if the trylock fails, the
+ * list may be inconsistent and device MSI state is left untouched.
+ */
+ pci_disable_msi_all();
+ pcidevs_unlock();
+ }
+
disable_IO_APIC();
hpet_disable();
}
extern void pci_disable_msi(struct msi_desc *msi_desc);
extern int pci_prepare_msix(u16 seg, u8 bus, u8 devfn, bool off);
extern void pci_cleanup_msi(struct pci_dev *pdev);
+extern void pci_disable_msi_all(void);
extern int setup_msi_irq(struct irq_desc *desc, struct msi_desc *msidesc);
extern int __setup_msi_irq(struct irq_desc *desc, struct msi_desc *msidesc,
hw_irq_controller *handler);
msi_free_irqs(pdev);
}
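+/* Per-device handler: turn off both MSI and MSI-X for the given device. */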
+static int cf_check disable_msi(struct pci_dev *pdev, void *arg)
+{
+ msi_set_enable(pdev, 0);
+ msix_set_enable(pdev, 0);
+
+ return 0;
+}
+
+/* Disable MSI and/or MSI-X on all devices known to Xen. */
+void pci_disable_msi_all(void)
+{
+ int rc = pci_iterate_devices(disable_msi, NULL);
+
+ if ( rc )
+ printk(XENLOG_ERR
+ "Failed to disable MSI(-X) on some devices: %d\n", rc);
+}
+
int pci_reset_msix_state(struct pci_dev *pdev)
{
unsigned int pos = pdev->msix_pos;
smp_call_function(stop_this_cpu, &stop_aps, 0);
local_irq_disable();
+ pci_disable_msi_all();
disable_IO_APIC();
hpet_disable();
return rspin_is_locked(&_pcidevs_lock);
}
+bool pcidevs_trylock_unsafe(void)
+{
+ return _rspin_trylock(&_pcidevs_lock);
+}
+
static RADIX_TREE(pci_segments);
static inline struct pci_seg *get_pseg(u16 seg)
return ret;
}
+struct segment_iter {
+ int (*handler)(struct pci_dev *pdev, void *arg);
+ void *arg;
+ int rc;
+};
+
+static int cf_check iterate_all(struct pci_seg *pseg, void *arg)
+{
+ struct segment_iter *iter = arg;
+ struct pci_dev *pdev;
+
+ list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
+ {
+ int rc = iter->handler(pdev, iter->arg);
+
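+ /* Record the first error, but keep iterating over remaining devices. */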
+ if ( !iter->rc )
+ iter->rc = rc;
+ }
+
+ return 0;
+}
+
+/*
+ * Iterate without locking or preemption over all PCI devices known to Xen.
+ * Can be called with interrupts disabled.
+ */
+int pci_iterate_devices(int (*handler)(struct pci_dev *pdev, void *arg),
+ void *arg)
+{
+ struct segment_iter iter = {
+ .handler = handler,
+ .arg = arg,
+ };
+
+ return pci_segments_iterate(iterate_all, &iter) ?: iter.rc;
+}
+
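A minimal usage sketch of the new iterator, assuming only the interface added
above; the count_device() handler and count_pdevs() wrapper are hypothetical
names used purely for illustration, following the same pattern as
disable_msi()/pci_disable_msi_all():

    static int cf_check count_device(struct pci_dev *pdev, void *arg)
    {
        unsigned int *count = arg;

        /* Handlers return 0 on success; the first non-zero value is reported. */
        ++*count;

        return 0;
    }

    static unsigned int count_pdevs(void)
    {
        unsigned int count = 0;

        /* Takes no locks, so it can be used with interrupts disabled. */
        pci_iterate_devices(count_device, &count);

        return count;
    }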
/*
* Local variables:
* mode: C
}
void pcidevs_unlock(void);
bool __must_check pcidevs_locked(void);
+bool pcidevs_trylock_unsafe(void);
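+/*
+ * lock_evaluate_nospec() presumably acts as a speculation barrier, so callers
+ * do not speculatively run the lock-protected path before the trylock result
+ * is architecturally known.
+ */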
+static always_inline bool pcidevs_trylock(void)
+{
+ return lock_evaluate_nospec(pcidevs_trylock_unsafe());
+}
#ifndef NDEBUG
/*
struct pci_dev *pci_get_real_pdev(pci_sbdf_t sbdf);
void pci_check_disable_device(u16 seg, u8 bus, u8 devfn);
+/*
+ * Iterate without locking or preemption over all PCI devices known to Xen.
+ * Can be called with interrupts disabled.
+ */
+int pci_iterate_devices(int (*handler)(struct pci_dev *pdev, void *arg),
+ void *arg);
+
uint8_t pci_conf_read8(pci_sbdf_t sbdf, unsigned int reg);
uint16_t pci_conf_read16(pci_sbdf_t sbdf, unsigned int reg);
uint32_t pci_conf_read32(pci_sbdf_t sbdf, unsigned int reg);