#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/irq.h>
+#include <xen/vpci.h>
#include <public/hvm/ioreq.h>
#include <asm/hvm/io.h>
#include <asm/hvm/vpic.h>
if ( msixtbl_write(v, ctrl_address, 4, 0) != X86EMUL_OKAY )
gdprintk(XENLOG_WARNING, "MSI-X write completion failure\n");
}
+
+static unsigned int msi_gflags(uint16_t data, uint64_t addr)
+{
+ /*
+ * Use the DOMCTL constants here because the output of this function is
+ * used as input to pt_irq_create_bind, which expects its input in the
+ * same format as the DOMCTL interface.
+ */
+ return MASK_INSR(MASK_EXTR(addr, MSI_ADDR_DEST_ID_MASK),
+ XEN_DOMCTL_VMSI_X86_DEST_ID_MASK) |
+ MASK_INSR(MASK_EXTR(addr, MSI_ADDR_REDIRECTION_MASK),
+ XEN_DOMCTL_VMSI_X86_RH_MASK) |
+ MASK_INSR(MASK_EXTR(addr, MSI_ADDR_DESTMODE_MASK),
+ XEN_DOMCTL_VMSI_X86_DM_MASK) |
+ MASK_INSR(MASK_EXTR(data, MSI_DATA_DELIVERY_MODE_MASK),
+ XEN_DOMCTL_VMSI_X86_DELIV_MASK) |
+ MASK_INSR(MASK_EXTR(data, MSI_DATA_TRIGGER_MASK),
+ XEN_DOMCTL_VMSI_X86_TRIG_MASK);
+}
+
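+/*
+ * Mask an MSI entry: fetch the IRQ descriptor behind the PIRQ bound to the
+ * entry and update its guest mask bit while holding the descriptor lock.
+ */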
+void vpci_msi_arch_mask(struct vpci_msi *msi, const struct pci_dev *pdev,
+ unsigned int entry, bool mask)
+{
+ const struct pirq *pinfo;
+ struct irq_desc *desc;
+ unsigned long flags;
+ int irq;
+
+ ASSERT(msi->arch.pirq >= 0 && entry < msi->vectors);
+ pinfo = pirq_info(pdev->domain, msi->arch.pirq + entry);
+ if ( !pinfo )
+ return;
+
+ irq = pinfo->arch.irq;
+ if ( irq >= nr_irqs || irq < 0 )
+ return;
+
+ desc = irq_to_desc(irq);
+ if ( !desc )
+ return;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ guest_mask_msi_irq(desc, mask);
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
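+/*
+ * Enable MSI: map a block of PIRQs for the device and bind each one to a
+ * guest vector, rolling everything back if any bind fails.
+ */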
+int vpci_msi_arch_enable(struct vpci_msi *msi, const struct pci_dev *pdev,
+ unsigned int vectors)
+{
+ struct msi_info msi_info = {
+ .seg = pdev->seg,
+ .bus = pdev->bus,
+ .devfn = pdev->devfn,
+ .entry_nr = vectors,
+ };
+ unsigned int i;
+ int rc;
+
+ ASSERT(msi->arch.pirq == INVALID_PIRQ);
+
+ /* Get a PIRQ. */
+ rc = allocate_and_map_msi_pirq(pdev->domain, -1, &msi->arch.pirq,
+ MAP_PIRQ_TYPE_MULTI_MSI, &msi_info);
+ if ( rc )
+ {
+ gdprintk(XENLOG_ERR, "%04x:%02x:%02x.%u: failed to map PIRQ: %d\n",
+ pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn), rc);
+ return rc;
+ }
+
+ for ( i = 0; i < vectors; i++ )
+ {
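+ /*
+ * In multiple message mode the device encodes the entry index in the
+ * low bits of the vector, so build the guest vector from the base
+ * vector with those bits replaced by (vector + i).
+ */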
+ uint8_t vector = MASK_EXTR(msi->data, MSI_DATA_VECTOR_MASK);
+ uint8_t vector_mask = 0xff >> (8 - fls(vectors) + 1);
+ xen_domctl_bind_pt_irq_t bind = {
+ .machine_irq = msi->arch.pirq + i,
+ .irq_type = PT_IRQ_TYPE_MSI,
+ .u.msi.gvec = (vector & ~vector_mask) |
+ ((vector + i) & vector_mask),
+ .u.msi.gflags = msi_gflags(msi->data, msi->address),
+ };
+
+ pcidevs_lock();
+ rc = pt_irq_create_bind(pdev->domain, &bind);
+ if ( rc )
+ {
+ gdprintk(XENLOG_ERR,
+ "%04x:%02x:%02x.%u: failed to bind PIRQ %u: %d\n",
+ pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn), msi->arch.pirq + i, rc);
+ /* Unbind only the PIRQs that were successfully bound. */
+ while ( bind.machine_irq-- > msi->arch.pirq )
+ pt_irq_destroy_bind(pdev->domain, &bind);
+ spin_lock(&pdev->domain->event_lock);
+ unmap_domain_pirq(pdev->domain, msi->arch.pirq);
+ spin_unlock(&pdev->domain->event_lock);
+ pcidevs_unlock();
+ msi->arch.pirq = INVALID_PIRQ;
+ return rc;
+ }
+ pcidevs_unlock();
+ }
+
+ return 0;
+}
+
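+/* Disable MSI: destroy the per-vector binds and unmap the PIRQ block. */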
+int vpci_msi_arch_disable(struct vpci_msi *msi, const struct pci_dev *pdev)
+{
+ unsigned int i;
+
+ ASSERT(msi->arch.pirq != INVALID_PIRQ);
+
+ pcidevs_lock();
+ for ( i = 0; i < msi->vectors; i++ )
+ {
+ xen_domctl_bind_pt_irq_t bind = {
+ .machine_irq = msi->arch.pirq + i,
+ .irq_type = PT_IRQ_TYPE_MSI,
+ };
+ int rc;
+
+ rc = pt_irq_destroy_bind(pdev->domain, &bind);
+ ASSERT(!rc);
+ }
+
+ spin_lock(&pdev->domain->event_lock);
+ unmap_domain_pirq(pdev->domain, msi->arch.pirq);
+ spin_unlock(&pdev->domain->event_lock);
+ pcidevs_unlock();
+
+ msi->arch.pirq = INVALID_PIRQ;
+
+ return 0;
+}
+
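+/* Initialize the arch-specific state: no PIRQ is bound yet. */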
+void vpci_msi_arch_init(struct vpci_msi *msi)
+{
+ msi->arch.pirq = INVALID_PIRQ;
+}
+
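+/* Pretty-print the guest-programmed MSI fields and the bound PIRQ. */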
+void vpci_msi_arch_print(const struct vpci_msi *msi)
+{
+ printk("vec=%#02x%7s%6s%3sassert%5s%7s dest_id=%lu pirq: %d\n",
+ MASK_EXTR(msi->data, MSI_DATA_VECTOR_MASK),
+ msi->data & MSI_DATA_DELIVERY_LOWPRI ? "lowest" : "fixed",
+ msi->data & MSI_DATA_TRIGGER_LEVEL ? "level" : "edge",
+ msi->data & MSI_DATA_LEVEL_ASSERT ? "" : "de",
+ msi->address & MSI_ADDR_DESTMODE_LOGIC ? "log" : "phys",
+ msi->address & MSI_ADDR_REDIRECTION_LOWPRI ? "lowest" : "fixed",
+ MASK_EXTR(msi->address, MSI_ADDR_DEST_ID_MASK),
+ msi->arch.pirq);
+}
--- /dev/null
+/*
+ * Handlers for accesses to the MSI capability structure.
+ *
+ * Copyright (C) 2017 Citrix Systems R&D
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms and conditions of the GNU General Public
+ * License, version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <xen/sched.h>
+#include <xen/softirq.h>
+#include <xen/vpci.h>
+
+#include <asm/msi.h>
+
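+/* Handlers for the MSI control field (PCI_MSI_FLAGS). */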
+static uint32_t vpci_msi_control_read(const struct pci_dev *pdev,
+ unsigned int reg, void *data)
+{
+ const struct vpci_msi *msi = data;
+ uint16_t val;
+
+ /* Set the number of supported/configured messages. */
+ val = MASK_INSR(fls(msi->max_vectors) - 1, PCI_MSI_FLAGS_QMASK);
+ val |= MASK_INSR(fls(msi->vectors) - 1, PCI_MSI_FLAGS_QSIZE);
+
+ val |= msi->enabled ? PCI_MSI_FLAGS_ENABLE : 0;
+ val |= msi->masking ? PCI_MSI_FLAGS_MASKBIT : 0;
+ val |= msi->address64 ? PCI_MSI_FLAGS_64BIT : 0;
+
+ return val;
+}
+
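+/*
+ * Enable MSI on the device: bind the vectors, apply any mask bits set by
+ * the guest and finally set the hardware enable bit.
+ */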
+static void vpci_msi_enable(const struct pci_dev *pdev, struct vpci_msi *msi,
+ unsigned int vectors)
+{
+ int ret;
+
+ ASSERT(!msi->enabled);
+ ret = vpci_msi_arch_enable(msi, pdev, vectors);
+ if ( ret )
+ return;
+
+ /* Apply the mask bits. */
+ if ( msi->masking )
+ {
+ unsigned int i;
+ uint32_t mask = msi->mask;
+
+ for ( i = ffs(mask) - 1; mask && i < vectors; i = ffs(mask) - 1 )
+ {
+ vpci_msi_arch_mask(msi, pdev, i, true);
+ __clear_bit(i, &mask);
+ }
+ }
+
+ __msi_set_enable(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn), msi->pos, 1);
+
+ msi->enabled = true;
+}
+
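+/* Disable MSI: clear the hardware enable bit, then tear down the binds. */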
+static int vpci_msi_disable(const struct pci_dev *pdev, struct vpci_msi *msi)
+{
+ int ret;
+
+ ASSERT(msi->enabled);
+ __msi_set_enable(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn), msi->pos, 0);
+
+ ret = vpci_msi_arch_disable(msi, pdev);
+ if ( !ret )
+ msi->enabled = false;
+
+ return ret;
+}
+
+static void vpci_msi_control_write(const struct pci_dev *pdev,
+ unsigned int reg, uint32_t val, void *data)
+{
+ struct vpci_msi *msi = data;
+ unsigned int vectors = 1 << MASK_EXTR(val, PCI_MSI_FLAGS_QSIZE);
+ bool new_enabled = val & PCI_MSI_FLAGS_ENABLE;
+
+ if ( vectors > msi->max_vectors )
+ vectors = msi->max_vectors;
+
+ /*
+ * No change if the enable bit keeps its value and either the
+ * number of vectors is unchanged or MSI is currently disabled,
+ * in which case the vectors field can be updated directly.
+ */
+ if ( new_enabled == msi->enabled &&
+ (vectors == msi->vectors || !msi->enabled) )
+ {
+ msi->vectors = vectors;
+ return;
+ }
+
+ if ( new_enabled )
+ {
+ /*
+ * If the device is already enabled it means the number of
+ * enabled messages has changed. Disable and re-enable the
+ * device in order to apply the change.
+ */
+ if ( msi->enabled && vpci_msi_disable(pdev, msi) )
+ /*
+ * Xen was unable to disable the currently configured MSI
+ * messages; leave the device state as-is so that the guest
+ * can try to disable MSI again.
+ */
+ return;
+
+ vpci_msi_enable(pdev, msi, vectors);
+ }
+ else
+ vpci_msi_disable(pdev, msi);
+
+ msi->vectors = vectors;
+}
+
+/* Handlers for the address field (32bit or low part of a 64bit address). */
+static uint32_t vpci_msi_address_read(const struct pci_dev *pdev,
+ unsigned int reg, void *data)
+{
+ const struct vpci_msi *msi = data;
+
+ return msi->address;
+}
+
+static void vpci_msi_address_write(const struct pci_dev *pdev,
+ unsigned int reg, uint32_t val, void *data)
+{
+ struct vpci_msi *msi = data;
+
+ /* Clear low part. */
+ msi->address &= ~0xffffffffull;
+ msi->address |= val;
+}
+
+/* Handlers for the high part of a 64bit address field. */
+static uint32_t vpci_msi_address_upper_read(const struct pci_dev *pdev,
+ unsigned int reg, void *data)
+{
+ const struct vpci_msi *msi = data;
+
+ return msi->address >> 32;
+}
+
+static void vpci_msi_address_upper_write(const struct pci_dev *pdev,
+ unsigned int reg, uint32_t val,
+ void *data)
+{
+ struct vpci_msi *msi = data;
+
+ /* Clear high part. */
+ msi->address &= 0xffffffff;
+ msi->address |= (uint64_t)val << 32;
+}
+
+/* Handlers for the data field. */
+static uint32_t vpci_msi_data_read(const struct pci_dev *pdev,
+ unsigned int reg, void *data)
+{
+ const struct vpci_msi *msi = data;
+
+ return msi->data;
+}
+
+static void vpci_msi_data_write(const struct pci_dev *pdev, unsigned int reg,
+ uint32_t val, void *data)
+{
+ struct vpci_msi *msi = data;
+
+ msi->data = val;
+}
+
+/* Handlers for the MSI mask bits. */
+static uint32_t vpci_msi_mask_read(const struct pci_dev *pdev,
+ unsigned int reg, void *data)
+{
+ const struct vpci_msi *msi = data;
+
+ return msi->mask;
+}
+
+static void vpci_msi_mask_write(const struct pci_dev *pdev, unsigned int reg,
+ uint32_t val, void *data)
+{
+ struct vpci_msi *msi = data;
+ uint32_t dmask;
+
+ dmask = msi->mask ^ val;
+
+ if ( !dmask )
+ return;
+
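+ /*
+ * Only propagate the changed bits to the hardware while MSI is
+ * enabled; otherwise they are latched in the mask field and applied
+ * when the guest enables MSI.
+ */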
+ if ( msi->enabled )
+ {
+ unsigned int i;
+
+ for ( i = ffs(dmask) - 1; dmask && i < msi->vectors;
+ i = ffs(dmask) - 1 )
+ {
+ vpci_msi_arch_mask(msi, pdev, i, (val >> i) & 1);
+ __clear_bit(i, &dmask);
+ }
+ }
+
+ msi->mask = val;
+}
+
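+/*
+ * Scan the physical MSI capability and register trap handlers for each of
+ * its fields.
+ */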
+static int vpci_init_msi(struct pci_dev *pdev)
+{
+ uint8_t seg = pdev->seg, bus = pdev->bus;
+ uint8_t slot = PCI_SLOT(pdev->devfn), func = PCI_FUNC(pdev->devfn);
+ struct vpci_msi *msi;
+ unsigned int pos;
+ uint16_t control;
+ int ret;
+
+ pos = pci_find_cap_offset(seg, bus, slot, func, PCI_CAP_ID_MSI);
+ if ( !pos )
+ return 0;
+
+ msi = xzalloc(struct vpci_msi);
+ if ( !msi )
+ return -ENOMEM;
+
+ msi->pos = pos;
+
+ ret = vpci_add_register(pdev, vpci_msi_control_read,
+ vpci_msi_control_write,
+ msi_control_reg(pos), 2, msi);
+ if ( ret )
+ {
+ xfree(msi);
+ return ret;
+ }
+
+ /* Get the maximum number of vectors the device supports. */
+ control = pci_conf_read16(seg, bus, slot, func, msi_control_reg(pos));
+ msi->max_vectors = multi_msi_capable(control);
+ ASSERT(msi->max_vectors <= 32);
+
+ /* The multiple message enable field is 0 after reset (1 message enabled). */
+ msi->vectors = 1;
+
+ /* No PIRQ bound yet. */
+ vpci_msi_arch_init(msi);
+
+ msi->address64 = is_64bit_address(control);
+ msi->masking = is_mask_bit_support(control);
+
+ ret = vpci_add_register(pdev, vpci_msi_address_read,
+ vpci_msi_address_write,
+ msi_lower_address_reg(pos), 4, msi);
+ if ( ret )
+ {
+ xfree(msi);
+ return ret;
+ }
+
+ ret = vpci_add_register(pdev, vpci_msi_data_read, vpci_msi_data_write,
+ msi_data_reg(pos, msi->address64), 2,
+ msi);
+ if ( ret )
+ {
+ xfree(msi);
+ return ret;
+ }
+
+ if ( msi->address64 )
+ {
+ ret = vpci_add_register(pdev, vpci_msi_address_upper_read,
+ vpci_msi_address_upper_write,
+ msi_upper_address_reg(pos), 4, msi);
+ if ( ret )
+ {
+ xfree(msi);
+ return ret;
+ }
+ }
+
+ if ( msi->masking )
+ {
+ ret = vpci_add_register(pdev, vpci_msi_mask_read, vpci_msi_mask_write,
+ msi_mask_bits_reg(pos, msi->address64), 4,
+ msi);
+ if ( ret )
+ {
+ xfree(msi);
+ return ret;
+ }
+ }
+
+ pdev->vpci->msi = msi;
+
+ return 0;
+}
+REGISTER_VPCI_INIT(vpci_init_msi);
+
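+/* Dump the vPCI MSI state of every domain to the console. */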
+void vpci_dump_msi(void)
+{
+ struct domain *d;
+
+ for_each_domain ( d )
+ {
+ const struct pci_dev *pdev;
+
+ if ( !has_vpci(d) )
+ continue;
+
+ printk("vPCI MSI information for d%d\n", d->domain_id);
+
+ list_for_each_entry ( pdev, &d->arch.pdev_list, domain_list )
+ {
+ uint8_t seg = pdev->seg, bus = pdev->bus;
+ uint8_t slot = PCI_SLOT(pdev->devfn), func = PCI_FUNC(pdev->devfn);
+ const struct vpci_msi *msi = pdev->vpci->msi;
+
+ if ( !spin_trylock(&pdev->vpci->lock) )
+ {
+ printk("Unable to get vPCI lock, skipping\n");
+ continue;
+ }
+
+ if ( msi )
+ {
+ printk("Device %04x:%02x:%02x.%u\n", seg, bus, slot, func);
+
+ printk(" Enabled: %u Supports masking: %u 64-bit addresses: %u\n",
+ msi->enabled, msi->masking, msi->address64);
+ printk(" Max vectors: %u enabled vectors: %u\n",
+ msi->max_vectors, msi->vectors);
+
+ vpci_msi_arch_print(msi);
+
+ if ( msi->masking )
+ printk(" mask=%08x\n", msi->mask);
+ }
+
+ spin_unlock(&pdev->vpci->lock);
+ process_pending_softirqs();
+ }
+ }
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */