ch->msi.msi_attrib.masked = 1;
}
-static void hpet_msi_write(struct hpet_event_channel *ch, struct msi_msg *msg)
+static int hpet_msi_write(struct hpet_event_channel *ch, struct msi_msg *msg)
{
ch->msi.msg = *msg;
+
if ( iommu_intremap )
- iommu_update_ire_from_msi(&ch->msi, msg);
+ {
+ int rc = iommu_update_ire_from_msi(&ch->msi, msg);
+
+ if ( rc )
+ return rc;
+ }
+
hpet_write32(msg->data, HPET_Tn_ROUTE(ch->idx));
hpet_write32(msg->address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
+
+ return 0;
}
static void __maybe_unused
.set_affinity = hpet_msi_set_affinity,
};
-static void __hpet_setup_msi_irq(struct irq_desc *desc)
+static int __hpet_setup_msi_irq(struct irq_desc *desc)
{
struct msi_msg msg;
msi_compose_msg(desc, &msg);
- hpet_msi_write(desc->action->dev_id, &msg);
+ return hpet_msi_write(desc->action->dev_id, &msg);
}
static int __init hpet_setup_msi_irq(struct hpet_event_channel *ch)
desc->handler = &hpet_msi_type;
ret = request_irq(ch->msi.irq, hpet_interrupt_handler, 0, "HPET", ch);
+ if ( ret >= 0 )
+ ret = __hpet_setup_msi_irq(desc);
if ( ret < 0 )
{
if ( iommu_intremap )
return ret;
}
- __hpet_setup_msi_irq(desc);
desc->msi_desc = &ch->msi;
return 0;
if ( desc->handler != &no_irq_type )
dprintk(XENLOG_G_ERR, "dom%d: irq %d in use\n",
d->domain_id, irq);
- setup_msi_handler(desc, msi_desc);
+
+ ret = setup_msi_irq(desc, msi_desc);
+ if ( ret )
+ {
+ spin_unlock_irqrestore(&desc->lock, flags);
+ pci_disable_msi(msi_desc);
+ goto done;
+ }
if ( opt_irq_vector_map == OPT_IRQ_VECTOR_MAP_PERDEV
&& !desc->arch.used_vectors )
}
set_domain_irq_pirq(d, irq, info);
- setup_msi_irq(desc);
spin_unlock_irqrestore(&desc->lock, flags);
}
else
iommu_read_msi_from_ire(entry, msg);
}
-static void write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
+static int write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
entry->msg = *msg;
if ( iommu_intremap )
{
+ int rc;
+
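+        /*
+         * iommu_update_ire_from_msi() rewrites *msg with the remapped form;
+         * entry->msg, saved above, must keep the original message, so the
+         * two must not alias.
+         */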
ASSERT(msg != &entry->msg);
- iommu_update_ire_from_msi(entry, msg);
+ rc = iommu_update_ire_from_msi(entry, msg);
+ if ( rc )
+ return rc;
}
switch ( entry->msi_attrib.type )
default:
BUG();
}
+
+ return 0;
}
void set_msi_affinity(struct irq_desc *desc, const cpumask_t *mask)
return entry;
}
-void setup_msi_handler(struct irq_desc *desc, struct msi_desc *msidesc)
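+/*
+ * Bind the MSI descriptor to the IRQ descriptor, install the appropriate
+ * handler, and write the (possibly remapped) message. Returns 0 on success
+ * or the error from updating the interrupt remapping entry.
+ */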
+int setup_msi_irq(struct irq_desc *desc, struct msi_desc *msidesc)
{
+ struct msi_msg msg;
+
desc->msi_desc = msidesc;
desc->handler = msi_maskable_irq(msidesc) ? &pci_msi_maskable
: &pci_msi_nonmaskable;
-}
-
-void setup_msi_irq(struct irq_desc *desc)
-{
- struct msi_msg msg;
-
msi_compose_msg(desc, &msg);
- write_msi_msg(desc->msi_desc, &msg);
+ return write_msi_msg(msidesc, &msg);
}
int msi_free_irq(struct msi_desc *entry)
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <xen/err.h>
#include <xen/sched.h>
#include <xen/hvm/iommu.h>
#include <asm/amd-iommu.h>
}
}
-void amd_iommu_msi_msg_update_ire(
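+/*
+ * Look up the IOMMU responsible for (seg, bdf): returns the IOMMU, NULL if
+ * the device is an IOMMU itself (its own MSI is not remapped, and
+ * PTR_ERR(NULL) lets the caller return success), or ERR_PTR(-EINVAL) if no
+ * IOMMU covers the device.
+ */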
+static struct amd_iommu *_find_iommu_for_device(int seg, int bdf)
+{
+ struct amd_iommu *iommu = find_iommu_for_device(seg, bdf);
+
+ if ( iommu )
+ return iommu;
+
+ list_for_each_entry ( iommu, &amd_iommu_head, list )
+ if ( iommu->seg == seg && iommu->bdf == bdf )
+ return NULL;
+
+ AMD_IOMMU_DEBUG("No IOMMU for MSI dev = %04x:%02x:%02x.%u\n",
+ seg, PCI_BUS(bdf), PCI_SLOT(bdf), PCI_FUNC(bdf));
+ return ERR_PTR(-EINVAL);
+}
+
+int amd_iommu_msi_msg_update_ire(
struct msi_desc *msi_desc, struct msi_msg *msg)
{
struct pci_dev *pdev = msi_desc->dev;
int bdf, seg;
struct amd_iommu *iommu;
- if ( !iommu_intremap )
- return;
-
bdf = pdev ? PCI_BDF2(pdev->bus, pdev->devfn) : hpet_sbdf.bdf;
seg = pdev ? pdev->seg : hpet_sbdf.seg;
- iommu = find_iommu_for_device(seg, bdf);
- if ( !iommu )
- {
- AMD_IOMMU_DEBUG("Fail to find iommu for MSI device id = %#x\n", bdf);
- return;
- }
+ iommu = _find_iommu_for_device(seg, bdf);
+ if ( IS_ERR_OR_NULL(iommu) )
+ return PTR_ERR(iommu);
if ( msi_desc->remap_index >= 0 )
{
}
if ( !msg )
- return;
+ return 0;
do {
update_intremap_entry_from_msi_msg(iommu, bdf, &msi_desc->remap_index,
break;
bdf += pdev->phantom_stride;
} while ( PCI_SLOT(bdf) == PCI_SLOT(pdev->devfn) );
+
+ return 0;
}
void amd_iommu_read_msi_from_ire(
const struct iommu_ops *ops = iommu_get_ops();
ops->update_ire_from_apic(apic, reg, value);
}
-void iommu_update_ire_from_msi(
+
+int iommu_update_ire_from_msi(
struct msi_desc *msi_desc, struct msi_msg *msg)
{
const struct iommu_ops *ops = iommu_get_ops();
- ops->update_ire_from_msi(msi_desc, msg);
+ return iommu_intremap ? ops->update_ire_from_msi(msi_desc, msg) : 0;
}
void iommu_read_msi_from_ire(
struct msi_desc *msi_desc, struct msi_msg *msg)
{
const struct iommu_ops *ops = iommu_get_ops();
- ops->read_msi_from_ire(msi_desc, msg);
+ if ( iommu_intremap )
+ ops->read_msi_from_ire(msi_desc, msg);
}
unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg)
struct msi_desc;
struct msi_msg;
void msi_msg_read_remap_rte(struct msi_desc *, struct msi_msg *);
-void msi_msg_write_remap_rte(struct msi_desc *, struct msi_msg *);
+int msi_msg_write_remap_rte(struct msi_desc *, struct msi_msg *);
int intel_setup_hpet_msi(struct msi_desc *);
remap_entry_to_msi_msg(drhd->iommu, msg);
}
-void msi_msg_write_remap_rte(
+int msi_msg_write_remap_rte(
struct msi_desc *msi_desc, struct msi_msg *msg)
{
struct pci_dev *pdev = msi_desc->dev;
drhd = pdev ? acpi_find_matched_drhd_unit(pdev)
: hpet_to_drhd(msi_desc->hpet_id);
- if ( drhd )
- msi_msg_to_remap_entry(drhd->iommu, pdev, msi_desc, msg);
+ return drhd ? msi_msg_to_remap_entry(drhd->iommu, pdev, msi_desc, msg)
+ : -EINVAL;
}
int __init intel_setup_hpet_msi(struct msi_desc *msi_desc)
int amd_iommu_free_intremap_table(u16 seg, struct ivrs_mappings *);
void amd_iommu_ioapic_update_ire(
unsigned int apic, unsigned int reg, unsigned int value);
-void amd_iommu_msi_msg_update_ire(
+int amd_iommu_msi_msg_update_ire(
struct msi_desc *msi_desc, struct msi_msg *msg);
void amd_iommu_read_msi_from_ire(
struct msi_desc *msi_desc, struct msi_msg *msg);
extern void pci_disable_msi(struct msi_desc *desc);
extern int pci_prepare_msix(u16 seg, u8 bus, u8 devfn, bool_t off);
extern void pci_cleanup_msi(struct pci_dev *pdev);
-extern void setup_msi_handler(struct irq_desc *, struct msi_desc *);
-extern void setup_msi_irq(struct irq_desc *);
+extern int setup_msi_irq(struct irq_desc *, struct msi_desc *);
extern void teardown_msi_irq(int irq);
extern int msi_free_vector(struct msi_desc *entry);
extern int pci_restore_msi_state(struct pci_dev *pdev);
u8 devfn, struct pci_dev *);
int (*get_device_group_id)(u16 seg, u8 bus, u8 devfn);
void (*update_ire_from_apic)(unsigned int apic, unsigned int reg, unsigned int value);
- void (*update_ire_from_msi)(struct msi_desc *msi_desc, struct msi_msg *msg);
+ int (*update_ire_from_msi)(struct msi_desc *msi_desc, struct msi_msg *msg);
void (*read_msi_from_ire)(struct msi_desc *msi_desc, struct msi_msg *msg);
unsigned int (*read_apic_from_ire)(unsigned int apic, unsigned int reg);
int (*setup_hpet_msi)(struct msi_desc *);
};
void iommu_update_ire_from_apic(unsigned int apic, unsigned int reg, unsigned int value);
-void iommu_update_ire_from_msi(struct msi_desc *msi_desc, struct msi_msg *msg);
+int iommu_update_ire_from_msi(struct msi_desc *msi_desc, struct msi_msg *msg);
void iommu_read_msi_from_ire(struct msi_desc *msi_desc, struct msi_msg *msg);
unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg);
int iommu_setup_hpet_msi(struct msi_desc *);