return 1;
}
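+/*
+ * Re-program each IOMMU's interrupt, using the x2APIC message format when
+ * that mode was enabled for the IOMMU and plain MSI otherwise.  Registered
+ * both as an initcall and as the .adjust_irq_affinities hook.
+ */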
+int iov_adjust_irq_affinities(void)
+{
+ const struct amd_iommu *iommu;
+
+ if ( !iommu_enabled )
+ return 0;
+
+ for_each_amd_iommu ( iommu )
+ {
+ struct irq_desc *desc = irq_to_desc(iommu->msi.irq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ if ( iommu->ctrl.int_cap_xt_en )
+ set_x2apic_affinity(desc, NULL);
+ else
+ set_msi_affinity(desc, NULL);
+ spin_unlock_irqrestore(&desc->lock, flags);
+ }
+
+ return 0;
+}
+__initcall(iov_adjust_irq_affinities);
+
/*
* Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
* Workaround:
IOMMU_PPR_LOG_DEFAULT_ENTRIES, "PPR Log");
}
-static int __init amd_iommu_init_one(struct amd_iommu *iommu)
+static int __init amd_iommu_init_one(struct amd_iommu *iommu, bool intr)
{
if ( allocate_cmd_buffer(iommu) == NULL )
goto error_out;
if ( iommu->features.flds.ppr_sup && !allocate_ppr_log(iommu) )
goto error_out;
- if ( !set_iommu_interrupt_handler(iommu) )
+ if ( intr && !set_iommu_interrupt_handler(iommu) )
goto error_out;
/* To make sure that device_table.buffer has been successfully allocated */
list_for_each_entry_safe ( iommu, next, &amd_iommu_head, list )
{
list_del(&iommu->list);
+
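+        /*
+         * Clear the x2APIC related control bits amd_iommu_prepare() may have
+         * set in the cached control value; for an IOMMU which never got
+         * enabled but has its MMIO space mapped, the cleared value gets
+         * flushed to hardware directly below.
+         */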
+ iommu->ctrl.ga_en = 0;
+ iommu->ctrl.xt_en = 0;
+ iommu->ctrl.int_cap_xt_en = 0;
+
if ( iommu->enabled )
disable_iommu(iommu);
+ else if ( iommu->mmio_base )
+ writeq(iommu->ctrl.raw,
+ iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
deallocate_ring_buffer(&iommu->cmd_buffer);
deallocate_ring_buffer(&iommu->event_log);
return 0;
}
-int __init amd_iommu_init(void)
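+/*
+ * First-stage setup: parse the ACPI (IVRS) information and, when x2APIC (XT)
+ * mode is requested, record that mode in every IOMMU's cached control value.
+ * Safe to call more than once; subsequent invocations are no-ops.
+ */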
+int __init amd_iommu_prepare(bool xt)
{
struct amd_iommu *iommu;
int rc = -ENODEV;
if ( unlikely(acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) )
goto error_out;
+ /* Have we been here before? */
+ if ( ivhd_type )
+ return 0;
+
rc = amd_iommu_get_supported_ivhd_type();
if ( rc < 0 )
goto error_out;
+ BUG_ON(!rc);
ivhd_type = rc;
rc = amd_iommu_get_ivrs_dev_entries();
rc = amd_iommu_prepare_one(iommu);
if ( rc )
goto error_out;
+
+ rc = -ENODEV;
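+        /* x2APIC (XT) mode requires both GA and XT support in every IOMMU. */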
+ if ( xt && (!iommu->features.flds.ga_sup || !iommu->features.flds.xt_sup) )
+ goto error_out;
+ }
+
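+    /* Record the requested mode in each IOMMU's cached control value. */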
+ for_each_amd_iommu ( iommu )
+ {
+ /* NB: There's no need to actually write these out right here. */
+ iommu->ctrl.ga_en |= xt;
+ iommu->ctrl.xt_en = xt;
+ iommu->ctrl.int_cap_xt_en = xt;
}
rc = amd_iommu_update_ivrs_mapping_acpi();
+
+ error_out:
+ if ( rc )
+ {
+ amd_iommu_init_cleanup();
+ ivhd_type = 0;
+ }
+
+ return rc;
+}
+
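+/*
+ * Full initialization.  When invoked with xt set (i.e. early, for x2APIC
+ * enabling), interrupt setup is deferred to amd_iommu_init_interrupt().
+ */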
+int __init amd_iommu_init(bool xt)
+{
+ struct amd_iommu *iommu;
+ int rc = amd_iommu_prepare(xt);
+
if ( rc )
goto error_out;
/* per iommu initialization */
for_each_amd_iommu ( iommu )
{
- rc = amd_iommu_init_one(iommu);
+ /*
+ * Setting up of the IOMMU interrupts cannot occur yet at the (very
+ * early) time we get here when enabling x2APIC mode. Suppress it
+ * here, and do it explicitly in amd_iommu_init_interrupt().
+ */
+ rc = amd_iommu_init_one(iommu, !xt);
if ( rc )
goto error_out;
}
return rc;
}
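+/*
+ * Second-stage setup for the x2APIC case: establish the per-IOMMU interrupts
+ * which amd_iommu_init() skipped, and only then enable event (and PPR)
+ * logging.
+ */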
+int __init amd_iommu_init_interrupt(void)
+{
+ struct amd_iommu *iommu;
+ int rc = 0;
+
+ for_each_amd_iommu ( iommu )
+ {
+ struct irq_desc *desc;
+
+ if ( !set_iommu_interrupt_handler(iommu) )
+ {
+ rc = -EIO;
+ break;
+ }
+
+ desc = irq_to_desc(iommu->msi.irq);
+
+ spin_lock(&desc->lock);
+ ASSERT(iommu->ctrl.int_cap_xt_en);
+ set_x2apic_affinity(desc, &cpu_online_map);
+ spin_unlock(&desc->lock);
+
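+        /* Enable event (and, where supported, PPR) logging now that the
+           interrupt reporting their entries is set up. */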
+ set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
+
+ if ( iommu->features.flds.ppr_sup )
+ set_iommu_ppr_log_control(iommu, IOMMU_CONTROL_ENABLED);
+ }
+
+ if ( rc )
+ amd_iommu_init_cleanup();
+
+ return rc;
+}
+
static void invalidate_all_domain_pages(void)
{
struct domain *d;
if ( !iommu_enable && !iommu_intremap )
return 0;
- if ( amd_iommu_init() != 0 )
+ else if ( (init_done ? amd_iommu_init_interrupt()
+ : amd_iommu_init(false)) != 0 )
{
printk("AMD-Vi: Error initialization\n");
return -ENODEV;
return 0;
}
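+/*
+ * Hook (.enable_x2apic) called when the system wants to switch the local
+ * APICs to x2APIC mode; do the early IOMMU initialization this requires.
+ */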
+static int iov_enable_xt(void)
+{
+ int rc;
+
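+    /* Once the system is up, (re-)invocations of this hook have nothing left
+       to do. */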
+ if ( system_state >= SYS_STATE_active )
+ return 0;
+
+ if ( (rc = amd_iommu_init(true)) != 0 )
+ {
+ printk("AMD-Vi: Error %d initializing for x2APIC mode\n", rc);
+ /* -ENXIO has special meaning to the caller - convert it. */
+ return rc != -ENXIO ? rc : -ENODATA;
+ }
+
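+    /* Tell iov_detect() that amd_iommu_init() has already run. */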
+ init_done = true;
+
+ return 0;
+}
+
int amd_iommu_alloc_root(struct domain_iommu *hd)
{
if ( unlikely(!hd->arch.root_table) )
.free_page_table = deallocate_page_table,
.reassign_device = reassign_device,
.get_device_group_id = amd_iommu_group_id,
+ .enable_x2apic = iov_enable_xt,
.update_ire_from_apic = amd_iommu_ioapic_update_ire,
.update_ire_from_msi = amd_iommu_msi_msg_update_ire,
.read_apic_from_ire = amd_iommu_read_ioapic_from_ire,
.read_msi_from_ire = amd_iommu_read_msi_from_ire,
.setup_hpet_msi = amd_setup_hpet_msi,
+ .adjust_irq_affinities = iov_adjust_irq_affinities,
.suspend = amd_iommu_suspend,
.resume = amd_iommu_resume,
.crash_shutdown = amd_iommu_crash_shutdown,
static const struct iommu_init_ops __initconstrel _iommu_init_ops = {
.ops = &_iommu_ops,
.setup = iov_detect,
+ .supports_x2apic = iov_supports_xt,
};
void get_iommu_features(struct amd_iommu *iommu);
/* amd-iommu-init functions */
-int amd_iommu_init(void);
+int amd_iommu_prepare(bool xt);
+int amd_iommu_init(bool xt);
+int amd_iommu_init_interrupt(void);
int amd_iommu_update_ivrs_mapping_acpi(void);
+int iov_adjust_irq_affinities(void);
/* mapping functions */
int __must_check amd_iommu_map_page(struct domain *d, dfn_t dfn,
struct amd_iommu *find_iommu_for_device(int seg, int bdf);
/* interrupt remapping */
+bool iov_supports_xt(void);
int amd_iommu_setup_ioapic_remapping(void);
void *amd_iommu_alloc_intremap_table(
const struct amd_iommu *, unsigned long **);