};
static void __init add_ivrs_mapping_entry(
- u16 bdf, u16 alias_id, u8 flags, struct amd_iommu *iommu)
+ uint16_t bdf, uint16_t alias_id, uint8_t flags, bool alloc_irt,
+ struct amd_iommu *iommu)
{
struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(iommu->seg);
if ( iommu->bdf == bdf )
return;
- if ( !ivrs_mappings[alias_id].intremap_table )
+ /* Allocate interrupt remapping table if needed. */
+ if ( iommu_intremap && !ivrs_mappings[alias_id].intremap_table )
{
- /* allocate per-device interrupt remapping table */
- if ( amd_iommu_perdev_intremap )
- ivrs_mappings[alias_id].intremap_table =
- amd_iommu_alloc_intremap_table(
- iommu,
- &ivrs_mappings[alias_id].intremap_inuse);
- else
- {
- if ( shared_intremap_table == NULL )
- shared_intremap_table = amd_iommu_alloc_intremap_table(
- iommu,
- &shared_intremap_inuse);
- ivrs_mappings[alias_id].intremap_table = shared_intremap_table;
- ivrs_mappings[alias_id].intremap_inuse = shared_intremap_inuse;
- }
-
- if ( !ivrs_mappings[alias_id].intremap_table )
- panic("No memory for %04x:%02x:%02x.%u's IRT\n", iommu->seg,
- PCI_BUS(alias_id), PCI_SLOT(alias_id), PCI_FUNC(alias_id));
+ if ( !amd_iommu_perdev_intremap )
+ {
+ if ( !shared_intremap_table )
+ shared_intremap_table = amd_iommu_alloc_intremap_table(
+ iommu, &shared_intremap_inuse);
+
+ if ( !shared_intremap_table )
+ panic("No memory for shared IRT\n");
+
+ ivrs_mappings[alias_id].intremap_table = shared_intremap_table;
+ ivrs_mappings[alias_id].intremap_inuse = shared_intremap_inuse;
+ }
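+        /*
+         * Per-device IRTs get set up right away only when explicitly
+         * requested (alias and special device entries); otherwise the
+         * allocation is deferred until the device is known to exist.
+         */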
+ else if ( alloc_irt )
+ {
+ ivrs_mappings[alias_id].intremap_table =
+ amd_iommu_alloc_intremap_table(
+ iommu, &ivrs_mappings[alias_id].intremap_inuse);
+
+ if ( !ivrs_mappings[alias_id].intremap_table )
+ panic("No memory for %04x:%02x:%02x.%u's IRT\n",
+ iommu->seg, PCI_BUS(alias_id), PCI_SLOT(alias_id),
+ PCI_FUNC(alias_id));
+ }
}
ivrs_mappings[alias_id].valid = true;
return 0;
}
- add_ivrs_mapping_entry(bdf, bdf, select->header.data_setting, iommu);
+ add_ivrs_mapping_entry(bdf, bdf, select->header.data_setting, false,
+ iommu);
return sizeof(*select);
}
for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
add_ivrs_mapping_entry(bdf, bdf, range->start.header.data_setting,
- iommu);
+ false, iommu);
return dev_length;
}
AMD_IOMMU_DEBUG(" Dev_Id Alias: %#x\n", alias_id);
- add_ivrs_mapping_entry(bdf, alias_id, alias->header.data_setting, iommu);
+ add_ivrs_mapping_entry(bdf, alias_id, alias->header.data_setting, true,
+ iommu);
return dev_length;
}
for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
add_ivrs_mapping_entry(bdf, alias_id, range->alias.header.data_setting,
- iommu);
+ true, iommu);
return dev_length;
}
return 0;
}
- add_ivrs_mapping_entry(bdf, bdf, ext->header.data_setting, iommu);
+ add_ivrs_mapping_entry(bdf, bdf, ext->header.data_setting, false, iommu);
return dev_length;
}
for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
add_ivrs_mapping_entry(bdf, bdf, range->extended.header.data_setting,
- iommu);
+ false, iommu);
return dev_length;
}
AMD_IOMMU_DEBUG("IVHD Special: %04x:%02x:%02x.%u variety %#x handle %#x\n",
seg, PCI_BUS(bdf), PCI_SLOT(bdf), PCI_FUNC(bdf),
special->variety, special->handle);
- add_ivrs_mapping_entry(bdf, bdf, special->header.data_setting, iommu);
+ add_ivrs_mapping_entry(bdf, bdf, special->header.data_setting, true,
+ iommu);
switch ( special->variety )
{
#include <xen/delay.h>
static int __initdata nr_amd_iommus;
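+/* Whether PCI device data can be consulted already. */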
+static bool __initdata pci_init;
static void do_amd_iommu_irq(unsigned long data);
static DECLARE_SOFTIRQ_TASKLET(amd_iommu_irq_tasklet, do_amd_iommu_irq, 0);
BUG_ON( (ivrs_bdf_entries == 0) );
- /* allocate 'device table' on a 4K boundary */
- device_table.alloc_size = PAGE_SIZE <<
- get_order_from_bytes(
- PAGE_ALIGN(ivrs_bdf_entries *
- IOMMU_DEV_TABLE_ENTRY_SIZE));
- device_table.entries = device_table.alloc_size /
- IOMMU_DEV_TABLE_ENTRY_SIZE;
-
- device_table.buffer = allocate_buffer(device_table.alloc_size,
- "Device Table");
- if ( device_table.buffer == NULL )
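+    /*
+     * The device table may already have been allocated by an earlier
+     * invocation; set it up only once.
+     */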
+ if ( !device_table.buffer )
+ {
+ /* allocate 'device table' on a 4K boundary */
+ device_table.alloc_size = PAGE_SIZE <<
+ get_order_from_bytes(
+ PAGE_ALIGN(ivrs_bdf_entries *
+ IOMMU_DEV_TABLE_ENTRY_SIZE));
+ device_table.entries = device_table.alloc_size /
+ IOMMU_DEV_TABLE_ENTRY_SIZE;
+
+ device_table.buffer = allocate_buffer(device_table.alloc_size,
+ "Device Table");
+ }
+ if ( !device_table.buffer )
return -ENOMEM;
/* Add device table entries */
if ( ivrs_mappings[bdf].valid )
{
void *dte;
+ const struct pci_dev *pdev = NULL;
/* add device table entry */
dte = device_table.buffer + (bdf * IOMMU_DEV_TABLE_ENTRY_SIZE);
iommu_dte_add_device_entry(dte, &ivrs_mappings[bdf]);
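+            /*
+             * If no IRT was set up from the IVRS tables, check whether the
+             * device is actually present before allocating one. This
+             * requires PCI device data, which may not be available yet.
+             */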
+ if ( iommu_intremap &&
+ ivrs_mappings[bdf].dte_requestor_id == bdf &&
+ !ivrs_mappings[bdf].intremap_table )
+ {
+ if ( !pci_init )
+ continue;
+ pcidevs_lock();
+ pdev = pci_get_pdev(seg, PCI_BUS(bdf), PCI_DEVFN2(bdf));
+ pcidevs_unlock();
+ }
+
+ if ( pdev )
+ {
+ unsigned int req_id = bdf;
+
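+                /*
+                 * Allocate an IRT for the device and for each of its
+                 * phantom functions, which use distinct requestor IDs
+                 * within the same slot.
+                 */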
+ do {
+ ivrs_mappings[req_id].intremap_table =
+ amd_iommu_alloc_intremap_table(
+ ivrs_mappings[bdf].iommu,
+ &ivrs_mappings[req_id].intremap_inuse);
+ if ( !ivrs_mappings[req_id].intremap_table )
+ return -ENOMEM;
+
+ if ( !pdev->phantom_stride )
+ break;
+ req_id += pdev->phantom_stride;
+ } while ( PCI_SLOT(req_id) == pdev->sbdf.dev );
+ }
+
amd_iommu_set_intremap_table(
- dte, virt_to_maddr(ivrs_mappings[bdf].intremap_table),
+ dte,
+ ivrs_mappings[bdf].intremap_table
+ ? virt_to_maddr(ivrs_mappings[bdf].intremap_table)
+ : 0,
iommu_intremap);
}
}
if ( rc )
goto error_out;
- /* allocate and initialize a global device table shared by all iommus */
+ /* Allocate and initialize device table(s). */
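+    /*
+     * When coming here early for x2APIC enabling, PCI device data isn't
+     * available yet; per-device IRT allocation then gets deferred to
+     * amd_iommu_init_late().
+     */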
+ pci_init = !xt;
rc = iterate_ivrs_mappings(amd_iommu_setup_device_table);
if ( rc )
goto error_out;
/*
* Setting up of the IOMMU interrupts cannot occur yet at the (very
* early) time we get here when enabling x2APIC mode. Suppress it
- * here, and do it explicitly in amd_iommu_init_interrupt().
+ * here, and do it explicitly in amd_iommu_init_late().
*/
rc = amd_iommu_init_one(iommu, !xt);
if ( rc )
return rc;
}
-int __init amd_iommu_init_interrupt(void)
+int __init amd_iommu_init_late(void)
{
struct amd_iommu *iommu;
int rc = 0;
+ /* Further initialize the device table(s). */
+ pci_init = true;
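+    /* With PCI devices enumerated by now, IRTs can be set up for those present. */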
+ if ( iommu_intremap )
+ rc = iterate_ivrs_mappings(amd_iommu_setup_device_table);
+
for_each_amd_iommu ( iommu )
{
struct irq_desc *desc;
}
}
-int __init amd_iommu_free_intremap_table(
+int amd_iommu_free_intremap_table(
const struct amd_iommu *iommu, struct ivrs_mappings *ivrs_mapping)
{
void **tblp;
return 0;
}
-void *__init amd_iommu_alloc_intremap_table(
+void *amd_iommu_alloc_intremap_table(
const struct amd_iommu *iommu, unsigned long **inuse_map)
{
unsigned int order = intremap_table_order(iommu);
struct amd_iommu_dte *dte, uint64_t intremap_ptr, bool valid)
{
dte->it_root = intremap_ptr >> 6;
- dte->int_tab_len = IOMMU_INTREMAP_ORDER;
- dte->int_ctl = IOMMU_DEV_TABLE_INT_CONTROL_TRANSLATED;
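+    /* A zero table address means interrupts from the device get aborted. */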
+ dte->int_tab_len = intremap_ptr ? IOMMU_INTREMAP_ORDER : 0;
+ dte->int_ctl = intremap_ptr ? IOMMU_DEV_TABLE_INT_CONTROL_TRANSLATED
+ : IOMMU_DEV_TABLE_INT_CONTROL_ABORTED;
dte->ig = false; /* unmapped interrupts result in i/o page faults */
dte->iv = valid;
}
if ( !iommu_enable && !iommu_intremap )
return 0;
- if ( (init_done ? amd_iommu_init_interrupt()
+ if ( (init_done ? amd_iommu_init_late()
: amd_iommu_init(false)) != 0 )
{
printk("AMD-Vi: Error initialization\n");
{
struct amd_iommu *iommu;
u16 bdf;
+ struct ivrs_mappings *ivrs_mappings;
if ( !pdev->domain )
return -EINVAL;
return -ENODEV;
}
+ ivrs_mappings = get_ivrs_mappings(pdev->seg);
+ bdf = PCI_BDF2(pdev->bus, devfn);
+ if ( !ivrs_mappings ||
+ !ivrs_mappings[ivrs_mappings[bdf].dte_requestor_id].valid )
+ return -EPERM;
+
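+    /*
+     * The device may not have an IRT yet, e.g. because it wasn't present
+     * when the device table was set up. Allocate one now and point the
+     * DTE at it.
+     */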
+ if ( iommu_intremap &&
+ ivrs_mappings[bdf].dte_requestor_id == bdf &&
+ !ivrs_mappings[bdf].intremap_table )
+ {
+ unsigned long flags;
+
+ ivrs_mappings[bdf].intremap_table =
+ amd_iommu_alloc_intremap_table(
+ iommu, &ivrs_mappings[bdf].intremap_inuse);
+ if ( !ivrs_mappings[bdf].intremap_table )
+ return -ENOMEM;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ amd_iommu_set_intremap_table(
+ iommu->dev_table.buffer + (bdf * IOMMU_DEV_TABLE_ENTRY_SIZE),
+ virt_to_maddr(ivrs_mappings[bdf].intremap_table),
+ iommu_intremap);
+
+ amd_iommu_flush_device(iommu, bdf);
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ }
+
amd_iommu_setup_domain_device(pdev->domain, iommu, devfn, pdev);
return 0;
}
{
struct amd_iommu *iommu;
u16 bdf;
+ struct ivrs_mappings *ivrs_mappings;
+
if ( !pdev->domain )
return -EINVAL;
}
amd_iommu_disable_domain_device(pdev->domain, iommu, devfn, pdev);
+
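+    /* Free the device's per-device IRT; shared tables are left in place. */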
+ ivrs_mappings = get_ivrs_mappings(pdev->seg);
+ bdf = PCI_BDF2(pdev->bus, devfn);
+ if ( amd_iommu_perdev_intremap &&
+ ivrs_mappings[bdf].dte_requestor_id == bdf &&
+ ivrs_mappings[bdf].intremap_table )
+ amd_iommu_free_intremap_table(iommu, &ivrs_mappings[bdf]);
+
return 0;
}
/* amd-iommu-init functions */
int amd_iommu_prepare(bool xt);
int amd_iommu_init(bool xt);
-int amd_iommu_init_interrupt(void);
+int amd_iommu_init_late(void);
int amd_iommu_update_ivrs_mapping_acpi(void);
int iov_adjust_irq_affinities(void);