return dev_length;
}
-static DECLARE_BITMAP(ioapic_cmdline, ARRAY_SIZE(ioapic_sbdf)) __initdata;
-
static void __init parse_ivrs_ioapic(char *str)
{
const char *s = str;
unsigned long id;
unsigned int seg, bus, dev, func;
+ unsigned int idx;
ASSERT(*s == '[');
id = simple_strtoul(s + 1, &s, 0);
- if ( id >= ARRAY_SIZE(ioapic_sbdf) || *s != ']' || *++s != '=' )
+ if ( *s != ']' || *++s != '=' )
return;
s = parse_pci(s + 1, &seg, &bus, &dev, &func);
if ( !s || *s )
return;
- ioapic_sbdf[id].bdf = PCI_BDF(bus, dev, func);
- ioapic_sbdf[id].seg = seg;
- __set_bit(id, ioapic_cmdline);
+ idx = ioapic_id_to_index(id);
+ if ( idx == MAX_IO_APICS )
+ {
+ idx = get_next_ioapic_sbdf_index();
+ if ( idx == MAX_IO_APICS )
+ {
+ printk(XENLOG_ERR "Error: %s: Too many IO APICs.\n", __func__);
+ return;
+ }
+ }
+
+ ioapic_sbdf[idx].bdf = PCI_BDF(bus, dev, func);
+ ioapic_sbdf[idx].seg = seg;
+ ioapic_sbdf[idx].id = id;
+ ioapic_sbdf[idx].cmdline = true;
}
custom_param("ivrs_ioapic[", parse_ivrs_ioapic);
u16 header_length, u16 block_length, struct amd_iommu *iommu)
{
u16 dev_length, bdf;
- int apic;
+ unsigned int apic, idx;
dev_length = sizeof(*special);
if ( header_length < (block_length + dev_length) )
* consistency here --- whether entry's IOAPIC ID is valid and
* whether there are conflicting/duplicated entries.
*/
- apic = find_first_bit(ioapic_cmdline, ARRAY_SIZE(ioapic_sbdf));
- while ( apic < ARRAY_SIZE(ioapic_sbdf) )
+ for ( idx = 0; idx < nr_ioapic_sbdf; idx++ )
{
- if ( ioapic_sbdf[apic].bdf == bdf &&
- ioapic_sbdf[apic].seg == seg )
+ if ( ioapic_sbdf[idx].bdf == bdf &&
+ ioapic_sbdf[idx].seg == seg &&
+ ioapic_sbdf[idx].cmdline )
break;
- apic = find_next_bit(ioapic_cmdline, ARRAY_SIZE(ioapic_sbdf),
- apic + 1);
}
- if ( apic < ARRAY_SIZE(ioapic_sbdf) )
+ if ( idx < nr_ioapic_sbdf )
{
AMD_IOMMU_DEBUG("IVHD: Command line override present for IO-APIC %#x"
"(IVRS: %#x devID %04x:%02x:%02x.%u)\n",
- apic, special->handle, seg, PCI_BUS(bdf),
- PCI_SLOT(bdf), PCI_FUNC(bdf));
+ ioapic_sbdf[idx].id, special->handle, seg,
+ PCI_BUS(bdf), PCI_SLOT(bdf), PCI_FUNC(bdf));
break;
}
if ( IO_APIC_ID(apic) != special->handle )
continue;
- if ( special->handle >= ARRAY_SIZE(ioapic_sbdf) )
- {
- printk(XENLOG_ERR "IVHD Error: IO-APIC %#x entry beyond bounds\n",
- special->handle);
- return 0;
- }
-
- if ( test_bit(special->handle, ioapic_cmdline) )
+ idx = ioapic_id_to_index(special->handle);
+ if ( idx != MAX_IO_APICS && ioapic_sbdf[idx].cmdline )
AMD_IOMMU_DEBUG("IVHD: Command line override present for IO-APIC %#x\n",
special->handle);
- else if ( ioapic_sbdf[special->handle].pin_2_idx )
+ else if ( idx != MAX_IO_APICS && ioapic_sbdf[idx].pin_2_idx )
{
- if ( ioapic_sbdf[special->handle].bdf == bdf &&
- ioapic_sbdf[special->handle].seg == seg )
+ if ( ioapic_sbdf[idx].bdf == bdf &&
+ ioapic_sbdf[idx].seg == seg )
AMD_IOMMU_DEBUG("IVHD Warning: Duplicate IO-APIC %#x entries\n",
special->handle);
else
}
else
{
+ idx = get_next_ioapic_sbdf_index();
+ if ( idx == MAX_IO_APICS )
+ {
+ printk(XENLOG_ERR "IVHD Error: Too many IO APICs.\n");
+ return 0;
+ }
+
/* set device id of ioapic */
- ioapic_sbdf[special->handle].bdf = bdf;
- ioapic_sbdf[special->handle].seg = seg;
+ ioapic_sbdf[idx].bdf = bdf;
+ ioapic_sbdf[idx].seg = seg;
+ ioapic_sbdf[idx].id = special->handle;
- ioapic_sbdf[special->handle].pin_2_idx = xmalloc_array(
+ ioapic_sbdf[idx].pin_2_idx = xmalloc_array(
u16, nr_ioapic_entries[apic]);
if ( nr_ioapic_entries[apic] &&
- !ioapic_sbdf[IO_APIC_ID(apic)].pin_2_idx )
+ !ioapic_sbdf[idx].pin_2_idx )
{
printk(XENLOG_ERR "IVHD Error: Out of memory\n");
return 0;
}
- memset(ioapic_sbdf[IO_APIC_ID(apic)].pin_2_idx, -1,
+ memset(ioapic_sbdf[idx].pin_2_idx, -1,
nr_ioapic_entries[apic] *
sizeof(*ioapic_sbdf->pin_2_idx));
}
/* Each IO-APIC must have been mentioned in the table. */
for ( apic = 0; !error && iommu_intremap && apic < nr_ioapics; ++apic )
{
- if ( !nr_ioapic_entries[apic] )
- continue;
-
- if ( !ioapic_sbdf[IO_APIC_ID(apic)].seg &&
- /* SB IO-APIC is always on this device in AMD systems. */
- ioapic_sbdf[IO_APIC_ID(apic)].bdf == PCI_BDF(0, 0x14, 0) )
- sb_ioapic = 1;
+ unsigned int idx;
- if ( ioapic_sbdf[IO_APIC_ID(apic)].pin_2_idx )
+ if ( !nr_ioapic_entries[apic] )
continue;
- if ( !test_bit(IO_APIC_ID(apic), ioapic_cmdline) )
+ idx = ioapic_id_to_index(IO_APIC_ID(apic));
+ if ( idx == MAX_IO_APICS )
{
printk(XENLOG_ERR "IVHD Error: no information for IO-APIC %#x\n",
IO_APIC_ID(apic));
return -ENXIO;
}
- ioapic_sbdf[IO_APIC_ID(apic)].pin_2_idx = xmalloc_array(
+ if ( !ioapic_sbdf[idx].seg &&
+ /* SB IO-APIC is always on this device in AMD systems. */
+ ioapic_sbdf[idx].bdf == PCI_BDF(0, 0x14, 0) )
+ sb_ioapic = 1;
+
+ if ( ioapic_sbdf[idx].pin_2_idx )
+ continue;
+
+ ioapic_sbdf[idx].pin_2_idx = xmalloc_array(
u16, nr_ioapic_entries[apic]);
- if ( ioapic_sbdf[IO_APIC_ID(apic)].pin_2_idx )
- memset(ioapic_sbdf[IO_APIC_ID(apic)].pin_2_idx, -1,
+ if ( ioapic_sbdf[idx].pin_2_idx )
+ memset(ioapic_sbdf[idx].pin_2_idx, -1,
nr_ioapic_entries[apic] * sizeof(*ioapic_sbdf->pin_2_idx));
else
{
void *shared_intremap_table;
unsigned long *shared_intremap_inuse;
static DEFINE_SPINLOCK(shared_intremap_lock);
+unsigned int nr_ioapic_sbdf;
static void dump_intremap_tables(unsigned char key);
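+/* Find the ioapic_sbdf[] index registered for this IO-APIC ID, or MAX_IO_APICS. */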
+unsigned int ioapic_id_to_index(unsigned int apic_id)
+{
+ unsigned int idx;
+
+ for ( idx = 0; idx < nr_ioapic_sbdf; idx++ )
+ if ( ioapic_sbdf[idx].id == apic_id )
+ break;
+
+ if ( idx == nr_ioapic_sbdf )
+ return MAX_IO_APICS;
+
+ return idx;
+}
+
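+/* Reserve the next free ioapic_sbdf[] slot; returns MAX_IO_APICS when full. */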
+unsigned int __init get_next_ioapic_sbdf_index(void)
+{
+ if ( nr_ioapic_sbdf < MAX_IO_APICS )
+ return nr_ioapic_sbdf++;
+
+ return MAX_IO_APICS;
+}
+
static spinlock_t* get_intremap_lock(int seg, int req_id)
{
return (amd_iommu_perdev_intremap ?
{
for ( pin = 0; pin < nr_ioapic_entries[apic]; pin++ )
{
+ unsigned int idx;
+
rte = __ioapic_read_entry(apic, pin, 1);
if ( rte.mask == 1 )
continue;
/* get device id of ioapic devices */
- bdf = ioapic_sbdf[IO_APIC_ID(apic)].bdf;
- seg = ioapic_sbdf[IO_APIC_ID(apic)].seg;
+ idx = ioapic_id_to_index(IO_APIC_ID(apic));
+ if ( idx == MAX_IO_APICS )
+ return -EINVAL;
+
+ bdf = ioapic_sbdf[idx].bdf;
+ seg = ioapic_sbdf[idx].seg;
iommu = find_iommu_for_device(seg, bdf);
if ( !iommu )
{
spin_unlock_irqrestore(lock, flags);
set_rte_index(&rte, offset);
- ioapic_sbdf[IO_APIC_ID(apic)].pin_2_idx[pin] = offset;
+ ioapic_sbdf[idx].pin_2_idx[pin] = offset;
__ioapic_write_entry(apic, pin, 1, rte);
if ( iommu->enabled )
unsigned int pin = (reg - 0x10) / 2;
int saved_mask, seg, bdf, rc;
struct amd_iommu *iommu;
+ unsigned int idx;
if ( !iommu_intremap )
{
return;
}
+ idx = ioapic_id_to_index(IO_APIC_ID(apic));
+ if ( idx == MAX_IO_APICS )
+ return;
+
/* get device id of ioapic devices */
- bdf = ioapic_sbdf[IO_APIC_ID(apic)].bdf;
- seg = ioapic_sbdf[IO_APIC_ID(apic)].seg;
+ bdf = ioapic_sbdf[idx].bdf;
+ seg = ioapic_sbdf[idx].seg;
iommu = find_iommu_for_device(seg, bdf);
if ( !iommu )
{
}
if ( new_rte.mask &&
- ioapic_sbdf[IO_APIC_ID(apic)].pin_2_idx[pin] >= INTREMAP_ENTRIES )
+ ioapic_sbdf[idx].pin_2_idx[pin] >= INTREMAP_ENTRIES )
{
ASSERT(saved_mask);
__io_apic_write(apic, reg, value);
/* Update interrupt remapping entry */
rc = update_intremap_entry_from_ioapic(
bdf, iommu, &new_rte, reg == rte_lo,
- &ioapic_sbdf[IO_APIC_ID(apic)].pin_2_idx[pin]);
+ &ioapic_sbdf[idx].pin_2_idx[pin]);
__io_apic_write(apic, reg, ((u32 *)&new_rte)[reg != rte_lo]);
unsigned int amd_iommu_read_ioapic_from_ire(
unsigned int apic, unsigned int reg)
{
+ unsigned int idx;
+ unsigned int offset;
unsigned int val = __io_apic_read(apic, reg);
unsigned int pin = (reg - 0x10) / 2;
- unsigned int offset = ioapic_sbdf[IO_APIC_ID(apic)].pin_2_idx[pin];
+
+ idx = ioapic_id_to_index(IO_APIC_ID(apic));
+ if ( idx == MAX_IO_APICS )
+ return -EINVAL;
+
+ offset = ioapic_sbdf[idx].pin_2_idx[pin];
if ( !(reg & 1) && offset < INTREMAP_ENTRIES )
{
- u16 bdf = ioapic_sbdf[IO_APIC_ID(apic)].bdf;
- u16 seg = ioapic_sbdf[IO_APIC_ID(apic)].seg;
+ u16 bdf = ioapic_sbdf[idx].bdf;
+ u16 seg = ioapic_sbdf[idx].seg;
u16 req_id = get_intremap_requestor_id(seg, bdf);
const u32 *entry = get_intremap_entry(seg, req_id, offset);
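Not shown in this excerpt are the companion declarations the hunks above rely on. Inferring the types from their uses here (pin_2_idx allocated as a u16 array and memset to -1, bdf/seg assigned from PCI_BDF() and seg, cmdline set to true), the header side is assumed to look roughly like:

/* Sketch only -- assumed declarations, inferred from the uses in this patch. */
extern struct ioapic_sbdf {
    u16 bdf, seg;
    u16 id;          /* IO-APIC ID, matched by ioapic_id_to_index() */
    bool cmdline;    /* true when set via ivrs_ioapic[<id>]= */
    u16 *pin_2_idx;  /* per-pin remapping-table indices, 0xffff = unused */
} ioapic_sbdf[MAX_IO_APICS];

extern unsigned int nr_ioapic_sbdf;

unsigned int ioapic_id_to_index(unsigned int apic_id);
unsigned int get_next_ioapic_sbdf_index(void);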