"ioport_map:add: dom%d gport=%x mport=%x nr=%x\n",
d->domain_id, fgp, fmp, np);
- list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
+ list_for_each_entry(g2m_ioport, &hd->arch.g2m_ioport_list, list)
if (g2m_ioport->mport == fmp )
{
g2m_ioport->gport = fgp;
g2m_ioport->gport = fgp;
g2m_ioport->mport = fmp;
g2m_ioport->np = np;
- list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
+ list_add_tail(&g2m_ioport->list, &hd->arch.g2m_ioport_list);
}
if ( !ret )
ret = ioports_permit_access(d, fmp, fmp + np - 1);
printk(XENLOG_G_INFO
"ioport_map:remove: dom%d gport=%x mport=%x nr=%x\n",
d->domain_id, fgp, fmp, np);
- list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
+ list_for_each_entry(g2m_ioport, &hd->arch.g2m_ioport_list, list)
if ( g2m_ioport->mport == fmp )
{
list_del(&g2m_ioport->list);
unsigned int s = 0, e = 0;
int rc;
- list_for_each_entry( g2m_ioport, &hd->g2m_ioport_list, list )
+ list_for_each_entry( g2m_ioport, &hd->arch.g2m_ioport_list, list )
{
s = g2m_ioport->gport;
e = s + g2m_ioport->np;
if ( !is_idle_domain(d) )
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
- update_iommu_mac(&ctx, hd->pgd_maddr, agaw_to_level(hd->agaw));
+ update_iommu_mac(&ctx, hd->arch.pgd_maddr,
+ agaw_to_level(hd->arch.agaw));
}
}
static inline struct guest_iommu *domain_iommu(struct domain *d)
{
- return domain_hvm_iommu(d)->g_iommu;
+ return domain_hvm_iommu(d)->arch.g_iommu;
}
static inline struct guest_iommu *vcpu_iommu(struct vcpu *v)
{
- return domain_hvm_iommu(v->domain)->g_iommu;
+ return domain_hvm_iommu(v->domain)->arch.g_iommu;
}
static void guest_iommu_enable(struct guest_iommu *iommu)
guest_iommu_reg_init(iommu);
iommu->domain = d;
- hd->g_iommu = iommu;
+ hd->arch.g_iommu = iommu;
tasklet_init(&iommu->cmd_buffer_tasklet,
guest_iommu_process_command, (unsigned long)d);
tasklet_kill(&iommu->cmd_buffer_tasklet);
xfree(iommu);
- domain_hvm_iommu(d)->g_iommu = NULL;
+ domain_hvm_iommu(d)->arch.g_iommu = NULL;
}
static int guest_iommu_mmio_range(struct vcpu *v, unsigned long addr)
struct hvm_iommu *hd = domain_hvm_iommu(d);
bool_t ok = 0;
- ASSERT( spin_is_locked(&hd->mapping_lock) && pt_mfn );
+ ASSERT( spin_is_locked(&hd->arch.mapping_lock) && pt_mfn );
next_level = merge_level - 1;
unsigned long first_mfn;
struct hvm_iommu *hd = domain_hvm_iommu(d);
- ASSERT( spin_is_locked(&hd->mapping_lock) && pt_mfn );
+ ASSERT( spin_is_locked(&hd->arch.mapping_lock) && pt_mfn );
table = map_domain_page(pt_mfn);
pde = table + pfn_to_pde_idx(gfn, merge_level);
struct page_info *table;
struct hvm_iommu *hd = domain_hvm_iommu(d);
- table = hd->root_table;
- level = hd->paging_mode;
+ table = hd->arch.root_table;
+ level = hd->arch.paging_mode;
BUG_ON( table == NULL || level < IOMMU_PAGING_MODE_LEVEL_1 ||
level > IOMMU_PAGING_MODE_LEVEL_6 );
unsigned long old_root_mfn;
struct hvm_iommu *hd = domain_hvm_iommu(d);
- level = hd->paging_mode;
- old_root = hd->root_table;
+ level = hd->arch.paging_mode;
+ old_root = hd->arch.root_table;
offset = gfn >> (PTE_PER_TABLE_SHIFT * (level - 1));
- ASSERT(spin_is_locked(&hd->mapping_lock) && is_hvm_domain(d));
+ ASSERT(spin_is_locked(&hd->arch.mapping_lock) && is_hvm_domain(d));
while ( offset >= PTE_PER_TABLE_SIZE )
{
if ( new_root != NULL )
{
- hd->paging_mode = level;
- hd->root_table = new_root;
+ hd->arch.paging_mode = level;
+ hd->arch.root_table = new_root;
if ( !spin_is_locked(&pcidevs_lock) )
AMD_IOMMU_DEBUG("%s Try to access pdev_list "
/* valid = 0 only works for dom0 passthrough mode */
amd_iommu_set_root_page_table((u32 *)device_entry,
- page_to_maddr(hd->root_table),
+ page_to_maddr(hd->arch.root_table),
d->domain_id,
- hd->paging_mode, 1);
+ hd->arch.paging_mode, 1);
amd_iommu_flush_device(iommu, req_id);
bdf += pdev->phantom_stride;
unsigned long pt_mfn[7];
unsigned int merge_level;
- BUG_ON( !hd->root_table );
+ BUG_ON( !hd->arch.root_table );
if ( iommu_use_hap_pt(d) )
return 0;
memset(pt_mfn, 0, sizeof(pt_mfn));
- spin_lock(&hd->mapping_lock);
+ spin_lock(&hd->arch.mapping_lock);
/* Since HVM domain is initialized with 2 level IO page table,
* we might need a deeper page table for larger gfn now */
{
if ( update_paging_mode(d, gfn) )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
domain_crash(d);
return -EFAULT;
if ( iommu_pde_from_gfn(d, gfn, pt_mfn) || (pt_mfn[1] == 0) )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
domain_crash(d);
return -EFAULT;
amd_iommu_flush_pages(d, gfn, 0);
for ( merge_level = IOMMU_PAGING_MODE_LEVEL_2;
- merge_level <= hd->paging_mode; merge_level++ )
+ merge_level <= hd->arch.paging_mode; merge_level++ )
{
if ( pt_mfn[merge_level] == 0 )
break;
if ( iommu_merge_pages(d, pt_mfn[merge_level], gfn,
flags, merge_level) )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
AMD_IOMMU_DEBUG("Merge iommu page failed at level %d, "
"gfn = %lx mfn = %lx\n", merge_level, gfn, mfn);
domain_crash(d);
}
out:
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
return 0;
}
unsigned long pt_mfn[7];
struct hvm_iommu *hd = domain_hvm_iommu(d);
- BUG_ON( !hd->root_table );
+ BUG_ON( !hd->arch.root_table );
if ( iommu_use_hap_pt(d) )
return 0;
memset(pt_mfn, 0, sizeof(pt_mfn));
- spin_lock(&hd->mapping_lock);
+ spin_lock(&hd->arch.mapping_lock);
/* Since HVM domain is initialized with 2 level IO page table,
* we might need a deeper page table for larger gfn now */
{
if ( update_paging_mode(d, gfn) )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
domain_crash(d);
return -EFAULT;
if ( iommu_pde_from_gfn(d, gfn, pt_mfn) || (pt_mfn[1] == 0) )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
domain_crash(d);
return -EFAULT;
/* mark PTE as 'page not present' */
clear_iommu_pte_present(pt_mfn[1], gfn);
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
amd_iommu_flush_pages(d, gfn, 0);
pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
p2m_table = mfn_to_page(mfn_x(pgd_mfn));
- if ( hd->root_table != p2m_table )
+ if ( hd->arch.root_table != p2m_table )
{
- free_amd_iommu_pgtable(hd->root_table);
- hd->root_table = p2m_table;
+ free_amd_iommu_pgtable(hd->arch.root_table);
+ hd->arch.root_table = p2m_table;
/* When sharing p2m with iommu, paging mode = 4 */
- hd->paging_mode = IOMMU_PAGING_MODE_LEVEL_4;
+ hd->arch.paging_mode = IOMMU_PAGING_MODE_LEVEL_4;
AMD_IOMMU_DEBUG("Share p2m table with iommu: p2m table = %#lx\n",
mfn_x(pgd_mfn));
}
struct hvm_iommu *hd = domain_hvm_iommu(domain);
- BUG_ON( !hd->root_table || !hd->paging_mode || !iommu->dev_table.buffer );
+ BUG_ON( !hd->arch.root_table || !hd->arch.paging_mode ||
+ !iommu->dev_table.buffer );
if ( iommu_passthrough && is_hardware_domain(domain) )
valid = 0;
{
/* bind DTE to domain page-tables */
amd_iommu_set_root_page_table(
- (u32 *)dte, page_to_maddr(hd->root_table), domain->domain_id,
- hd->paging_mode, valid);
+ (u32 *)dte, page_to_maddr(hd->arch.root_table), domain->domain_id,
+ hd->arch.paging_mode, valid);
if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
"root table = %#"PRIx64", "
"domain = %d, paging mode = %d\n",
req_id, pdev->type,
- page_to_maddr(hd->root_table),
- domain->domain_id, hd->paging_mode);
+ page_to_maddr(hd->arch.root_table),
+ domain->domain_id, hd->arch.paging_mode);
}
spin_unlock_irqrestore(&iommu->lock, flags);
static int allocate_domain_resources(struct hvm_iommu *hd)
{
/* allocate root table */
- spin_lock(&hd->mapping_lock);
- if ( !hd->root_table )
+ spin_lock(&hd->arch.mapping_lock);
+ if ( !hd->arch.root_table )
{
- hd->root_table = alloc_amd_iommu_pgtable();
- if ( !hd->root_table )
+ hd->arch.root_table = alloc_amd_iommu_pgtable();
+ if ( !hd->arch.root_table )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
return -ENOMEM;
}
}
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
return 0;
}
/* allocate page directory */
if ( allocate_domain_resources(hd) != 0 )
{
- if ( hd->root_table )
- free_domheap_page(hd->root_table);
+ if ( hd->arch.root_table )
+ free_domheap_page(hd->arch.root_table);
return -ENOMEM;
}
/* For pv and dom0, stick with get_paging_mode(max_page)
* For HVM dom0, use 2 level page table at first */
- hd->paging_mode = is_hvm_domain(d) ?
+ hd->arch.paging_mode = is_hvm_domain(d) ?
IOMMU_PAGING_MODE_LEVEL_2 :
get_paging_mode(max_page);
AMD_IOMMU_DEBUG("Disable: device id = %#x, "
"domain = %d, paging mode = %d\n",
req_id, domain->domain_id,
- domain_hvm_iommu(domain)->paging_mode);
+ domain_hvm_iommu(domain)->arch.paging_mode);
}
spin_unlock_irqrestore(&iommu->lock, flags);
/* IO page tables might be destroyed after pci-detach of the last device.
* In this case, we have to re-allocate the root table for the next pci-attach. */
- if ( t->root_table == NULL )
+ if ( t->arch.root_table == NULL )
allocate_domain_resources(t);
amd_iommu_setup_domain_device(target, iommu, devfn, pdev);
if ( iommu_use_hap_pt(d) )
return;
- spin_lock(&hd->mapping_lock);
- if ( hd->root_table )
+ spin_lock(&hd->arch.mapping_lock);
+ if ( hd->arch.root_table )
{
- deallocate_next_page_table(hd->root_table, hd->paging_mode);
- hd->root_table = NULL;
+ deallocate_next_page_table(hd->arch.root_table, hd->arch.paging_mode);
+ hd->arch.root_table = NULL;
}
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
}
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
- if ( !hd->root_table )
+ if ( !hd->arch.root_table )
return;
- printk("p2m table has %d levels\n", hd->paging_mode);
- amd_dump_p2m_table_level(hd->root_table, hd->paging_mode, 0, 0);
+ printk("p2m table has %d levels\n", hd->arch.paging_mode);
+ amd_dump_p2m_table_level(hd->arch.root_table, hd->arch.paging_mode, 0, 0);
}
const struct iommu_ops amd_iommu_ops = {
int iommu_domain_init(struct domain *d)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
+ int ret = 0;
- spin_lock_init(&hd->mapping_lock);
- INIT_LIST_HEAD(&hd->g2m_ioport_list);
- INIT_LIST_HEAD(&hd->mapped_rmrrs);
+ ret = arch_iommu_domain_init(d);
+ if ( ret )
+ return ret;
if ( !iommu_enabled )
return 0;
void iommu_domain_destroy(struct domain *d)
{
- struct hvm_iommu *hd = domain_hvm_iommu(d);
- struct list_head *ioport_list, *tmp;
- struct g2m_ioport *ioport;
+ struct hvm_iommu *hd = domain_hvm_iommu(d);
if ( !iommu_enabled || !hd->platform_ops )
return;
if ( need_iommu(d) )
iommu_teardown(d);
- list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
- {
- ioport = list_entry(ioport_list, struct g2m_ioport, list);
- list_del(&ioport->list);
- xfree(ioport);
- }
+ arch_iommu_domain_destroy(d);
}
int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
struct acpi_drhd_unit *drhd;
struct pci_dev *pdev;
struct hvm_iommu *hd = domain_hvm_iommu(domain);
- int addr_width = agaw_to_width(hd->agaw);
+ int addr_width = agaw_to_width(hd->arch.agaw);
struct dma_pte *parent, *pte = NULL;
- int level = agaw_to_level(hd->agaw);
+ int level = agaw_to_level(hd->arch.agaw);
int offset;
u64 pte_maddr = 0, maddr;
u64 *vaddr = NULL;
addr &= (((u64)1) << addr_width) - 1;
- ASSERT(spin_is_locked(&hd->mapping_lock));
- if ( hd->pgd_maddr == 0 )
+ ASSERT(spin_is_locked(&hd->arch.mapping_lock));
+ if ( hd->arch.pgd_maddr == 0 )
{
/*
* just get any passthrough device in the domain - assume user
*/
pdev = pci_get_pdev_by_domain(domain, -1, -1, -1);
drhd = acpi_find_matched_drhd_unit(pdev);
- if ( !alloc || ((hd->pgd_maddr = alloc_pgtable_maddr(drhd, 1)) == 0) )
+ if ( !alloc || ((hd->arch.pgd_maddr = alloc_pgtable_maddr(drhd, 1)) == 0) )
goto out;
}
- parent = (struct dma_pte *)map_vtd_domain_page(hd->pgd_maddr);
+ parent = (struct dma_pte *)map_vtd_domain_page(hd->arch.pgd_maddr);
while ( level > 1 )
{
offset = address_level_offset(addr, level);
{
iommu = drhd->iommu;
- if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+ if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) )
continue;
flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
struct dma_pte *page = NULL, *pte = NULL;
u64 pg_maddr;
- spin_lock(&hd->mapping_lock);
+ spin_lock(&hd->arch.mapping_lock);
/* get last level pte */
pg_maddr = addr_to_dma_page_maddr(domain, addr, 0);
if ( pg_maddr == 0 )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
return;
}
if ( !dma_pte_present(*pte) )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
unmap_vtd_domain_page(page);
return;
}
dma_clear_pte(*pte);
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
if ( !this_cpu(iommu_dont_flush_iotlb) )
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
- hd->agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ hd->arch.agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
return 0;
}
}
else
{
- spin_lock(&hd->mapping_lock);
+ spin_lock(&hd->arch.mapping_lock);
/* Ensure we have pagetables allocated down to leaf PTE. */
- if ( hd->pgd_maddr == 0 )
+ if ( hd->arch.pgd_maddr == 0 )
{
addr_to_dma_page_maddr(domain, 0, 1);
- if ( hd->pgd_maddr == 0 )
+ if ( hd->arch.pgd_maddr == 0 )
{
nomem:
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
spin_unlock(&iommu->lock);
unmap_vtd_domain_page(context_entries);
return -ENOMEM;
}
/* Skip top levels of page tables for 2- and 3-level DRHDs. */
- pgd_maddr = hd->pgd_maddr;
+ pgd_maddr = hd->arch.pgd_maddr;
for ( agaw = level_to_agaw(4);
agaw != level_to_agaw(iommu->nr_pt_levels);
agaw-- )
else
context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
}
if ( context_set_domain_id(context, domain, iommu) )
iommu_flush_iotlb_dsi(iommu, 0, 1, flush_dev_iotlb);
}
- set_bit(iommu->index, &hd->iommu_bitmap);
+ set_bit(iommu->index, &hd->arch.iommu_bitmap);
unmap_vtd_domain_page(context_entries);
struct hvm_iommu *hd = domain_hvm_iommu(domain);
int iommu_domid;
- clear_bit(iommu->index, &hd->iommu_bitmap);
+ clear_bit(iommu->index, &hd->arch.iommu_bitmap);
iommu_domid = domain_iommu_domid(domain, iommu);
if ( iommu_domid == -1 )
if ( list_empty(&acpi_drhd_units) )
return;
- list_for_each_entry_safe ( mrmrr, tmp, &hd->mapped_rmrrs, list )
+ list_for_each_entry_safe ( mrmrr, tmp, &hd->arch.mapped_rmrrs, list )
{
list_del(&mrmrr->list);
xfree(mrmrr);
if ( iommu_use_hap_pt(d) )
return;
- spin_lock(&hd->mapping_lock);
- iommu_free_pagetable(hd->pgd_maddr, agaw_to_level(hd->agaw));
- hd->pgd_maddr = 0;
- spin_unlock(&hd->mapping_lock);
+ spin_lock(&hd->arch.mapping_lock);
+ iommu_free_pagetable(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw));
+ hd->arch.pgd_maddr = 0;
+ spin_unlock(&hd->arch.mapping_lock);
}
static int intel_iommu_map_page(
if ( iommu_passthrough && is_hardware_domain(d) )
return 0;
- spin_lock(&hd->mapping_lock);
+ spin_lock(&hd->arch.mapping_lock);
pg_maddr = addr_to_dma_page_maddr(d, (paddr_t)gfn << PAGE_SHIFT_4K, 1);
if ( pg_maddr == 0 )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
return -ENOMEM;
}
page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
if ( old.val == new.val )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
unmap_vtd_domain_page(page);
return 0;
}
*pte = new;
iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
unmap_vtd_domain_page(page);
if ( !this_cpu(iommu_dont_flush_iotlb) )
for_each_drhd_unit ( drhd )
{
iommu = drhd->iommu;
- if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+ if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) )
continue;
flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
return;
pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
- hd->pgd_maddr = pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
+ hd->arch.pgd_maddr = pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
}
static int rmrr_identity_mapping(struct domain *d,
ASSERT(rmrr->base_address < rmrr->end_address);
/*
- * No need to acquire hd->mapping_lock: Both insertion and removal
+ * No need to acquire hd->arch.mapping_lock: Both insertion and removal
* get done while holding pcidevs_lock.
*/
- list_for_each_entry( mrmrr, &hd->mapped_rmrrs, list )
+ list_for_each_entry( mrmrr, &hd->arch.mapped_rmrrs, list )
{
if ( mrmrr->base == rmrr->base_address &&
mrmrr->end == rmrr->end_address )
mrmrr->base = rmrr->base_address;
mrmrr->end = rmrr->end_address;
mrmrr->count = 1;
- list_add_tail(&mrmrr->list, &hd->mapped_rmrrs);
+ list_add_tail(&mrmrr->list, &hd->arch.mapped_rmrrs);
return 0;
}
* get done while holding pcidevs_lock.
*/
ASSERT(spin_is_locked(&pcidevs_lock));
- list_for_each_entry_safe ( mrmrr, tmp, &hd->mapped_rmrrs, list )
+ list_for_each_entry_safe ( mrmrr, tmp, &hd->arch.mapped_rmrrs, list )
{
unsigned long base_pfn, end_pfn;
return;
hd = domain_hvm_iommu(d);
- printk("p2m table has %d levels\n", agaw_to_level(hd->agaw));
- vtd_dump_p2m_table_level(hd->pgd_maddr, agaw_to_level(hd->agaw), 0, 0);
+ printk("p2m table has %d levels\n", agaw_to_level(hd->arch.agaw));
+ vtd_dump_p2m_table_level(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw), 0, 0);
}
const struct iommu_ops intel_iommu_ops = {
panic("Presently, iommu must be enabled for PVH hardware domain\n");
}
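+/* Set up the x86-specific per-domain IOMMU state (mapping lock, guest
+ * ioport list and mapped RMRR list); called from the common
+ * iommu_domain_init(). */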
+int arch_iommu_domain_init(struct domain *d)
+{
+ struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+ spin_lock_init(&hd->arch.mapping_lock);
+ INIT_LIST_HEAD(&hd->arch.g2m_ioport_list);
+ INIT_LIST_HEAD(&hd->arch.mapped_rmrrs);
+
+ return 0;
+}
+
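+/* Tear down the x86-specific per-domain IOMMU state, in particular the
+ * guest-to-machine ioport mappings. */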
+void arch_iommu_domain_destroy(struct domain *d)
+{
+ struct hvm_iommu *hd = domain_hvm_iommu(d);
+ struct list_head *ioport_list, *tmp;
+ struct g2m_ioport *ioport;
+
+ list_for_each_safe ( ioport_list, tmp, &hd->arch.g2m_ioport_list )
+ {
+ ioport = list_entry(ioport_list, struct g2m_ioport, list);
+ list_del(&ioport->list);
+ xfree(ioport);
+ }
+}
+
/*
* Local variables:
* mode: C
return 0;
}
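+/* Guest-to-machine I/O port mapping entry (gport -> mport, np ports). */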
+struct g2m_ioport {
+ struct list_head list;
+ unsigned int gport;
+ unsigned int mport;
+ unsigned int np;
+};
+
+struct arch_hvm_iommu
+{
+ u64 pgd_maddr; /* io page directory machine address */
+ spinlock_t mapping_lock; /* io page table lock */
+ int agaw; /* adjusted guest address width, 0 is level 2 30-bit */
+ struct list_head g2m_ioport_list; /* guest to machine ioport mapping */
+ u64 iommu_bitmap; /* bitmap of iommu(s) that the domain uses */
+ struct list_head mapped_rmrrs;
+
+ /* amd iommu support */
+ int paging_mode;
+ struct page_info *root_table;
+ struct guest_iommu *g_iommu;
+};
+
#endif /* __ASM_X86_HVM_IOMMU_H__ */
#define MAX_IOMMUS 32
-#include <asm/msi.h>
+/* Does this domain have a P2M table we can use as its IOMMU pagetable? */
+#define iommu_use_hap_pt(d) (hap_enabled(d) && iommu_hap_pt_share)
+#define domain_hvm_iommu(d) (&d->arch.hvm_domain.hvm_iommu)
void iommu_update_ire_from_apic(unsigned int apic, unsigned int reg, unsigned int value);
unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg);
#include <xen/iommu.h>
#include <asm/hvm/iommu.h>
-struct g2m_ioport {
- struct list_head list;
- unsigned int gport;
- unsigned int mport;
- unsigned int np;
-};
-
struct hvm_iommu {
- u64 pgd_maddr; /* io page directory machine address */
- spinlock_t mapping_lock; /* io page table lock */
- int agaw; /* adjusted guest address width, 0 is level 2 30-bit */
- struct list_head g2m_ioport_list; /* guest to machine ioport mapping */
- u64 iommu_bitmap; /* bitmap of iommu(s) that the domain uses */
- struct list_head mapped_rmrrs;
-
- /* amd iommu support */
- int paging_mode;
- struct page_info *root_table;
- struct guest_iommu *g_iommu;
+ struct arch_hvm_iommu arch;
/* iommu_ops */
const struct iommu_ops *platform_ops;
extern bool_t iommu_debug;
extern bool_t amd_iommu_perdev_intremap;
-/* Does this domain have a P2M table we can use as its IOMMU pagetable? */
-#define iommu_use_hap_pt(d) (hap_enabled(d) && iommu_hap_pt_share)
-
-#define domain_hvm_iommu(d) (&d->arch.hvm_domain.hvm_iommu)
-
#define PAGE_SHIFT_4K (12)
#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K)
#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K)
void iommu_domain_destroy(struct domain *d);
int deassign_device(struct domain *d, u16 seg, u8 bus, u8 devfn);
+void arch_iommu_domain_destroy(struct domain *d);
+int arch_iommu_domain_init(struct domain *d);
int arch_iommu_populate_page_table(struct domain *d);
void arch_iommu_check_autotranslated_hwdom(struct domain *d);
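As an illustration of the new split (not part of the patch), the minimal sketch below shows how common code is meant to reach per-architecture IOMMU state only through the arch_iommu_domain_init() hook and the hd->arch wrapper. The cut-down domain/hvm_iommu types, the field initial values and the main() driver are hypothetical stand-ins for illustration, not Xen's real definitions.

/* Illustrative sketch only: simplified stand-ins for the Xen types above. */
#include <stdio.h>

struct arch_hvm_iommu {
    int paging_mode;              /* arch-specific state now lives here */
    unsigned long pgd_maddr;
};

struct hvm_iommu {
    struct arch_hvm_iommu arch;   /* common code only sees this wrapper */
};

struct domain {
    struct hvm_iommu hvm_iommu;
};

#define domain_hvm_iommu(d) (&(d)->hvm_iommu)

/* Per-arch hook, analogous to the one introduced by the patch for x86. */
static int arch_iommu_domain_init(struct domain *d)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);

    hd->arch.paging_mode = 2;     /* e.g. start with a 2-level table */
    hd->arch.pgd_maddr = 0;
    return 0;
}

/* Common entry point: delegates all arch-specific setup to the hook. */
static int iommu_domain_init(struct domain *d)
{
    return arch_iommu_domain_init(d);
}

int main(void)
{
    struct domain d = { { { 0, 0 } } };

    if ( iommu_domain_init(&d) )
        return 1;
    printf("paging mode = %d\n", domain_hvm_iommu(&d)->arch.paging_mode);
    return 0;
}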