... from those specific to VT-d or AMD IOMMU, and put the latter in a union.
There is no functional change in this patch, although initialization of
the 'mapped_rmrrs' list now occurs slightly later under
iommu_domain_init(), since it is done (correctly) in VT-d-specific code
rather than in general x86 code. Using a union is safe because a host
only ever drives one vendor's IOMMU implementation, so the VT-d and AMD
field sets are never live at the same time.
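As a quick sketch, the resulting layout looks as follows (the real
definition is in the asm-x86/iommu.h hunk at the end of this patch; the
stub types here are stand-ins so the fragment compiles on its own):

    #include <stdint.h>

    /* Stub stand-ins for the Xen types, for illustration only. */
    typedef struct { int raw; } spinlock_t;
    struct list_head { struct list_head *next, *prev; };
    struct page_info;
    struct guest_iommu;

    struct arch_iommu
    {
        spinlock_t mapping_lock;   /* common: io page table lock */

        union {
            struct {               /* Intel VT-d only */
                uint64_t pgd_maddr;
                unsigned int agaw;
                uint64_t iommu_bitmap;
                struct list_head mapped_rmrrs;
            } vtd;
            struct {               /* AMD IOMMU only */
                unsigned int paging_mode;
                struct page_info *root_table;
                struct guest_iommu *g_iommu;
            } amd;
        };
    };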
NOTE: I have not combined the AMD IOMMU 'root_table' and VT-d 'pgd_maddr'
fields even though they perform essentially the same function. The
concept of 'root table' in the VT-d code is different from that in the
AMD code, so forcing a common name would likely only confuse the
reader.
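To illustrate the point (fragments lifted from the hunks below), the two
drivers even hold their top-level table in different representations,
VT-d as a raw machine address and AMD as a struct page_info pointer:

    /* VT-d: pgd_maddr is the machine address of the page directory. */
    parent = map_vtd_domain_page(hd->arch.vtd.pgd_maddr);

    /* AMD: root_table is a struct page_info pointer, converted on use
     * (e.g. page_to_maddr(hd->arch.amd.root_table) when writing a DTE). */
    table = __map_domain_page(hd->arch.amd.root_table);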
Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
{
const struct domain_iommu *dio = dom_iommu(d);
- update_iommu_mac(&ctx, dio->arch.pgd_maddr,
- agaw_to_level(dio->arch.agaw));
+ update_iommu_mac(&ctx, dio->arch.vtd.pgd_maddr,
+ agaw_to_level(dio->arch.vtd.agaw));
}
}
static inline struct guest_iommu *domain_iommu(struct domain *d)
{
- return dom_iommu(d)->arch.g_iommu;
+ return dom_iommu(d)->arch.amd.g_iommu;
}
static inline struct guest_iommu *vcpu_iommu(struct vcpu *v)
{
- return dom_iommu(v->domain)->arch.g_iommu;
+ return dom_iommu(v->domain)->arch.amd.g_iommu;
}
static void guest_iommu_enable(struct guest_iommu *iommu)
guest_iommu_reg_init(iommu);
iommu->mmio_base = ~0ULL;
iommu->domain = d;
- hd->arch.g_iommu = iommu;
+ hd->arch.amd.g_iommu = iommu;
tasklet_init(&iommu->cmd_buffer_tasklet, guest_iommu_process_command, d);
tasklet_kill(&iommu->cmd_buffer_tasklet);
xfree(iommu);
- dom_iommu(d)->arch.g_iommu = NULL;
+ dom_iommu(d)->arch.amd.g_iommu = NULL;
}
struct page_info *table;
const struct domain_iommu *hd = dom_iommu(d);
- table = hd->arch.root_table;
- level = hd->arch.paging_mode;
+ table = hd->arch.amd.root_table;
+ level = hd->arch.amd.paging_mode;
BUG_ON( table == NULL || level < 1 || level > 6 );
spin_lock(&hd->arch.mapping_lock);
- if ( !hd->arch.root_table )
+ if ( !hd->arch.amd.root_table )
{
spin_unlock(&hd->arch.mapping_lock);
return 0;
unsigned int level = amd_iommu_get_paging_mode(end_gfn);
struct amd_iommu_pte *table;
- if ( hd->arch.root_table )
+ if ( hd->arch.amd.root_table )
{
ASSERT_UNREACHABLE();
return 0;
spin_lock(&hd->arch.mapping_lock);
- hd->arch.root_table = alloc_amd_iommu_pgtable();
- if ( !hd->arch.root_table )
+ hd->arch.amd.root_table = alloc_amd_iommu_pgtable();
+ if ( !hd->arch.amd.root_table )
goto out;
- table = __map_domain_page(hd->arch.root_table);
+ table = __map_domain_page(hd->arch.amd.root_table);
while ( level )
{
struct page_info *pg;
u8 bus = pdev->bus;
const struct domain_iommu *hd = dom_iommu(domain);
- BUG_ON( !hd->arch.root_table || !hd->arch.paging_mode ||
+ BUG_ON( !hd->arch.amd.root_table ||
+ !hd->arch.amd.paging_mode ||
!iommu->dev_table.buffer );
if ( iommu_hwdom_passthrough && is_hardware_domain(domain) )
/* bind DTE to domain page-tables */
amd_iommu_set_root_page_table(
- dte, page_to_maddr(hd->arch.root_table), domain->domain_id,
- hd->arch.paging_mode, valid);
+ dte, page_to_maddr(hd->arch.amd.root_table),
+ domain->domain_id, hd->arch.amd.paging_mode, valid);
/* Undo what amd_iommu_disable_domain_device() may have done. */
ivrs_dev = &get_ivrs_mappings(iommu->seg)[req_id];
"root table = %#"PRIx64", "
"domain = %d, paging mode = %d\n",
req_id, pdev->type,
- page_to_maddr(hd->arch.root_table),
- domain->domain_id, hd->arch.paging_mode);
+ page_to_maddr(hd->arch.amd.root_table),
+ domain->domain_id, hd->arch.amd.paging_mode);
}
spin_unlock_irqrestore(&iommu->lock, flags);
int amd_iommu_alloc_root(struct domain_iommu *hd)
{
- if ( unlikely(!hd->arch.root_table) )
+ if ( unlikely(!hd->arch.amd.root_table) )
{
- hd->arch.root_table = alloc_amd_iommu_pgtable();
- if ( !hd->arch.root_table )
+ hd->arch.amd.root_table = alloc_amd_iommu_pgtable();
+ if ( !hd->arch.amd.root_table )
return -ENOMEM;
}
* physical address space we give it, but this isn't known yet so use 4
* unilaterally.
*/
- hd->arch.paging_mode = amd_iommu_get_paging_mode(
+ hd->arch.amd.paging_mode = amd_iommu_get_paging_mode(
is_hvm_domain(d)
? 1ul << (DEFAULT_DOMAIN_ADDRESS_WIDTH - PAGE_SHIFT)
: get_upper_mfn_bound() + 1);
AMD_IOMMU_DEBUG("Disable: device id = %#x, "
"domain = %d, paging mode = %d\n",
req_id, domain->domain_id,
- dom_iommu(domain)->arch.paging_mode);
+ dom_iommu(domain)->arch.amd.paging_mode);
}
spin_unlock_irqrestore(&iommu->lock, flags);
struct domain_iommu *hd = dom_iommu(d);
spin_lock(&hd->arch.mapping_lock);
- if ( hd->arch.root_table )
+ if ( hd->arch.amd.root_table )
{
- deallocate_next_page_table(hd->arch.root_table, hd->arch.paging_mode);
- hd->arch.root_table = NULL;
+ deallocate_next_page_table(hd->arch.amd.root_table,
+ hd->arch.amd.paging_mode);
+ hd->arch.amd.root_table = NULL;
}
spin_unlock(&hd->arch.mapping_lock);
}
{
const struct domain_iommu *hd = dom_iommu(d);
- if ( !hd->arch.root_table )
+ if ( !hd->arch.amd.root_table )
return;
- printk("p2m table has %d levels\n", hd->arch.paging_mode);
- amd_dump_p2m_table_level(hd->arch.root_table, hd->arch.paging_mode, 0, 0);
+ printk("p2m table has %u levels\n", hd->arch.amd.paging_mode);
+ amd_dump_p2m_table_level(hd->arch.amd.root_table,
+ hd->arch.amd.paging_mode, 0, 0);
}
static const struct iommu_ops __initconstrel _iommu_ops = {
static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
{
struct domain_iommu *hd = dom_iommu(domain);
- int addr_width = agaw_to_width(hd->arch.agaw);
+ int addr_width = agaw_to_width(hd->arch.vtd.agaw);
struct dma_pte *parent, *pte = NULL;
- int level = agaw_to_level(hd->arch.agaw);
+ int level = agaw_to_level(hd->arch.vtd.agaw);
int offset;
u64 pte_maddr = 0;
addr &= (((u64)1) << addr_width) - 1;
ASSERT(spin_is_locked(&hd->arch.mapping_lock));
- if ( !hd->arch.pgd_maddr &&
+ if ( !hd->arch.vtd.pgd_maddr &&
(!alloc ||
- ((hd->arch.pgd_maddr = alloc_pgtable_maddr(1, hd->node)) == 0)) )
+ ((hd->arch.vtd.pgd_maddr = alloc_pgtable_maddr(1, hd->node)) == 0)) )
goto out;
- parent = (struct dma_pte *)map_vtd_domain_page(hd->arch.pgd_maddr);
+ parent = (struct dma_pte *)map_vtd_domain_page(hd->arch.vtd.pgd_maddr);
while ( level > 1 )
{
offset = address_level_offset(addr, level);
{
iommu = drhd->iommu;
- if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) )
+ if ( !test_bit(iommu->index, &hd->arch.vtd.iommu_bitmap) )
continue;
flush_dev_iotlb = !!find_ats_dev_drhd(iommu);
static int intel_iommu_domain_init(struct domain *d)
{
- dom_iommu(d)->arch.agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ struct domain_iommu *hd = dom_iommu(d);
+
+ hd->arch.vtd.agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ INIT_LIST_HEAD(&hd->arch.vtd.mapped_rmrrs);
return 0;
}
spin_lock(&hd->arch.mapping_lock);
/* Ensure we have pagetables allocated down to leaf PTE. */
- if ( hd->arch.pgd_maddr == 0 )
+ if ( hd->arch.vtd.pgd_maddr == 0 )
{
addr_to_dma_page_maddr(domain, 0, 1);
- if ( hd->arch.pgd_maddr == 0 )
+ if ( hd->arch.vtd.pgd_maddr == 0 )
{
nomem:
spin_unlock(&hd->arch.mapping_lock);
}
/* Skip top levels of page tables for 2- and 3-level DRHDs. */
- pgd_maddr = hd->arch.pgd_maddr;
+ pgd_maddr = hd->arch.vtd.pgd_maddr;
for ( agaw = level_to_agaw(4);
agaw != level_to_agaw(iommu->nr_pt_levels);
agaw-- )
if ( rc > 0 )
rc = 0;
- set_bit(iommu->index, &hd->arch.iommu_bitmap);
+ set_bit(iommu->index, &hd->arch.vtd.iommu_bitmap);
unmap_vtd_domain_page(context_entries);
{
int iommu_domid;
- clear_bit(iommu->index, &dom_iommu(domain)->arch.iommu_bitmap);
+ clear_bit(iommu->index, &dom_iommu(domain)->arch.vtd.iommu_bitmap);
iommu_domid = domain_iommu_domid(domain, iommu);
if ( iommu_domid == -1 )
if ( list_empty(&acpi_drhd_units) )
return;
- list_for_each_entry_safe ( mrmrr, tmp, &hd->arch.mapped_rmrrs, list )
+ list_for_each_entry_safe ( mrmrr, tmp, &hd->arch.vtd.mapped_rmrrs, list )
{
list_del(&mrmrr->list);
xfree(mrmrr);
return;
spin_lock(&hd->arch.mapping_lock);
- iommu_free_pagetable(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw));
- hd->arch.pgd_maddr = 0;
+ iommu_free_pagetable(hd->arch.vtd.pgd_maddr,
+ agaw_to_level(hd->arch.vtd.agaw));
+ hd->arch.vtd.pgd_maddr = 0;
spin_unlock(&hd->arch.mapping_lock);
}
mfn_t pgd_mfn;
pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
- dom_iommu(d)->arch.pgd_maddr =
+ dom_iommu(d)->arch.vtd.pgd_maddr =
pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
}
* No need to acquire hd->arch.mapping_lock: Both insertion and removal
* get done while holding pcidevs_lock.
*/
- list_for_each_entry( mrmrr, &hd->arch.mapped_rmrrs, list )
+ list_for_each_entry( mrmrr, &hd->arch.vtd.mapped_rmrrs, list )
{
if ( mrmrr->base == rmrr->base_address &&
mrmrr->end == rmrr->end_address )
mrmrr->base = rmrr->base_address;
mrmrr->end = rmrr->end_address;
mrmrr->count = 1;
- list_add_tail(&mrmrr->list, &hd->arch.mapped_rmrrs);
+ list_add_tail(&mrmrr->list, &hd->arch.vtd.mapped_rmrrs);
return 0;
}
return;
hd = dom_iommu(d);
- printk("p2m table has %d levels\n", agaw_to_level(hd->arch.agaw));
- vtd_dump_p2m_table_level(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw), 0, 0);
+ printk("p2m table has %d levels\n", agaw_to_level(hd->arch.vtd.agaw));
+ vtd_dump_p2m_table_level(hd->arch.vtd.pgd_maddr,
+ agaw_to_level(hd->arch.vtd.agaw), 0, 0);
}
static int __init intel_iommu_quarantine_init(struct domain *d)
unsigned int level = agaw_to_level(agaw);
int rc;
- if ( hd->arch.pgd_maddr )
+ if ( hd->arch.vtd.pgd_maddr )
{
ASSERT_UNREACHABLE();
return 0;
spin_lock(&hd->arch.mapping_lock);
- hd->arch.pgd_maddr = alloc_pgtable_maddr(1, hd->node);
- if ( !hd->arch.pgd_maddr )
+ hd->arch.vtd.pgd_maddr = alloc_pgtable_maddr(1, hd->node);
+ if ( !hd->arch.vtd.pgd_maddr )
goto out;
- parent = map_vtd_domain_page(hd->arch.pgd_maddr);
+ parent = map_vtd_domain_page(hd->arch.vtd.pgd_maddr);
while ( level )
{
uint64_t maddr;
struct domain_iommu *hd = dom_iommu(d);
spin_lock_init(&hd->arch.mapping_lock);
- INIT_LIST_HEAD(&hd->arch.mapped_rmrrs);
return 0;
}
struct arch_iommu
{
- u64 pgd_maddr; /* io page directory machine address */
- spinlock_t mapping_lock; /* io page table lock */
- int agaw; /* adjusted guest address width, 0 is level 2 30-bit */
- u64 iommu_bitmap; /* bitmap of iommu(s) that the domain uses */
- struct list_head mapped_rmrrs;
-
- /* amd iommu support */
- int paging_mode;
- struct page_info *root_table;
- struct guest_iommu *g_iommu;
+ spinlock_t mapping_lock; /* io page table lock */
+
+ union {
+ /* Intel VT-d */
+ struct {
+ uint64_t pgd_maddr; /* io page directory machine address */
+ unsigned int agaw; /* adjusted guest address width, 0 is level 2 30-bit */
+ uint64_t iommu_bitmap; /* bitmap of iommu(s) that the domain uses */
+ struct list_head mapped_rmrrs;
+ } vtd;
+ /* AMD IOMMU */
+ struct {
+ unsigned int paging_mode;
+ struct page_info *root_table;
+ struct guest_iommu *g_iommu;
+ } amd;
+ };
};
extern struct iommu_ops iommu_ops;