New key handler 'o' to dump the IOMMU p2m table for each domain.
Skips dumping the table for domain 0.
Intel- and AMD-specific iommu_ops handlers for dumping the p2m table.
Incorporated feedback from Jan Beulich and Wei Wang.
Fixed indent printing with %*s.
Removed superfluous superpage and other attribute prints.
Make next_level use consistent for AMD IOMMU dumps. Warn if an
inconsistency is found.
AMD IOMMU does not skip levels. Handle 2MB and 1GB IOMMU page sizes for
AMD.
Signed-off-by: Santosh Jodh <santosh.jodh@citrix.com>
Committed-by: Keir Fraser <keir@xen.org>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include <xen/paging.h>
+#include <xen/softirq.h>
#include <asm/hvm/iommu.h>
#include <asm/amd-iommu.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
#include <asm/io_apic.h>
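+/*
+ * Recursively walk the AMD IOMMU page table rooted at 'pg' (a table at the
+ * given paging 'level'), printing a gfn -> mfn line for every present leaf
+ * entry. 'indent' sets the %*s indentation of the output.
+ */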
+static void amd_dump_p2m_table_level(struct page_info* pg, int level,
+ paddr_t gpa, int indent)
+{
+ paddr_t address;
+ void *table_vaddr, *pde;
+ paddr_t next_table_maddr;
+ int index, next_level, present;
+ u32 *entry;
+
+ if ( level < 1 )
+ return;
+
+ table_vaddr = __map_domain_page(pg);
+ if ( table_vaddr == NULL )
+ {
+ printk("Failed to map IOMMU domain page %"PRIpaddr"\n",
+ page_to_maddr(pg));
+ return;
+ }
+
+ for ( index = 0; index < PTE_PER_TABLE_SIZE; index++ )
+ {
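+ /* Dumping a large table can take a while; keep softirqs serviced. */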
+ if ( !(index % 2) )
+ process_pending_softirqs();
+
+ pde = table_vaddr + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
+ next_table_maddr = amd_iommu_get_next_table_from_pte(pde);
+ entry = (u32*)pde;
+
+ present = get_field_from_reg_u32(entry[0],
+ IOMMU_PDE_PRESENT_MASK,
+ IOMMU_PDE_PRESENT_SHIFT);
+
+ if ( !present )
+ continue;
+
+ next_level = get_field_from_reg_u32(entry[0],
+ IOMMU_PDE_NEXT_LEVEL_MASK,
+ IOMMU_PDE_NEXT_LEVEL_SHIFT);
+
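+ /*
+ * AMD IOMMU tables do not skip levels: a non-zero next_level
+ * must equal level - 1.
+ */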
+ if ( next_level && (next_level != (level - 1)) )
+ {
+ printk("IOMMU p2m table error. next_level = %d, expected %d\n",
+ next_level, level - 1);
+
+ continue;
+ }
+
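+ /*
+ * A next_level of 0 marks a leaf entry; at level 2 or 3 this is a
+ * 2MB or 1GB superpage mapping.
+ */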
+ address = gpa + amd_offset_level_address(index, level);
+ if ( next_level >= 1 )
+ amd_dump_p2m_table_level(
+ maddr_to_page(next_table_maddr), next_level,
+ address, indent + 1);
+ else
+ printk("%*sgfn: %08lx mfn: %08lx\n",
+ indent, "",
+ (unsigned long)PFN_DOWN(address),
+ (unsigned long)PFN_DOWN(next_table_maddr));
+ }
+
+ unmap_domain_page(table_vaddr);
+}
+
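+/* iommu_ops .dump_p2m_table handler for the AMD IOMMU. */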
+static void amd_dump_p2m_table(struct domain *d)
+{
+ struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+ if ( !hd->root_table )
+ return;
+
+ printk("p2m table has %d levels\n", hd->paging_mode);
+ amd_dump_p2m_table_level(hd->root_table, hd->paging_mode, 0, 0);
+}
+
const struct iommu_ops amd_iommu_ops = {
.init = amd_iommu_domain_init,
.dom0_init = amd_iommu_dom0_init,
.resume = amd_iommu_resume,
.share_p2m = amd_iommu_share_p2m,
.crash_shutdown = amd_iommu_suspend,
+ .dump_p2m_table = amd_dump_p2m_table,
};
#include <xen/paging.h>
#include <xen/guest_access.h>
#include <xen/softirq.h>
+#include <xen/keyhandler.h>
#include <xsm/xsm.h>
static void parse_iommu_param(char *s);
static int iommu_populate_page_table(struct domain *d);
+static void iommu_dump_p2m_table(unsigned char key);
/*
* The 'iommu' parameter enables the IOMMU. Optional comma separated
DEFINE_PER_CPU(bool_t, iommu_dont_flush_iotlb);
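+/* The 'o' debug key dumps the IOMMU p2m table of every domain except dom0. */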
+static struct keyhandler iommu_p2m_table = {
+ .diagnostic = 0,
+ .u.fn = iommu_dump_p2m_table,
+ .desc = "dump iommu p2m table"
+};
+
static void __init parse_iommu_param(char *s)
{
char *ss;
if ( !iommu_enabled )
return;
+ register_keyhandler('o', &iommu_p2m_table);
d->need_iommu = !!iommu_dom0_strict;
if ( need_iommu(d) )
{
return ret;
}
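+/*
+ * 'o' key handler: ask the vendor-specific iommu_ops to dump each domain's
+ * IOMMU p2m table. Domain 0 is skipped, and domains sharing the p2m with
+ * the MMU only get a note printed.
+ */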
+static void iommu_dump_p2m_table(unsigned char key)
+{
+ struct domain *d;
+ const struct iommu_ops *ops;
+
+ if ( !iommu_enabled )
+ {
+ printk("IOMMU not enabled!\n");
+ return;
+ }
+
+ ops = iommu_get_ops();
+ for_each_domain(d)
+ {
+ if ( !d->domain_id )
+ continue;
+
+ if ( iommu_use_hap_pt(d) )
+ {
+ printk("\ndomain%d IOMMU p2m table shared with MMU: \n", d->domain_id);
+ continue;
+ }
+
+ printk("\ndomain%d IOMMU p2m table: \n", d->domain_id);
+ ops->dump_p2m_table(d);
+ }
+}
+
/*
* Local variables:
* mode: C
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include <xen/keyhandler.h>
+#include <xen/softirq.h>
#include <asm/msi.h>
#include <asm/irq.h>
#if defined(__i386__) || defined(__x86_64__)
}
}
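+/*
+ * Recursively walk the VT-d page table at machine address 'pt_maddr',
+ * printing a gfn -> mfn line for every present leaf entry.
+ */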
+static void vtd_dump_p2m_table_level(paddr_t pt_maddr, int level, paddr_t gpa,
+ int indent)
+{
+ paddr_t address;
+ int i;
+ struct dma_pte *pt_vaddr, *pte;
+ int next_level;
+
+ if ( level < 1 )
+ return;
+
+ pt_vaddr = map_vtd_domain_page(pt_maddr);
+ if ( pt_vaddr == NULL )
+ {
+ printk("Failed to map VT-D domain page %"PRIpaddr"\n", pt_maddr);
+ return;
+ }
+
+ next_level = level - 1;
+ for ( i = 0; i < PTE_NUM; i++ )
+ {
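+ /* Dumping a large table can take a while; keep softirqs serviced. */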
+ if ( !(i % 2) )
+ process_pending_softirqs();
+
+ pte = &pt_vaddr[i];
+ if ( !dma_pte_present(*pte) )
+ continue;
+
+ address = gpa + offset_level_address(i, level);
+ if ( next_level >= 1 )
+ vtd_dump_p2m_table_level(dma_pte_addr(*pte), next_level,
+ address, indent + 1);
+ else
+ printk("%*sgfn: %08lx mfn: %08lx\n",
+ indent, "",
+ (unsigned long)(address >> PAGE_SHIFT_4K),
+ (unsigned long)(pte->val >> PAGE_SHIFT_4K));
+ }
+
+ unmap_vtd_domain_page(pt_vaddr);
+}
+
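+/* iommu_ops .dump_p2m_table handler for VT-d. */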
+static void vtd_dump_p2m_table(struct domain *d)
+{
+ struct hvm_iommu *hd;
+
+ if ( list_empty(&acpi_drhd_units) )
+ return;
+
+ hd = domain_hvm_iommu(d);
+ printk("p2m table has %d levels\n", agaw_to_level(hd->agaw));
+ vtd_dump_p2m_table_level(hd->pgd_maddr, agaw_to_level(hd->agaw), 0, 0);
+}
+
const struct iommu_ops intel_iommu_ops = {
.init = intel_iommu_domain_init,
.dom0_init = intel_iommu_dom0_init,
.crash_shutdown = vtd_crash_shutdown,
.iotlb_flush = intel_iommu_iotlb_flush,
.iotlb_flush_all = intel_iommu_iotlb_flush_all,
+ .dump_p2m_table = vtd_dump_p2m_table,
};
/*
#define level_to_offset_bits(l) (12 + (l - 1) * LEVEL_STRIDE)
#define address_level_offset(addr, level) \
((addr >> level_to_offset_bits(level)) & LEVEL_MASK)
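+/*
+ * Inverse of address_level_offset(): the guest address contributed by
+ * table entry 'offset' at the given 'level'.
+ */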
+#define offset_level_address(offset, level) \
+ ((u64)(offset) << level_to_offset_bits(level))
#define level_mask(l) (((u64)(-1)) << level_to_offset_bits(l))
#define level_size(l) (1 << level_to_offset_bits(l))
#define align_to_level(addr, l) ((addr + level_size(l) - 1) & level_mask(l))
#define PTE_PER_TABLE_ALLOC(entries) \
PAGE_SIZE * (PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT)
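+/*
+ * Guest address contributed by entry 'offset' of an AMD IOMMU table at
+ * the given paging 'level'.
+ */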
+#define amd_offset_level_address(offset, level) \
+ ((u64)(offset) << (12 + (PTE_PER_TABLE_SHIFT * \
+ (level - IOMMU_PAGING_MODE_LEVEL_1))))
+
#define PCI_MIN_CAP_OFFSET 0x40
#define PCI_MAX_CAP_BLOCKS 48
#define PCI_CAP_PTR_MASK 0xFC
void (*crash_shutdown)(void);
void (*iotlb_flush)(struct domain *d, unsigned long gfn, unsigned int page_count);
void (*iotlb_flush_all)(struct domain *d);
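+ /* Dump the domain's IOMMU p2m table (used by the 'o' key handler). */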
+ void (*dump_p2m_table)(struct domain *d);
};
void iommu_update_ire_from_apic(unsigned int apic, unsigned int reg, unsigned int value);