 static int __init map_iommu_mmio_region(struct amd_iommu *iommu)
 {
-    unsigned long mfn;
-
-    if ( nr_amd_iommus > MAX_AMD_IOMMUS )
-    {
-        AMD_IOMMU_DEBUG("nr_amd_iommus %d > MAX_IOMMUS\n", nr_amd_iommus);
+    iommu->mmio_base = ioremap(iommu->mmio_base_phys,
+                               IOMMU_MMIO_REGION_LENGTH);
+    if ( !iommu->mmio_base )
         return -ENOMEM;
-    }
-
-    iommu->mmio_base = (void *)fix_to_virt(
-        FIX_IOMMU_MMIO_BASE_0 + nr_amd_iommus * MMIO_PAGES_PER_IOMMU);
-    mfn = (unsigned long)(iommu->mmio_base_phys >> PAGE_SHIFT);
-    map_pages_to_xen((unsigned long)iommu->mmio_base, mfn,
-                     MMIO_PAGES_PER_IOMMU, PAGE_HYPERVISOR_NOCACHE);
     memset(iommu->mmio_base, 0, IOMMU_MMIO_REGION_LENGTH);
     FIX_KEXEC_BASE_0,
     FIX_KEXEC_BASE_END = FIX_KEXEC_BASE_0 \
       + ((KEXEC_XEN_NO_PAGES >> 1) * KEXEC_IMAGE_NR) - 1,
-    FIX_IOMMU_MMIO_BASE_0,
-    FIX_IOMMU_MMIO_END = FIX_IOMMU_MMIO_BASE_0 + IOMMU_PAGES -1,
     FIX_TBOOT_SHARED_BASE,
     FIX_MSIX_IO_RESERV_BASE,
     FIX_MSIX_IO_RESERV_END = FIX_MSIX_IO_RESERV_BASE + FIX_MSIX_MAX_PAGES -1,
 #define IOMMU_CONTROL_DISABLED          0
 #define IOMMU_CONTROL_ENABLED           1
-#define MMIO_PAGES_PER_IOMMU            (IOMMU_MMIO_REGION_LENGTH / PAGE_SIZE_4K)
-#define IOMMU_PAGES                     (MMIO_PAGES_PER_IOMMU * MAX_AMD_IOMMUS)
 #define DEFAULT_DOMAIN_ADDRESS_WIDTH    48
-#define MAX_AMD_IOMMUS                  32
 /* interrupt remapping table */
 #define INT_REMAP_INDEX_DM_MASK         0x1C00
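
For context, a minimal sketch of what the mapping path looks like once the fixmap route is gone, together with an illustrative unmap counterpart. It assumes Xen's ioremap()/iounmap() and the amd_iommu fields (mmio_base, mmio_base_phys) already used in the hunks above; the helper name unmap_iommu_mmio_region and the trailing "return 0" are illustrative, not quoted from the tree.

/* Sketch only, not a quote of the tree: relies on the usual includes of
 * the file being patched (ioremap()/iounmap(), memset(), ENOMEM). */
static int __init map_iommu_mmio_region(struct amd_iommu *iommu)
{
    /* One mapping per IOMMU via ioremap(); no fixmap slot and hence no
     * MAX_AMD_IOMMUS bound is needed any more. */
    iommu->mmio_base = ioremap(iommu->mmio_base_phys,
                               IOMMU_MMIO_REGION_LENGTH);
    if ( !iommu->mmio_base )
        return -ENOMEM;

    memset(iommu->mmio_base, 0, IOMMU_MMIO_REGION_LENGTH);

    return 0;
}

/* Illustrative counterpart: release the mapping on teardown/error. */
static void __init unmap_iommu_mmio_region(struct amd_iommu *iommu)
{
    if ( iommu->mmio_base )
    {
        iounmap(iommu->mmio_base);
        iommu->mmio_base = NULL;
    }
}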