addr & PAGE_MASK, PAGE_ALIGN(addr + size) - 1);
return res;
}
- res = map_mmio_regions(d, addr & PAGE_MASK,
- PAGE_ALIGN(addr + size) - 1,
- addr & PAGE_MASK);
+ res = map_mmio_regions(d,
+ paddr_to_pfn(addr & PAGE_MASK),
+ paddr_to_pfn_aligned(addr + size),
+ paddr_to_pfn(addr & PAGE_MASK));
if ( res )
{
printk(XENLOG_ERR "Unable to map 0x%"PRIx64
* The second page is always mapped at +4K irrespective of the
* GIC_64K_STRIDE quirk. The DTB passed to the guest reflects this.
*/
- ret = map_mmio_regions(d, d->arch.vgic.cbase,
- d->arch.vgic.cbase + PAGE_SIZE - 1,
- gicv2.vbase);
+ ret = map_mmio_regions(d, paddr_to_pfn(d->arch.vgic.cbase),
+ paddr_to_pfn_aligned(d->arch.vgic.cbase + PAGE_SIZE),
+ paddr_to_pfn(gicv2.vbase));
if ( ret )
return ret;
if ( !platform_has_quirk(PLATFORM_QUIRK_GIC_64K_STRIDE) )
- ret = map_mmio_regions(d, d->arch.vgic.cbase + PAGE_SIZE,
- d->arch.vgic.cbase + (2 * PAGE_SIZE) - 1,
- gicv2.vbase + PAGE_SIZE);
+ ret = map_mmio_regions(d, paddr_to_pfn(d->arch.vgic.cbase + PAGE_SIZE),
+ paddr_to_pfn_aligned(d->arch.vgic.cbase +
+ (2 * PAGE_SIZE)),
+ paddr_to_pfn(gicv2.vbase + PAGE_SIZE));
else
- ret = map_mmio_regions(d, d->arch.vgic.cbase + PAGE_SIZE,
- d->arch.vgic.cbase + (2 * PAGE_SIZE) - 1,
- gicv2.vbase + 16*PAGE_SIZE);
+ ret = map_mmio_regions(d, paddr_to_pfn(d->arch.vgic.cbase + PAGE_SIZE),
+ paddr_to_pfn_aligned(d->arch.vgic.cbase +
+ (2 * PAGE_SIZE)),
+ paddr_to_pfn(gicv2.vbase + 16*PAGE_SIZE));
return ret;
}
}
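For reference, here is how the quirk arithmetic above works out. This is a sketch assuming 4K pages (PAGE_SHIFT == 12) and page-aligned cbase/vbase; it is not part of the patch:

/* With PLATFORM_QUIRK_GIC_64K_STRIDE the hardware places the two 4K
 * CPU-interface pages at a 64K stride, so the guest's second page at
 * cbase + 4K must target the machine page at vbase + 64K:
 *
 *   start_gfn = paddr_to_pfn(cbase + PAGE_SIZE)           == pfn(cbase) + 1
 *   end_gfn   = paddr_to_pfn_aligned(cbase + 2*PAGE_SIZE) == pfn(cbase) + 2
 *   mfn       = paddr_to_pfn(vbase + 16*PAGE_SIZE)        == pfn(vbase) + 16
 */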
int map_mmio_regions(struct domain *d,
- paddr_t start_gaddr,
- paddr_t end_gaddr,
- paddr_t maddr)
+ unsigned long start_gfn,
+ unsigned long end_gfn,
+ unsigned long mfn)
{
- return apply_p2m_changes(d, INSERT, start_gaddr, end_gaddr,
- maddr, MATTR_DEV, p2m_mmio_direct);
+ return apply_p2m_changes(d, INSERT,
+ pfn_to_paddr(start_gfn),
+ pfn_to_paddr(end_gfn),
+ pfn_to_paddr(mfn),
+ MATTR_DEV, p2m_mmio_direct);
}
int guest_physmap_add_entry(struct domain *d,
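Every caller now follows the same conversion idiom: paddr_to_pfn() on the page-masked start and paddr_to_pfn_aligned() on the first byte past the range. A minimal sketch of that idiom; the helper name map_one_to_one is hypothetical and not part of the patch:

/* Hypothetical wrapper illustrating the caller-side pattern:
 * identity-map the byte range [addr, addr + size) into d's p2m. */
static int map_one_to_one(struct domain *d, paddr_t addr, paddr_t size)
{
    return map_mmio_regions(d,
                            paddr_to_pfn(addr & PAGE_MASK),    /* first frame of the range */
                            paddr_to_pfn_aligned(addr + size), /* page-aligned end, as a frame */
                            paddr_to_pfn(addr & PAGE_MASK));   /* same machine frame: 1:1 map */
}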
static int exynos5_specific_mapping(struct domain *d)
{
/* Map the chip ID */
- map_mmio_regions(d, EXYNOS5_PA_CHIPID, EXYNOS5_PA_CHIPID + PAGE_SIZE - 1,
- EXYNOS5_PA_CHIPID);
+ map_mmio_regions(d, paddr_to_pfn(EXYNOS5_PA_CHIPID),
+ paddr_to_pfn_aligned(EXYNOS5_PA_CHIPID + PAGE_SIZE),
+ paddr_to_pfn(EXYNOS5_PA_CHIPID));
/* Map the PWM region */
- map_mmio_regions(d, EXYNOS5_PA_TIMER,
- EXYNOS5_PA_TIMER + (PAGE_SIZE * 2) - 1,
- EXYNOS5_PA_TIMER);
+ map_mmio_regions(d, paddr_to_pfn(EXYNOS5_PA_TIMER),
+ paddr_to_pfn_aligned(EXYNOS5_PA_TIMER + (PAGE_SIZE * 2)),
+ paddr_to_pfn(EXYNOS5_PA_TIMER));
return 0;
}
static int omap5_specific_mapping(struct domain *d)
{
/* Map the PRM module */
- map_mmio_regions(d, OMAP5_PRM_BASE, OMAP5_PRM_BASE + (PAGE_SIZE * 2) - 1,
- OMAP5_PRM_BASE);
+ map_mmio_regions(d, paddr_to_pfn(OMAP5_PRM_BASE),
+ paddr_to_pfn_aligned(OMAP5_PRM_BASE + (PAGE_SIZE * 2)),
+ paddr_to_pfn(OMAP5_PRM_BASE));
/* Map the PRM_MPU */
- map_mmio_regions(d, OMAP5_PRCM_MPU_BASE,
- OMAP5_PRCM_MPU_BASE + PAGE_SIZE - 1,
- OMAP5_PRCM_MPU_BASE);
+ map_mmio_regions(d, paddr_to_pfn(OMAP5_PRCM_MPU_BASE),
+ paddr_to_pfn_aligned(OMAP5_PRCM_MPU_BASE + PAGE_SIZE),
+ paddr_to_pfn(OMAP5_PRCM_MPU_BASE));
/* Map the Wakeup Gen */
- map_mmio_regions(d, OMAP5_WKUPGEN_BASE, OMAP5_WKUPGEN_BASE + PAGE_SIZE - 1,
- OMAP5_WKUPGEN_BASE);
+ map_mmio_regions(d, paddr_to_pfn(OMAP5_WKUPGEN_BASE),
+ paddr_to_pfn_aligned(OMAP5_WKUPGEN_BASE + PAGE_SIZE),
+ paddr_to_pfn(OMAP5_WKUPGEN_BASE));
/* Map the on-chip SRAM */
- map_mmio_regions(d, OMAP5_SRAM_PA, OMAP5_SRAM_PA + (PAGE_SIZE * 32) - 1,
- OMAP5_SRAM_PA);
+ map_mmio_regions(d, paddr_to_pfn(OMAP5_SRAM_PA),
+ paddr_to_pfn_aligned(OMAP5_SRAM_PA + (PAGE_SIZE * 32)),
+ paddr_to_pfn(OMAP5_SRAM_PA));
return 0;
}
printk("Additional MMIO %"PRIpaddr"-%"PRIpaddr" (%s)\n",
start, end, what);
- ret = map_mmio_regions(d, start, end, start);
+ ret = map_mmio_regions(d, paddr_to_pfn(start),
+ paddr_to_pfn_aligned(end),
+ paddr_to_pfn(start));
if ( ret )
printk("Failed to map %s @ %"PRIpaddr" to dom%d\n",
what, start, d->domain_id);
#define paddr_to_pfn(pa) ((unsigned long)((pa) >> PAGE_SHIFT))
#define paddr_to_pdx(pa) pfn_to_pdx(paddr_to_pfn(pa))
+/* Round an address up to a page boundary and convert it to a frame number */
+#define paddr_to_pfn_aligned(paddr) paddr_to_pfn(PAGE_ALIGN(paddr))
static inline paddr_t __virt_to_maddr(vaddr_t va)
{
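The macro rounds up before shifting, so any address that is not already page-aligned lands in the next frame. A quick worked example, assuming PAGE_SHIFT == 12 (4K pages):

/* Assuming PAGE_SHIFT == 12:
 *   paddr_to_pfn(0x2fff)         == 0x2   (plain shift truncates down)
 *   paddr_to_pfn_aligned(0x2000) == 0x2   (already aligned, unchanged)
 *   paddr_to_pfn_aligned(0x2001) == 0x3   (rounded up to the next frame)
 */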
/* Setup p2m RAM mapping for domain d from start-end. */
int p2m_populate_ram(struct domain *d, paddr_t start, paddr_t end);
-/* Map MMIO regions in the p2m: start_gaddr and end_gaddr is the range
- * in the guest physical address space to map, starting from the machine
- * address maddr. */
-int map_mmio_regions(struct domain *d, paddr_t start_gaddr,
- paddr_t end_gaddr, paddr_t maddr);
+/* Map MMIO regions in the p2m: start_gfn and end_gfn give the range in the
+ * guest physical address space to map, starting from machine frame number
+ * mfn. */
+int map_mmio_regions(struct domain *d,
+ unsigned long start_gfn,
+ unsigned long end_gfn,
+ unsigned long mfn);
int guest_physmap_add_entry(struct domain *d,
unsigned long gfn,
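Putting the new prototype to use, identity-mapping a single 4K page would look as below. The address 0x12340000 is purely illustrative, and PAGE_SHIFT == 12 is assumed:

/* Illustrative only: identity-map one 4K page at 0x12340000. */
ret = map_mmio_regions(d,
                       paddr_to_pfn(0x12340000UL),         /* start_gfn == 0x12340 */
                       paddr_to_pfn_aligned(0x12341000UL), /* end_gfn   == 0x12341 */
                       paddr_to_pfn(0x12340000UL));        /* mfn       == 0x12340 */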