if ( need_mapping )
{
res = map_regions_p2mt(d,
- _gfn(paddr_to_pfn(addr)),
+ gaddr_to_gfn(addr),
PFN_UP(len),
- _mfn(paddr_to_pfn(addr)),
+ maddr_to_mfn(addr),
mr_data->p2mt);
if ( res < 0 )
addr = acpi_gbl_root_table_list.tables[i].address;
size = acpi_gbl_root_table_list.tables[i].length;
res = map_regions_p2mt(d,
- _gfn(paddr_to_pfn(addr)),
+ gaddr_to_gfn(addr),
PFN_UP(size),
- _mfn(paddr_to_pfn(addr)),
+ maddr_to_mfn(addr),
p2m_mmio_direct_c);
if ( res )
{
/* Map the EFI and ACPI tables to Dom0 */
rc = map_regions_p2mt(d,
- _gfn(paddr_to_pfn(d->arch.efi_acpi_gpa)),
+ gaddr_to_gfn(d->arch.efi_acpi_gpa),
PFN_UP(d->arch.efi_acpi_len),
_mfn(virt_to_mfn(d->arch.efi_acpi_table)),
p2m_mmio_direct_c);
return;
}
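
Each of the mapping hunks above sizes the region with PFN_UP, which rounds a byte length up to a whole number of 4K frames. A minimal sketch, assuming the standard definition from xen/include/xen/pfn.h:

#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

So an ACPI table of, say, 5000 bytes still gets two frames mapped, covering its tail.
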
d->domain_id, v2m_data->addr, v2m_data->size,
v2m_data->spi_start, v2m_data->nr_spis);
- ret = map_mmio_regions(d, _gfn(paddr_to_pfn(v2m_data->addr)),
+ ret = map_mmio_regions(d, gaddr_to_gfn(v2m_data->addr),
PFN_UP(v2m_data->size),
- _mfn(paddr_to_pfn(v2m_data->addr)));
+ maddr_to_mfn(v2m_data->addr));
if ( ret )
{
printk(XENLOG_ERR "GICv2: Map v2m frame to d%d failed.\n",
return;
}
- dst = map_domain_page(_mfn(paddr_to_pfn(ma)));
+ dst = map_domain_page(maddr_to_mfn(ma));
copy_from_paddr(dst + s, paddr + offs, l);
if ( rc < 0 )
goto err;
- gfn = _gfn(paddr_to_pfn(ipa));
+ gfn = gaddr_to_gfn(ipa);
/*
* We do this first as this is faster in the default case when no
* permission is set on the page.
*/
if ( !p2m->mem_access_enabled )
return true;
- rc = p2m_get_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), &xma);
+ rc = p2m_get_mem_access(v->domain, gaddr_to_gfn(gpa), &xma);
if ( rc )
return true;
/* First, handle rx2rw and n2rwx conversion automatically. */
if ( npfec.write_access && xma == XENMEM_access_rx2rw )
{
- rc = p2m_set_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), 1,
+ rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1,
0, ~0, XENMEM_access_rw, 0);
return false;
}
else if ( xma == XENMEM_access_n2rwx )
{
- rc = p2m_set_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), 1,
+ rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1,
0, ~0, XENMEM_access_rwx, 0);
}
{
/* A listener is not required, so clear the access
* restrictions. */
- rc = p2m_set_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), 1,
+ rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1,
0, ~0, XENMEM_access_rwx, 0);
}
}
break;
}
- ASSERT(!(pfn_to_paddr(mfn_x(mfn)) & ~PADDR_MASK));
+ ASSERT(!(mfn_to_maddr(mfn) & ~PADDR_MASK));
e.pt.base = mfn_x(mfn);
static inline lpae_t pte_of_xenaddr(vaddr_t va)
{
paddr_t ma = va + phys_offset;
- unsigned long mfn = ma >> PAGE_SHIFT;
- return mfn_to_xen_entry(_mfn(mfn), WRITEALLOC);
+
+ return mfn_to_xen_entry(maddr_to_mfn(ma), WRITEALLOC);
}
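
A later hunk in this file steps through frames with mfn_add rather than open-coded integer arithmetic (see the 4k-split loop below). A minimal sketch of that helper, assuming the usual typesafe wrapper; it is not part of this diff:

static inline mfn_t mfn_add(mfn_t mfn, unsigned long i)
{
    /* Unwrap, add the frame offset, and re-wrap as a typesafe mfn_t. */
    return _mfn(mfn_x(mfn) + i);
}
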
/* Map the FDT in the early boot page table */
/* Initialise xen second level entries ... */
/* ... Xen's text etc */
- pte = mfn_to_xen_entry(_mfn(xen_paddr>>PAGE_SHIFT), WRITEALLOC);
+ pte = mfn_to_xen_entry(maddr_to_mfn(xen_paddr), WRITEALLOC);
pte.pt.xn = 0;/* Contains our text mapping! */
xen_second[second_table_offset(XEN_VIRT_START)] = pte;
/* ... Boot Misc area for xen relocation */
dest_va = BOOT_RELOC_VIRT_START;
- pte = mfn_to_xen_entry(_mfn(xen_paddr >> PAGE_SHIFT), WRITEALLOC);
+ pte = mfn_to_xen_entry(maddr_to_mfn(xen_paddr), WRITEALLOC);
/* Map the destination in xen_second. */
xen_second[second_table_offset(dest_va)] = pte;
/* Map the destination in boot_second. */
/* Break up the Xen mapping into 4k pages and protect them separately. */
for ( i = 0; i < LPAE_ENTRIES; i++ )
{
- unsigned long mfn = paddr_to_pfn(xen_paddr) + i;
+ mfn_t mfn = mfn_add(maddr_to_mfn(xen_paddr), i);
unsigned long va = XEN_VIRT_START + (i << PAGE_SHIFT);
if ( !is_kernel(va) )
break;
- pte = mfn_to_xen_entry(_mfn(mfn), WRITEALLOC);
+ pte = mfn_to_xen_entry(mfn, WRITEALLOC);
pte.pt.table = 1; /* 4k mappings always have this bit set */
if ( is_kernel_text(va) || is_kernel_inittext(va) )
{
p2m_type_t *t, p2m_access_t *a,
unsigned int *page_order)
{
- paddr_t addr = pfn_to_paddr(gfn_x(gfn));
+ paddr_t addr = gfn_to_gaddr(gfn);
unsigned int level = 0;
lpae_t entry, *table;
int rc;
p2m_set_permission(&e, t, a);
- ASSERT(!(pfn_to_paddr(mfn_x(mfn)) & ~PADDR_MASK));
+ ASSERT(!(mfn_to_maddr(mfn) & ~PADDR_MASK));
e.p2m.base = mfn_x(mfn);
p2m_type_t t,
p2m_access_t a)
{
- paddr_t addr = pfn_to_paddr(gfn_x(sgfn));
+ paddr_t addr = gfn_to_gaddr(sgfn);
unsigned int level = 0;
unsigned int target = 3 - (page_order / LPAE_SHIFT);
lpae_t *entry, *table, orig_pte;
if ( rc )
goto err;
- if ( !mfn_valid(_mfn(maddr >> PAGE_SHIFT)) )
+ if ( !mfn_valid(maddr_to_mfn(maddr)) )
goto err;
- page = mfn_to_page(maddr >> PAGE_SHIFT);
+ page = mfn_to_page(mfn_x(maddr_to_mfn(maddr)));
ASSERT(page);
if ( unlikely(!get_page(page, d)) )
static int exynos5250_specific_mapping(struct domain *d)
{
/* Map the chip ID */
- map_mmio_regions(d, _gfn(paddr_to_pfn(EXYNOS5_PA_CHIPID)), 1,
- _mfn(paddr_to_pfn(EXYNOS5_PA_CHIPID)));
+ map_mmio_regions(d, gaddr_to_gfn(EXYNOS5_PA_CHIPID), 1,
+ maddr_to_mfn(EXYNOS5_PA_CHIPID));
/* Map the PWM region */
- map_mmio_regions(d, _gfn(paddr_to_pfn(EXYNOS5_PA_TIMER)), 2,
- _mfn(paddr_to_pfn(EXYNOS5_PA_TIMER)));
+ map_mmio_regions(d, gaddr_to_gfn(EXYNOS5_PA_TIMER), 2,
+ maddr_to_mfn(EXYNOS5_PA_TIMER));
return 0;
}
static int omap5_specific_mapping(struct domain *d)
{
/* Map the PRM module */
- map_mmio_regions(d, _gfn(paddr_to_pfn(OMAP5_PRM_BASE)), 2,
- _mfn(paddr_to_pfn(OMAP5_PRM_BASE)));
+ map_mmio_regions(d, gaddr_to_gfn(OMAP5_PRM_BASE), 2,
+ maddr_to_mfn(OMAP5_PRM_BASE));
/* Map the PRM_MPU */
- map_mmio_regions(d, _gfn(paddr_to_pfn(OMAP5_PRCM_MPU_BASE)), 1,
- _mfn(paddr_to_pfn(OMAP5_PRCM_MPU_BASE)));
+ map_mmio_regions(d, gaddr_to_gfn(OMAP5_PRCM_MPU_BASE), 1,
+ maddr_to_mfn(OMAP5_PRCM_MPU_BASE));
/* Map the Wakeup Gen */
- map_mmio_regions(d, _gfn(paddr_to_pfn(OMAP5_WKUPGEN_BASE)), 1,
- _mfn(paddr_to_pfn(OMAP5_WKUPGEN_BASE)));
+ map_mmio_regions(d, gaddr_to_gfn(OMAP5_WKUPGEN_BASE), 1,
+ maddr_to_mfn(OMAP5_WKUPGEN_BASE));
/* Map the on-chip SRAM */
- map_mmio_regions(d, _gfn(paddr_to_pfn(OMAP5_SRAM_PA)), 32,
- _mfn(paddr_to_pfn(OMAP5_SRAM_PA)));
+ map_mmio_regions(d, gaddr_to_gfn(OMAP5_SRAM_PA), 32,
+ maddr_to_mfn(OMAP5_SRAM_PA));
return 0;
}
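
Both platform hooks above set up identity (1:1) MMIO mappings: the guest frame number equals the machine frame number. A hypothetical helper, purely illustrative and not part of the tree, makes the pattern explicit:

/* Hypothetical: identity-map nr 4K MMIO frames starting at base into d. */
static int map_mmio_one_to_one(struct domain *d, paddr_t base,
                               unsigned long nr)
{
    return map_mmio_regions(d, gaddr_to_gfn(base), nr,
                            maddr_to_mfn(base));
}
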
uint32_t *first = NULL, *second = NULL;
mfn_t mfn;
- mfn = gfn_to_mfn(d, _gfn(paddr_to_pfn(ttbr0)));
+ mfn = gfn_to_mfn(d, gaddr_to_gfn(ttbr0));
printk("dom%d VA 0x%08"PRIvaddr"\n", d->domain_id, addr);
printk(" TTBCR: 0x%08"PRIregister"\n", ttbcr);
printk(" TTBR0: 0x%016"PRIx64" = 0x%"PRIpaddr"\n",
- ttbr0, pfn_to_paddr(mfn_x(mfn)));
+ ttbr0, mfn_to_maddr(mfn));
if ( ttbcr & TTBCR_EAE )
{
offset = addr >> (12+8);
printk("1ST[0x%"PRIx32"] (0x%"PRIpaddr") = 0x%08"PRIx32"\n",
- offset, pfn_to_paddr(mfn_x(mfn)), first[offset]);
+ offset, mfn_to_maddr(mfn), first[offset]);
if ( !(first[offset] & 0x1) ||
(first[offset] & 0x2) )
goto done;
- mfn = gfn_to_mfn(d, _gfn(paddr_to_pfn(first[offset])));
+ mfn = gfn_to_mfn(d, gaddr_to_gfn(first[offset]));
if ( mfn_eq(mfn, INVALID_MFN) )
{
second = map_domain_page(mfn);
offset = (addr >> 12) & 0x3FF;
printk("2ND[0x%"PRIx32"] (0x%"PRIpaddr") = 0x%08"PRIx32"\n",
- offset, pfn_to_paddr(mfn_x(mfn)), second[offset]);
+ offset, mfn_to_maddr(mfn), second[offset]);
done:
if (second) unmap_domain_page(second);
* with the Stage-2 page table. Walk the Stage-2 PT to check
* if the entry exists. If it's the case, return to the guest
*/
- mfn = gfn_to_mfn(current->domain, _gfn(paddr_to_pfn(info.gpa)));
+ mfn = gfn_to_mfn(current->domain, gaddr_to_gfn(info.gpa));
if ( !mfn_eq(mfn, INVALID_MFN) )
return;
- if ( try_map_mmio(_gfn(paddr_to_pfn(info.gpa))) )
+ if ( try_map_mmio(gaddr_to_gfn(info.gpa)) )
return;
break;
* Map the gic virtual cpu interface in the gic cpu interface
* region of the guest.
*/
- ret = map_mmio_regions(d, _gfn(paddr_to_pfn(cbase)), csize / PAGE_SIZE,
- _mfn(paddr_to_pfn(vbase)));
+ ret = map_mmio_regions(d, gaddr_to_gfn(cbase), csize / PAGE_SIZE,
+ maddr_to_mfn(vbase));
if ( ret )
return ret;
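
For reference, the typesafe conversions this series switches to are thin wrappers over the same shift arithmetic the open-coded forms used. A minimal sketch, assuming the conventional definitions (cf. xen/include/xen/mm.h):

#define gaddr_to_gfn(ga)    _gfn(paddr_to_pfn(ga))
#define maddr_to_mfn(ma)    _mfn(paddr_to_pfn(ma))
#define gfn_to_gaddr(gfn)   pfn_to_paddr(gfn_x(gfn))
#define mfn_to_maddr(mfn)   pfn_to_paddr(mfn_x(mfn))

The behaviour is unchanged; the gain is at the type level, since gfn_t and mfn_t are distinct wrapper types and accidentally mixing guest and machine frames no longer compiles.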