No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
#ifdef CONFIG_DOMAIN_PAGE
void *map_domain_page_global(unsigned long mfn)
{
- return vmap(&mfn, 1);
+ /* vmap() now takes a typesafe mfn_t; wrap the raw frame number. */
+ mfn_t m = _mfn(mfn);
+
+ return vmap(&m, 1);
}
void unmap_domain_page_global(const void *va)
*/
void *ioremap_attr(paddr_t pa, size_t len, unsigned int attributes)
{
- unsigned long pfn = PFN_DOWN(pa);
+ mfn_t mfn = _mfn(PFN_DOWN(pa));
unsigned int offs = pa & (PAGE_SIZE - 1);
unsigned int nr = PFN_UP(offs + len);
- void *ptr = __vmap(&pfn, nr, 1, 1, attributes);
+ void *ptr = __vmap(&mfn, nr, 1, 1, attributes);
if ( ptr == NULL )
return NULL;
void *map_domain_page_global(unsigned long mfn)
{
+ /* vmap() now takes mfn_t; convert the raw frame number once up front. */
+ mfn_t m = _mfn(mfn);
ASSERT(!in_irq() && local_irq_is_enabled());
#ifdef NDEBUG
return mfn_to_virt(mfn);
#endif
- return vmap(&mfn, 1);
+ return vmap(&m, 1);
}
void unmap_domain_page_global(const void *ptr)
void __iomem *ioremap(paddr_t pa, size_t len)
{
- unsigned long pfn = PFN_DOWN(pa);
+ mfn_t mfn = _mfn(PFN_DOWN(pa));
void *va;
- WARN_ON(page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL));
+ WARN_ON(page_is_ram_type(mfn_x(mfn), RAM_TYPE_CONVENTIONAL));
/* The low first Mb is always mapped. */
if ( !((pa + len - 1) >> 20) )
unsigned int offs = pa & (PAGE_SIZE - 1);
unsigned int nr = PFN_UP(offs + len);
- va = __vmap(&pfn, nr, 1, 1, PAGE_HYPERVISOR_NOCACHE) + offs;
+ va = __vmap(&mfn, nr, 1, 1, PAGE_HYPERVISOR_NOCACHE) + offs;
}
return (void __force __iomem *)va;
}
else
{
- unsigned long mfns[2];
+ mfn_t mfns[2];
/* Cross-page emulated writes are only supported for HVM guests;
* PV guests ought to know better */
/* Cross-page writes mean probably not a pagetable */
sh_remove_shadows(d, sh_ctxt->mfn2, 0, 0 /* Slow, can fail */ );
- mfns[0] = mfn_x(sh_ctxt->mfn1);
- mfns[1] = mfn_x(sh_ctxt->mfn2);
+ mfns[0] = sh_ctxt->mfn1;
+ mfns[1] = sh_ctxt->mfn2;
map = vmap(mfns, 2);
if ( !map )
return MAPPING_UNHANDLEABLE;
spin_unlock(&vm_lock);
}
-void *__vmap(const unsigned long *mfn, unsigned int granularity,
+void *__vmap(const mfn_t *mfn, unsigned int granularity,
unsigned int nr, unsigned int align, unsigned int flags)
{
void *va = vm_alloc(nr * granularity, align);
for ( ; va && nr--; ++mfn, cur += PAGE_SIZE * granularity )
{
- if ( map_pages_to_xen(cur, *mfn, granularity, flags) )
+ if ( map_pages_to_xen(cur, mfn_x(*mfn), granularity, flags) )
{
vunmap(va);
va = NULL;
return va;
}
-void *vmap(const unsigned long *mfn, unsigned int nr)
+/* Map @nr frames (typesafe mfn_t array) with default PAGE_HYPERVISOR flags. */
+void *vmap(const mfn_t *mfn, unsigned int nr)
{
return __vmap(mfn, 1, nr, 1, PAGE_HYPERVISOR);
}
void *vmalloc(size_t size)
{
- unsigned long *mfn;
+ mfn_t *mfn;
size_t pages, i;
struct page_info *pg;
void *va;
ASSERT(size);
pages = PFN_UP(size);
- mfn = xmalloc_array(unsigned long, pages);
+ mfn = xmalloc_array(mfn_t, pages);
if ( mfn == NULL )
return NULL;
pg = alloc_domheap_page(NULL, 0);
if ( pg == NULL )
goto error;
- mfn[i] = page_to_mfn(pg);
+ mfn[i] = _mfn(page_to_mfn(pg));
}
va = vmap(mfn, pages);
error:
while ( i-- )
- free_domheap_page(mfn_to_page(mfn[i]));
+ free_domheap_page(mfn_to_page(mfn_x(mfn[i])));
xfree(mfn);
return NULL;
}
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
if (system_state >= SYS_STATE_active) {
- unsigned long pfn = PFN_DOWN(phys);
+ mfn_t mfn = _mfn(PFN_DOWN(phys));
unsigned int offs = phys & (PAGE_SIZE - 1);
/* The low first Mb is always mapped. */
if ( !((phys + size - 1) >> 20) )
return __va(phys);
- return __vmap(&pfn, PFN_UP(offs + size), 1, 1, PAGE_HYPERVISOR_NOCACHE) + offs;
+ return __vmap(&mfn, PFN_UP(offs + size), 1, 1,
+ PAGE_HYPERVISOR_NOCACHE) + offs;
}
return __acpi_map_table(phys, size);
}
#if !defined(__XEN_VMAP_H__) && defined(VMAP_VIRT_START)
#define __XEN_VMAP_H__
-#include <xen/types.h>
+#include <xen/mm.h>
#include <asm/page.h>
void *vm_alloc(unsigned int nr, unsigned int align);
void vm_free(const void *);
-void *__vmap(const unsigned long *mfn, unsigned int granularity,
+void *__vmap(const mfn_t *mfn, unsigned int granularity,
unsigned int nr, unsigned int align, unsigned int flags);
-void *vmap(const unsigned long *mfn, unsigned int nr);
+void *vmap(const mfn_t *mfn, unsigned int nr);
void vunmap(const void *);
void *vmalloc(size_t size);
void *vzalloc(size_t size);