return vmap(&mfn, 1);
}
-void unmap_domain_page_global(const void *va)
+/*
+ * Release a global mapping obtained from map_domain_page_global().
+ * Per the header contract (see domain_page.h), such mappings may be
+ * unmapped from any context; this simply tears down the vmap entry.
+ */
+void unmap_domain_page_global(const void *ptr)
{
- vunmap(va);
+ vunmap(ptr);
}
/* Map a page of domheap memory */
}
/* Release a mapping taken with map_domain_page() */
-void unmap_domain_page(const void *va)
+void unmap_domain_page(const void *ptr)
{
unsigned long flags;
lpae_t *map = this_cpu(xen_dommap);
- int slot = ((unsigned long) va - DOMHEAP_VIRT_START) >> SECOND_SHIFT;
+ int slot = ((unsigned long)ptr - DOMHEAP_VIRT_START) >> SECOND_SHIFT;
- if ( !va )
+ if ( !ptr )
return;
local_irq_save(flags);
* Pass a VA within a page previously mapped in the context of the
* currently-executing VCPU via a call to map_domain_page().
*/
-void unmap_domain_page(const void *va);
+void unmap_domain_page(const void *ptr);
-/*
+/*
* Given a VA from map_domain_page(), return its underlying MFN.
*/
-mfn_t domain_page_map_to_mfn(const void *va);
+mfn_t domain_page_map_to_mfn(const void *ptr);
/*
* Similar to the above calls, except the mapping is accessible in all
* mappings can also be unmapped from any context.
*/
void *map_domain_page_global(mfn_t mfn);
-void unmap_domain_page_global(const void *va);
+void unmap_domain_page_global(const void *ptr);
#define __map_domain_page(pg) map_domain_page(page_to_mfn(pg))
#define map_domain_page(mfn) __mfn_to_virt(mfn_x(mfn))
#define __map_domain_page(pg) page_to_virt(pg)
-#define unmap_domain_page(va) ((void)(va))
-#define domain_page_map_to_mfn(va) _mfn(__virt_to_mfn((unsigned long)(va)))
+/*
+ * NOTE(review): direct-mapped configuration — map_domain_page() above is
+ * plain __mfn_to_virt() arithmetic, so unmapping is a no-op (argument
+ * evaluated only for side effects) and the MFN is recovered by the
+ * inverse translation. Presumably no per-CPU domheap slots exist in this
+ * build — confirm against the enclosing #ifdef.
+ */
+#define unmap_domain_page(ptr) ((void)(ptr))
+#define domain_page_map_to_mfn(ptr) _mfn(__virt_to_mfn((unsigned long)(ptr)))
static inline void *map_domain_page_global(mfn_t mfn)
{