/* Page-align the address (PAGE_ALIGN() rounds up) and convert it to a frame number */
#define paddr_to_pfn_aligned(paddr) paddr_to_pfn(PAGE_ALIGN(paddr))
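/*
 * Illustrative example: with 4K pages, paddr_to_pfn_aligned(0x100400)
 * equals paddr_to_pfn(0x101000).
 */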
-static inline paddr_t __virt_to_maddr(vaddr_t va)
-{
-    uint64_t par = va_to_par(va);
-    return (par & PADDR_MASK & PAGE_MASK) | (va & ~PAGE_MASK);
-}
-#define virt_to_maddr(va) __virt_to_maddr((vaddr_t)(va))
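+/*
+ * Open-coded as a statement expression so that va is evaluated exactly
+ * once: the PAR supplies the physical page address and the VA supplies
+ * the offset within the page.
+ */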
+#define virt_to_maddr(va) ({                                        \
+    vaddr_t va_ = (vaddr_t)(va);                                    \
+    (va_to_par(va_) & PADDR_MASK & PAGE_MASK) | (va_ & ~PAGE_MASK); \
+})
#ifdef CONFIG_ARM_32
/* shadow_io_bitmaps can't be declared static because
* they must fulfill hw requirements (page aligned section)
* and doing so triggers the ASSERT(va >= XEN_VIRT_START)
- * in __virt_to_maddr()
+ * in virt_to_maddr()
*
* So as a compromise pre-allocate them when xen boots.
* This function must be called from within start_xen() when
#define mfn_valid(mfn) __mfn_valid(mfn_x(mfn))
#define virt_to_mfn(va) __virt_to_mfn(va)
#define mfn_to_virt(mfn) __mfn_to_virt(mfn)
-#define virt_to_maddr(va) __virt_to_maddr((unsigned long)(va))
-#define maddr_to_virt(ma) __maddr_to_virt((unsigned long)(ma))
#define maddr_to_page(ma) __maddr_to_page(ma)
#define page_to_maddr(pg) __page_to_maddr(pg)
#define virt_to_page(va) __virt_to_page(va)
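/*
 * pdx_to_virt(): translate a (pdx-compressed) page index into its linear
 * address within the direct map.
 */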
#define pdx_to_virt(pdx) ((void *)(DIRECTMAP_VIRT_START + \
((unsigned long)(pdx) << PAGE_SHIFT)))
-static inline unsigned long __virt_to_maddr(unsigned long va)
+static inline unsigned long virt_to_maddr(unsigned long va)
{
    ASSERT(va < DIRECTMAP_VIRT_END);

    /* Direct-mapped addresses translate via the pdx-aware offset helper. */
    if ( va >= DIRECTMAP_VIRT_START )
        return directmapoff_to_maddr(va - DIRECTMAP_VIRT_START);

    /* Otherwise the VA must lie within the Xen image mapping. */
    ASSERT(va >= XEN_VIRT_START);
    return xen_phys_start + va - XEN_VIRT_START;
}
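+/*
+ * Same-named function-like macro: supplies the (unsigned long) cast for
+ * pointer arguments.  The expansion doesn't recurse, as the preprocessor
+ * never re-expands a macro's own name inside its replacement.
+ */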
+#define virt_to_maddr(va) virt_to_maddr((unsigned long)(va))
-static inline void *__maddr_to_virt(unsigned long ma)
+static inline void *maddr_to_virt(unsigned long ma)
{
    /* Offset in the direct map, accounting for pdx compression */
    unsigned long va_offset = maddr_to_directmapoff(ma);

    ASSERT(va_offset < DIRECTMAP_VIRT_END - DIRECTMAP_VIRT_START);
    return (void *)(DIRECTMAP_VIRT_START + va_offset);
}