Switch xenheap_mfn_start and xenheap_mfn_end to the typesafe mfn_t to add more safety when using xenheap_mfn_*.
Signed-off-by: Julien Grall <julien.grall@arm.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
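
For context, below is a minimal, self-contained sketch of the typesafe MFN pattern this patch relies on. The real definitions come from Xen's TYPE_SAFE() machinery (xen/include/xen/typesafe.h and xen/include/xen/mm.h) and differ in detail; the simplified versions here are assumptions for illustration only, showing why wrapping the frame number in a struct turns accidental mixing with plain unsigned long into a compile error.

/* Simplified stand-ins for Xen's typesafe MFN helpers (illustration only). */
#include <stdio.h>

#define PAGE_SHIFT 12

/* Wrapping the frame number in a struct makes implicit arithmetic or
 * comparison with a plain unsigned long a compile-time error. */
typedef struct { unsigned long mfn; } mfn_t;

static inline mfn_t _mfn(unsigned long m)       { return (mfn_t){ m }; }
static inline unsigned long mfn_x(mfn_t m)      { return m.mfn; }
static inline int mfn_eq(mfn_t a, mfn_t b)      { return a.mfn == b.mfn; }
static inline mfn_t mfn_add(mfn_t m, unsigned long n) { return _mfn(m.mfn + n); }
static inline unsigned long long mfn_to_maddr(mfn_t m)
{
    return (unsigned long long)m.mfn << PAGE_SHIFT;
}

#define INVALID_MFN_INITIALIZER { ~0UL }
#define INVALID_MFN             _mfn(~0UL)

static mfn_t xenheap_mfn_start = INVALID_MFN_INITIALIZER;

int main(void)
{
    /* Same idiom as the patched setup_xenheap_mappings(). */
    if ( mfn_eq(xenheap_mfn_start, INVALID_MFN) )
        xenheap_mfn_start = _mfn(0x80000);    /* assumed example frame */

    /* xenheap_mfn_start + 1 would no longer compile; mfn_add() is required. */
    printf("heap starts at maddr %#llx\n",
           mfn_to_maddr(mfn_add(xenheap_mfn_start, 0)));
    return 0;
}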
static paddr_t phys_offset;
/* Limits of the Xen heap */
-unsigned long xenheap_mfn_start __read_mostly = ~0UL;
-unsigned long xenheap_mfn_end __read_mostly;
+mfn_t xenheap_mfn_start __read_mostly = INVALID_MFN_INITIALIZER;
+mfn_t xenheap_mfn_end __read_mostly;
vaddr_t xenheap_virt_end __read_mostly;
#ifdef CONFIG_ARM_64
vaddr_t xenheap_virt_start __read_mostly;
/* Record where the xenheap is, for translation routines. */
xenheap_virt_end = XENHEAP_VIRT_START + nr_mfns * PAGE_SIZE;
- xenheap_mfn_start = base_mfn;
- xenheap_mfn_end = base_mfn + nr_mfns;
+ xenheap_mfn_start = _mfn(base_mfn);
+ xenheap_mfn_end = _mfn(base_mfn + nr_mfns);
}
#else /* CONFIG_ARM_64 */
void __init setup_xenheap_mappings(unsigned long base_mfn,
mfn = base_mfn & ~((FIRST_SIZE>>PAGE_SHIFT)-1);
/* First call sets the xenheap physical and virtual offset. */
- if ( xenheap_mfn_start == ~0UL )
+ if ( mfn_eq(xenheap_mfn_start, INVALID_MFN) )
{
- xenheap_mfn_start = base_mfn;
+ xenheap_mfn_start = _mfn(base_mfn);
xenheap_virt_start = DIRECTMAP_VIRT_START +
(base_mfn - mfn) * PAGE_SIZE;
}
- if ( base_mfn < xenheap_mfn_start )
+ if ( base_mfn < mfn_x(xenheap_mfn_start) )
panic("cannot add xenheap mapping at %lx below heap start %lx",
- base_mfn, xenheap_mfn_start);
+ base_mfn, mfn_x(xenheap_mfn_start));
end_mfn = base_mfn + nr_mfns;
* and enough mapped pages for copying the DTB.
*/
dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT;
- boot_mfn_start = xenheap_mfn_end - dtb_pages - 1;
- boot_mfn_end = xenheap_mfn_end;
+ boot_mfn_start = mfn_x(xenheap_mfn_end) - dtb_pages - 1;
+ boot_mfn_end = mfn_x(xenheap_mfn_end);
init_boot_pages(pfn_to_paddr(boot_mfn_start), pfn_to_paddr(boot_mfn_end));
e = bank_end;
/* Avoid the xenheap */
- if ( s < pfn_to_paddr(xenheap_mfn_start+xenheap_pages)
- && pfn_to_paddr(xenheap_mfn_start) < e )
+ if ( s < mfn_to_maddr(mfn_add(xenheap_mfn_start, xenheap_pages))
+ && mfn_to_maddr(xenheap_mfn_start) < e )
{
- e = pfn_to_paddr(xenheap_mfn_start);
- n = pfn_to_paddr(xenheap_mfn_start+xenheap_pages);
+ e = mfn_to_maddr(xenheap_mfn_start);
+ n = mfn_to_maddr(mfn_add(xenheap_mfn_start, xenheap_pages));
}
dt_unreserved_regions(s, e, init_boot_pages, 0);
/* Add xenheap memory that was not already added to the boot
allocator. */
- init_xenheap_pages(pfn_to_paddr(xenheap_mfn_start),
+ init_xenheap_pages(mfn_to_maddr(xenheap_mfn_start),
pfn_to_paddr(boot_mfn_start));
}
#else /* CONFIG_ARM_64 */
total_pages += ram_size >> PAGE_SHIFT;
xenheap_virt_end = XENHEAP_VIRT_START + ram_end - ram_start;
- xenheap_mfn_start = ram_start >> PAGE_SHIFT;
- xenheap_mfn_end = ram_end >> PAGE_SHIFT;
+ xenheap_mfn_start = maddr_to_mfn(ram_start);
+ xenheap_mfn_end = maddr_to_mfn(ram_end);
/*
* Need enough mapped pages for copying the DTB.
#define PGC_count_width PG_shift(9)
#define PGC_count_mask ((1UL<<PGC_count_width)-1)
-extern unsigned long xenheap_mfn_start, xenheap_mfn_end;
+extern mfn_t xenheap_mfn_start, xenheap_mfn_end;
extern vaddr_t xenheap_virt_end;
#ifdef CONFIG_ARM_64
extern vaddr_t xenheap_virt_start;
#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
#define is_xen_heap_mfn(mfn) ({ \
unsigned long _mfn = (mfn); \
- (_mfn >= xenheap_mfn_start && _mfn < xenheap_mfn_end); \
+ (_mfn >= mfn_x(xenheap_mfn_start) && \
+ _mfn < mfn_x(xenheap_mfn_end)); \
})
#else
#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
static inline void *maddr_to_virt(paddr_t ma)
{
ASSERT(is_xen_heap_mfn(ma >> PAGE_SHIFT));
- ma -= pfn_to_paddr(xenheap_mfn_start);
+ ma -= mfn_to_maddr(xenheap_mfn_start);
return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
}
#else
{
ASSERT(pfn_to_pdx(ma >> PAGE_SHIFT) < (DIRECTMAP_SIZE >> PAGE_SHIFT));
return (void *)(XENHEAP_VIRT_START -
- pfn_to_paddr(xenheap_mfn_start) +
+ mfn_to_maddr(xenheap_mfn_start) +
((ma & ma_va_bottom_mask) |
((ma & ma_top_mask) >> pfn_pdx_hole_shift)));
}
ASSERT(va < xenheap_virt_end);
pdx = (va - XENHEAP_VIRT_START) >> PAGE_SHIFT;
- pdx += pfn_to_pdx(xenheap_mfn_start);
+ pdx += pfn_to_pdx(mfn_x(xenheap_mfn_start));
return frame_table + pdx - frametable_base_pdx;
}
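
As a side note, the ARM32 maddr_to_virt() path above is a straight offset into the direct map: virt = XENHEAP_VIRT_START + (ma - mfn_to_maddr(xenheap_mfn_start)). The toy program below works that arithmetic through with invented constants (the virtual base and heap start MFN are assumptions for the example, not Xen's real layout).

/* Toy model of the ARM32 maddr_to_virt() arithmetic; constants are made up. */
#include <stdio.h>

#define PAGE_SHIFT          12
#define XENHEAP_VIRT_START  0x40000000UL   /* assumed virtual base of the heap mapping */

int main(void)
{
    unsigned long xenheap_start_mfn = 0x80000;    /* assumed first heap frame */
    unsigned long long ma = 0x80123456ULL;        /* a machine address inside the heap */

    /* virt = XENHEAP_VIRT_START + (ma - machine address of the heap start) */
    unsigned long va = XENHEAP_VIRT_START +
        (unsigned long)(ma - ((unsigned long long)xenheap_start_mfn << PAGE_SHIFT));

    printf("maddr %#llx -> vaddr %#lx\n", ma, va);
    return 0;
}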