#define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
#endif
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef page_to_mfn
+#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
+
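For readers unfamiliar with Xen's typesafe MFN wrappers that these overrides rely on, here is a minimal standalone sketch of the pattern (an illustration under assumed names mirroring Xen's, not the real headers): mfn_t wraps the raw frame number in a single-member struct, so handing it to an interface that wants a plain unsigned long becomes a compile-time error, and _mfn()/mfn_x() convert explicitly in and out of the wrapper.

    /* Minimal sketch only; the real definitions live in Xen's headers. */
    #include <stdio.h>

    typedef struct { unsigned long mfn; } mfn_t;

    static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ .mfn = m }; }
    static inline unsigned long mfn_x(mfn_t m) { return m.mfn; }

    int main(void)
    {
        mfn_t mfn = _mfn(0x3f45);

        /* The raw value must be extracted explicitly wherever one is
         * still needed, e.g. for printing or for legacy interfaces. */
        printf("raw mfn: %#lx\n", mfn_x(mfn));
        return 0;
    }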
/*
* Comma-separated list of hexadecimal page numbers containing bad bytes.
* e.g. 'badpage=0x3f45,0x8a321'.
* first_valid_mfn is exported because it is used in ARM specific NUMA
* helpers. See comment in asm-arm/numa.h.
*/
-unsigned long first_valid_mfn = ~0UL;
+mfn_t first_valid_mfn = INVALID_MFN_INITIALIZER;
static struct bootmem_region {
unsigned long s, e; /* MFNs @s through @e-1 inclusive are free */
if ( pe <= ps )
return;
- first_valid_mfn = min_t(unsigned long, ps >> PAGE_SHIFT, first_valid_mfn);
+ first_valid_mfn = mfn_min(maddr_to_mfn(ps), first_valid_mfn);
bootmem_region_add(ps >> PAGE_SHIFT, pe >> PAGE_SHIFT);
#define bits_to_zone(b) (((b) < (PAGE_SHIFT + 1)) ? 1 : ((b) - PAGE_SHIFT))
#define page_to_zone(pg) (is_xen_heap_page(pg) ? MEMZONE_XEN : \
- (flsl(page_to_mfn(pg)) ? : 1))
+ (flsl(mfn_x(page_to_mfn(pg))) ? : 1))
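As a worked illustration of the reworked page_to_zone(): for a non-Xen-heap page the zone index is the 1-based position of the highest set bit of the raw MFN, clamped to at least 1. The hypothetical standalone sketch below reproduces that with __builtin_clzl, assuming a 64-bit unsigned long; the real macro uses Xen's flsl() and additionally special-cases Xen-heap pages.

    /* Illustration only; flsl_like() mimics flsl() on 64-bit long. */
    #include <stdio.h>

    static unsigned int flsl_like(unsigned long x)
    {
        return x ? 64 - __builtin_clzl(x) : 0;
    }

    static unsigned int zone_of(unsigned long raw_mfn)
    {
        unsigned int z = flsl_like(raw_mfn);
        return z ? z : 1;           /* mirror the "?: 1" clamp */
    }

    int main(void)
    {
        /* Bit 13 is the highest set bit of 0x3f45, so zone 14. */
        printf("zone of MFN 0x3f45: %u\n", zone_of(0x3f45));
        return 0;
    }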
typedef struct page_list_head heap_by_zone_and_order_t[NR_ZONES][MAX_ORDER+1];
static heap_by_zone_and_order_t *_heap[MAX_NUMNODES];
static void poison_one_page(struct page_info *pg)
{
#ifdef CONFIG_SCRUB_DEBUG
- mfn_t mfn = _mfn(page_to_mfn(pg));
+ mfn_t mfn = page_to_mfn(pg);
uint64_t *ptr;
if ( !scrub_debug )
static void check_one_page(struct page_info *pg)
{
#ifdef CONFIG_SCRUB_DEBUG
- mfn_t mfn = _mfn(page_to_mfn(pg));
+ mfn_t mfn = page_to_mfn(pg);
const uint64_t *ptr;
unsigned int i;
/* Ensure cache and RAM are consistent for platforms where the
* guest can control its own visibility of/through the cache.
*/
- flush_page_to_ram(page_to_mfn(&pg[i]), !(memflags & MEMF_no_icache_flush));
+ flush_page_to_ram(mfn_x(page_to_mfn(&pg[i])),
+ !(memflags & MEMF_no_icache_flush));
}
spin_unlock(&heap_lock);
static void free_heap_pages(
struct page_info *pg, unsigned int order, bool need_scrub)
{
- unsigned long mask, mfn = page_to_mfn(pg);
+ unsigned long mask;
+ mfn_t mfn = page_to_mfn(pg);
unsigned int i, node = phys_to_nid(page_to_maddr(pg)), tainted = 0;
unsigned int zone = page_to_zone(pg);
/* This page is not a guest frame any more. */
page_set_owner(&pg[i], NULL); /* set_gpfn_from_mfn snoops pg owner */
- set_gpfn_from_mfn(mfn + i, INVALID_M2P_ENTRY);
+ set_gpfn_from_mfn(mfn_x(mfn) + i, INVALID_M2P_ENTRY);
if ( need_scrub )
{
{
mask = 1UL << order;
- if ( (page_to_mfn(pg) & mask) )
+ if ( (mfn_x(page_to_mfn(pg)) & mask) )
{
struct page_info *predecessor = pg - mask;
/* Merge with predecessor block? */
- if ( !mfn_valid(_mfn(page_to_mfn(predecessor))) ||
+ if ( !mfn_valid(page_to_mfn(predecessor)) ||
!page_state_is(predecessor, free) ||
(PFN_ORDER(predecessor) != order) ||
(phys_to_nid(page_to_maddr(predecessor)) != node) )
struct page_info *successor = pg + mask;
/* Merge with successor block? */
- if ( !mfn_valid(_mfn(page_to_mfn(successor))) ||
+ if ( !mfn_valid(page_to_mfn(successor)) ||
!page_state_is(successor, free) ||
(PFN_ORDER(successor) != order) ||
(phys_to_nid(page_to_maddr(successor)) != node) )
{
unsigned long nx, x, y = pg->count_info;
- ASSERT(page_is_ram_type(page_to_mfn(pg), RAM_TYPE_CONVENTIONAL));
+ ASSERT(page_is_ram_type(mfn_x(page_to_mfn(pg)), RAM_TYPE_CONVENTIONAL));
ASSERT(spin_is_locked(&heap_lock));
do {
}
*status = 0;
- pg = mfn_to_page(mfn);
+ pg = mfn_to_page(_mfn(mfn));
if ( is_xen_fixed_mfn(mfn) )
{
return -EINVAL;
}
- pg = mfn_to_page(mfn);
+ pg = mfn_to_page(_mfn(mfn));
spin_lock(&heap_lock);
*status = 0;
spin_lock(&heap_lock);
- pg = mfn_to_page(mfn);
+ pg = mfn_to_page(_mfn(mfn));
if ( page_state_is(pg, offlining) )
*status |= PG_OFFLINE_STATUS_OFFLINE_PENDING;
* Update first_valid_mfn to ensure those regions are covered.
*/
spin_lock(&heap_lock);
- first_valid_mfn = min_t(unsigned long, page_to_mfn(pg), first_valid_mfn);
+ first_valid_mfn = mfn_min(page_to_mfn(pg), first_valid_mfn);
spin_unlock(&heap_lock);
for ( i = 0; i < nr_pages; i++ )
if ( unlikely(!avail[nid]) )
{
- unsigned long s = page_to_mfn(pg + i);
- unsigned long e = page_to_mfn(pg + nr_pages - 1) + 1;
+ unsigned long s = mfn_x(page_to_mfn(pg + i));
+ unsigned long e = mfn_x(mfn_add(page_to_mfn(pg + nr_pages - 1), 1));
bool_t use_tail = (nid == phys_to_nid(pfn_to_paddr(e - 1))) &&
!(s & ((1UL << MAX_ORDER) - 1)) &&
(find_first_set_bit(e) <= find_first_set_bit(s));
unsigned long n;
- n = init_node_heap(nid, page_to_mfn(pg+i), nr_pages - i,
+ n = init_node_heap(nid, mfn_x(page_to_mfn(pg + i)), nr_pages - i,
&use_tail);
BUG_ON(i + n > nr_pages);
if ( n && !use_tail )
if ( (r->s < r->e) &&
(phys_to_nid(pfn_to_paddr(r->s)) == cpu_to_node(0)) )
{
- init_heap_pages(mfn_to_page(r->s), r->e - r->s);
+ init_heap_pages(mfn_to_page(_mfn(r->s)), r->e - r->s);
r->e = r->s;
break;
}
{
struct bootmem_region *r = &bootmem_region_list[i];
if ( r->s < r->e )
- init_heap_pages(mfn_to_page(r->s), r->e - r->s);
+ init_heap_pages(mfn_to_page(_mfn(r->s)), r->e - r->s);
}
nr_bootmem_regions = 0;
init_heap_pages(virt_to_page(bootmem_region_list), 1);
for ( mfn = start; mfn < end; mfn++ )
{
- pg = mfn_to_page(mfn);
+ pg = mfn_to_page(_mfn(mfn));
/* Check the mfn is valid and page is free. */
if ( !mfn_valid(_mfn(mfn)) || !page_state_is(pg, free) )
if ( !node_spanned_pages(i) )
continue;
/* Calculate Node memory start and end address. */
- start = max(node_start_pfn(i), first_valid_mfn);
+ start = max(node_start_pfn(i), mfn_x(first_valid_mfn));
end = min(node_start_pfn(i) + node_spanned_pages(i), max_page);
/* Just in case NODE has 1 page and starts below first_valid_mfn. */
end = max(end, start);
void init_domheap_pages(paddr_t ps, paddr_t pe)
{
- unsigned long smfn, emfn;
+ mfn_t smfn, emfn;
ASSERT(!in_irq());
- smfn = round_pgup(ps) >> PAGE_SHIFT;
- emfn = round_pgdown(pe) >> PAGE_SHIFT;
+ smfn = maddr_to_mfn(round_pgup(ps));
+ emfn = maddr_to_mfn(round_pgdown(pe));
- if ( emfn <= smfn )
+ if ( mfn_x(emfn) <= mfn_x(smfn) )
return;
- init_heap_pages(mfn_to_page(smfn), emfn - smfn);
+ init_heap_pages(mfn_to_page(smfn), mfn_x(emfn) - mfn_x(smfn));
}
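A small standalone sketch of the rounding at play in init_domheap_pages() (assuming 4 KiB pages; the helper names mimic Xen's round_pgup()/round_pgdown() but this is an illustration, not the hypervisor code): the start address is rounded up and the end address rounded down, so only pages entirely contained in [ps, pe) are handed to the heap.

    /* Illustration only, assuming PAGE_SHIFT == 12 (4 KiB pages). */
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    static unsigned long round_pgup(unsigned long p)
    {
        return (p + PAGE_SIZE - 1) & PAGE_MASK;
    }

    static unsigned long round_pgdown(unsigned long p)
    {
        return p & PAGE_MASK;
    }

    int main(void)
    {
        unsigned long ps = 0x1234, pe = 0x5678;

        /* 0x1234 rounds up to 0x2000 (MFN 2) and 0x5678 rounds down to
         * 0x5000 (MFN 5), so the fully covered pages, MFNs 2..4, are the
         * ones handed to the heap. */
        printf("smfn = %#lx, emfn = %#lx\n",
               round_pgup(ps) >> PAGE_SHIFT, round_pgdown(pe) >> PAGE_SHIFT);
        return 0;
    }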