for_each_online_node ( i )
{
- paddr_t pa = pfn_to_paddr(node_start_pfn(i) + 1);
+ mfn_t mfn = _mfn(node_start_pfn(i) + 1);
printk("NODE%u start->%lu size->%lu free->%lu\n",
i, node_start_pfn(i), node_spanned_pages(i),
avail_node_heap_pages(i));
- /* Sanity check phys_to_nid() */
- if ( phys_to_nid(pa) != i )
- printk("phys_to_nid(%"PRIpaddr") -> %d should be %u\n",
- pa, phys_to_nid(pa), i);
+ /*
+  * Sanity check mfn_to_nid(): the probe MFN is node_start_pfn(i) + 1,
+  * so skip nodes that span only a single page.
+  */
+ if ( node_spanned_pages(i) > 1 && mfn_to_nid(mfn) != i )
+ printk("mfn_to_nid(%"PRI_mfn") -> %d should be %u\n",
+ mfn_x(mfn), mfn_to_nid(mfn), i);
}
j = cpumask_first(&cpu_online_map);
spin_lock(&d->page_alloc_lock);
page_list_for_each ( page, &d->page_list )
{
- i = phys_to_nid(page_to_maddr(page));
+ i = page_to_nid(page);
page_num_node[i]++;
}
spin_unlock(&d->page_alloc_lock);
return NULL;
}
- node = phys_to_nid(page_to_maddr(pg));
+ node = page_to_nid(pg);
zone = page_to_zone(pg);
buddy_order = PFN_ORDER(pg);
/* Remove any offlined page in the buddy pointed to by head. */
static int reserve_offlined_page(struct page_info *head)
{
- unsigned int node = phys_to_nid(page_to_maddr(head));
+ unsigned int node = page_to_nid(head);
int zone = page_to_zone(head), i, head_order = PFN_ORDER(head), count = 0;
struct page_info *cur_head;
unsigned int cur_order, first_dirty;
{
unsigned long mask;
mfn_t mfn = page_to_mfn(pg);
- unsigned int i, node = phys_to_nid(mfn_to_maddr(mfn));
+ unsigned int i, node = mfn_to_nid(mfn);
unsigned int zone = page_to_zone(pg);
bool pg_offlined = false;
!page_state_is(predecessor, free) ||
(predecessor->count_info & PGC_static) ||
(PFN_ORDER(predecessor) != order) ||
- (phys_to_nid(page_to_maddr(predecessor)) != node) )
+ (page_to_nid(predecessor) != node) )
break;
check_and_stop_scrub(predecessor);
!page_state_is(successor, free) ||
(successor->count_info & PGC_static) ||
(PFN_ORDER(successor) != order) ||
- (phys_to_nid(page_to_maddr(successor)) != node) )
+ (page_to_nid(successor) != node) )
break;
check_and_stop_scrub(successor);
static int reserve_heap_page(struct page_info *pg)
{
struct page_info *head = NULL;
- unsigned int i, node = phys_to_nid(page_to_maddr(pg));
+ unsigned int i, node = page_to_nid(pg);
unsigned int zone = page_to_zone(pg);
for ( i = 0; i <= MAX_ORDER; i++ )
bool need_scrub)
{
unsigned long s, e;
- unsigned int nid = phys_to_nid(page_to_maddr(pg));
+ unsigned int nid = page_to_nid(pg);
s = mfn_x(page_to_mfn(pg));
e = mfn_x(mfn_add(page_to_mfn(pg + nr_pages - 1), 1));
#ifdef CONFIG_SEPARATE_XENHEAP
unsigned int zone = page_to_zone(pg);
#endif
- unsigned int nid = phys_to_nid(page_to_maddr(pg));
+ unsigned int nid = page_to_nid(pg);
unsigned long left = nr_pages - i;
unsigned long contig_pages;
break;
#endif
- if ( nid != (phys_to_nid(page_to_maddr(pg + contig_pages))) )
+ if ( nid != page_to_nid(pg + contig_pages) )
break;
}
{
struct bootmem_region *r = &bootmem_region_list[i];
if ( (r->s < r->e) &&
- (phys_to_nid(pfn_to_paddr(r->s)) == cpu_to_node(0)) )
+ (mfn_to_nid(_mfn(r->s)) == cpu_to_node(0)) )
{
init_heap_pages(mfn_to_page(_mfn(r->s)), r->e - r->s);
r->e = r->s;
#ifndef _XEN_NUMA_H
#define _XEN_NUMA_H
+#include <xen/mm-frame.h>
#include <asm/numa.h>
#define NUMA_NO_NODE 0xFF
extern struct node_data node_data[];
-static inline nodeid_t __attribute_pure__ phys_to_nid(paddr_t addr)
+static inline nodeid_t __attribute_pure__ mfn_to_nid(mfn_t mfn)
{
nodeid_t nid;
- ASSERT((paddr_to_pdx(addr) >> memnode_shift) < memnodemapsize);
- nid = memnodemap[paddr_to_pdx(addr) >> memnode_shift];
+ unsigned long pdx = mfn_to_pdx(mfn);
+
+ ASSERT((pdx >> memnode_shift) < memnodemapsize);
+ nid = memnodemap[pdx >> memnode_shift];
ASSERT(nid < MAX_NUMNODES && node_data[nid].node_spanned_pages);
+
return nid;
}
paddr_t start, paddr_t size, bool hotplug);
extern void numa_set_processor_nodes_parsed(nodeid_t node);
+#else
+
+static inline nodeid_t mfn_to_nid(mfn_t mfn)
+{
+ return 0;
+}
+
#endif
+#define page_to_nid(pg) mfn_to_nid(page_to_mfn(pg))
+
#endif /* _XEN_NUMA_H */
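
For reference, a minimal caller-side sketch of the new interface, not part of the patch: the function dump_page_node and its context are hypothetical, and it only assumes the helpers introduced above plus the usual <xen/lib.h> and <xen/mm.h> headers. It mirrors the conversion pattern used in the hunks, staying in MFN space instead of the old phys_to_nid(page_to_maddr(...)) round trip.

#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/numa.h>

/* Hypothetical helper, for illustration only: report the node owning a page. */
static void dump_page_node(const struct page_info *pg)
{
    mfn_t mfn = page_to_mfn(pg);

    /* Old style: nodeid_t nid = phys_to_nid(page_to_maddr(pg)); */
    /* New style: look up the node from the frame number directly. */
    printk("MFN %"PRI_mfn" belongs to NODE%d\n",
           mfn_x(mfn), mfn_to_nid(mfn));

    /* page_to_nid() is shorthand for mfn_to_nid(page_to_mfn(pg)). */
    ASSERT(page_to_nid(pg) == mfn_to_nid(mfn));
}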