Make the heap allocators NUMA-aware: drop the per-CPU parameter from the page allocation interfaces and pass the target node through the memflags argument instead, using the new MEMF_node() sub-flag type.
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
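
As a reading aid (not part of the patch): a minimal, self-contained sketch of the node encoding introduced below. MEMF_node() stores node+1 in bits 8..15 of memflags, so flags that carry no node decode to NUMA_NO_NODE (0xFF) and the allocator can fall back to the local node.

/* Sketch only: mirrors the MEMF_node()/NUMA_NO_NODE definitions added
 * near the end of this patch to illustrate the "+1 on encode, -1 on
 * decode" trick. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define _MEMF_node   8
#define MEMF_node(n) ((((n) + 1) & 0xff) << _MEMF_node)
#define NUMA_NO_NODE 0xFF

/* Decode as alloc_domheap_pages() does: an explicit node round-trips,
 * while memflags without a node field yield NUMA_NO_NODE. */
static unsigned int memflags_to_node(unsigned int memflags)
{
    return (uint8_t)((memflags >> _MEMF_node) - 1);
}

int main(void)
{
    assert(memflags_to_node(MEMF_node(2)) == 2);   /* explicit node */
    assert(memflags_to_node(0) == NUMA_NO_NODE);   /* no node encoded */
    printf("MEMF_node() encoding round-trips\n");
    return 0;
}
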
BUG_ON(!pte_none(*pte));
- p = alloc_domheap_page(d);
+ p = alloc_domheap_page(d, 0);
if (unlikely(!p)) {
printk("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
return(p);
unsigned long new_mfn;
int ret;
- new = alloc_domheap_page(d);
+ new = alloc_domheap_page(d, 0);
if (new == NULL) {
gdprintk(XENLOG_INFO, "alloc_domheap_page() failed\n");
return -1;
BUG_ON(dom_p2m == NULL);
if (!opt_p2m_xenheap) {
- struct page_info *page = alloc_domheap_page(dom_p2m);
+ struct page_info *page = alloc_domheap_page(dom_p2m, 0);
if (page == NULL)
return NULL;
p = page_to_virt(page);
__func__, tlb_track->num_entries, tlb_track->limit);
return -ENOMEM;
}
- entry_page = alloc_domheap_page(NULL);
+ entry_page = alloc_domheap_page(NULL, 0);
if (entry_page == NULL) {
dprintk(XENLOG_WARNING,
"%s: domheap page failed. num_entries %d limit %d\n",
if (tlb_track == NULL)
goto out;
- hash_page = alloc_domheap_page(NULL);
+ hash_page = alloc_domheap_page(NULL, 0);
if (hash_page == NULL)
goto out;
#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/nmi.h>
+#include <xen/numa.h>
#include <xen/iommu.h>
#ifdef CONFIG_COMPAT
#include <compat/vcpu.h>
if ( !d->arch.mm_arg_xlat_l3 )
{
- pg = alloc_domheap_page(NULL);
+ pg = alloc_domheap_page(NULL, 0);
if ( !pg )
return -ENOMEM;
d->arch.mm_arg_xlat_l3 = page_to_virt(pg);
if ( !l3e_get_intpte(d->arch.mm_arg_xlat_l3[l3_table_offset(va)]) )
{
- pg = alloc_domheap_page(NULL);
+ pg = alloc_domheap_page(NULL, 0);
if ( !pg )
return -ENOMEM;
clear_page(page_to_virt(pg));
l2tab = l3e_to_l2e(d->arch.mm_arg_xlat_l3[l3_table_offset(va)]);
if ( !l2e_get_intpte(l2tab[l2_table_offset(va)]) )
{
- pg = alloc_domheap_page(NULL);
+ pg = alloc_domheap_page(NULL, 0);
if ( !pg )
return -ENOMEM;
clear_page(page_to_virt(pg));
}
l1tab = l2e_to_l1e(l2tab[l2_table_offset(va)]);
BUG_ON(l1e_get_intpte(l1tab[l1_table_offset(va)]));
- pg = alloc_domheap_page(NULL);
+ pg = alloc_domheap_page(NULL, 0);
if ( !pg )
return -ENOMEM;
l1tab[l1_table_offset(va)] = l1e_from_page(pg, PAGE_HYPERVISOR);
static int setup_compat_l4(struct vcpu *v)
{
- struct page_info *pg = alloc_domheap_page(NULL);
+ struct page_info *pg = alloc_domheap_page(NULL, 0);
l4_pgentry_t *l4tab;
int rc;
#else /* __x86_64__ */
- if ( (pg = alloc_domheap_page(NULL)) == NULL )
+ pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
+ if ( pg == NULL )
goto fail;
d->arch.mm_perdomain_l2 = page_to_virt(pg);
clear_page(d->arch.mm_perdomain_l2);
l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt)+i,
__PAGE_HYPERVISOR);
- if ( (pg = alloc_domheap_page(NULL)) == NULL )
+ pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
+ if ( pg == NULL )
goto fail;
d->arch.mm_perdomain_l3 = page_to_virt(pg);
clear_page(d->arch.mm_perdomain_l3);
}
else
{
- page = alloc_domheap_page(NULL);
+ page = alloc_domheap_page(NULL, 0);
if ( !page )
panic("Not enough RAM for domain 0 PML4.\n");
l4start = l4tab = page_to_virt(page);
#include <xen/sched.h>
#include <xen/domain_page.h>
#include <asm/hvm/support.h>
+#include <xen/numa.h>
#define PAT(x) (x)
static const uint32_t mask16[16] = {
for ( i = 0; i != ARRAY_SIZE(s->vram_page); i++ )
{
- if ( (pg = alloc_domheap_page(NULL)) == NULL )
+ pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
+ if ( pg == NULL )
break;
s->vram_page[i] = pg;
p = map_domain_page(page_to_mfn(pg));
#include <xen/sched.h>
#include <asm/current.h>
#include <asm/hvm/vmx/vmx.h>
+#include <xen/numa.h>
#include <public/hvm/ioreq.h>
#include <public/hvm/params.h>
int vlapic_init(struct vcpu *v)
{
struct vlapic *vlapic = vcpu_vlapic(v);
- unsigned int memflags = 0;
+ unsigned int memflags = MEMF_node(vcpu_to_node(v));
HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "%d", v->vcpu_id);
#ifdef __i386__
/* 32-bit VMX may be limited to 32-bit physical addresses. */
if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
- memflags = MEMF_bits(32);
+ memflags |= MEMF_bits(32);
#endif
- vlapic->regs_page = alloc_domheap_pages(NULL, 0, memflags);
+ vlapic->regs_page = alloc_domheap_page(NULL, memflags);
if ( vlapic->regs_page == NULL )
{
dprintk(XENLOG_ERR, "alloc vlapic regs error: %d/%d\n",
{
dprintk(XENLOG_ERR, "map vlapic regs error: %d/%d\n",
v->domain->domain_id, v->vcpu_id);
- return -ENOMEM;
+ return -ENOMEM;
}
clear_page(vlapic->regs);
#include <asm/hap.h>
#include <asm/paging.h>
#include <asm/domain.h>
+#include <xen/numa.h>
#include "private.h"
&& mfn_x(page_to_mfn(pg)) >= (1UL << (32 - PAGE_SHIFT)) )
{
free_domheap_page(pg);
- pg = alloc_domheap_pages(NULL, 0, MEMF_bits(32));
+ pg = alloc_domheap_page(
+ NULL, MEMF_bits(32) | MEMF_node(domain_to_node(d)));
if ( likely(pg != NULL) )
{
void *p = hap_map_domain_page(page_to_mfn(pg));
if ( d->arch.paging.hap.total_pages < pages )
{
/* Need to allocate more memory from domheap */
- pg = alloc_domheap_page(NULL);
+ pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
if ( pg == NULL )
{
HAP_PRINTK("failed to allocate hap pages.\n");
#include <asm/p2m.h>
#include <asm/hap.h>
#include <asm/guest_access.h>
+#include <xen/numa.h>
#include <xsm/xsm.h>
#define hap_enabled(d) (is_hvm_domain(d) && (d)->arch.hvm_domain.hap_enabled)
static mfn_t paging_new_log_dirty_page(struct domain *d, void **mapping_p)
{
mfn_t mfn;
- struct page_info *page = alloc_domheap_page(NULL);
+ struct page_info *page;
+ page = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
if ( unlikely(page == NULL) )
{
d->arch.paging.log_dirty.failed_allocs++;
#include <asm/current.h>
#include <asm/flushtlb.h>
#include <asm/shadow.h>
+#include <xen/numa.h>
#include "private.h"
{
/* Need to allocate more memory from domheap */
sp = (struct shadow_page_info *)
- alloc_domheap_pages(NULL, order, 0);
+ alloc_domheap_pages(NULL, order, MEMF_node(domain_to_node(d)));
if ( sp == NULL )
{
SHADOW_PRINTK("failed to allocate shadow pages.\n");
if ( !early_boot )
{
- struct page_info *pg = alloc_domheap_page(NULL);
+ struct page_info *pg = alloc_domheap_page(NULL, 0);
BUG_ON(pg == NULL);
return page_to_virt(pg);
}
struct page_info *l1_pg, *l2_pg, *l3_pg;
/* Create user-accessible L2 directory to map the MPT for guests. */
- if ( (l3_pg = alloc_domheap_page(NULL)) == NULL )
+ if ( (l3_pg = alloc_domheap_page(NULL, 0)) == NULL )
goto nomem;
l3_ro_mpt = page_to_virt(l3_pg);
clear_page(l3_ro_mpt);
1UL << L2_PAGETABLE_SHIFT);
if ( !((unsigned long)l2_ro_mpt & ~PAGE_MASK) )
{
- if ( (l2_pg = alloc_domheap_page(NULL)) == NULL )
+ if ( (l2_pg = alloc_domheap_page(NULL, 0)) == NULL )
goto nomem;
va = RO_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT);
l2_ro_mpt = page_to_virt(l2_pg);
l4_table_offset(HIRO_COMPAT_MPT_VIRT_START));
l3_ro_mpt = l4e_to_l3e(idle_pg_table[l4_table_offset(
HIRO_COMPAT_MPT_VIRT_START)]);
- if ( (l2_pg = alloc_domheap_page(NULL)) == NULL )
+ if ( (l2_pg = alloc_domheap_page(NULL, 0)) == NULL )
goto nomem;
compat_idle_pg_table_l2 = l2_ro_mpt = page_to_virt(l2_pg);
clear_page(l2_ro_mpt);
struct page_info *new_page;
void *sp, *dp;
- new_page = alloc_domheap_pages(NULL, 0, MEMF_bits(max_bitsize));
+ new_page = alloc_domheap_page(NULL, MEMF_bits(max_bitsize));
if ( new_page == NULL )
{
gop.status = GNTST_address_too_big;
#include <xen/errno.h>
#include <asm/current.h>
#include <asm/hardirq.h>
+#include <xen/numa.h>
#include <public/memory.h>
#include <xsm/xsm.h>
int preempted; /* Was the hypercall preempted? */
};
-static unsigned int select_local_cpu(struct domain *d)
-{
- struct vcpu *v = d->vcpu[0];
- return (v ? v->processor : 0);
-}
-
static void increase_reservation(struct memop_args *a)
{
struct page_info *page;
unsigned long i;
xen_pfn_t mfn;
struct domain *d = a->domain;
- unsigned int cpu = select_local_cpu(d);
+ unsigned int node = domain_to_node(d);
if ( !guest_handle_is_null(a->extent_list) &&
!guest_handle_okay(a->extent_list, a->nr_extents) )
goto out;
}
- page = __alloc_domheap_pages(d, cpu, a->extent_order, a->memflags);
+ page = alloc_domheap_pages(
+ d, a->extent_order, a->memflags | MEMF_node(node));
if ( unlikely(page == NULL) )
{
gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
unsigned long i, j;
xen_pfn_t gpfn, mfn;
struct domain *d = a->domain;
- unsigned int cpu = select_local_cpu(d);
+ unsigned int node = domain_to_node(d);
if ( !guest_handle_okay(a->extent_list, a->nr_extents) )
return;
if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
goto out;
- page = __alloc_domheap_pages(d, cpu, a->extent_order, a->memflags);
+ page = alloc_domheap_pages(
+ d, a->extent_order, a->memflags | MEMF_node(node));
if ( unlikely(page == NULL) )
{
gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
unsigned long in_chunk_order, out_chunk_order;
xen_pfn_t gpfn, gmfn, mfn;
unsigned long i, j, k;
- unsigned int memflags = 0, cpu;
+ unsigned int memflags = 0;
long rc = 0;
struct domain *d;
struct page_info *page;
memflags |= MEMF_bits(domain_clamp_alloc_bitsize(
d, exch.out.address_bits ? : (BITS_PER_LONG+PAGE_SHIFT)));
-
- cpu = select_local_cpu(d);
+ memflags |= MEMF_node(domain_to_node(d));
for ( i = (exch.nr_exchanged >> in_chunk_order);
i < (exch.in.nr_extents >> in_chunk_order);
/* Allocate a chunk's worth of anonymous output pages. */
for ( j = 0; j < (1UL << out_chunk_order); j++ )
{
- page = __alloc_domheap_pages(
- NULL, cpu, exch.out.extent_order, memflags);
+ page = alloc_domheap_pages(NULL, exch.out.extent_order, memflags);
if ( unlikely(page == NULL) )
{
rc = -ENOMEM;
#include <xen/numa.h>
#include <xen/nodemask.h>
#include <asm/page.h>
+#include <asm/numa.h>
#include <asm/flushtlb.h>
/*
/* Allocate 2^@order contiguous pages. */
static struct page_info *alloc_heap_pages(
unsigned int zone_lo, unsigned int zone_hi,
- unsigned int cpu, unsigned int order)
+ unsigned int node, unsigned int order)
{
unsigned int i, j, zone;
- unsigned int node = cpu_to_node(cpu), num_nodes = num_online_nodes();
+ unsigned int num_nodes = num_online_nodes();
unsigned long request = 1UL << order;
cpumask_t extra_cpus_mask, mask;
struct page_info *pg;
+ if ( node == NUMA_NO_NODE )
+ node = cpu_to_node(smp_processor_id());
+
ASSERT(node >= 0);
ASSERT(node < num_nodes);
ASSERT(zone_lo <= zone_hi);
ASSERT(!in_irq());
- pg = alloc_heap_pages(MEMZONE_XEN, MEMZONE_XEN, smp_processor_id(), order);
+ pg = alloc_heap_pages(
+ MEMZONE_XEN, MEMZONE_XEN, cpu_to_node(smp_processor_id()), order);
if ( unlikely(pg == NULL) )
goto no_memory;
}
-struct page_info *__alloc_domheap_pages(
- struct domain *d, unsigned int cpu, unsigned int order,
- unsigned int memflags)
+struct page_info *alloc_domheap_pages(
+ struct domain *d, unsigned int order, unsigned int memflags)
{
struct page_info *pg = NULL;
unsigned int bits = memflags >> _MEMF_bits, zone_hi = NR_ZONES - 1;
+ unsigned int node = (uint8_t)((memflags >> _MEMF_node) - 1);
ASSERT(!in_irq());
if ( (zone_hi + PAGE_SHIFT) >= dma_bitsize )
{
- pg = alloc_heap_pages(dma_bitsize - PAGE_SHIFT, zone_hi, cpu, order);
+ pg = alloc_heap_pages(dma_bitsize - PAGE_SHIFT, zone_hi, node, order);
/* Failure? Then check if we can fall back to the DMA pool. */
if ( unlikely(pg == NULL) &&
if ( (pg == NULL) &&
((pg = alloc_heap_pages(MEMZONE_XEN + 1, zone_hi,
- cpu, order)) == NULL) )
+ node, order)) == NULL) )
return NULL;
if ( (d != NULL) && assign_pages(d, pg, order, memflags) )
return pg;
}
-struct page_info *alloc_domheap_pages(
- struct domain *d, unsigned int order, unsigned int flags)
-{
- return __alloc_domheap_pages(d, smp_processor_id(), order, flags);
-}
-
void free_domheap_pages(struct page_info *pg, unsigned int order)
{
int i, drop_dom_ref;
#include <xen/xmalloc.h>
#include <xen/domain_page.h>
#include <xen/iommu.h>
+#include <xen/numa.h>
#include "iommu.h"
#include "dmar.h"
#include "../pci-direct.h"
if ( dma_pte_addr(*pte) == 0 )
{
- pg = alloc_domheap_page(NULL);
+ pg = alloc_domheap_page(
+ NULL, MEMF_node(domain_to_node(domain)));
vaddr = map_domain_page(page_to_mfn(pg));
if ( !vaddr )
{
#define clear_node_cpumask(cpu) do {} while (0)
#endif
-#define NUMA_NO_NODE 0xff
#endif
void init_domheap_pages(paddr_t ps, paddr_t pe);
struct page_info *alloc_domheap_pages(
struct domain *d, unsigned int order, unsigned int memflags);
-struct page_info *__alloc_domheap_pages(
- struct domain *d, unsigned int cpu, unsigned int order,
- unsigned int memflags);
void free_domheap_pages(struct page_info *pg, unsigned int order);
unsigned long avail_domheap_pages_region(
unsigned int node, unsigned int min_width, unsigned int max_width);
unsigned long avail_domheap_pages(void);
-#define alloc_domheap_page(d) (alloc_domheap_pages(d,0,0))
+#define alloc_domheap_page(d,f) (alloc_domheap_pages(d,0,f))
#define free_domheap_page(p) (free_domheap_pages(p,0))
void scrub_heap_pages(void);
/* memflags: */
#define _MEMF_no_refcount 0
#define MEMF_no_refcount (1U<<_MEMF_no_refcount)
+#define _MEMF_node 8
+#define MEMF_node(n) ((((n)+1)&0xff)<<_MEMF_node)
#define _MEMF_bits 24
#define MEMF_bits(n) ((n)<<_MEMF_bits)
#define NODES_SHIFT 0
#endif
+#define NUMA_NO_NODE 0xFF
+
#define MAX_NUMNODES (1 << NODES_SHIFT)
+#define vcpu_to_node(v) (cpu_to_node[(v)->processor])
+
+#define domain_to_node(d) \
+ (((d)->vcpu[0] != NULL) ? vcpu_to_node((d)->vcpu[0]) : NUMA_NO_NODE)
+
#endif /* _XEN_NUMA_H */
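
Likewise illustrative only, not part of the patch: a compilable toy model of the vcpu_to_node()/domain_to_node() helpers added to numa.h above. The struct stubs and the cpu_to_node table are hypothetical stand-ins; the two macros are the patch's own. A domain with no vcpus yet reports NUMA_NO_NODE, so alloc_heap_pages() falls back to the allocating CPU's node for such early allocations.

/* Toy model: struct vcpu/struct domain and cpu_to_node[] are stubs;
 * vcpu_to_node()/domain_to_node() match the macros added above. */
#include <stdio.h>

#define NUMA_NO_NODE 0xFF

struct vcpu   { unsigned int processor; };   /* stub */
struct domain { struct vcpu *vcpu[1]; };     /* stub */

static const unsigned int cpu_to_node[4] = { 0, 0, 1, 1 };  /* stub topology */

#define vcpu_to_node(v)   (cpu_to_node[(v)->processor])
#define domain_to_node(d) \
    (((d)->vcpu[0] != NULL) ? vcpu_to_node((d)->vcpu[0]) : NUMA_NO_NODE)

int main(void)
{
    struct vcpu v0 = { .processor = 3 };
    struct domain d_with  = { .vcpu = { &v0 } };
    struct domain d_empty = { .vcpu = { NULL } };

    printf("domain with vcpu0 on cpu3 -> node %u\n", domain_to_node(&d_with));
    printf("domain without vcpus     -> node %u (NUMA_NO_NODE)\n",
           domain_to_node(&d_empty));
    return 0;
}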