         a->memflags = MEMF_bits(address_bits);
     }
-    a->memflags |= MEMF_node(XENMEMF_get_node(r->mem_flags));
-    if ( r->mem_flags & XENMEMF_exact_node_request )
-        a->memflags |= MEMF_exact_node;
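+    /* XENMEMF_vnode: map the virtual node onto the backing physical node. */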
+    if ( r->mem_flags & XENMEMF_vnode )
+    {
+        nodeid_t vnode, pnode;
+        struct domain *d = a->domain;
+
+        read_lock(&d->vnuma_rwlock);
+        if ( d->vnuma )
+        {
+            vnode = XENMEMF_get_node(r->mem_flags);
+            if ( vnode >= d->vnuma->nr_vnodes )
+            {
+                read_unlock(&d->vnuma_rwlock);
+                return -EINVAL;
+            }
+
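+            /* Allocate from the physical node backing this virtual node. */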
+            pnode = d->vnuma->vnode_to_pnode[vnode];
+            if ( pnode != NUMA_NO_NODE )
+            {
+                a->memflags |= MEMF_node(pnode);
+                if ( r->mem_flags & XENMEMF_exact_node_request )
+                    a->memflags |= MEMF_exact_node;
+            }
+        }
+        read_unlock(&d->vnuma_rwlock);
+    }
+    else
+    {
+        a->memflags |= MEMF_node(XENMEMF_get_node(r->mem_flags));
+        if ( r->mem_flags & XENMEMF_exact_node_request )
+            a->memflags |= MEMF_exact_node;
+    }
     return 0;
 }
         if ( unlikely(start_extent >= reservation.nr_extents) )
             return start_extent;
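+        /*
+         * Look up the domain before building the memop arguments:
+         * construct_memop_from_reservation() needs args.domain to reach
+         * the domain's vNUMA data.
+         */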
+        d = rcu_lock_domain_by_any_id(reservation.domid);
+        if ( d == NULL )
+            return start_extent;
+        args.domain = d;
+
         if ( construct_memop_from_reservation(&reservation, &args) )
+        {
+            rcu_unlock_domain(d);
             return start_extent;
-        args.nr_done = start_extent;
-        args.preempted = 0;
+        }
+        args.nr_done = start_extent;
+        args.preempted = 0;
         if ( op == XENMEM_populate_physmap
              && (reservation.mem_flags & XENMEMF_populate_on_demand) )
             args.memflags |= MEMF_populate_on_demand;
-        d = rcu_lock_domain_by_any_id(reservation.domid);
-        if ( d == NULL )
-            return start_extent;
-        args.domain = d;
-
         if ( xsm_memory_adjust_reservation(XSM_TARGET, current->domain, d) )
         {
             rcu_unlock_domain(d);
 /* Flag to request allocation only from the node specified */
 #define XENMEMF_exact_node_request (1<<17)
 #define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request)
+/* Flag to indicate the node specified is a virtual node */
+#define XENMEMF_vnode (1<<18)
 #endif
 struct xen_memory_reservation {