int rc;
xen_pfn_t pfn, allocsz, mfn, total, pfn_base;
int i, j;
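+ /*
+ * Local vNUMA state: vmemranges and vnode_to_pnode point either
+ * at the caller's data in dom or at the dummy layout built below.
+ */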
+ xen_vmemrange_t dummy_vmemrange[1];
+ unsigned int dummy_vnode_to_pnode[1];
+ xen_vmemrange_t *vmemranges;
+ unsigned int *vnode_to_pnode;
+ unsigned int nr_vmemranges, nr_vnodes;
rc = x86_compat(dom->xch, dom->guest_domid, dom->guest_type);
if ( rc )
return rc;

/*
* Set up dummy vNUMA information if none was provided. The dummy
* values make the allocator pull pages from arbitrary physical
* nodes, which is the expected behaviour when no vNUMA
* configuration is given.
*/
if ( dom->nr_vmemranges == 0 )
{
- dom->nr_vmemranges = 1;
- dom->vmemranges = xc_dom_malloc(dom, sizeof(*dom->vmemranges));
- dom->vmemranges[0].start = 0;
- dom->vmemranges[0].end = (uint64_t)dom->total_pages << PAGE_SHIFT;
- dom->vmemranges[0].flags = 0;
- dom->vmemranges[0].nid = 0;
-
- dom->nr_vnodes = 1;
- dom->vnode_to_pnode = xc_dom_malloc(dom,
- sizeof(*dom->vnode_to_pnode));
- dom->vnode_to_pnode[0] = XC_NUMA_NO_NODE;
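+ /*
+ * The dummies live on this function's stack; nothing is written
+ * back into dom, so the caller's structure stays untouched.
+ */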
+ nr_vmemranges = 1;
+ vmemranges = dummy_vmemrange;
+ vmemranges[0].start = 0;
+ vmemranges[0].end = (uint64_t)dom->total_pages << PAGE_SHIFT;
+ vmemranges[0].flags = 0;
+ vmemranges[0].nid = 0;
+
+ nr_vnodes = 1;
+ vnode_to_pnode = dummy_vnode_to_pnode;
+ vnode_to_pnode[0] = XC_NUMA_NO_NODE;
+ }
+ else
+ {
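+ /* The caller supplied vNUMA information: use it unmodified. */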
+ nr_vmemranges = dom->nr_vmemranges;
+ nr_vnodes = dom->nr_vnodes;
+ vmemranges = dom->vmemranges;
+ vnode_to_pnode = dom->vnode_to_pnode;
}
total = dom->p2m_size = 0;
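+ /*
+ * total counts pages across all vmemranges; p2m_size tracks the
+ * highest end pfn, since the ranges may leave holes in the p2m.
+ */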
- for ( i = 0; i < dom->nr_vmemranges; i++ )
+ for ( i = 0; i < nr_vmemranges; i++ )
{
- total += ((dom->vmemranges[i].end - dom->vmemranges[i].start)
+ total += ((vmemranges[i].end - vmemranges[i].start)
>> PAGE_SHIFT);
dom->p2m_size =
- dom->p2m_size > (dom->vmemranges[i].end >> PAGE_SHIFT) ?
- dom->p2m_size : (dom->vmemranges[i].end >> PAGE_SHIFT);
+ dom->p2m_size > (vmemranges[i].end >> PAGE_SHIFT) ?
+ dom->p2m_size : (vmemranges[i].end >> PAGE_SHIFT);
}
if ( total != dom->total_pages )
{
xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
"%s: vNUMA page count mismatch", __func__);
return -EINVAL;
}

for ( pfn = 0; pfn < dom->p2m_size; pfn++ )
dom->p2m_host[pfn] = INVALID_P2M_ENTRY;
/* allocate guest memory */
- for ( i = 0; i < dom->nr_vmemranges; i++ )
+ for ( i = 0; i < nr_vmemranges; i++ )
{
unsigned int memflags;
uint64_t pages;
- unsigned int pnode = dom->vnode_to_pnode[dom->vmemranges[i].nid];
+ unsigned int pnode = vnode_to_pnode[vmemranges[i].nid];
memflags = 0;
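+ /* Pin the allocation to the backing physical node, if the vnode has one. */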
if ( pnode != XC_NUMA_NO_NODE )
memflags |= XENMEMF_exact_node(pnode);
- pages = (dom->vmemranges[i].end - dom->vmemranges[i].start)
+ pages = (vmemranges[i].end - vmemranges[i].start)
>> PAGE_SHIFT;
- pfn_base = dom->vmemranges[i].start >> PAGE_SHIFT;
+ pfn_base = vmemranges[i].start >> PAGE_SHIFT;
for ( pfn = pfn_base; pfn < pfn_base+pages; pfn++ )
dom->p2m_host[pfn] = pfn;
uint64_t total_pages;
xen_vmemrange_t dummy_vmemrange[2];
unsigned int dummy_vnode_to_pnode[1];
- bool use_dummy = false;
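+ /*
+ * Local view of the vNUMA layout: either the data supplied in
+ * args or the dummy layout below. Nothing is written back into
+ * args.
+ */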
+ xen_vmemrange_t *vmemranges;
+ unsigned int *vnode_to_pnode;
+ unsigned int nr_vmemranges, nr_vnodes;
memset(&elf, 0, sizeof(elf));
if ( elf_init(&elf, image, image_size) != 0 )
goto error_out;
if ( args->nr_vmemranges == 0 )
{
dummy_vmemrange[0].start = 0;
dummy_vmemrange[0].end = args->lowmem_end;
dummy_vmemrange[0].flags = 0;
dummy_vmemrange[0].nid = 0;
- args->nr_vmemranges = 1;
+ nr_vmemranges = 1;
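+
+ /* Cover memory above 4GiB with a second dummy range. */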
if ( args->highmem_end > (1ULL << 32) )
{
dummy_vmemrange[1].start = 1ULL << 32;
dummy_vmemrange[1].end = args->highmem_end;
dummy_vmemrange[1].flags = 0;
dummy_vmemrange[1].nid = 0;
- args->nr_vmemranges++;
+ nr_vmemranges++;
}
dummy_vnode_to_pnode[0] = XC_NUMA_NO_NODE;
- args->nr_vnodes = 1;
- args->vmemranges = dummy_vmemrange;
- args->vnode_to_pnode = dummy_vnode_to_pnode;
- use_dummy = true;
+ nr_vnodes = 1;
+ vmemranges = dummy_vmemrange;
+ vnode_to_pnode = dummy_vnode_to_pnode;
}
else
{
if ( nr_pages > target_pages )
{
PERROR("Cannot enable vNUMA and PoD at the same time");
goto error_out;
}
+
+ nr_vmemranges = args->nr_vmemranges;
+ nr_vnodes = args->nr_vnodes;
+ vmemranges = args->vmemranges;
+ vnode_to_pnode = args->vnode_to_pnode;
}
total_pages = 0;
p2m_size = 0;
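+ /* total_pages spans every vmemrange; p2m_size is the highest end pfn. */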
- for ( i = 0; i < args->nr_vmemranges; i++ )
+ for ( i = 0; i < nr_vmemranges; i++ )
{
- total_pages += ((args->vmemranges[i].end - args->vmemranges[i].start)
+ total_pages += ((vmemranges[i].end - vmemranges[i].start)
>> PAGE_SHIFT);
- p2m_size = p2m_size > (args->vmemranges[i].end >> PAGE_SHIFT) ?
- p2m_size : (args->vmemranges[i].end >> PAGE_SHIFT);
+ p2m_size = p2m_size > (vmemranges[i].end >> PAGE_SHIFT) ?
+ p2m_size : (vmemranges[i].end >> PAGE_SHIFT);
}
if ( total_pages != (args->mem_size >> PAGE_SHIFT) )
for ( i = 0; i < p2m_size; i++ )
page_array[i] = ((xen_pfn_t)-1);
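+ /* Mark pfns covered by a vmemrange; holes keep the invalid marker. */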
- for ( vmemid = 0; vmemid < args->nr_vmemranges; vmemid++ )
+ for ( vmemid = 0; vmemid < nr_vmemranges; vmemid++ )
{
uint64_t pfn;
- for ( pfn = args->vmemranges[vmemid].start >> PAGE_SHIFT;
- pfn < args->vmemranges[vmemid].end >> PAGE_SHIFT;
+ for ( pfn = vmemranges[vmemid].start >> PAGE_SHIFT;
+ pfn < vmemranges[vmemid].end >> PAGE_SHIFT;
pfn++ )
page_array[pfn] = pfn;
}
/* Populate the 0xa0 pages below the VGA hole (0xA0000-0xC0000). */
rc = xc_domain_populate_physmap_exact(
xch, dom, 0xa0, 0, memflags, &page_array[0x00]);
stat_normal_pages = 0;
- for ( vmemid = 0; vmemid < args->nr_vmemranges; vmemid++ )
+ for ( vmemid = 0; vmemid < nr_vmemranges; vmemid++ )
{
unsigned int new_memflags = memflags;
uint64_t end_pages;
- unsigned int vnode = args->vmemranges[vmemid].nid;
- unsigned int pnode = args->vnode_to_pnode[vnode];
+ unsigned int vnode = vmemranges[vmemid].nid;
+ unsigned int pnode = vnode_to_pnode[vnode];
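+ /* Resolve the range's vnode to a physical node for exact placement. */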
if ( pnode != XC_NUMA_NO_NODE )
new_memflags |= XENMEMF_exact_node(pnode);
- end_pages = args->vmemranges[vmemid].end >> PAGE_SHIFT;
+ end_pages = vmemranges[vmemid].end >> PAGE_SHIFT;
/*
* Consider vga hole belongs to the vmemrange that covers
* 0xA0000-0xC0000. Note that 0x00000-0xA0000 is populated just
* before this loop.
*/
- if ( args->vmemranges[vmemid].start == 0 )
+ if ( vmemranges[vmemid].start == 0 )
{
cur_pages = 0xc0;
stat_normal_pages += 0xc0;
}
else
- cur_pages = args->vmemranges[vmemid].start >> PAGE_SHIFT;
+ cur_pages = vmemranges[vmemid].start >> PAGE_SHIFT;
while ( (rc == 0) && (end_pages > cur_pages) )
{
error_out:
rc = -1;
out:
- if ( use_dummy )
- {
- args->nr_vnodes = 0;
- args->vmemranges = NULL;
- args->vnode_to_pnode = NULL;
- }
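+ /* Nothing to reset here: the dummy vNUMA layout lives on the stack. */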
if ( elf_check_broken(&elf) )
ERROR("HVM ELF broken: %s", elf_check_broken(&elf));