There are two places where you'll find info on page sizes. The first
one is under the <cpu/> element, where all supported page sizes are
listed. The second one is under each <cell/> element, which refers
to a concrete NUMA node; there, the size of each page pool is
reported. So the capabilities XML looks something like this:
<capabilities>
<host>
<uuid>01281cda-f352-cb11-a9db-e905fe22010c</uuid>
<cpu>
<arch>x86_64</arch>
<model>Westmere</model>
<vendor>Intel</vendor>
<topology sockets='1' cores='1' threads='1'/>
...
<pages unit='KiB' size='4'/>
<pages unit='KiB' size='2048'/>
<pages unit='KiB' size='1048576'/>
</cpu>
...
<topology>
<cells num='4'>
<cell id='0'>
<memory unit='KiB'>4054408</memory>
<pages unit='KiB' size='4'>1013602</pages>
<pages unit='KiB' size='2048'>3</pages>
<pages unit='KiB' size='1048576'>1</pages>
<distances/>
<cpus num='1'>
<cpu id='0' socket_id='0' core_id='0' siblings='0'/>
</cpus>
</cell>
<cell id='1'>
<memory unit='KiB'>4071072</memory>
<pages unit='KiB' size='4'>1017768</pages>
<pages unit='KiB' size='2048'>3</pages>
<pages unit='KiB' size='1048576'>1</pages>
<distances/>
<cpus num='1'>
<cpu id='1' socket_id='0' core_id='0' siblings='1'/>
</cpus>
</cell>
...
</cells>
</topology>
...
</host>
<guest/>
</capabilities>
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
<empty/>
</element>
</zeroOrMore>
+ <zeroOrMore>
+ <ref name='pagesElem'/>
+ </zeroOrMore>
</define>
<define name='power_management'>
<ref name='memory'/>
</optional>
+ <zeroOrMore>
+ <ref name='pagesElem'/>
+ </zeroOrMore>
+
<optional>
<element name='distances'>
<zeroOrMore>
<param name='pattern'>[a-zA-Z0-9\-_]+</param>
</data>
</define>
+
+ <!-- A <pages> element describing one pool of memory pages: the
+      mandatory size attribute (with an optional unit attribute) and
+      an unsigned integer count as the element content. -->
+ <define name='pagesElem'>
+ <element name='pages'>
+ <optional>
+ <attribute name='unit'>
+ <ref name='unit'/>
+ </attribute>
+ </optional>
+ <attribute name='size'>
+ <ref name='unsignedInt'/>
+ </attribute>
+ <ref name='unsignedInt'/>
+ </element>
+ </define>
</grammar>
VIR_FREE(cell->cpus);
VIR_FREE(cell->siblings);
+ VIR_FREE(cell->pageinfo);
VIR_FREE(cell);
}
}
VIR_FREE(caps->host.secModels);
+ VIR_FREE(caps->host.pagesSize);
virCPUDefFree(caps->host.cpu);
}
* @cpus: array of CPU definition structures, the pointer is stolen
* @nsiblings: number of sibling NUMA nodes
* @siblings: info on sibling NUMA nodes
+ * @npageinfo: number of pages at node @num
+ * @pageinfo: info on each single memory page
*
* Registers a new NUMA cell for a host, passing in a
* array of CPU IDs belonging to the cell
int ncpus,
virCapsHostNUMACellCPUPtr cpus,
int nsiblings,
- virCapsHostNUMACellSiblingInfoPtr siblings)
+ virCapsHostNUMACellSiblingInfoPtr siblings,
+ int npageinfo,
+ virCapsHostNUMACellPageInfoPtr pageinfo)
{
virCapsHostNUMACellPtr cell;
if (VIR_ALLOC(cell) < 0)
return -1;
- cell->ncpus = ncpus;
cell->num = num;
cell->mem = mem;
+ cell->ncpus = ncpus;
cell->cpus = cpus;
- cell->siblings = siblings;
cell->nsiblings = nsiblings;
+ cell->siblings = siblings;
+ cell->npageinfo = npageinfo;
+ cell->pageinfo = pageinfo;
caps->host.numaCell[caps->host.nnumaCell++] = cell;
virBufferAsprintf(buf, "<memory unit='KiB'>%llu</memory>\n",
cells[i]->mem);
+ for (j = 0; j < cells[i]->npageinfo; j++) {
+ virBufferAsprintf(buf, "<pages unit='KiB' size='%u'>%zu</pages>\n",
+ cells[i]->pageinfo[j].size,
+ cells[i]->pageinfo[j].avail);
+ }
+
if (cells[i]->nsiblings) {
virBufferAddLit(buf, "<distances>\n");
virBufferAdjustIndent(buf, 2);
}
virCPUDefFormatBuf(&buf, caps->host.cpu, 0);
+ for (i = 0; i < caps->host.nPagesSize; i++) {
+ virBufferAsprintf(&buf, "<pages unit='KiB' size='%u'/>\n",
+ caps->host.pagesSize[i]);
+ }
+
virBufferAdjustIndent(&buf, -2);
virBufferAddLit(&buf, "</cpu>\n");
unsigned int distance; /* distance to the node */
};
+/* Info on one pool of memory pages of a particular size within a
+ * NUMA cell; formatted as a <pages> element in the capabilities XML. */
+typedef struct _virCapsHostNUMACellPageInfo virCapsHostNUMACellPageInfo;
+typedef virCapsHostNUMACellPageInfo *virCapsHostNUMACellPageInfoPtr;
+struct _virCapsHostNUMACellPageInfo {
+ unsigned int size; /* page size in kibibytes */
+ size_t avail; /* the size of pool, i.e. number of pages of @size */
+};
+
typedef struct _virCapsHostNUMACell virCapsHostNUMACell;
typedef virCapsHostNUMACell *virCapsHostNUMACellPtr;
struct _virCapsHostNUMACell {
virCapsHostNUMACellCPUPtr cpus;
int nsiblings;
virCapsHostNUMACellSiblingInfoPtr siblings;
+ int npageinfo;
+ virCapsHostNUMACellPageInfoPtr pageinfo;
};
typedef struct _virCapsHostSecModelLabel virCapsHostSecModelLabel;
virCapsHostSecModelPtr secModels;
virCPUDefPtr cpu;
+ int nPagesSize; /* size of pagesSize array */
+ unsigned int *pagesSize; /* page sizes support on the system */
unsigned char host_uuid[VIR_UUID_BUFLEN];
};
int ncpus,
virCapsHostNUMACellCPUPtr cpus,
int nsiblings,
- virCapsHostNUMACellSiblingInfoPtr siblings);
+ virCapsHostNUMACellSiblingInfoPtr siblings,
+ int npageinfo,
+ virCapsHostNUMACellPageInfoPtr pageinfo);
extern int
if (virCapabilitiesAddHostNUMACell(caps, i,
numa_info[i].size / 1024,
nr_cpus_node[i], cpus[i],
+ 0, NULL,
0, NULL) < 0) {
virCapabilitiesClearHostNUMACellCPUTopology(cpus[i],
nr_cpus_node[i]);
if (virCapabilitiesAddHostNUMACell(caps, 0,
nodeinfo.memory,
ncpus, cpus,
+ 0, NULL,
0, NULL) < 0)
goto error;
return ret;
}
+/**
+ * virNodeCapsGetPagesInfo:
+ * @node: NUMA node ID to query (passed through to virNumaGetPages)
+ * @pageinfo: filled with a newly allocated array of page pool info;
+ *            the caller is responsible for freeing it
+ * @npageinfo: filled with the number of items in @pageinfo
+ *
+ * Fetch the sizes and pool sizes of memory pages on @node and repack
+ * them into an array of virCapsHostNUMACellPageInfo structs.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int
+virNodeCapsGetPagesInfo(int node,
+ virCapsHostNUMACellPageInfoPtr *pageinfo,
+ int *npageinfo)
+{
+ int ret = -1;
+ unsigned int *pages_size = NULL, *pages_avail = NULL;
+ size_t npages, i;
+
+ if (virNumaGetPages(node, &pages_size, &pages_avail, NULL, &npages) < 0)
+ goto cleanup;
+
+ if (VIR_ALLOC_N(*pageinfo, npages) < 0)
+ goto cleanup;
+ *npageinfo = npages;
+
+ /* Repack the two parallel arrays into the struct array. */
+ for (i = 0; i < npages; i++) {
+ (*pageinfo)[i].size = pages_size[i];
+ (*pageinfo)[i].avail = pages_avail[i];
+ }
+
+ ret = 0;
+
+ cleanup:
+ VIR_FREE(pages_avail);
+ VIR_FREE(pages_size);
+ return ret;
+}
+
int
nodeCapsInitNUMA(virCapsPtr caps)
{
virBitmapPtr cpumap = NULL;
virCapsHostNUMACellSiblingInfoPtr siblings = NULL;
int nsiblings = 0;
+ virCapsHostNUMACellPageInfoPtr pageinfo = NULL;
+ int npageinfo;
int ret = -1;
int ncpus = 0;
int cpu;
if (virNodeCapsGetSiblingInfo(n, &siblings, &nsiblings) < 0)
goto cleanup;
+ if (virNodeCapsGetPagesInfo(n, &pageinfo, &npageinfo) < 0)
+ goto cleanup;
+
/* Detect the amount of memory in the numa cell in KiB */
virNumaGetNodeMemory(n, &memory, NULL);
memory >>= 10;
if (virCapabilitiesAddHostNUMACell(caps, n, memory,
ncpus, cpus,
- nsiblings, siblings) < 0)
+ nsiblings, siblings,
+ npageinfo, pageinfo) < 0)
goto cleanup;
cpus = NULL;
siblings = NULL;
+ pageinfo = NULL;
}
ret = 0;
virBitmapFree(cpumap);
VIR_FREE(cpus);
VIR_FREE(siblings);
+ VIR_FREE(pageinfo);
if (ret < 0)
VIR_FREE(cpus);
}
+/**
+ * virQEMUCapsInitPages:
+ * @caps: capabilities object to fill in
+ *
+ * Query the page sizes supported host-wide (node -1) and store them
+ * in caps->host.pagesSize / caps->host.nPagesSize, transferring
+ * ownership of the allocated array to @caps.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int
+virQEMUCapsInitPages(virCapsPtr caps)
+{
+ int ret = -1;
+ unsigned int *pages_size = NULL;
+ size_t npages;
+
+ if (virNumaGetPages(-1 /* Magic constant for overall info */,
+ &pages_size, NULL, NULL, &npages) < 0)
+ goto cleanup;
+
+ /* Steal the array into @caps so cleanup doesn't free it. */
+ caps->host.pagesSize = pages_size;
+ pages_size = NULL;
+ caps->host.nPagesSize = npages;
+ npages = 0;
+
+ ret = 0;
+ cleanup:
+ VIR_FREE(pages_size);
+ return ret;
+}
+
+
virCapsPtr virQEMUCapsInit(virQEMUCapsCachePtr cache)
{
virCapsPtr caps;
VIR_WARN("Failed to get host CPU");
/* Add the power management features of the host */
-
if (virNodeSuspendGetTargetMask(&caps->host.powerMgmt) < 0)
VIR_WARN("Failed to get host power management capabilities");
+ /* Add huge pages info */
+ if (virQEMUCapsInitPages(caps) < 0)
+ VIR_WARN("Failed to get pages info");
+
+ /* Add domain migration transport URI */
virCapabilitiesAddHostMigrateTransport(caps,
"tcp");
if (virCapabilitiesAddHostNUMACell(caps, i, 0,
privconn->cells[i].numCpus,
- cpu_cells, 0, NULL) < 0)
+ cpu_cells, 0, NULL, 0, NULL) < 0)
goto error;
}
if (virCapabilitiesAddHostNUMACell(caps, cell, 0,
nb_cpus, cpuInfo,
+ 0, NULL,
0, NULL) < 0)
goto error;
cpuInfo = NULL;
if (virCapabilitiesAddHostNUMACell(caps, cell_id,
max_mem_in_cell,
max_cpus_in_cell, cell_cpus,
- nsiblings, siblings) < 0)
+ nsiblings, siblings,
+ 0, NULL) < 0)
goto error;
cell_cpus = NULL;
if (virCapabilitiesAddHostNUMACell(caps, cell_id + seq,
MAX_MEM_IN_CELL,
MAX_CPUS_IN_CELL, cell_cpus,
+ 0, NULL,
0, NULL) < 0)
goto error;