This takes cpupools into account.
Add a helper to get the info for a single cpupool, and refactor
libxl_list_cpupool to use it. While there, fix the leaks due to not disposing
the partial list on realloc failure in that function.
Also fix the failure of sched_domain_output to free the poolinfo list.
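As a usage illustration (a hypothetical caller sketch, not part of this
patch; assumes a libxl_ctx obtained from libxl_ctx_alloc()):

    #include <stdio.h>
    #include <libxl.h>

    /* Hypothetical example: query a single pool by id instead of walking
     * the whole list returned by libxl_list_cpupool(). */
    static int print_pool(libxl_ctx *ctx, uint32_t poolid)
    {
        libxl_cpupoolinfo info;
        int rc;

        rc = libxl_cpupool_info(ctx, &info, poolid);
        if (rc)
            return rc;
        printf("pool %u: scheduler %d, %u domains\n",
               info.poolid, (int)info.sched, info.n_dom);
        libxl_cpupoolinfo_dispose(&info);
        return 0;
    }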
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: George Dunlap <george.dunlap@eu.citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Committed-by: Ian Campbell <ian.campbell@citrix.com>
return 0;
}
+static int cpupool_info(libxl__gc *gc,
+                        libxl_cpupoolinfo *info,
+                        uint32_t poolid,
+                        bool exact /* exactly poolid or >= poolid */)
+{
+    xc_cpupoolinfo_t *xcinfo;
+    int rc = ERROR_FAIL;
+
+    xcinfo = xc_cpupool_getinfo(CTX->xch, poolid);
+    if (xcinfo == NULL)
+        return ERROR_FAIL;
+
+    if (exact && xcinfo->cpupool_id != poolid)
+        goto out;
+
+    info->poolid = xcinfo->cpupool_id;
+    info->sched = xcinfo->sched_id;
+    info->n_dom = xcinfo->n_dom;
+    if (libxl_cpumap_alloc(CTX, &info->cpumap))
+        goto out;
+    memcpy(info->cpumap.map, xcinfo->cpumap, info->cpumap.size);
+
+    rc = 0;
+out:
+    xc_cpupool_infofree(CTX->xch, xcinfo);
+    return rc;
+}
+
+int libxl_cpupool_info(libxl_ctx *ctx,
+                       libxl_cpupoolinfo *info, uint32_t poolid)
+{
+    GC_INIT(ctx);
+    int rc = cpupool_info(gc, info, poolid, true);
+    GC_FREE;
+    return rc;
+}
+
libxl_cpupoolinfo * libxl_list_cpupool(libxl_ctx *ctx, int *nb_pool)
{
-    libxl_cpupoolinfo *ptr, *tmp;
+    GC_INIT(ctx);
+    libxl_cpupoolinfo info, *ptr, *tmp;
    int i;
-    xc_cpupoolinfo_t *info;
    uint32_t poolid;
    ptr = NULL;
    poolid = 0;
    for (i = 0;; i++) {
-        info = xc_cpupool_getinfo(ctx->xch, poolid);
-        if (info == NULL)
+        if (cpupool_info(gc, &info, poolid, false))
            break;
        tmp = realloc(ptr, (i + 1) * sizeof(libxl_cpupoolinfo));
        if (!tmp) {
            LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
            libxl_cpupoolinfo_list_free(ptr, i);
+            ptr = NULL; /* don't return the just-freed list */
+            goto out;
        }
        ptr = tmp;
-        ptr[i].poolid = info->cpupool_id;
-        ptr[i].sched = info->sched_id;
-        ptr[i].n_dom = info->n_dom;
-        if (libxl_cpumap_alloc(ctx, &ptr[i].cpumap)) {
-            xc_cpupool_infofree(ctx->xch, info);
-            break;
-        }
-        memcpy(ptr[i].cpumap.map, info->cpumap, ptr[i].cpumap.size);
-        poolid = info->cpupool_id + 1;
-        xc_cpupool_infofree(ctx->xch, info);
+        ptr[i] = info;
+        poolid = info.poolid + 1;
    }
    *nb_pool = i;
+out:
+    GC_FREE;
    return ptr;
}
}
}
-    for (cpu = 0; cpu < nr_cpus; cpu++)
-        libxl_cputopology_dispose(&topology[cpu]);
-    free(topology);
+    libxl_cputopology_list_free(topology, nr_cpus);
out:
-    for (p = 0; p < n_pools; p++) {
-        libxl_cpupoolinfo_dispose(poolinfo + p);
-    }
+    libxl_cpupoolinfo_list_free(poolinfo, n_pools);
return ret;
}
libxl_dominfo * libxl_list_domain(libxl_ctx*, int *nb_domain);
void libxl_dominfo_list_free(libxl_dominfo *list, int nr);
libxl_cpupoolinfo * libxl_list_cpupool(libxl_ctx*, int *nb_pool);
+void libxl_cpupoolinfo_list_free(libxl_cpupoolinfo *list, int nr);
libxl_vminfo * libxl_list_vm(libxl_ctx *ctx, int *nb_vm);
void libxl_vminfo_list_free(libxl_vminfo *list, int nr);
int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu);
int libxl_cpupool_cpuremove_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus);
int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid);
+int libxl_cpupool_info(libxl_ctx *ctx, libxl_cpupoolinfo *info, uint32_t poolid);
int libxl_domid_valid_guest(uint32_t domid);
return (info.flags >> XEN_DOMINF_shutdownshift) & XEN_DOMINF_shutdownmask;
}
+int libxl__domain_cpupool(libxl__gc *gc, uint32_t domid)
+{
+    xc_domaininfo_t info;
+    int ret;
+
+    ret = xc_domain_getinfolist(CTX->xch, domid, 1, &info);
+    if (ret != 1)
+        return ERROR_FAIL;
+    if (info.domain != domid)
+        return ERROR_FAIL;
+
+    return info.cpupool;
+}
+
+libxl_scheduler libxl__domain_scheduler(libxl__gc *gc, uint32_t domid)
+{
+    /* int, not uint32_t: libxl__domain_cpupool returns a negative
+     * ERROR_FAIL on failure, which the check below must be able to see. */
+    int cpupool = libxl__domain_cpupool(gc, domid);
+    libxl_cpupoolinfo poolinfo;
+    libxl_scheduler sched = LIBXL_SCHEDULER_UNKNOWN;
+    int rc;
+
+    if (cpupool < 0)
+        return sched;
+
+    rc = libxl_cpupool_info(CTX, &poolinfo, cpupool);
+    if (rc < 0)
+        /* poolinfo was never filled on failure; do not dispose it */
+        return sched;
+
+    sched = poolinfo.sched;
+    libxl_cpupoolinfo_dispose(&poolinfo);
+    return sched;
+}
+
int libxl__build_pre(libxl__gc *gc, uint32_t domid,
libxl_domain_build_info *info, libxl__domain_build_state *state)
{
/* from xl_dom */
_hidden libxl_domain_type libxl__domain_type(libxl__gc *gc, uint32_t domid);
_hidden int libxl__domain_shutdown_reason(libxl__gc *gc, uint32_t domid);
+_hidden int libxl__domain_cpupool(libxl__gc *gc, uint32_t domid);
+_hidden libxl_scheduler libxl__domain_scheduler(libxl__gc *gc, uint32_t domid);
_hidden int libxl__sched_set_params(libxl__gc *gc, uint32_t domid, libxl_sched_params *scparams);
#define LIBXL__DOMAIN_IS_TYPE(gc, domid, type) \
libxl__domain_type((gc), (domid)) == LIBXL_DOMAIN_TYPE_##type
])
# Consistent with values defined in domctl.h
+# Except unknown, which we have made up
libxl_scheduler = Enumeration("scheduler", [
+    (0, "unknown"),
(4, "sedf"),
(5, "credit"),
(6, "credit2"),
}
free(poolname);
}
-        libxl_cpupoolinfo_dispose(poolinfo + i);
    }
-    free(poolinfo);
+    libxl_cpupoolinfo_list_free(poolinfo, nb_pools);
return ret;
}
free(list);
}
+void libxl_cpupoolinfo_list_free(libxl_cpupoolinfo *list, int nr)
+{
+    int i;
+    for (i = 0; i < nr; i++)
+        libxl_cpupoolinfo_dispose(&list[i]);
+    free(list);
+}
+
int libxl_domid_valid_guest(uint32_t domid)
{
/* returns 1 if the value _could_ be a valid guest domid, 0 otherwise
break;
}
}
-    if (poolinfo) {
-        for (p = 0; p < n_pools; p++) {
-            libxl_cpupoolinfo_dispose(poolinfo + p);
-        }
-    }
+    if (poolinfo)
+        libxl_cpupoolinfo_list_free(poolinfo, n_pools);
return 0;
}
printf("\n");
}
}
-        libxl_cpupoolinfo_dispose(poolinfo + p);
    }
+    libxl_cpupoolinfo_list_free(poolinfo, n_pools);
+
return ret;
}
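For reference, the matching caller pattern for the list API (a hypothetical
snippet, not part of this patch): libxl_cpupoolinfo_list_free disposes every
element and then frees the array returned by libxl_list_cpupool, so callers
no longer dispose entries by hand:

    int nb_pool, i;
    libxl_cpupoolinfo *pools = libxl_list_cpupool(ctx, &nb_pool);

    if (pools) {
        for (i = 0; i < nb_pool; i++)
            printf("pool %u\n", pools[i].poolid);
        libxl_cpupoolinfo_list_free(pools, nb_pool);
    }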