unsigned int flags)
{
xenUnifiedPrivatePtr priv = dom->conn->privateData;
- int ret;
virCheckFlags(VIR_DOMAIN_VCPU_LIVE |
VIR_DOMAIN_VCPU_CONFIG |
/* Try non-hypervisor methods first, then hypervisor direct method
* as a last resort.
*/
- if (priv->opened[XEN_UNIFIED_XEND_OFFSET]) {
- ret = xenDaemonDomainSetVcpusFlags(dom, nvcpus, flags);
- if (ret != -2)
- return ret;
- }
- if (priv->opened[XEN_UNIFIED_XM_OFFSET]) {
- ret = xenXMDomainSetVcpusFlags(dom, nvcpus, flags);
- if (ret != -2)
- return ret;
- }
- if (flags == VIR_DOMAIN_VCPU_LIVE)
- return xenHypervisorSetVcpus(dom, nvcpus);
-
- virReportError(VIR_ERR_NO_SUPPORT, __FUNCTION__);
- return -1;
+ if (dom->id < 0 && priv->xendConfigVersion < XEND_CONFIG_VERSION_3_0_4)
+ return xenXMDomainSetVcpusFlags(dom, nvcpus, flags);
+ else
+ return xenDaemonDomainSetVcpusFlags(dom, nvcpus, flags);
}
static int
xenUnifiedDomainSetVcpus(virDomainPtr dom, unsigned int nvcpus)
{
+ xenUnifiedPrivatePtr priv = dom->conn->privateData;
unsigned int flags = VIR_DOMAIN_VCPU_LIVE;
/* Per the documented API, it is hypervisor-dependent whether this
* affects just _LIVE or _LIVE|_CONFIG; in xen's case, that
* depends on xendConfigVersion. */
- if (dom) {
- xenUnifiedPrivatePtr priv = dom->conn->privateData;
- if (priv->xendConfigVersion >= XEND_CONFIG_VERSION_3_0_4)
- flags |= VIR_DOMAIN_VCPU_CONFIG;
- return xenUnifiedDomainSetVcpusFlags(dom, nvcpus, flags);
- }
- return -1;
+ if (priv->xendConfigVersion >= XEND_CONFIG_VERSION_3_0_4)
+ flags |= VIR_DOMAIN_VCPU_CONFIG;
+
+ return xenUnifiedDomainSetVcpusFlags(dom, nvcpus, flags);
}
static int
unsigned char *cpumap, int maplen)
{
xenUnifiedPrivatePtr priv = dom->conn->privateData;
- int i;
-
- for (i = 0; i < XEN_UNIFIED_NR_DRIVERS; ++i)
- if (priv->opened[i] &&
- drivers[i]->xenDomainPinVcpu &&
- drivers[i]->xenDomainPinVcpu(dom, vcpu, cpumap, maplen) == 0)
- return 0;
- return -1;
+ if (dom->id < 0) {
+ if (priv->xendConfigVersion < XEND_CONFIG_VERSION_3_0_4)
+ return xenXMDomainPinVcpu(dom, vcpu, cpumap, maplen);
+ else
+ return xenDaemonDomainPinVcpu(dom, vcpu, cpumap, maplen);
+ } else {
+ return xenHypervisorPinVcpu(dom, vcpu, cpumap, maplen);
+ }
}
static int
unsigned char *cpumaps, int maplen)
{
xenUnifiedPrivatePtr priv = dom->conn->privateData;
- int i, ret;
-
- for (i = 0; i < XEN_UNIFIED_NR_DRIVERS; ++i)
- if (priv->opened[i] && drivers[i]->xenDomainGetVcpus) {
- ret = drivers[i]->xenDomainGetVcpus(dom, info, maxinfo, cpumaps, maplen);
- if (ret > 0)
- return ret;
+ if (dom->id < 0) {
+ if (priv->xendConfigVersion < XEND_CONFIG_VERSION_3_0_4) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("Cannot get VCPUs of inactive domain"));
+ return -1;
+ } else {
+ return xenDaemonDomainGetVcpus(dom, info, maxinfo, cpumaps, maplen);
}
- return -1;
+ } else {
+ return xenHypervisorGetVcpus(dom, info, maxinfo, cpumaps, maplen);
+ }
}
static int
xenUnifiedDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags)
{
xenUnifiedPrivatePtr priv = dom->conn->privateData;
- int ret;
virCheckFlags(VIR_DOMAIN_VCPU_LIVE |
VIR_DOMAIN_VCPU_CONFIG |
VIR_DOMAIN_VCPU_MAXIMUM, -1);
- if (priv->opened[XEN_UNIFIED_XEND_OFFSET]) {
- ret = xenDaemonDomainGetVcpusFlags(dom, flags);
- if (ret != -2)
- return ret;
- }
- if (priv->opened[XEN_UNIFIED_XM_OFFSET]) {
- ret = xenXMDomainGetVcpusFlags(dom, flags);
- if (ret != -2)
- return ret;
+ if (dom->id < 0) {
+ if (priv->xendConfigVersion < XEND_CONFIG_VERSION_3_0_4)
+ return xenXMDomainGetVcpusFlags(dom, flags);
+ else
+ return xenDaemonDomainGetVcpusFlags(dom, flags);
+ } else {
+ if (flags == (VIR_DOMAIN_VCPU_CONFIG | VIR_DOMAIN_VCPU_MAXIMUM))
+ return xenHypervisorGetVcpuMax(dom);
+ else
+ return xenDaemonDomainGetVcpusFlags(dom, flags);
}
- if (flags == (VIR_DOMAIN_VCPU_CONFIG | VIR_DOMAIN_VCPU_MAXIMUM))
- return xenHypervisorGetVcpuMax(dom);
-
- virReportError(VIR_ERR_NO_SUPPORT, __FUNCTION__);
- return -1;
}
static int
* structure with direct calls in xen_unified.c.
*/
struct xenUnifiedDriver {
- virDrvDomainPinVcpu xenDomainPinVcpu;
- virDrvDomainGetVcpus xenDomainGetVcpus;
virDrvConnectListDefinedDomains xenListDefinedDomains;
virDrvConnectNumOfDefinedDomains xenNumOfDefinedDomains;
virDrvDomainCreate xenDomainCreate;
};
typedef struct xen_v2d5_setmaxmem xen_v2d5_setmaxmem;
-/*
- * The information for a setmaxvcpu system hypercall
- */
-#define XEN_V0_OP_SETMAXVCPU 41
-#define XEN_V1_OP_SETMAXVCPU 41
-#define XEN_V2_OP_SETMAXVCPU 15
-
-struct xen_v0_setmaxvcpu {
- domid_t domain;
- uint32_t maxvcpu;
-};
-typedef struct xen_v0_setmaxvcpu xen_v0_setmaxvcpu;
-typedef struct xen_v0_setmaxvcpu xen_v1_setmaxvcpu;
-
-struct xen_v2_setmaxvcpu {
- uint32_t maxvcpu;
-};
-typedef struct xen_v2_setmaxvcpu xen_v2_setmaxvcpu;
-
/*
* The information for a setvcpumap system hypercall
* Note that between 1 and 2 the limitation to 64 physical CPU was lifted
xen_v0_getdomaininfolistop getdomaininfolist;
xen_v0_domainop domain;
xen_v0_setmaxmem setmaxmem;
- xen_v0_setmaxvcpu setmaxvcpu;
xen_v0_setvcpumap setvcpumap;
xen_v0_vcpuinfo getvcpuinfo;
uint8_t padding[128];
union {
xen_v2_setmaxmem setmaxmem;
xen_v2d5_setmaxmem setmaxmemd5;
- xen_v2_setmaxvcpu setmaxvcpu;
xen_v2_setvcpumap setvcpumap;
xen_v2d5_setvcpumap setvcpumapd5;
xen_v2_vcpuinfo getvcpuinfo;
#endif
struct xenUnifiedDriver xenHypervisorDriver = {
- .xenDomainPinVcpu = xenHypervisorPinVcpu,
- .xenDomainGetVcpus = xenHypervisorGetVcpus,
.xenDomainGetSchedulerType = xenHypervisorGetSchedulerType,
.xenDomainGetSchedulerParameters = xenHypervisorGetSchedulerParameters,
.xenDomainSetSchedulerParameters = xenHypervisorSetSchedulerParameters,
return ret;
}
-/**
- * virXen_setmaxvcpus:
- * @handle: the hypervisor handle
- * @id: the domain id
- * @vcpus: the numbers of vcpus
- *
- * Do a low level hypercall to change the max vcpus amount
- *
- * Returns 0 or -1 in case of failure
- */
-static int
-virXen_setmaxvcpus(int handle, int id, unsigned int vcpus)
-{
- int ret = -1;
-
- if (hv_versions.hypervisor > 1) {
- xen_op_v2_dom op;
-
- memset(&op, 0, sizeof(op));
- op.cmd = XEN_V2_OP_SETMAXVCPU;
- op.domain = (domid_t) id;
- op.u.setmaxvcpu.maxvcpu = vcpus;
- ret = xenHypervisorDoV2Dom(handle, &op);
- } else if (hv_versions.hypervisor == 1) {
- xen_op_v1 op;
-
- memset(&op, 0, sizeof(op));
- op.cmd = XEN_V1_OP_SETMAXVCPU;
- op.u.setmaxvcpu.domain = (domid_t) id;
- op.u.setmaxvcpu.maxvcpu = vcpus;
- ret = xenHypervisorDoV1Op(handle, &op);
- } else if (hv_versions.hypervisor == 0) {
- xen_op_v0 op;
-
- memset(&op, 0, sizeof(op));
- op.cmd = XEN_V0_OP_SETMAXVCPU;
- op.u.setmaxvcpu.domain = (domid_t) id;
- op.u.setmaxvcpu.maxvcpu = vcpus;
- ret = xenHypervisorDoV0Op(handle, &op);
- }
- return ret;
-}
/**
* virXen_setvcpumap:
}
-/**
- * xenHypervisorSetVcpus:
- * @domain: pointer to domain object
- * @nvcpus: the new number of virtual CPUs for this domain
- *
- * Dynamically change the number of virtual CPUs used by the domain.
- *
- * Returns 0 in case of success, -1 in case of failure.
- */
-
-int
-xenHypervisorSetVcpus(virDomainPtr domain, unsigned int nvcpus)
-{
- int ret;
- xenUnifiedPrivatePtr priv = domain->conn->privateData;
-
- if (domain->id < 0 || nvcpus < 1)
- return -1;
-
- ret = virXen_setmaxvcpus(priv->handle, domain->id, nvcpus);
- if (ret < 0)
- return -1;
- return 0;
-}
-
/**
* xenHypervisorPinVcpu:
* @domain: pointer to domain object
ATTRIBUTE_NONNULL (1);
int xenHypervisorCheckID (virConnectPtr conn,
int id);
-int xenHypervisorSetVcpus (virDomainPtr domain,
- unsigned int nvcpus)
- ATTRIBUTE_NONNULL (1);
int xenHypervisorPinVcpu (virDomainPtr domain,
unsigned int vcpu,
unsigned char *cpumap,
*
* Change virtual CPUs allocation of domain according to flags.
*
- * Returns 0 on success, -1 if an error message was issued, and -2 if
- * the unified driver should keep trying.
+ * Returns 0 on success, -1 if an error message was issued.
*/
int
xenDaemonDomainSetVcpusFlags(virDomainPtr domain,
unsigned int flags)
{
char buf[VIR_UUID_BUFLEN];
- xenUnifiedPrivatePtr priv = domain->conn->privateData;
int max;
virCheckFlags(VIR_DOMAIN_VCPU_LIVE |
return -1;
}
- if ((domain->id < 0 && priv->xendConfigVersion < XEND_CONFIG_VERSION_3_0_4) ||
- (flags & VIR_DOMAIN_VCPU_MAXIMUM))
- return -2;
-
- /* With xendConfigVersion 2, only _LIVE is supported. With
- * xendConfigVersion 3, only _LIVE|_CONFIG is supported for
- * running domains, or _CONFIG for inactive domains. */
- if (priv->xendConfigVersion < XEND_CONFIG_VERSION_3_0_4) {
- if (flags & VIR_DOMAIN_VCPU_CONFIG) {
- virReportError(VIR_ERR_OPERATION_INVALID, "%s",
- _("Xend version does not support modifying "
- "persistent config"));
- return -1;
- }
- } else if (domain->id < 0) {
+ if (domain->id < 0) {
if (flags & VIR_DOMAIN_VCPU_LIVE) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("domain not running"));
* Extract information about virtual CPUs of domain according to flags.
*
* Returns the number of vcpus on success, -1 if an error message was
- * issued, and -2 if the unified driver should keep trying.
+ * issued.
*/
int
{
struct sexpr *root;
int ret;
- xenUnifiedPrivatePtr priv = domain->conn->privateData;
virCheckFlags(VIR_DOMAIN_VCPU_LIVE |
VIR_DOMAIN_VCPU_CONFIG |
VIR_DOMAIN_VCPU_MAXIMUM, -1);
- /* If xendConfigVersion is 2, then we can only report _LIVE (and
- * xm_internal reports _CONFIG). If it is 3, then _LIVE and
- * _CONFIG are always in sync for a running system. */
- if (domain->id < 0 && priv->xendConfigVersion < XEND_CONFIG_VERSION_3_0_4)
- return -2;
if (domain->id < 0 && (flags & VIR_DOMAIN_VCPU_LIVE)) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("domain not active"));
ret = MIN(vcpus, ret);
}
if (!ret)
- ret = -2;
+ ret = -1;
sexpr_free(root);
return ret;
}
}
struct xenUnifiedDriver xenDaemonDriver = {
- .xenDomainPinVcpu = xenDaemonDomainPinVcpu,
- .xenDomainGetVcpus = xenDaemonDomainGetVcpus,
.xenListDefinedDomains = xenDaemonListDefinedDomains,
.xenNumOfDefinedDomains = xenDaemonNumOfDefinedDomains,
.xenDomainCreate = xenDaemonDomainCreate,
#define XM_XML_ERROR "Invalid xml"
struct xenUnifiedDriver xenXMDriver = {
- .xenDomainPinVcpu = xenXMDomainPinVcpu,
.xenListDefinedDomains = xenXMListDefinedDomains,
.xenNumOfDefinedDomains = xenXMNumOfDefinedDomains,
.xenDomainCreate = xenXMDomainCreate,
*
* Change virtual CPUs allocation of domain according to flags.
*
- * Returns 0 on success, -1 if an error message was issued, and -2 if
- * the unified driver should keep trying.
+ * Returns 0 on success, -1 if an error message was issued.
*/
int
xenXMDomainSetVcpusFlags(virDomainPtr domain,
VIR_DOMAIN_VCPU_CONFIG |
VIR_DOMAIN_VCPU_MAXIMUM, -1);
- if (domain->id != -1)
- return -2;
if (flags & VIR_DOMAIN_VCPU_LIVE) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("domain is not running"));
* Extract information about virtual CPUs of domain according to flags.
*
* Returns the number of vcpus on success, -1 if an error message was
- * issued, and -2 if the unified driver should keep trying.
+ * issued.
*/
int
xenXMDomainGetVcpusFlags(virDomainPtr domain, unsigned int flags)
VIR_DOMAIN_VCPU_CONFIG |
VIR_DOMAIN_VCPU_MAXIMUM, -1);
- if (domain->id != -1)
- return -2;
if (flags & VIR_DOMAIN_VCPU_LIVE) {
virReportError(VIR_ERR_OPERATION_FAILED, "%s", _("domain not active"));
return -1;
virReportError(VIR_ERR_INVALID_ARG, __FUNCTION__);
return -1;
}
- if (domain->id != -1) {
- virReportError(VIR_ERR_INVALID_ARG,
- "%s", _("not inactive domain"));
- return -1;
- }
xenUnifiedLock(priv);