} else if ((state == VIR_DOMAIN_CRASHED &&
reason == VIR_DOMAIN_CRASHED_PANICKED) ||
state == VIR_DOMAIN_PAUSED) {
- if (qemuProcessStartCPUs(driver, vm, dom->conn,
+ if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
QEMU_ASYNC_JOB_NONE) < 0) {
if (virGetLastError() == NULL)
* this returns (whether returning success or failure).
*/
static int
-qemuDomainSaveInternal(virQEMUDriverPtr driver, virDomainPtr dom,
+qemuDomainSaveInternal(virQEMUDriverPtr driver,
virDomainObjPtr vm, const char *path,
int compressed, const char *compressedpath,
const char *xmlin, unsigned int flags)
if (ret < 0) {
if (was_running && virDomainObjIsActive(vm)) {
virErrorPtr save_err = virSaveLastError();
- if (qemuProcessStartCPUs(driver, vm, dom->conn,
+ if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_SAVE_CANCELED,
QEMU_ASYNC_JOB_SAVE) < 0) {
VIR_WARN("Unable to resume guest CPUs after save failure");
goto cleanup;
}
- ret = qemuDomainSaveInternal(driver, dom, vm, path, compressed,
+ ret = qemuDomainSaveInternal(driver, vm, path, compressed,
compressedpath, dxml, flags);
cleanup:
VIR_INFO("Saving state of domain '%s' to '%s'", vm->def->name, name);
- ret = qemuDomainSaveInternal(driver, dom, vm, name, compressed,
+ ret = qemuDomainSaveInternal(driver, vm, name, compressed,
compressedpath, NULL, flags);
if (ret == 0)
vm->hasManagedSave = true;
}
if (resume && virDomainObjIsActive(vm)) {
- if (qemuProcessStartCPUs(driver, vm, dom->conn,
+ if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
QEMU_ASYNC_JOB_DUMP) < 0) {
event = virDomainEventLifecycleNewFromObj(vm,
virReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("Dump failed"));
- ret = qemuProcessStartCPUs(driver, vm, NULL,
+ ret = qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
QEMU_ASYNC_JOB_DUMP);
/* If it was running before, resume it now unless caller requested pause. */
if (header->was_running && !start_paused) {
- if (qemuProcessStartCPUs(driver, vm, conn,
+ if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_RESTORED,
asyncJob) < 0) {
if (virGetLastError() == NULL)
/* The domain is expected to be locked and active. */
static int
-qemuDomainSnapshotCreateActiveInternal(virConnectPtr conn,
- virQEMUDriverPtr driver,
+qemuDomainSnapshotCreateActiveInternal(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virDomainSnapshotObjPtr snap,
unsigned int flags)
cleanup:
if (resume && virDomainObjIsActive(vm) &&
- qemuProcessStartCPUs(driver, vm, conn,
+ qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
event = virDomainEventLifecycleNewFromObj(vm,
static int
-qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn,
- virQEMUDriverPtr driver,
+qemuDomainSnapshotCreateActiveExternal(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virDomainSnapshotObjPtr snap,
unsigned int flags)
cleanup:
if (resume && virDomainObjIsActive(vm) &&
- qemuProcessStartCPUs(driver, vm, conn,
+ qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
event = virDomainEventLifecycleNewFromObj(vm,
if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY ||
snap->def->memory == VIR_DOMAIN_SNAPSHOT_LOCATION_EXTERNAL) {
/* external checkpoint or disk snapshot */
- if (qemuDomainSnapshotCreateActiveExternal(domain->conn, driver,
+ if (qemuDomainSnapshotCreateActiveExternal(driver,
vm, snap, flags) < 0)
goto endjob;
} else {
/* internal checkpoint */
- if (qemuDomainSnapshotCreateActiveInternal(domain->conn, driver,
+ if (qemuDomainSnapshotCreateActiveInternal(driver,
vm, snap, flags) < 0)
goto endjob;
}
_("guest unexpectedly quit"));
goto endjob;
}
- rc = qemuProcessStartCPUs(driver, vm, snapshot->domain->conn,
+ rc = qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_FROM_SNAPSHOT,
QEMU_ASYNC_JOB_START);
if (rc < 0)
VIR_DEBUG("Restoring pre-migration state due to migration error");
/* we got here through some sort of failure; start the domain again */
- if (qemuProcessStartCPUs(driver, vm, conn,
+ if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
/* Hm, we already know we are in error here. We don't want to
QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
goto stopjob;
- if (qemuProcessFinishStartup(dconn, driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ if (qemuProcessFinishStartup(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
false, VIR_DOMAIN_PAUSED_MIGRATION) < 0)
goto stopjob;
* >= 0.10.6 to work properly. This isn't strictly necessary on
* older qemu's, but it also doesn't hurt anything there
*/
- if (qemuProcessStartCPUs(driver, vm, dconn,
+ if (qemuProcessStartCPUs(driver, vm,
inPostCopy ? VIR_DOMAIN_RUNNING_POSTCOPY
: VIR_DOMAIN_RUNNING_MIGRATED,
QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
int
qemuMonitorGetDiskSecret(qemuMonitorPtr mon,
- virConnectPtr conn,
const char *path,
char **secret,
size_t *secretLen)
*secret = NULL;
*secretLen = 0;
- QEMU_MONITOR_CALLBACK(mon, ret, diskSecretLookup, conn, mon->vm,
+ QEMU_MONITOR_CALLBACK(mon, ret, diskSecretLookup, mon->vm,
path, secret, secretLen);
return ret;
}
int
-qemuMonitorStartCPUs(qemuMonitorPtr mon,
- virConnectPtr conn)
+qemuMonitorStartCPUs(qemuMonitorPtr mon)
{
QEMU_CHECK_MONITOR(mon);
if (mon->json)
- return qemuMonitorJSONStartCPUs(mon, conn);
+ return qemuMonitorJSONStartCPUs(mon);
else
- return qemuMonitorTextStartCPUs(mon, conn);
+ return qemuMonitorTextStartCPUs(mon);
}
typedef void (*qemuMonitorErrorNotifyCallback)(qemuMonitorPtr mon,
virDomainObjPtr vm,
void *opaque);
-/* XXX we'd really like to avoid virConnectPtr here
- * It is required so the callback can find the active
- * secret driver. Need to change this to work like the
- * security drivers do, to avoid this
- */
typedef int (*qemuMonitorDiskSecretLookupCallback)(qemuMonitorPtr mon,
- virConnectPtr conn,
virDomainObjPtr vm,
const char *path,
char **secret,
# define qemuMonitorHMPCommand(mon, cmd, reply) \
qemuMonitorHMPCommandWithFd(mon, cmd, -1, reply)
-/* XXX same comment about virConnectPtr as above */
int qemuMonitorGetDiskSecret(qemuMonitorPtr mon,
- virConnectPtr conn,
const char *path,
char **secret,
size_t *secretLen);
qemuMonitorDumpStatsPtr stats,
const char *error);
-int qemuMonitorStartCPUs(qemuMonitorPtr mon,
- virConnectPtr conn);
+int qemuMonitorStartCPUs(qemuMonitorPtr mon);
int qemuMonitorStopCPUs(qemuMonitorPtr mon);
typedef enum {
int
-qemuMonitorJSONStartCPUs(qemuMonitorPtr mon,
- virConnectPtr conn ATTRIBUTE_UNUSED)
+qemuMonitorJSONStartCPUs(qemuMonitorPtr mon)
{
int ret;
virJSONValuePtr cmd = qemuMonitorJSONMakeCommand("cont", NULL);
int qemuMonitorJSONSetCapabilities(qemuMonitorPtr mon);
-int qemuMonitorJSONStartCPUs(qemuMonitorPtr mon,
- virConnectPtr conn);
+int qemuMonitorJSONStartCPUs(qemuMonitorPtr mon);
int qemuMonitorJSONStopCPUs(qemuMonitorPtr mon);
int qemuMonitorJSONGetStatus(qemuMonitorPtr mon,
bool *running,
qemuMonitorMessagePtr msg,
const char *data,
size_t len ATTRIBUTE_UNUSED,
- void *opaque)
+ void *opaque ATTRIBUTE_UNUSED)
{
- virConnectPtr conn = opaque;
char *path;
char *passphrase = NULL;
size_t passphrase_len = 0;
/* Fetch the disk password if possible */
res = qemuMonitorGetDiskSecret(mon,
- conn,
path,
&passphrase,
&passphrase_len);
}
int
-qemuMonitorTextStartCPUs(qemuMonitorPtr mon,
- virConnectPtr conn)
+qemuMonitorTextStartCPUs(qemuMonitorPtr mon)
{
char *reply;
if (qemuMonitorTextCommandWithHandler(mon, "cont",
qemuMonitorSendDiskPassphrase,
- conn,
+ NULL,
-1, &reply) < 0)
return -1;
int scm_fd,
char **reply);
-int qemuMonitorTextStartCPUs(qemuMonitorPtr mon,
- virConnectPtr conn);
+int qemuMonitorTextStartCPUs(qemuMonitorPtr mon);
int qemuMonitorTextStopCPUs(qemuMonitorPtr mon);
int qemuMonitorTextGetStatus(qemuMonitorPtr mon,
bool *running,
static int
qemuProcessFindVolumeQcowPassphrase(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
- virConnectPtr conn,
virDomainObjPtr vm,
const char *path,
char **secretRet,
size_t *secretLen,
void *opaque ATTRIBUTE_UNUSED)
{
+ virConnectPtr conn = NULL;
virDomainDiskDefPtr disk;
int ret = -1;
goto cleanup;
}
+ conn = virGetConnectSecret();
ret = qemuProcessGetVolumeQcowPassphrase(conn, disk, secretRet, secretLen);
cleanup:
+ virObjectUnref(conn);
virObjectUnlock(vm);
return ret;
}
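
Illustration (not part of the patch): the hunk above shows the replacement for the virConnectPtr that the removed XXX comment said was only needed so the disk-secret callback could reach the secret driver. Instead of having every caller thread a connection down to the callback, the callback now opens one on demand with virGetConnectSecret() and drops it in cleanup. A minimal sketch of that pattern follows; only virGetConnectSecret() and virObjectUnref() are taken from the patch, the function name and lookup body are hypothetical placeholders:

/* Sketch only: open a secret-driver connection lazily inside the
 * callback instead of accepting a virConnectPtr from the caller. */
static int
exampleDiskSecretLookup(const char *path, char **secret, size_t *secretLen)
{
    virConnectPtr conn = NULL;
    int ret = -1;

    *secret = NULL;
    *secretLen = 0;

    if (!path)
        goto cleanup;

    /* Connection to the secret driver is acquired here, on demand. */
    if (!(conn = virGetConnectSecret()))
        goto cleanup;

    /* ... resolve the passphrase for 'path' via the secret driver
     * (placeholder for the real lookup) ... */
    ret = 0;

 cleanup:
    virObjectUnref(conn);   /* tolerates NULL, as in the hunk above */
    return ret;
}

This is the "work like the security drivers do" change the removed XXX comment asked for: callers such as qemuMonitorStartCPUs() and qemuProcessStartCPUs() no longer have to carry a connection they never use themselves.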
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_CRASHED)
reason = VIR_DOMAIN_RUNNING_CRASHED;
- if (qemuProcessStartCPUs(driver, vm, NULL,
+ if (qemuProcessStartCPUs(driver, vm,
reason,
QEMU_ASYNC_JOB_NONE) < 0) {
if (virGetLastError() == NULL)
*/
int
qemuProcessStartCPUs(virQEMUDriverPtr driver, virDomainObjPtr vm,
- virConnectPtr conn, virDomainRunningReason reason,
+ virDomainRunningReason reason,
qemuDomainAsyncJob asyncJob)
{
int ret = -1;
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
goto release;
- ret = qemuMonitorStartCPUs(priv->mon, conn);
+ ret = qemuMonitorStartCPUs(priv->mon);
if (qemuDomainObjExitMonitor(driver, vm) < 0)
ret = -1;
static int
qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver,
virDomainObjPtr vm,
- virConnectPtr conn,
qemuMigrationJobPhase phase,
virDomainState state,
int reason)
* and hope we are all set */
VIR_DEBUG("Incoming migration finished, resuming domain %s",
vm->def->name);
- if (qemuProcessStartCPUs(driver, vm, conn,
+ if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
QEMU_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain %s", vm->def->name);
static int
qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver,
virDomainObjPtr vm,
- virConnectPtr conn,
qemuMigrationJobPhase phase,
virDomainState state,
int reason,
if (state == VIR_DOMAIN_PAUSED &&
(reason == VIR_DOMAIN_PAUSED_MIGRATION ||
reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
- if (qemuProcessStartCPUs(driver, vm, conn,
+ if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
QEMU_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain %s", vm->def->name);
static int
qemuProcessRecoverJob(virQEMUDriverPtr driver,
virDomainObjPtr vm,
- virConnectPtr conn,
const struct qemuDomainJobObj *job,
unsigned int *stopFlags)
{
switch (job->asyncJob) {
case QEMU_ASYNC_JOB_MIGRATION_OUT:
- if (qemuProcessRecoverMigrationOut(driver, vm, conn, job->phase,
+ if (qemuProcessRecoverMigrationOut(driver, vm, job->phase,
state, reason, stopFlags) < 0)
return -1;
break;
case QEMU_ASYNC_JOB_MIGRATION_IN:
- if (qemuProcessRecoverMigrationIn(driver, vm, conn, job->phase,
+ if (qemuProcessRecoverMigrationIn(driver, vm, job->phase,
state, reason) < 0)
return -1;
break;
(reason == VIR_DOMAIN_PAUSED_SNAPSHOT ||
reason == VIR_DOMAIN_PAUSED_MIGRATION)) ||
reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
- if (qemuProcessStartCPUs(driver, vm, conn,
+ if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
QEMU_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain '%s' after migration to file",
* Finish starting a new domain.
*/
int
-qemuProcessFinishStartup(virConnectPtr conn,
- virQEMUDriverPtr driver,
+qemuProcessFinishStartup(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuDomainAsyncJob asyncJob,
bool startCPUs,
if (startCPUs) {
VIR_DEBUG("Starting domain CPUs");
- if (qemuProcessStartCPUs(driver, vm, conn,
+ if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_BOOTED,
asyncJob) < 0) {
if (!virGetLastError())
qemuMigrationRunIncoming(driver, vm, incoming->deferredURI, asyncJob) < 0)
goto stop;
- if (qemuProcessFinishStartup(conn, driver, vm, asyncJob,
+ if (qemuProcessFinishStartup(driver, vm, asyncJob,
!(flags & VIR_QEMU_PROCESS_START_PAUSED),
incoming ?
VIR_DOMAIN_PAUSED_MIGRATION :
if (qemuProcessRefreshBalloonState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
goto error;
- if (qemuProcessRecoverJob(driver, obj, conn, &oldjob, &stopFlags) < 0)
+ if (qemuProcessRecoverJob(driver, obj, &oldjob, &stopFlags) < 0)
goto error;
if (qemuProcessUpdateDevices(driver, obj) < 0)
int qemuProcessStartCPUs(virQEMUDriverPtr driver,
virDomainObjPtr vm,
- virConnectPtr conn,
virDomainRunningReason reason,
qemuDomainAsyncJob asyncJob);
int qemuProcessStopCPUs(virQEMUDriverPtr driver,
virNetDevVPortProfileOp vmop,
unsigned int flags);
-int qemuProcessFinishStartup(virConnectPtr conn,
- virQEMUDriverPtr driver,
+int qemuProcessFinishStartup(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuDomainAsyncJob asyncJob,
bool startCPUs,
goto cleanup;
}
- if (qemuMonitorJSONStartCPUs(qemuMonitorTestGetMonitor(test), NULL) < 0)
+ if (qemuMonitorJSONStartCPUs(qemuMonitorTestGetMonitor(test)) < 0)
goto cleanup;
if (qemuMonitorGetStatus(qemuMonitorTestGetMonitor(test),