    return device_num;
}
-int vfio_block_multiple_devices_migration(Error **errp)
+int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp)
{
    int ret;
        return 0;
    }
+    if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
+        error_setg(errp, "Migration is currently not supported with multiple "
+                         "VFIO devices");
+        return -EINVAL;
+    }
+
    error_setg(&multiple_devices_migration_blocker,
               "Migration is currently not supported with multiple "
               "VFIO devices");
    return false;
}
-int vfio_block_giommu_migration(Error **errp)
+int vfio_block_giommu_migration(VFIODevice *vbasedev, Error **errp)
{
    int ret;
        return 0;
    }
+    if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
+        error_setg(errp,
+                   "Migration is currently not supported with vIOMMU enabled");
+        return -EINVAL;
+    }
+
    error_setg(&giommu_migration_blocker,
               "Migration is currently not supported with vIOMMU enabled");
    ret = migrate_add_blocker(giommu_migration_blocker, errp);
    feature->argsz = sizeof(buf);
    feature->flags = VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_MIGRATION;
    if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
-        if (errno == ENOTTY) {
-            error_report("%s: VFIO migration is not supported in kernel",
-                         vbasedev->name);
-        } else {
-            error_report("%s: Failed to query VFIO migration support, err: %s",
-                         vbasedev->name, strerror(errno));
-        }
-
        return -errno;
    }
    return 0;
}
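+/*
+ * Turn @err into a per-device migration blocker, or fail outright when
+ * migration was explicitly requested with enable-migration=on.
+ */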
+static int vfio_block_migration(VFIODevice *vbasedev, Error *err, Error **errp)
+{
+    int ret;
+
+    if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
+        error_propagate(errp, err);
+        return -EINVAL;
+    }
+
+    vbasedev->migration_blocker = error_copy(err);
+    error_free(err);
+
+    ret = migrate_add_blocker(vbasedev->migration_blocker, errp);
+    if (ret < 0) {
+        error_free(vbasedev->migration_blocker);
+        vbasedev->migration_blocker = NULL;
+    }
+
+    return ret;
+}
+
/* ---------------------------------------------------------------------- */
int64_t vfio_mig_bytes_transferred(void)
int vfio_migration_realize(VFIODevice *vbasedev, Error **errp)
{
-    int ret = -ENOTSUP;
+    Error *err = NULL;
+    int ret;
-    if (!vbasedev->enable_migration) {
-        goto add_blocker;
+    if (vbasedev->enable_migration == ON_OFF_AUTO_OFF) {
+        error_setg(&err, "%s: Migration is disabled for VFIO device",
+                   vbasedev->name);
+        return vfio_block_migration(vbasedev, err, errp);
    }
    ret = vfio_migration_init(vbasedev);
    if (ret) {
-        goto add_blocker;
+        if (ret == -ENOTTY) {
+            error_setg(&err, "%s: VFIO migration is not supported in kernel",
+                       vbasedev->name);
+        } else {
+            error_setg(&err,
+                       "%s: Migration couldn't be initialized for VFIO device, "
+                       "err: %d (%s)",
+                       vbasedev->name, ret, strerror(-ret));
+        }
+
+        return vfio_block_migration(vbasedev, err, errp);
+    }
+
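+    /*
+     * Without device dirty page tracking, migration is blocked under
+     * enable-migration=auto; with enable-migration=on it was explicitly
+     * requested, so only warn and continue.
+     */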
+    if (!vbasedev->dirty_pages_supported) {
+        if (vbasedev->enable_migration == ON_OFF_AUTO_AUTO) {
+            error_setg(&err,
+                       "%s: VFIO device doesn't support device dirty tracking",
+                       vbasedev->name);
+            return vfio_block_migration(vbasedev, err, errp);
+        }
+
+        warn_report("%s: VFIO device doesn't support device dirty tracking",
+                    vbasedev->name);
+    }
-    ret = vfio_block_multiple_devices_migration(errp);
+    ret = vfio_block_multiple_devices_migration(vbasedev, errp);
    if (ret) {
        return ret;
    }
-    ret = vfio_block_giommu_migration(errp);
+    ret = vfio_block_giommu_migration(vbasedev, errp);
    if (ret) {
        return ret;
    }
-    trace_vfio_migration_probe(vbasedev->name);
+    trace_vfio_migration_realize(vbasedev->name);
    return 0;
-
-add_blocker:
-    error_setg(&vbasedev->migration_blocker,
-               "VFIO device doesn't support migration");
-
-    ret = migrate_add_blocker(vbasedev->migration_blocker, errp);
-    if (ret < 0) {
-        error_free(vbasedev->migration_blocker);
-        vbasedev->migration_blocker = NULL;
-    }
-    return ret;
}
void vfio_migration_exit(VFIODevice *vbasedev)
                    VFIO_FEATURE_ENABLE_REQ_BIT, true),
    DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
-    DEFINE_PROP_BOOL("x-enable-migration", VFIOPCIDevice,
-                     vbasedev.enable_migration, false),
+    DEFINE_PROP_ON_OFF_AUTO("enable-migration", VFIOPCIDevice,
+                            vbasedev.enable_migration, ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_BOOL("x-balloon-allowed", VFIOPCIDevice,
                     vbasedev.ram_block_discard_allowed, false),
vfio_load_device_config_state(const char *name) " (%s)"
vfio_load_state(const char *name, uint64_t data) " (%s) data 0x%"PRIx64
vfio_load_state_device_data(const char *name, uint64_t data_size, int ret) " (%s) size 0x%"PRIx64" ret %d"
-vfio_migration_probe(const char *name) " (%s)"
+vfio_migration_realize(const char *name) " (%s)"
vfio_migration_set_state(const char *name, const char *state) " (%s) state %s"
vfio_migration_state_notifier(const char *name, const char *state) " (%s) state %s"
vfio_save_block(const char *name, int data_size) " (%s) data_size %d"
    bool needs_reset;
    bool no_mmap;
    bool ram_block_discard_allowed;
-    bool enable_migration;
+    OnOffAuto enable_migration;
    VFIODeviceOps *ops;
    unsigned int num_irqs;
    unsigned int num_regions;
extern VFIOGroupList vfio_group_list;
bool vfio_mig_active(void);
-int vfio_block_multiple_devices_migration(Error **errp);
+int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp);
void vfio_unblock_multiple_devices_migration(void);
-int vfio_block_giommu_migration(Error **errp);
+int vfio_block_giommu_migration(VFIODevice *vbasedev, Error **errp);
int64_t vfio_mig_bytes_transferred(void);
void vfio_reset_bytes_transferred(void);
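With this change, per-device VFIO migration support is selected through the
on/off/auto "enable-migration" property rather than the experimental
x-enable-migration boolean: "on" fails realize when migration cannot be
enabled, "auto" (the default) enables it when supported and otherwise
registers a migration blocker, and "off" always registers the blocker. A
minimal usage sketch on the QEMU command line (the host PCI address and
machine options are illustrative only):

    qemu-system-x86_64 -machine q35,accel=kvm \
        -device vfio-pci,host=0000:3b:00.1,enable-migration=on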