struct x86_vendor *vendors;
struct x86_feature *features;
struct x86_model *models;
+ struct x86_feature *migrate_blockers;
};
static struct x86_map* virCPUx86Map = NULL;
}
+/* Deep-copy a single x86_feature: duplicates the feature name and its
+ * CPUID data. On success returns a newly allocated feature the caller
+ * owns and must release with x86FeatureFree(); returns NULL on
+ * allocation or copy failure (the partially built copy is freed here).
+ * NOTE(review): the copy is presumably NOT linked into any list
+ * (feature->next left NULL by VIR_ALLOC zero-fill) — callers such as
+ * the migrate_blockers list builder rely on setting ->next themselves;
+ * confirm VIR_ALLOC zero-fills. */
+static struct x86_feature *
+x86FeatureCopy(const struct x86_feature *src)
+{
+ struct x86_feature *feature;
+
+ if (VIR_ALLOC(feature) < 0)
+ return NULL;
+
+ if (VIR_STRDUP(feature->name, src->name) < 0)
+ goto error;
+
+ if ((feature->data = x86DataCopy(src->data)) == NULL)
+ goto error;
+
+ return feature;
+
+ error:
+ x86FeatureFree(feature);
+ return NULL;
+}
+
+
static struct x86_feature *
x86FeatureFind(const struct x86_map *map,
const char *name)
int ret = 0;
size_t i;
int n;
+ char *str = NULL;
+ bool migratable = true;
+ struct x86_feature *migrate_blocker = NULL;
if (!(feature = x86FeatureNew()))
goto error;
goto ignore;
}
+ str = virXPathString("string(@migratable)", ctxt);
+ if (STREQ_NULLABLE(str, "no"))
+ migratable = false;
+
n = virXPathNodeSet("./cpuid", ctxt, &nodes);
if (n < 0)
goto ignore;
goto error;
}
+ if (!migratable) {
+ if ((migrate_blocker = x86FeatureCopy(feature)) == NULL)
+ goto error;
+
+ migrate_blocker->next = map->migrate_blockers;
+ map->migrate_blockers = migrate_blocker;
+ }
+
if (map->features == NULL) {
map->features = feature;
} else {
out:
ctxt->node = ctxt_node;
VIR_FREE(nodes);
+ VIR_FREE(str);
return ret;
ignore:
x86FeatureFree(feature);
+ x86FeatureFree(migrate_blocker);
goto out;
}
x86VendorFree(vendor);
}
+ while (map->migrate_blockers != NULL) {
+ struct x86_feature *migrate_blocker = map->migrate_blockers;
+ map->migrate_blockers = migrate_blocker->next;
+ x86FeatureFree(migrate_blocker);
+ }
+
VIR_FREE(map);
}
const virCPUDef *host)
{
virCPUDefPtr oldguest = NULL;
+ const struct x86_map *map;
+ const struct x86_feature *feat;
size_t i;
int ret = -1;
guest->match = VIR_CPU_MATCH_EXACT;
- /* no updates are required */
- if (guest->nfeatures == 0) {
- virCPUDefFreeModel(guest);
- return virCPUDefCopyModel(guest, host, true);
- }
+ if (!(map = virCPUx86GetMap()))
+ goto cleanup;
/* update the host model according to the desired configuration */
if (!(oldguest = virCPUDefCopy(guest)))
if (virCPUDefCopyModel(guest, host, true) < 0)
goto cleanup;
+ /* Remove non-migratable features by default.
+ * Note: this only works as long as no CPU model contains non-migratable
+ * features directly.
+ * NOTE(review): VIR_DELETE_ELEMENT_INPLACE shifts the remaining array
+ * elements down, so after a deletion the element that slides into index
+ * i is skipped by the outer loop's i++ — the index should be re-checked
+ * (e.g. decrement i or restart the inner scan) to catch consecutive
+ * non-migratable features. Also confirm whether features[i].name must
+ * be VIR_FREE'd before deletion; as written it appears to leak. */
+ for (i = 0; i < guest->nfeatures; i++) {
+ for (feat = map->migrate_blockers; feat; feat = feat->next) {
+ if (STREQ(feat->name, guest->features[i].name))
+ VIR_DELETE_ELEMENT_INPLACE(guest->features, i, guest->nfeatures);
+ }
+ }
+
for (i = 0; i < oldguest->nfeatures; i++) {
if (virCPUDefUpdateFeature(guest,
oldguest->features[i].name,
DO_TEST_UPDATE("x86", "host", "host-model", VIR_CPU_COMPARE_IDENTICAL);
DO_TEST_UPDATE("x86", "host", "host-model-nofallback", VIR_CPU_COMPARE_IDENTICAL);
DO_TEST_UPDATE("x86", "host", "host-passthrough", VIR_CPU_COMPARE_IDENTICAL);
+ DO_TEST_UPDATE("x86", "host-invtsc", "host-model", VIR_CPU_COMPARE_SUPERSET);
/* computing baseline CPUs */
DO_TEST_BASELINE("x86", "incompatible-vendors", 0, -1);
--- /dev/null
+<cpu mode='host-model' match='exact'>
+ <model fallback='allow'>SandyBridge</model>
+ <vendor>Intel</vendor>
+ <feature policy='require' name='osxsave'/>
+ <feature policy='require' name='pcid'/>
+ <feature policy='require' name='pdcm'/>
+ <feature policy='require' name='xtpr'/>
+ <feature policy='require' name='tm2'/>
+ <feature policy='require' name='est'/>
+ <feature policy='require' name='smx'/>
+ <feature policy='require' name='vmx'/>
+ <feature policy='require' name='ds_cpl'/>
+ <feature policy='require' name='monitor'/>
+ <feature policy='require' name='dtes64'/>
+ <feature policy='require' name='pbe'/>
+ <feature policy='require' name='tm'/>
+ <feature policy='require' name='ht'/>
+ <feature policy='require' name='ss'/>
+ <feature policy='require' name='acpi'/>
+ <feature policy='require' name='ds'/>
+ <feature policy='require' name='vme'/>
+</cpu>