     v->vcpu_info_mfn = INVALID_MFN;
 }
 
+static void vcpu_destroy(struct vcpu *v)
+{
+    free_cpumask_var(v->cpu_hard_affinity);
+    free_cpumask_var(v->cpu_hard_affinity_tmp);
+    free_cpumask_var(v->cpu_hard_affinity_saved);
+    free_cpumask_var(v->cpu_soft_affinity);
+
+    free_vcpu_struct(v);
+}
+
 struct vcpu *vcpu_create(
     struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
 {
          !zalloc_cpumask_var(&v->cpu_hard_affinity_tmp) ||
          !zalloc_cpumask_var(&v->cpu_hard_affinity_saved) ||
          !zalloc_cpumask_var(&v->cpu_soft_affinity) )
-        goto fail_free;
+        goto fail;
     if ( is_idle_domain(d) )
     {
         goto fail_wq;
     if ( arch_vcpu_create(v) != 0 )
-    {
-        sched_destroy_vcpu(v);
- fail_wq:
-        destroy_waitqueue_vcpu(v);
- fail_free:
-        free_cpumask_var(v->cpu_hard_affinity);
-        free_cpumask_var(v->cpu_hard_affinity_tmp);
-        free_cpumask_var(v->cpu_hard_affinity_saved);
-        free_cpumask_var(v->cpu_soft_affinity);
-        free_vcpu_struct(v);
-        return NULL;
-    }
+        goto fail_sched;
     d->vcpu[vcpu_id] = v;
     if ( vcpu_id != 0 )
         vcpu_check_shutdown(v);
     return v;
+
+ fail_sched:
+    sched_destroy_vcpu(v);
+ fail_wq:
+    destroy_waitqueue_vcpu(v);
+ fail:
+    vcpu_destroy(v);
+
+    return NULL;
 }
 
 static int late_hwdom_init(struct domain *d)
     for ( i = d->max_vcpus - 1; i >= 0; i-- )
         if ( (v = d->vcpu[i]) != NULL )
-        {
-            free_cpumask_var(v->cpu_hard_affinity);
-            free_cpumask_var(v->cpu_hard_affinity_tmp);
-            free_cpumask_var(v->cpu_hard_affinity_saved);
-            free_cpumask_var(v->cpu_soft_affinity);
-            free_vcpu_struct(v);
-        }
+            vcpu_destroy(v);
 
     if ( d->target != NULL )
         put_domain(d->target);