@@ ... @@ int hvm_domain_initialise(struct domain *d)
     if ( hvm_tsc_scaling_supported )
         d->arch.hvm.tsc_scaling_ratio = hvm_default_tsc_scaling_ratio;
 
+    rc = viridian_domain_init(d);
+    if ( rc )
+        goto fail2;
+
     rc = hvm_funcs.domain_initialise(d);
     if ( rc != 0 )
         goto fail2;
@@ ... @@ int hvm_domain_initialise(struct domain *d)
     hvm_destroy_cacheattr_region_list(d);
     destroy_perdomain_mapping(d, PERDOMAIN_VIRT_START, 0);
  fail:
+    viridian_domain_deinit(d);
     return rc;
 }
@@ ... @@ void hvm_domain_relinquish_resources(struct domain *d)
     if ( hvm_funcs.nhvm_domain_relinquish_resources )
         hvm_funcs.nhvm_domain_relinquish_resources(d);
 
-    viridian_domain_deinit(d);
-
     hvm_destroy_all_ioreq_servers(d);
 
     msixtbl_pt_cleanup(d);
@@ ... @@ void hvm_domain_destroy(struct domain *d)
     }
 
     destroy_vpci_mmcfg(d);
+
+    viridian_domain_deinit(d);
 }
 
 static int hvm_save_tsc_adjust(struct vcpu *v, hvm_domain_context_t *h)
@@ ... @@ int hvm_vcpu_initialise(struct vcpu *v)
          && (rc = nestedhvm_vcpu_initialise(v)) < 0 ) /* teardown: nestedhvm_vcpu_destroy */
         goto fail5;
 
+    rc = viridian_vcpu_init(v);
+    if ( rc )
+        goto fail5;
+
     rc = hvm_all_ioreq_servers_add_vcpu(d, v);
     if ( rc != 0 )
         goto fail6;
@@ ... @@ int hvm_vcpu_initialise(struct vcpu *v)
  fail2:
     hvm_vcpu_cacheattr_destroy(v);
  fail1:
+    viridian_vcpu_deinit(v);
     return rc;
 }
 
 void hvm_vcpu_destroy(struct vcpu *v)
 {
-    viridian_vcpu_deinit(v);
-
     hvm_all_ioreq_servers_remove_vcpu(v->domain, v);
 
     if ( hvm_altp2m_supported() )
@@ ... @@ void hvm_vcpu_destroy(struct vcpu *v)
     vlapic_destroy(v);
 
     hvm_vcpu_cacheattr_destroy(v);
+
+    viridian_vcpu_deinit(v);
 }
 
 void hvm_vcpu_down(struct vcpu *v)
@@ ... @@
 void viridian_time_ref_count_freeze(struct domain *d);
 void viridian_time_ref_count_thaw(struct domain *d);
 
+int viridian_vcpu_init(struct vcpu *v);
+int viridian_domain_init(struct domain *d);
+
 void viridian_vcpu_deinit(struct vcpu *v);
 void viridian_domain_deinit(struct domain *d);
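
The hunks above only declare the new viridian_vcpu_init()/viridian_domain_init() hooks; their bodies are not shown. Purely as a hypothetical sketch (not the code added by this patch), the hooks could look like the following if all they do is allocate and release dynamically allocated viridian state hung off struct vcpu/struct domain; the field name arch.hvm.viridian and the use of xzalloc()/xfree() here are assumptions for illustration. Note that viridian_domain_deinit() is now reached from the generic fail: label in hvm_domain_initialise(), including when viridian_domain_init() itself failed, so the deinit side has to be safe to call on state that was never (fully) set up.

/* Illustrative sketch only -- not the code added by this patch. */
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/xmalloc.h>

#include <asm/hvm/viridian.h>

int viridian_vcpu_init(struct vcpu *v)
{
    /* Hypothetical per-vCPU state, zeroed on allocation. */
    v->arch.hvm.viridian = xzalloc(struct viridian_vcpu);

    return v->arch.hvm.viridian ? 0 : -ENOMEM;
}

int viridian_domain_init(struct domain *d)
{
    /* Hypothetical per-domain state, zeroed on allocation. */
    d->arch.hvm.viridian = xzalloc(struct viridian_domain);

    return d->arch.hvm.viridian ? 0 : -ENOMEM;
}

void viridian_vcpu_deinit(struct vcpu *v)
{
    /* Safe even if init never ran or failed: xfree(NULL) is a no-op. */
    xfree(v->arch.hvm.viridian);
    v->arch.hvm.viridian = NULL;
}

void viridian_domain_deinit(struct domain *d)
{
    struct vcpu *v;

    /* Drop any per-vCPU state before releasing the domain state. */
    for_each_vcpu ( d, v )
        viridian_vcpu_deinit(v);

    xfree(d->arch.hvm.viridian);
    d->arch.hvm.viridian = NULL;
}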