This implies that the individual destroy functions will have to remain
capable of being called for a vCPU that the corresponding init function
was never run on.
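
[For illustration only, not part of the patch: a minimal, self-contained
sketch of that requirement. The init loop may fail partway through, and the
unwind then visits every vCPU, including ones the init loop never reached.
All names here (struct vcpu, nested_vcpu_init, nested_vcpu_destroy) are
hypothetical stand-ins for nestedhvm_vcpu_initialise() /
nestedhvm_vcpu_destroy(); the point is that destroy must degrade to a no-op
on untouched state.]

#include <errno.h>
#include <stdlib.h>

/* Hypothetical per-vCPU nested state; NULL means "init never ran". */
struct vcpu {
    void *shadow_vmcb;
};

/* Stand-in for nestedhvm_vcpu_initialise(): may fail with -ENOMEM. */
static int nested_vcpu_init(struct vcpu *v)
{
    v->shadow_vmcb = calloc(1, 4096);
    return v->shadow_vmcb ? 0 : -ENOMEM;
}

/*
 * Stand-in for nestedhvm_vcpu_destroy(): must be safe to call on a vCPU
 * the init function was never run on.  free(NULL) is a no-op, and the
 * pointer is the only state, so it is.
 */
static void nested_vcpu_destroy(struct vcpu *v)
{
    free(v->shadow_vmcb);
    v->shadow_vmcb = NULL;
}

int main(void)
{
    struct vcpu vcpus[4] = { { NULL } };
    int i, rc = 0;

    /* Mirror the patch: try to initialise every vCPU, keep first error. */
    for ( i = 0; i < 4; i++ )
        if ( rc == 0 )
            rc = nested_vcpu_init(&vcpus[i]);

    /* On failure, unwind over *all* vCPUs, initialised or not. */
    if ( rc )
        for ( i = 0; i < 4; i++ )
            nested_vcpu_destroy(&vcpus[i]);

    return rc ? EXIT_FAILURE : EXIT_SUCCESS;
}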
While at it, also clean up some inefficiencies in the corresponding
parameter validation code.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
             rc = -EPERM;
             break;
         }
+        if ( !a.value )
+            break;
         if ( a.value > 1 )
             rc = -EINVAL;
-        if ( !is_hvm_domain(d) )
-            rc = -EINVAL;
         /* Remove the check below once we have
          * shadow-on-shadow.
          */
-        if ( cpu_has_svm && !paging_mode_hap(d) && a.value )
+        if ( cpu_has_svm && !paging_mode_hap(d) )
             rc = -EINVAL;
         /* Set up NHVM state for any vcpus that are already up */
         if ( !d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] )
+        {
             for_each_vcpu(d, v)
                 if ( rc == 0 )
                     rc = nestedhvm_vcpu_initialise(v);
+            if ( rc )
+                for_each_vcpu(d, v)
+                    nestedhvm_vcpu_destroy(v);
+        }
         break;
     case HVM_PARAM_BUFIOREQ_EVTCHN:
         rc = -EINVAL;
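
[To make the reworked validation above easier to follow: clearing the
parameter (a.value == 0) now exits early and skips all enable-only checks,
and the standalone !is_hvm_domain() test goes away, the surrounding
hypercall code apparently already rejecting non-HVM domains before this
switch is reached. Below is a compilable sketch of the resulting control
flow; set_nestedhvm_param, cpu_has_svm_flag, and paging_mode_hap_enabled
are hypothetical stand-ins for the real predicates, not hypervisor code.]

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the hypervisor's CPU/paging predicates. */
static bool cpu_has_svm_flag = true;
static bool paging_mode_hap_enabled = false;

/* Sketch of the validation order after the patch; returns 0 on success. */
static int set_nestedhvm_param(unsigned long value)
{
    if ( !value )
        return 0;               /* disabling needs no further checks */
    if ( value > 1 )
        return -EINVAL;         /* only 0 and 1 are meaningful */
    /* Shadow-on-shadow is not implemented, so SVM requires HAP. */
    if ( cpu_has_svm_flag && !paging_mode_hap_enabled )
        return -EINVAL;
    return 0;                   /* proceed to per-vCPU initialisation */
}

int main(void)
{
    printf("disable      -> %d\n", set_nestedhvm_param(0));
    printf("bogus value  -> %d\n", set_nestedhvm_param(2));
    printf("enable, !HAP -> %d\n", set_nestedhvm_param(1));
    return 0;
}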
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
 void
 nestedhvm_vcpu_destroy(struct vcpu *v)
 {
-    if ( nestedhvm_enabled(v->domain) && hvm_funcs.nhvm_vcpu_destroy )
+    if ( hvm_funcs.nhvm_vcpu_destroy )
         hvm_funcs.nhvm_vcpu_destroy(v);
 }
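
[Note on this hunk: the nestedhvm_enabled() part of the condition is
dropped, presumably so the new error path above can actually reach the
vendor hook. That predicate keys off HVM_PARAM_NESTEDHVM, which is only
recorded once the whole set-param operation has succeeded, so during the
rollback it would still read as false and the old check would have turned
the cleanup into a no-op. The remaining hvm_funcs.nhvm_vcpu_destroy test
is what keeps the function safe to call on vCPUs whose init never ran.]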