There is no need to hold the global domctl lock across domain_kill() -
the domain lock is fully sufficient here, and cleanup of multiple
domains in parallel performs quite a bit better this way.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
if ( d == current->domain )
return -EINVAL;
- /* Protected by domctl_lock. */
+ /* Protected by d->domain_lock. */
switch ( d->is_dying )
{
case DOMDYING_alive:
+ domain_unlock(d);
domain_pause(d);
+ domain_lock(d);
+ /*
+ * With the domain lock dropped, d->is_dying may have changed. Call
+ * ourselves recursively if so, which is safe as then we won't come
+ * back here.
+ */
+ if ( d->is_dying != DOMDYING_alive )
+ return domain_kill(d);
d->is_dying = DOMDYING_dying;
- spin_barrier(&d->domain_lock);
evtchn_destroy(d);
gnttab_release_mappings(d);
tmem_destroy(d->tmem_client);
break;
case XEN_DOMCTL_destroydomain:
+ domctl_lock_release();
+ domain_lock(d);
ret = domain_kill(d);
+ domain_unlock(d);
if ( ret == -ERESTART )
ret = hypercall_create_continuation(
__HYPERVISOR_domctl, "h", u_domctl);
- break;
+ goto domctl_out_unlock_domonly;
case XEN_DOMCTL_setnodeaffinity:
{