     struct cpu_user_regs *regs;
     struct hvm_io_op *io_opp;
     unsigned long gmfn;
+    struct domain *d = v->domain;
     io_opp = &v->arch.hvm_vcpu.io_op;
     regs = &io_opp->io_context;
-    vio = get_vio(v->domain, v->vcpu_id);
+    vio = get_vio(d, v->vcpu_id);
     p = &vio->vp_ioreq;
     if ( p->state != STATE_IORESP_READY )
...
     memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
     /* Has memory been dirtied? */
-    if ( p->dir == IOREQ_READ && p->data_is_ptr )
+    if ( (p->dir == IOREQ_READ) && p->data_is_ptr )
     {
         gmfn = get_mfn_from_gpfn(paging_gva_to_gfn(v, p->data));
-        mark_dirty(v->domain, gmfn);
+        mark_dirty(d, gmfn);
     }
+
+    /* The emulated I/O has completed: drop this vcpu's shutdown deferral. */
+    vcpu_end_shutdown_deferral(v);
 }
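The deferral dropped above is taken on the issuing side: before an emulated I/O request is handed off to the device model, the vcpu should claim a deferral and abandon the request if shutdown has already begun. A minimal sketch of that pattern (the hvm_send_assist_req call site and its body are assumptions, not part of this hunk):

    void hvm_send_assist_req(struct vcpu *v)
    {
        /* Do not start new I/O once shutdown has begun: the request is
         * implicitly dropped and this vcpu will be paused shortly. */
        if ( unlikely(!vcpu_start_shutdown_deferral(v)) )
            return;

        /* ... fill in and post the ioreq to the device model ... */
    }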
...
     atomic_set(&d->refcnt, 1);
     spin_lock_init(&d->big_lock);
     spin_lock_init(&d->page_alloc_lock);
+    spin_lock_init(&d->shutdown_lock);
     INIT_LIST_HEAD(&d->page_list);
     INIT_LIST_HEAD(&d->xenpage_list);
...
     xfree(d);
 }
+static void __domain_finalise_shutdown(struct domain *d)
+{
+    struct vcpu *v;
+
+    BUG_ON(!spin_is_locked(&d->shutdown_lock));
+
+    if ( d->is_shut_down )
+        return;
+
+    /* The domain is fully shut down only once every vcpu has rendezvoused. */
+    for_each_vcpu ( d, v )
+        if ( !v->paused_for_shutdown )
+            return;
+
+    d->is_shut_down = 1;
+
+    for_each_vcpu ( d, v )
+        vcpu_sleep_nosync(v);
+
+    /* Notify the control domain that the shutdown has completed. */
+    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+}
+
+static void vcpu_check_shutdown(struct vcpu *v)
+{
+    struct domain *d = v->domain;
+
+    spin_lock(&d->shutdown_lock);
+
+    if ( d->is_shutting_down )
+    {
+        /* Pause the vcpu at most once, then cancel any deferral it held. */
+        if ( !v->paused_for_shutdown )
+            atomic_inc(&v->pause_count);
+        v->paused_for_shutdown = 1;
+        v->defer_shutdown = 0;
+        __domain_finalise_shutdown(d);
+    }
+
+    spin_unlock(&d->shutdown_lock);
+}
+
 struct vcpu *alloc_vcpu(
     struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
 {
...
     if ( vcpu_id != 0 )
         d->vcpu[v->vcpu_id-1]->next_in_list = v;
+    /* Must be called after making new vcpu visible to for_each_vcpu(). */
+    vcpu_check_shutdown(v);
+
     return v;
 }
 void __domain_crash(struct domain *d)
 {
-    if ( d->is_shutdown )
+    if ( d->is_shutting_down )
     {
         /* Print nothing: the domain is already shutting down. */
     }
...
     if ( d->domain_id == 0 )
         dom0_shutdown(reason);
-    atomic_inc(&d->pause_count);
-    if ( !xchg(&d->is_shutdown, 1) )
-        d->shutdown_code = reason;
-    else
-        domain_unpause(d);
+    spin_lock(&d->shutdown_lock);
+
+    if ( d->is_shutting_down )
+    {
+        spin_unlock(&d->shutdown_lock);
+        return;
+    }
+
+    d->is_shutting_down = 1;
+    d->shutdown_code = reason;
+
+    smp_mb(); /* set shutdown status /then/ check for per-cpu deferrals */
+
+    /* Leave any vcpu that holds a shutdown deferral running for now. */
     for_each_vcpu ( d, v )
-        vcpu_sleep_nosync(v);
+    {
+        if ( v->defer_shutdown )
+            continue;
+        atomic_inc(&v->pause_count);
+        v->paused_for_shutdown = 1;
+    }
-    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+    __domain_finalise_shutdown(d);
+
+    spin_unlock(&d->shutdown_lock);
+}
+
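The smp_mb() in domain_shutdown() pairs with the barriers in vcpu_start_shutdown_deferral() and vcpu_end_shutdown_deferral(): each side writes its own flag and then reads the other's, so at least one side is guaranteed to observe the other's write. An illustrative interleaving (comments only, assuming the barriers provide full ordering):

    /* CPU A: domain_shutdown()          CPU B: vcpu_start_shutdown_deferral()
     *   d->is_shutting_down = 1;          v->defer_shutdown = 1;
     *   smp_mb();                         smp_mb();
     *   read v->defer_shutdown            read d->is_shutting_down
     *
     * At least one CPU sees the other's flag set: either A honours the
     * deferral and skips pausing the vcpu, or B falls into
     * vcpu_check_shutdown() and pauses itself. A vcpu can therefore never
     * keep running while also being counted as paused for shutdown. */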
+void domain_resume(struct domain *d)
+{
+    struct vcpu *v;
+
+    /*
+     * Some code paths assume that shutdown status does not get reset under
+     * their feet (e.g., some assertions make this assumption).
+     */
+    domain_pause(d);
+
+    spin_lock(&d->shutdown_lock);
+
+    d->is_shutting_down = d->is_shut_down = 0;
+
+    for_each_vcpu ( d, v )
+    {
+        if ( v->paused_for_shutdown )
+            vcpu_unpause(v);
+        v->paused_for_shutdown = 0;
+    }
+
+    spin_unlock(&d->shutdown_lock);
+
+    domain_unpause(d);
+}
+
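domain_resume() is the tool-driven inverse of domain_shutdown(). A plausible hypercall-side caller, sketched purely for illustration under the assumption of the usual domctl locking idiom (the XEN_DOMCTL_resumedomain case shown here is not part of this patch):

    case XEN_DOMCTL_resumedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            domain_resume(d);      /* clear shutdown state, unpause vcpus */
            rcu_unlock_domain(d);
            ret = 0;
        }
    }
    break;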
+int vcpu_start_shutdown_deferral(struct vcpu *v)
+{
+    v->defer_shutdown = 1;
+    smp_mb(); /* set deferral status /then/ check for shutdown */
+    if ( unlikely(v->domain->is_shutting_down) )
+        vcpu_check_shutdown(v);
+
+    /* Zero if shutdown won the race and the deferral was revoked. */
+    return v->defer_shutdown;
+}
+
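The return value matters: zero means shutdown won the race and the deferral was revoked, so the caller must not begin the asynchronous work. A typical caller pattern (start_async_op() is a hypothetical name):

    static int start_async_op(struct vcpu *v)
    {
        if ( !vcpu_start_shutdown_deferral(v) )
            return -EAGAIN;  /* shutting down: do not start new work */

        /* ... begin the operation; its completion path must call
         * vcpu_end_shutdown_deferral(v), cf. hvm_io_assist() above ... */
        return 0;
    }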
+void vcpu_end_shutdown_deferral(struct vcpu *v)
+{
+    v->defer_shutdown = 0;
+    smp_mb(); /* clear deferral status /then/ check for shutdown */
+    if ( unlikely(v->domain->is_shutting_down) )
+        vcpu_check_shutdown(v);
+}
 void domain_pause_for_debugger(void)
...
     bool_t nmi_pending;
     /* Avoid NMI reentry by allowing NMIs to be masked for short periods. */
     bool_t nmi_masked;
+    /* Require shutdown to be deferred for some asynchronous operation? */
+    bool_t defer_shutdown;
+    /* VCPU is paused following shutdown request (d->is_shutting_down)? */
+    bool_t paused_for_shutdown;
     unsigned long pause_flags;
     atomic_t pause_count;
...
     bool_t is_paused_by_controller;
     /* Guest has shut down (inc. reason code)? */
-    bool_t is_shutdown;
+    spinlock_t shutdown_lock;
+    bool_t is_shutting_down; /* in process of shutting down? */
+    bool_t is_shut_down;     /* fully shut down? */
     int shutdown_code;
     atomic_t pause_count;
 void domain_destroy(struct domain *d);
 void domain_kill(struct domain *d);
 void domain_shutdown(struct domain *d, u8 reason);
+void domain_resume(struct domain *d);
 void domain_pause_for_debugger(void);
+int vcpu_start_shutdown_deferral(struct vcpu *v);
+void vcpu_end_shutdown_deferral(struct vcpu *v);
+
 /*
  * Mark specified domain as crashed. This function always returns, even if the
  * caller is the specified domain. The domain is not synchronously descheduled