     if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
     {
         req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
-        vcpu_pause_nosync(v);
+        mem_event_vcpu_pause(v);
     }
     req.gfn = value;
     return rc;
 }
+void mem_event_vcpu_pause(struct vcpu *v)
+{
+    ASSERT(v == current);
+
+    atomic_inc(&v->mem_event_pause_count);
+    vcpu_pause_nosync(v);
+}
+
+void mem_event_vcpu_unpause(struct vcpu *v)
+{
+    int old, new, prev = v->mem_event_pause_count.counter;
+
+    /* All unpause requests come as the result of toolstack responses.
+     * Prevent underflow of the vcpu pause count. */
+    do
+    {
+        old = prev;
+        new = old - 1;
+
+        if ( new < 0 )
+        {
+            printk(XENLOG_G_WARNING
+                   "d%d:v%d mem_event: Too many unpause attempts\n",
+                   v->domain->domain_id, v->vcpu_id);
+            return;
+        }
+
+        prev = cmpxchg(&v->mem_event_pause_count.counter, old, new);
+    } while ( prev != old );
+
+    vcpu_unpause(v);
+}
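For reference, the intended pairing of the two helpers looks like the sketch below. This is an illustration only, not part of the patch; example_post_event() and example_handle_response() are hypothetical stand-ins for the producer and consumer paths touched by the hunks that follow.

/* Illustration only -- not part of this patch. */
static void example_post_event(struct vcpu *v, mem_event_request_t *req)
{
    /* Producer side: flag the pause in the request before posting it,
     * so the consumer knows an unpause is owed on the response path. */
    req->flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
    mem_event_vcpu_pause(v);         /* bumps mem_event_pause_count */
}

static void example_handle_response(struct vcpu *v, mem_event_response_t *rsp)
{
    /* Consumer side: unpause only when the producer flagged a pause.
     * A duplicate or spurious response decrements nothing: it trips the
     * underflow check above and is merely logged. */
    if ( rsp->flags & MEM_EVENT_FLAG_VCPU_PAUSED )
        mem_event_vcpu_unpause(v);
}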
     if ( v->domain == d )
     {
         req.flags = MEM_EVENT_FLAG_VCPU_PAUSED;
-        vcpu_pause_nosync(v);
+        mem_event_vcpu_pause(v);
     }
     req.p2mt = p2m_ram_shared;
     /* Unpause domain/vcpu */
     if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-        vcpu_unpause(v);
+        mem_event_vcpu_unpause(v);
 }
     return 0;
     /* Pause domain if request came from guest and gfn has paging type */
     if ( p2m_is_paging(p2mt) && v->domain == d )
     {
-        vcpu_pause_nosync(v);
+        mem_event_vcpu_pause(v);
         req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
     }
     /* No need to inform pager if the gfn is not in the page-out path */
 }
     /* Unpause domain */
     if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-        vcpu_unpause(v);
+        mem_event_vcpu_unpause(v);
 }
 }
     /* Pause the current VCPU */
     if ( p2ma != p2m_access_n2rwx )
-        vcpu_pause_nosync(v);
+        mem_event_vcpu_pause(v);
     /* VCPU may be paused, return whether we promoted automatically */
     return (p2ma == p2m_access_n2rwx);
     /* Unpause domain */
     if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-        vcpu_unpause(v);
+        mem_event_vcpu_unpause(v);
 }
 }
 int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
                      XEN_GUEST_HANDLE(void) u_domctl);
+void mem_event_vcpu_pause(struct vcpu *v);
+void mem_event_vcpu_unpause(struct vcpu *v);
+
 #endif /* __MEM_EVENT_H__ */
     unsigned long    pause_flags;
     atomic_t         pause_count;
+    /* Count of VCPU pauses pending mem_event replies. */
+    atomic_t         mem_event_pause_count;
+
     /* IRQ-safe virq_lock protects against delivering VIRQ to stale evtchn. */
     u16              virq_to_evtchn[NR_VIRQS];
     spinlock_t       virq_lock;
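A dedicated counter is needed because pause_count is shared by every pauser in the hypervisor; without it, a toolstack sending more responses than there were requests could release pauses taken by unrelated code. Roughly, the two helpers maintain the relationship below (illustrative assertions only, assuming no concurrent pause/unpause traffic):

/* Illustration only: every mem_event pause also takes a generic pause
 * reference, so mem_event-triggered unpauses can release at most the
 * references that mem_event itself took. */
ASSERT(atomic_read(&v->mem_event_pause_count) >= 0);
ASSERT(atomic_read(&v->mem_event_pause_count) <=
       atomic_read(&v->pause_count));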