#define consumer_is_xen(e) (!!(e)->xen_consumer)
+/*
+ * Lock an event channel exclusively. This is allowed only when the channel is
+ * free or unbound either when taking or when releasing the lock, as any
+ * concurrent operation on the event channel using evtchn_read_trylock() will
+ * simply assume the event channel is free or unbound whenever
+ * evtchn_read_trylock() returns false.
+ */
+static inline void evtchn_write_lock(struct evtchn *evtchn)
+{
+ write_lock(&evtchn->lock);
+
+#ifndef NDEBUG
+ evtchn->old_state = evtchn->state;
+#endif
+}
+
+static inline unsigned int old_state(const struct evtchn *evtchn)
+{
+#ifndef NDEBUG
+ return evtchn->old_state;
+#else
+ return ECS_RESERVED; /* Just to allow things to build. */
+#endif
+}
+
+static inline void evtchn_write_unlock(struct evtchn *evtchn)
+{
+ /* Enforce lock discipline. */
+ ASSERT(old_state(evtchn) == ECS_FREE || old_state(evtchn) == ECS_UNBOUND ||
+ evtchn->state == ECS_FREE || evtchn->state == ECS_UNBOUND);
+
+ write_unlock(&evtchn->lock);
+}
+
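The read-side helpers that later hunks rely on (evtchn_read_lock(),
evtchn_read_trylock() and evtchn_read_unlock()) are not shown in this
excerpt. Assuming they are plain wrappers around the corresponding rwlock
primitives, kept alongside the write-side helpers above, a minimal sketch
would be:

    static inline void evtchn_read_lock(struct evtchn *evtchn)
    {
        read_lock(&evtchn->lock);
    }

    static inline bool evtchn_read_trylock(struct evtchn *evtchn)
    {
        return read_trylock(&evtchn->lock);
    }

    static inline void evtchn_read_unlock(struct evtchn *evtchn)
    {
        read_unlock(&evtchn->lock);
    }

Readers never change a channel's state, so only the exclusive (write) side
needs the old_state bookkeeping used by the unlock-time assertion.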
/*
* The function alloc_unbound_xen_event_channel() allows an arbitrary
* notifier function to be specified. However, very few unique functions
return NULL;
}
chn[i].port = port + i;
- spin_lock_init(&chn[i].lock);
+ rwlock_init(&chn[i].lock);
}
return chn;
}
int port;
domid_t dom = alloc->dom;
long rc;
- unsigned long flags;
d = rcu_lock_domain_by_any_id(dom);
if ( d == NULL )
if ( rc )
goto out;
- spin_lock_irqsave(&chn->lock, flags);
+ evtchn_write_lock(chn);
chn->state = ECS_UNBOUND;
if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
chn->u.unbound.remote_domid = current->domain->domain_id;
evtchn_port_init(d, chn);
- spin_unlock_irqrestore(&chn->lock, flags);
+ evtchn_write_unlock(chn);
alloc->port = port;
}
-static unsigned long double_evtchn_lock(struct evtchn *lchn,
- struct evtchn *rchn)
+static void double_evtchn_lock(struct evtchn *lchn, struct evtchn *rchn)
{
- unsigned long flags;
-
if ( lchn <= rchn )
{
- spin_lock_irqsave(&lchn->lock, flags);
+ evtchn_write_lock(lchn);
if ( lchn != rchn )
- spin_lock(&rchn->lock);
+ evtchn_write_lock(rchn);
}
else
{
- spin_lock_irqsave(&rchn->lock, flags);
- spin_lock(&lchn->lock);
+ evtchn_write_lock(rchn);
+ evtchn_write_lock(lchn);
}
-
- return flags;
}
-static void double_evtchn_unlock(struct evtchn *lchn, struct evtchn *rchn,
- unsigned long flags)
+static void double_evtchn_unlock(struct evtchn *lchn, struct evtchn *rchn)
{
if ( lchn != rchn )
- spin_unlock(&lchn->lock);
- spin_unlock_irqrestore(&rchn->lock, flags);
+ evtchn_write_unlock(lchn);
+ evtchn_write_unlock(rchn);
}
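Acquiring the two write locks in ascending address order is what keeps
concurrent binders deadlock-free: even if two CPUs pass the same pair of
channels in opposite argument order, both take the lower-addressed lock
first. A hypothetical interleaving (channel names invented for illustration,
with chn_a at the lower address):

    /* CPU0 */ double_evtchn_lock(chn_a, chn_b); /* takes chn_a, then chn_b */
    /* CPU1 */ double_evtchn_lock(chn_b, chn_a); /* also chn_a, then chn_b */

Whichever CPU wins chn_a proceeds to chn_b while the loser waits on chn_a
holding nothing, so no ABBA cycle can form.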
static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
int lport, rport = bind->remote_port;
domid_t rdom = bind->remote_dom;
long rc;
- unsigned long flags;
if ( rdom == DOMID_SELF )
rdom = current->domain->domain_id;
if ( rc )
goto out;
- flags = double_evtchn_lock(lchn, rchn);
+ double_evtchn_lock(lchn, rchn);
lchn->u.interdomain.remote_dom = rd;
lchn->u.interdomain.remote_port = rport;
*/
evtchn_port_set_pending(ld, lchn->notify_vcpu_id, lchn);
- double_evtchn_unlock(lchn, rchn, flags);
+ double_evtchn_unlock(lchn, rchn);
bind->local_port = lport;
struct domain *d = current->domain;
int virq = bind->virq, vcpu = bind->vcpu;
int rc = 0;
- unsigned long flags;
if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
return -EINVAL;
chn = evtchn_from_port(d, port);
- spin_lock_irqsave(&chn->lock, flags);
+ evtchn_write_lock(chn);
chn->state = ECS_VIRQ;
chn->notify_vcpu_id = vcpu;
chn->u.virq = virq;
evtchn_port_init(d, chn);
- spin_unlock_irqrestore(&chn->lock, flags);
+ evtchn_write_unlock(chn);
v->virq_to_evtchn[virq] = bind->port = port;
struct domain *d = current->domain;
int port, vcpu = bind->vcpu;
long rc = 0;
- unsigned long flags;
if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
(d->vcpu[vcpu] == NULL) )
chn = evtchn_from_port(d, port);
- spin_lock_irqsave(&chn->lock, flags);
+ evtchn_write_lock(chn);
chn->state = ECS_IPI;
chn->notify_vcpu_id = vcpu;
evtchn_port_init(d, chn);
- spin_unlock_irqrestore(&chn->lock, flags);
+ evtchn_write_unlock(chn);
bind->port = port;
struct pirq *info;
int port = 0, pirq = bind->pirq;
long rc;
- unsigned long flags;
if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
return -EINVAL;
goto out;
}
- spin_lock_irqsave(&chn->lock, flags);
+ evtchn_write_lock(chn);
chn->state = ECS_PIRQ;
chn->u.pirq.irq = pirq;
link_pirq_port(port, chn, v);
evtchn_port_init(d, chn);
- spin_unlock_irqrestore(&chn->lock, flags);
+ evtchn_write_unlock(chn);
bind->port = port;
struct evtchn *chn1, *chn2;
int port2;
long rc = 0;
- unsigned long flags;
again:
spin_lock(&d1->event_lock);
BUG_ON(chn2->state != ECS_INTERDOMAIN);
BUG_ON(chn2->u.interdomain.remote_dom != d1);
- flags = double_evtchn_lock(chn1, chn2);
+ double_evtchn_lock(chn1, chn2);
evtchn_free(d1, chn1);
chn2->state = ECS_UNBOUND;
chn2->u.unbound.remote_domid = d1->domain_id;
- double_evtchn_unlock(chn1, chn2, flags);
+ double_evtchn_unlock(chn1, chn2);
goto out;
BUG();
}
- spin_lock_irqsave(&chn1->lock, flags);
+ evtchn_write_lock(chn1);
evtchn_free(d1, chn1);
- spin_unlock_irqrestore(&chn1->lock, flags);
+ evtchn_write_unlock(chn1);
out:
if ( d2 != NULL )
struct evtchn *lchn, *rchn;
struct domain *rd;
int rport, ret = 0;
- unsigned long flags;
if ( !port_is_valid(ld, lport) )
return -EINVAL;
lchn = evtchn_from_port(ld, lport);
- spin_lock_irqsave(&lchn->lock, flags);
+ evtchn_read_lock(lchn);
/* Guest cannot send via a Xen-attached event channel. */
if ( unlikely(consumer_is_xen(lchn)) )
}
out:
- spin_unlock_irqrestore(&lchn->lock, flags);
+ evtchn_read_unlock(lchn);
return ret;
}
d = v->domain;
chn = evtchn_from_port(d, port);
- spin_lock(&chn->lock);
- evtchn_port_set_pending(d, v->vcpu_id, chn);
- spin_unlock(&chn->lock);
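+ /*
+ * Use a trylock: this path runs with interrupts disabled (under the
+ * IRQ-saving virq_lock), while evtchn_write_lock() no longer disables
+ * IRQs. Failure means a writer holds the lock, i.e. the channel is
+ * free or unbound (see the discipline comment above), in which case
+ * there is no event to deliver and dropping it is correct.
+ */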
+ if ( evtchn_read_trylock(chn) )
+ {
+ evtchn_port_set_pending(d, v->vcpu_id, chn);
+ evtchn_read_unlock(chn);
+ }
out:
spin_unlock_irqrestore(&v->virq_lock, flags);
goto out;
chn = evtchn_from_port(d, port);
- spin_lock(&chn->lock);
- evtchn_port_set_pending(d, chn->notify_vcpu_id, chn);
- spin_unlock(&chn->lock);
+ if ( evtchn_read_trylock(chn) )
+ {
+ evtchn_port_set_pending(d, chn->notify_vcpu_id, chn);
+ evtchn_read_unlock(chn);
+ }
out:
spin_unlock_irqrestore(&v->virq_lock, flags);
{
int port;
struct evtchn *chn;
- unsigned long flags;
/*
* PV guests: It should not be possible to race with __evtchn_close(). The
}
chn = evtchn_from_port(d, port);
- spin_lock_irqsave(&chn->lock, flags);
- evtchn_port_set_pending(d, chn->notify_vcpu_id, chn);
- spin_unlock_irqrestore(&chn->lock, flags);
+ if ( evtchn_read_trylock(chn) )
+ {
+ evtchn_port_set_pending(d, chn->notify_vcpu_id, chn);
+ evtchn_read_unlock(chn);
+ }
}
static struct domain *global_virq_handlers[NR_VIRQS] __read_mostly;
{
struct domain *d = current->domain;
struct evtchn *evtchn;
- unsigned long flags;
if ( unlikely(!port_is_valid(d, port)) )
return -EINVAL;
evtchn = evtchn_from_port(d, port);
- spin_lock_irqsave(&evtchn->lock, flags);
+
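+ /*
+ * A blocking read lock (rather than a trylock) is fine here, as long as
+ * unmask is only ever reached from ordinary hypercall context, where
+ * waiting for a writer to release the lock cannot deadlock.
+ */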
+ evtchn_read_lock(evtchn);
+
evtchn_port_unmask(d, evtchn);
- spin_unlock_irqrestore(&evtchn->lock, flags);
+
+ evtchn_read_unlock(evtchn);
return 0;
}
{
struct evtchn *chn;
int port, rc;
- unsigned long flags;
spin_lock(&ld->event_lock);
if ( rc )
goto out;
- spin_lock_irqsave(&chn->lock, flags);
+ evtchn_write_lock(chn);
chn->state = ECS_UNBOUND;
chn->xen_consumer = get_xen_consumer(notification_fn);
chn->notify_vcpu_id = lvcpu;
chn->u.unbound.remote_domid = remote_domid;
- spin_unlock_irqrestore(&chn->lock, flags);
+ evtchn_write_unlock(chn);
write_atomic(&ld->xen_evtchns, ld->xen_evtchns + 1);
{
struct evtchn *lchn, *rchn;
struct domain *rd;
- unsigned long flags;
if ( !port_is_valid(ld, lport) )
{
lchn = evtchn_from_port(ld, lport);
- spin_lock_irqsave(&lchn->lock, flags);
+ if ( !evtchn_read_trylock(lchn) )
+ return;
if ( likely(lchn->state == ECS_INTERDOMAIN) )
{
evtchn_port_set_pending(rd, rchn->notify_vcpu_id, rchn);
}
- spin_unlock_irqrestore(&lchn->lock, flags);
+ evtchn_read_unlock(lchn);
}
void evtchn_check_pollers(struct domain *d, unsigned int port)