for ( i = 0; i < action->nr_guests; i++ )
{
+ struct evtchn *evtchn;
+ unsigned int pending = 2, masked = 2;
+
d = action->guest[i];
pirq = domain_irq_to_pirq(d, irq);
info = pirq_info(d, pirq);
+ evtchn = evtchn_from_port(d, info->evtchn);
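+ /*
+ * The interrupted context may itself hold this channel's lock, so
+ * only try-lock here (with IRQs off). On failure, pending/masked
+ * keep their sentinel value 2, which "-P?"/"-M?" below print as '?'.
+ */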
+ local_irq_disable();
+ if ( spin_trylock(&evtchn->lock) )
+ {
+ pending = evtchn_is_pending(d, evtchn);
+ masked = evtchn_is_masked(d, evtchn);
+ spin_unlock(&evtchn->lock);
+ }
+ local_irq_enable();
printk("%u:%3d(%c%c%c)",
- d->domain_id, pirq,
- evtchn_port_is_pending(d, info->evtchn) ? 'P' : '-',
- evtchn_port_is_masked(d, info->evtchn) ? 'M' : '-',
- (info->masked ? 'M' : '-'));
+ d->domain_id, pirq, "-P?"[pending],
+ "-M?"[masked], info->masked ? 'M' : '-');
if ( i + 1 != action->nr_guests )
printk(",");
}
if ( port_is_valid(guest, port) )
{
struct evtchn *chn = evtchn_from_port(guest, port);
+ unsigned long flags;
+ spin_lock_irqsave(&chn->lock, flags);
evtchn_port_set_pending(guest, chn->notify_vcpu_id, chn);
+ spin_unlock_irqrestore(&chn->lock, flags);
}
}
}
}
-static bool evtchn_2l_is_pending(const struct domain *d, evtchn_port_t port)
+static bool evtchn_2l_is_pending(const struct domain *d,
+ const struct evtchn *evtchn)
{
+ evtchn_port_t port = evtchn->port;
unsigned int max_ports = BITS_PER_EVTCHN_WORD(d) * BITS_PER_EVTCHN_WORD(d);
ASSERT(port < max_ports);
return (port < max_ports &&
guest_test_bit(d, port, &shared_info(d, evtchn_pending)));
}
-static bool evtchn_2l_is_masked(const struct domain *d, evtchn_port_t port)
+static bool evtchn_2l_is_masked(const struct domain *d,
+ const struct evtchn *evtchn)
{
+ evtchn_port_t port = evtchn->port;
unsigned int max_ports = BITS_PER_EVTCHN_WORD(d) * BITS_PER_EVTCHN_WORD(d);
ASSERT(port < max_ports);
if ( port_is_valid(d, port) )
{
- if ( evtchn_from_port(d, port)->state != ECS_FREE ||
- evtchn_port_is_busy(d, port) )
+ const struct evtchn *chn = evtchn_from_port(d, port);
+
+ if ( chn->state != ECS_FREE || evtchn_is_busy(d, chn) )
return -EBUSY;
}
else
unsigned long flags;
int port;
struct domain *d;
+ struct evtchn *chn;
ASSERT(!virq_is_global(virq));
goto out;
d = v->domain;
- evtchn_port_set_pending(d, v->vcpu_id, evtchn_from_port(d, port));
+ chn = evtchn_from_port(d, port);
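+ /* IRQs are already off here: v->virq_lock was taken with irqsave. */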
+ spin_lock(&chn->lock);
+ evtchn_port_set_pending(d, v->vcpu_id, chn);
+ spin_unlock(&chn->lock);
out:
spin_unlock_irqrestore(&v->virq_lock, flags);
goto out;
chn = evtchn_from_port(d, port);
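+ /* IRQs already off: the virq_lock was taken with irqsave. */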
+ spin_lock(&chn->lock);
evtchn_port_set_pending(d, chn->notify_vcpu_id, chn);
+ spin_unlock(&chn->lock);
out:
spin_unlock_irqrestore(&v->virq_lock, flags);
{
int port;
struct evtchn *chn;
+ unsigned long flags;
/*
* PV guests: It should not be possible to race with __evtchn_close(). The
* caller of this function must synchronise with pirq_guest_unbind().
*/
}
chn = evtchn_from_port(d, port);
+ spin_lock_irqsave(&chn->lock, flags);
evtchn_port_set_pending(d, chn->notify_vcpu_id, chn);
+ spin_unlock_irqrestore(&chn->lock, flags);
}
static struct domain *global_virq_handlers[NR_VIRQS] __read_mostly;
{
struct domain *d = current->domain;
struct evtchn *evtchn;
+ unsigned long flags;
if ( unlikely(!port_is_valid(d, port)) )
return -EINVAL;
evtchn = evtchn_from_port(d, port);
+ spin_lock_irqsave(&evtchn->lock, flags);
evtchn_port_unmask(d, evtchn);
+ spin_unlock_irqrestore(&evtchn->lock, flags);
return 0;
}
printk(" %4u [%d/%d/",
port,
- evtchn_port_is_pending(d, port),
- evtchn_port_is_masked(d, port));
+ evtchn_is_pending(d, chn),
+ evtchn_is_masked(d, chn));
evtchn_port_print_state(d, chn);
printk("]: s=%d n=%d x=%d",
chn->state, chn->notify_vcpu_id, chn->xen_consumer);
evtchn_fifo_set_pending(v, evtchn);
}
-static bool evtchn_fifo_is_pending(const struct domain *d, evtchn_port_t port)
+static bool evtchn_fifo_is_pending(const struct domain *d,
+ const struct evtchn *evtchn)
{
- const event_word_t *word = evtchn_fifo_word_from_port(d, port);
+ const event_word_t *word = evtchn_fifo_word_from_port(d, evtchn->port);
return word && guest_test_bit(d, EVTCHN_FIFO_PENDING, word);
}
-static bool_t evtchn_fifo_is_masked(const struct domain *d, evtchn_port_t port)
+static bool evtchn_fifo_is_masked(const struct domain *d,
+ const struct evtchn *evtchn)
{
- const event_word_t *word = evtchn_fifo_word_from_port(d, port);
+ const event_word_t *word = evtchn_fifo_word_from_port(d, evtchn->port);
return !word || guest_test_bit(d, EVTCHN_FIFO_MASKED, word);
}
-static bool_t evtchn_fifo_is_busy(const struct domain *d, evtchn_port_t port)
+static bool evtchn_fifo_is_busy(const struct domain *d,
+ const struct evtchn *evtchn)
{
- const event_word_t *word = evtchn_fifo_word_from_port(d, port);
+ const event_word_t *word = evtchn_fifo_word_from_port(d, evtchn->port);
return word && guest_test_bit(d, EVTCHN_FIFO_LINKED, word);
}
return true;
}
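+/*
+ * Under the PV shim, ECS_RESERVED channels with a non-zero port are in
+ * active use on the guest's behalf, so evtchn_usable() must treat them
+ * as usable.
+ */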
+#ifdef CONFIG_PV_SHIM
+# include <asm/pv/shim.h>
+# define arch_evtchn_is_special(chn) \
+ (pv_shim && (chn)->port && (chn)->state == ECS_RESERVED)
+#endif
+
#endif
return bucket_from_port(d, p) + (p % EVTCHNS_PER_BUCKET);
}
+/*
+ * "usable" as in "by a guest", i.e. Xen consumed channels are assumed to be
+ * taken care of separately where used for Xen's internal purposes.
+ */
+static bool evtchn_usable(const struct evtchn *evtchn)
+{
+ if ( evtchn->xen_consumer )
+ return false;
+
+#ifdef arch_evtchn_is_special
+ if ( arch_evtchn_is_special(evtchn) )
+ return true;
+#endif
+
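+ /* All states above ECS_RESERVED denote a channel usable by the guest. */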
+ BUILD_BUG_ON(ECS_FREE > ECS_RESERVED);
+ return evtchn->state > ECS_RESERVED;
+}
+
/* Wait on a Xen-attached event channel. */
#define wait_on_xen_event_channel(port, condition) \
do { \
/*
* Low-level event channel port ops.
+ *
+ * All hooks have to be called with a lock held which prevents the channel
+ * from changing state. This may be the domain event lock, the per-channel
+ * lock, or in the case of sending interdomain events also the other side's
+ * per-channel lock. Exceptions apply in certain cases for the PV shim.
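+ *
+ * E.g. the send path used throughout this patch (sketch):
+ *
+ *     spin_lock_irqsave(&chn->lock, flags);
+ *     evtchn_port_set_pending(d, chn->notify_vcpu_id, chn);
+ *     spin_unlock_irqrestore(&chn->lock, flags);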
*/
struct evtchn_port_ops {
void (*init)(struct domain *d, struct evtchn *evtchn);
void (*set_pending)(struct vcpu *v, struct evtchn *evtchn);
void (*clear_pending)(struct domain *d, struct evtchn *evtchn);
void (*unmask)(struct domain *d, struct evtchn *evtchn);
- bool (*is_pending)(const struct domain *d, evtchn_port_t port);
- bool (*is_masked)(const struct domain *d, evtchn_port_t port);
+ bool (*is_pending)(const struct domain *d, const struct evtchn *evtchn);
+ bool (*is_masked)(const struct domain *d, const struct evtchn *evtchn);
/*
* Is the port unavailable because it's still being cleaned up
* after being closed?
*/
- bool (*is_busy)(const struct domain *d, evtchn_port_t port);
+ bool (*is_busy)(const struct domain *d, const struct evtchn *evtchn);
int (*set_priority)(struct domain *d, struct evtchn *evtchn,
unsigned int priority);
void (*print_state)(struct domain *d, const struct evtchn *evtchn);
unsigned int vcpu_id,
struct evtchn *evtchn)
{
- d->evtchn_port_ops->set_pending(d->vcpu[vcpu_id], evtchn);
+ if ( evtchn_usable(evtchn) )
+ d->evtchn_port_ops->set_pending(d->vcpu[vcpu_id], evtchn);
}
static inline void evtchn_port_clear_pending(struct domain *d,
struct evtchn *evtchn)
{
- d->evtchn_port_ops->clear_pending(d, evtchn);
+ if ( evtchn_usable(evtchn) )
+ d->evtchn_port_ops->clear_pending(d, evtchn);
}
static inline void evtchn_port_unmask(struct domain *d,
struct evtchn *evtchn)
{
- d->evtchn_port_ops->unmask(d, evtchn);
+ if ( evtchn_usable(evtchn) )
+ d->evtchn_port_ops->unmask(d, evtchn);
}
-static inline bool evtchn_port_is_pending(const struct domain *d,
- evtchn_port_t port)
+static inline bool evtchn_is_pending(const struct domain *d,
+ const struct evtchn *evtchn)
{
- return d->evtchn_port_ops->is_pending(d, port);
+ return evtchn_usable(evtchn) && d->evtchn_port_ops->is_pending(d, evtchn);
}
-static inline bool evtchn_port_is_masked(const struct domain *d,
- evtchn_port_t port)
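+/*
+ * Lock-acquiring wrapper for callers which don't already hold the
+ * per-channel lock.
+ */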
+static inline bool evtchn_port_is_pending(struct domain *d, evtchn_port_t port)
{
- return d->evtchn_port_ops->is_masked(d, port);
+ struct evtchn *evtchn = evtchn_from_port(d, port);
+ bool rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&evtchn->lock, flags);
+ rc = evtchn_is_pending(d, evtchn);
+ spin_unlock_irqrestore(&evtchn->lock, flags);
+
+ return rc;
+}
+
+static inline bool evtchn_is_masked(const struct domain *d,
+ const struct evtchn *evtchn)
+{
+ return !evtchn_usable(evtchn) || d->evtchn_port_ops->is_masked(d, evtchn);
+}
+
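+/* Lock-acquiring wrapper, as for evtchn_port_is_pending() above. */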
+static inline bool evtchn_port_is_masked(struct domain *d, evtchn_port_t port)
+{
+ struct evtchn *evtchn = evtchn_from_port(d, port);
+ bool rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&evtchn->lock, flags);
+ rc = evtchn_is_masked(d, evtchn);
+ spin_unlock_irqrestore(&evtchn->lock, flags);
+
+ return rc;
}
-static inline bool evtchn_port_is_busy(const struct domain *d,
- evtchn_port_t port)
+static inline bool evtchn_is_busy(const struct domain *d,
+ const struct evtchn *evtchn)
{
return d->evtchn_port_ops->is_busy &&
- d->evtchn_port_ops->is_busy(d, port);
+ d->evtchn_port_ops->is_busy(d, evtchn);
}
static inline int evtchn_port_set_priority(struct domain *d,
{
if ( !d->evtchn_port_ops->set_priority )
return -ENOSYS;
+ if ( !evtchn_usable(evtchn) )
+ return -EACCES;
return d->evtchn_port_ops->set_priority(d, evtchn, priority);
}