return NULL;
}
+/*
+ * Allocate a given port and ensure all the buckets up to that port
+ * have been allocated.
+ *
+ * The last part is important because the rest of the event channel code
+ * relies on all the buckets up to d->valid_evtchns to be valid. However,
+ * event channels may be sparse when allocating the static evtchn port
+ * numbers, which are scattered in nature.
+ */
int evtchn_allocate_port(struct domain *d, evtchn_port_t port)
{
if ( port > d->max_evtchn_port || port >= max_evtchns(d) )
}
else
{
- struct evtchn *chn;
- struct evtchn **grp;
+ unsigned int alloc_port = read_atomic(&d->valid_evtchns);
- if ( !group_from_port(d, port) )
+ do
{
- grp = xzalloc_array(struct evtchn *, BUCKETS_PER_GROUP);
- if ( !grp )
- return -ENOMEM;
- group_from_port(d, port) = grp;
- }
+ struct evtchn *chn;
+ struct evtchn **grp;
- chn = alloc_evtchn_bucket(d, port);
- if ( !chn )
- return -ENOMEM;
- bucket_from_port(d, port) = chn;
+ if ( !group_from_port(d, alloc_port) )
+ {
+ grp = xzalloc_array(struct evtchn *, BUCKETS_PER_GROUP);
+ if ( !grp )
+ return -ENOMEM;
+ group_from_port(d, alloc_port) = grp;
+ }
- /*
- * d->valid_evtchns is used to check whether the bucket can be
- * accessed without the per-domain lock. Therefore,
- * d->valid_evtchns should be seen *after* the new bucket has
- * been setup.
- */
- smp_wmb();
- write_atomic(&d->valid_evtchns, d->valid_evtchns + EVTCHNS_PER_BUCKET);
+ chn = alloc_evtchn_bucket(d, alloc_port);
+ if ( !chn )
+ return -ENOMEM;
+ bucket_from_port(d, alloc_port) = chn;
+
+ /*
+ * d->valid_evtchns is used to check whether the bucket can be
+ * accessed without the per-domain lock. Therefore,
+ * d->valid_evtchns should be seen *after* the new bucket has
+ * been setup.
+ */
+ smp_wmb();
+ alloc_port += EVTCHNS_PER_BUCKET;
+ write_atomic(&d->valid_evtchns, alloc_port);
+ } while ( port >= alloc_port );
}
write_atomic(&d->active_evtchns, d->active_evtchns + 1);